repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
jason-vallet/graph-ryder-api | routes/utils.py | 1 | 2215 | from flask_restful import reqparse
from flask import make_response
import json
import sys
import shutil
parser = reqparse.RequestParser()
parser.add_argument('limit')
parser.add_argument('orderBy')
parser.add_argument('start')
parser.add_argument('end')
def addlimit():
args = parser.parse_args()
if args['limit']:
return " LIMIT %s" % args['limit']
else:
return ''
def addorderby():
args = parser.parse_args()
if args['orderBy']:
orderby = args['orderBy'].split(':')
if len(orderby) > 1:
return " ORDER BY n.%s %s" % (orderby[0], orderby[1])
else:
return " ORDER BY n.%s" % orderby[0]
else:
return ''
def addargs():
req = addorderby()
req += addlimit()
return req
def addTimeFilter():
args = parser.parse_args()
req = ''
if args['start']:
req += "WHERE %s <= p.timestamp " % args['start']
if args['start'] and args['end']:
req += "AND %s >= p.timestamp " % args['end']
if not args['start'] and args['end']:
req += "WHERE %s >= p.timestamp " % args['end']
return req
def makeResponse(result, code=200, file=False):
if file:
try:
# strict=False matters here: it allows control characters inside the JSON strings
result = json.load(open(result, 'r', encoding="utf-8"), strict=False)
except Exception as inst:
shutil.copyfile(result,'fichier_erreur.txt')
print(inst)
result = json.dumps(result)
response = make_response(result, code)
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,POST,OPTIONS')
response.headers.add('Content-Type', 'application/json')
return response
def sendFile(result, code=200):
response = make_response(result, code)
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,POST,OPTIONS')
response.headers.add('Content-Type', 'application/tlp')
return response
| lgpl-3.0 |
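A minimal usage sketch of the helpers above, assuming a flask_restful resource defined in the same module; `query_neo4j` is a hypothetical data-access helper, not part of this file:
from flask_restful import Resource

class NodeList(Resource):
    def get(self):
        # addargs() appends ORDER BY / LIMIT clauses built from the request args
        query = "MATCH (n:Node) RETURN n" + addargs()
        result = query_neo4j(query)  # hypothetical helper returning JSON-serializable data
        return makeResponse(result, 200)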
AOSPU/external_chromium_org | native_client_sdk/src/build_tools/nacl-mono-archive.py | 102 | 2289 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import sys
import tarfile
import buildbot_common
def main(args):
parser = optparse.OptionParser()
parser.add_option('--install-dir',
help='Install Directory',
dest='install_dir',
default='naclmono')
parser.add_option('--tar-path',
help='Tarfile path',
dest='tar_path',
default='naclmono_%pepperrev%.bz2')
parser.add_option('--upload-path',
help='Upload path (nativeclient-mirror/nacl/nacl_sdk/XXX)',
dest='upload_path',
default=None)
parser.add_option('--pepper-revision',
help='Pepper revision',
dest='pepper_revision',
default=None)
parser.add_option('--skip-upload',
help='Skips upload step',
action="store_true",
dest='skip_upload')
(options, args) = parser.parse_args(args[1:])
if not options.upload_path:
buildbot_common.ErrorExit('--upload-path is required')
if not options.pepper_revision:
buildbot_common.ErrorExit('--pepper-revision is required')
options.tar_path = options.tar_path.replace('%pepperrev%',
options.pepper_revision)
install_folders = ['bin', 'etc', 'include', 'lib', 'lib32', 'libarm', 'share']
buildbot_common.BuildStep('Archive Build')
tar_file = None
buildbot_common.RemoveFile(options.tar_path)
try:
tar_file = tarfile.open(options.tar_path, mode='w:bz2', dereference=True)
for subfolder in install_folders:
tar_file.add(os.path.join(options.install_dir, subfolder),
arcname=subfolder)
finally:
if tar_file:
tar_file.close()
if not options.skip_upload:
buildbot_common.Archive(os.path.basename(options.tar_path),
'nativeclient-mirror/nacl/nacl_sdk/%s' % options.upload_path,
cwd=os.path.dirname(os.path.abspath(options.tar_path)))
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
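An illustrative direct invocation of main() (argument values are made up; the real callers are Chromium buildbots). The '%pepperrev%' token in --tar-path is replaced with the --pepper-revision value, so this run would write naclmono_25.bz2:
import sys
# 'nacl_mono_archive' is an assumed import name for illustration only; the
# actual file name contains hyphens and is normally run as a script.
from nacl_mono_archive import main

sys.exit(main(['nacl-mono-archive.py',
               '--install-dir=naclmono',
               '--pepper-revision=25',
               '--upload-path=trunk.123456',
               '--skip-upload']))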
tanmaythakur/django | django/contrib/gis/db/backends/oracle/schema.py | 608 | 4050 | from django.contrib.gis.db.models.fields import GeometryField
from django.db.backends.oracle.schema import DatabaseSchemaEditor
from django.db.backends.utils import truncate_name
class OracleGISSchemaEditor(DatabaseSchemaEditor):
sql_add_geometry_metadata = ("""
INSERT INTO USER_SDO_GEOM_METADATA
("TABLE_NAME", "COLUMN_NAME", "DIMINFO", "SRID")
VALUES (
%(table)s,
%(column)s,
MDSYS.SDO_DIM_ARRAY(
MDSYS.SDO_DIM_ELEMENT('LONG', %(dim0)s, %(dim2)s, %(tolerance)s),
MDSYS.SDO_DIM_ELEMENT('LAT', %(dim1)s, %(dim3)s, %(tolerance)s)
),
%(srid)s
)""")
sql_add_spatial_index = 'CREATE INDEX %(index)s ON %(table)s(%(column)s) INDEXTYPE IS MDSYS.SPATIAL_INDEX'
sql_drop_spatial_index = 'DROP INDEX %(index)s'
sql_clear_geometry_table_metadata = 'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s'
sql_clear_geometry_field_metadata = (
'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s '
'AND COLUMN_NAME = %(column)s'
)
def __init__(self, *args, **kwargs):
super(OracleGISSchemaEditor, self).__init__(*args, **kwargs)
self.geometry_sql = []
def geo_quote_name(self, name):
return self.connection.ops.geo_quote_name(name)
def column_sql(self, model, field, include_default=False):
column_sql = super(OracleGISSchemaEditor, self).column_sql(model, field, include_default)
if isinstance(field, GeometryField):
db_table = model._meta.db_table
self.geometry_sql.append(
self.sql_add_geometry_metadata % {
'table': self.geo_quote_name(db_table),
'column': self.geo_quote_name(field.column),
'dim0': field._extent[0],
'dim1': field._extent[1],
'dim2': field._extent[2],
'dim3': field._extent[3],
'tolerance': field._tolerance,
'srid': field.srid,
}
)
if field.spatial_index:
self.geometry_sql.append(
self.sql_add_spatial_index % {
'index': self.quote_name(self._create_spatial_index_name(model, field)),
'table': self.quote_name(db_table),
'column': self.quote_name(field.column),
}
)
return column_sql
def create_model(self, model):
super(OracleGISSchemaEditor, self).create_model(model)
self.run_geometry_sql()
def delete_model(self, model):
super(OracleGISSchemaEditor, self).delete_model(model)
self.execute(self.sql_clear_geometry_table_metadata % {
'table': self.geo_quote_name(model._meta.db_table),
})
def add_field(self, model, field):
super(OracleGISSchemaEditor, self).add_field(model, field)
self.run_geometry_sql()
def remove_field(self, model, field):
if isinstance(field, GeometryField):
self.execute(self.sql_clear_geometry_field_metadata % {
'table': self.geo_quote_name(model._meta.db_table),
'column': self.geo_quote_name(field.column),
})
if field.spatial_index:
self.execute(self.sql_drop_spatial_index % {
'index': self.quote_name(self._create_spatial_index_name(model, field)),
})
super(OracleGISSchemaEditor, self).remove_field(model, field)
def run_geometry_sql(self):
for sql in self.geometry_sql:
self.execute(sql)
self.geometry_sql = []
def _create_spatial_index_name(self, model, field):
# Oracle doesn't allow object names > 30 characters. Use this scheme
# instead of self._create_index_name() for backwards compatibility.
return truncate_name('%s_%s_id' % (model._meta.db_table, field.column), 30)
| bsd-3-clause |
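A quick illustration of the 30-character limit handled by _create_spatial_index_name above (the identifier value is an example):
from django.db.backends.utils import truncate_name

index_name = truncate_name('geoapp_verylongplacename_geom_id', 30)
# truncate_name shortens over-long identifiers and appends a short hash,
# keeping the result within Oracle's 30-character object-name limit.
assert len(index_name) <= 30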
wemanuel/smry | ee/tests/function_test.py | 14 | 2866 | #!/usr/bin/env python
"""Tests for the ee.function module."""
import unittest
import ee
# A function to experiment on.
TEST_FUNC = ee.Function()
TEST_FUNC.getSignature = lambda: { # pylint: disable-msg=g-long-lambda
'description': 'Method description.',
'returns': 'Image',
'args': [
{
'type': 'Image',
'name': 'a',
'description': 'Arg A doc.'},
{
'type': 'Image',
'name': 'b',
'description': 'Arg B doc.',
'optional': True
}
]
}
EXPECTED_DOC = """Method description.
Args:
a: Arg A doc.
b: Arg B doc."""
class FunctionTest(unittest.TestCase):
def testNameArgs(self):
"""Verifies that Functions can convert positional to named arguments."""
self.assertEquals({}, TEST_FUNC.nameArgs([]))
self.assertEquals({'a': 42}, TEST_FUNC.nameArgs([42]))
self.assertEquals({'a': 42, 'b': 13}, TEST_FUNC.nameArgs([42, 13]))
self.assertEquals({'a': 3, 'b': 5}, TEST_FUNC.nameArgs([3], {'b': 5}))
self.assertRaisesWithRegexpMatch('Too many', TEST_FUNC.nameArgs, [1, 2, 3])
def testPromoteArgs(self):
"""Verifies that Functions can promote and verify their arguments."""
old_promoter = ee.Function._promoter
ee.Function._registerPromoter(lambda obj, type_name: [type_name, obj])
# Regular call.
self.assertEquals({'a': ['Image', 42], 'b': ['Image', 13]},
TEST_FUNC.promoteArgs({'a': 42, 'b': 13}))
# Allow missing optional argument.
self.assertEquals({'a': ['Image', 42]},
TEST_FUNC.promoteArgs({'a': 42}))
# Disallow unknown arguments.
self.assertRaisesWithRegexpMatch(
'Required argument', TEST_FUNC.promoteArgs, {})
# Disallow unknown arguments.
self.assertRaisesWithRegexpMatch(
'Unrecognized', TEST_FUNC.promoteArgs, {'a': 42, 'c': 13})
# Clean up.
ee.Function._registerPromoter(old_promoter)
def testCall(self):
"""Verifies the full function invocation flow."""
old_promoter = ee.Function._promoter
ee.Function._registerPromoter(lambda obj, type_name: [type_name, obj])
return_type, return_value = TEST_FUNC.call(42, 13)
self.assertEquals('Image', return_type)
self.assertEquals(TEST_FUNC, return_value.func)
self.assertEquals({'a': ['Image', 42], 'b': ['Image', 13]},
return_value.args)
# Clean up.
ee.Function._registerPromoter(old_promoter)
def testToString(self):
"""Verifies function docstring generation."""
self.assertEquals(EXPECTED_DOC, str(TEST_FUNC))
def assertRaisesWithRegexpMatch(self, msg, func, *args):
try:
func(*args)
except ee.EEException as e:
self.assertTrue(msg in str(e))
else:
self.fail('Expected an exception.')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
aam-at/tensorflow | tensorflow/python/kernel_tests/identity_n_op_py_test.py | 14 | 2624 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for IdentityNOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class IdentityNOpTest(test.TestCase):
def testInt32String_6(self):
value0, value1 = self.evaluate(
array_ops.identity_n([[1, 2, 3, 4, 5, 6],
[b"a", b"b", b"C", b"d", b"E", b"f", b"g"]]))
self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), value0)
self.assertAllEqual(
np.array([b"a", b"b", b"C", b"d", b"E", b"f", b"g"]), value1)
def testInt32_shapes(self):
inp0 = constant_op.constant([10, 20, 30, 40, 50, 60], shape=[2, 3])
inp1 = constant_op.constant([11, 21, 31, 41, 51, 61], shape=[3, 2])
inp2 = constant_op.constant(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], shape=[5, 3])
value0, value1, value2 = self.evaluate(
array_ops.identity_n([inp0, inp1, inp2]))
self.assertAllEqual(np.array([[10, 20, 30], [40, 50, 60]]), value0)
self.assertAllEqual(np.array([[11, 21], [31, 41], [51, 61]]), value1)
self.assertAllEqual(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]),
value2)
def testString(self):
source = [b"A", b"b", b"C", b"d", b"E", b"f"]
[value] = self.evaluate(array_ops.identity_n([source]))
self.assertAllEqual(source, value)
def testIdentityShape(self):
with self.cached_session():
shape = [2, 3]
array_2x3 = [[1, 2, 3], [6, 5, 4]]
tensor = constant_op.constant(array_2x3)
self.assertEqual(shape, tensor.get_shape())
self.assertEqual(shape, array_ops.identity_n([tensor])[0].get_shape())
self.assertEqual(shape, array_ops.identity_n([array_2x3])[0].get_shape())
if __name__ == "__main__":
test.main()
| apache-2.0 |
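A minimal standalone use of identity_n mirroring the ops exercised above, as a sketch assuming a TF 2.x eager-execution environment:
import numpy as np
import tensorflow as tf  # assumed TF 2.x with eager execution

x = tf.constant([1, 2, 3])
y = tf.constant([[1.0, 2.0], [3.0, 4.0]])
out_x, out_y = tf.identity_n([x, y])  # values and shapes are passed through unchanged
np.testing.assert_array_equal(out_x.numpy(), [1, 2, 3])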
hpe-storage/horizon-hpe-storage-ui | horizon_hpe_storage/storage_panel/config/tables.py | 2 | 12289 | # (c) Copyright [2015] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.utils.translation import ungettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils import safestring
from horizon import exceptions
from horizon import forms
from horizon import tables
import horizon_hpe_storage.api.keystone_api as keystone
import horizon_hpe_storage.api.barbican_api as barbican
class CreateEndpointAction(tables.LinkAction):
name = "create_endpoint"
verbose_name = _("Create Link")
url = "horizon:admin:hpe_storage:config:create_endpoint"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("volume", "volume:deep_link"),)
class EditEndpointAction(tables.LinkAction):
name = "edit_endpoint"
verbose_name = _("Edit Link")
url = "horizon:admin:hpe_storage:config:edit_endpoint"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("volume", "volume:deep_link"),)
class DeleteEndpointAction(tables.DeleteAction):
name = "delete_endpoint"
policy_rules = (("volume", "volume:deep_link"),)
keystone_api = keystone.KeystoneAPI()
barbican_api = barbican.BarbicanAPI()
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Link",
u"Delete Links",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Link",
u"Deleted Links",
count
)
def delete(self, request, service_id):
self.keystone_api.do_setup(request)
self.barbican_api.do_setup(self.keystone_api.get_session())
host = self.keystone_api.get_ssmc_service_name(service_id)
backend = host[5:] # remove 'ssmc-' prefix
# first delete the credentials
self.barbican_api.delete_ssmc_credentials(backend)
# now delete service and endpoint
self.keystone_api.delete_ssmc_endpoint(service_id)
# cached SSMC token is no longer valid
cache.delete('ssmc-link-' + host)
class EndpointsTable(tables.DataTable):
cinder_backend = tables.Column(
'backend',
verbose_name=_('Cinder Backend'),
form_field=forms.CharField(max_length=64))
ssmc_endpoint = tables.Column(
'endpoint',
verbose_name=_('SSMC Instance'),
form_field=forms.CharField(max_length=64))
access = tables.Column(
'username',
verbose_name=_('SSMC Login'),
form_field=forms.CharField(max_length=64))
def get_object_display(self, endpoint):
return endpoint['backend']
def get_object_id(self, endpoint):
return endpoint['id']
class Meta(object):
name = "endpoints"
verbose_name = _("Deep Links Between Horizon Volumes and HPE SSMC")
hidden_title = False
table_actions = (CreateEndpointAction,
DeleteEndpointAction,)
row_actions = (EditEndpointAction,
DeleteEndpointAction,)
class RegisterCinderAction(tables.LinkAction):
name = "register_cinder_node"
verbose_name = _("Register Cinder Node")
url = "horizon:admin:hpe_storage:config:register_cinder_node"
classes = ("ajax-modal",)
icon = "plus"
class DeleteCinderAction(tables.DeleteAction):
name = "cinder_delete"
keystone_api = keystone.KeystoneAPI()
barbican_api = barbican.BarbicanAPI()
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Cinder Node",
u"Delete Cinder Nodes",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Cinder Node",
u"Deleted Cinder Nodes",
count
)
def delete(self, request, obj_id):
try:
self.keystone_api.do_setup(request)
self.barbican_api.do_setup(self.keystone_api.get_session())
self.barbican_api.delete_node(
obj_id, barbican.CINDER_NODE_TYPE)
except Exception as ex:
redirect = reverse("horizon:admin:hpe_storage:index")
exceptions.handle(request,
_('Unable to delete Cinder node registration'),
redirect=redirect)
class ValidateAllCinderAction(tables.LinkAction):
name = "validate_all_cinder_nodes"
verbose_name = _("Validate SSH Credentials on All Cinder Nodes")
url = "horizon:admin:hpe_storage:config:validate_all_cinder_nodes"
classes = ("ajax-modal",)
keystone_api = keystone.KeystoneAPI()
barbican_api = barbican.BarbicanAPI()
def allowed(self, request, node=None):
self.keystone_api.do_setup(request)
self.barbican_api.do_setup(self.keystone_api.get_session())
return self.barbican_api.nodes_exist(
barbican.CINDER_NODE_TYPE)
class ValidateCinderAction(tables.LinkAction):
name = "validate_cinder_node"
verbose_name = _("Validate SSH Credentials")
url = "horizon:admin:hpe_storage:config:validate_cinder_node"
classes = ("ajax-modal",)
def get_link_url(self, node):
return reverse(self.url, args=[node['node_name']])
class EditCinderAction(tables.LinkAction):
name = "edit_cinder_node"
verbose_name = _("Edit Cinder Node")
url = "horizon:admin:hpe_storage:config:edit_cinder_node"
classes = ("ajax-modal",)
class ViewCinderSoftwareTestsAction(tables.LinkAction):
name = "sw_cinder_tests"
verbose_name = _("View Software Test list")
url = "horizon:admin:hpe_storage:config:software_tests:index"
classes = ("ajax-modal",)
def get_link_url(self, extra_spec=None):
return reverse(self.url, args=[barbican.CINDER_NODE_TYPE])
class TestResultsColumn(tables.Column):
# Customized column class.
def get_raw_data(self, node):
if 'validation_time' in node:
results = node['validation_time']
if results == 'Failed':
results = '<font color="red">FAIL</font>'
return safestring.mark_safe(results)
else:
return safestring.mark_safe('<font color="green">PASS</font>')
class CinderNodeTable(tables.DataTable):
test_name = tables.Column(
'node_name',
verbose_name=_('Name'),
form_field=forms.CharField(max_length=64))
node_ip = tables.Column(
'node_ip',
verbose_name=_('IP Address'),
form_field=forms.CharField(max_length=64))
host_name = tables.Column(
'host_name',
verbose_name=_('Host Name'),
form_field=forms.CharField(max_length=64))
ssh_user = tables.Column(
'ssh_name',
verbose_name=_('SSH Username'),
form_field=forms.CharField(max_length=64))
conf_file_path = tables.Column(
'config_path',
verbose_name=_('Config File Path'),
form_field=forms.CharField(max_length=64))
validated = TestResultsColumn(
'validation_time',
verbose_name=_('SSH Connection Test'))
def get_object_display(self, node):
return node['node_name']
def get_object_id(self, node):
return node['node_name']
class Meta(object):
name = "reg_cinder_nodes"
verbose_name = _("Cinder Nodes")
hidden_title = False
table_actions = (RegisterCinderAction,
ValidateAllCinderAction,
ViewCinderSoftwareTestsAction,
DeleteCinderAction,)
row_actions = (ValidateCinderAction,
EditCinderAction,
DeleteCinderAction)
class RegisterNovaAction(tables.LinkAction):
name = "register_nova_node"
verbose_name = _("Register Nova Node")
url = "horizon:admin:hpe_storage:config:register_nova_node"
classes = ("ajax-modal",)
icon = "plus"
class DeleteNovaAction(tables.DeleteAction):
name = "nova_delete"
keystone_api = keystone.KeystoneAPI()
barbican_api = barbican.BarbicanAPI()
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Nova Node",
u"Delete Nova Nodes",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Nova Node",
u"Deleted Nova Nodes",
count
)
def delete(self, request, obj_id):
self.keystone_api.do_setup(request)
self.barbican_api.do_setup(self.keystone_api.get_session())
self.barbican_api.delete_node(
obj_id, barbican.NOVA_NODE_TYPE)
class ValidateNovaAction(tables.LinkAction):
name = "validate_nova"
verbose_name = _("Validate SSH Credentials")
url = "horizon:admin:hpe_storage:config:validate_nova_node"
classes = ("ajax-modal",)
def get_link_url(self, node):
return reverse(self.url, args=[node['node_name']])
class ValidateAllNovaAction(tables.LinkAction):
name = "validate_all_nova_nodes"
verbose_name = _("Validate SSH Credentials on All Nova Nodes")
url = "horizon:admin:hpe_storage:config:validate_all_nova_nodes"
classes = ("ajax-modal",)
keystone_api = keystone.KeystoneAPI()
barbican_api = barbican.BarbicanAPI()
def allowed(self, request, node=None):
self.keystone_api.do_setup(request)
self.barbican_api.do_setup(self.keystone_api.get_session())
return self.barbican_api.nodes_exist(
barbican.NOVA_NODE_TYPE)
class ViewNovaSoftwareTestsAction(tables.LinkAction):
name = "sw_nova_tests"
verbose_name = _("View Software Test list")
url = "horizon:admin:hpe_storage:config:software_tests:index"
classes = ("ajax-modal",)
def get_link_url(self, extra_spec=None):
return reverse(self.url, args=[barbican.NOVA_NODE_TYPE])
class EditNovaAction(tables.LinkAction):
name = "edit_nova_node"
verbose_name = _("Edit Nova Node")
url = "horizon:admin:hpe_storage:config:edit_nova_node"
classes = ("ajax-modal",)
class ManageOSVariables(tables.LinkAction):
name = "manage_os_vars"
verbose_name = _("Manage OpenStack Variables")
url = "horizon:admin:hpe_storage:config:manage_os_vars"
classes = ("ajax-modal",)
class NovaNodeTable(tables.DataTable):
test_name = tables.Column(
'node_name',
verbose_name=_('Name'),
form_field=forms.CharField(max_length=64))
node_ip = tables.Column(
'node_ip',
verbose_name=_('IP Address'),
form_field=forms.CharField(max_length=64))
host_name = tables.Column(
'host_name',
verbose_name=_('Host Name'),
form_field=forms.CharField(max_length=64))
ssh_user = tables.Column(
'ssh_name',
verbose_name=_('SSH Username'),
form_field=forms.CharField(max_length=64))
validated = TestResultsColumn(
'validation_time',
verbose_name=_('SSH Connection Test'))
def get_object_display(self, node):
return node['node_name']
def get_object_id(self, node):
return node['node_name']
class Meta(object):
name = "reg_nova_nodes"
verbose_name = _("Nova Nodes (Optional)")
hidden_title = False
table_actions = (RegisterNovaAction,
ValidateAllNovaAction,
ViewNovaSoftwareTestsAction,
DeleteNovaAction)
row_actions = (ValidateNovaAction,
EditNovaAction,
ManageOSVariables,
DeleteNovaAction)
| apache-2.0 |
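A hedged sketch of how a panel view would typically wire up one of these tables; the node-fetching helper is assumed, not defined in this module:
from horizon import tables as horizon_tables

class CinderNodeView(horizon_tables.DataTableView):
    table_class = CinderNodeTable
    template_name = 'horizon/common/_data_table_view.html'

    def get_data(self):
        # hypothetical helper returning the list-of-dict rows the table expects
        return get_registered_cinder_nodes(self.request)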
joshuajan/odoo | addons/account_followup/wizard/__init__.py | 437 | 1076 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_followup_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chrisndodge/edx-platform | common/test/acceptance/tests/lms/test_oauth2.py | 7 | 3872 | # -*- coding: utf-8 -*-
"""Tests for OAuth2 permission delegation."""
from common.test.acceptance.pages.lms.oauth2_confirmation import OAuth2Confirmation
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from bok_choy.web_app_test import WebAppTest
from urlparse import urlparse, parse_qsl
class OAuth2PermissionDelegationTests(WebAppTest):
"""
Tests for acceptance/denial of permission delegation requests.
"""
def setUp(self):
super(OAuth2PermissionDelegationTests, self).setUp()
self.oauth_page = OAuth2Confirmation(self.browser)
def _auth(self):
"""Authenticate the user."""
AutoAuthPage(self.browser).visit()
def _qs(self, url):
"""Parse url's querystring into a dict."""
return dict(parse_qsl(urlparse(url).query))
def test_error_for_invalid_scopes(self):
"""Requests for invalid scopes throw errors."""
self._auth()
self.oauth_page.scopes = ('email', 'does-not-exist')
assert self.oauth_page.visit()
self.assertTrue(self.oauth_page.has_error)
self.assertIn('not a valid scope', self.oauth_page.error_message)
def test_cancelling_redirects(self):
"""
If you cancel the request, you're redirected to the redirect_url with a
denied query param.
"""
self._auth()
assert self.oauth_page.visit()
self.oauth_page.cancel()
def check_redirect():
"""
Checks that the page correctly redirects to a url with a
denied query param.
"""
query = self._qs(self.browser.current_url)
return 'access_denied' in query['error']
def check_redirect_chrome():
"""
Similar to `check_redirect`, but, due to a bug in ChromeDriver,
we use `self.browser.title` here instead of `self.browser.current_url`
"""
query = self._qs(self.browser.title)
return 'access_denied' in query['error']
# This redirects to an invalid URI. For Chrome, verify the title; otherwise verify current_url.
if self.browser.name == 'chrome':
self.oauth_page.wait_for(check_redirect_chrome, 'redirected to invalid URL (chrome)')
else:
self.oauth_page.wait_for(check_redirect, 'redirected to invalid URL')
def test_accepting_redirects(self):
"""
If you accept the request, you're redirected to the redirect_url with
the correct query params.
"""
self._auth()
assert self.oauth_page.visit()
# This redirects to an invalid URI.
self.oauth_page.confirm()
self.oauth_page.wait_for_element_absence(
'input[name=authorize]', 'Authorization button is not present'
)
def check_query_string():
"""
Checks that 'code' appears in the browser's current url.
"""
query = self._qs(self.browser.current_url)
return 'code' in query
def check_query_string_chrome():
"""
Similar to check_query_string, but, due to a bug in ChromeDriver,
when chrome is on an invalid URI, `self.browser.current_url` outputs
"data:text/html,chromewebdata" instead of the current URI.
However, since the query string is present in the `title`, we use
that for chrome.
"""
query = self._qs(self.browser.title)
return 'code' in query
if self.browser.name == 'chrome':
self.oauth_page.wait_for(
check_query_string_chrome, 'redirected with correct query parameters (chrome)'
)
else:
self.oauth_page.wait_for(
check_query_string, 'redirected with correct query parameters'
)
| agpl-3.0 |
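A small illustration of the query-string parsing used by _qs() above (the URL is an example value):
from urlparse import urlparse, parse_qsl  # Python 2, matching this test suite

url = 'http://localhost/callback?error=access_denied&state=abc'
query = dict(parse_qsl(urlparse(url).query))
assert query['error'] == 'access_denied'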
abzaloid/maps | django-project/lib/python2.7/site-packages/django/views/decorators/clickjacking.py | 550 | 1759 | from functools import wraps
from django.utils.decorators import available_attrs
def xframe_options_deny(view_func):
"""
Modifies a view function so its response has the X-Frame-Options HTTP
header set to 'DENY' as long as the response doesn't already have that
header set.
e.g.
@xframe_options_deny
def some_view(request):
...
"""
def wrapped_view(*args, **kwargs):
resp = view_func(*args, **kwargs)
if resp.get('X-Frame-Options', None) is None:
resp['X-Frame-Options'] = 'DENY'
return resp
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
def xframe_options_sameorigin(view_func):
"""
Modifies a view function so its response has the X-Frame-Options HTTP
header set to 'SAMEORIGIN' as long as the response doesn't already have
that header set.
e.g.
@xframe_options_sameorigin
def some_view(request):
...
"""
def wrapped_view(*args, **kwargs):
resp = view_func(*args, **kwargs)
if resp.get('X-Frame-Options', None) is None:
resp['X-Frame-Options'] = 'SAMEORIGIN'
return resp
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
def xframe_options_exempt(view_func):
"""
Modifies a view function by setting a response variable that instructs
XFrameOptionsMiddleware to NOT set the X-Frame-Options HTTP header.
e.g.
@xframe_options_exempt
def some_view(request):
...
"""
def wrapped_view(*args, **kwargs):
resp = view_func(*args, **kwargs)
resp.xframe_options_exempt = True
return resp
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
| mit |
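A minimal usage sketch of the decorators above (view bodies are illustrative only):
from django.http import HttpResponse

@xframe_options_deny
def account_settings(request):
    return HttpResponse("settings")   # response will carry X-Frame-Options: DENY

@xframe_options_exempt
def embeddable_widget(request):
    return HttpResponse("widget")     # middleware will skip the X-Frame-Options header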
adit-chandra/tensorflow | tensorflow/python/keras/layers/convolutional.py | 3 | 114187 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras convolution layers and image transformation layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
# imports for backwards namespace compatibility
# pylint: disable=unused-import
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
# pylint: enable=unused-import
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.tf_export import keras_export
class Conv(Layer):
"""Abstract N-D convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
length of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
name: A string, the name of the layer.
"""
def __init__(self, rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Conv, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
self.rank = rank
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(
kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if (self.padding == 'causal' and not isinstance(self,
(Conv1D, SeparableConv1D))):
raise ValueError('Causal padding is only supported for `Conv1D` '
'and `SeparableConv1D`.')
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(
dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=self.rank + 2)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_channel = self._get_input_channel(input_shape)
kernel_shape = self.kernel_size + (input_channel, self.filters)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
channel_axis = self._get_channel_axis()
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_channel})
self._build_conv_op_input_shape = input_shape
self._build_input_channel = input_channel
self._padding_op = self._get_padding_op()
self._conv_op_data_format = conv_utils.convert_data_format(
self.data_format, self.rank + 2)
self._convolution_op = nn_ops.Convolution(
input_shape,
filter_shape=self.kernel.shape,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self._padding_op,
data_format=self._conv_op_data_format)
self.built = True
def call(self, inputs):
if self._recreate_conv_op(inputs):
self._convolution_op = nn_ops.Convolution(
inputs.get_shape(),
filter_shape=self.kernel.shape,
dilation_rate=self.dilation_rate,
strides=self.strides,
padding=self._padding_op,
data_format=self._conv_op_data_format)
# Apply causal padding to inputs for Conv1D.
if self.padding == 'causal' and self.__class__.__name__ == 'Conv1D':
inputs = array_ops.pad(inputs, self._compute_causal_padding())
outputs = self._convolution_op(inputs, self.kernel)
if self.use_bias:
if self.data_format == 'channels_first':
if self.rank == 1:
# nn.bias_add does not accept a 1D input tensor.
bias = array_ops.reshape(self.bias, (1, self.filters, 1))
outputs += bias
else:
outputs = nn.bias_add(outputs, self.bias, data_format='NCHW')
else:
outputs = nn.bias_add(outputs, self.bias, data_format='NHWC')
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0]] + new_space +
[self.filters])
else:
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0], self.filters] +
new_space)
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Conv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _compute_causal_padding(self):
"""Calculates padding for 'causal' option for 1-d conv layers."""
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if self.data_format == 'channels_last':
causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
return causal_padding
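  # Worked example (illustrative values): with kernel_size=(3,), dilation_rate=(2,)
  # and channels_last data, left_pad = 2 * (3 - 1) = 4, so the padding is
  # [[0, 0], [4, 0], [0, 0]] -- zeros are added only on the left (past) side
  # of the time dimension.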
def _get_channel_axis(self):
if self.data_format == 'channels_first':
return 1
else:
return -1
def _get_input_channel(self, input_shape):
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
return int(input_shape[channel_axis])
def _get_padding_op(self):
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
if not isinstance(op_padding, (list, tuple)):
op_padding = op_padding.upper()
return op_padding
def _recreate_conv_op(self, inputs):
"""Recreate conv_op if necessary.
Check if the input_shape in call() is different from that in build().
For the values that are not None, if they are different, recreate
the _convolution_op to avoid the stateful behavior.
Args:
inputs: The input data to call() method.
Returns:
`True` or `False` to indicate whether to recreate the conv_op.
"""
call_input_shape = inputs.get_shape()
for axis in range(1, len(call_input_shape)):
if (call_input_shape[axis] is not None
and self._build_conv_op_input_shape[axis] is not None
and call_input_shape[axis] != self._build_conv_op_input_shape[axis]):
return True
return False
@keras_export('keras.layers.Conv1D', 'keras.layers.Convolution1D')
class Conv1D(Conv):
"""1D convolution layer (e.g. temporal convolution).
This layer creates a convolution kernel that is convolved
with the layer input over a single spatial (or temporal) dimension
to produce a tensor of outputs.
If `use_bias` is True, a bias vector is created and added to the outputs.
Finally, if `activation` is not `None`,
it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide an `input_shape` argument
(tuple of integers or `None`, e.g.
`(10, 128)` for sequences of 10 vectors of 128-dimensional vectors,
or `(None, 128)` for variable-length sequences of 128-dimensional vectors.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive).
`"causal"` results in causal (dilated) convolutions, e.g. output[t]
does not depend on input[t+1:]. Useful when modeling temporal data
where the model should not violate the temporal order.
See [WaveNet: A Generative Model for Raw Audio, section
2.1](https://arxiv.org/abs/1609.03499).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
dilation_rate: an integer or tuple/list of a single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Examples:
```python
# Small convolutional model for 128-length vectors with 6 timesteps
# model.input_shape == (None, 6, 128)
model = Sequential()
model.add(Conv1D(32, 3,
activation='relu',
input_shape=(6, 128)))
# now: model.output_shape == (None, 4, 32)
```
Input shape:
3D tensor with shape: `(batch_size, steps, input_dim)`
Output shape:
3D tensor with shape: `(batch_size, new_steps, filters)`
`steps` value might have changed due to padding or strides.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv1D, self).__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
@keras_export('keras.layers.Conv2D', 'keras.layers.Convolution2D')
class Conv2D(Conv):
"""2D convolution layer (e.g. spatial convolution over images).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
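# Illustrative Conv2D usage (shapes are example values only):
#   layer = Conv2D(16, (3, 3), padding='same', input_shape=(28, 28, 1))
#   # applied to a (batch, 28, 28, 1) input, 'same' padding with stride 1
#   # keeps the spatial size, giving a (batch, 28, 28, 16) output.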
@keras_export('keras.layers.Conv3D', 'keras.layers.Convolution3D')
class Conv3D(Conv):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
with the layer input to produce a tensor of
outputs. If `use_bias` is True,
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 1)` for 128x128x128 volumes
with a single channel,
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along each spatial
dimension.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
5D tensor with shape:
`(samples, channels, conv_dim1, conv_dim2, conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(samples, conv_dim1, conv_dim2, conv_dim3, channels)` if
data_format='channels_last'.
Output shape:
5D tensor with shape:
`(samples, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` if
data_format='channels_first'
or 5D tensor with shape:
`(samples, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` if
data_format='channels_last'.
`new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have
changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv3D, self).__init__(
rank=3,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
@keras_export('keras.layers.Conv2DTranspose',
'keras.layers.Convolution2DTranspose')
class Conv2DTranspose(Conv2D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
output_padding: An integer or tuple/list of 2 integers,
specifying the amount of padding along the height and width
of the output tensor.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 2, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 4:
raise ValueError('Inputs should have rank 4. Received input shape: ' +
str(input_shape))
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
h_axis, w_axis = 2, 3
else:
h_axis, w_axis = 1, 2
height, width = inputs_shape[h_axis], inputs_shape[w_axis]
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_height, out_width)
else:
output_shape = (batch_size, out_height, out_width, self.filters)
output_shape_tensor = array_ops.stack(output_shape)
outputs = backend.conv2d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, h_axis, w_axis = 1, 2, 3
else:
c_axis, h_axis, w_axis = 3, 1, 2
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv2DTranspose, self).get_config()
config['output_padding'] = self.output_padding
return config
@keras_export('keras.layers.Conv3DTranspose',
'keras.layers.Convolution3DTranspose')
class Conv3DTranspose(Conv3D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 128, 3)` for a 128x128x128 volume with 3 channels
if `data_format="channels_last"`.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth, height
and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
output_padding: An integer or tuple/list of 3 integers,
specifying the amount of padding along the depth, height, and
width.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
      (i.e. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
5D tensor with shape:
`(batch, channels, depth, rows, cols)` if data_format='channels_first'
or 5D tensor with shape:
`(batch, depth, rows, cols, channels)` if data_format='channels_last'.
Output shape:
5D tensor with shape:
`(batch, filters, new_depth, new_rows, new_cols)` if
data_format='channels_first'
or 5D tensor with shape:
`(batch, new_depth, new_rows, new_cols, filters)` if
data_format='channels_last'.
    `depth`, `rows` and `cols` values might have changed due to padding.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
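  Example:
  An illustrative sketch; shapes assume `strides=(2, 2, 2)`, `padding='same'`
  and the default `channels_last` data format.
  ```python
  import tensorflow as tf
  x = tf.keras.Input(shape=(8, 8, 8, 4))
  # Each spatial dimension is doubled by the stride-2 transposed convolution.
  y = tf.keras.layers.Conv3DTranspose(
      2, (3, 3, 3), strides=(2, 2, 2), padding='same')(x)
  # y.shape == (None, 16, 16, 16, 2)
  ```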
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
output_padding=None,
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv3DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 3, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 5:
      raise ValueError('Inputs should have rank 5. Received input shape: ' +
                       str(input_shape))
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined, found None: ' + str(input_shape))
input_dim = int(input_shape[channel_axis])
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.input_spec = InputSpec(ndim=5, axes={channel_axis: input_dim})
self.kernel = self.add_weight(
'kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
'bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
d_axis, h_axis, w_axis = 2, 3, 4
else:
d_axis, h_axis, w_axis = 1, 2, 3
depth = inputs_shape[d_axis]
height = inputs_shape[h_axis]
width = inputs_shape[w_axis]
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_depth = conv_utils.deconv_output_length(depth,
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d)
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h)
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w)
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_depth, out_height,
out_width)
strides = (1, 1, stride_d, stride_h, stride_w)
else:
output_shape = (batch_size, out_depth, out_height, out_width,
self.filters)
strides = (1, stride_d, stride_h, stride_w, 1)
output_shape_tensor = array_ops.stack(output_shape)
outputs = nn.conv3d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides,
data_format=conv_utils.convert_data_format(self.data_format, ndim=5),
padding=self.padding.upper())
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, d_axis, h_axis, w_axis = 1, 2, 3, 4
else:
c_axis, d_axis, h_axis, w_axis = 4, 1, 2, 3
kernel_d, kernel_h, kernel_w = self.kernel_size
stride_d, stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_d = out_pad_h = out_pad_w = None
else:
out_pad_d, out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[d_axis] = conv_utils.deconv_output_length(
output_shape[d_axis],
kernel_d,
padding=self.padding,
output_padding=out_pad_d,
stride=stride_d)
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h)
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w)
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv3DTranspose, self).get_config()
config.pop('dilation_rate')
config['output_padding'] = self.output_padding
return config
class SeparableConv(Conv):
"""Abstract base layer for separable nD convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A tuple or list of integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
strides: A tuple or list of integers specifying the strides
of the convolution. Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
name: A string, the name of the layer.
"""
def __init__(self,
rank,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(SeparableConv, self).__init__(
rank=rank,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
bias_initializer=initializers.get(bias_initializer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.pointwise_initializer = initializers.get(pointwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.pointwise_regularizer = regularizers.get(pointwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.pointwise_constraint = constraints.get(pointwise_constraint)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
depthwise_kernel_shape = self.kernel_size + (input_dim,
self.depth_multiplier)
pointwise_kernel_shape = (
1,) * self.rank + (self.depth_multiplier * input_dim, self.filters)
self.depthwise_kernel = self.add_weight(
name='depthwise_kernel',
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint,
trainable=True,
dtype=self.dtype)
self.pointwise_kernel = self.add_weight(
name='pointwise_kernel',
shape=pointwise_kernel_shape,
initializer=self.pointwise_initializer,
regularizer=self.pointwise_regularizer,
constraint=self.pointwise_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
raise NotImplementedError
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'depth_multiplier':
self.depth_multiplier,
'dilation_rate':
self.dilation_rate,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'depthwise_initializer':
initializers.serialize(self.depthwise_initializer),
'pointwise_initializer':
initializers.serialize(self.pointwise_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'depthwise_regularizer':
regularizers.serialize(self.depthwise_regularizer),
'pointwise_regularizer':
regularizers.serialize(self.pointwise_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'depthwise_constraint':
constraints.serialize(self.depthwise_constraint),
'pointwise_constraint':
constraints.serialize(self.pointwise_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint)
}
base_config = super(SeparableConv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SeparableConv1D',
'keras.layers.SeparableConvolution1D')
class SeparableConv1D(SeparableConv):
"""Depthwise separable 1D convolution.
This layer performs a depthwise convolution that acts separately on
channels, followed by a pointwise convolution that mixes channels.
If `use_bias` is True and a bias initializer is provided,
it adds a bias vector to the output.
It then optionally applies an activation function to produce the final output.
Arguments:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: A single integer specifying the spatial
dimensions of the filters.
strides: A single integer specifying the strides
of the convolution.
Specifying any `stride` value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
dilation_rate: A single integer, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
depthwise_initializer: An initializer for the depthwise convolution kernel.
pointwise_initializer: An initializer for the pointwise convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
depthwise_regularizer: Optional regularizer for the depthwise
convolution kernel.
pointwise_regularizer: Optional regularizer for the pointwise
convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
depthwise_constraint: Optional projection function to be applied to the
depthwise kernel after being updated by an `Optimizer` (e.g. used for
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
pointwise_constraint: Optional projection function to be applied to the
pointwise kernel after being updated by an `Optimizer`.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
name: A string, the name of the layer.
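  Example:
  An illustrative sketch; the output length assumes `padding='same'`, the
  default stride of 1, and `channels_last` data.
  ```python
  import tensorflow as tf
  x = tf.keras.Input(shape=(100, 32))  # (steps, features)
  # Depthwise conv over each of the 32 channels, then a pointwise mix to 64.
  y = tf.keras.layers.SeparableConv1D(64, 5, padding='same')(x)
  # y.shape == (None, 100, 64)
  ```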
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs):
super(SeparableConv1D, self).__init__(
rank=1,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activations.get(activation),
use_bias=use_bias,
depthwise_initializer=initializers.get(depthwise_initializer),
pointwise_initializer=initializers.get(pointwise_initializer),
bias_initializer=initializers.get(bias_initializer),
depthwise_regularizer=regularizers.get(depthwise_regularizer),
pointwise_regularizer=regularizers.get(pointwise_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
if self.padding == 'causal':
inputs = array_ops.pad(inputs, self._compute_causal_padding())
if self.data_format == 'channels_last':
strides = (1,) + self.strides * 2 + (1,)
spatial_start_dim = 1
else:
strides = (1, 1) + self.strides * 2
spatial_start_dim = 2
# Explicitly broadcast inputs and kernels to 4D.
# TODO(fchollet): refactor when a native separable_conv1d op is available.
inputs = array_ops.expand_dims(inputs, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(self.depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(self.pointwise_kernel, 0)
dilation_rate = (1,) + self.dilation_rate
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
outputs = nn.separable_conv2d(
inputs,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=op_padding.upper(),
rate=dilation_rate,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
outputs = array_ops.squeeze(outputs, [spatial_start_dim])
if self.activation is not None:
return self.activation(outputs)
return outputs
@keras_export('keras.layers.SeparableConv2D',
'keras.layers.SeparableConvolution2D')
class SeparableConv2D(SeparableConv):
"""Depthwise separable 2D convolution.
  Separable convolutions consist of first performing
a depthwise spatial convolution
(which acts on each input channel separately)
followed by a pointwise convolution which mixes together the resulting
output channels. The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
Intuitively, separable convolutions can be understood as
a way to factorize a convolution kernel into two smaller kernels,
or as an extreme version of an Inception block.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
activation: Activation function to use.
If you don't specify anything, no activation is applied
      (i.e. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix.
pointwise_initializer: Initializer for the pointwise kernel matrix.
bias_initializer: Initializer for the bias vector.
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix.
pointwise_regularizer: Regularizer function applied to
the pointwise kernel matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix.
pointwise_constraint: Constraint function applied to
the pointwise kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
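  Example:
  An illustrative sketch; shapes assume `padding='same'` and the default
  `channels_last` data format.
  ```python
  import tensorflow as tf
  x = tf.keras.Input(shape=(64, 64, 3))
  # Depthwise 3x3 conv per input channel followed by a 1x1 pointwise conv.
  y = tf.keras.layers.SeparableConv2D(32, (3, 3), padding='same',
                                      activation='relu')(x)
  # y.shape == (None, 64, 64, 32)
  ```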
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
depth_multiplier=1,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
pointwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
pointwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
pointwise_constraint=None,
bias_constraint=None,
**kwargs):
super(SeparableConv2D, self).__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
depth_multiplier=depth_multiplier,
activation=activations.get(activation),
use_bias=use_bias,
depthwise_initializer=initializers.get(depthwise_initializer),
pointwise_initializer=initializers.get(pointwise_initializer),
bias_initializer=initializers.get(bias_initializer),
depthwise_regularizer=regularizers.get(depthwise_regularizer),
pointwise_regularizer=regularizers.get(pointwise_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
depthwise_constraint=constraints.get(depthwise_constraint),
pointwise_constraint=constraints.get(pointwise_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
def call(self, inputs):
# Apply the actual ops.
if self.data_format == 'channels_last':
strides = (1,) + self.strides + (1,)
else:
strides = (1, 1) + self.strides
outputs = nn.separable_conv2d(
inputs,
self.depthwise_kernel,
self.pointwise_kernel,
strides=strides,
padding=self.padding.upper(),
rate=self.dilation_rate,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
@keras_export('keras.layers.DepthwiseConv2D')
class DepthwiseConv2D(Conv2D):
"""Depthwise separable 2D convolution.
  Depthwise separable convolutions consist of performing
  just the first step of a separable convolution: the depthwise spatial
  convolution (which acts on each input channel separately).
The `depth_multiplier` argument controls how many
output channels are generated per input channel in the depthwise step.
Arguments:
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `'valid'` or `'same'` (case-insensitive).
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be 'channels_last'.
activation: Activation function to use.
If you don't specify anything, no activation is applied
      (i.e. 'linear' activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix.
bias_initializer: Initializer for the bias vector.
depthwise_regularizer: Regularizer function applied to
the depthwise kernel matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its 'activation').
depthwise_constraint: Constraint function applied to
the depthwise kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`[batch, channels, rows, cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch, rows, cols, channels]` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`[batch, filters, new_rows, new_cols]` if data_format='channels_first'
or 4D tensor with shape:
`[batch, new_rows, new_cols, filters]` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
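  Example:
  An illustrative sketch; the number of output channels is
  `input_channels * depth_multiplier`, and the shapes assume `padding='same'`
  with `channels_last` data.
  ```python
  import tensorflow as tf
  x = tf.keras.Input(shape=(28, 28, 3))
  # Each of the 3 input channels is convolved with 2 filters of its own.
  y = tf.keras.layers.DepthwiseConv2D((3, 3), depth_multiplier=2,
                                      padding='same')(x)
  # y.shape == (None, 28, 28, 6)
  ```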
"""
def __init__(self,
kernel_size,
strides=(1, 1),
padding='valid',
depth_multiplier=1,
data_format=None,
activation=None,
use_bias=True,
depthwise_initializer='glorot_uniform',
bias_initializer='zeros',
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
**kwargs):
super(DepthwiseConv2D, self).__init__(
filters=None,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
bias_constraint=bias_constraint,
**kwargs)
self.depth_multiplier = depth_multiplier
self.depthwise_initializer = initializers.get(depthwise_initializer)
self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
self.depthwise_constraint = constraints.get(depthwise_constraint)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape):
if len(input_shape) < 4:
      raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
                       'Received input shape: ' + str(input_shape))
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs to '
'`DepthwiseConv2D` '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
depthwise_kernel_shape = (self.kernel_size[0],
self.kernel_size[1],
input_dim,
self.depth_multiplier)
self.depthwise_kernel = self.add_weight(
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
name='depthwise_kernel',
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(input_dim * self.depth_multiplier,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
self.built = True
def call(self, inputs):
outputs = backend.depthwise_conv2d(
inputs,
self.depthwise_kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format)
if self.use_bias:
outputs = backend.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
out_filters = input_shape[1] * self.depth_multiplier
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
out_filters = input_shape[3] * self.depth_multiplier
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding,
self.strides[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding,
self.strides[1])
if self.data_format == 'channels_first':
return (input_shape[0], out_filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, out_filters)
def get_config(self):
config = super(DepthwiseConv2D, self).get_config()
config.pop('filters')
config.pop('kernel_initializer')
config.pop('kernel_regularizer')
config.pop('kernel_constraint')
config['depth_multiplier'] = self.depth_multiplier
config['depthwise_initializer'] = initializers.serialize(
self.depthwise_initializer)
config['depthwise_regularizer'] = regularizers.serialize(
self.depthwise_regularizer)
config['depthwise_constraint'] = constraints.serialize(
self.depthwise_constraint)
return config
@keras_export('keras.layers.UpSampling1D')
class UpSampling1D(Layer):
"""Upsampling layer for 1D inputs.
Repeats each temporal step `size` times along the time axis.
Arguments:
size: Integer. Upsampling factor.
Input shape:
3D tensor with shape: `(batch, steps, features)`.
Output shape:
3D tensor with shape: `(batch, upsampled_steps, features)`.
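  Example:
  An illustrative sketch of the repeat behaviour along the time axis.
  ```python
  import tensorflow as tf
  x = tf.keras.Input(shape=(3, 8))
  # Each of the 3 timesteps is repeated twice.
  y = tf.keras.layers.UpSampling1D(size=2)(x)
  # y.shape == (None, 6, 8)
  ```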
"""
def __init__(self, size=2, **kwargs):
super(UpSampling1D, self).__init__(**kwargs)
self.size = int(size)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
size = self.size * input_shape[1] if input_shape[1] is not None else None
return tensor_shape.TensorShape([input_shape[0], size, input_shape[2]])
def call(self, inputs):
output = backend.repeat_elements(inputs, self.size, axis=1)
return output
def get_config(self):
config = {'size': self.size}
base_config = super(UpSampling1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.UpSampling2D')
class UpSampling2D(Layer):
"""Upsampling layer for 2D inputs.
Repeats the rows and columns of the data
by `size[0]` and `size[1]` respectively.
Arguments:
size: Int, or tuple of 2 integers.
The upsampling factors for rows and columns.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
interpolation: A string, one of `nearest` or `bilinear`.
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, upsampled_rows, upsampled_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, upsampled_rows, upsampled_cols)`
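  Example:
  An illustrative sketch; shapes assume the default `channels_last` format and
  `interpolation='nearest'`.
  ```python
  import tensorflow as tf
  x = tf.keras.Input(shape=(10, 10, 3))
  # Rows and columns are each repeated twice (nearest-neighbour upsampling).
  y = tf.keras.layers.UpSampling2D(size=(2, 2))(x)
  # y.shape == (None, 20, 20, 3)
  ```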
"""
def __init__(self,
size=(2, 2),
data_format=None,
interpolation='nearest',
**kwargs):
super(UpSampling2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
if interpolation not in {'nearest', 'bilinear'}:
raise ValueError('`interpolation` argument should be one of `"nearest"` '
'or `"bilinear"`.')
self.interpolation = interpolation
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[
2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[
3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], height, width])
else:
height = self.size[0] * input_shape[
1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[
2] if input_shape[2] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], height, width, input_shape[3]])
def call(self, inputs):
return backend.resize_images(
inputs, self.size[0], self.size[1], self.data_format,
interpolation=self.interpolation)
def get_config(self):
config = {
'size': self.size,
'data_format': self.data_format,
'interpolation': self.interpolation
}
base_config = super(UpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.UpSampling3D')
class UpSampling3D(Layer):
"""Upsampling layer for 3D inputs.
Repeats the 1st, 2nd and 3rd dimensions
of the data by `size[0]`, `size[1]` and `size[2]` respectively.
Arguments:
size: Int, or tuple of 3 integers.
The upsampling factors for dim1, dim2 and dim3.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, dim1, dim2, dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, dim1, dim2, dim3)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`
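  Example:
  An illustrative sketch; shapes assume the default `channels_last` format.
  ```python
  import tensorflow as tf
  x = tf.keras.Input(shape=(2, 3, 4, 1))
  # Each spatial dimension is repeated by the corresponding factor in `size`.
  y = tf.keras.layers.UpSampling3D(size=(2, 2, 2))(x)
  # y.shape == (None, 4, 6, 8, 1)
  ```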
"""
def __init__(self, size=(2, 2, 2), data_format=None, **kwargs):
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 3, 'size')
self.input_spec = InputSpec(ndim=5)
super(UpSampling3D, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
dim1 = self.size[0] * input_shape[
2] if input_shape[2] is not None else None
dim2 = self.size[1] * input_shape[
3] if input_shape[3] is not None else None
dim3 = self.size[2] * input_shape[
4] if input_shape[4] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
else:
dim1 = self.size[0] * input_shape[
1] if input_shape[1] is not None else None
dim2 = self.size[1] * input_shape[
2] if input_shape[2] is not None else None
dim3 = self.size[2] * input_shape[
3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return backend.resize_volumes(
inputs, self.size[0], self.size[1], self.size[2], self.data_format)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
base_config = super(UpSampling3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding1D')
class ZeroPadding1D(Layer):
"""Zero-padding layer for 1D input (e.g. temporal sequence).
Arguments:
    padding: Int, or tuple of int (length 2).
- If int:
How many zeros to add at the beginning and end of
the padding dimension (axis 1).
- If tuple of int (length 2):
How many zeros to add at the beginning and at the end of
the padding dimension (`(left_pad, right_pad)`).
Input shape:
3D tensor with shape `(batch, axis_to_pad, features)`
Output shape:
3D tensor with shape `(batch, padded_axis, features)`
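  Example:
  An illustrative sketch of symmetric padding along the time axis.
  ```python
  import tensorflow as tf
  x = tf.keras.Input(shape=(5, 4))
  # Two zeros are added at the beginning and two at the end of axis 1.
  y = tf.keras.layers.ZeroPadding1D(padding=2)(x)
  # y.shape == (None, 9, 4)
  ```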
"""
def __init__(self, padding=1, **kwargs):
super(ZeroPadding1D, self).__init__(**kwargs)
self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
if input_shape[1] is not None:
length = input_shape[1] + self.padding[0] + self.padding[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
return backend.temporal_padding(inputs, padding=self.padding)
def get_config(self):
config = {'padding': self.padding}
base_config = super(ZeroPadding1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding2D')
class ZeroPadding2D(Layer):
"""Zero-padding layer for 2D input (e.g. picture).
This layer can add rows and columns of zeros
at the top, bottom, left and right side of an image tensor.
Arguments:
padding: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric padding
is applied to height and width.
- If tuple of 2 ints:
interpreted as two different
symmetric padding values for height and width:
`(symmetric_height_pad, symmetric_width_pad)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_pad, bottom_pad), (left_pad, right_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, padded_rows, padded_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, padded_rows, padded_cols)`
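  Example:
  An illustrative sketch of asymmetric padding; shapes assume the default
  `channels_last` data format.
  ```python
  import tensorflow as tf
  x = tf.keras.Input(shape=(28, 28, 1))
  # ((top, bottom), (left, right)) padding of the rows and columns.
  y = tf.keras.layers.ZeroPadding2D(padding=((1, 2), (3, 4)))(x)
  # y.shape == (None, 31, 35, 1)
  ```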
"""
def __init__(self, padding=(1, 1), data_format=None, **kwargs):
super(ZeroPadding2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding))
elif hasattr(padding, '__len__'):
if len(padding) != 2:
raise ValueError('`padding` should have two elements. '
'Found: ' + str(padding))
height_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
width_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
self.padding = (height_padding, width_padding)
else:
raise ValueError('`padding` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_pad, symmetric_width_pad), '
'or a tuple of 2 tuples of 2 ints '
'((top_pad, bottom_pad), (left_pad, right_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
rows = input_shape[2] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[3] is not None:
cols = input_shape[3] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], rows, cols])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
rows = input_shape[1] + self.padding[0][0] + self.padding[0][1]
else:
rows = None
if input_shape[2] is not None:
cols = input_shape[2] + self.padding[1][0] + self.padding[1][1]
else:
cols = None
return tensor_shape.TensorShape(
[input_shape[0], rows, cols, input_shape[3]])
def call(self, inputs):
return backend.spatial_2d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ZeroPadding3D')
class ZeroPadding3D(Layer):
"""Zero-padding layer for 3D data (spatial or spatio-temporal).
Arguments:
padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
    - If int: the same symmetric padding
      is applied to all three spatial dimensions.
    - If tuple of 3 ints:
      interpreted as three different
      symmetric padding values for the three spatial dimensions:
      `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
- If tuple of 3 tuples of 2 ints:
interpreted as
`((left_dim1_pad, right_dim1_pad), (left_dim2_pad,
right_dim2_pad), (left_dim3_pad, right_dim3_pad))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_axis_to_pad, second_axis_to_pad,
third_axis_to_pad)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
        `(batch, first_padded_axis, second_padded_axis, third_padded_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_padded_axis, second_padded_axis,
          third_padded_axis)`
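  Example:
  An illustrative sketch; shapes assume the default `channels_last` format.
  ```python
  import tensorflow as tf
  x = tf.keras.Input(shape=(4, 4, 4, 2))
  # One zero is added on each side of every spatial dimension.
  y = tf.keras.layers.ZeroPadding3D(padding=1)(x)
  # y.shape == (None, 6, 6, 6, 2)
  ```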
"""
def __init__(self, padding=(1, 1, 1), data_format=None, **kwargs):
super(ZeroPadding3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(padding, int):
self.padding = ((padding, padding), (padding, padding), (padding,
padding))
elif hasattr(padding, '__len__'):
if len(padding) != 3:
raise ValueError('`padding` should have 3 elements. '
'Found: ' + str(padding))
dim1_padding = conv_utils.normalize_tuple(padding[0], 2,
'1st entry of padding')
dim2_padding = conv_utils.normalize_tuple(padding[1], 2,
'2nd entry of padding')
dim3_padding = conv_utils.normalize_tuple(padding[2], 2,
'3rd entry of padding')
self.padding = (dim1_padding, dim2_padding, dim3_padding)
else:
raise ValueError(
'`padding` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_pad, right_dim1_pad),'
' (left_dim2_pad, right_dim2_pad),'
          ' (left_dim3_pad, right_dim3_pad)). '
'Found: ' + str(padding))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] + 2 * self.padding[0][0]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] + 2 * self.padding[1][0]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] + 2 * self.padding[2][0]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] + 2 * self.padding[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] + 2 * self.padding[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] + 2 * self.padding[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
def call(self, inputs):
return backend.spatial_3d_padding(
inputs, padding=self.padding, data_format=self.data_format)
def get_config(self):
config = {'padding': self.padding, 'data_format': self.data_format}
base_config = super(ZeroPadding3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping1D')
class Cropping1D(Layer):
"""Cropping layer for 1D input (e.g. temporal sequence).
It crops along the time dimension (axis 1).
Arguments:
cropping: Int or tuple of int (length 2)
How many units should be trimmed off at the beginning and end of
the cropping dimension (axis 1).
If a single int is provided, the same value will be used for both.
Input shape:
3D tensor with shape `(batch, axis_to_crop, features)`
Output shape:
3D tensor with shape `(batch, cropped_axis, features)`
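  Example:
  An illustrative sketch of cropping along the time axis.
  ```python
  import tensorflow as tf
  x = tf.keras.Input(shape=(10, 8))
  # One step is removed from the start and two from the end of axis 1.
  y = tf.keras.layers.Cropping1D(cropping=(1, 2))(x)
  # y.shape == (None, 7, 8)
  ```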
"""
def __init__(self, cropping=(1, 1), **kwargs):
super(Cropping1D, self).__init__(**kwargs)
self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if input_shape[1] is not None:
length = input_shape[1] - self.cropping[0] - self.cropping[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
if self.cropping[1] == 0:
return inputs[:, self.cropping[0]:, :]
else:
return inputs[:, self.cropping[0]:-self.cropping[1], :]
def get_config(self):
config = {'cropping': self.cropping}
base_config = super(Cropping1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping2D')
class Cropping2D(Layer):
"""Cropping layer for 2D input (e.g. picture).
It crops along spatial dimensions, i.e. height and width.
Arguments:
cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to height and width.
- If tuple of 2 ints:
interpreted as two different
symmetric cropping values for height and width:
`(symmetric_height_crop, symmetric_width_crop)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_crop, bottom_crop), (left_crop, right_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, cropped_rows, cropped_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch, channels, cropped_rows, cropped_cols)`
Examples:
```python
# Crop the input 2D images or feature maps
model = Sequential()
model.add(Cropping2D(cropping=((2, 2), (4, 4)),
input_shape=(28, 28, 3)))
# now model.output_shape == (None, 24, 20, 3)
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Cropping2D(cropping=((2, 2), (2, 2))))
  # now model.output_shape == (None, 20, 16, 64)
```
"""
def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
super(Cropping2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 2:
raise ValueError('`cropping` should have two elements. '
'Found: ' + str(cropping))
height_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
width_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
self.cropping = (height_cropping, width_cropping)
else:
raise ValueError('`cropping` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_crop, symmetric_width_crop), '
'or a tuple of 2 tuples of 2 ints '
'((top_crop, bottom_crop), (left_crop, right_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
return tensor_shape.TensorShape([
input_shape[0], input_shape[1],
input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[2] else None,
input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[3] else None
])
else:
return tensor_shape.TensorShape([
input_shape[0],
input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[1] else None,
input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[2] else None, input_shape[3]
])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.Cropping3D')
class Cropping3D(Layer):
"""Cropping layer for 3D data (e.g. spatial or spatio-temporal).
Arguments:
cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to depth, height, and width.
      - If tuple of 3 ints: interpreted as three different
symmetric cropping values for depth, height, and width:
`(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
- If tuple of 3 tuples of 2 ints: interpreted as
`((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
right_dim2_crop), (left_dim3_crop, right_dim3_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch, depth, first_cropped_axis, second_cropped_axis,
third_cropped_axis)`
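  Examples:
  An illustrative sketch (the `Sequential` model below is only used to
  demonstrate the resulting shapes):
  ```python
  model = Sequential()
  model.add(Cropping3D(cropping=(2, 2, 2), input_shape=(28, 28, 10, 3)))
  # now model.output_shape == (None, 24, 24, 6, 3)
  ```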
"""
def __init__(self,
cropping=((1, 1), (1, 1), (1, 1)),
data_format=None,
**kwargs):
super(Cropping3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping), (cropping,
cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 3:
raise ValueError('`cropping` should have 3 elements. '
'Found: ' + str(cropping))
dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2,
'3rd entry of cropping')
self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
else:
raise ValueError(
'`cropping` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_crop, right_dim1_crop),'
' (left_dim2_crop, right_dim2_crop),'
          ' (left_dim3_crop, right_dim3_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][
0]:-self.cropping[2][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1], :]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][
0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[
2][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], self.cropping[2][0]: # pylint: disable=invalid-unary-operand-type
-self.cropping[2][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Aliases
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution1D = SeparableConv1D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Conv2DTranspose
Convolution3DTranspose = Conv3DTranspose
Deconvolution2D = Deconv2D = Conv2DTranspose
Deconvolution3D = Deconv3D = Conv3DTranspose
| apache-2.0 |
Learningtribes/edx-platform | openedx/core/djangoapps/user_api/tests/test_models.py | 56 | 6016 | """
Test UserPreferenceModel and UserPreference events
"""
from django.db import IntegrityError
from django.test import TestCase
from student.tests.factories import UserFactory
from student.tests.tests import UserSettingsEventTestMixin
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from ..tests.factories import UserPreferenceFactory, UserCourseTagFactory, UserOrgTagFactory
from ..models import UserPreference
from ..preferences.api import set_user_preference
class UserPreferenceModelTest(ModuleStoreTestCase):
"""
Test case covering User Preference ORM model attributes and custom operations
"""
def test_duplicate_user_key(self):
user = UserFactory.create()
UserPreferenceFactory.create(user=user, key="testkey", value="first")
self.assertRaises(
IntegrityError,
UserPreferenceFactory.create,
user=user,
key="testkey",
value="second"
)
def test_arbitrary_values(self):
user = UserFactory.create()
self._create_and_assert(user=user, key="testkey0", value="")
self._create_and_assert(user=user, key="testkey1", value="This is some English text!")
self._create_and_assert(user=user, key="testkey2", value="{'some': 'json'}")
self._create_and_assert(
user=user,
key="testkey3",
value="\xe8\xbf\x99\xe6\x98\xaf\xe4\xb8\xad\xe5\x9b\xbd\xe6\x96\x87\xe5\xad\x97'"
)
def _create_and_assert(self, user, key, value):
"""Create a new preference and assert the values. """
preference = UserPreferenceFactory.create(user=user, key=key, value=value)
self.assertEqual(preference.user, user)
self.assertEqual(preference.key, key)
self.assertEqual(preference.value, value)
return preference
def test_create_user_course_tags(self):
"""Create user preference tags and confirm properties are set accordingly. """
user = UserFactory.create()
course = CourseFactory.create()
tag = UserCourseTagFactory.create(user=user, course_id=course.id, key="testkey", value="foobar")
self.assertEquals(tag.user, user)
self.assertEquals(tag.course_id, course.id)
self.assertEquals(tag.key, "testkey")
self.assertEquals(tag.value, "foobar")
def test_create_user_org_tags(self):
"""Create org specific user tags and confirm all properties are set """
user = UserFactory.create()
course = CourseFactory.create()
tag = UserOrgTagFactory.create(user=user, org=course.id.org, key="testkey", value="foobar")
self.assertEquals(tag.user, user)
self.assertEquals(tag.org, course.id.org)
self.assertEquals(tag.key, "testkey")
self.assertEquals(tag.value, "foobar")
self.assertIsNotNone(tag.created)
self.assertIsNotNone(tag.modified)
# Modify the tag and save it. Check if the modified timestamp is updated.
original_modified = tag.modified
tag.value = "barfoo"
tag.save()
self.assertEquals(tag.value, "barfoo")
self.assertNotEqual(original_modified, tag.modified)
def test_get_value(self):
"""Verifies the behavior of get_value."""
user = UserFactory.create()
key = 'testkey'
value = 'testvalue'
# does a round trip
set_user_preference(user, key, value)
pref = UserPreference.get_value(user, key)
self.assertEqual(pref, value)
# get preference for key that doesn't exist for user
pref = UserPreference.get_value(user, 'testkey_none')
self.assertIsNone(pref)
class TestUserPreferenceEvents(UserSettingsEventTestMixin, TestCase):
"""
Mixin for verifying that user preference events are fired correctly.
"""
def setUp(self):
super(TestUserPreferenceEvents, self).setUp()
self.table = "user_api_userpreference"
self.user = UserFactory.create()
self.TEST_KEY = "test key"
self.TEST_VALUE = "test value"
self.user_preference = UserPreference.objects.create(user=self.user, key=self.TEST_KEY, value=self.TEST_VALUE)
self.reset_tracker()
def test_create_user_preference(self):
"""
Verify that we emit an event when a user preference is created.
"""
UserPreference.objects.create(user=self.user, key="new key", value="new value")
self.assert_user_setting_event_emitted(setting='new key', old=None, new="new value")
def test_update_user_preference(self):
"""
Verify that we emit an event when a user preference is updated.
"""
self.user_preference.value = "new value"
self.user_preference.save()
self.assert_user_setting_event_emitted(setting=self.TEST_KEY, old=self.TEST_VALUE, new="new value")
def test_delete_user_preference(self):
"""
Verify that we emit an event when a user preference is deleted.
"""
self.user_preference.delete()
self.assert_user_setting_event_emitted(setting=self.TEST_KEY, old=self.TEST_VALUE, new=None)
def test_truncated_user_preference_event(self):
"""
Verify that we truncate the preference value if it is too long.
"""
MAX_STRING_LENGTH = 12500
OVERSIZE_STRING_LENGTH = MAX_STRING_LENGTH + 10
self.user_preference.value = "z" * OVERSIZE_STRING_LENGTH
self.user_preference.save()
self.assert_user_setting_event_emitted(
setting=self.TEST_KEY, old=self.TEST_VALUE, new="z" * MAX_STRING_LENGTH, truncated=["new"]
)
self.user_preference.value = "x" * OVERSIZE_STRING_LENGTH
self.user_preference.save()
self.assert_user_setting_event_emitted(
setting=self.TEST_KEY, old="z" * MAX_STRING_LENGTH, new="x" * MAX_STRING_LENGTH, truncated=["old", "new"]
)
| agpl-3.0 |
mshafiq9/django | tests/staticfiles_tests/test_storage.py | 102 | 18551 | from __future__ import unicode_literals
import os
import shutil
import sys
import tempfile
import unittest
from django.conf import settings
from django.contrib.staticfiles import finders, storage
from django.contrib.staticfiles.management.commands import collectstatic
from django.contrib.staticfiles.management.commands.collectstatic import \
Command as CollectstaticCommand
from django.core.cache.backends.base import BaseCache
from django.core.management import call_command
from django.test import SimpleTestCase, override_settings
from django.utils import six
from django.utils.encoding import force_text
from .cases import (
BaseCollectionTestCase, BaseStaticFilesTestCase, StaticFilesTestCase,
)
from .settings import TEST_ROOT, TEST_SETTINGS
def hashed_file_path(test, path):
fullpath = test.render_template(test.static_template_snippet(path))
return fullpath.replace(settings.STATIC_URL, '')
class TestHashedFiles(object):
hashed_file_path = hashed_file_path
def tearDown(self):
# Clear hashed files to avoid side effects among tests.
storage.staticfiles_storage.hashed_files.clear()
def test_template_tag_return(self):
"""
Test the CachedStaticFilesStorage backend.
"""
self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png")
self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt")
self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt", asvar=True)
self.assertStaticRenders("cached/styles.css", "/static/cached/styles.bb84a0240107.css")
self.assertStaticRenders("path/", "/static/path/")
self.assertStaticRenders("path/?query", "/static/path/?query")
def test_template_tag_simple_content(self):
relpath = self.hashed_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.bb84a0240107.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_ignored_completely(self):
relpath = self.hashed_file_path("cached/css/ignored.css")
self.assertEqual(relpath, "cached/css/ignored.6c77f2643390.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b'#foobar', content)
self.assertIn(b'http:foobar', content)
self.assertIn(b'https:foobar', content)
self.assertIn(b'data:foobar', content)
self.assertIn(b'//foobar', content)
def test_path_with_querystring(self):
relpath = self.hashed_file_path("cached/styles.css?spam=eggs")
self.assertEqual(relpath, "cached/styles.bb84a0240107.css?spam=eggs")
with storage.staticfiles_storage.open(
"cached/styles.bb84a0240107.css") as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_with_fragment(self):
relpath = self.hashed_file_path("cached/styles.css#eggs")
self.assertEqual(relpath, "cached/styles.bb84a0240107.css#eggs")
with storage.staticfiles_storage.open(
"cached/styles.bb84a0240107.css") as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
def test_path_with_querystring_and_fragment(self):
relpath = self.hashed_file_path("cached/css/fragments.css")
self.assertEqual(relpath, "cached/css/fragments.75433540b096.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertIn(b'fonts/font.a4b0478549d0.eot?#iefix', content)
self.assertIn(b'fonts/font.b8d603e42714.svg#webfontIyfZbseF', content)
self.assertIn(b'data:font/woff;charset=utf-8;base64,d09GRgABAAAAADJoAA0AAAAAR2QAAQAAAAAAAAAAAAA', content)
self.assertIn(b'#default#VML', content)
def test_template_tag_absolute(self):
relpath = self.hashed_file_path("cached/absolute.css")
self.assertEqual(relpath, "cached/absolute.ae9ef2716fe3.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"/static/cached/styles.css", content)
self.assertIn(b"/static/cached/styles.bb84a0240107.css", content)
self.assertIn(b'/static/cached/img/relative.acae32e4532b.png', content)
def test_template_tag_denorm(self):
relpath = self.hashed_file_path("cached/denorm.css")
self.assertEqual(relpath, "cached/denorm.c5bd139ad821.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"..//cached///styles.css", content)
self.assertIn(b"../cached/styles.bb84a0240107.css", content)
self.assertNotIn(b"url(img/relative.png )", content)
self.assertIn(b'url("img/relative.acae32e4532b.png', content)
def test_template_tag_relative(self):
relpath = self.hashed_file_path("cached/relative.css")
self.assertEqual(relpath, "cached/relative.b0375bd89156.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"../cached/styles.css", content)
self.assertNotIn(b'@import "styles.css"', content)
self.assertNotIn(b'url(img/relative.png)', content)
self.assertIn(b'url("img/relative.acae32e4532b.png")', content)
self.assertIn(b"../cached/styles.bb84a0240107.css", content)
def test_import_replacement(self):
"See #18050"
relpath = self.hashed_file_path("cached/import.css")
self.assertEqual(relpath, "cached/import.2b1d40b0bbd4.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"""import url("styles.bb84a0240107.css")""", relfile.read())
def test_template_tag_deep_relative(self):
relpath = self.hashed_file_path("cached/css/window.css")
self.assertEqual(relpath, "cached/css/window.3906afbb5a17.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b'url(img/window.png)', content)
self.assertIn(b'url("img/window.acae32e4532b.png")', content)
def test_template_tag_url(self):
relpath = self.hashed_file_path("cached/url.css")
self.assertEqual(relpath, "cached/url.902310b73412.css")
with storage.staticfiles_storage.open(relpath) as relfile:
self.assertIn(b"https://", relfile.read())
def test_post_processing(self):
"""
Test that post_processing behaves correctly.
Files that are alterable should always be post-processed; files that
aren't should be skipped.
collectstatic has already been called once in setUp() for this testcase,
therefore we check by verifying behavior on a second run.
"""
collectstatic_args = {
'interactive': False,
'verbosity': 0,
'link': False,
'clear': False,
'dry_run': False,
'post_process': True,
'use_default_ignore_patterns': True,
'ignore_patterns': ['*.ignoreme'],
}
collectstatic_cmd = CollectstaticCommand()
collectstatic_cmd.set_options(**collectstatic_args)
stats = collectstatic_cmd.collect()
self.assertIn(os.path.join('cached', 'css', 'window.css'), stats['post_processed'])
self.assertIn(os.path.join('cached', 'css', 'img', 'window.png'), stats['unmodified'])
self.assertIn(os.path.join('test', 'nonascii.css'), stats['post_processed'])
def test_css_import_case_insensitive(self):
relpath = self.hashed_file_path("cached/styles_insensitive.css")
self.assertEqual(relpath, "cached/styles_insensitive.c609562b6d3c.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.d41d8cd98f00.css", content)
@override_settings(
STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'faulty')],
STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
)
def test_post_processing_failure(self):
"""
Test that post_processing indicates the origin of the error when it
fails. Regression test for #18986.
"""
finders.get_finder.cache_clear()
err = six.StringIO()
with self.assertRaises(Exception):
call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
self.assertEqual("Post-processing 'faulty.css' failed!\n\n", err.getvalue())
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
TEST_SETTINGS,
STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage',
DEBUG=False,
))
class TestCollectionCachedStorage(TestHashedFiles, BaseCollectionTestCase,
BaseStaticFilesTestCase, SimpleTestCase):
"""
Tests for the Cache busting storage
"""
def test_cache_invalidation(self):
name = "cached/styles.css"
hashed_name = "cached/styles.bb84a0240107.css"
# check if the cache is filled correctly as expected
cache_key = storage.staticfiles_storage.hash_key(name)
cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
self.assertEqual(self.hashed_file_path(name), cached_name)
# clearing the cache to make sure we re-set it correctly in the url method
storage.staticfiles_storage.hashed_files.clear()
cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
self.assertEqual(cached_name, None)
self.assertEqual(self.hashed_file_path(name), hashed_name)
cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
self.assertEqual(cached_name, hashed_name)
def test_cache_key_memcache_validation(self):
"""
Handle cache key creation correctly, see #17861.
"""
name = (
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff"
"/some crazy/\x16\xb4"
)
cache_key = storage.staticfiles_storage.hash_key(name)
cache_validator = BaseCache({})
cache_validator.validate_key(cache_key)
self.assertEqual(cache_key, 'staticfiles:821ea71ef36f95b3922a77f7364670e7')
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
TEST_SETTINGS,
STATICFILES_STORAGE='django.contrib.staticfiles.storage.ManifestStaticFilesStorage',
DEBUG=False,
))
class TestCollectionManifestStorage(TestHashedFiles, BaseCollectionTestCase,
BaseStaticFilesTestCase, SimpleTestCase):
"""
Tests for the Cache busting storage
"""
def setUp(self):
super(TestCollectionManifestStorage, self).setUp()
temp_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(temp_dir, 'test'))
self._clear_filename = os.path.join(temp_dir, 'test', 'cleared.txt')
with open(self._clear_filename, 'w') as f:
f.write('to be deleted in one test')
self.patched_settings = self.settings(
STATICFILES_DIRS=settings.STATICFILES_DIRS + [temp_dir])
self.patched_settings.enable()
self.addCleanup(shutil.rmtree, six.text_type(temp_dir))
def tearDown(self):
self.patched_settings.disable()
if os.path.exists(self._clear_filename):
os.unlink(self._clear_filename)
super(TestCollectionManifestStorage, self).tearDown()
def test_manifest_exists(self):
filename = storage.staticfiles_storage.manifest_name
path = storage.staticfiles_storage.path(filename)
self.assertTrue(os.path.exists(path))
def test_loaded_cache(self):
self.assertNotEqual(storage.staticfiles_storage.hashed_files, {})
manifest_content = storage.staticfiles_storage.read_manifest()
self.assertIn(
'"version": "%s"' % storage.staticfiles_storage.manifest_version,
force_text(manifest_content)
)
def test_parse_cache(self):
hashed_files = storage.staticfiles_storage.hashed_files
manifest = storage.staticfiles_storage.load_manifest()
self.assertEqual(hashed_files, manifest)
def test_clear_empties_manifest(self):
cleared_file_name = os.path.join('test', 'cleared.txt')
# collect the additional file
self.run_collectstatic()
hashed_files = storage.staticfiles_storage.hashed_files
self.assertIn(cleared_file_name, hashed_files)
manifest_content = storage.staticfiles_storage.load_manifest()
self.assertIn(cleared_file_name, manifest_content)
original_path = storage.staticfiles_storage.path(cleared_file_name)
self.assertTrue(os.path.exists(original_path))
        # delete the original file from the app, collect with clear
os.unlink(self._clear_filename)
self.run_collectstatic(clear=True)
self.assertFileNotFound(original_path)
hashed_files = storage.staticfiles_storage.hashed_files
self.assertNotIn(cleared_file_name, hashed_files)
manifest_content = storage.staticfiles_storage.load_manifest()
self.assertNotIn(cleared_file_name, manifest_content)
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
TEST_SETTINGS,
STATICFILES_STORAGE='staticfiles_tests.storage.SimpleCachedStaticFilesStorage',
DEBUG=False,
))
class TestCollectionSimpleCachedStorage(BaseCollectionTestCase,
BaseStaticFilesTestCase, SimpleTestCase):
"""
Tests for the Cache busting storage
"""
hashed_file_path = hashed_file_path
def test_template_tag_return(self):
"""
Test the CachedStaticFilesStorage backend.
"""
self.assertStaticRaises(ValueError, "does/not/exist.png", "/static/does/not/exist.png")
self.assertStaticRenders("test/file.txt", "/static/test/file.deploy12345.txt")
self.assertStaticRenders("cached/styles.css", "/static/cached/styles.deploy12345.css")
self.assertStaticRenders("path/", "/static/path/")
self.assertStaticRenders("path/?query", "/static/path/?query")
def test_template_tag_simple_content(self):
relpath = self.hashed_file_path("cached/styles.css")
self.assertEqual(relpath, "cached/styles.deploy12345.css")
with storage.staticfiles_storage.open(relpath) as relfile:
content = relfile.read()
self.assertNotIn(b"cached/other.css", content)
self.assertIn(b"other.deploy12345.css", content)
class CustomStaticFilesStorage(storage.StaticFilesStorage):
"""
Used in TestStaticFilePermissions
"""
def __init__(self, *args, **kwargs):
kwargs['file_permissions_mode'] = 0o640
kwargs['directory_permissions_mode'] = 0o740
super(CustomStaticFilesStorage, self).__init__(*args, **kwargs)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports chmod.")
class TestStaticFilePermissions(BaseCollectionTestCase, StaticFilesTestCase):
command_params = {
'interactive': False,
'post_process': True,
'verbosity': 0,
'ignore_patterns': ['*.ignoreme'],
'use_default_ignore_patterns': True,
'clear': False,
'link': False,
'dry_run': False,
}
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
super(TestStaticFilePermissions, self).setUp()
def tearDown(self):
os.umask(self.old_umask)
super(TestStaticFilePermissions, self).tearDown()
# Don't run collectstatic command in this test class.
def run_collectstatic(self, **kwargs):
pass
@override_settings(
FILE_UPLOAD_PERMISSIONS=0o655,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
)
def test_collect_static_files_permissions(self):
collectstatic.Command().execute(**self.command_params)
test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
file_mode = os.stat(test_file)[0] & 0o777
dir_mode = os.stat(test_dir)[0] & 0o777
self.assertEqual(file_mode, 0o655)
self.assertEqual(dir_mode, 0o765)
@override_settings(
FILE_UPLOAD_PERMISSIONS=None,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=None,
)
def test_collect_static_files_default_permissions(self):
collectstatic.Command().execute(**self.command_params)
test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
file_mode = os.stat(test_file)[0] & 0o777
dir_mode = os.stat(test_dir)[0] & 0o777
self.assertEqual(file_mode, 0o666 & ~self.umask)
self.assertEqual(dir_mode, 0o777 & ~self.umask)
@override_settings(
FILE_UPLOAD_PERMISSIONS=0o655,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
STATICFILES_STORAGE='staticfiles_tests.test_storage.CustomStaticFilesStorage',
)
def test_collect_static_files_subclass_of_static_storage(self):
collectstatic.Command().execute(**self.command_params)
test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
file_mode = os.stat(test_file)[0] & 0o777
dir_mode = os.stat(test_dir)[0] & 0o777
self.assertEqual(file_mode, 0o640)
self.assertEqual(dir_mode, 0o740)
| bsd-3-clause |
dogmaphobic/MAVProxy | MAVProxy/modules/mavproxy_misseditor/me_event.py | 14 | 1762 | #!/usr/bin/env python
'''
Event class and enums for Mission Editor
Michael Day
June 2014
'''
#MissionEditorEvents come FROM the GUI (with a few exceptions where the Mission Editor Module sends a message to itself, e.g., MEE_TIME_TO_QUIT)
#MissionEditorGUIEvents go TO the GUI
#enum for MissionEditorEvent types
MEE_READ_WPS = 0
MEE_WRITE_WPS = 1
MEE_TIME_TO_QUIT = 2
MEE_GET_WP_RAD = 3
MEE_GET_LOIT_RAD = 4
MEE_GET_WP_DEFAULT_ALT = 5
MEE_WRITE_WP_NUM = 6
MEE_LOAD_WP_FILE = 7
MEE_SAVE_WP_FILE = 8
MEE_SET_WP_RAD = 9
MEE_SET_LOIT_RAD = 10
MEE_SET_WP_DEFAULT_ALT = 11
#enum of MissionEditorGUIEvent types
MEGE_CLEAR_MISS_TABLE = 0
MEGE_ADD_MISS_TABLE_ROWS = 1
MEGE_SET_MISS_ITEM = 2
MEGE_SET_WP_RAD = 3
MEGE_SET_LOIT_RAD = 4
MEGE_SET_WP_DEFAULT_ALT = 5
MEGE_SET_LAST_MAP_CLICK_POS = 6
class MissionEditorEvent:
def __init__(self, type, **kwargs):
self.type = type
self.arg_dict = kwargs
if not self.type in [MEE_READ_WPS, MEE_WRITE_WPS, MEGE_CLEAR_MISS_TABLE,
MEGE_ADD_MISS_TABLE_ROWS, MEGE_SET_MISS_ITEM, MEE_TIME_TO_QUIT,
MEE_GET_WP_RAD, MEE_GET_LOIT_RAD, MEGE_SET_WP_RAD, MEGE_SET_LOIT_RAD,
MEE_GET_WP_DEFAULT_ALT, MEGE_SET_WP_DEFAULT_ALT, MEE_WRITE_WP_NUM,
MEE_LOAD_WP_FILE, MEE_SAVE_WP_FILE, MEE_SET_WP_RAD, MEE_SET_LOIT_RAD,
MEE_SET_WP_DEFAULT_ALT]:
raise TypeError("Unrecongized MissionEditorEvent type:" + str(self.type))
def get_type(self):
return self.type
def get_arg(self, key):
if not key in self.arg_dict:
print("No key %s in %s" % (key, str(self.type)))
return None
return self.arg_dict[key]
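# Illustrative usage sketch (the 'rad' keyword argument below is an
# assumption made for the example, not a documented key):
#
#   evt = MissionEditorEvent(MEE_SET_WP_RAD, rad=90)
#   if evt.get_type() == MEE_SET_WP_RAD:
#       radius = evt.get_arg('rad')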
| gpl-3.0 |
tomtor/QGIS | python/plugins/processing/script/ScriptAlgorithmProvider.py | 29 | 5223 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ScriptAlgorithmProvider.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.core import (Qgis,
QgsMessageLog,
QgsApplication,
QgsProcessingProvider,
QgsRuntimeProfiler)
from processing.core.ProcessingConfig import ProcessingConfig, Setting
from processing.gui.ProviderActions import (ProviderActions,
ProviderContextMenuActions)
from processing.script.AddScriptFromFileAction import AddScriptFromFileAction
from processing.script.CreateNewScriptAction import CreateNewScriptAction
from processing.script.AddScriptFromTemplateAction import AddScriptFromTemplateAction
from processing.script.DeleteScriptAction import DeleteScriptAction
from processing.script.EditScriptAction import EditScriptAction
from processing.script.OpenScriptFromFileAction import OpenScriptFromFileAction
from processing.script import ScriptUtils
from processing.tools.system import userFolder
class ScriptAlgorithmProvider(QgsProcessingProvider):
def __init__(self):
super().__init__()
self.algs = []
self.folder_algorithms = []
self.actions = [CreateNewScriptAction(),
AddScriptFromTemplateAction(),
OpenScriptFromFileAction(),
AddScriptFromFileAction()
]
self.contextMenuActions = [EditScriptAction(),
DeleteScriptAction()]
def load(self):
with QgsRuntimeProfiler.profile('Script Provider'):
ProcessingConfig.settingIcons[self.name()] = self.icon()
ProcessingConfig.addSetting(Setting(self.name(),
ScriptUtils.SCRIPTS_FOLDERS,
self.tr("Scripts folder(s)"),
ScriptUtils.defaultScriptsFolder(),
valuetype=Setting.MULTIPLE_FOLDERS))
ProviderActions.registerProviderActions(self, self.actions)
ProviderContextMenuActions.registerProviderContextMenuActions(self.contextMenuActions)
ProcessingConfig.readSettings()
self.refreshAlgorithms()
return True
def unload(self):
ProcessingConfig.removeSetting(ScriptUtils.SCRIPTS_FOLDERS)
ProviderActions.deregisterProviderActions(self)
ProviderContextMenuActions.deregisterProviderContextMenuActions(self.contextMenuActions)
def icon(self):
return QgsApplication.getThemeIcon("/processingScript.svg")
def svgIconPath(self):
return QgsApplication.iconPath("processingScript.svg")
def id(self):
return "script"
def name(self):
return self.tr("Scripts")
def supportsNonFileBasedOutput(self):
# TODO - this may not be strictly true. We probably need a way for scripts
# to indicate whether individual outputs support non-file based outputs,
# but for now allow it. At best we expose nice features to users, at worst
# they'll get an error if they use them with incompatible outputs...
return True
def loadAlgorithms(self):
self.algs = []
folders = ScriptUtils.scriptsFolders()
# always add default script folder to the list
defaultScriptFolder = ScriptUtils.defaultScriptsFolder()
if defaultScriptFolder not in folders:
folders.append(defaultScriptFolder)
# load all scripts
for folder in folders:
folder = ScriptUtils.resetScriptFolder(folder)
if not folder:
continue
for path, subdirs, files in os.walk(folder):
for entry in files:
if entry.lower().endswith(".py"):
moduleName = os.path.splitext(os.path.basename(entry))[0]
filePath = os.path.abspath(os.path.join(path, entry))
alg = ScriptUtils.loadAlgorithm(moduleName, filePath)
if alg is not None:
self.algs.append(alg)
for a in self.algs:
self.addAlgorithm(a)
| gpl-2.0 |
LumPenPacK/NetworkExtractionFromImages | osx_build/nefi2_osx_amd64_xcode_2015/bin/nefi2/main.py | 4 | 2177 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The main nefi2 startup script.
It loads extension loader and initializes UI.
It also enables console batch processing mode.
"""
import os
from nefi2.model.ext_loader import ExtensionLoader
from nefi2.model.pipeline import Pipeline
from nefi2.view.main_controller import MainView
import sys
import argparse
import ctypes
from PyQt5 import QtGui
from PyQt5.QtWidgets import QApplication
import qdarkstyle
__authors__ = {"Pavel Shkadzko": "p.shkadzko@gmail.com",
"Dennig Groß": "gdennis91@googlemail.com",
"Philipp Reichert": "prei@me.com"}
class Main:
@staticmethod
def gui_mode():
"""
Start NEFI2 GUI
"""
myappid = 'nefi2.0' # arbitrary string
if sys.platform == 'win32' or sys.platform == 'win64':
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
extloader = ExtensionLoader()
pipeline = Pipeline(extloader.cats_container)
app = QApplication(sys.argv)
app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
app.setQuitOnLastWindowClosed(True)
app.setWindowIcon(QtGui.QIcon(os.path.join('icons', 'nefi2.ico')))
wnd = MainView(pipeline)
wnd.load_dark_theme(app)
wnd.show()
sys.exit(app.exec_())
@staticmethod
def batch_mode(args):
"""
Process images in console mode
Args:
| *args* (dict) : argument dict returned by ArgumentParser
"""
extloader = ExtensionLoader()
pipeline = Pipeline(extloader.cats_container)
# processing args values
if args.pipeline:
# load the specified pipeline file
# default url
pipeline.load_pipeline_json(args.pipeline)
if args.dir:
# load the images from the specified source dir
pipeline.set_input(args.dir)
elif args.file:
# load a single image
pipeline.set_input(args.file)
if args.out:
pipeline.set_output_dir(args.out)
pipeline.process_batch()
if __name__ == '__main__':
pass
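# Illustrative sketch of driving batch mode with a pre-built argument object
# (the file names below are assumptions; the attribute names mirror those
# read in batch_mode):
#
#   import argparse
#   args = argparse.Namespace(pipeline='pipeline.json', dir='images/',
#                             file=None, out='output/')
#   Main.batch_mode(args)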
| bsd-2-clause |
ChristinaHammer/Client_Database | cdbtabledef.py | 1 | 2662 | """cdbtabledef.py
Developer: Noelle Todd
Last Updated: August 30, 2014
This module will create 4 tables for the client database, using the
sqlalchemy module, and the sqlite database. This module is still in
early testing stages, and as such, is subject to many changes, and
probably contains bugs.
"""
from sqlalchemy import Column, DateTime, String, Integer, ForeignKey, func
from sqlalchemy import Boolean
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from datetime import datetime
engine = create_engine('sqlite:///test2_db.sqlite')
session = sessionmaker()
session.configure(bind=engine)
base = declarative_base()
class Household(base):
"""This class creates a table with columns for household data.
"""
__tablename__ = 'household'
id = Column(Integer, primary_key=True)
street_address = Column(String)
apt = Column(String)
city = Column(String, default = 'Troy')
state = Column(String(2), default = 'NY')
zip = Column(Integer, default = '12180')
date_verified = Column(DateTime)
seniors = Column(Integer, default = 0)
adults = Column(Integer, default = 0)
children = Column(Integer, default = 0)
infants = Column(Integer, default = 0)
total = Column(Integer, default = 0)
class Person(base):
"""This class creates a table with columns for individual's data.
"""
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
first_name = Column(String)
last_name = Column(String)
DOB = Column(DateTime)
age = Column(Integer)
phone = Column(String)
date_joined = Column(DateTime)
HH_ID = Column(Integer, ForeignKey('household.id'))
household = relationship(Household,
backref=backref('members',
uselist=True,
passive_updates=False))
class Volunteer(base):
"""This class creates a table with columns for volunteer data.
"""
__tablename__ = 'volunteer'
id = Column(Integer, primary_key = True)
first_name = Column(String)
last_name = Column(String)
phone = Column(String)
active = Column(Boolean, default=True)
color = Column(String)
class Visit(base):
"""This class creates a table with columns tracking visit history.
"""
__tablename__ = 'visit'
id = Column(Integer, primary_key = True)
I_ID = Column(Integer, ForeignKey('person.id'))
HH_ID = Column(Integer, ForeignKey('household.id'))
Vol_ID = Column(Integer, ForeignKey('volunteer.id'))
date = Column(DateTime, default = func.now())
visit_notes = Column(String(256))
base.metadata.create_all(engine)
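# Illustrative usage sketch (the field values are made up for the example);
# note that 'session' above is a configured sessionmaker, so a working
# Session is obtained by calling it:
#
#   s = session()
#   home = Household(street_address='123 Main St', adults=2, children=1,
#                    total=3)
#   s.add(home)
#   s.add(Person(first_name='Jane', last_name='Doe', household=home))
#   s.commit()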
| mit |
willzhang05/postgrestesting1 | postgrestesting1/lib/python3.5/site-packages/pip/wheel.py | 187 | 30186 | """
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import compileall
import csv
import errno
import functools
import hashlib
import logging
import os
import os.path
import re
import shutil
import stat
import sys
import tempfile
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor.six import StringIO
import pip
from pip.download import path_to_url, unpack_url
from pip.exceptions import InvalidWheelFilename, UnsupportedWheel
from pip.locations import distutils_scheme, PIP_DELETE_MARKER_FILENAME
from pip import pep425tags
from pip.utils import (
call_subprocess, ensure_dir, make_path_relative, captured_stdout,
rmtree)
from pip.utils.logging import indent_log
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
from pip._vendor.six.moves import configparser
wheel_ext = '.whl'
VERSION_COMPATIBLE = (1, 0)
logger = logging.getLogger(__name__)
class WheelCache(object):
"""A cache of wheels for future installs."""
def __init__(self, cache_dir, format_control):
"""Create a wheel cache.
:param cache_dir: The root of the cache.
:param format_control: A pip.index.FormatControl object to limit
binaries being read from the cache.
"""
self._cache_dir = os.path.expanduser(cache_dir) if cache_dir else None
self._format_control = format_control
def cached_wheel(self, link, package_name):
return cached_wheel(
self._cache_dir, link, self._format_control, package_name)
def _cache_for_link(cache_dir, link):
"""
Return a directory to store cached wheels in for link.
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were not
unique. E.g. ./package might have dozens of installs done for it and build
a version of 0.0...and if we built and cached a wheel, we'd end up using
the same wheel even if the source has been edited.
:param cache_dir: The cache_dir being used by pip.
:param link: The link of the sdist for which this will cache wheels.
"""
# We want to generate an url to use as our cache key, we don't want to just
# re-use the URL because it might have other items in the fragment and we
# don't care about those.
key_parts = [link.url_without_fragment]
if link.hash_name is not None and link.hash is not None:
key_parts.append("=".join([link.hash_name, link.hash]))
key_url = "#".join(key_parts)
# Encode our key url with sha224, we'll use this because it has similar
# security properties to sha256, but with a shorter total output (and thus
# less secure). However the differences don't make a lot of difference for
# our use case here.
hashed = hashlib.sha224(key_url.encode()).hexdigest()
# We want to nest the directories some to prevent having a ton of top level
# directories where we might run out of sub directories on some FS.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
# Inside of the base location for cached wheels, expand our parts and join
# them all together.
return os.path.join(cache_dir, "wheels", *parts)
def cached_wheel(cache_dir, link, format_control, package_name):
if not cache_dir:
return link
if not link:
return link
if link.is_wheel:
return link
if not link.is_artifact:
return link
if not package_name:
return link
canonical_name = pkg_resources.safe_name(package_name).lower()
formats = pip.index.fmt_ctl_formats(format_control, canonical_name)
if "binary" not in formats:
return link
root = _cache_for_link(cache_dir, link)
try:
wheel_names = os.listdir(root)
except OSError as e:
if e.errno in (errno.ENOENT, errno.ENOTDIR):
return link
raise
candidates = []
for wheel_name in wheel_names:
try:
wheel = Wheel(wheel_name)
except InvalidWheelFilename:
continue
if not wheel.supported():
# Built for a different python/arch/etc
continue
candidates.append((wheel.support_index_min(), wheel_name))
if not candidates:
return link
candidates.sort()
path = os.path.join(root, candidates[0][1])
return pip.index.Link(path_to_url(path), trusted=True)
def rehash(path, algo='sha256', blocksize=1 << 20):
"""Return (hash, length) for path using hashlib.new(algo)"""
h = hashlib.new(algo)
length = 0
with open(path, 'rb') as f:
block = f.read(blocksize)
while block:
length += len(block)
h.update(block)
block = f.read(blocksize)
digest = 'sha256=' + urlsafe_b64encode(
h.digest()
).decode('latin1').rstrip('=')
return (digest, length)
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = {'newline': ''}
bin = ''
return open(name, mode + bin, **nl)
def fix_script(path):
"""Replace #!python with #!/path/to/python
Return True if file was changed."""
# XXX RECORD hashes will need to be updated
if os.path.isfile(path):
with open(path, 'rb') as script:
firstline = script.readline()
if not firstline.startswith(b'#!python'):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = b'#!' + exename + os.linesep.encode("ascii")
rest = script.read()
with open(path, 'wb') as script:
script.write(firstline)
script.write(rest)
return True
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
\.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
"""
Return True if the extracted wheel in wheeldir should go into purelib.
"""
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
return False
def get_entrypoints(filename):
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
cp = configparser.RawConfigParser()
cp.readfp(data)
console = {}
gui = {}
if cp.has_section('console_scripts'):
console = dict(cp.items('console_scripts'))
if cp.has_section('gui_scripts'):
gui = dict(cp.items('gui_scripts'))
return console, gui
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True, scheme=None, isolated=False):
"""Install a wheel"""
if not scheme:
scheme = distutils_scheme(
name, user=user, home=home, root=root, isolated=isolated
)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
with captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
compileall.compile_dir(source, force=True, quiet=True)
logger.debug(stdout.getvalue())
def normpath(src, p):
return make_path_relative(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
ensure_dir(dest) # common for the 'include' path
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
destdir = os.path.join(dest, basedir)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base and
s.endswith('.dist-info') and
# is self.req.project_name case preserving?
s.lower().startswith(
req.project_name.replace('-', '_').lower())):
assert not info_dir, 'Multiple .dist-info directories'
info_dir.append(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
# directory creation is lazy and after the file filtering above
# to ensure we don't install empty dirs; empty dirs can't be
# uninstalled.
ensure_dir(destdir)
# We use copyfile (not move, copy, or copy2) to be extra sure
# that we are not moving directories over (copyfile fails for
# directories) as well as to ensure that we are not copying
# over any metadata because we want more control over what
# metadata we actually copy over.
shutil.copyfile(srcfile, destfile)
# Copy over the metadata for the file, currently this only
# includes the atime and mtime.
st = os.stat(srcfile)
if hasattr(os, "utime"):
os.utime(destfile, (st.st_atime, st.st_mtime))
# If our file is executable, then make our destination file
# executable.
if os.access(srcfile, os.X_OK):
st = os.stat(srcfile)
permissions = (
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
os.chmod(destfile, permissions)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure old scripts are overwritten.
# See https://github.com/pypa/pip/issues/1800
maker.clobber = True
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = set(('', ))
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
def _get_script_text(entry):
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
maker._get_script_text = _get_script_text
maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
# To add the level of hack in this section of code, in order to support
# ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
# variable which will control which version scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
# that this option is technically if ENSUREPIP_OPTIONS is set and is
# not altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
generated.extend(
maker.make_multiple(['%s = %s' % kv for kv in console.items()])
)
if len(gui) > 0:
generated.extend(
maker.make_multiple(
['%s = %s' % kv for kv in gui.items()],
{'gui': True}
)
)
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in generated:
h, l = rehash(f)
writer.writerow((f, h, l))
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
"""
Yield all the uninstallation paths for dist based on RECORD-without-.pyc
Yield paths to all the files in RECORD. For each .py file in RECORD, add
the .pyc in the same directory.
UninstallPathSet.add() takes care of the __pycache__ .pyc.
"""
from pip.utils import FakeFile # circular import
r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
for row in r:
path = os.path.join(dist.location, row[0])
yield path
if path.endswith('.py'):
dn, fn = os.path.split(path)
base = fn[:-3]
path = os.path.join(dn, base + '.pyc')
yield path
def wheel_version(source_dir):
"""
Return the Wheel-Version of an extracted wheel, if possible.
    Otherwise, return False if it could not be parsed or extracted.
"""
try:
dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
wheel_data = dist.get_metadata('WHEEL')
wheel_data = Parser().parsestr(wheel_data)
version = wheel_data['Wheel-Version'].strip()
version = tuple(map(int, version.split('.')))
return version
except:
return False
def check_compatibility(version, name):
"""
Raises errors or warns if called with an incompatible Wheel-Version.
Pip should refuse to install a Wheel-Version that's a major series
    ahead of what it's compatible with (e.g. 2.0 > 1.1); and warn when
    installing a version that is only a minor version ahead (e.g. 1.2 > 1.1).
version: a 2-tuple representing a Wheel-Version (Major, Minor)
name: name of wheel or package to raise exception about
:raises UnsupportedWheel: when an incompatible Wheel-Version is given
"""
if not version:
raise UnsupportedWheel(
"%s is in an unsupported or invalid wheel" % name
)
if version[0] > VERSION_COMPATIBLE[0]:
raise UnsupportedWheel(
"%s's Wheel-Version (%s) is not compatible with this version "
"of pip" % (name, '.'.join(map(str, version)))
)
elif version > VERSION_COMPATIBLE:
logger.warning(
'Installing from a newer Wheel-Version (%s)',
'.'.join(map(str, version)),
)
class Wheel(object):
"""A wheel file"""
# TODO: maybe move the install code into this class
wheel_file_re = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE
)
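    # e.g. 'pip-6.0-py2.py3-none-any.whl' parses to name='pip', ver='6.0',
    # pyver='py2.py3', abi='none', plat='any' (no build tag).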
def __init__(self, filename):
"""
:raises InvalidWheelFilename: when the filename is invalid for a wheel
"""
wheel_info = self.wheel_file_re.match(filename)
if not wheel_info:
raise InvalidWheelFilename(
"%s is not a valid wheel filename." % filename
)
self.filename = filename
self.name = wheel_info.group('name').replace('_', '-')
# we'll assume "_" means "-" due to wheel naming scheme
# (https://github.com/pypa/pip/issues/1150)
self.version = wheel_info.group('ver').replace('_', '-')
self.pyversions = wheel_info.group('pyver').split('.')
self.abis = wheel_info.group('abi').split('.')
self.plats = wheel_info.group('plat').split('.')
# All the tag combinations from this file
self.file_tags = set(
(x, y, z) for x in self.pyversions
for y in self.abis for z in self.plats
)
def support_index_min(self, tags=None):
"""
Return the lowest index that one of the wheel's file_tag combinations
achieves in the supported_tags list e.g. if there are 8 supported tags,
and one of the file tags is first in the list, then return 0. Returns
    None if the wheel is not supported.
"""
if tags is None: # for mock
tags = pep425tags.supported_tags
indexes = [tags.index(c) for c in self.file_tags if c in tags]
return min(indexes) if indexes else None
def supported(self, tags=None):
"""Is this wheel supported on this system?"""
if tags is None: # for mock
tags = pep425tags.supported_tags
return bool(set(tags).intersection(self.file_tags))
class WheelBuilder(object):
"""Build wheels from a RequirementSet."""
def __init__(self, requirement_set, finder, build_options=None,
global_options=None):
self.requirement_set = requirement_set
self.finder = finder
self._cache_root = requirement_set._wheel_cache._cache_dir
self._wheel_dir = requirement_set.wheel_download_dir
self.build_options = build_options or []
self.global_options = global_options or []
def _build_one(self, req, output_dir):
"""Build one wheel.
:return: The filename of the built wheel, or None if the build failed.
"""
tempd = tempfile.mkdtemp('pip-wheel-')
try:
if self.__build_one(req, tempd):
try:
wheel_name = os.listdir(tempd)[0]
wheel_path = os.path.join(output_dir, wheel_name)
shutil.move(os.path.join(tempd, wheel_name), wheel_path)
logger.info('Stored in directory: %s', output_dir)
return wheel_path
except:
return None
return None
finally:
rmtree(tempd)
def __build_one(self, req, tempd):
base_args = [
sys.executable, '-c',
"import setuptools;__file__=%r;"
"exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), "
"__file__, 'exec'))" % req.setup_py
] + list(self.global_options)
logger.info('Running setup.py bdist_wheel for %s', req.name)
logger.debug('Destination directory: %s', tempd)
wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
+ self.build_options
try:
call_subprocess(wheel_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed building wheel for %s', req.name)
return False
def build(self, autobuilding=False):
"""Build wheels.
        :param autobuilding: If True, replace the sdist we built from with the
            newly built wheel, in preparation for installation.
:return: True if all the wheels built correctly.
"""
assert self._wheel_dir or (autobuilding and self._cache_root)
# unpack sdists and constructs req set
self.requirement_set.prepare_files(self.finder)
reqset = self.requirement_set.requirements.values()
buildset = []
for req in reqset:
if req.constraint:
continue
if req.is_wheel:
if not autobuilding:
logger.info(
'Skipping %s, due to already being wheel.', req.name)
elif req.editable:
if not autobuilding:
logger.info(
'Skipping bdist_wheel for %s, due to being editable',
req.name)
elif autobuilding and req.link and not req.link.is_artifact:
pass
elif autobuilding and not req.source_dir:
pass
else:
if autobuilding:
link = req.link
base, ext = link.splitext()
if pip.index.egg_info_matches(base, None, link) is None:
# Doesn't look like a package - don't autobuild a wheel
# because we'll have no way to lookup the result sanely
continue
if "binary" not in pip.index.fmt_ctl_formats(
self.finder.format_control,
pkg_resources.safe_name(req.name).lower()):
logger.info(
"Skipping bdist_wheel for %s, due to binaries "
"being disabled for it.", req.name)
continue
buildset.append(req)
if not buildset:
return True
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join([req.name for req in buildset]),
)
with indent_log():
build_success, build_failure = [], []
for req in buildset:
if autobuilding:
output_dir = _cache_for_link(self._cache_root, req.link)
try:
ensure_dir(output_dir)
except OSError as e:
logger.warn("Building wheel for %s failed: %s",
req.name, e)
build_failure.append(req)
continue
else:
output_dir = self._wheel_dir
wheel_file = self._build_one(req, output_dir)
if wheel_file:
build_success.append(req)
if autobuilding:
# XXX: This is mildly duplicative with prepare_files,
# but not close enough to pull out to a single common
# method.
# The code below assumes temporary source dirs -
# prevent it doing bad things.
if req.source_dir and not os.path.exists(os.path.join(
req.source_dir, PIP_DELETE_MARKER_FILENAME)):
raise AssertionError(
"bad source dir - missing marker")
# Delete the source we built the wheel from
req.remove_temporary_source()
# set the build directory again - name is known from
# the work prepare_files did.
req.source_dir = req.build_location(
self.requirement_set.build_dir)
# Update the link for this.
req.link = pip.index.Link(
path_to_url(wheel_file), trusted=True)
assert req.link.is_wheel
# extract the wheel into the dir
unpack_url(
req.link, req.source_dir, None, False,
session=self.requirement_set.session)
else:
build_failure.append(req)
# notify success/failure
if build_success:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_success]),
)
if build_failure:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failure]),
)
# Return True if all builds were successful
return len(build_failure) == 0
| mit |
javachengwc/hue | desktop/core/ext-py/Django-1.6.10/tests/utils_tests/test_tzinfo.py | 149 | 3072 | import copy
import datetime
import os
import pickle
import time
from django.utils.tzinfo import FixedOffset, LocalTimezone
from django.utils import unittest
class TzinfoTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.old_TZ = os.environ.get('TZ')
os.environ['TZ'] = 'US/Eastern'
try:
# Check if a timezone has been set
time.tzset()
cls.tz_tests = True
except AttributeError:
# No timezone available. Don't run the tests that require a TZ
cls.tz_tests = False
@classmethod
def tearDownClass(cls):
if cls.old_TZ is None:
del os.environ['TZ']
else:
os.environ['TZ'] = cls.old_TZ
# Cleanup - force re-evaluation of TZ environment variable.
if cls.tz_tests:
time.tzset()
def test_fixedoffset(self):
self.assertEqual(repr(FixedOffset(0)), '+0000')
self.assertEqual(repr(FixedOffset(60)), '+0100')
self.assertEqual(repr(FixedOffset(-60)), '-0100')
self.assertEqual(repr(FixedOffset(280)), '+0440')
self.assertEqual(repr(FixedOffset(-280)), '-0440')
self.assertEqual(repr(FixedOffset(-78.4)), '-0118')
self.assertEqual(repr(FixedOffset(78.4)), '+0118')
self.assertEqual(repr(FixedOffset(-5.5*60)), '-0530')
self.assertEqual(repr(FixedOffset(5.5*60)), '+0530')
self.assertEqual(repr(FixedOffset(-.5*60)), '-0030')
self.assertEqual(repr(FixedOffset(.5*60)), '+0030')
def test_16899(self):
if not self.tz_tests:
return
ts = 1289106000
# Midnight at the end of DST in US/Eastern: 2010-11-07T05:00:00Z
dt = datetime.datetime.utcfromtimestamp(ts)
# US/Eastern -- we force its representation to "EST"
tz = LocalTimezone(dt + datetime.timedelta(days=1))
self.assertEqual(
repr(datetime.datetime.fromtimestamp(ts - 3600, tz)),
'datetime.datetime(2010, 11, 7, 0, 0, tzinfo=EST)')
self.assertEqual(
repr(datetime.datetime.fromtimestamp(ts, tz)),
'datetime.datetime(2010, 11, 7, 1, 0, tzinfo=EST)')
self.assertEqual(
repr(datetime.datetime.fromtimestamp(ts + 3600, tz)),
'datetime.datetime(2010, 11, 7, 1, 0, tzinfo=EST)')
def test_copy(self):
now = datetime.datetime.now()
self.assertIsInstance(copy.copy(FixedOffset(90)), FixedOffset)
self.assertIsInstance(copy.copy(LocalTimezone(now)), LocalTimezone)
def test_deepcopy(self):
now = datetime.datetime.now()
self.assertIsInstance(copy.deepcopy(FixedOffset(90)), FixedOffset)
self.assertIsInstance(copy.deepcopy(LocalTimezone(now)), LocalTimezone)
def test_pickling_unpickling(self):
now = datetime.datetime.now()
self.assertIsInstance(pickle.loads(pickle.dumps(FixedOffset(90))), FixedOffset)
self.assertIsInstance(pickle.loads(pickle.dumps(LocalTimezone(now))), LocalTimezone)
| apache-2.0 |
snowballhg/node-gyp | gyp/pylib/gyp/xcode_emulation.py | 149 | 42289 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import gyp.common
import os.path
import re
import shlex
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Computed lazily by _GetSdkBaseDir(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_base_dir = None
def __init__(self, spec):
self.spec = spec
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
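    # e.g. 'libcrypto.dylib' matches with group(1) == 'crypto'.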
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
return '.app'
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
    if self.spec['type'] == 'shared_library':
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def _GetSdkBaseDir(self):
"""Returns the root of the 'Developer' directory. On Xcode 4.2 and prior,
this is usually just /Developer. Xcode 4.3 moved that folder into the Xcode
bundle."""
if not XcodeSettings._sdk_base_dir:
import subprocess
job = subprocess.Popen(['xcode-select', '-print-path'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = job.communicate()
if job.returncode != 0:
print out
raise Exception('Error %d running xcode-select' % job.returncode)
# The Developer folder moved in Xcode 4.3.
xcode43_sdk_path = os.path.join(
out.rstrip(), 'Platforms/MacOSX.platform/Developer/SDKs')
if os.path.isdir(xcode43_sdk_path):
XcodeSettings._sdk_base_dir = xcode43_sdk_path
else:
XcodeSettings._sdk_base_dir = os.path.join(out.rstrip(), 'SDKs')
return XcodeSettings._sdk_base_dir
def _SdkPath(self):
sdk_root = self.GetPerTargetSetting('SDKROOT', default='macosx10.5')
if sdk_root.startswith('macosx'):
return os.path.join(self._GetSdkBaseDir(),
'MacOSX' + sdk_root[len('macosx'):] + '.sdk')
return sdk_root
def GetCflags(self, configname):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
# This functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings():
cflags.append('-isysroot %s' % sdk_root)
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
self._Appendf(cflags, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = '(\S+)'
WORD = '\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
product_dir: The directory where products such static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
        current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._Appendf(
ldflags, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'SDKROOT' in self._Settings():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name:
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', self._SdkPath()))
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = self.xcode_settings[configname].get(setting, None)
first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def GetTargetPostbuilds(self, configname, output, output_binary, quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _AdjustLibrary(self, library):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
return l.replace('$(SDKROOT)', self._SdkPath())
def AdjustLibraries(self, libraries):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [ self._AdjustLibrary(library) for library in libraries]
return libraries
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def GetInclude(self, lang):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self.compiled_headers[lang]
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self.compiled_headers[lang] + '.gch'
def GetObjDependencies(self, sources, objs):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
    relative path to the gch file each object file depends on. |sources[i]|
    has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang)))
return result
def GetPchBuildCommands(self):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c'), '-x c-header', 'c', self.header),
(self._Gch('cc'), '-x c++-header', 'cc', self.header),
(self._Gch('m'), '-x objective-c-header', 'm', self.header),
(self._Gch('mm'), '-x objective-c++-header', 'mm', self.header),
]
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
  just a single file. Bundle rules not only produce a binary but also package
  resources into that directory."""
is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = output[0:-3] + 'nib'
yield output, res
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
    * |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
      shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
gyp_to_build_path: A function that converts paths relative to the
      current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
# These are filled in on a as-needed basis.
env = {
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
}
if xcode_settings.GetPerTargetSetting('SDKROOT'):
env['SDKROOT'] = xcode_settings._SdkPath()
else:
env['SDKROOT'] = ''
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in env --
until no variables present in env are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise Exception(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
| mit |
DFEC-R2D2/r2d2 | final_design/python/states/static.py | 2 | 2514 |
from __future__ import division
from __future__ import print_function
# from random import randint
import time
import cv2
# from opencvutils import Camera
from library import factory
from library import reset_all_hw
# import multiprocessing as mp
# Static Mode
def static_func(hw, ns):
print("Starting static")
dome = hw['dome']
dome.speed(0)
legs = hw['legs']
legs.drive(1, 0)
legs.drive(2, 0)
flash = hw['flashlight']
flash.set(5)
time.sleep(0.7)
flash.set(20)
time.sleep(0.7)
flash.set(1)
time.sleep(0.7)
flash.set(0)
audio = hw['audio']
audio.speak('start')
# setup computer vision
# face detection
cascPath = 'haarcascade_frontalface_default.xml'
faceCascade = cv2.CascadeClassifier(cascPath)
# camera
# image_size = (640, 480)
# camera = Camera(cam='pi')
# camera.init(win=image_size)
camera = cv2.VideoCapture(0)
person_found_cnt = 0
# ns.servo_wave = True
detect = [False]*4
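  # One detection flag per ultrasound sensor; only the front three sensors are
  # polled in the loop below (the back sensor is ignored).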
while ns.current_state == 2:
if ns.wav:
audio.playWAV(ns.wav)
ns.wav = None
if ns.mp3:
audio.playMP3(ns.mp3)
ns.mp3 = None
# Sensor Reading
# get ultrasound
us = ns.ultrasounds[:3] # ignore back ultrasound
for i, u in enumerate(us):
print('u', u)
if u > 1 and u < 60:
person_found_cnt += 1
if not detect[i]:
detect[i] = True
else:
detect[i] = False
print(detect)
if True in detect:
print("see you")
flash.set(5)
if (person_found_cnt%10) == 1:
audio.speak_random(5)
else:
person_found_cnt = 0
flash.set(0)
time.sleep(1)
# audio.playWAV('nerf')
# grab image and see if a person is there
# ok, img = camera.read()
# if ok:
# print('-')
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# # cv2.imwrite('save.png', gray)
#
# faces = faceCascade.detectMultiScale(
# gray,
# scaleFactor=1.1,
# minNeighbors=5,
# minSize=(30, 30)
# )
#
# if len(faces) > 0:
# person_found_cnt += 1
# print('+')
# else:
# person_found_cnt = 0
# print('0')
#
# if person_found_cnt > ns.opencv_person_found:
# person_found_cnt = 0
# audio.speak_random(2)
# (x, y, w, h) = faces[0]
# cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
# # cv2.imwrite('face_save.png', img)
# audio.speak('found')
# for (x, y, w, h) in faces:
# cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
# cv2.imwrite('face_save.png', img)
# print("found person")
# time.sleep(0.5)
# print('.')
# exiting, reset all hw
reset_all_hw(hw)
| mit |
wlerin/streamlink | src/streamlink/plugins/dogus.py | 4 | 2489 | import re
import logging
from streamlink.plugin import Plugin
from streamlink.plugin.api.utils import itertags
from streamlink.plugins.youtube import YouTube
from streamlink.stream import HLSStream
from streamlink.utils import update_scheme
log = logging.getLogger(__name__)
class Dogus(Plugin):
"""
    Support for live streams from Dogus sites, including ntv, ntvspor, and kralmuzik
"""
url_re = re.compile(r"""https?://(?:www.)?
(?:
ntv.com.tr/canli-yayin/ntv|
ntvspor.net/canli-yayin|
kralmuzik.com.tr/tv/|
eurostartv.com.tr/canli-izle
)/?""", re.VERBOSE)
mobile_url_re = re.compile(r"""(?P<q>["'])(?P<url>(https?:)?//[^'"]*?/live/hls/[^'"]*?\?token=)
(?P<token>[^'"]*?)(?P=q)""", re.VERBOSE)
token_re = re.compile(r"""token=(?P<q>["'])(?P<token>[^'"]*?)(?P=q)""")
kral_token_url = "https://service.kralmuzik.com.tr/version/gettoken"
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
def _get_streams(self):
res = self.session.http.get(self.url)
# Look for Youtube embedded video first
for iframe in itertags(res.text, 'iframe'):
if YouTube.can_handle_url(iframe.attributes.get("src")):
log.debug("Handing off to YouTube plugin")
return self.session.streams(iframe.attributes.get("src"))
# Next check for HLS URL with token
mobile_url_m = self.mobile_url_re.search(res.text)
mobile_url = mobile_url_m and update_scheme(self.url, mobile_url_m.group("url"))
if mobile_url:
log.debug("Found mobile stream: {0}".format(mobile_url_m.group(0)))
token = mobile_url_m and mobile_url_m.group("token")
if not token and "kralmuzik" in self.url:
log.debug("Getting Kral Muzik HLS stream token from API")
token = self.session.http.get(self.kral_token_url).text
elif not token:
# if no token is in the url, try to find it else where in the page
log.debug("Searching for HLS stream token in URL")
token_m = self.token_re.search(res.text)
token = token_m and token_m.group("token")
return HLSStream.parse_variant_playlist(self.session, mobile_url + token,
headers={"Referer": self.url})
__plugin__ = Dogus
| bsd-2-clause |
dongjoon-hyun/tensorflow | tensorflow/python/training/learning_rate_decay_test.py | 9 | 17918 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for learning rate decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import learning_rate_decay
class LRDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testContinuous(self):
self.evaluate(variables.global_variables_initializer())
step = 5
decayed_lr = learning_rate_decay.exponential_decay(0.05, step, 10, 0.96)
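    # exponential_decay computes lr * decay_rate ** (step / decay_steps);
    # without staircase the exponent stays fractional.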
expected = .05 * 0.96**(5.0 / 10.0)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testStaircase(self):
if context.executing_eagerly():
step = resource_variable_ops.ResourceVariable(0)
self.evaluate(variables.global_variables_initializer())
decayed_lr = learning_rate_decay.exponential_decay(
.1, step, 3, 0.96, staircase=True)
# No change to learning rate due to staircase
expected = .1
self.evaluate(step.assign(1))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
expected = .1
self.evaluate(step.assign(2))
self.assertAllClose(self.evaluate(decayed_lr), .1, 1e-6)
# Decayed learning rate
expected = .1 * 0.96 ** (100 // 3)
self.evaluate(step.assign(100))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
def testVariables(self):
with self.cached_session():
step = variables.VariableV1(1)
assign_1 = step.assign(1)
assign_2 = step.assign(2)
assign_100 = step.assign(100)
decayed_lr = learning_rate_decay.exponential_decay(.1, step, 3, 0.96,
staircase=True)
variables.global_variables_initializer().run()
# No change to learning rate
assign_1.op.run()
self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
assign_2.op.run()
self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
# Decayed learning rate
assign_100.op.run()
expected = .1 * 0.96 ** (100 // 3)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testPiecewiseConstant(self):
x = resource_variable_ops.ResourceVariable(-999)
decayed_lr = learning_rate_decay.piecewise_constant(
x, [100, 110, 120], [1.0, 0.1, 0.01, 0.001])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(decayed_lr), 1.0, 1e-6)
self.evaluate(x.assign(100))
self.assertAllClose(self.evaluate(decayed_lr), 1.0, 1e-6)
self.evaluate(x.assign(105))
self.assertAllClose(self.evaluate(decayed_lr), 0.1, 1e-6)
self.evaluate(x.assign(110))
self.assertAllClose(self.evaluate(decayed_lr), 0.1, 1e-6)
self.evaluate(x.assign(120))
self.assertAllClose(self.evaluate(decayed_lr), 0.01, 1e-6)
self.evaluate(x.assign(999))
self.assertAllClose(self.evaluate(decayed_lr), 0.001, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testPiecewiseConstantEdgeCases(self):
x_int = resource_variable_ops.ResourceVariable(
0, dtype=variables.dtypes.int32)
boundaries, values = [-1.0, 1.0], [1, 2, 3]
with self.assertRaises(ValueError):
decayed_lr = learning_rate_decay.piecewise_constant(
x_int, boundaries, values)
if context.executing_eagerly():
decayed_lr()
x = resource_variable_ops.ResourceVariable(0.0)
boundaries, values = [-1.0, 1.0], [1.0, 2, 3]
with self.assertRaises(ValueError):
decayed_lr = learning_rate_decay.piecewise_constant(
x, boundaries, values)
if context.executing_eagerly():
decayed_lr()
# Test that ref types are valid.
if not context.executing_eagerly():
x = variables.VariableV1(0.0)
x_ref = x.op.outputs[0] # float32_ref tensor should be accepted
boundaries, values = [1.0, 2.0], [1, 2, 3]
learning_rate_decay.piecewise_constant(x_ref, boundaries, values)
# Test casting boundaries from int32 to int64.
x_int64 = resource_variable_ops.ResourceVariable(
0, dtype=variables.dtypes.int64)
boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7]
decayed_lr = learning_rate_decay.piecewise_constant(
x_int64, boundaries, values)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(decayed_lr), 0.4, 1e-6)
self.evaluate(x_int64.assign(1))
self.assertAllClose(self.evaluate(decayed_lr), 0.4, 1e-6)
self.evaluate(x_int64.assign(2))
self.assertAllClose(self.evaluate(decayed_lr), 0.5, 1e-6)
self.evaluate(x_int64.assign(3))
self.assertAllClose(self.evaluate(decayed_lr), 0.6, 1e-6)
self.evaluate(x_int64.assign(4))
self.assertAllClose(self.evaluate(decayed_lr), 0.7, 1e-6)
class LinearDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testHalfWay(self):
step = 5
lr = 0.05
end_lr = 0.0
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = lr * 0.5
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testEnd(self):
step = 10
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testHalfWayWithEnd(self):
step = 5
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = (lr + end_lr) * 0.5
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testBeyondEnd(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testBeyondEndWithCycle(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, cycle=True)
expected = (lr - end_lr) * 0.25 + end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class SqrtDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testHalfWay(self):
step = 5
lr = 0.05
end_lr = 0.0
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power)
expected = lr * 0.5**power
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testEnd(self):
step = 10
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testHalfWayWithEnd(self):
step = 5
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power)
expected = (lr - end_lr) * 0.5**power + end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testBeyondEnd(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testBeyondEndWithCycle(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power, cycle=True)
expected = (lr - end_lr) * 0.25**power + end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class PolynomialDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testBeginWithCycle(self):
lr = 0.001
decay_steps = 10
step = 0
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, decay_steps, cycle=True)
expected = lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class ExponentialDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = resource_variable_ops.ResourceVariable(0)
decayed_lr = learning_rate_decay.natural_exp_decay(initial_lr, step, k,
decay_rate)
self.evaluate(variables.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr * math.exp(-i / k * decay_rate)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
@test_util.run_in_graph_and_eager_modes
def testStaircase(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = resource_variable_ops.ResourceVariable(0)
decayed_lr = learning_rate_decay.natural_exp_decay(
initial_lr, step, k, decay_rate, staircase=True)
self.evaluate(variables.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr * math.exp(-decay_rate * (i // k))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
class InverseDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = resource_variable_ops.ResourceVariable(0)
decayed_lr = learning_rate_decay.inverse_time_decay(initial_lr, step, k,
decay_rate)
self.evaluate(variables.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr / (1 + i / k * decay_rate)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
@test_util.run_in_graph_and_eager_modes
def testStaircase(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = resource_variable_ops.ResourceVariable(0)
decayed_lr = learning_rate_decay.inverse_time_decay(
initial_lr, step, k, decay_rate, staircase=True)
self.evaluate(variables.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr / (1 + decay_rate * (i // k))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
class CosineDecayTest(test_util.TensorFlowTestCase):
def np_cosine_decay(self, step, decay_steps, alpha=0.0):
step = min(step, decay_steps)
completed_fraction = step / decay_steps
decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay(initial_lr, step,
num_training_steps)
expected = self.np_cosine_decay(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testAlpha(self):
num_training_steps = 1000
initial_lr = 1.0
alpha = 0.1
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay(initial_lr, step,
num_training_steps, alpha)
expected = self.np_cosine_decay(step, num_training_steps, alpha)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class CosineDecayRestartsTest(test_util.TensorFlowTestCase):
def np_cosine_decay_restarts(self, step, decay_steps, t_mul=2.0, m_mul=1.0,
alpha=0.0):
fac = 1.0
while step >= decay_steps:
step -= decay_steps
decay_steps *= t_mul
fac *= m_mul
completed_fraction = step / decay_steps
decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay_restarts(
initial_lr, step, num_training_steps)
expected = self.np_cosine_decay_restarts(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testAlpha(self):
num_training_steps = 1000
initial_lr = 1.0
alpha = 0.1
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay_restarts(
initial_lr, step, num_training_steps, alpha=alpha)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, alpha=alpha)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testMMul(self):
num_training_steps = 1000
initial_lr = 1.0
m_mul = 0.9
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay_restarts(
initial_lr, step, num_training_steps, m_mul=m_mul)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, m_mul=m_mul)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testTMul(self):
num_training_steps = 1000
initial_lr = 1.0
t_mul = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay_restarts(
initial_lr, step, num_training_steps, t_mul=t_mul)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, t_mul=t_mul)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class LinearCosineDecayTest(test_util.TensorFlowTestCase):
def np_linear_cosine_decay(self,
step,
decay_steps,
alpha=0.0,
beta=0.001,
num_periods=0.5):
step = min(step, decay_steps)
linear_decayed = float(decay_steps - step) / decay_steps
fraction = 2.0 * num_periods * step / float(decay_steps)
cosine_decayed = 0.5 * (1.0 + math.cos(math.pi * fraction))
return (alpha + linear_decayed) * cosine_decayed + beta
@test_util.run_in_graph_and_eager_modes
def testDefaultDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.linear_cosine_decay(
initial_lr, step, num_training_steps)
expected = self.np_linear_cosine_decay(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testNonDefaultDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.linear_cosine_decay(
initial_lr,
step,
num_training_steps,
alpha=0.1,
beta=1e-4,
num_periods=5)
expected = self.np_linear_cosine_decay(
step, num_training_steps, alpha=0.1, beta=1e-4, num_periods=5)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class NoisyLinearCosineDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDefaultNoisyLinearCosine(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
# No numerical check because of noise
decayed_lr = learning_rate_decay.noisy_linear_cosine_decay(
initial_lr, step, num_training_steps)
# Cannot be deterministically tested
self.evaluate(decayed_lr)
@test_util.run_in_graph_and_eager_modes
def testNonDefaultNoisyLinearCosine(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
# No numerical check because of noise
decayed_lr = learning_rate_decay.noisy_linear_cosine_decay(
initial_lr,
step,
num_training_steps,
initial_variance=0.5,
variance_decay=0.1,
alpha=0.1,
beta=1e-4,
num_periods=5)
# Cannot be deterministically tested
self.evaluate(decayed_lr)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
you21979/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/model/svnrevision.py | 143 | 1735 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import db
class SVNRevision(db.Model):
number = db.IntegerProperty()
broken_bots = db.StringListProperty(default=[])
date = db.DateTimeProperty(auto_now_add=True)
| bsd-3-clause |
webmull/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/test/skip.py | 174 | 2299 | # Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
_log = logging.getLogger(__name__)
def skip_if(klass, condition, message=None, logger=None):
"""Makes all test_* methods in a given class no-ops if the given condition
is False. Backported from Python 3.1+'s unittest.skipIf decorator."""
if not logger:
logger = _log
if not condition:
return klass
for name in dir(klass):
attr = getattr(klass, name)
if not callable(attr):
continue
if not name.startswith('test_'):
continue
setattr(klass, name, _skipped_method(attr, message, logger))
klass._printed_skipped_message = False
return klass
def _skipped_method(method, message, logger):
def _skip(*args):
if method.im_class._printed_skipped_message:
return
method.im_class._printed_skipped_message = True
logger.info('Skipping %s.%s: %s' % (method.__module__, method.im_class.__name__, message))
return _skip
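# Illustrative usage sketch added by the editor; not part of the original
# module. The test class and the skip condition below are hypothetical.
#
#   import sys
#   import unittest
#   from webkitpy.test.skip import skip_if
#
#   class PosixOnlyTest(unittest.TestCase):
#       def test_something(self):
#           self.assertTrue(True)
#
#   # All test_* methods become logged no-ops when the condition is True.
#   PosixOnlyTest = skip_if(PosixOnlyTest, sys.platform == 'win32',
#                           message='not supported on Windows')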
| bsd-3-clause |
trezorg/django | django/utils/decorators.py | 55 | 4832 | "Functions that help with dynamically creating decorators for views."
try:
from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS
except ImportError:
from django.utils.functional import wraps, update_wrapper, WRAPPER_ASSIGNMENTS # Python 2.4 fallback.
class classonlymethod(classmethod):
def __get__(self, instance, owner):
if instance is not None:
raise AttributeError("This method is available only on the view class.")
return super(classonlymethod, self).__get__(instance, owner)
def method_decorator(decorator):
"""
Converts a function decorator into a method decorator
"""
# 'func' is a function at the time it is passed to _dec, but will eventually
    # be a method of the class it is defined in.
def _dec(func):
def _wrapper(self, *args, **kwargs):
@decorator
def bound_func(*args2, **kwargs2):
return func(self, *args2, **kwargs2)
# bound_func has the signature that 'decorator' expects i.e. no
# 'self' argument, but it is a closure over self so it can call
# 'func' correctly.
return bound_func(*args, **kwargs)
# In case 'decorator' adds attributes to the function it decorates, we
# want to copy those. We don't have access to bound_func in this scope,
# but we can cheat by using it on a dummy function.
@decorator
def dummy(*args, **kwargs):
pass
update_wrapper(_wrapper, dummy)
# Need to preserve any existing attributes of 'func', including the name.
update_wrapper(_wrapper, func)
return _wrapper
update_wrapper(_dec, decorator)
# Change the name to aid debugging.
_dec.__name__ = 'method_decorator(%s)' % decorator.__name__
return _dec
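# Illustrative usage sketch added by the editor; not part of the original
# module. The view class is hypothetical; login_required is the usual
# django.contrib.auth decorator.
#
#   from django.contrib.auth.decorators import login_required
#   from django.views.generic import View
#
#   class ProfileView(View):
#       @method_decorator(login_required)
#       def dispatch(self, request, *args, **kwargs):
#           return super(ProfileView, self).dispatch(request, *args, **kwargs)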
def decorator_from_middleware_with_args(middleware_class):
"""
Like decorator_from_middleware, but returns a function
that accepts the arguments to be passed to the middleware_class.
Use like::
cache_page = decorator_from_middleware_with_args(CacheMiddleware)
# ...
@cache_page(3600)
def my_view(request):
# ...
"""
return make_middleware_decorator(middleware_class)
def decorator_from_middleware(middleware_class):
"""
Given a middleware class (not an instance), returns a view decorator. This
lets you use middleware functionality on a per-view basis. The middleware
is created with no params passed.
"""
return make_middleware_decorator(middleware_class)()
def available_attrs(fn):
"""
Return the list of functools-wrappable attributes on a callable.
This is required as a workaround for http://bugs.python.org/issue3445.
"""
return tuple(a for a in WRAPPER_ASSIGNMENTS if hasattr(fn, a))
def make_middleware_decorator(middleware_class):
def _make_decorator(*m_args, **m_kwargs):
middleware = middleware_class(*m_args, **m_kwargs)
def _decorator(view_func):
def _wrapped_view(request, *args, **kwargs):
if hasattr(middleware, 'process_request'):
result = middleware.process_request(request)
if result is not None:
return result
if hasattr(middleware, 'process_view'):
result = middleware.process_view(request, view_func, args, kwargs)
if result is not None:
return result
try:
response = view_func(request, *args, **kwargs)
except Exception, e:
if hasattr(middleware, 'process_exception'):
result = middleware.process_exception(request, e)
if result is not None:
return result
raise
if hasattr(response, 'render') and callable(response.render):
if hasattr(middleware, 'process_template_response'):
response = middleware.process_template_response(request, response)
# Defer running of process_response until after the template
# has been rendered:
if hasattr(middleware, 'process_response'):
callback = lambda response: middleware.process_response(request, response)
response.add_post_render_callback(callback)
else:
if hasattr(middleware, 'process_response'):
return middleware.process_response(request, response)
return response
return wraps(view_func, assigned=available_attrs(view_func))(_wrapped_view)
return _decorator
return _make_decorator
| bsd-3-clause |
hzzyyy/pymcef | script/Russell3000.py | 1 | 1164 | from bs4 import BeautifulSoup
import requests
import pandas as pd
def _get_listed_table(url):
# Scrape the HTML at the url
r = requests.get(url)
# Turn the HTML into a Beautiful Soup object
soup = BeautifulSoup(r.text, "html.parser")
table = soup.find(text='AAPL').parent.parent.parent
return table
def get_listed_instruments(url):
table = _get_listed_table(url)
rows = table.find_all('tr')
#strip column names
colnames = [cell.string.strip() for cell in rows[0].find_all('td')]
df = pd.DataFrame(columns=colnames)
i = 0
for row in rows[1:]:
cells = [cell.string for cell in row.find_all('td')]
df.loc[i] = [None if cell is None else cell.strip() for cell in cells]
i += 1
return df
def get_listed_symbols(url):
table = _get_listed_table(url)
rows = table.find_all('tr')
#strip column names
colnames = [cell.string.strip() for cell in rows[0].find_all('td')]
idx = colnames.index(u'Symbol')
symbols = []
for row in rows[1:]:
symbol = row.find_all('td')[idx].string.strip()
symbols.append(symbol)
return symbols
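# Illustrative usage sketch added by the editor; not part of the original
# module. The URL is a placeholder for any page whose HTML table of
# Russell 3000 constituents contains the 'AAPL' cell this scraper looks for.
#
#   url = 'http://example.com/russell3000-holdings'  # hypothetical
#   holdings = get_listed_instruments(url)  # pandas DataFrame of all columns
#   tickers = get_listed_symbols(url)       # plain list of ticker symbols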
| bsd-3-clause |
DeMille/emailhooks | django_nonrel/django/contrib/localflavor/id/id_choices.py | 439 | 3217 | import warnings
from django.utils.translation import ugettext_lazy as _
# Reference: http://id.wikipedia.org/wiki/Daftar_provinsi_Indonesia
# Indonesia does not have an official Province code standard.
# I decided to use unambiguous and consistent (some are common) 3-letter codes.
warnings.warn(
'There have been recent changes to the ID localflavor. See the release notes for details',
RuntimeWarning
)
PROVINCE_CHOICES = (
('ACE', _('Aceh')),
('BLI', _('Bali')),
('BTN', _('Banten')),
('BKL', _('Bengkulu')),
('DIY', _('Yogyakarta')),
('JKT', _('Jakarta')),
('GOR', _('Gorontalo')),
('JMB', _('Jambi')),
('JBR', _('Jawa Barat')),
('JTG', _('Jawa Tengah')),
('JTM', _('Jawa Timur')),
('KBR', _('Kalimantan Barat')),
('KSL', _('Kalimantan Selatan')),
('KTG', _('Kalimantan Tengah')),
('KTM', _('Kalimantan Timur')),
('BBL', _('Kepulauan Bangka-Belitung')),
('KRI', _('Kepulauan Riau')),
('LPG', _('Lampung')),
('MLK', _('Maluku')),
('MUT', _('Maluku Utara')),
('NTB', _('Nusa Tenggara Barat')),
('NTT', _('Nusa Tenggara Timur')),
('PPA', _('Papua')),
('PPB', _('Papua Barat')),
('RIU', _('Riau')),
('SLB', _('Sulawesi Barat')),
('SLS', _('Sulawesi Selatan')),
('SLT', _('Sulawesi Tengah')),
('SLR', _('Sulawesi Tenggara')),
('SLU', _('Sulawesi Utara')),
('SMB', _('Sumatera Barat')),
('SMS', _('Sumatera Selatan')),
('SMU', _('Sumatera Utara')),
)
LICENSE_PLATE_PREFIX_CHOICES = (
('A', _('Banten')),
('AA', _('Magelang')),
('AB', _('Yogyakarta')),
('AD', _('Surakarta - Solo')),
('AE', _('Madiun')),
('AG', _('Kediri')),
('B', _('Jakarta')),
('BA', _('Sumatera Barat')),
('BB', _('Tapanuli')),
('BD', _('Bengkulu')),
('BE', _('Lampung')),
('BG', _('Sumatera Selatan')),
('BH', _('Jambi')),
('BK', _('Sumatera Utara')),
('BL', _('Nanggroe Aceh Darussalam')),
('BM', _('Riau')),
('BN', _('Kepulauan Bangka Belitung')),
('BP', _('Kepulauan Riau')),
('CC', _('Corps Consulate')),
('CD', _('Corps Diplomatic')),
('D', _('Bandung')),
('DA', _('Kalimantan Selatan')),
('DB', _('Sulawesi Utara Daratan')),
('DC', _('Sulawesi Barat')),
('DD', _('Sulawesi Selatan')),
('DE', _('Maluku')),
('DG', _('Maluku Utara')),
('DH', _('NTT - Timor')),
('DK', _('Bali')),
('DL', _('Sulawesi Utara Kepulauan')),
('DM', _('Gorontalo')),
('DN', _('Sulawesi Tengah')),
('DR', _('NTB - Lombok')),
('DS', _('Papua dan Papua Barat')),
('DT', _('Sulawesi Tenggara')),
('E', _('Cirebon')),
('EA', _('NTB - Sumbawa')),
('EB', _('NTT - Flores')),
('ED', _('NTT - Sumba')),
('F', _('Bogor')),
('G', _('Pekalongan')),
('H', _('Semarang')),
('K', _('Pati')),
('KB', _('Kalimantan Barat')),
('KH', _('Kalimantan Tengah')),
('KT', _('Kalimantan Timur')),
('L', _('Surabaya')),
('M', _('Madura')),
('N', _('Malang')),
('P', _('Jember')),
('R', _('Banyumas')),
('RI', _('Federal Government')),
('S', _('Bojonegoro')),
('T', _('Purwakarta')),
('W', _('Sidoarjo')),
('Z', _('Garut')),
)
| mit |
shaunstanislaus/zulip | zerver/management/commands/bulk_change_user_name.py | 115 | 1120 | from __future__ import absolute_import
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_change_full_name
from zerver.models import UserProfile, get_user_profile_by_email
class Command(BaseCommand):
help = """Change the names for many users."""
def add_arguments(self, parser):
parser.add_argument('data_file', metavar='<data file>', type=str,
help="file containing rows of the form <email>,<desired name>")
def handle(self, *args, **options):
data_file = options['data_file']
with open(data_file, "r") as f:
for line in f:
email, new_name = line.strip().split(",", 1)
try:
user_profile = get_user_profile_by_email(email)
old_name = user_profile.full_name
print "%s: %s -> %s" % (email, old_name, new_name)
do_change_full_name(user_profile, new_name)
except UserProfile.DoesNotExist:
print "* E-mail %s doesn't exist in the system, skipping." % (email,)
| apache-2.0 |
ukos-git/python-swnt-reactor | src/MKDatabase.py | 1 | 30109 | #!/usr/bin/env python
import MySQLdb as mysqlconnector
from MySQLdb.constants import CLIENT
import os
import socket
import decimal
import struct
from time import sleep
import multiprocessing
import ConfigParser
from MKFlowMessage import FBconvertLong # converter for long numbers to float and percent
#cvd-client->rbBmSDP7fSKp87b5
class MKDatabase(object):
sql = ""
connected = False
ready = False
messageID = -1
client = False
recording = False
recordingID = -1
fileName = ""
storage_description = 50
storage_values = 30
hostname = ""
# settings.cfg (see loadConfig)
dbUser = ""
dbPass = ""
dbHost = ""
dbName = ""
servername = ""
def __init__(self, isClient = False):
self.client = isClient
self.loadConfig()
self.test()
decimal.getcontext().prec = 2
def loadConfig(self):
config = ConfigParser.ConfigParser()
srcPath = os.path.dirname(os.path.realpath(__file__))
settingsFile = srcPath + '/../settings.cfg'
if not (os.path.exists(settingsFile)):
print "settings.cfg not found"
raise
config.read(settingsFile)
self.dbUser = config.get('Database', 'dbuser')
self.dbPass = config.get('Database', 'dbpass')
self.dbHost = config.get('Database', 'dbhost')
self.dbName = config.get('Database', 'dbname')
self.servername = config.get('Server', 'servername')
def open(self):
try:
if not self.checkIP():
print "server unavailable"
raise
self.db = mysqlconnector.connect(
host = self.dbHost,
user = self.dbUser,
passwd = self.dbPass,
db = self.dbName,
client_flag = CLIENT.FOUND_ROWS,
connect_timeout = 1
)
except:
print "database open failed."
self.close()
return False
else:
print "connected as user: %s" % self.dbUser
self.connected = True
return True
def close(self):
try:
self.db.close()
except:
if not self.checkIP():
print "connection lost. Database could not be closed normal"
self.connected = False
else:
self.connected = False
def isOpen(self):
#if not self.connected:
# return False
#try:
# stats = self.db.stat()
# if stats == 'MySQL server has gone away':
# self.close()
#except:
# self.connected = False
return self.connected
def write_without_timeout(self, db, sql, connection):
try:
cursor = db.cursor()
cursor.execute(sql)
affectedRows = cursor.rowcount
cursor.close()
db.commit()
except:
affectedRows = 0
try:
self.db.rollback()
except:
pass
connection.send(affectedRows)
connection.close()
def read_without_timeout(self, db, sql, connection):
affectedRows = 0
try:
cursor = db.cursor()
cursor.execute(sql)
data = cursor.fetchone()
cursor.close()
except:
connection.send([])
else:
connection.send(data)
connection.close()
# from alex martelli on http://stackoverflow.com/questions/1507091/python-mysqldb-query-timeout
def write(self, sql, update = False):
if not self.isOpen():
if not self.open():
raise
conn_parent, conn_child = multiprocessing.Pipe(False)
subproc = multiprocessing.Process(target = self.write_without_timeout,
args = (self.db, sql, conn_child))
subproc.start()
subproc.join(1)
if conn_parent.poll():
affectedRows = conn_parent.recv()
            # on UPDATE statements, raise if no rows were affected
if update and affectedRows == 0:
raise UpdateError('UPDATE statement failed')
else:
return affectedRows
subproc.terminate()
raise TimeoutError("Query %r ran for >%r" % (sql, timeout))
def read(self, sql):
if not self.isOpen():
if not self.open():
raise
conn_parent, conn_child = multiprocessing.Pipe(False)
subproc = multiprocessing.Process(target = self.read_without_timeout,
args = (self.db, sql, conn_child))
subproc.start()
subproc.join(1)
if conn_parent.poll():
data = conn_parent.recv()
            # the child sends [] when the query failed or returned no rows
            if not data:
                return []
            return data
else:
subproc.terminate()
return []
def writeArduino(self, sql):
try:
self.write(sql, True)
except:
try:
print "writeArduino failed: create database and try again."
self.createArduino()
self.resetArduino()
self.write(sql)
except:
self.close()
return False
else:
return True
else:
return True
def writeRecording(self, sql):
try:
self.write(sql, True)
except:
try:
self.createRecording()
self.resetRecording()
self.write(sql)
except:
self.close()
return False
else:
return True
else:
return True
def writeFlowbus(self, sql):
try:
self.write(sql)
except:
try:
self.createFlowbus()
self.write(sql)
except:
self.close()
return False
else:
return True
else:
return True
def writeMessage(self, sql):
try:
self.write(sql, True)
except:
try:
self.createMessage()
self.resetMessage()
self.write(sql)
except:
self.close()
return False
else:
return True
else:
return True
def test(self):
print "-- starting self-test --"
self.open()
sql="SELECT VERSION()"
data = self.read(sql)
self.close()
print "MySQL version : %s " % data
print "-- self test complete --"
def getHostname(self):
if self.hostname == "":
self.hostname = socket.gethostname()
return self.hostname
def isServer(self):
if (self.getHostname() == self.servername):
return True
else:
return False
def getIP(self):
if self.isServer():
ip = 'localhost'
else:
ip = self.dbHost
return ip
def checkIP(self, ip = ""):
if len(ip) == 0:
ip = self.getIP()
if ip == "localhost":
return True
command = "ping -c 1 -W 1 " + ip
print "executing '" + command + "'"
if os.system(command + " > /dev/null") == 0:
return True
else:
print "ip not found. sleeping penalty."
sleep(1)
return False
def createArduino(self):
sql = """CREATE TABLE IF NOT EXISTS `runtime_arduino` (
`temperature` decimal(6,2) NOT NULL DEFAULT '0',
`pressure` decimal(6,2) NOT NULL DEFAULT '0',
`argon` decimal(6,2) NOT NULL DEFAULT '0',
`ethanol` decimal(6,2) NOT NULL DEFAULT '0',
`spTemperature` int(11) NOT NULL DEFAULT '0',
`spPressure` int(11) NOT NULL DEFAULT '1000',
`spEthanol` int(11) NOT NULL DEFAULT '0',
`spArgon` int(11) NOT NULL DEFAULT '0'
) ENGINE=MEMORY DEFAULT Charset=utf8;"""
self.write(sql)
def resetArduino(self):
sql = """INSERT INTO `runtime_arduino`
(`temperature`, `pressure`, `argon`, `ethanol`, `spTemperature`, `spPressure`, `spEthanol`, `spArgon`)
VALUES
(0, 0, 0, 0, 0, 0, 0, 0);"""
self.write(sql)
def createFlowbus(self):
sql = """
CREATE TABLE IF NOT EXISTS `runtime_flowbus`
(
`instrument` smallint(2) NOT NULL DEFAULT '0',
`process` smallint(2) NOT NULL,
`flowBus` smallint(2) NOT NULL,
`dataType` tinyint(1) NOT NULL DEFAULT '0',
`parameter` binary(%i) NOT NULL DEFAULT '0',
`data` binary(%i) NOT NULL DEFAULT '0',
`time` decimal(7,2) NOT NULL DEFAULT '0',
UNIQUE KEY `instrument` (`instrument`,`process`,`flowBus`)
)
ENGINE=MEMORY
DEFAULT CHARSET=utf8;""" % (self.storage_description, self.storage_values)
self.write(sql)
def createRecording(self):
sql = """CREATE TABLE IF NOT EXISTS `runtime_recording` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`recording` tinyint(4) NOT NULL DEFAULT '0',
`id_recording` int(11) DEFAULT '0',
PRIMARY KEY (`id`)
) ENGINE=MEMORY DEFAULT Charset=utf8 AUTO_INCREMENT=40;"""
self.write(sql)
sql = """CREATE TABLE IF NOT EXISTS `recording` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`recording` tinyint(4) NOT NULL DEFAULT '0',
`filename` text NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT Charset=utf8 AUTO_INCREMENT=40;"""
self.write(sql)
def resetRecording(self):
sql = """INSERT INTO `runtime_recording`
(`recording`)
VALUES
(0)"""
self.write(sql)
def createMessage(self):
sql = """CREATE TABLE IF NOT EXISTS `cvd`.`runtime_message` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`ready` tinyint(4) NOT NULL DEFAULT '0',
`id_message` int(11) DEFAULT '0',
PRIMARY KEY (`id`)
) ENGINE=MEMORY DEFAULT Charset=utf8 AUTO_INCREMENT=40;"""
self.write(sql)
sql = """CREATE TABLE IF NOT EXISTS `cvd`.`message` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`processed` tinyint(4) NOT NULL DEFAULT '0',
`text` text NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT Charset=utf8 AUTO_INCREMENT=40;"""
self.write(sql)
def resetMessage(self):
sql = """INSERT INTO `cvd`.`runtime_message`
(`ready`)
VALUES
(0)"""
self.write(sql)
sql = """DELETE FROM `cvd`.`message`
WHERE `processed` = 1"""
self.write(sql)
def setData(self, data, setpoint):
try:
self.temperature = decimal.Decimal(data[0])
self.pressure = decimal.Decimal(data[1])
self.argon = decimal.Decimal(data[2])
self.ethanol = decimal.Decimal(data[3])
except:
self.temperature = 0.00
self.pressure = 0.00
self.argon = 0.00
self.ethanol = 0.00
try:
self.spTemperature = int(setpoint[0])
self.spPressure = int(setpoint[1])
self.spArgon = int(setpoint[2])
self.spEthanol = int(setpoint[3])
except:
self.spTemperature = 0
self.spPressure = 1000
self.spEthanol = 0
self.spArgon = 0
sql = """UPDATE `cvd`.`runtime_arduino`
SET `temperature` = %s,
`pressure` = %s,
`ethanol` = %s,
`argon` = %s,
`spTemperature` = %s,
`spPressure` = %s,
`spEthanol` = %s,
`spArgon` = %s;""" % (self.temperature, self.pressure, self.ethanol, self.argon, self.spTemperature, self.spPressure, self.spEthanol, self.spArgon)
return self.writeArduino(sql)
def setLogFile(self, fileName):
id = self.getRecordingID()
if id < 0:
return False
sql = """UPDATE `cvd`.`recording`
SET `filename` = '%s',
`recording` = 1
WHERE `id` = %i
LIMIT 1;""" % (fileName, id)
if not self.writeRecording(sql):
return False
if self.getLogFile() == fileName:
return True
else:
return False
def isRecording(self):
sql = """SELECT `recording`
FROM `cvd`.`runtime_recording`
LIMIT 1;"""
try:
data = self.read(sql)
except:
return False
if not len(data) == 1:
return False
else:
if data[0]:
return True
else:
return False
def stopRecording(self):
sql = """UPDATE `cvd`.`runtime_recording`
SET `recording` = 0;"""
if not self.writeRecording(sql):
return False
sql = """UPDATE `cvd`.`recording`
SET `recording` = 0
WHERE `recording` = 1;"""
if not self.writeRecording(sql):
return False
self.recordingID = -1
return True
def startRecording(self, filename = ''):
        self.stopRecording()
sql = """INSERT INTO `cvd`.`recording` (
`id` ,`time` , `recording` , `filename` )
VALUES (
NULL , CURRENT_TIMESTAMP , 1, '%s')""" % filename
if not self.writeRecording(sql):
return False
sql = """SELECT `id`
FROM `cvd`.`recording`
WHERE `recording` = 1
LIMIT 1;"""
data = self.read(sql)
if not len(data) == 1:
return False
sql = """UPDATE `cvd`.`runtime_recording`
SET `id_recording` = %i,
`recording` = 1
LIMIT 1;""" % data
if not self.writeRecording(sql):
return False
return True
def getRecordingID(self):
sql = """SELECT `id_recording`
FROM `cvd`.`runtime_recording`
LIMIT 1;"""
data = self.read(sql)
if (len(data) == 1):
return int(data[0])
else:
return -1
def getLogFile(self):
# get id from memory table
recordingID = self.getRecordingID()
# update filename from disc table if not already saved in class
if not (recordingID == self.recordingID) or len(self.fileName) == 0:
print "querying filename from sql table"
self.close()
sql = """SELECT `filename`
FROM `cvd`.`recording`
WHERE `id` = %i;""" % recordingID
data = self.read(sql)
if len(data) == 1:
self.fileName = data[0]
else:
self.fileName = ''
self.recordingID = recordingID
return self.fileName
def setMessage(self, message):
sql = """INSERT INTO `cvd`.`message`
(`text`) VALUES ('%s');""" % (message)
if self.writeMessage(sql):
return self.updateMessage()
return False
def updateMessage(self):
sql = """SELECT `id` FROM `cvd`.`message`
WHERE `processed` = 0
LIMIT 1;"""
data = self.read(sql)
if (len(data) == 1):
id_message = data[0]
ready = 1
else:
ready = 0
id_message = -1
sql = """UPDATE `cvd`.`runtime_message`
SET `ready` = %i,
`id_message` = %i
LIMIT 1;""" % (ready, id_message)
return self.writeMessage(sql)
def isReady(self):
if self.ready:
return True
sql = """SELECT `ready`, `id_message`
FROM `cvd`.`runtime_message`;"""
try:
data = self.read(sql)
except:
return False
if not len(data) == 2:
data = (0,-1)
(self.ready, self.messageID) = data
if self.ready:
return True
else:
return False
def getMessage(self):
self.message = ""
# read from runtime (memory table)
if self.isReady():
# ready flag did also read out messageID.
# get message string from cvd.message
sql = """SELECT `text`
FROM `cvd`.`message`
WHERE `id` = %i
LIMIT 1;""" % self.messageID
data = self.read(sql)
if (len(data) == 1):
self.message = data[0]
# mark message in cvd.message as processed
sql = """UPDATE `cvd`.`message`
SET `processed` = 1
WHERE `id` = %i;""" % self.messageID
self.writeMessage(sql)
self.updateMessage()
# reset readout
self.ready = False
return self.message
def setFlowbus(self, instrument, process, flowBus, dataTypeString, dataInput, timeInput, parameterName):
time = decimal.Decimal(timeInput)
parameterName = parameterName.encode("hex")
if (dataTypeString == "character"):
dataType = 0
data = format(int(dataInput), 'x')
elif(dataTypeString == "integer"):
dataType = 1
data = format(int(dataInput), 'x')
elif(dataTypeString == "long"):
dataType = 2
data = format(int(dataInput), 'x')
elif(dataTypeString == "string"):
dataType = 3
data = dataInput.encode("hex")
else:
raise ValueError("can not identify dataType at setFlowBus()")
sql = """
INSERT INTO `cvd`.`runtime_flowbus`
(`instrument`,`process`,`flowBus`,`dataType`,`data`,`time`, `parameter`)
VALUES
(%i, %i, %i, %i, UNHEX(LPAD('%s',%i,'0')), %.2f, UNHEX(LPAD('%s',%i,'0')))""" % (instrument, process, flowBus, dataType, data, self.storage_values * 2, time, parameterName, self.storage_description * 2)
sql += """
ON DUPLICATE KEY UPDATE
`data` = UNHEX(LPAD('%s',%i,'0')),
`time` = %.2f;""" % (data, self.storage_values * 2, time)
self.writeFlowbus(sql)
def getFlowbus(self, instrument, process, flowBus):
sql = """
SELECT `dataType`,TRIM(LEADING '0' FROM HEX(`data`)),`time`,TRIM(LEADING '0' FROM HEX(`parameter`))
FROM `cvd`.`runtime_flowbus`
WHERE
( `instrument` = %i
AND `process` = %i
AND `flowBus` = %i);
""" % (instrument, process, flowBus)
data = self.read(sql)
if (len(data) == 4):
(dataType, dataOut, timeOut, parameter) = data
else:
return (-1,-1,-1)
parameter = parameter.decode("hex")
time = decimal.Decimal(timeOut)
if (dataType == 0):
data = int(dataOut, 16)
elif(dataType == 1):
data = int(dataOut, 16)
elif(dataType == 2):
data = FBconvertLong(process, flowBus, int(dataOut,16))
elif(dataType == 3):
data = dataOut.decode("hex")
else:
raise ValueError("can not identify dataType at getFlowBus()")
return (parameter, data, time)
def getAll(self):
sql = """SELECT temperature, pressure, ethanol, argon,
spTemperature, spPressure, spEthanol, spArgon
FROM `cvd`.`runtime_arduino`
LIMIT 1"""
data = self.read(sql)
if len(data) == 0:
print "database readout failed for arduino!"
data = (-1,-1,-1,-1, -1,-1,-1,-1)
(self.temperature, self.pressure, self.ethanol, self.argon, self.spTemperature, self.spPressure, self.spEthanol, self.spArgon) = data
class UpdateError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class TimeoutError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
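# Illustrative usage sketch added by the editor; not part of the original
# module. It assumes a reachable MySQL server configured in settings.cfg and
# uses made-up readings and setpoints.
#
#   db = MKDatabase(isClient=True)
#   db.setMessage('start recording')
#   if db.isRecording():
#       print db.getLogFile()
#   # data = (temperature, pressure, argon, ethanol); setpoints in same order
#   db.setData((25.0, 1000.0, 0.0, 0.0), (850, 1000, 100, 0))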
sql = """UPDATE `cvd`.`runtime_arduino`
SET `temperature` = %s,
`pressure` = %s,
`ethanol` = %s,
`argon` = %s
`spTemperature` = %s,
`spPressure` = %s,
`spEthanol` = %s,
`spArgon` = %s
LIMIT 1;""" % (self.temperature, self.pressure, self.ethanol, self.argon, setpoint[0], setpoint[1], setpoint[2], setpoint[2])
return self.writeArduino(sql)
def setLogFile(self, fileName):
id = self.getRecordingID()
if id < 0:
return False
sql = """UPDATE `cvd`.`recording`
SET `filename` = '%s',
`recording` = 1
WHERE `id` = %i
LIMIT 1;""" % (fileName, id)
if not self.writeRecording(sql):
return False
if self.getLogFile() == fileName:
return True
else:
return False
def isRecording(self):
sql = """SELECT `recording`
FROM `cvd`.`runtime_recording`
LIMIT 1;"""
try:
data = self.read(sql)
except:
return False
if not len(data) == 1:
return False
else:
if data[0]:
return True
else:
return False
def stopRecording(self):
sql = """UPDATE `cvd`.`runtime_recording`
SET `recording` = 0;"""
if not self.writeRecording(sql):
return False
sql = """UPDATE `cvd`.`recording`
SET `recording` = 0
WHERE `recording` = 1;"""
if not self.writeRecording(sql):
return False
self.recordingID = -1
return True
def startRecording(self, filename = ''):
self.stopRecording
sql = """INSERT INTO `cvd`.`recording` (
`id` ,`time` , `recording` , `filename` )
VALUES (
NULL , CURRENT_TIMESTAMP , 1, '%s')""" % filename
if not self.writeRecording(sql):
return False
sql = """SELECT `id`
FROM `cvd`.`recording`
WHERE `recording` = 1
LIMIT 1;"""
data = self.read(sql)
if not len(data) == 1:
return False
sql = """UPDATE `cvd`.`runtime_recording`
SET `id_recording` = %i,
`recording` = 1
LIMIT 1;""" % data
if not self.writeRecording(sql):
return False
return True
def getRecordingID(self):
sql = """SELECT `id_recording`
FROM `cvd`.`runtime_recording`
LIMIT 1;"""
data = self.read(sql)
if (len(data) == 1):
return int(data[0])
else:
return -1
def getLogFile(self):
# get id from memory table
recordingID = self.getRecordingID()
# update filename from disc table if not already saved in class
if not (recordingID == self.recordingID) or len(self.fileName) == 0:
print "querying filename from sql table"
self.close()
sql = """SELECT `filename`
FROM `cvd`.`recording`
WHERE `id` = %i;""" % recordingID
data = self.read(sql)
if len(data) == 1:
self.fileName = data[0]
else:
self.fileName = ''
self.recordingID = recordingID
return self.fileName
def setMessage(self, message):
sql = """INSERT INTO `cvd`.`message`
(`text`) VALUES ('%s');""" % (message)
if self.writeMessage(sql):
return self.updateMessage()
return False
def updateMessage(self):
sql = """SELECT `id` FROM `cvd`.`message`
WHERE `processed` = 0
LIMIT 1;"""
data = self.read(sql)
if (len(data) == 1):
id_message = data[0]
ready = 1
else:
ready = 0
id_message = -1
sql = """UPDATE `cvd`.`runtime_message`
SET `ready` = %i,
`id_message` = %i
LIMIT 1;""" % (ready, id_message)
return self.writeMessage(sql)
def isReady(self):
if self.ready:
return True
sql = """SELECT `ready`, `id_message`
FROM `cvd`.`runtime_message`;"""
try:
data = self.read(sql)
except:
return False
if not len(data) == 2:
data = (0,-1)
(self.ready, self.messageID) = data
if self.ready:
return True
else:
return False
def getMessage(self):
self.message = ""
# read from runtime (memory table)
if self.isReady():
# ready flag did also read out messageID.
# get message string from cvd.message
sql = """SELECT `text`
FROM `cvd`.`message`
WHERE `id` = %i
LIMIT 1;""" % self.messageID
data = self.read(sql)
if (len(data) == 1):
self.message = data[0]
# mark message in cvd.message as processed
sql = """UPDATE `cvd`.`message`
SET `processed` = 1
WHERE `id` = %i;""" % self.messageID
self.writeMessage(sql)
self.updateMessage()
# reset readout
self.ready = False
return self.message
def setFlowbus(self, instrument, process, flowBus, dataTypeString, dataInput, timeInput, parameterName):
time = decimal.Decimal(timeInput)
parameterName = parameterName.encode("hex")
if (dataTypeString == "character"):
dataType = 0
data = format(int(dataInput), 'x')
elif(dataTypeString == "integer"):
dataType = 1
data = format(int(dataInput), 'x')
elif(dataTypeString == "long"):
dataType = 2
data = format(int(dataInput), 'x')
elif(dataTypeString == "string"):
dataType = 3
data = dataInput.encode("hex")
else:
raise ValueError("can not identify dataType at setFlowBus()")
sql = """
INSERT INTO `cvd`.`runtime_flowbus`
(`instrument`,`process`,`flowBus`,`dataType`,`data`,`time`, `parameter`)
VALUES
(%i, %i, %i, %i, UNHEX(LPAD('%s',%i,'0')), %.2f, UNHEX(LPAD('%s',%i,'0')))""" % (instrument, process, flowBus, dataType, data, self.storage_values * 2, time, parameterName, self.storage_description * 2)
sql += """
ON DUPLICATE KEY UPDATE
`data` = UNHEX(LPAD('%s',%i,'0')),
`time` = %.2f;""" % (data, self.storage_values * 2, time)
self.writeFlowbus(sql)
def getFlowbus(self, instrument, process, flowBus):
sql = """
SELECT `dataType`,TRIM(LEADING '0' FROM HEX(`data`)),`time`,TRIM(LEADING '0' FROM HEX(`parameter`))
FROM `cvd`.`runtime_flowbus`
WHERE
( `instrument` = %i
AND `process` = %i
AND `flowBus` = %i);
""" % (instrument, process, flowBus)
data = self.read(sql)
if (len(data) == 4):
(dataType, dataOut, timeOut, parameter) = data
else:
return (-1,-1,-1)
parameter = parameter.decode("hex")
time = decimal.Decimal(timeOut)
if (dataType == 0):
data = int(dataOut, 16)
elif(dataType == 1):
data = int(dataOut, 16)
elif(dataType == 2):
data = FBconvertLong(process, flowBus, int(dataOut,16))
elif(dataType == 3):
data = dataOut.decode("hex")
else:
raise ValueError("can not identify dataType at getFlowBus()")
return (parameter, data, time)
def getAll(self):
sql = """SELECT temperature, pressure, ethanol, argon,
spTemperature, spPressure, spEthanol, spArgon
FROM `cvd`.`runtime_arduino`
LIMIT 1"""
data = self.read(sql)
if len(data) == 0:
print "database readout failed for arduino!"
data = (-1,-1,-1,-1, -1,-1,-1,-1)
(self.temperature, self.pressure, self.ethanol, self.argon, self.spTemperature, self.spPressure, self.spEthanol, self.spArgon) = data
class UpdateError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class TimeoutError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| mit |
smashwilson/ansible-modules-core | cloud/amazon/ec2_elb.py | 18 | 12691 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb
short_description: De-registers or registers instances from EC2 ELBs
description:
- This module de-registers or registers an AWS EC2 instance from the ELBs
that it belongs to.
- Returns fact "ec2_elbs" which is a list of elbs attached to the instance
if state=absent is passed as an argument.
- Will be marked changed when called only if there are ELBs found to operate on.
version_added: "1.2"
author: John Jarvis
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
instance_id:
description:
- EC2 Instance ID
required: true
ec2_elbs:
description:
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
required: false
default: None
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
enable_availability_zone:
description:
- Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB.
required: false
default: yes
choices: [ "yes", "no" ]
wait:
description:
- Wait for instance registration or deregistration to complete successfully before returning.
required: false
default: yes
choices: [ "yes", "no" ]
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
wait_timeout:
description:
- Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
required: false
default: 0
version_added: "1.6"
extends_documentation_fragment: aws
"""
EXAMPLES = """
# basic pre_task and post_task example
pre_tasks:
- name: Gathering ec2 facts
action: ec2_facts
- name: Instance De-register
local_action:
module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}"
state: 'absent'
roles:
- myrole
post_tasks:
- name: Instance Register
local_action:
module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
state: 'present'
with_items: ec2_elbs
"""
import time
try:
import boto
import boto.ec2
import boto.ec2.elb
from boto.regioninfo import RegionInfo
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class ElbManager:
"""Handles EC2 instance ELB registration and de-registration"""
def __init__(self, module, instance_id=None, ec2_elbs=None,
region=None, **aws_connect_params):
self.module = module
self.instance_id = instance_id
self.region = region
self.aws_connect_params = aws_connect_params
self.lbs = self._get_instance_lbs(ec2_elbs)
self.changed = False
def deregister(self, wait, timeout):
"""De-register the instance from all ELBs and wait for the ELB
to report it out-of-service"""
for lb in self.lbs:
initial_state = self._get_instance_health(lb)
if initial_state is None:
# The instance isn't registered with this ELB so just
# return unchanged
return
lb.deregister_instances([self.instance_id])
# The ELB is changing state in some way. Either an instance that's
# InService is moving to OutOfService, or an instance that's
# already OutOfService is being deregistered.
self.changed = True
if wait:
self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)
def register(self, wait, enable_availability_zone, timeout):
"""Register the instance for all ELBs and wait for the ELB
to report the instance in-service"""
for lb in self.lbs:
initial_state = self._get_instance_health(lb)
if enable_availability_zone:
                self._enable_availability_zone(lb)
lb.register_instances([self.instance_id])
if wait:
self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
else:
# We cannot assume no change was made if we don't wait
# to find out
self.changed = True
def exists(self, lbtest):
""" Verify that the named ELB actually exists """
found = False
for lb in self.lbs:
if lb.name == lbtest:
found=True
break
return found
    def _enable_availability_zone(self, lb):
"""Enable the current instance's availability zone in the provided lb.
Returns True if the zone was enabled or False if no change was made.
lb: load balancer"""
instance = self._get_instance()
if instance.placement in lb.availability_zones:
return False
lb.enable_zones(zones=instance.placement)
# If successful, the new zone will have been added to
# lb.availability_zones
return instance.placement in lb.availability_zones
def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
"""Wait for an ELB to change state
lb: load balancer
awaited_state : state to poll for (string)"""
wait_timeout = time.time() + timeout
while True:
instance_state = self._get_instance_health(lb)
if not instance_state:
msg = ("The instance %s could not be put in service on %s."
" Reason: Invalid Instance")
self.module.fail_json(msg=msg % (self.instance_id, lb))
if instance_state.state == awaited_state:
# Check the current state against the initial state, and only set
# changed if they are different.
if (initial_state is None) or (instance_state.state != initial_state.state):
self.changed = True
break
elif self._is_instance_state_pending(instance_state):
                # If it's pending, we'll skip further checks and continue waiting
pass
elif (awaited_state == 'InService'
and instance_state.reason_code == "Instance"
and time.time() >= wait_timeout):
# If the reason_code for the instance being out of service is
# "Instance" this indicates a failure state, e.g. the instance
# has failed a health check or the ELB does not have the
                # instance's availability zone enabled. The exact reason why is
# described in InstantState.description.
msg = ("The instance %s could not be put in service on %s."
" Reason: %s")
self.module.fail_json(msg=msg % (self.instance_id,
lb,
instance_state.description))
time.sleep(1)
def _is_instance_state_pending(self, instance_state):
"""
Determines whether the instance_state is "pending", meaning there is
an operation under way to bring it in service.
"""
# This is messy, because AWS provides no way to distinguish between
# an instance that is is OutOfService because it's pending vs. OutOfService
# because it's failing health checks. So we're forced to analyze the
# description, which is likely to be brittle.
return (instance_state and 'pending' in instance_state.description)
def _get_instance_health(self, lb):
"""
Check instance health, should return status object or None under
certain error conditions.
"""
try:
status = lb.get_instance_health([self.instance_id])[0]
except boto.exception.BotoServerError, e:
if e.error_code == 'InvalidInstance':
return None
else:
raise
return status
def _get_instance_lbs(self, ec2_elbs=None):
"""Returns a list of ELBs attached to self.instance_id
ec2_elbs: an optional list of elb names that will be used
for elb lookup instead of returning what elbs
are attached to self.instance_id"""
try:
elb = connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
self.module.fail_json(msg=str(e))
elbs = elb.get_all_load_balancers()
if ec2_elbs:
lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
else:
lbs = []
for lb in elbs:
for info in lb.instances:
if self.instance_id == info.id:
lbs.append(lb)
return lbs
def _get_instance(self):
"""Returns a boto.ec2.InstanceObject for self.instance_id"""
try:
ec2 = connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
self.module.fail_json(msg=str(e))
return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True},
instance_id={'required': True},
ec2_elbs={'default': None, 'required': False, 'type':'list'},
enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
wait={'required': False, 'default': True, 'type': 'bool'},
        wait_timeout={'required': False, 'default': 0, 'type': 'int'}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
ec2_elbs = module.params['ec2_elbs']
wait = module.params['wait']
enable_availability_zone = module.params['enable_availability_zone']
timeout = module.params['wait_timeout']
if module.params['state'] == 'present' and 'ec2_elbs' not in module.params:
module.fail_json(msg="ELBs are required for registration")
instance_id = module.params['instance_id']
elb_man = ElbManager(module, instance_id, ec2_elbs,
region=region, **aws_connect_params)
if ec2_elbs is not None:
for elb in ec2_elbs:
if not elb_man.exists(elb):
msg="ELB %s does not exist" % elb
module.fail_json(msg=msg)
if module.params['state'] == 'present':
elb_man.register(wait, enable_availability_zone, timeout)
elif module.params['state'] == 'absent':
elb_man.deregister(wait, timeout)
ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
sjvasquez/AIChallenge | Index.py | 1 | 4810 | from solver_utils import *
class Index():
def __init__(self, filename, analyzer, sentences=False, textbook=None, wiki=None, reindex=False):
self.analyzer = analyzer
self.schema = Schema(title=TEXT(stored=True, analyzer=analyzer),
content=TEXT(stored=True, analyzer=analyzer))
if reindex:
if sentences:
sentences = MySentences('training_text')
if not os.path.exists(filename):
os.mkdir(filename)
self.index = create_in(filename, self.schema)
writer = self.index.writer()
for sentence in sentences:
writer.add_document(content=unicode(sentence, errors='ignore'))
writer.commit()
else:
if not os.path.exists(filename):
os.mkdir(filename)
self.index = create_in(filename, self.schema)
num_textbook_docs = 0
if textbook:
print "Indexing textbook...",
self.index_textbook(textbook)
num_textbook_docs = self.index.doc_count()
print str(self.index.doc_count()) + " textbook documents indexed"
if wiki:
print "Indexing wikipedia articles...",
self.index_wiki(wiki)
num_wiki_docs = self.index.doc_count() - num_textbook_docs
print str(num_wiki_docs) + " wiki articles indexed"
print "Finished indexing\n"
self.index = open_dir(filename)
def generate_next_doc(self, unicode_file, doc_delimiter):
buffered_text = ""
while True:
while doc_delimiter in buffered_text:
delimiter_index = re.search(doc_delimiter, buffered_text).end()
yield buffered_text[:delimiter_index]
buffered_text = buffered_text[delimiter_index:]
chunk = unicode_file.read(4096)
if not chunk:
yield buffered_text
break
buffered_text += chunk
def index_textbook(self, filename):
writer = self.index.writer()
with codecs.open(filename, encoding='utf-8', errors='ignore') as unicode_file:
for document in self.generate_next_doc(unicode_file, "<h1"):
end_header_index = document.find("</h1>")
if end_header_index != -1:
header = document[:end_header_index].strip()
start = header.find("\n")
header = header[start + 1:].strip()
rest = re.sub(r'<.*>', ' ', document[end_header_index + len("</h1>") + 1:])
rest = rest.replace("\n", " ")
if header != "References":
writer.add_document(title=header, content=rest)
writer.commit()
def index_wiki(self, filename):
writer = self.index.writer()
with codecs.open(filename, encoding='utf-8', errors='ignore') as unicode_file:
for document in self.generate_next_doc(unicode_file, "<doc"):
end_header_index = document.find("</doc>")
if end_header_index != -1:
all_text = document[:end_header_index].strip()
start_header = all_text.find("\n")
end_header = all_text.find("\n\n")
header = all_text[start_header + 1:end_header]
rest = all_text[end_header + 2:]
writer.add_document(title=header, content=rest)
writer.commit()
def get_top_k_documents(self, k, query):
terms = [token.text for token in self.analyzer(query)]
q = Or([Term("content", text) for text in terms])
with self.index.searcher() as searcher:
results = searcher.search(q, limit=k, scored=True)
return results
def get_top_k_texts(self, k, query):
terms = [token.text for token in self.analyzer(query)]
q = Or([Term("content", text) for text in terms])
with self.index.searcher() as searcher:
results = searcher.search(q, limit=k, scored=True)
return [i["content"] for i in results]
def get_top_k_words(self, k, question):
words = []
for result in self.get_top_k_texts(k, question):
words += [token.text for token in self.analyzer(result)]
return words
class MySentences(object):
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
if 'textbook' in fname or 'science_wiki' in fname:
print fname
for line in open(os.path.join(self.dirname, fname)):
yield line
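# Hedged usage sketch (not part of the original file): building and querying an
# index. The analyzer is assumed to be a whoosh analyzer (e.g. StemmingAnalyzer
# from whoosh.analysis); the file paths and the question are placeholders.
#
#   from whoosh.analysis import StemmingAnalyzer
#   idx = Index("index_dir", StemmingAnalyzer(),
#               textbook="training_text/textbook.txt",
#               wiki="training_text/science_wiki.txt", reindex=True)
#   passages = idx.get_top_k_texts(5, "Why do objects fall toward the Earth?")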
| mit |
stacywsmith/ansible | lib/ansible/modules/cloud/openstack/_quantum_network.py | 12 | 10404 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: quantum_network
version_added: "1.4"
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use M(os_network) instead.
short_description: Creates/Removes networks from OpenStack
description:
- Add or Remove network from OpenStack.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
tenant_name:
description:
- The name of the tenant for whom the network is created
required: false
default: None
auth_url:
description:
- The keystone url for authentication
required: false
default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be assigned to the network
required: true
default: None
provider_network_type:
description:
- The type of the network to be created, gre, vlan, local. Available types depend on the plugin. The Quantum service decides if not specified.
required: false
default: None
provider_physical_network:
description:
- The physical network which would realize the virtual network for flat and vlan networks.
required: false
default: None
provider_segmentation_id:
description:
- The id that has to be assigned to the network, in case of vlan networks that would be vlan id and for gre the tunnel id
required: false
default: None
router_external:
description:
- If 'yes', specifies that the virtual network is a external network (public).
required: false
default: false
shared:
description:
- Whether this network is shared or not
required: false
default: false
admin_state_up:
description:
- Whether the state should be marked as up or down
required: false
default: true
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
- name: Create a GRE backed Quantum network with tunnel id 1 for tenant1
quantum_network:
name: t1network
tenant_name: tenant1
state: present
provider_network_type: gre
provider_segmentation_id: 1
login_username: admin
login_password: admin
login_tenant_name: admin
- name: Create an external network
quantum_network:
name: external_network
state: present
provider_network_type: local
router_external: yes
login_username: admin
login_password: admin
login_tenant_name: admin
'''
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception as e:
module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception as e:
module.fail_json(msg = "Error getting network endpoint: %s " %e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception as e:
module.fail_json(msg = " Error in connecting to neutron: %s " %e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
_os_tenant_id = _os_keystone.tenant_id
else:
tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_net_id(neutron, module):
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['name'],
}
try:
networks = neutron.list_networks(**kwargs)
except Exception as e:
module.fail_json(msg = "Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _create_network(module, neutron):
neutron.format = 'json'
network = {
'name': module.params.get('name'),
'tenant_id': _os_tenant_id,
'provider:network_type': module.params.get('provider_network_type'),
'provider:physical_network': module.params.get('provider_physical_network'),
'provider:segmentation_id': module.params.get('provider_segmentation_id'),
'router:external': module.params.get('router_external'),
'shared': module.params.get('shared'),
'admin_state_up': module.params.get('admin_state_up'),
}
if module.params['provider_network_type'] == 'local':
network.pop('provider:physical_network', None)
network.pop('provider:segmentation_id', None)
if module.params['provider_network_type'] == 'flat':
network.pop('provider:segmentation_id', None)
if module.params['provider_network_type'] == 'gre':
network.pop('provider:physical_network', None)
if module.params['provider_network_type'] is None:
network.pop('provider:network_type', None)
network.pop('provider:physical_network', None)
network.pop('provider:segmentation_id', None)
try:
net = neutron.create_network({'network':network})
except Exception as e:
module.fail_json(msg = "Error in creating network: %s" % e.message)
return net['network']['id']
def _delete_network(module, net_id, neutron):
try:
id = neutron.delete_network(net_id)
except Exception as e:
module.fail_json(msg = "Error in deleting the network: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
tenant_name = dict(default=None),
provider_network_type = dict(default=None, choices=['local', 'vlan', 'flat', 'gre']),
provider_physical_network = dict(default=None),
provider_segmentation_id = dict(default=None),
router_external = dict(default=False, type='bool'),
shared = dict(default=False, type='bool'),
admin_state_up = dict(default=True, type='bool'),
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
if module.params['provider_network_type'] in ['vlan' , 'flat']:
if not module.params['provider_physical_network']:
module.fail_json(msg = " for vlan and flat networks, variable provider_physical_network should be set.")
if module.params['provider_network_type'] in ['vlan', 'gre']:
if not module.params['provider_segmentation_id']:
module.fail_json(msg = " for vlan & gre networks, variable provider_segmentation_id should be set.")
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
if module.params['state'] == 'present':
network_id = _get_net_id(neutron, module)
if not network_id:
network_id = _create_network(module, neutron)
module.exit_json(changed = True, result = "Created", id = network_id)
else:
module.exit_json(changed = False, result = "Success", id = network_id)
if module.params['state'] == 'absent':
network_id = _get_net_id(neutron, module)
if not network_id:
module.exit_json(changed = False, result = "Success")
else:
_delete_network(module, network_id, neutron)
module.exit_json(changed = True, result = "Deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
drpngx/tensorflow | tensorflow/contrib/image/python/ops/distort_image_ops.py | 68 | 5793 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for distort_image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.image.ops import gen_distort_image_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import resource_loader
_distort_image_ops = loader.load_op_library(
resource_loader.get_path_to_datafile('_distort_image_ops.so'))
# pylint: disable=invalid-name
def random_hsv_in_yiq(image,
max_delta_hue=0,
lower_saturation=1,
upper_saturation=1,
lower_value=1,
upper_value=1,
seed=None):
"""Adjust hue, saturation, value of an RGB image randomly in YIQ color space.
Equivalent to `adjust_hsv_in_yiq()` but uses a `delta_h` randomly
picked in the interval `[-max_delta_hue, max_delta_hue]`, a `scale_saturation`
randomly picked in the interval `[lower_saturation, upper_saturation]`, and
a `scale_value` randomly picked in the interval
`[lower_value, upper_value]`.
Args:
image: RGB image or images. Size of the last dimension must be 3.
max_delta_hue: float. Maximum value for the random delta_hue. Passing 0
disables adjusting hue.
lower_saturation: float. Lower bound for the random scale_saturation.
upper_saturation: float. Upper bound for the random scale_saturation.
lower_value: float. Lower bound for the random scale_value.
upper_value: float. Upper bound for the random scale_value.
seed: An operation-specific seed. It will be used in conjunction
with the graph-level seed to determine the real seeds that will be
used in this operation. Please see the documentation of
set_random_seed for its interaction with the graph-level random seed.
Returns:
3-D float tensor of shape `[height, width, channels]`.
Raises:
ValueError: if `max_delta_hue`, `lower_saturation`, `upper_saturation`,
`lower_value`, or `upper_value` is invalid.
"""
if max_delta_hue < 0:
raise ValueError('max_delta_hue must be non-negative.')
if lower_saturation < 0:
raise ValueError('lower_saturation must be non-negative.')
if lower_value < 0:
raise ValueError('lower_value must be non-negative.')
if lower_saturation > upper_saturation:
raise ValueError('lower_saturation must be <= upper_saturation.')
if lower_value > upper_value:
raise ValueError('lower_value must be <= upper_value.')
if max_delta_hue == 0:
delta_hue = 0
else:
delta_hue = random_ops.random_uniform(
[], -max_delta_hue, max_delta_hue, seed=seed)
if lower_saturation == upper_saturation:
scale_saturation = lower_saturation
else:
scale_saturation = random_ops.random_uniform(
[], lower_saturation, upper_saturation, seed=seed)
if lower_value == upper_value:
scale_value = lower_value
else:
scale_value = random_ops.random_uniform(
[], lower_value, upper_value, seed=seed)
return adjust_hsv_in_yiq(image, delta_hue, scale_saturation, scale_value)
def adjust_hsv_in_yiq(image,
delta_hue=0,
scale_saturation=1,
scale_value=1,
name=None):
"""Adjust hue, saturation, value of an RGB image in YIQ color space.
This is a convenience method that converts an RGB image to float
representation, converts it to YIQ, rotates the color around the Y channel by
delta_hue in radians, scales the chrominance channels (I, Q) by
scale_saturation, scales all channels (Y, I, Q) by scale_value,
converts back to RGB, and then back to the original data type.
`image` is an RGB image. The image hue is adjusted by converting the
image to YIQ, rotating around the luminance channel (Y) by
`delta_hue` in radians, multiplying the chrominance channels (I, Q) by
`scale_saturation`, and multiplying all channels (Y, I, Q) by
`scale_value`. The image is then converted back to RGB.
Args:
image: RGB image or images. Size of the last dimension must be 3.
delta_hue: float, the hue rotation amount, in radians.
scale_saturation: float, factor to multiply the saturation by.
scale_value: float, factor to multiply the value by.
name: A name for this operation (optional).
Returns:
Adjusted image(s), same shape and DType as `image`.
"""
with ops.name_scope(name, 'adjust_hsv_in_yiq', [image]) as name:
image = ops.convert_to_tensor(image, name='image')
# Remember original dtype to so we can convert back if needed
orig_dtype = image.dtype
flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
rgb_altered = gen_distort_image_ops.adjust_hsv_in_yiq(
flt_image, delta_hue, scale_saturation, scale_value)
return image_ops.convert_image_dtype(rgb_altered, orig_dtype)
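# Hedged usage sketch (not part of the upstream file): applying the two ops
# above to an RGB image tensor in a TF 1.x graph. The placeholder, the numpy
# array and the parameter values are illustrative only.
#
#   import numpy as np
#   import tensorflow as tf
#   from tensorflow.contrib.image import adjust_hsv_in_yiq, random_hsv_in_yiq
#
#   image = tf.placeholder(tf.uint8, shape=[None, None, 3])
#   rotated = adjust_hsv_in_yiq(image, delta_hue=0.2, scale_saturation=0.9)
#   jittered = random_hsv_in_yiq(image, max_delta_hue=0.1,
#                                lower_saturation=0.8, upper_saturation=1.2)
#   with tf.Session() as sess:
#       rgb = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
#       out = sess.run([rotated, jittered], feed_dict={image: rgb})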
| apache-2.0 |
reingart/pyfpdf | fpdf/template.py | 1 | 9458 | # -*- coding: iso-8859-1 -*-
"PDF Template Helper for FPDF.py"
from __future__ import with_statement
__author__ = "Mariano Reingart <reingart@gmail.com>"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"
import sys,os,csv
from .fpdf import FPDF
from .py3k import PY3K, basestring, unicode
def rgb(col):
return (col // 65536), (col // 256 % 256), (col% 256)
class Template:
def __init__(self, infile=None, elements=None, format='A4', orientation='portrait',
title='', author='', subject='', creator='', keywords=''):
if elements:
self.load_elements(elements)
self.handlers = {'T': self.text, 'L': self.line, 'I': self.image,
'B': self.rect, 'BC': self.barcode, 'W': self.write, }
self.texts = {}
pdf = self.pdf = FPDF(format=format,orientation=orientation, unit="mm")
pdf.set_title(title)
pdf.set_author(author)
pdf.set_creator(creator)
pdf.set_subject(subject)
pdf.set_keywords(keywords)
def load_elements(self, elements):
"Initialize the internal element structures"
self.pg_no = 0
self.elements = elements
self.keys = [v['name'].lower() for v in self.elements]
def parse_csv(self, infile, delimiter=",", decimal_sep="."):
"Parse template format csv file and create elements dict"
keys = ('name','type','x1','y1','x2','y2','font','size',
'bold','italic','underline','foreground','background',
'align','text','priority', 'multiline')
self.elements = []
self.pg_no = 0
if not PY3K:
f = open(infile, 'rb')
else:
f = open(infile)
with f:
for row in csv.reader(f, delimiter=delimiter):
kargs = {}
for i,v in enumerate(row):
if not v.startswith("'") and decimal_sep!=".":
v = v.replace(decimal_sep,".")
else:
v = v
if v=='':
v = None
else:
v = eval(v.strip())
kargs[keys[i]] = v
self.elements.append(kargs)
self.keys = [v['name'].lower() for v in self.elements]
def add_page(self):
self.pg_no += 1
self.texts[self.pg_no] = {}
def __setitem__(self, name, value):
if name.lower() in self.keys:
if not PY3K and isinstance(value, unicode):
value = value.encode("latin1","ignore")
elif value is None:
value = ""
else:
value = str(value)
self.texts[self.pg_no][name.lower()] = value
# setitem shortcut (may be further extended)
set = __setitem__
def has_key(self, name):
return name.lower() in self.keys
def __contains__(self, name):
return self.has_key(name)
def __getitem__(self, name):
if name in self.keys:
key = name.lower()
if key in self.texts:
# text for this page:
return self.texts[self.pg_no][key]
else:
# find first element for default text:
elements = [element for element in self.elements
if element['name'].lower() == key]
if elements:
return elements[0]['text']
def split_multicell(self, text, element_name):
"Divide (\n) a string using a given element width"
pdf = self.pdf
element = [element for element in self.elements
if element['name'].lower() == element_name.lower()][0]
style = ""
if element['bold']: style += "B"
if element['italic']: style += "I"
if element['underline']: style += "U"
pdf.set_font(element['font'],style,element['size'])
align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(element['align']) # D/I in spanish
if isinstance(text, unicode) and not PY3K:
text = text.encode("latin1","ignore")
else:
text = str(text)
return pdf.multi_cell(w=element['x2']-element['x1'],
h=element['y2']-element['y1'],
txt=text,align=align,split_only=True)
def render(self, outfile, dest="F"):
pdf = self.pdf
for pg in range(1, self.pg_no+1):
pdf.add_page()
pdf.set_font('Arial','B',16)
pdf.set_auto_page_break(False,margin=0)
for element in sorted(self.elements,key=lambda x: x['priority']):
#print "dib",element['type'], element['name'], element['x1'], element['y1'], element['x2'], element['y2']
element = element.copy()
element['text'] = self.texts[pg].get(element['name'].lower(), element['text'])
if 'rotate' in element:
pdf.rotate(element['rotate'], element['x1'], element['y1'])
self.handlers[element['type'].upper()](pdf, **element)
if 'rotate' in element:
pdf.rotate(0)
if dest:
return pdf.output(outfile, dest)
def text(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=10,
bold=False, italic=False, underline=False, align="",
foreground=0, background=65535, multiline=None,
*args, **kwargs):
if text:
if pdf.text_color!=rgb(foreground):
pdf.set_text_color(*rgb(foreground))
if pdf.fill_color!=rgb(background):
pdf.set_fill_color(*rgb(background))
font = font.strip().lower()
if font == 'arial black':
font = 'arial'
style = ""
for tag in 'B', 'I', 'U':
if (text.startswith("<%s>" % tag) and text.endswith("</%s>" %tag)):
text = text[3:-4]
style += tag
if bold: style += "B"
if italic: style += "I"
if underline: style += "U"
align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(align) # D/I in spanish
pdf.set_font(font,style,size)
##m_k = 72 / 2.54
##h = (size/m_k)
pdf.set_xy(x1,y1)
if multiline is None:
# multiline==None: write without wrapping/trimming (default)
pdf.cell(w=x2-x1,h=y2-y1,txt=text,border=0,ln=0,align=align)
elif multiline:
# multiline==True: automatic word - warp
pdf.multi_cell(w=x2-x1,h=y2-y1,txt=text,border=0,align=align)
else:
# multiline==False: trim to fit exactly the space defined
text = pdf.multi_cell(w=x2-x1, h=y2-y1,
txt=text, align=align, split_only=True)[0]
print("trimming: *%s*" % text)
pdf.cell(w=x2-x1,h=y2-y1,txt=text,border=0,ln=0,align=align)
#pdf.Text(x=x1,y=y1,txt=text)
def line(self, pdf, x1=0, y1=0, x2=0, y2=0, size=0, foreground=0, *args, **kwargs):
if pdf.draw_color!=rgb(foreground):
#print "SetDrawColor", hex(foreground)
pdf.set_draw_color(*rgb(foreground))
#print "SetLineWidth", size
pdf.set_line_width(size)
pdf.line(x1, y1, x2, y2)
def rect(self, pdf, x1=0, y1=0, x2=0, y2=0, size=0, foreground=0, background=65535, *args, **kwargs):
if pdf.draw_color!=rgb(foreground):
pdf.set_draw_color(*rgb(foreground))
if pdf.fill_color!=rgb(background):
pdf.set_fill_color(*rgb(background))
pdf.set_line_width(size)
pdf.rect(x1, y1, x2-x1, y2-y1)
def image(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', *args,**kwargs):
if text:
pdf.image(text,x1,y1,w=x2-x1,h=y2-y1,type='',link='')
def barcode(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=1,
foreground=0, *args, **kwargs):
if pdf.draw_color!=rgb(foreground):
pdf.set_draw_color(*rgb(foreground))
font = font.lower().strip()
if font == 'interleaved 2of5 nt':
pdf.interleaved2of5(text,x1,y1,w=size,h=y2-y1)
# Added by Derek Schwalenberg Schwalenberg1013@gmail.com to allow (url) links in templates (using write method) 2014-02-22
def write(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=1,
bold=False, italic=False, underline=False, align="", link='http://example.com',
foreground=0, *args, **kwargs):
if pdf.text_color!=rgb(foreground):
pdf.set_text_color(*rgb(foreground))
font = font.strip().lower()
if font == 'arial black':
font = 'arial'
style = ""
for tag in 'B', 'I', 'U':
if (text.startswith("<%s>" % tag) and text.endswith("</%s>" %tag)):
text = text[3:-4]
style += tag
if bold: style += "B"
if italic: style += "I"
if underline: style += "U"
align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(align) # D/I in spanish
pdf.set_font(font,style,size)
##m_k = 72 / 2.54
##h = (size/m_k)
pdf.set_xy(x1,y1)
pdf.write(5,text,link)
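# Hedged usage sketch (not part of the original library; kept as a comment
# because this module uses relative imports and is not meant to run as a
# script). Element geometry, names and the output path are illustrative only.
#
#   from fpdf.template import Template
#   elements = [
#       {'name': 'company_name', 'type': 'T', 'x1': 17.0, 'y1': 32.5,
#        'x2': 115.0, 'y2': 37.5, 'font': 'Arial', 'size': 12.0, 'bold': 1,
#        'italic': 0, 'underline': 0, 'foreground': 0, 'background': 0xFFFFFF,
#        'align': 'L', 'text': '', 'priority': 2},
#   ]
#   t = Template(format="A4", elements=elements, title="Sample Invoice")
#   t.add_page()
#   t["company_name"] = "Sample Company"
#   t.render("./sample_invoice.pdf")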
| lgpl-3.0 |
bluesea/zulip | api/integrations/codebase/zulip_codebase_config.py | 124 | 2537 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Change these values to configure authentication for your codebase account
# Note that this is the Codebase API Username, found in the Settings page
# for your account
CODEBASE_API_USERNAME = "foo@example.com"
CODEBASE_API_KEY = "1234561234567abcdef"
# The URL of your codebase setup
CODEBASE_ROOT_URL = "https://YOUR_COMPANY.codebasehq.com"
# When initially started, how many hours of messages to include.
# Note that the Codebase API only returns the 20 latest events,
# if you have more than 20 events that fit within this window,
# earlier ones may be lost
CODEBASE_INITIAL_HISTORY_HOURS = 12
# Change these values to configure Zulip authentication for the plugin
ZULIP_USER = "codebase-bot@example.com"
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
# The streams to send commit information and ticket information to
ZULIP_COMMITS_STREAM_NAME = "codebase"
ZULIP_TICKETS_STREAM_NAME = "tickets"
# If properly installed, the Zulip API should be in your import
# path, but if not, set a custom path below
ZULIP_API_PATH = None
# Set this to your Zulip API server URI
ZULIP_SITE = "https://api.zulip.com"
# If you wish to log to a file rather than stdout/stderr,
# please fill in your desired path
LOG_FILE = None
# This file is used to resume this mirror in case the script shuts down.
# It is required and needs to be writeable.
RESUME_FILE = "/var/tmp/zulip_codebase.state"
| apache-2.0 |
FrancoCotter/ReactTimerAPP | node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py | 1534 | 3426 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_filelist_paths = {
}
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
if flavor =='win':
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
toplevel = params['options'].toplevel_dir
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, generator_dir, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
try:
filepath = params['generator_flags']['output_dir']
except KeyError:
filepath = '.'
filename = os.path.join(filepath, 'dump.json')
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print 'Wrote json to %s.' % filename
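# Hedged note (not part of the upstream generator): dump.json is a flat
# adjacency map from fully qualified gyp targets to the targets they depend on.
# Illustrative shape only (the target names below are made up):
#
#   {
#     "src/app.gyp:app#target": ["src/base.gyp:base#target"],
#     "src/base.gyp:base#target": []
#   }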
| mit |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/PyKDE4/kdeui/NETRootInfo.py | 1 | 5242 | # encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python2.7/dist-packages/PyKDE4/kdeui.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
from NET import NET
class NETRootInfo(NET):
# no doc
def activate(self, *args, **kwargs): # real signature unknown
pass
def activeWindow(self, *args, **kwargs): # real signature unknown
pass
def addClient(self, *args, **kwargs): # real signature unknown
pass
def changeActiveWindow(self, *args, **kwargs): # real signature unknown
pass
def changeCurrentDesktop(self, *args, **kwargs): # real signature unknown
pass
def changeDesktopGeometry(self, *args, **kwargs): # real signature unknown
pass
def changeDesktopViewport(self, *args, **kwargs): # real signature unknown
pass
def changeNumberOfDesktops(self, *args, **kwargs): # real signature unknown
pass
def changeShowingDesktop(self, *args, **kwargs): # real signature unknown
pass
def clientListCount(self, *args, **kwargs): # real signature unknown
pass
def clientListStackingCount(self, *args, **kwargs): # real signature unknown
pass
def closeWindow(self, *args, **kwargs): # real signature unknown
pass
def closeWindowRequest(self, *args, **kwargs): # real signature unknown
pass
def currentDesktop(self, *args, **kwargs): # real signature unknown
pass
def desktopGeometry(self, *args, **kwargs): # real signature unknown
pass
def desktopLayoutColumnsRows(self, *args, **kwargs): # real signature unknown
pass
def desktopLayoutCorner(self, *args, **kwargs): # real signature unknown
pass
def desktopLayoutOrientation(self, *args, **kwargs): # real signature unknown
pass
def desktopName(self, *args, **kwargs): # real signature unknown
pass
def desktopViewport(self, *args, **kwargs): # real signature unknown
pass
def gotPing(self, *args, **kwargs): # real signature unknown
pass
def gotTakeActivity(self, *args, **kwargs): # real signature unknown
pass
def isSupported(self, *args, **kwargs): # real signature unknown
pass
def moveResize(self, *args, **kwargs): # real signature unknown
pass
def moveResizeRequest(self, *args, **kwargs): # real signature unknown
pass
def moveResizeWindow(self, *args, **kwargs): # real signature unknown
pass
def moveResizeWindowRequest(self, *args, **kwargs): # real signature unknown
pass
def numberOfDesktops(self, *args, **kwargs): # real signature unknown
pass
def removeClient(self, *args, **kwargs): # real signature unknown
pass
def restackRequest(self, *args, **kwargs): # real signature unknown
pass
def restackWindow(self, *args, **kwargs): # real signature unknown
pass
def rootWindow(self, *args, **kwargs): # real signature unknown
pass
def screenNumber(self, *args, **kwargs): # real signature unknown
pass
def sendPing(self, *args, **kwargs): # real signature unknown
pass
def setActiveWindow(self, *args, **kwargs): # real signature unknown
pass
def setClientList(self, *args, **kwargs): # real signature unknown
pass
def setClientListStacking(self, *args, **kwargs): # real signature unknown
pass
def setCurrentDesktop(self, *args, **kwargs): # real signature unknown
pass
def setDesktopGeometry(self, *args, **kwargs): # real signature unknown
pass
def setDesktopLayout(self, *args, **kwargs): # real signature unknown
pass
def setDesktopName(self, *args, **kwargs): # real signature unknown
pass
def setDesktopViewport(self, *args, **kwargs): # real signature unknown
pass
def setNumberOfDesktops(self, *args, **kwargs): # real signature unknown
pass
def setShowingDesktop(self, *args, **kwargs): # real signature unknown
pass
def setSupported(self, *args, **kwargs): # real signature unknown
pass
def setVirtualRoots(self, *args, **kwargs): # real signature unknown
pass
def setWorkArea(self, *args, **kwargs): # real signature unknown
pass
def showingDesktop(self, *args, **kwargs): # real signature unknown
pass
def supportWindow(self, *args, **kwargs): # real signature unknown
pass
def takeActivity(self, *args, **kwargs): # real signature unknown
pass
def virtualRootsCount(self, *args, **kwargs): # real signature unknown
pass
def wmName(self, *args, **kwargs): # real signature unknown
pass
def workArea(self, *args, **kwargs): # real signature unknown
pass
def x11Display(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
ACTIONS = 4
PROPERTIES_SIZE = 5
PROTOCOLS = 0
PROTOCOLS2 = 3
STATES = 2
WINDOW_TYPES = 1
| gpl-2.0 |
johngian/mozillians | mozillians/phonebook/tests/test_views/test_views_profile.py | 3 | 11143 | from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.test import Client
from django.test.utils import override_settings
from mock import patch
from nose.tools import ok_, eq_
from mozillians.common.templatetags.helpers import redirect, urlparams
from mozillians.common.tests import TestCase
from mozillians.users.managers import PUBLIC, MOZILLIANS, EMPLOYEES, PRIVATE
from mozillians.users.tests import UserFactory
class ViewProfileTests(TestCase):
@patch('mozillians.phonebook.views.messages.warning')
@patch('mozillians.phonebook.views.login_required', wraps=login_required)
def test_view_profile_no_public_anonymous(self, login_required_mock,
warning_mock):
lookup_user = UserFactory.create()
client = Client()
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
client.get(url, follow=True)
ok_(warning_mock.called)
ok_(login_required_mock.called)
@patch('mozillians.phonebook.views.messages.error')
@patch('mozillians.phonebook.views.redirect', wraps=redirect)
def test_view_profile_no_public_unvouched(self, redirect_mock, error_mock):
lookup_user = UserFactory.create()
user = UserFactory.create(vouched=False)
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
client.get(url, follow=True)
ok_(redirect_mock.called)
ok_(error_mock.called)
def test_view_profile_no_public_vouched(self):
lookup_user = UserFactory.create()
user = UserFactory.create()
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
def test_view_vouched_profile_public_anonymous(self):
lookup_user = UserFactory.create(userprofile={'privacy_full_name': PUBLIC})
client = Client()
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
eq_(response.context['profile']._privacy_level, PUBLIC)
ok_('vouch_form' not in response.context)
def test_view_vouched_profile_public_unvouched(self):
lookup_user = UserFactory.create(userprofile={'privacy_full_name': PUBLIC})
user = UserFactory.create(vouched=False)
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
eq_(response.context['profile']._privacy_level, PUBLIC)
ok_('vouch_form' not in response.context)
@override_settings(CAN_VOUCH_THRESHOLD=1)
def test_view_vouched_profile_public_vouched(self):
lookup_user = UserFactory.create(userprofile={'privacy_full_name': PUBLIC})
user = UserFactory.create()
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
eq_(response.context['profile']._privacy_level, MOZILLIANS)
ok_('vouch_form' in response.context)
def test_view_unvouched_profile_public_anonymous(self):
lookup_user = UserFactory.create(vouched=False,
userprofile={'privacy_full_name': PUBLIC})
client = Client()
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
eq_(response.context['profile']._privacy_level, PUBLIC)
ok_('vouch_form' not in response.context)
def test_view_unvouched_profile_public_unvouched(self):
lookup_user = UserFactory.create(vouched=False,
userprofile={'privacy_full_name': PUBLIC})
user = UserFactory.create(vouched=False)
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
eq_(response.context['profile']._privacy_level, PUBLIC)
ok_('vouch_form' not in response.context)
@override_settings(CAN_VOUCH_THRESHOLD=1)
def test_view_unvouched_profile_public_vouched(self):
lookup_user = UserFactory.create(vouched=False,
userprofile={'privacy_full_name': PUBLIC})
user = UserFactory.create()
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': lookup_user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], lookup_user)
eq_(response.context['profile'], lookup_user.userprofile)
eq_(response.context['profile']._privacy_level, MOZILLIANS)
ok_('vouch_form' in response.context)
def test_view_profile_mine_unvouched(self):
user = UserFactory.create(vouched=False)
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], user)
eq_(response.context['profile'], user.userprofile)
eq_(response.context['profile']._privacy_level, None)
eq_(response.context['privacy_mode'], 'myself')
def test_view_profile_mine_vouched(self):
user = UserFactory.create()
with self.login(user) as client:
url = reverse('phonebook:profile_view',
kwargs={'username': user.username})
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], user)
eq_(response.context['profile'], user.userprofile)
eq_(response.context['profile']._privacy_level, None)
eq_(response.context['privacy_mode'], 'myself')
def test_view_profile_mine_as_anonymous(self):
user = UserFactory.create()
url = reverse('phonebook:profile_view',
kwargs={'username': user.username})
url = urlparams(url, view_as='anonymous')
with self.login(user) as client:
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], user)
eq_(response.context['profile'], user.userprofile)
eq_(response.context['profile']._privacy_level, PUBLIC)
eq_(response.context['privacy_mode'], 'anonymous')
def test_view_profile_mine_as_mozillian(self):
user = UserFactory.create()
url = reverse('phonebook:profile_view',
kwargs={'username': user.username})
url = urlparams(url, view_as='mozillian')
with self.login(user) as client:
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], user)
eq_(response.context['profile'], user.userprofile)
eq_(response.context['profile']._privacy_level, MOZILLIANS)
eq_(response.context['privacy_mode'], 'mozillian')
def test_view_profile_mine_as_employee(self):
user = UserFactory.create()
url = reverse('phonebook:profile_view',
kwargs={'username': user.username})
url = urlparams(url, view_as='employee')
with self.login(user) as client:
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], user)
eq_(response.context['profile'], user.userprofile)
eq_(response.context['profile']._privacy_level, EMPLOYEES)
eq_(response.context['privacy_mode'], 'employee')
def test_view_profile_mine_as_private(self):
user = UserFactory.create()
url = reverse('phonebook:profile_view',
kwargs={'username': user.username})
url = urlparams(url, view_as='private')
with self.login(user) as client:
response = client.get(url, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
eq_(response.context['shown_user'], user)
eq_(response.context['profile'], user.userprofile)
eq_(response.context['profile']._privacy_level, PRIVATE)
eq_(response.context['privacy_mode'], 'private')
def test_view_profile_waiting_for_vouch_unvouched(self):
unvouched_user = UserFactory.create(vouched=False)
user = UserFactory.create(vouched=False)
url = reverse('phonebook:profile_view',
kwargs={'username': unvouched_user.username})
with self.login(user) as client:
response = client.get(url, follow=True)
ok_('vouch_form' not in response.context)
@override_settings(CAN_VOUCH_THRESHOLD=1)
def test_view_profile_waiting_for_vouch_vouched(self):
unvouched_user = UserFactory.create(vouched=False)
user = UserFactory.create()
url = reverse('phonebook:profile_view',
kwargs={'username': unvouched_user.username})
with self.login(user) as client:
response = client.get(url, follow=True)
ok_('vouch_form' in response.context)
| bsd-3-clause |
svogl/mbed-os | tools/libraries.py | 42 | 3221 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tools.paths import MBED_LIBRARIES,\
MBED_RPC, RPC_LIBRARY, USB, USB_LIBRARIES, \
DSP_ABSTRACTION, DSP_CMSIS, DSP_LIBRARIES,\
CPPUTEST_SRC,\
CPPUTEST_PLATFORM_SRC, CPPUTEST_TESTRUNNER_SCR, CPPUTEST_LIBRARY,\
CPPUTEST_INC, CPPUTEST_PLATFORM_INC, CPPUTEST_TESTRUNNER_INC,\
CPPUTEST_INC_EXT
from tools.data.support import DEFAULT_SUPPORT
from tools.tests import TEST_MBED_LIB
LIBRARIES = [
# RPC
{
"id": "rpc",
"source_dir": MBED_RPC,
"build_dir": RPC_LIBRARY,
"dependencies": [MBED_LIBRARIES],
},
# USB Device libraries
{
"id": "usb",
"source_dir": USB,
"build_dir": USB_LIBRARIES,
"dependencies": [MBED_LIBRARIES],
},
# DSP libraries
{
"id": "dsp",
"source_dir": [DSP_ABSTRACTION, DSP_CMSIS],
"build_dir": DSP_LIBRARIES,
"dependencies": [MBED_LIBRARIES]
},
# Unit Testing library
{
"id": "cpputest",
"source_dir": [CPPUTEST_SRC, CPPUTEST_PLATFORM_SRC,
CPPUTEST_TESTRUNNER_SCR],
"build_dir": CPPUTEST_LIBRARY,
"dependencies": [MBED_LIBRARIES],
'inc_dirs': [CPPUTEST_INC, CPPUTEST_PLATFORM_INC,
CPPUTEST_TESTRUNNER_INC, TEST_MBED_LIB],
'inc_dirs_ext': [CPPUTEST_INC_EXT],
'macros': ["CPPUTEST_USE_MEM_LEAK_DETECTION=0",
"CPPUTEST_USE_STD_CPP_LIB=0", "CPPUTEST=1"],
},
]
LIBRARY_MAP = dict([(library['id'], library) for library in LIBRARIES])
class Library(object):
"""A library representation that allows for querying of support"""
def __init__(self, lib_id):
lib = LIBRARY_MAP[lib_id]
self.supported = lib.get("supported", DEFAULT_SUPPORT)
self.dependencies = lib.get("dependencies", None)
# Include dirs required by library build
self.inc_dirs = lib.get("inc_dirs", None)
# Include dirs required by others to use with this library
self.inc_dirs_ext = lib.get("inc_dirs_ext", None)
# Additional macros you want to define when building library
self.macros = lib.get("macros", None)
self.source_dir = lib["source_dir"]
self.build_dir = lib["build_dir"]
def is_supported(self, target, toolchain):
"""Check if a target toolchain combination is supported
Positional arguments:
target - the MCU or board
toolchain - the compiler
"""
if not hasattr(self, 'supported'):
return True
return (target.name in self.supported) and \
(toolchain in self.supported[target.name])
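# Hedged usage sketch (not part of the upstream file): querying one of the
# library definitions above. TARGET_MAP and the "K64F"/"GCC_ARM" names are
# assumptions for illustration.
#
#   from tools.targets import TARGET_MAP
#   dsp = Library("dsp")
#   if dsp.is_supported(TARGET_MAP["K64F"], "GCC_ARM"):
#       print("DSP library can be built for K64F with GCC_ARM")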
| apache-2.0 |
Mikk36/OctoPrint | src/octoprint/filemanager/__init__.py | 8 | 15661 | # coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import logging
import os
import octoprint.plugin
import octoprint.util
from octoprint.events import eventManager, Events
from .destinations import FileDestinations
from .analysis import QueueEntry, AnalysisQueue
from .storage import LocalFileStorage
from .util import AbstractFileWrapper, StreamWrapper, DiskFileWrapper
extensions = dict(
)
def full_extension_tree():
result = dict(
# extensions for 3d model files
model=dict(
stl=["stl"]
),
# extensions for printable machine code
machinecode=dict(
gcode=["gcode", "gco", "g"]
)
)
extension_tree_hooks = octoprint.plugin.plugin_manager().get_hooks("octoprint.filemanager.extension_tree")
for name, hook in extension_tree_hooks.items():
try:
hook_result = hook()
if hook_result is None or not isinstance(hook_result, dict):
continue
result = octoprint.util.dict_merge(result, hook_result)
except:
logging.getLogger(__name__).exception("Exception while retrieving additional extension tree entries from hook {name}".format(name=name))
return result
def get_extensions(type, subtree=None):
if not subtree:
subtree = full_extension_tree()
for key, value in subtree.items():
if key == type:
return get_all_extensions(subtree=value)
elif isinstance(value, dict):
sub_extensions = get_extensions(type, subtree=value)
if sub_extensions:
return sub_extensions
return None
def get_all_extensions(subtree=None):
if not subtree:
subtree = full_extension_tree()
result = []
if isinstance(subtree, dict):
for key, value in subtree.items():
if isinstance(value, dict):
result += get_all_extensions(value)
elif isinstance(value, (list, tuple)):
result += value
elif isinstance(subtree, (list, tuple)):
result = subtree
return result
def get_path_for_extension(extension, subtree=None):
if not subtree:
subtree = full_extension_tree()
for key, value in subtree.items():
if isinstance(value, (list, tuple)) and extension in value:
return [key]
elif isinstance(value, dict):
path = get_path_for_extension(extension, subtree=value)
if path:
return [key] + path
return None
def valid_extension(extension, type=None):
if not type:
return extension in get_all_extensions()
else:
extensions = get_extensions(type)
if extensions:
return extension in extensions
def valid_file_type(filename, type=None):
_, extension = os.path.splitext(filename)
extension = extension[1:].lower()
return valid_extension(extension, type=type)
def get_file_type(filename):
_, extension = os.path.splitext(filename)
extension = extension[1:].lower()
return get_path_for_extension(extension)
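# Hedged usage sketch (not part of the upstream module): inside a running
# OctoPrint instance (the helpers consult the plugin manager), with only the
# default extension tree above and no plugin hooks contributing extra entries,
# the helpers resolve file names as follows.
#
#   valid_file_type("case.stl", type="model")        # True
#   valid_file_type("case.stl", type="machinecode")  # False
#   get_file_type("benchy.gco")                      # ["machinecode", "gcode"]
#   get_path_for_extension("stl")                    # ["model", "stl"]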
class NoSuchStorage(Exception):
pass
class FileManager(object):
def __init__(self, analysis_queue, slicing_manager, printer_profile_manager, initial_storage_managers=None):
self._logger = logging.getLogger(__name__)
self._analysis_queue = analysis_queue
self._analysis_queue.register_finish_callback(self._on_analysis_finished)
self._storage_managers = dict()
if initial_storage_managers:
self._storage_managers.update(initial_storage_managers)
self._slicing_manager = slicing_manager
self._printer_profile_manager = printer_profile_manager
import threading
self._slicing_jobs = dict()
self._slicing_jobs_mutex = threading.Lock()
self._slicing_progress_callbacks = []
self._last_slicing_progress = None
self._progress_plugins = []
self._preprocessor_hooks = dict()
def initialize(self):
self.reload_plugins()
def worker():
self._logger.info("Adding backlog items from all storage types to analysis queue...".format(**locals()))
for storage_type, storage_manager in self._storage_managers.items():
self._determine_analysis_backlog(storage_type, storage_manager)
import threading
thread = threading.Thread(target=worker)
thread.daemon = True
thread.start()
def reload_plugins(self):
self._progress_plugins = octoprint.plugin.plugin_manager().get_implementations(octoprint.plugin.ProgressPlugin)
self._preprocessor_hooks = octoprint.plugin.plugin_manager().get_hooks("octoprint.filemanager.preprocessor")
def register_slicingprogress_callback(self, callback):
self._slicing_progress_callbacks.append(callback)
def unregister_slicingprogress_callback(self, callback):
try:
self._slicing_progress_callbacks.remove(callback)
except ValueError:
# callback was not registered
pass
def _determine_analysis_backlog(self, storage_type, storage_manager):
counter = 0
for entry, path, printer_profile in storage_manager.analysis_backlog:
file_type = get_file_type(path)[-1]
# we'll use the default printer profile for the backlog since we don't know better
queue_entry = QueueEntry(entry, file_type, storage_type, path, self._printer_profile_manager.get_default())
self._analysis_queue.enqueue(queue_entry, high_priority=False)
counter += 1
self._logger.info("Added {counter} items from storage type \"{storage_type}\" to analysis queue".format(**locals()))
def add_storage(self, storage_type, storage_manager):
self._storage_managers[storage_type] = storage_manager
self._determine_analysis_backlog(storage_type, storage_manager)
def remove_storage(self, type):
if not type in self._storage_managers:
return
del self._storage_managers[type]
@property
def slicing_enabled(self):
return self._slicing_manager.slicing_enabled
@property
def registered_slicers(self):
return self._slicing_manager.registered_slicers
@property
def default_slicer(self):
return self._slicing_manager.default_slicer
def slice(self, slicer_name, source_location, source_path, dest_location, dest_path,
position=None, profile=None, printer_profile_id=None, overrides=None, callback=None, callback_args=None):
absolute_source_path = self.path_on_disk(source_location, source_path)
def stlProcessed(source_location, source_path, tmp_path, dest_location, dest_path, start_time, printer_profile_id, callback, callback_args, _error=None, _cancelled=False, _analysis=None):
try:
if _error:
eventManager().fire(Events.SLICING_FAILED, {"stl": source_path, "gcode": dest_path, "reason": _error})
elif _cancelled:
eventManager().fire(Events.SLICING_CANCELLED, {"stl": source_path, "gcode": dest_path})
else:
source_meta = self.get_metadata(source_location, source_path)
hash = source_meta["hash"]
import io
links = [("model", dict(name=source_path))]
_, stl_name = self.split_path(source_location, source_path)
file_obj = StreamWrapper(os.path.basename(dest_path),
io.BytesIO(u";Generated from {stl_name} {hash}\n".format(**locals()).encode("ascii", "replace")),
io.FileIO(tmp_path, "rb"))
printer_profile = self._printer_profile_manager.get(printer_profile_id)
self.add_file(dest_location, dest_path, file_obj, links=links, allow_overwrite=True, printer_profile=printer_profile, analysis=_analysis)
end_time = time.time()
eventManager().fire(Events.SLICING_DONE, {"stl": source_path, "gcode": dest_path, "time": end_time - start_time})
if callback is not None:
if callback_args is None:
callback_args = ()
callback(*callback_args)
finally:
os.remove(tmp_path)
source_job_key = (source_location, source_path)
dest_job_key = (dest_location, dest_path)
with self._slicing_jobs_mutex:
if source_job_key in self._slicing_jobs:
del self._slicing_jobs[source_job_key]
if dest_job_key in self._slicing_jobs:
del self._slicing_jobs[dest_job_key]
slicer = self._slicing_manager.get_slicer(slicer_name)
import time
start_time = time.time()
eventManager().fire(Events.SLICING_STARTED, {"stl": source_path, "gcode": dest_path, "progressAvailable": slicer.get_slicer_properties()["progress_report"] if slicer else False})
import tempfile
f = tempfile.NamedTemporaryFile(suffix=".gco", delete=False)
temp_path = f.name
f.close()
with self._slicing_jobs_mutex:
source_job_key = (source_location, source_path)
dest_job_key = (dest_location, dest_path)
if dest_job_key in self._slicing_jobs:
job_slicer_name, job_absolute_source_path, job_temp_path = self._slicing_jobs[dest_job_key]
self._slicing_manager.cancel_slicing(job_slicer_name, job_absolute_source_path, job_temp_path)
del self._slicing_jobs[dest_job_key]
self._slicing_jobs[dest_job_key] = self._slicing_jobs[source_job_key] = (slicer_name, absolute_source_path, temp_path)
args = (source_location, source_path, temp_path, dest_location, dest_path, start_time, printer_profile_id, callback, callback_args)
self._slicing_manager.slice(slicer_name,
absolute_source_path,
temp_path,
profile,
stlProcessed,
position=position,
callback_args=args,
overrides=overrides,
printer_profile_id=printer_profile_id,
on_progress=self.on_slicing_progress,
on_progress_args=(slicer_name, source_location, source_path, dest_location, dest_path))
def on_slicing_progress(self, slicer, source_location, source_path, dest_location, dest_path, _progress=None):
if not _progress:
return
progress_int = int(_progress * 100)
if self._last_slicing_progress != progress_int:
self._last_slicing_progress = progress_int
for callback in self._slicing_progress_callbacks:
try: callback.sendSlicingProgress(slicer, source_location, source_path, dest_location, dest_path, progress_int)
except: self._logger.exception("Exception while pushing slicing progress")
if progress_int:
def call_plugins(slicer, source_location, source_path, dest_location, dest_path, progress):
for plugin in self._progress_plugins:
try:
plugin.on_slicing_progress(slicer, source_location, source_path, dest_location, dest_path, progress)
except:
self._logger.exception("Exception while sending slicing progress to plugin %s" % plugin._identifier)
import threading
thread = threading.Thread(target=call_plugins, args=(slicer, source_location, source_path, dest_location, dest_path, progress_int))
thread.daemon = False
thread.start()
def get_busy_files(self):
return self._slicing_jobs.keys()
def file_exists(self, destination, path):
return self._storage(destination).file_exists(path)
def list_files(self, destinations=None, path=None, filter=None, recursive=None):
if not destinations:
destinations = self._storage_managers.keys()
if isinstance(destinations, (str, unicode, basestring)):
destinations = [destinations]
result = dict()
for dst in destinations:
result[dst] = self._storage_managers[dst].list_files(path=path, filter=filter, recursive=recursive)
return result
def add_file(self, destination, path, file_object, links=None, allow_overwrite=False, printer_profile=None, analysis=None):
if printer_profile is None:
printer_profile = self._printer_profile_manager.get_current_or_default()
for hook in self._preprocessor_hooks.values():
try:
hook_file_object = hook(path, file_object, links=links, printer_profile=printer_profile, allow_overwrite=allow_overwrite)
except:
self._logger.exception("Error when calling preprocessor hook {}, ignoring".format(hook))
continue
if hook_file_object is not None:
file_object = hook_file_object
file_path = self._storage(destination).add_file(path, file_object, links=links, printer_profile=printer_profile, allow_overwrite=allow_overwrite)
absolute_path = self._storage(destination).path_on_disk(file_path)
if analysis is None:
file_type = get_file_type(absolute_path)
if file_type:
queue_entry = QueueEntry(file_path, file_type[-1], destination, absolute_path, printer_profile)
self._analysis_queue.enqueue(queue_entry, high_priority=True)
else:
self._add_analysis_result(destination, path, analysis)
eventManager().fire(Events.UPDATED_FILES, dict(type="printables"))
return file_path
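# Illustrative sketch of a preprocessor hook as consumed by the loop above
# (the hook name and its filtering logic are hypothetical; only the call
# signature mirrors the invocation in add_file):
#
#   def strip_comments_hook(path, file_object, links=None, printer_profile=None,
#                           allow_overwrite=True, **kwargs):
#       if not path.lower().endswith(".gcode"):
#           return None  # returning None keeps the original file object
#       return wrapped_file_object  # otherwise return a replacement file object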
def remove_file(self, destination, path):
self._storage(destination).remove_file(path)
eventManager().fire(Events.UPDATED_FILES, dict(type="printables"))
def add_folder(self, destination, path, ignore_existing=True):
folder_path = self._storage(destination).add_folder(path, ignore_existing=ignore_existing)
eventManager().fire(Events.UPDATED_FILES, dict(type="printables"))
return folder_path
def remove_folder(self, destination, path, recursive=True):
self._storage(destination).remove_folder(path, recursive=recursive)
eventManager().fire(Events.UPDATED_FILES, dict(type="printables"))
def get_metadata(self, destination, path):
return self._storage(destination).get_metadata(path)
def add_link(self, destination, path, rel, data):
self._storage(destination).add_link(path, rel, data)
def remove_link(self, destination, path, rel, data):
self._storage(destination).remove_link(path, rel, data)
def log_print(self, destination, path, timestamp, print_time, success, printer_profile):
try:
if success:
self._storage(destination).add_history(path, dict(timestamp=timestamp, printTime=print_time, success=success, printerProfile=printer_profile))
else:
self._storage(destination).add_history(path, dict(timestamp=timestamp, success=success, printerProfile=printer_profile))
eventManager().fire(Events.METADATA_STATISTICS_UPDATED, dict(storage=destination, path=path))
except NoSuchStorage:
# if there's no storage configured where to log the print, we'll just not log it
pass
def set_additional_metadata(self, destination, path, key, data, overwrite=False, merge=False):
self._storage(destination).set_additional_metadata(path, key, data, overwrite=overwrite, merge=merge)
def remove_additional_metadata(self, destination, path, key):
self._storage(destination).remove_additional_metadata(path, key)
def path_on_disk(self, destination, path):
return self._storage(destination).path_on_disk(path)
def sanitize(self, destination, path):
return self._storage(destination).sanitize(path)
def sanitize_name(self, destination, name):
return self._storage(destination).sanitize_name(name)
def sanitize_path(self, destination, path):
return self._storage(destination).sanitize_path(path)
def split_path(self, destination, path):
return self._storage(destination).split_path(path)
def join_path(self, destination, *path):
return self._storage(destination).join_path(*path)
def path_in_storage(self, destination, path):
return self._storage(destination).path_in_storage(path)
def _storage(self, destination):
if not destination in self._storage_managers:
raise NoSuchStorage("No storage configured for destination {destination}".format(**locals()))
return self._storage_managers[destination]
def _add_analysis_result(self, destination, path, result):
if not destination in self._storage_managers:
return
storage_manager = self._storage_managers[destination]
storage_manager.set_additional_metadata(path, "analysis", result)
def _on_analysis_finished(self, entry, result):
self._add_analysis_result(entry.location, entry.path, result)
| agpl-3.0 |
waldol1/BYU-AWESOME | scripts/compute_features.py | 1 | 16441 |
import os
import sys
import cv2
import scipy.ndimage as nd
import scipy.ndimage.morphology
import numpy as np
import traceback
def invert(im):
return 255 - im
def divide(im):
return im / 255
# deprecated
def gt_original_to_processed(im):
im = im / 255
im = 1 - im
return im
# deprecated
def gt_processed_to_original(im):
im = 1 - im
im = im * 255
return im
def shave(im, top, bottom, left, right):
if bottom == 0:
bottom = im.shape[0]
else:
bottom *= -1
if right == 0:
right = im.shape[1]
else:
right *= -1
if im.ndim == 3:
return im[top:bottom,left:right,:]
else:
return im[top:bottom,left:right]
def bilateral(im):
return cv2.bilateralFilter(im, d=100, sigmaColor=100, sigmaSpace=100)
def mean_transform(im, window_size):
return cv2.blur(im, (window_size, window_size), borderType=cv2.BORDER_REFLECT_101)
def median_transform(im, window_size):
return cv2.medianBlur(im, window_size)
def otsu(im):
if im.ndim == 3:
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
thresh, result = cv2.threshold(im, 0, 1, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
return result
# TOO SLOW
## creates a particular sauvola threshold function using K, width, height
#def create_sauvola(K, R, width, height):
# return lambda im, i, j: sauvola_threshold(im, i, j, K, R, width, height)
#
#
## compute the local sauvola threshold for im[i,j] for the given parameters
#def sauvola_threshold(im, i, j, K, R, width, height):
# window = im[max(i-width/2, 0):i+width/2, max(j-height/2, 0):j+height/2]
# u = np.mean(window)
# std = np.std(window)
# #return u * (1 - K * (1 - (std / R)) )
# return u + K * std
#
#
## generic local threshold algorithm using fthresh to calculate the local threshold
#def local_thresh(im, fthresh):
# out = np.zeros_like(im)
# for i in xrange(out.shape[0]):
# for j in xrange(out.shape[1]):
# thresh = fthresh(im, i, j)
# if im[i,j] >= thresh:
# out[i,j] = 255
# return out
#
#
#def sauvola(im, K=-0.2, R=128, size=79):
# if im.ndim == 3:
# im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# sauvola_func = create_sauvola(K, R, size, size)
# return local_thresh(im, sauvola_func)
def std_dev_transform(im, window_size):
if im.ndim == 3:
size = (window_size, window_size, 1)
else:
size = (window_size, window_size)
# scale to a wider range
return 3 * nd.generic_filter(im, nd.standard_deviation, size=size, mode='reflect')
def max_transform(im, window_size):
if im.ndim == 3:
size = (window_size, window_size, 1)
else:
size = (window_size, window_size)
return nd.maximum_filter(im, size=size, mode='reflect')
def min_transform(im, window_size):
if im.ndim == 3:
size = (window_size, window_size, 1)
else:
size = (window_size, window_size)
return nd.minimum_filter(im, size=size, mode='reflect')
def percentile_10_transform(im, window_size):
if im.ndim == 3:
size = (window_size, window_size, 1)
else:
size = (window_size, window_size)
return nd.percentile_filter(im, percentile=10, size=size, mode='reflect')
def percentile_25_transform(im, window_size):
if im.ndim == 3:
size = (window_size, window_size, 1)
else:
size = (window_size, window_size)
return nd.percentile_filter(im, percentile=25, size=size, mode='reflect')
def canny(im, low=100, high=200):
return cv2.Canny(im, low, high, L2gradient=True)
def percentile_gray(im):
hist, bin_edges = np.histogram(im, bins=256, range=(0,256), density=True)
cum_histo = np.cumsum(hist)
cum_histo *= 255
cum_histo = cum_histo.astype(np.uint8)
return cum_histo[im]
def percentile(im):
if im.ndim == 2:
return percentile_gray(im)
else:
b_perc = percentile_gray(im[:,:,0])
g_perc = percentile_gray(im[:,:,1])
r_perc = percentile_gray(im[:,:,2])
return np.concatenate([b_perc[:,:,np.newaxis], g_perc[:,:,np.newaxis], r_perc[:,:,np.newaxis]], axis=2)
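# Illustrative usage (the 1x4 array below is made-up data): percentile_gray()
# maps each pixel to its cumulative-histogram rank scaled to 0-255, a
# histogram-equalisation style transform; percentile() applies it per channel.
def _example_percentile():
    im = np.array([[0, 0, 128, 255]], dtype=np.uint8)
    # pixels covering the darkest half map to ~127, the brightest pixel to 255
    return percentile_gray(im)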
def slice(im, axis):
return im[:,:,axis]
def relative_darkness(im, window_size, threshold=15):
return relative_darkness2(im, window_size, threshold)
def relative_darkness2(im, window_size, threshold=15, group=None):
if im.ndim == 3:
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# find number of pixels at least $threshold less than the center value
def below_thresh(vals):
center_val = vals[vals.shape[0]/2]
lower_thresh = center_val - threshold
return (vals < lower_thresh).sum()
# find number of pixels at least $threshold greater than the center value
def above_thresh(vals):
center_val = vals[vals.shape[0]/2]
above_thresh = center_val + threshold
return (vals > above_thresh).sum()
# apply the above function convolutionally
lower = nd.generic_filter(im, below_thresh, size=window_size, mode='reflect')
upper = nd.generic_filter(im, above_thresh, size=window_size, mode='reflect')
# number of values within $threshold of the center value is the remainder
# constraint: lower + middle + upper = window_size ** 2
middle = np.empty_like(lower)
middle.fill(window_size*window_size)
middle = middle - (lower + upper)
# scale to range [0-255]
lower = lower * (255 / (window_size * window_size))
middle = middle * (255 / (window_size * window_size))
upper = upper * (255 / (window_size * window_size))
if group == 'lower':
return lower
if group == 'middle':
return middle
if group == 'upper':
return upper
return np.concatenate( [lower[:,:,np.newaxis], middle[:,:,np.newaxis], upper[:,:,np.newaxis]], axis=2)
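# Illustrative usage (the file path is a placeholder): for every pixel this
# feature yields three planes counting how many pixels in the window are darker,
# similar or brighter than the centre value by at least `threshold`.
def _example_relative_darkness(path='page.png'):
    im = cv2.imread(path)
    rd = relative_darkness(im, window_size=5, threshold=15)
    return rd.shape  # (rows, cols, 3): lower/middle/upper counts scaled to 0-255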
def remove_small_ccs(im, min_area=10, structure=np.ones(shape=(3,3), dtype=int)):
inverted = invert(im) # 0 is considered to be background
labeled, num_ccs = nd.label(inverted, structure=structure)
all_cc_slices = nd.find_objects(labeled)
for y, x in all_cc_slices:
area = (y.stop - y.start) * (x.stop - x.start)
if area < min_area:
inverted[y,x] = 0
return invert(inverted)
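# Illustrative usage (synthetic data): remove_small_ccs() treats dark pixels as
# foreground, labels connected components and erases any component whose
# bounding box covers fewer than min_area pixels.
def _example_remove_specks():
    im = 255 * np.ones((50, 50), dtype=np.uint8)
    im[5:7, 5:7] = 0      # tiny speck, bounding box 2x2 = 4 px -> removed
    im[20:40, 20:40] = 0  # large blob, 20x20 = 400 px -> kept
    return remove_small_ccs(im, min_area=10)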
def create_dilated_recall_weights():
in_dir = sys.argv[1]
recall_dir = os.path.join(in_dir, 'recall_weights')
out_dir = os.path.join(in_dir, 'dilated_recall_weights')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
func = lambda im: nd.morphology.grey_dilation(im, size=(3,3))
convert_dir(func, recall_dir, out_dir)
def modify_recall_weights(im):
binary = np.copy(im)
non_zero_idx = binary > 0
binary[non_zero_idx] = 1
struct = nd.generate_binary_structure(2,2) # 3x3 full
dilated = nd.morphology.binary_dilation(binary, struct)
diff = dilated - binary
diff_idx = diff != 0
im[diff_idx] = 5
return im
# sets the border pixels to have a weight of 5 (out of 128)
def create_modified_recall_weights():
in_dir = sys.argv[1]
recall_dir = os.path.join(in_dir, 'recall_weights')
out_dir = os.path.join(in_dir, 'modified_recall_weights')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
func = modify_recall_weights
convert_dir(func, recall_dir, out_dir)
def convert_dat(fname, size):
flat = np.loadtxt(fname)
return flat.reshape(size)
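# Illustrative round trip (the file name is a placeholder): the *_RWeights.dat /
# *_PWeights.dat files are flat whitespace-separated dumps with one value per
# pixel, so they are reshaped back to the image's (rows, cols) shape.
def _example_convert_dat(tmp='weights.dat'):
    np.savetxt(tmp, np.arange(6, dtype=float))  # six values on disk
    return convert_dat(tmp, (2, 3))             # back as a 2x3 array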
def convert_dats(im_file, recall_file, precision_file, recall_dir, precision_dir):
im = cv2.imread(im_file)
size = im.shape[:2]
recall_out = os.path.join(recall_dir, os.path.basename(im_file))
if not os.path.exists(recall_out):
recall_im = convert_dat(recall_file, size)
cv2.imwrite(recall_out, 128 * recall_im) # scale weights for discretization
precision_out = os.path.join(precision_dir, os.path.basename(im_file))
if not os.path.exists(precision_out):
precision_im = convert_dat(precision_file, size) + 1
cv2.imwrite(precision_out, 128 * precision_im) # scale weights for discretization
def convert_dats_main():
in_dir = sys.argv[1]
recall_dir = os.path.join(in_dir, 'recall_weights')
try:
os.makedirs(recall_dir)
except:
pass
precision_dir = os.path.join(in_dir, 'precision_weights')
try:
os.makedirs(precision_dir)
except:
pass
dat_dir = os.path.join(in_dir, 'pr_dats')
for f in os.listdir(dat_dir):
if not f.endswith('.png'):
continue
im_file = os.path.join(dat_dir, f)
base = os.path.splitext(f)[0]
recall_file = os.path.join(dat_dir, base + "_RWeights.dat")
precision_file = os.path.join(dat_dir, base + "_PWeights.dat")
if not os.path.exists(recall_file):
#raise Exception("%s does not exist" % recall_file)
print "%s does not exist" % recall_file
continue
if not os.path.exists(precision_file):
#raise Exception("%s does not exist" % precision_file)
print "%s does not exist" % precision_file
continue
try:
convert_dats(im_file, recall_file, precision_file, recall_dir, precision_dir)
except:
print im_file
traceback.print_exc()
def create_uniform_weights():
root_dir = sys.argv[1]
uniform_dir = os.path.join(root_dir, 'uniform_weights')
try:
os.makedirs(uniform_dir)
except:
pass
in_dir = os.path.join(root_dir, 'original_images')
convert_dir(lambda im: 128 * np.ones_like(im), in_dir, uniform_dir)
def create_dilated_baselines():
root_dir = sys.argv[1]
#for x in [1,3,5,7]:
for x in [int(sys.argv[2])]:
out_dir = os.path.join(root_dir, 'baselines_%d' % x)
try:
os.makedirs(out_dir)
except:
pass
in_dir = os.path.join(root_dir, 'baselines')
structure = np.ones((x,x))
convert_dir(lambda im: nd.morphology.binary_dilation(im, structure=structure).astype(np.uint8), in_dir, out_dir)
def convert_dir(func, in_dir, out_dir, force_overwrite=False):
for f in os.listdir(in_dir):
in_path = os.path.join(in_dir, f)
f_base = os.path.basename(f)
f_base = os.path.splitext(f_base)[0]
out_path = os.path.join(out_dir, f_base + ".png")
if os.path.exists(out_path) and not force_overwrite:
continue
#im = cv2.imread(in_path, cv2.IMREAD_UNCHANGED)
im = cv2.imread(in_path, 1)
if im is None:
raise Exception("Image %s could not be read" % in_path)
try:
processed = func(im)
except:
print "Exception occured on image: %s" % f
raise
cv2.imwrite(out_path, processed)
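# Illustrative usage (directory names are placeholders): convert_dir() is the
# generic driver used by the process_features*() helpers below - it reads every
# image in in_dir, applies the given function and writes PNGs to out_dir,
# skipping outputs that already exist unless force_overwrite is set.
def _example_convert_dir():
    convert_dir(lambda im: mean_transform(im, 9), 'original_images', 'mean/9')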
def convert_file(func, in_file, out_file, gray=False):
im = cv2.imread(in_file, cv2.IMREAD_UNCHANGED)
processed = func(im)
cv2.imwrite(out_file, processed)
def process_features1():
_dir = sys.argv[1]
in_dir = os.path.join(_dir, "original_images")
for transform in ['mean', 'median']:
print transform
func = globals()[transform + "_transform"]
for size in [9, 19, 39, 79]:
print " ", size
size_func = lambda im: func(im, size)
out_dir = os.path.join(_dir, transform, str(size))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
convert_dir(size_func, in_dir, out_dir)
def process_features2():
_dir = sys.argv[1]
in_dir = os.path.join(_dir, "original_images")
for transform in ['min', 'max', 'percentile_10', 'percentile_25']:
print transform
func = globals()[transform + "_transform"]
for size in [3, 5, 7, 9]:
print " ", size
size_func = lambda im: func(im, size)
out_dir = os.path.join(_dir, transform, str(size))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
convert_dir(size_func, in_dir, out_dir)
def process_features3():
_dir = sys.argv[1]
in_dir = os.path.join(_dir, "original_images")
for transform in ['std_dev']:
print transform
func = globals()[transform + "_transform"]
for size in [3, 5, 7, 9]:
print " ", size
size_func = lambda im: func(im, size)
out_dir = os.path.join(_dir, transform, str(size))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
convert_dir(size_func, in_dir, out_dir)
def process_features4():
_dir = sys.argv[1]
in_dir = os.path.join(_dir, "original_images")
#for transform in ['bilateral', 'percentile', 'otsu']:
for transform in ['otsu']:
print transform
func = globals()[transform]
out_dir = os.path.join(_dir, transform)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
convert_dir(func, in_dir, out_dir)
def process_features5():
_dir = sys.argv[1]
in_dir = os.path.join(_dir, "original_images")
for transform in ['relative_darkness2']:
print transform
func = globals()[transform]
for thresh in [10, 20, 40]:
print " ", thresh
for size in [5, 7, 9]:
print " ", size
for group in ['lower', 'middle', 'upper']:
print " ", group
size_func = lambda im: func(im, size, thresh, group)
out_dir = os.path.join(_dir, transform, str(size), str(thresh), group)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
convert_dir(size_func, in_dir, out_dir)
def process_features7():
_dir = sys.argv[1]
in_dir = os.path.join(_dir, "original_images")
for transform in ['slice']:
print transform
func = globals()[transform]
for name, axis in [('b', 0), ('g', 1), ('r', 2)]:
f = lambda im: func(im, axis)
out_dir = os.path.join(_dir, transform, name)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
convert_dir(f, in_dir, out_dir)
def process_features6():
_dir = sys.argv[1]
in_dir = os.path.join(_dir, "original_images")
for transform in ['canny']:
print transform
func = globals()[transform]
for low in [75, 100, 125]:
print " ", low
for high in [150, 175, 200]:
print " ", high
size_func = lambda im: func(im, low, high)
out_dir = os.path.join(_dir, transform, str(low), str(high))
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
convert_dir(size_func, in_dir, out_dir)
def process_features():
process_features1()
process_features2()
process_features3()
process_features4()
process_features5()
process_features6()
def process_gt():
_dir = sys.argv[1]
in_dir = os.path.join(_dir, 'original_gt')
out_dir = os.path.join(_dir, 'processed_gt')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
convert_dir(gt_original_to_processed, in_dir, out_dir)
def invert_gt():
_dir = sys.argv[1]
in_dir = os.path.join(_dir, 'original_gt')
out_dir = os.path.join(_dir, 'original_gt')
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
convert_dir(invert, in_dir, out_dir, force_overwrite=True)
def crop_parzival():
in_dir = sys.argv[1]
out_dir = sys.argv[2]
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
func = lambda im: shave(im, 200, 200, 75, 120)
convert_dir(func, in_dir, out_dir)
def crop_saint_gall():
in_dir = sys.argv[1]
out_dir = sys.argv[2]
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
func = lambda im: shave(im, 500, 700, 225, 225)
convert_dir(func, in_dir, out_dir)
def crop_rodrigo():
in_dir = sys.argv[1]
out_dir = sys.argv[2]
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
func = lambda im: shave(im, 30, 30, 30, 30)
convert_dir(func, in_dir, out_dir)
def crop_single():
top = int(sys.argv[3])
bottom = int(sys.argv[4])
left = int(sys.argv[5])
right = int(sys.argv[6])
func = lambda im: shave(im, top, bottom, left, right)
convert_file(func, sys.argv[1], sys.argv[2])
def crop_perc():
_dir = sys.argv[1]
perc = int(sys.argv[2]) / 100.
func = lambda im: im[:, :int(im.shape[1] * perc)]
convert_dir(func, _dir, _dir, True)
def clean_binary_single():
func = lambda im: remove_small_ccs(im, int(sys.argv[3]))
convert_file(func, sys.argv[1], sys.argv[2], gray=True)
def clean_binary_parzival():
func = lambda im: remove_small_ccs(im, 200)
convert_dir(func, sys.argv[1], sys.argv[2], gray=True)
def clean_binary_saintgall():
func = lambda im: remove_small_ccs(im, 400)
convert_dir(func, sys.argv[1], sys.argv[2], gray=True)
def clean_binary_hbr():
func = lambda im: remove_small_ccs(im, 80)
convert_dir(func, sys.argv[1], sys.argv[2], gray=True)
def clean_binary_hdlac():
func = lambda im: remove_small_ccs(im, 100)
convert_dir(func, sys.argv[1], sys.argv[2], gray=True)
if __name__ == "__main__":
#crop_saint_gall()
#crop_parzival()
#crop_rodrigo()
#crop_single()
#crop_perc()
#process_features()
#process_features1()
#process_features2()
#process_features3()
#process_features4()
#process_features5()
#process_features6()
process_features7()
#process_gt()
#invert_gt()
#clean_binary_single()
#clean_binary_parzival()
#clean_binary_saintgall()
#clean_binary_hbr()
#clean_binary_hdlac()
#convert_dats_main()
#create_uniform_weights()
#create_dilated_recall_weights()
#create_modified_recall_weights()
#create_dilated_baselines()
#convert_dir(invert, sys.argv[1], sys.argv[2], True)
#convert_dir(divide, sys.argv[1], sys.argv[1], True)
#convert_file(bilateral, '/home/chris/Dropbox/test.jpg', '/home/chris/Dropbox/out.png')
#for size in [3, 5, 7, 9]:
# for thresh in [7, 15, 20, 30, 45]:
# convert_file(lambda im: relative_darkness(im, size, threshold=thresh), '/home/chris/Dropbox/test.jpg', '/home/chris/Dropbox/out_%d_%d.png' % (size,thresh))
| gpl-3.0 |
KristoferHellman/gimli | python/pygimli/viewer/mayaview.py | 1 | 2081 | # -*- coding: utf-8 -*-
import sys
import os
from matplotlib import pyplot as plt
showMesh3DFunct = 'showMesh3DMayvi'
try:
from mayavi import mlab
except ImportError:
error_msg = """Visualization in 3D requires Mayavi.\n""" + \
"""Try 'pip install mayavi' depending on your system.\n""" + \
"""Fallback to matplotlib \n"""
sys.stderr.write(error_msg)
showMesh3DFunct = 'showMesh3DFallback'
def showMesh3D(mesh, interactive=True):
return globals()[showMesh3DFunct](mesh, interactive)
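# Illustrative usage (assumes the caller already has a pygimli mesh object):
# the module level showMesh3DFunct string selects the Mayavi backend when the
# import above succeeded and the matplotlib fallback otherwise.
def _example_show(mesh):
    # interactive=False keeps Mayavi offscreen / uses the matplotlib fallback
    showMesh3D(mesh, interactive=False)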
def showMesh3DFallback(mesh, interactive=True):
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
if len(mesh.positions()) < 1e4:
for pos in mesh.positions():
ax.scatter(pos[0], pos[1], pos[2], 'ko')
text = ("Proper visualization in 3D requires Mayavi.\n"
"""Try 'pip install mayavi' depending on your system.""")
ax.set_title(text)
plt.show()
def showMesh3DMayvi(mesh, interactive=True):
"""
Proof of concept for mayavi binding.
Parameters
----------
mesh : pygimli.Mesh
interactive : bool
"""
# avoid opening a Mayavi window when building documentation
if not interactive:
mlab.options.offscreen = True
fig = mlab.figure(bgcolor=(1, 1, 1), size=(400, 400))
# temporary VTK write & read, may be replaced with direct VTK object.
tmp = "/tmp/gimli_3d_view_%s.vtk" % os.getpid()
mesh.exportVTK(tmp)
src = mlab.pipeline.open(tmp, figure=fig)
os.remove(tmp)
surf = mlab.pipeline.surface(src, figure=fig, opacity=0.5)
edges = mlab.pipeline.extract_edges(surf, figure=fig)
mlab.pipeline.surface(edges, color=(0, 0, 0), figure=fig)
# mlab.pipeline.image_plane_widget(surf, colormap='gray',
# plane_orientation='x_axes')
if interactive:
mlab.show()
else:
arr = mlab.screenshot(figure=fig, antialiased=True)
plt.imshow(arr)
plt.axis('off')
plt.show()
| gpl-3.0 |
aboutsajjad/Bridge | app_packages/youtube_dl/extractor/espn.py | 8 | 8708 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .once import OnceIE
from ..compat import compat_str
from ..utils import (
determine_ext,
int_or_none,
unified_timestamp,
)
class ESPNIE(OnceIE):
_VALID_URL = r'''(?x)
https?://
(?:
(?:
(?:
(?:(?:\w+\.)+)?espn\.go|
(?:www\.)?espn
)\.com/
(?:
(?:
video/(?:clip|iframe/twitter)|
watch/player
)
(?:
.*?\?.*?\bid=|
/_/id/
)
)
)|
(?:www\.)espnfc\.(?:com|us)/(?:video/)?[^/]+/\d+/video/
)
(?P<id>\d+)
'''
_TESTS = [{
'url': 'http://espn.go.com/video/clip?id=10365079',
'info_dict': {
'id': '10365079',
'ext': 'mp4',
'title': '30 for 30 Shorts: Judging Jewell',
'description': 'md5:39370c2e016cb4ecf498ffe75bef7f0f',
'timestamp': 1390936111,
'upload_date': '20140128',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://broadband.espn.go.com/video/clip?id=18910086',
'info_dict': {
'id': '18910086',
'ext': 'mp4',
'title': 'Kyrie spins around defender for two',
'description': 'md5:2b0f5bae9616d26fba8808350f0d2b9b',
'timestamp': 1489539155,
'upload_date': '20170315',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download f4m manifest'],
}, {
'url': 'http://nonredline.sports.espn.go.com/video/clip?id=19744672',
'only_matching': True,
}, {
'url': 'https://cdn.espn.go.com/video/clip/_/id/19771774',
'only_matching': True,
}, {
'url': 'http://www.espn.com/watch/player?id=19141491',
'only_matching': True,
}, {
'url': 'http://www.espn.com/watch/player?bucketId=257&id=19505875',
'only_matching': True,
}, {
'url': 'http://www.espn.com/watch/player/_/id/19141491',
'only_matching': True,
}, {
'url': 'http://www.espn.com/video/clip?id=10365079',
'only_matching': True,
}, {
'url': 'http://www.espn.com/video/clip/_/id/17989860',
'only_matching': True,
}, {
'url': 'https://espn.go.com/video/iframe/twitter/?cms=espn&id=10365079',
'only_matching': True,
}, {
'url': 'http://www.espnfc.us/video/espn-fc-tv/86/video/3319154/nashville-unveiled-as-the-newest-club-in-mls',
'only_matching': True,
}, {
'url': 'http://www.espnfc.com/english-premier-league/23/video/3324163/premier-league-in-90-seconds-golden-tweets',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
clip = self._download_json(
'http://api-app.espn.com/v1/video/clips/%s' % video_id,
video_id)['videos'][0]
title = clip['headline']
format_urls = set()
formats = []
def traverse_source(source, base_source_id=None):
for source_id, source in source.items():
if source_id == 'alert':
continue
elif isinstance(source, compat_str):
extract_source(source, base_source_id)
elif isinstance(source, dict):
traverse_source(
source,
'%s-%s' % (base_source_id, source_id)
if base_source_id else source_id)
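# Illustrative note (keys are hypothetical): the clip's "links" block nests URL
# strings inside dicts, e.g. {"HLS": {"HD": "https://.../master.m3u8"}};
# traverse_source() walks the dicts, building a source id from the enclosing
# keys ("HLS", or "HLS-HD" for deeper nesting) and hands every URL string to
# extract_source(), which uses that id as the format_id.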
def extract_source(source_url, source_id=None):
if source_url in format_urls:
return
format_urls.add(source_url)
ext = determine_ext(source_url)
if OnceIE.suitable(source_url):
formats.extend(self._extract_once_formats(source_url))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
source_url, video_id, fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
source_url, video_id, f4m_id=source_id, fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=source_id, fatal=False))
else:
f = {
'url': source_url,
'format_id': source_id,
}
mobj = re.search(r'(\d+)p(\d+)_(\d+)k\.', source_url)
if mobj:
f.update({
'height': int(mobj.group(1)),
'fps': int(mobj.group(2)),
'tbr': int(mobj.group(3)),
})
if source_id == 'mezzanine':
f['preference'] = 1
formats.append(f)
links = clip.get('links', {})
traverse_source(links.get('source', {}))
traverse_source(links.get('mobile', {}))
self._sort_formats(formats)
description = clip.get('caption') or clip.get('description')
thumbnail = clip.get('thumbnail')
duration = int_or_none(clip.get('duration'))
timestamp = unified_timestamp(clip.get('originalPublishDate'))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
}
class ESPNArticleIE(InfoExtractor):
_VALID_URL = r'https?://(?:espn\.go|(?:www\.)?espn)\.com/(?:[^/]+/)*(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://espn.go.com/nba/recap?gameId=400793786',
'only_matching': True,
}, {
'url': 'http://espn.go.com/blog/golden-state-warriors/post/_/id/593/how-warriors-rapidly-regained-a-winning-edge',
'only_matching': True,
}, {
'url': 'http://espn.go.com/sports/endurance/story/_/id/12893522/dzhokhar-tsarnaev-sentenced-role-boston-marathon-bombings',
'only_matching': True,
}, {
'url': 'http://espn.go.com/nba/playoffs/2015/story/_/id/12887571/john-wall-washington-wizards-no-swelling-left-hand-wrist-game-5-return',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if ESPNIE.suitable(url) else super(ESPNArticleIE, cls).suitable(url)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_id = self._search_regex(
r'class=(["\']).*?video-play-button.*?\1[^>]+data-id=["\'](?P<id>\d+)',
webpage, 'video id', group='id')
return self.url_result(
'http://espn.go.com/video/clip?id=%s' % video_id, ESPNIE.ie_key())
class FiveThirtyEightIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?fivethirtyeight\.com/features/(?P<id>[^/?#]+)'
_TEST = {
'url': 'http://fivethirtyeight.com/features/how-the-6-8-raiders-can-still-make-the-playoffs/',
'info_dict': {
'id': '21846851',
'ext': 'mp4',
'title': 'FiveThirtyEight: The Raiders can still make the playoffs',
'description': 'Neil Paine breaks down the simplest scenario that will put the Raiders into the playoffs at 8-8.',
'timestamp': 1513960621,
'upload_date': '20171222',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download f4m manifest'],
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_id = self._search_regex(
r'data-video-id=["\'](?P<id>\d+)',
webpage, 'video id', group='id')
return self.url_result(
'http://espn.go.com/video/clip?id=%s' % video_id, ESPNIE.ie_key())
| mit |
bjzhang/xen | tools/python/xen/util/Brctl.py | 48 | 5242 | """Bridge control utilities.
"""
import os
import os.path
import re
import sys
CMD_IFCONFIG = 'ifconfig'
CMD_ROUTE = 'route'
CMD_BRCTL = 'brctl'
CMD_IPTABLES = "iptables"
opts = None
class Opts:
def __init__(self, defaults):
for (k, v) in defaults.items():
setattr(self, k, v)
pass
def cmd(p, s):
"""Print and execute command 'p' with args 's'.
"""
global opts
c = p + ' ' + s
if opts.verbose: print c
if not opts.dryrun:
os.system(c)
bridgeRE = re.compile(r'([^\t]*)\t*[^\t]*\t*[^\t]*\t*([^\t]*)')
def get_state():
fin = os.popen(CMD_BRCTL + ' show', 'r')
try:
bridges = {}
brlist = None
brname = None
first = True
for line in fin:
if first:
first = False
elif line[0] == '\t':
brlist.append(line.strip())
else:
if brname:
bridges[brname] = brlist
m = bridgeRE.match(line)
brname = m.group(1)
brlist = [m.group(2).strip()]
if brname:
bridges[brname] = brlist
return bridges
finally:
fin.close()
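# Illustrative parse (the data line is made up): 'brctl show' prints
# "bridge name / bridge id / STP enabled / interfaces"; bridgeRE pulls out the
# bridge name and the first interface, and continuation lines starting with a
# tab add further interfaces to the same bridge.
def _example_parse_brctl_line():
    line = "xenbr0\t\t8000.001122334455\tno\t\teth0"
    m = bridgeRE.match(line)
    return m.group(1), m.group(2).strip()  # ('xenbr0', 'eth0')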
def vif_bridge_add(params):
"""Add the network interface for vif on dom to a bridge.
"""
cmd(CMD_BRCTL, 'addif %(bridge)s %(vif)s' % params)
def vif_bridge_rem(params):
"""Remove the network interface for vif on dom from a bridge.
"""
cmd(CMD_BRCTL, 'delif %(bridge)s %(vif)s' % params)
def vif_restrict_addr(vif, addr, delete=0):
d = { 'vif': vif, 'addr': addr}
if delete:
d['flag'] = '-D'
else:
d['flag'] = '-A'
cmd(CMD_IPTABLES, '-P FORWARD DROP')
cmd(CMD_IPTABLES, '%(flag)s FORWARD -m physdev --physdev-in %(vif)s -s %(addr)s -j ACCEPT' % d)
cmd(CMD_IPTABLES, '%(flag)s FORWARD -m physdev --physdev-out %(vif)s -d %(addr)s -j ACCEPT' % d)
def bridge_create(bridge, **kwd):
"""Create a bridge.
Defaults hello time to 0, forward delay to 0 and stp off.
"""
cmd(CMD_BRCTL, 'addbr %s' % bridge)
if kwd.get('hello', None) is None:
kwd['hello'] = 0
if kwd.get('fd', None) is None:
kwd['fd'] = 0
if kwd.get('stp', None) is None:
kwd['stp'] = 'off'
bridge_set(bridge, **kwd)
cmd(CMD_IFCONFIG, "%s up" % bridge)
def bridge_set(bridge, hello=None, fd=None, stp=None):
"""Set bridge parameters.
"""
if hello is not None:
cmd(CMD_BRCTL, 'sethello %s %d' % (bridge, hello))
if fd is not None:
cmd(CMD_BRCTL, 'setfd %s %d' % (bridge, fd))
if stp is not None:
cmd(CMD_BRCTL, 'stp %s %s' % (bridge, stp))
def bridge_del(bridge):
"""Delete a bridge.
"""
cmd(CMD_IFCONFIG, '%s down' % bridge)
cmd(CMD_BRCTL, 'delbr %s' % bridge)
def routes():
"""Return a list of the routes.
"""
fin = os.popen(CMD_ROUTE + ' -n', 'r')
routes = []
for x in fin:
if x.startswith('Kernel'): continue
if x.startswith('Destination'): continue
x = x.strip()
y = x.split()
z = { 'destination': y[0],
'gateway' : y[1],
'mask' : y[2],
'flags' : y[3],
'metric' : y[4],
'ref' : y[5],
'use' : y[6],
'interface' : y[7] }
routes.append(z)
return routes
def ifconfig(interface):
"""Return the ip config for an interface,
"""
fin = os.popen(CMD_IFCONFIG + ' %s' % interface, 'r')
inetre = re.compile('\s*inet\s*addr:(?P<address>\S*)\s*Bcast:(?P<broadcast>\S*)\s*Mask:(?P<mask>\S*)')
info = None
for x in fin:
m = inetre.match(x)
if not m: continue
info = m.groupdict()
info['interface'] = interface
break
return info
def reconfigure(interface, bridge):
"""Reconfigure an interface to be attached to a bridge, and give the bridge
the IP address etc. from the interface. Move the default route from the
interface to the bridge.
"""
global opts
intf_info = ifconfig(interface)
if not intf_info:
print >>sys.stderr, 'Interface not found:', interface
return
#bridge_info = ifconfig(bridge)
#if not bridge_info:
# print >>sys.stderr, 'Bridge not found:', bridge
# return
route_info = routes()
intf_info['bridge'] = bridge
intf_info['gateway'] = None
for r in route_info:
if (r['destination'] == '0.0.0.0' and
'G' in r['flags'] and
r['interface'] == interface):
intf_info['gateway'] = r['gateway']
if not intf_info['gateway']:
print >>sys.stderr, 'Gateway not found: ', interface
return
cmd(CMD_IFCONFIG,
'%(bridge)s %(address)s netmask %(mask)s broadcast %(broadcast)s up'
% intf_info)
cmd(CMD_ROUTE,
'add default gateway %(gateway)s dev %(bridge)s'
% intf_info)
cmd(CMD_BRCTL, 'addif %(bridge)s %(interface)s' % intf_info)
cmd(CMD_IFCONFIG, '%(interface)s 0.0.0.0' % intf_info)
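# Illustrative dry run (interface/bridge names are hypothetical): with dryrun
# set, cmd() only prints the ifconfig/route/brctl commands that reconfigure()
# would issue to move the address and default route of eth0 onto xenbr0.
def _example_dry_run():
    set_opts(Opts({'verbose': 1, 'dryrun': 1}))
    reconfigure('eth0', 'xenbr0')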
defaults = {
'verbose' : 1,
'dryrun' : 0,
}
opts = Opts(defaults)
def set_opts(val):
global opts
opts = val
return opts
| gpl-2.0 |
asadziach/tensorflow | tensorflow/python/training/adagrad.py | 64 | 4181 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adagrad for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class AdagradOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adagrad algorithm.
See this [paper](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
or this
[intro](http://cs.stanford.edu/~ppasupat/a9online/uploads/proximal_notes.pdf).
"""
def __init__(self, learning_rate, initial_accumulator_value=0.1,
use_locking=False, name="Adagrad"):
"""Construct a new Adagrad optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
initial_accumulator_value: A floating point value.
Starting value for the accumulators, must be positive.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Adagrad".
Raises:
ValueError: If the `initial_accumulator_value` is invalid.
"""
if initial_accumulator_value <= 0.0:
raise ValueError("initial_accumulator_value must be positive: %s" %
initial_accumulator_value)
super(AdagradOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._initial_accumulator_value = initial_accumulator_value
# Created in Initialize.
self._learning_rate_tensor = None
def _create_slots(self, var_list):
for v in var_list:
with ops.colocate_with(v):
dtype = v.dtype.base_dtype
init = init_ops.constant_initializer(self._initial_accumulator_value,
dtype=dtype)
self._get_or_make_slot_with_initializer(v, init, v.get_shape(), dtype,
"accumulator", self._name)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
name="learning_rate")
def _apply_dense(self, grad, var):
acc = self.get_slot(var, "accumulator")
return training_ops.apply_adagrad(
var,
acc,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
acc = self.get_slot(var, "accumulator")
return training_ops.resource_apply_adagrad(
var.handle,
acc.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
acc = self.get_slot(var, "accumulator")
return training_ops.sparse_apply_adagrad(
var,
acc,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.values,
grad.indices,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
acc = self.get_slot(var, "accumulator")
return training_ops.resource_sparse_apply_adagrad(
var.handle,
acc.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
grad,
indices,
use_locking=self._use_locking)
| apache-2.0 |
pballand/congress | thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testdfa.py | 2 | 1531 |
import unittest
import antlr3
class TestDFA(unittest.TestCase):
"""Test case for the DFA class."""
def setUp(self):
"""Setup test fixure.
We need a Recognizer in order to instanciate a DFA.
"""
class TRecognizer(antlr3.BaseRecognizer):
api_version = 'HEAD'
self.recog = TRecognizer()
def testInit(self):
"""DFA.__init__()
Just a smoke test.
"""
dfa = antlr3.DFA(
self.recog, 1,
eot=[],
eof=[],
min=[],
max=[],
accept=[],
special=[],
transition=[]
)
def testUnpack(self):
"""DFA.unpack()"""
self.assertEqual(
antlr3.DFA.unpack(
"\1\3\1\4\2\uffff\1\5\22\uffff\1\2\31\uffff\1\6\6\uffff"
"\32\6\4\uffff\1\6\1\uffff\32\6"
),
[ 3, 4, -1, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
6, -1, -1, -1, -1, -1, -1, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, -1, -1, -1, -1, 6, -1,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6
]
)
if __name__ == "__main__":
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| apache-2.0 |
asm-products/movie-database-service | ani/lib/python2.7/site-packages/rest_framework/status.py | 110 | 1982 | """
Descriptive HTTP status codes, for code readability.
See RFC 2616 - http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
And RFC 6585 - http://tools.ietf.org/html/rfc6585
"""
from __future__ import unicode_literals
def is_informational(code):
return code >= 100 and code <= 199
def is_success(code):
return code >= 200 and code <= 299
def is_redirect(code):
return code >= 300 and code <= 399
def is_client_error(code):
return code >= 400 and code <= 499
def is_server_error(code):
return code >= 500 and code <= 599
HTTP_100_CONTINUE = 100
HTTP_101_SWITCHING_PROTOCOLS = 101
HTTP_200_OK = 200
HTTP_201_CREATED = 201
HTTP_202_ACCEPTED = 202
HTTP_203_NON_AUTHORITATIVE_INFORMATION = 203
HTTP_204_NO_CONTENT = 204
HTTP_205_RESET_CONTENT = 205
HTTP_206_PARTIAL_CONTENT = 206
HTTP_300_MULTIPLE_CHOICES = 300
HTTP_301_MOVED_PERMANENTLY = 301
HTTP_302_FOUND = 302
HTTP_303_SEE_OTHER = 303
HTTP_304_NOT_MODIFIED = 304
HTTP_305_USE_PROXY = 305
HTTP_306_RESERVED = 306
HTTP_307_TEMPORARY_REDIRECT = 307
HTTP_400_BAD_REQUEST = 400
HTTP_401_UNAUTHORIZED = 401
HTTP_402_PAYMENT_REQUIRED = 402
HTTP_403_FORBIDDEN = 403
HTTP_404_NOT_FOUND = 404
HTTP_405_METHOD_NOT_ALLOWED = 405
HTTP_406_NOT_ACCEPTABLE = 406
HTTP_407_PROXY_AUTHENTICATION_REQUIRED = 407
HTTP_408_REQUEST_TIMEOUT = 408
HTTP_409_CONFLICT = 409
HTTP_410_GONE = 410
HTTP_411_LENGTH_REQUIRED = 411
HTTP_412_PRECONDITION_FAILED = 412
HTTP_413_REQUEST_ENTITY_TOO_LARGE = 413
HTTP_414_REQUEST_URI_TOO_LONG = 414
HTTP_415_UNSUPPORTED_MEDIA_TYPE = 415
HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE = 416
HTTP_417_EXPECTATION_FAILED = 417
HTTP_428_PRECONDITION_REQUIRED = 428
HTTP_429_TOO_MANY_REQUESTS = 429
HTTP_431_REQUEST_HEADER_FIELDS_TOO_LARGE = 431
HTTP_500_INTERNAL_SERVER_ERROR = 500
HTTP_501_NOT_IMPLEMENTED = 501
HTTP_502_BAD_GATEWAY = 502
HTTP_503_SERVICE_UNAVAILABLE = 503
HTTP_504_GATEWAY_TIMEOUT = 504
HTTP_505_HTTP_VERSION_NOT_SUPPORTED = 505
HTTP_511_NETWORK_AUTHENTICATION_REQUIRED = 511
| agpl-3.0 |
eayunstack/nova | nova/scheduler/filters/aggregate_multitenancy_isolation.py | 20 | 1972 | # Copyright (c) 2011-2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova.openstack.common import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
class AggregateMultiTenancyIsolation(filters.BaseHostFilter):
"""Isolate tenants in specific aggregates."""
# Aggregate data and tenant do not change within a request
run_filter_once_per_request = True
def host_passes(self, host_state, filter_properties):
"""If a host is in an aggregate that has the metadata key
"filter_tenant_id" it can only create instances from that tenant(s).
A host can be in different aggregates.
If a host doesn't belong to an aggregate with the metadata key
"filter_tenant_id" it can create instances from all tenants.
"""
spec = filter_properties.get('request_spec', {})
props = spec.get('instance_properties', {})
tenant_id = props.get('project_id')
context = filter_properties['context']
metadata = db.aggregate_metadata_get_by_host(context, host_state.host,
key="filter_tenant_id")
if metadata != {}:
if tenant_id not in metadata["filter_tenant_id"]:
LOG.debug("%s fails tenant id on aggregate", host_state)
return False
return True
| apache-2.0 |
khancyr/ardupilot | libraries/AP_HAL_ChibiOS/hwdef/scripts/dma_resolver.py | 15 | 20646 | #!/usr/bin/env python
import sys, fnmatch
import importlib
# peripheral types that can be shared, wildcard patterns
SHARED_MAP = ["I2C*", "USART*_TX", "UART*_TX", "SPI*", "TIM*_UP", "TIM*_CH*"]
ignore_list = []
dma_map = None
debug = False
def check_possibility(periph, dma_stream, curr_dict, dma_map, check_list, cannot_use_stream, forbidden_map):
global ignore_list
if debug:
print('............ Checking ', periph, dma_stream, 'without', cannot_use_stream)
for other_periph in curr_dict:
if other_periph != periph:
if curr_dict[other_periph] == dma_stream:
if other_periph in forbidden_map[periph]:
if debug:
print('.................... Forbidden', periph, other_periph)
return False
if debug:
print('.................... Collision', other_periph, dma_stream)
ignore_list.append(periph)
check_str = "%s(%d,%d) %s(%d,%d)" % (
other_periph, curr_dict[other_periph][0],
curr_dict[other_periph][1], periph, dma_stream[0],
dma_stream[1])
#check if we did this before
if check_str in check_list:
return False
check_list.append(check_str)
if debug:
print("Trying to Resolve Conflict: ", check_str)
#check if we can resolve by swapping with other periphs
for streamchan in dma_map[other_periph]:
stream = (streamchan[0], streamchan[1])
if stream != curr_dict[other_periph] and \
stream not in cannot_use_stream and \
check_possibility(other_periph, stream, curr_dict, dma_map, check_list,
cannot_use_stream+[(dma_stream)], forbidden_map):
curr_dict[other_periph] = stream
if debug:
print ('....................... Resolving', other_periph, stream)
return True
if debug:
print ('....................... UnSolved !!!!!!!!', periph, dma_stream)
return False
if debug:
print ('....................... Solved ..........', periph, dma_stream)
return True
def can_share(periph, noshare_list):
'''check if a peripheral is in the SHARED_MAP list'''
for noshare in noshare_list:
if fnmatch.fnmatch(periph, noshare):
return False
for f in SHARED_MAP:
if fnmatch.fnmatch(periph, f):
return True
if debug:
print("%s can't share" % periph)
return False
# list of peripherals on H7 that are on DMAMUX2 and BDMA
have_DMAMUX = False
DMAMUX2_peripherals = [ 'I2C4', 'SPI6', 'ADC3' ]
def dmamux_channel(key):
'''return DMAMUX channel for H7'''
for p in DMAMUX2_peripherals:
if key.find(p) != -1:
return 'STM32_DMAMUX2_' + key
# default to DMAMUX1
return 'STM32_DMAMUX1_' + key
def dma_name(key):
'''return 'DMA' or 'BDMA' based on peripheral name'''
if not have_DMAMUX:
return "DMA"
for p in DMAMUX2_peripherals:
if key.find(p) != -1:
return 'BDMA'
return 'DMA'
def chibios_dma_define_name(key):
'''return define name needed for board.h for ChibiOS'''
dma_key = key + '_' + dma_name(key)
if key.startswith('ADC'):
return 'STM32_ADC_%s_' % dma_key
elif key.startswith('SPI'):
return 'STM32_SPI_%s_' % dma_key
elif key.startswith('I2C'):
return 'STM32_I2C_%s_' % dma_key
elif key.startswith('USART'):
return 'STM32_UART_%s_' % dma_key
elif key.startswith('UART'):
return 'STM32_UART_%s_' % dma_key
elif key.startswith('SDIO') or key.startswith('SDMMC'):
return 'STM32_SDC_%s_' % dma_key
elif key.startswith('TIM'):
return 'STM32_TIM_%s_' % dma_key
else:
print("Error: Unknown key type %s" % key)
sys.exit(1)
def get_list_index(peripheral, priority_list):
'''return index into priority_list for a peripheral'''
for i in range(len(priority_list)):
str = priority_list[i]
if fnmatch.fnmatch(peripheral, str):
return i
# default to max priority
return len(priority_list)
def get_sharing_priority(periph_list, priority_list):
'''get priority of a list of peripherals we could share with'''
highest = len(priority_list)
for p in periph_list:
prio = get_list_index(p, priority_list)
if prio < highest:
highest = prio
return highest
def generate_DMAMUX_map_mask(peripheral_list, channel_mask, noshare_list, dma_exclude, stream_ofs):
'''
generate a dma map suitable for a board with a DMAMUX
In principle any peripheral can use any stream, but we need to
ensure that a peripheral doesn't try to use the same stream as its
partner (eg. a RX/TX pair)
'''
dma_map = {}
idsets = {}
# first unshareable peripherals
available = channel_mask
for p in peripheral_list:
dma_map[p] = []
idsets[p] = set()
for p in peripheral_list:
if can_share(p, noshare_list) or p in dma_exclude:
continue
for i in range(16):
mask = (1<<i)
if available & mask != 0:
available &= ~mask
dma = (i // 8) + 1
stream = i % 8
dma_map[p].append((dma,stream))
idsets[p].add(i)
break
if debug:
print('dma_map1: ', dma_map)
print('available: 0x%04x' % available)
# now shareable
idx = 0
for p in peripheral_list:
if not can_share(p, noshare_list) or p in dma_exclude:
continue
base = idx % 16
for i in range(16):
found = None
for ii in list(range(base,16)) + list(range(0,base)):
if (1<<ii) & available == 0:
continue
dma = (ii // 8) + 1
stream = ii % 8
if (dma,stream) in dma_map[p]:
# this peripheral is already using the stream
continue
# prevent attempts to share with other half of same peripheral
if p.endswith('RX'):
other = p[:-2] + 'TX'
elif p.endswith('TX'):
other = p[:-2] + 'RX'
else:
other = None
if other is not None and other in idsets and ii in idsets[other]:
if len(idsets[p]) >= len(idsets[other]) and len(idsets[other]) > 0:
continue
idsets[other].remove(ii)
dma_map[other].remove((dma,stream))
found = ii
break
if found is None:
continue
base = (found+1) % 16
dma = (found // 8) + 1
stream = found % 8
dma_map[p].append((dma,stream))
idsets[p].add(found)
idx = (idx+1) % 16
if stream_ofs != 0:
# add in stream_ofs to cope with STM32G4
for p in dma_map.keys():
for (dma,stream) in dma_map[p]:
map2 = []
map2.append((dma,stream+stream_ofs))
dma_map[p] = map2
if debug:
print('dma_map: ', dma_map)
print('idsets: ', idsets)
print('available: 0x%04x' % available)
return dma_map
def generate_DMAMUX_map(peripheral_list, noshare_list, dma_exclude, stream_ofs):
'''
generate a dma map suitable for a board with a DMAMUX1 and DMAMUX2
'''
# first split peripheral_list into those for DMAMUX1 and those for DMAMUX2
dmamux1_peripherals = []
dmamux2_peripherals = []
for p in peripheral_list:
if dma_name(p) == 'BDMA':
dmamux2_peripherals.append(p)
else:
dmamux1_peripherals.append(p)
map1 = generate_DMAMUX_map_mask(dmamux1_peripherals, 0xFFFF, noshare_list, dma_exclude, stream_ofs)
# there are 8 BDMA channels, but an issue has been found where if I2C4 and SPI6
# use neighboring channels then we sometimes lose a BDMA completion interrupt. To
# avoid this we set the BDMA available mask to 0x33, which forces the channels not to be
# adjacent. This issue was found on a CUAV-X7, with H743 RevV.
map2 = generate_DMAMUX_map_mask(dmamux2_peripherals, 0x55, noshare_list, dma_exclude, stream_ofs)
# translate entries from map2 to "DMA controller 3", which is used for BDMA
for p in map2.keys():
streams = []
for (controller,stream) in map2[p]:
streams.append((3,stream))
map2[p] = streams
both = map1
both.update(map2)
if debug:
print('dma_map_both: ', both)
return both
def sharing_allowed(p1, p2):
'''return true if sharing is allowed between p1 and p2'''
if p1 == p2:
return True
# don't allow RX and TX of same peripheral to share
if p1.endswith('_RX') and p2.endswith('_TX') and p1[:-2] == p2[:-2]:
return False
# don't allow sharing of two TIMn_UP channels as DShot code can't cope
if p1.endswith("_UP") and p2.endswith("_UP") and p1.startswith("TIM") and p2.startswith("TIM"):
return False
return True
def check_sharing(shared):
'''check if DMA channel sharing is OK'''
for p in shared:
# don't share UART RX with anything
if (p.startswith("UART") or p.startswith("USART")) and p.endswith("_RX"):
print("Illegal sharing of %s" % p)
return False
# don't share ADC with anything
if p.startswith("ADC"):
print("Illegal sharing of %s" % p)
return False
for p2 in shared:
if not sharing_allowed(p, p2):
print("Illegal sharing of %s and %s" % (p, p2))
return False
return True
def forbidden_list(p, peripheral_list):
'''given a peripheral, form a list of other peripherals we may not share with'''
ret = []
for p2 in peripheral_list:
if not sharing_allowed(p, p2):
ret.append(p2)
return ret
def write_dma_header(f, peripheral_list, mcu_type, dma_exclude=[],
dma_priority='', dma_noshare=[]):
'''write out a DMA resolver header file'''
global dma_map, have_DMAMUX, has_bdshot
timer_ch_periph = []
has_bdshot = False
# form a list of DMA priorities
priority_list = dma_priority.split()
# sort by priority
peripheral_list = sorted(peripheral_list, key=lambda x: get_list_index(x, priority_list))
# form a list of peripherals that can't share
noshare_list = dma_noshare[:]
try:
lib = importlib.import_module(mcu_type)
if hasattr(lib, "DMA_Map"):
dma_map = lib.DMA_Map
else:
return [], []
except ImportError:
print("Unable to find module for MCU %s" % mcu_type)
sys.exit(1)
if dma_map is None:
have_DMAMUX = True
# ensure we don't assign dma for TIMx_CH as we share that with TIMx_UP
timer_ch_periph = [periph for periph in peripheral_list if "_CH" in periph]
dma_exclude += timer_ch_periph
if mcu_type.startswith("STM32G4"):
stream_ofs = 1
else:
stream_ofs = 0
dma_map = generate_DMAMUX_map(peripheral_list, noshare_list, dma_exclude, stream_ofs)
print("Writing DMA map")
unassigned = []
curr_dict = {}
# build a map from peripheral name to a list of peripherals that it cannot share with
forbidden_map = {}
for p in peripheral_list:
forbidden_map[p] = forbidden_list(p, peripheral_list)
for periph in peripheral_list:
if "_CH" in periph:
has_bdshot = True # the list contains a CH port
if periph in dma_exclude:
continue
assigned = False
check_list = []
if not periph in dma_map:
print("Unknown peripheral function %s in DMA map for %s" %
(periph, mcu_type))
sys.exit(1)
if debug:
print('\n\n.....Starting lookup for', periph)
for streamchan in dma_map[periph]:
if debug:
print('........Possibility for', periph, streamchan)
stream = (streamchan[0], streamchan[1])
if check_possibility(periph, stream, curr_dict, dma_map,
check_list, [], forbidden_map):
curr_dict[periph] = stream
if debug:
print ('....................... Setting', periph, stream)
assigned = True
break
if assigned == False:
unassigned.append(periph)
if debug:
print('curr_dict: ', curr_dict)
print('unassigned: ', unassigned)
# now look for shared DMA possibilities
stream_assign = {}
for k in curr_dict.keys():
p = curr_dict[k]
if not p in stream_assign:
stream_assign[p] = [k]
else:
stream_assign[p].append(k)
unassigned_new = unassigned[:]
for periph in unassigned:
share_possibility = []
for streamchan in dma_map[periph]:
stream = (streamchan[0], streamchan[1])
share_ok = True
for periph2 in stream_assign[stream]:
if not can_share(periph, noshare_list) or not can_share(periph2, noshare_list) or periph2 in forbidden_map[periph]:
share_ok = False
if share_ok:
share_possibility.append(stream)
if share_possibility:
# sort the possible sharings so minimise impact on high priority streams
share_possibility = sorted(share_possibility, key=lambda x: get_sharing_priority(stream_assign[x], priority_list))
# and take the one with the least impact (lowest value for highest priority stream share)
stream = share_possibility[-1]
if debug:
print("Sharing %s on %s with %s" % (periph, stream,
stream_assign[stream]))
curr_dict[periph] = stream
stream_assign[stream].append(periph)
unassigned_new.remove(periph)
unassigned = unassigned_new
for key in sorted(curr_dict.keys()):
stream = curr_dict[key]
if len(stream_assign[stream]) > 1:
if not check_sharing(stream_assign[stream]):
sys.exit(1)
if debug:
print(stream_assign)
f.write("\n\n// auto-generated DMA mapping from dma_resolver.py\n")
if unassigned:
f.write(
"\n// Note: The following peripherals can't be resolved for DMA: %s\n\n"
% unassigned)
ordered_up_channels = []
# produce a list of timers ordered by the DMA streamid of the UP channel
# this is so that the dshot code can take out the UP DMA locks in the same order as I2C and SPI
for key in curr_dict.keys():
if "_UP" in key:
ordered_up_channels.append(key)
def order_by_streamid(key):
stream = curr_dict[key]
return (stream[0] * 8 + stream[1]) * 20 + int(key[3:-3])
ordered_up_channels = sorted(ordered_up_channels, key=order_by_streamid)
ordered_timers = []
for key in ordered_up_channels:
ordered_timers.append(key[0:-3])
for key in sorted(curr_dict.keys()):
stream = curr_dict[key]
shared = ''
if len(stream_assign[stream]) > 1:
shared = ' // shared %s' % ','.join(stream_assign[stream])
if curr_dict[key] == "STM32_DMA_STREAM_ID_ANY":
f.write("#define %-30s STM32_DMA_STREAM_ID_ANY\n" % (chibios_dma_define_name(key)+'STREAM'))
f.write("#define %-30s %s\n" % (chibios_dma_define_name(key)+'CHAN', dmamux_channel(key)))
continue
else:
dma_controller = curr_dict[key][0]
if dma_controller == 3:
# for BDMA we use 3 in the resolver
dma_controller = 1
f.write("#define %-30s STM32_DMA_STREAM_ID(%u, %u)%s\n" %
(chibios_dma_define_name(key)+'STREAM', dma_controller,
curr_dict[key][1], shared))
if have_DMAMUX and "_UP" in key:
# share the dma with rest of the _CH ports
for ch in range(1,5):
chkey = key.replace('_UP', '_CH{}'.format(ch))
if chkey not in timer_ch_periph:
continue
f.write("#define %-30s STM32_DMA_STREAM_ID(%u, %u)%s\n" %
(chibios_dma_define_name(chkey)+'STREAM', dma_controller,
curr_dict[key][1], shared))
for streamchan in dma_map[key]:
if stream == (streamchan[0], streamchan[1]):
if have_DMAMUX:
chan = dmamux_channel(key)
else:
chan = streamchan[2]
f.write("#define %-30s %s\n" %
(chibios_dma_define_name(key)+'CHAN', chan))
if have_DMAMUX and "_UP" in key:
# share the devid with rest of the _CH ports
for ch in range(1,5):
chkey = key.replace('_UP', '_CH{}'.format(ch))
if chkey not in timer_ch_periph:
continue
f.write("#define %-30s %s\n" %
(chibios_dma_define_name(chkey)+'CHAN',
chan.replace('_UP', '_CH{}'.format(ch))))
break
# now generate UARTDriver.cpp DMA config lines
f.write("\n\n// generated UART DMA configuration lines\n")
for u in range(1, 9):
key = None
if 'USART%u_TX' % u in peripheral_list:
key = 'USART%u' % u
if 'UART%u_TX' % u in peripheral_list:
key = 'UART%u' % u
if 'USART%u_RX' % u in peripheral_list:
key = 'USART%u' % u
if 'UART%u_RX' % u in peripheral_list:
key = 'UART%u' % u
if key is None:
continue
if have_DMAMUX:
# use DMAMUX ID as channel number
dma_rx_chn = dmamux_channel(key + "_RX")
dma_tx_chn = dmamux_channel(key + "_TX")
else:
dma_rx_chn = "STM32_UART_%s_RX_%s_CHAN" % (key, dma_name(key))
dma_tx_chn = "STM32_UART_%s_TX_%s_CHAN" % (key, dma_name(key))
f.write("#define STM32_%s_RX_DMA_CONFIG " % key)
if key + "_RX" in curr_dict:
f.write(
"true, STM32_UART_%s_RX_%s_STREAM, %s\n" % (key, dma_name(key), dma_rx_chn))
else:
f.write("false, 0, 0\n")
f.write("#define STM32_%s_TX_DMA_CONFIG " % key)
if key + "_TX" in curr_dict:
f.write(
"true, STM32_UART_%s_TX_%s_STREAM, %s\n" % (key, dma_name(key), dma_tx_chn))
else:
f.write("false, 0, 0\n")
# now generate SPI DMA streams lines
f.write("\n\n// generated SPI DMA configuration lines\n")
for u in range(1, 9):
if 'SPI%u_TX' % u in peripheral_list and 'SPI%u_RX' % u in peripheral_list:
key = 'SPI%u' % u
else:
continue
f.write('#define STM32_SPI_%s_DMA_STREAMS STM32_SPI_%s_TX_%s_STREAM, STM32_SPI_%s_RX_%s_STREAM\n' % (
key, key, dma_name(key), key, dma_name(key)))
return unassigned, ordered_timers
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser("dma_resolver.py")
parser.add_option("-M", "--mcu", default=None, help='MCU type')
parser.add_option(
"-D", "--debug", action='store_true', help='enable debug')
parser.add_option(
"-P",
"--peripherals",
default=None,
help='peripheral list (comma separated)')
opts, args = parser.parse_args()
if opts.peripherals is None:
print("Please provide a peripheral list with -P")
sys.exit(1)
if opts.mcu is None:
print("Please provide a MCU type with -<")
sys.exit(1)
debug = opts.debug
plist = opts.peripherals.split(',')
mcu_type = opts.mcu
f = open("dma.h", "w")
write_dma_header(f, plist, mcu_type)
| gpl-3.0 |
mezz64/home-assistant | homeassistant/components/flunearyou/__init__.py | 3 | 6840 | """The flunearyou component."""
import asyncio
from datetime import timedelta
from pyflunearyou import Client
from pyflunearyou.errors import FluNearYouError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from .const import (
CATEGORY_CDC_REPORT,
CATEGORY_USER_REPORT,
DATA_CLIENT,
DOMAIN,
LOGGER,
SENSORS,
TOPIC_UPDATE,
)
DATA_LISTENER = "listener"
DEFAULT_SCAN_INTERVAL = timedelta(minutes=30)
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN): vol.Schema(
{
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
}
)
},
extra=vol.ALLOW_EXTRA,
)
@callback
def async_get_api_category(sensor_type):
"""Get the category that a particular sensor type belongs to."""
try:
return next(
(
category
for category, sensors in SENSORS.items()
for sensor in sensors
if sensor[0] == sensor_type
)
)
except StopIteration as err:
raise ValueError(f"Can't find category sensor type: {sensor_type}") from err
async def async_setup(hass, config):
"""Set up the Flu Near You component."""
hass.data[DOMAIN] = {DATA_CLIENT: {}, DATA_LISTENER: {}}
if DOMAIN not in config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_LATITUDE: config[DOMAIN].get(CONF_LATITUDE, hass.config.latitude),
CONF_LONGITUDE: config[DOMAIN].get(
                    CONF_LONGITUDE, hass.config.longitude
),
},
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up Flu Near You as config entry."""
websession = aiohttp_client.async_get_clientsession(hass)
fny = FluNearYouData(
hass,
Client(websession),
config_entry.data.get(CONF_LATITUDE, hass.config.latitude),
config_entry.data.get(CONF_LONGITUDE, hass.config.longitude),
)
await fny.async_update()
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = fny
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
)
async def refresh(event_time):
"""Refresh data from Flu Near You."""
await fny.async_update()
hass.data[DOMAIN][DATA_LISTENER][config_entry.entry_id] = async_track_time_interval(
hass, refresh, DEFAULT_SCAN_INTERVAL
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload an Flu Near You config entry."""
hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)
remove_listener = hass.data[DOMAIN][DATA_LISTENER].pop(config_entry.entry_id)
remove_listener()
await hass.config_entries.async_forward_entry_unload(config_entry, "sensor")
return True
class FluNearYouData:
"""Define a data object to retrieve info from Flu Near You."""
def __init__(self, hass, client, latitude, longitude):
"""Initialize."""
self._async_cancel_time_interval_listener = None
self._client = client
self._hass = hass
self.data = {}
self.latitude = latitude
self.longitude = longitude
self._api_category_count = {
CATEGORY_CDC_REPORT: 0,
CATEGORY_USER_REPORT: 0,
}
self._api_category_locks = {
CATEGORY_CDC_REPORT: asyncio.Lock(),
CATEGORY_USER_REPORT: asyncio.Lock(),
}
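        # Note: the per-category asyncio.Lock ensures that when several sensors register
        # interest at the same time, only the first one triggers the initial API call for
        # that category (see async_register_api_interest below).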
async def _async_get_data_from_api(self, api_category):
"""Update and save data for a particular API category."""
if self._api_category_count[api_category] == 0:
return
if api_category == CATEGORY_CDC_REPORT:
api_coro = self._client.cdc_reports.status_by_coordinates(
self.latitude, self.longitude
)
else:
api_coro = self._client.user_reports.status_by_coordinates(
self.latitude, self.longitude
)
try:
self.data[api_category] = await api_coro
except FluNearYouError as err:
LOGGER.error("Unable to get %s data: %s", api_category, err)
self.data[api_category] = None
async def _async_update_listener_action(self, now):
"""Define an async_track_time_interval action to update data."""
await self.async_update()
@callback
def async_deregister_api_interest(self, sensor_type):
"""Decrement the number of entities with data needs from an API category."""
# If this deregistration should leave us with no registration at all, remove the
# time interval:
if sum(self._api_category_count.values()) == 0:
if self._async_cancel_time_interval_listener:
self._async_cancel_time_interval_listener()
self._async_cancel_time_interval_listener = None
return
api_category = async_get_api_category(sensor_type)
self._api_category_count[api_category] -= 1
async def async_register_api_interest(self, sensor_type):
"""Increment the number of entities with data needs from an API category."""
# If this is the first registration we have, start a time interval:
if not self._async_cancel_time_interval_listener:
self._async_cancel_time_interval_listener = async_track_time_interval(
self._hass,
self._async_update_listener_action,
DEFAULT_SCAN_INTERVAL,
)
api_category = async_get_api_category(sensor_type)
self._api_category_count[api_category] += 1
# If a sensor registers interest in a particular API call and the data doesn't
# exist for it yet, make the API call and grab the data:
async with self._api_category_locks[api_category]:
if api_category not in self.data:
await self._async_get_data_from_api(api_category)
async def async_update(self):
"""Update Flu Near You data."""
tasks = [
self._async_get_data_from_api(api_category)
for api_category in self._api_category_count
]
await asyncio.gather(*tasks)
LOGGER.debug("Received new data")
async_dispatcher_send(self._hass, TOPIC_UPDATE)
| apache-2.0 |
Fafou/Sick-Beard | sickbeard/providers/piratebay/__init__.py | 30 | 12946 | # Author: Mr_Orange <mr_orange@hotmail.it>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import re
import urllib, urllib2
import sys
import os
import sickbeard
from sickbeard.providers import generic
from sickbeard.common import Quality
from sickbeard.name_parser.parser import NameParser, InvalidNameException
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.common import Overview
from sickbeard.exceptions import ex
from sickbeard import encodingKludge as ek
proxy_dict = {
'Getprivate.eu (NL)' : 'http://getprivate.eu/',
'15bb51.info (US)' : 'http://15bb51.info/',
'Hideme.nl (NL)' : 'http://hideme.nl/',
'Rapidproxy.us (GB)' : 'http://rapidproxy.us/',
'Proxite.eu (DE)' :'http://proxite.eu/',
'Shieldmagic.com (GB)' : 'http://www.shieldmagic.com/',
'Webproxy.cz (CZ)' : 'http://webproxy.cz/',
'Freeproxy.cz (CZ)' : 'http://www.freeproxy.cz/',
}
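# These are Glype-style web proxies; ThePirateBayWebproxy below builds URLs of the
# form "<proxy>browse.php?u=<target url>&b=32" when proxying is enabled.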
class ThePirateBayProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "PirateBay")
self.supportsBacklog = True
self.proxy = ThePirateBayWebproxy()
self.url = 'http://thepiratebay.se/'
self.searchurl = self.url+'search/%s/0/7/200' # order by seed
self.re_title_url = '/torrent/(?P<id>\d+)/(?P<title>.*?)//1".+?(?P<url>magnet.*?)//1".+?(?P<seeders>\d+)</td>.+?(?P<leechers>\d+)</td>'
def isEnabled(self):
return sickbeard.THEPIRATEBAY
def imageName(self):
return 'piratebay.png'
def getQuality(self, item):
quality = Quality.nameQuality(item[0])
return quality
def _reverseQuality(self,quality):
quality_string = ''
if quality == Quality.SDTV:
quality_string = 'HDTV x264'
if quality == Quality.SDDVD:
quality_string = 'DVDRIP'
elif quality == Quality.HDTV:
quality_string = '720p HDTV x264'
elif quality == Quality.FULLHDTV:
quality_string = '1080p HDTV x264'
elif quality == Quality.RAWHDTV:
quality_string = '1080i HDTV mpeg2'
elif quality == Quality.HDWEBDL:
quality_string = '720p WEB-DL'
elif quality == Quality.FULLHDWEBDL:
quality_string = '1080p WEB-DL'
elif quality == Quality.HDBLURAY:
quality_string = '720p Bluray x264'
elif quality == Quality.FULLHDBLURAY:
quality_string = '1080p Bluray x264'
return quality_string
def _find_season_quality(self,title,torrent_id):
""" Return the modified title of a Season Torrent with the quality found inspecting torrent file list """
mediaExtensions = ['avi', 'mkv', 'wmv', 'divx',
                           'vob', 'dvr-ms', 'wtv', 'ts',
'ogv', 'rar', 'zip']
quality = Quality.UNKNOWN
fileName = None
fileURL = self.proxy._buildURL(self.url+'ajax_details_filelist.php?id='+str(torrent_id))
data = self.getURL(fileURL)
if not data:
return None
filesList = re.findall('<td.+>(.*?)</td>',data)
if not filesList:
logger.log(u"Unable to get the torrent file list for "+title, logger.ERROR)
for fileName in filter(lambda x: x.rpartition(".")[2].lower() in mediaExtensions, filesList):
quality = Quality.nameQuality(os.path.basename(fileName))
if quality != Quality.UNKNOWN: break
if fileName!=None and quality == Quality.UNKNOWN:
quality = Quality.assumeQuality(os.path.basename(fileName))
if quality == Quality.UNKNOWN:
logger.log(u"No Season quality for "+title, logger.DEBUG)
return None
try:
myParser = NameParser()
parse_result = myParser.parse(fileName)
except InvalidNameException:
return None
logger.log(u"Season quality for "+title+" is "+Quality.qualityStrings[quality], logger.DEBUG)
if parse_result.series_name and parse_result.season_number:
title = parse_result.series_name+' S%02d' % int(parse_result.season_number)+' '+self._reverseQuality(quality)
return title
def _get_season_search_strings(self, show, season=None):
search_string = {'Episode': []}
if not show:
return []
seasonEp = show.getAllEpisodes(season)
wantedEp = [x for x in seasonEp if show.getOverview(x.status) in (Overview.WANTED, Overview.QUAL)]
#If Every episode in Season is a wanted Episode then search for Season first
if wantedEp == seasonEp and not show.air_by_date:
search_string = {'Season': [], 'Episode': []}
for show_name in set(show_name_helpers.allPossibleShowNames(show)):
ep_string = show_name +' S%02d' % int(season) #1) ShowName SXX
search_string['Season'].append(ep_string)
ep_string = show_name+' Season '+str(season)+' -Ep*' #2) ShowName Season X
search_string['Season'].append(ep_string)
#Building the search string with the episodes we need
for ep_obj in wantedEp:
search_string['Episode'] += self._get_episode_search_strings(ep_obj)[0]['Episode']
#If no Episode is needed then return an empty list
if not search_string['Episode']:
return []
return [search_string]
def _get_episode_search_strings(self, ep_obj,french=None):
search_string = {'Episode': []}
if not ep_obj:
return []
if ep_obj.show.air_by_date:
for show_name in set(show_name_helpers.allPossibleShowNames(ep_obj.show)):
ep_string = show_name_helpers.sanitizeSceneName(show_name) +' '+ str(ep_obj.airdate)
search_string['Episode'].append(ep_string)
else:
for show_name in set(show_name_helpers.allPossibleShowNames(ep_obj.show)):
ep_string = show_name_helpers.sanitizeSceneName(show_name) +' '+ \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season, 'episodenumber': ep_obj.scene_episode} +'|'+\
sickbeard.config.naming_ep_type[0] % {'seasonnumber': ep_obj.scene_season, 'episodenumber': ep_obj.scene_episode} +'|'+\
                sickbeard.config.naming_ep_type[3] % {'seasonnumber': ep_obj.scene_season, 'episodenumber': ep_obj.scene_episode}
search_string['Episode'].append(ep_string)
return [search_string]
def _doSearch(self, search_params, show=None, season=None, french=None):
results = []
items = {'Season': [], 'Episode': []}
for mode in search_params.keys():
for search_string in search_params[mode]:
searchURL = self.proxy._buildURL(self.searchurl %(urllib.quote(search_string.replace('!','').encode("utf-8"))))
logger.log(u"Search string: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
return []
re_title_url = self.proxy._buildRE(self.re_title_url)
#Extracting torrent information from data returned by searchURL
match = re.compile(re_title_url, re.DOTALL ).finditer(urllib.unquote(data))
for torrent in match:
                    title = torrent.group('title').replace('_','.')  # not sure why, but SickBeard skips releases with '_' in the name
url = torrent.group('url')
id = int(torrent.group('id'))
seeders = int(torrent.group('seeders'))
leechers = int(torrent.group('leechers'))
#Filter unseeded torrent
if seeders == 0 or not title \
or not show_name_helpers.filterBadReleases(title):
continue
#Accept Torrent only from Good People for every Episode Search
if sickbeard.THEPIRATEBAY_TRUSTED and re.search('(VIP|Trusted|Helper)',torrent.group(0))== None:
logger.log(u"ThePirateBay Provider found result "+torrent.group('title')+" but that doesn't seem like a trusted result so I'm ignoring it",logger.DEBUG)
continue
#Try to find the real Quality for full season torrent analyzing files in torrent
if mode == 'Season' and Quality.nameQuality(title) == Quality.UNKNOWN:
if not self._find_season_quality(title,id): continue
item = title, url, id, seeders, leechers
items[mode].append(item)
#For each search mode sort all the items by seeders
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def _get_title_and_url(self, item):
title, url, id, seeders, leechers = item
if url:
url = url.replace('&','&')
return (title, url)
def getURL(self, url, headers=None):
if not headers:
headers = []
# Glype Proxies does not support Direct Linking.
# We have to fake a search on the proxy site to get data
if self.proxy.isEnabled():
headers.append(('Referer', self.proxy.getProxyURL()))
result = None
try:
result = helpers.getURL(url, headers)
except (urllib2.HTTPError, IOError), e:
logger.log(u"Error loading "+self.name+" URL: " + str(sys.exc_info()) + " - " + ex(e), logger.ERROR)
return None
return result
def downloadResult(self, result):
"""
Save the result to disk.
"""
#Hack for rtorrent user (it will not work for other torrent client)
if sickbeard.TORRENT_METHOD == "blackhole" and result.url.startswith('magnet'):
magnetFileName = ek.ek(os.path.join, sickbeard.TORRENT_DIR, helpers.sanitizeFileName(result.name) + '.' + self.providerType)
magnetFileContent = 'd10:magnet-uri' + `len(result.url)` + ':' + result.url + 'e'
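            # The string written is a minimal bencoded dictionary, e.g. a 60-character
            # magnet URI becomes "d10:magnet-uri60:magnet:?xt=...e"; presumably rtorrent
            # picks this up from the blackhole/watch directory like a .torrent file.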
try:
fileOut = open(magnetFileName, 'wb')
fileOut.write(magnetFileContent)
fileOut.close()
helpers.chmodAsParent(magnetFileName)
except IOError, e:
logger.log("Unable to save the file: "+ex(e), logger.ERROR)
return False
logger.log(u"Saved magnet link to "+magnetFileName+" ", logger.MESSAGE)
return True
class ThePirateBayWebproxy:
def __init__(self):
self.Type = 'GlypeProxy'
self.param = 'browse.php?u='
self.option = '&b=32'
def isEnabled(self):
""" Return True if we Choose to call TPB via Proxy """
return sickbeard.THEPIRATEBAY_PROXY
def getProxyURL(self):
""" Return the Proxy URL Choosen via Provider Setting """
return str(sickbeard.THEPIRATEBAY_PROXY_URL)
def _buildURL(self,url):
""" Return the Proxyfied URL of the page """
if self.isEnabled():
url = self.getProxyURL() + self.param + url + self.option
return url
def _buildRE(self,regx):
""" Return the Proxyfied RE string """
if self.isEnabled():
regx = re.sub('//1',self.option,regx).replace('&','&')
else:
regx = re.sub('//1','',regx)
return regx
provider = ThePirateBayProvider() | gpl-3.0 |
supertree-toolkit/stk | stk_gui/stk_gui/tree.py | 1 | 14918 | #/usr/bin/env python
# This file is part of Diamond.
#
# Diamond is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diamond is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diamond. If not, see <http://www.gnu.org/licenses/>.
import base64
import bz2
import copy
import cPickle as pickle
import cStringIO as StringIO
import re
import zlib
from lxml import etree
import sys
import gobject
import debug
import choice
import mixedtree
class Tree(gobject.GObject):
"""This class maps pretty much 1-to-1 with an xml tree.
It is used to represent the options in-core."""
__gsignals__ = { "on-set-data" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (str,)),
"on-set-attr" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (str, str))}
def __init__(self, name="", schemaname="", attrs={}, children=None, cardinality='', datatype=None, doc=None):
gobject.GObject.__init__(self)
# name: the element name in the options XML
# e.g. "fluidity_options"
self.name = name
# schemaname: the label given to it in the Xvif parsing of the schema
# this is necessary to walk the tree to see what possible valid
# children this node could have
# e.g. "0:elt" for the root node.
self.schemaname = schemaname
# Any children?
if children is None:
self.children = []
else:
self.children = children
# The cardinality of a node is
# how many you must/can have, e.g.
# "exactly one", "zero or one", "any amount", etc.
# This is set by Schema.valid_children for candidate
# nodes in the tree, you see.
# Possible choices: '' '?' '*' '+'
# with the usual regex meanings.
self.cardinality = cardinality
# Used for Optional or ZeroOrMore
# trees. False means it is present but inactive.
# must be set if cardinality is changed!
self.set_default_active()
# Any documentation associated with this node?
self.doc = doc
# What is the parent of this tree?
# None means the root node.
self.parent = None
# Does this node require attention from the user?
self.valid = False
# The datatype that this tree stores and the data stored
if isinstance(datatype, tuple) and len(datatype) == 1:
self.datatype = "fixed"
self.data = datatype[0]
else:
self.datatype = datatype
self.data = None
# The attributes of the tree
self.attrs = {}
for key in attrs.keys():
if isinstance(attrs[key][0], tuple) and len(attrs[key][0]) == 1:
self.attrs[key] = ("fixed", attrs[key][0][0])
else:
self.attrs[key] = attrs[key]
self.recompute_validity()
def set_attr(self, attr, val):
"""Set an attribute."""
(datatype, curval) = self.attrs[attr]
(invalid, newdata) = self.valid_data(datatype, val)
if invalid:
raise Exception, "invalid data: (%s, %s)" % (datatype, val)
self.attrs[attr] = (datatype, newdata)
self.recompute_validity()
self.emit("on-set-attr", attr, val)
def get_attr(self, attr):
"""Get an attribute."""
(datatype, curval) = self.attrs[attr]
return curval
def get_attrs(self):
"""Get all attributes"""
return self.attrs
def set_data(self, data):
(invalid, data) = self.valid_data(self.datatype, data)
if invalid:
raise Exception, "invalid data: (%s, %s)" % (str(self.datatype), data)
self.data = data
self.recompute_validity()
self.emit("on-set-data", data)
def valid_data(self, datatype, data):
if datatype is None:
raise Exception, "datatype is None!"
elif datatype == "fixed":
raise Exception, "datatype is fixed!"
datatypes_to_check = []
if isinstance(datatype, tuple):
if isinstance(datatype[0], tuple):
fixed_values = datatype[0]
else:
fixed_values = datatype
if data in fixed_values:
return (False, data)
else:
if not isinstance(datatype[0], tuple):
return (True, data)
datatypes_to_check = list(datatype[1:])
else:
datatypes_to_check = [datatype]
for datatype in datatypes_to_check:
try:
tempval = datatype(data)
if isinstance(tempval, str):
data = tempval
return (False, data)
except:
pass
return (True, data)
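    # Note the return convention used throughout this class: (invalid, data), i.e. the
    # first element is True when the value could NOT be coerced to the given datatype.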
def validity_check(self, datatype, data):
"""
Check to see if the supplied data with supplied type can be stored in a
tree.Tree.
"""
(invalid, new_data) = self.valid_data(datatype, data)
if not invalid and isinstance(new_data, str) and new_data != "":
if new_data != data and self.validity_check(datatype, new_data) is None:
return None
else:
return new_data
else:
return None
def copy(self):
new_copy = Tree()
for attr in ["attrs", "name", "schemaname", "doc", "cardinality", "datatype", "data", "active", "valid"]:
setattr(new_copy, attr, copy.copy(getattr(self, attr)))
new_copy.parent = self.parent
new_copy.children = []
return new_copy
def recompute_validity(self):
new_valid = True
# if any children are invalid,
# we are invalid too
for child in self.children:
if child.active is False: continue
if child.__class__ is choice.Choice:
child = child.get_current_tree()
if child.valid is False:
new_valid = False
# if any attributes are unset,
# we are invalid.
if self.active:
for attr in self.attrs.keys():
(datatype, val) = self.attrs[attr]
if not datatype is None and val is None:
new_valid = False
# if we're supposed to have data and don't,
# we are invalid.
if self.datatype is not None:
if not hasattr(self, "data"):
new_valid = False
if self.data is None:
new_valid = False
# so we are valid.
# in either case, let's let the parent know.
self.valid = new_valid
if self.parent is not None:
self.parent.recompute_validity()
def find_or_add(self, treelist):
"""Append a child node to this node in the tree.
If it already exists, make tree point to it."""
outlist = []
for tree in treelist:
new_tree = None
found = False
for t in self.children:
if t.schemaname == tree.schemaname:
tree = t
found = True
break
if not found:
tree.set_parent(self)
self.children.append(tree)
tree.recompute_validity()
outlist.append(tree)
for tree in outlist:
if tree.cardinality in ['+', '*']:
inactive_list = [x for x in outlist if x.schemaname == tree.schemaname and x.active is False]
if len(inactive_list) > 0: continue
else:
new_tree = self.add_inactive_instance(tree)
outlist.insert(outlist.index(tree)+1, new_tree)
return outlist
def write(self, filename):
if isinstance(filename, str):
file = open(filename, "w")
else:
file = filename
xmlTree=etree.tostring(self.write_core(None), pretty_print = True, xml_declaration = True, encoding="utf-8")
file.write(xmlTree)
def write_core(self, parent):
"""Write to XML; this is the part that recurses"""
sub_tree=etree.Element(self.name)
for key in self.attrs:
val = self.attrs[key]
output_val = val[1]
if output_val is not None:
sub_tree.set(unicode(key), unicode(output_val))
for child in self.children:
if child.active is True:
child.write_core(sub_tree)
if self.data is not None:
sub_tree.text = unicode(self.data)
if parent is not None:
parent.append(sub_tree)
return sub_tree
def pickle(self):
if hasattr(self, "xmlnode"):
del self.xmlnode
return base64.b64encode(bz2.compress(pickle.dumps(self)))
def unpickle(self, pick):
return pickle.loads(bz2.decompress(base64.b64decode(pick)))
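    # pickle()/unpickle() are symmetric: pickle.dumps -> bz2.compress -> base64 encode on
    # the way out, base64 decode -> bz2.decompress -> pickle.loads on the way back
    # (pickle() first drops the transient xmlnode attribute so the object serialises).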
def print_str(self):
s = "name: %s at %s\n" % (self.name, hex(id(self)))
s = s + "schemaname: %s\n" % self.schemaname
s = s + "attrs: %s\n" % self.attrs
s = s + "children: %s\n" % self.children
if self.parent is not None:
s = s + "parent: %s %s at %s\n" % (self.parent.__class__, self.parent.name, hex(id(self.parent)))
else:
s = s + "parent: %s at %s\n" % (self.parent.__class__, hex(id(self.parent)))
s = s + "datatype: %s\n" % str(self.datatype)
s = s + "data: %s\n" % str(self.data)
s = s + "cardinality: %s\n" % self.cardinality
s = s + "active: %s\n" % self.active
s = s + "valid: %s\n" % self.valid
return s
def set_default_active(self):
self.active = True
if self.cardinality == '?' or self.cardinality == '*':
self.active = False
def count_children_by_schemaname(self, schemaname):
count = len(filter(lambda x: x.schemaname == schemaname, self.children))
return count
def get_children_by_schemaname(self, schemaname):
return filter(lambda x: x.schemaname == schemaname, self.children)
def delete_child_by_ref(self, ref):
self.children.remove(ref)
def add_inactive_instance(self, tree):
for t in self.children:
if t.schemaname == tree.schemaname and t.active is False:
return t
new_tree = tree.copy()
new_tree.active = False
if new_tree.__class__ is Tree:
new_tree.children = []
new_tree.parent = tree.parent
self.children.insert(self.children.index(tree)+1, new_tree)
return new_tree
def print_recursively(self, indent=""):
s = self.__str__()
debug.dprint(indent + ' ' + s.replace('\n', '\n' + indent + ' '), 0, newline = False)
debug.dprint("", 0)
for i in range(len(self.children)):
if isinstance(self.children[i], Tree):
self.children[i].print_recursively(indent + ">>")
elif isinstance(self.children[i], choice.Choice):
ref = self.children[i].get_current_tree()
ref.print_recursively(indent + ">>")
if i < len(self.children) - 1:
debug.dprint("", 0)
return
def add_children(self, schema):
l = schema.valid_children(self)
l = self.find_or_add(l)
for child in self.children:
child.add_children(schema)
def matches(self, text, case_sensitive = False):
if case_sensitive:
text_re = re.compile(text)
else:
text_re = re.compile(text, re.IGNORECASE)
if not text_re.search(self.name) is None:
return True
if not self.doc is None:
if not text_re.search(self.doc) is None:
return True
for key in self.attrs:
if not text_re.search(key) is None:
return True
if not self.get_attr(key) is None:
if not text_re.search(self.get_attr(key)) is None:
return True
if not self.data is None:
if not text_re.search(self.data) is None:
return True
return False
def get_current_tree(self):
return self
def get_possible_names(self):
return [self.name]
def set_parent(self, parent):
self.parent = parent
def find_tree(self, name):
if name == self.name:
return self
else:
raise Exception, "ban the bomb"
def choices(self):
return [self]
def is_comment(self):
"""
Test whether the given node is a comment node.
"""
if not self.name == "comment":
return False
if not self.attrs == {}:
return False
if not self.children == []:
return False
if not self.datatype is str:
return False
if not self.cardinality == "?":
return False
return True
def get_comment(self):
"""
Return the first comment found as a child of the supplied node, or None if
none found.
"""
for child in self.children:
if child.is_comment():
return child
return None
def is_tensor(self, geometry_dim_tree):
return False
def is_python_code(self):
"""
Perform a series of tests on the current Tree, to determine if
it is intended to be used to store python code data.
"""
try:
lang = self.selected_node.get_attr("language")
if lang == "python":
return True
except:
pass
return False
def get_display_name(self):
"""
This is a fluidity hack, allowing the name displayed in the treeview on the
left to be different to the element name. If it has an attribute name="xxx",
element_tag (xxx) is displayed.
"""
name = self.get_name()
if name is None:
return self.name
else:
return self.name + " (" + name + ")"
def get_name(self):
if "name" in self.attrs:
name = self.attrs["name"][1]
return name
return None
def get_children(self):
return self.children
def get_choices(self):
return [self]
def is_hidden(self):
"""
Tests whether the supplied tree should be hidden in view.
"""
return self.is_comment() or self.name in ["integer_value", "real_value", "string_value", "logical_value"]
def get_name_path(self, leaf = True):
name = self.get_display_name() if leaf else self.get_name()
if self.parent is None:
return name
else:
pname = self.parent.get_name_path(False)
if name is None:
return pname
elif pname is None:
return name
else:
return pname + "/" + name
def get_mixed_data(self):
integers = [child for child in self.children if child.name == "integer_value"]
reals = [child for child in self.children if child.name == "real_value"]
logicals = [child for child in self.children if child.name == "logical_value"]
strings = [child for child in self.children if child.name == "string_value"]
child = None
if len(integers) > 0:
child = integers[0]
if len(reals) > 0:
child = reals[0]
if len(logicals) > 0:
child = logicals[0]
if len(strings) > 0:
child = strings[0]
if child is None:
return self
else:
return mixedtree.MixedTree(self, child)
def is_sliceable(self):
mixed = self.get_mixed_data()
if isinstance(mixed, mixedtree.MixedTree):
return True
return (self.datatype is not None and self.datatype != "fixed") or self.attrs
def __str__(self):
return self.get_display_name()
def __repr__(self):
return self.get_name_path()
gobject.type_register(Tree)
| gpl-3.0 |
tacaswell/scikit-beam | skbeam/core/fitting/tests/test_background.py | 5 | 3709 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li (lili@bnl.gov) #
# created on 08/16/2014 #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import assert_allclose
from skbeam.core.fitting import snip_method
def test_snip_method():
"""
test of background function from xrf fit
"""
xmin = 0
# three gaussian peak
xval = np.arange(-20, 20, 0.1)
std = 0.01
yval1 = np.exp(-xval**2 / 2 / std**2)
yval2 = np.exp(-(xval - 10)**2 / 2 / std**2)
yval3 = np.exp(-(xval + 10)**2 / 2 / std**2)
# background as exponential
a0 = 1.0
a1 = 0.1
a2 = 0.5
bg_true = a0 * np.exp(-xval * a1 + a2)
yval = yval1 + yval2 + yval3 + bg_true
bg = snip_method(yval,
0.0, 1.0, 0.0,
xmin=xmin, xmax=3000,
spectral_binning=None, width=0.1)
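    # The 0.0, 1.0, 0.0 positional arguments are (assumed from the snip_method signature)
    # the energy-calibration offset, linear and quadratic terms; here they form a plain
    # identity calibration for the synthetic spectrum.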
# ignore the boundary part
cutval = 15
bg_true_part = bg_true[cutval:-cutval]
bg_cal_part = bg[cutval:-cutval]
assert_allclose(bg_true_part, bg_cal_part, rtol=1e-3, atol=1e-1)
| bsd-3-clause |
margguo/python-ivi | ivi/agilent/agilentDSO90604A.py | 7 | 1686 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent90000 import *
class agilentDSO90604A(agilent90000):
"Agilent Infiniium DSO90604A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DSO90604A')
super(agilentDSO90604A, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 6e9
self._init_channels()
| mit |
ujenmr/ansible | lib/ansible/modules/network/aci/aci_encap_pool.py | 27 | 8605 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_encap_pool
short_description: Manage encap pools (fvns:VlanInstP, fvns:VxlanInstP, fvns:VsanInstP)
description:
- Manage vlan, vxlan, and vsan pools on Cisco ACI fabrics.
version_added: '2.5'
options:
description:
description:
- Description for the C(pool).
type: str
aliases: [ descr ]
pool:
description:
- The name of the pool.
type: str
aliases: [ name, pool_name ]
pool_allocation_mode:
description:
- The method used for allocating encaps to resources.
- Only vlan and vsan support allocation modes.
type: str
choices: [ dynamic, static ]
aliases: [ allocation_mode, mode ]
pool_type:
description:
- The encap type of C(pool).
type: str
required: yes
aliases: [ type ]
choices: [ vlan, vsan, vxlan ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
seealso:
- module: aci_encap_pool_range
- module: aci_vlan_pool
- name: APIC Management Information Model reference
description: More information about the internal APIC classes B(fvns:VlanInstP),
B(fvns:VxlanInstP) and B(fvns:VsanInstP)
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
'''
EXAMPLES = r'''
- name: Add a new vlan pool
aci_encap_pool:
host: apic
username: admin
password: SomeSecretPassword
pool: production
pool_type: vlan
description: Production VLANs
state: present
delegate_to: localhost
- name: Remove a vlan pool
aci_encap_pool:
host: apic
username: admin
password: SomeSecretPassword
pool: production
pool_type: vlan
state: absent
delegate_to: localhost
- name: Query a vlan pool
aci_encap_pool:
host: apic
username: admin
password: SomeSecretPassword
pool: production
pool_type: vlan
state: query
delegate_to: localhost
register: query_result
- name: Query all vlan pools
aci_encap_pool:
host: apic
username: admin
password: SomeSecretPassword
pool_type: vlan
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
ACI_POOL_MAPPING = dict(
vlan=dict(
aci_class='fvnsVlanInstP',
aci_mo='infra/vlanns-',
),
vxlan=dict(
aci_class='fvnsVxlanInstP',
aci_mo='infra/vxlanns-',
),
vsan=dict(
aci_class='fvnsVsanInstP',
aci_mo='infra/vsanns-',
),
)
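# Illustrative: a static vlan pool named "production" yields the relative name
# "infra/vlanns-[production]-static" (DN uni/infra/vlanns-[production]-static), built
# from aci_mo plus the "[pool]-allocation_mode" form required for vlan/vsan pools
# (see the pool_name handling in main() below).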
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
pool_type=dict(type='str', required=True, aliases=['type'], choices=['vlan', 'vsan', 'vxlan']),
description=dict(type='str', aliases=['descr']),
pool=dict(type='str', aliases=['name', 'pool_name']), # Not required for querying all objects
pool_allocation_mode=dict(type='str', aliases=['allocation_mode', 'mode'], choices=['dynamic', 'static']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['pool']],
['state', 'present', ['pool']],
],
)
description = module.params['description']
pool = module.params['pool']
pool_type = module.params['pool_type']
pool_allocation_mode = module.params['pool_allocation_mode']
state = module.params['state']
aci_class = ACI_POOL_MAPPING[pool_type]['aci_class']
aci_mo = ACI_POOL_MAPPING[pool_type]['aci_mo']
pool_name = pool
# ACI Pool URL requires the pool_allocation mode for vlan and vsan pools (ex: uni/infra/vlanns-[poolname]-static)
if pool_type != 'vxlan' and pool is not None:
if pool_allocation_mode is not None:
pool_name = '[{0}]-{1}'.format(pool, pool_allocation_mode)
else:
module.fail_json(msg="ACI requires parameter 'pool_allocation_mode' for 'pool_type' of 'vlan' and 'vsan' when parameter 'pool' is provided")
# Vxlan pools do not support pool allocation modes
if pool_type == 'vxlan' and pool_allocation_mode is not None:
module.fail_json(msg="vxlan pools do not support setting the 'pool_allocation_mode'; please remove this parameter from the task")
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class=aci_class,
aci_rn='{0}{1}'.format(aci_mo, pool_name),
module_object=pool,
target_filter={'name': pool},
),
)
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class=aci_class,
class_config=dict(
allocMode=pool_allocation_mode,
descr=description,
name=pool,
)
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class=aci_class)
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
Tesora/tesora-tempest | tempest/api/compute/servers/test_server_actions.py | 2 | 25976 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from six.moves.urllib import parse as urlparse
import testtools
from tempest.api.compute import base
from tempest.common import compute
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ServerActionsTestJSON(base.BaseV2ComputeTest):
run_ssh = CONF.validation.run_validation
def setUp(self):
# NOTE(afazekas): Normally we use the same server with all test cases,
# but if it has an issue, we build a new one
super(ServerActionsTestJSON, self).setUp()
# Check if the server is in a clean state after test
try:
waiters.wait_for_server_status(self.client,
self.server_id, 'ACTIVE')
except lib_exc.NotFound:
# The server was deleted by previous test, create a new one
server = self.create_test_server(
validatable=True,
wait_until='ACTIVE')
self.__class__.server_id = server['id']
except Exception:
# Rebuild server if something happened to it during a test
self.__class__.server_id = self.rebuild_server(
self.server_id, validatable=True)
def tearDown(self):
self.server_check_teardown()
super(ServerActionsTestJSON, self).tearDown()
@classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
super(ServerActionsTestJSON, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(ServerActionsTestJSON, cls).setup_clients()
cls.client = cls.servers_client
@classmethod
def resource_setup(cls):
cls.set_validation_resources()
super(ServerActionsTestJSON, cls).resource_setup()
cls.server_id = cls.rebuild_server(None, validatable=True)
@test.idempotent_id('6158df09-4b82-4ab3-af6d-29cf36af858d')
@testtools.skipUnless(CONF.compute_feature_enabled.change_password,
'Change password not available.')
def test_change_server_password(self):
# Since this test messes with the password and makes the
# server unreachable, it should create its own server
newserver = self.create_test_server(
validatable=True,
wait_until='ACTIVE')
# The server's password should be set to the provided password
new_password = 'Newpass1234'
self.client.change_password(newserver['id'], adminPass=new_password)
waiters.wait_for_server_status(self.client, newserver['id'], 'ACTIVE')
if CONF.validation.run_validation:
# Verify that the user can authenticate with the new password
server = self.client.show_server(newserver['id'])['server']
linux_client = remote_client.RemoteClient(
self.get_server_ip(server),
self.ssh_user,
new_password,
server=server,
servers_client=self.client)
linux_client.validate_authentication()
def _test_reboot_server(self, reboot_type):
if CONF.validation.run_validation:
# Get the time the server was last rebooted,
server = self.client.show_server(self.server_id)['server']
linux_client = remote_client.RemoteClient(
self.get_server_ip(server),
self.ssh_user,
self.password,
self.validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
boot_time = linux_client.get_boot_time()
# NOTE: This sync is for avoiding the loss of pub key data
# in a server
linux_client.exec_command("sync")
self.client.reboot_server(self.server_id, type=reboot_type)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
if CONF.validation.run_validation:
# Log in and verify the boot time has changed
linux_client = remote_client.RemoteClient(
self.get_server_ip(server),
self.ssh_user,
self.password,
self.validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
new_boot_time = linux_client.get_boot_time()
self.assertTrue(new_boot_time > boot_time,
'%s > %s' % (new_boot_time, boot_time))
@test.attr(type='smoke')
@test.idempotent_id('2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32')
def test_reboot_server_hard(self):
# The server should be power cycled
self._test_reboot_server('HARD')
@decorators.skip_because(bug="1014647")
@test.idempotent_id('4640e3ef-a5df-482e-95a1-ceeeb0faa84d')
def test_reboot_server_soft(self):
# The server should be signaled to reboot gracefully
self._test_reboot_server('SOFT')
def _rebuild_server_and_check(self, image_ref):
rebuilt_server = (self.client.rebuild_server(self.server_id, image_ref)
['server'])
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
msg = ('Server was not rebuilt to the original image. '
'The original image: {0}. The current image: {1}'
.format(image_ref, rebuilt_server['image']['id']))
self.assertEqual(image_ref, rebuilt_server['image']['id'], msg)
@test.idempotent_id('aaa6cdf3-55a7-461a-add9-1c8596b9a07c')
def test_rebuild_server(self):
# The server should be rebuilt using the provided image and data
meta = {'rebuild': 'server'}
new_name = data_utils.rand_name(self.__class__.__name__ + '-server')
password = 'rebuildPassw0rd'
rebuilt_server = self.client.rebuild_server(
self.server_id,
self.image_ref_alt,
name=new_name,
metadata=meta,
adminPass=password)['server']
# If the server was rebuilt on a different image, restore it to the
# original image once the test ends
if self.image_ref_alt != self.image_ref:
self.addCleanup(self._rebuild_server_and_check, self.image_ref)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])
# Verify the server properties after the rebuild completes
waiters.wait_for_server_status(self.client,
rebuilt_server['id'], 'ACTIVE')
server = self.client.show_server(rebuilt_server['id'])['server']
rebuilt_image_id = server['image']['id']
self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
self.assertEqual(new_name, server['name'])
if CONF.validation.run_validation:
# Authentication is attempted in the following order of priority:
# 1.The key passed in, if one was passed in.
# 2.Any key we can find through an SSH agent (if allowed).
# 3.Any "id_rsa", "id_dsa" or "id_ecdsa" key discoverable in
# ~/.ssh/ (if allowed).
# 4.Plain username/password auth, if a password was given.
linux_client = remote_client.RemoteClient(
self.get_server_ip(rebuilt_server),
self.ssh_user,
password,
self.validation_resources['keypair']['private_key'],
server=rebuilt_server,
servers_client=self.client)
linux_client.validate_authentication()
@test.idempotent_id('30449a88-5aff-4f9b-9866-6ee9b17f906d')
def test_rebuild_server_in_stop_state(self):
# The server in stop state should be rebuilt using the provided
# image and remain in SHUTOFF state
server = self.client.show_server(self.server_id)['server']
old_image = server['image']['id']
new_image = (self.image_ref_alt
if old_image == self.image_ref else self.image_ref)
self.client.stop_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
rebuilt_server = (self.client.rebuild_server(self.server_id, new_image)
['server'])
# If the server was rebuilt on a different image, restore it to the
# original image once the test ends
if self.image_ref_alt != self.image_ref:
self.addCleanup(self._rebuild_server_and_check, old_image)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])
# Verify the server properties after the rebuild completes
waiters.wait_for_server_status(self.client,
rebuilt_server['id'], 'SHUTOFF')
server = self.client.show_server(rebuilt_server['id'])['server']
rebuilt_image_id = server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
self.client.start_server(self.server_id)
@test.idempotent_id('b68bd8d6-855d-4212-b59b-2e704044dace')
@test.services('volume')
def test_rebuild_server_with_volume_attached(self):
# create a new volume and attach it to the server
volume = self.create_volume()
server = self.client.show_server(self.server_id)['server']
self.attach_volume(server, volume)
# run general rebuild test
self.test_rebuild_server()
# make sure the volume is attached to the instance after rebuild
vol_after_rebuild = self.volumes_client.show_volume(volume['id'])
vol_after_rebuild = vol_after_rebuild['volume']
self.assertEqual('in-use', vol_after_rebuild['status'])
self.assertEqual(self.server_id,
vol_after_rebuild['attachments'][0]['server_id'])
def _test_resize_server_confirm(self, stop=False):
# The server's RAM and disk space should be modified to that of
# the provided flavor
if stop:
self.client.stop_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id,
'SHUTOFF')
self.client.resize_server(self.server_id, self.flavor_ref_alt)
# NOTE(jlk): Explicitly delete the server to get a new one for later
# tests. Avoids resize down race issues.
self.addCleanup(self.delete_server, self.server_id)
waiters.wait_for_server_status(self.client, self.server_id,
'VERIFY_RESIZE')
self.client.confirm_resize_server(self.server_id)
expected_status = 'SHUTOFF' if stop else 'ACTIVE'
waiters.wait_for_server_status(self.client, self.server_id,
expected_status)
server = self.client.show_server(self.server_id)['server']
self.assertEqual(self.flavor_ref_alt, server['flavor']['id'])
if stop:
# NOTE(mriedem): tearDown requires the server to be started.
self.client.start_server(self.server_id)
@test.idempotent_id('1499262a-9328-4eda-9068-db1ac57498d2')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_confirm(self):
self._test_resize_server_confirm(stop=False)
@test.idempotent_id('138b131d-66df-48c9-a171-64f45eb92962')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_confirm_from_stopped(self):
self._test_resize_server_confirm(stop=True)
@test.idempotent_id('c03aab19-adb1-44f5-917d-c419577e9e68')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
def test_resize_server_revert(self):
# The server's RAM and disk space should return to its original
# values after a resize is reverted
self.client.resize_server(self.server_id, self.flavor_ref_alt)
# NOTE(zhufl): Explicitly delete the server to get a new one for later
# tests. Avoids resize down race issues.
self.addCleanup(self.delete_server, self.server_id)
waiters.wait_for_server_status(self.client, self.server_id,
'VERIFY_RESIZE')
self.client.revert_resize_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
server = self.client.show_server(self.server_id)['server']
self.assertEqual(self.flavor_ref, server['flavor']['id'])
@test.idempotent_id('b963d4f1-94b3-4c40-9e97-7b583f46e470')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting not available, backup not possible.')
@test.services('image')
def test_create_backup(self):
        # Positive test: create backups successfully and rotate them correctly
# create the first and the second backup
# Check if glance v1 is available to determine which client to use. We
# prefer glance v1 for the compute API tests since the compute image
# API proxy was written for glance v1.
if CONF.image_feature_enabled.api_v1:
glance_client = self.os.image_client
elif CONF.image_feature_enabled.api_v2:
glance_client = self.os.image_client_v2
else:
raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
backup1 = data_utils.rand_name('backup-1')
resp = self.client.create_backup(self.server_id,
backup_type='daily',
rotation=2,
name=backup1).response
oldest_backup_exist = True
# the oldest one should be deleted automatically in this test
def _clean_oldest_backup(oldest_backup):
if oldest_backup_exist:
try:
glance_client.delete_image(oldest_backup)
except lib_exc.NotFound:
pass
else:
LOG.warning("Deletion of oldest backup %s should not have "
"been successful as it should have been "
"deleted during rotation." % oldest_backup)
image1_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(_clean_oldest_backup, image1_id)
waiters.wait_for_image_status(glance_client,
image1_id, 'active')
backup2 = data_utils.rand_name('backup-2')
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
resp = self.client.create_backup(self.server_id,
backup_type='daily',
rotation=2,
name=backup2).response
image2_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(glance_client.delete_image, image2_id)
waiters.wait_for_image_status(glance_client,
image2_id, 'active')
# verify they have been created
properties = {
'image_type': 'backup',
'backup_type': "daily",
'instance_uuid': self.server_id,
}
params = {
'status': 'active',
'sort_key': 'created_at',
'sort_dir': 'asc'
}
if CONF.image_feature_enabled.api_v1:
for key, value in properties.items():
params['property-%s' % key] = value
image_list = glance_client.list_images(
detail=True,
**params)['images']
else:
# Additional properties are flattened in glance v2.
params.update(properties)
image_list = glance_client.list_images(params)['images']
self.assertEqual(2, len(image_list))
self.assertEqual((backup1, backup2),
(image_list[0]['name'], image_list[1]['name']))
# create the third one, due to the rotation is 2,
# the first one will be deleted
backup3 = data_utils.rand_name('backup-3')
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
resp = self.client.create_backup(self.server_id,
backup_type='daily',
rotation=2,
name=backup3).response
image3_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(glance_client.delete_image, image3_id)
# the first back up should be deleted
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
glance_client.wait_for_resource_deletion(image1_id)
oldest_backup_exist = False
if CONF.image_feature_enabled.api_v1:
image_list = glance_client.list_images(
detail=True, **params)['images']
else:
image_list = glance_client.list_images(params)['images']
self.assertEqual(2, len(image_list),
'Unexpected number of images for '
'v2:test_create_backup; was the oldest backup not '
'yet deleted? Image list: %s' %
[image['name'] for image in image_list])
self.assertEqual((backup2, backup3),
(image_list[0]['name'], image_list[1]['name']))
def _get_output(self):
output = self.client.get_console_output(
self.server_id, length=10)['output']
self.assertTrue(output, "Console output was empty.")
lines = len(output.split('\n'))
self.assertEqual(lines, 10)
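        # Helper polled via self.wait_for(): the console endpoint may briefly return an
        # empty body right after boot/reboot, so the check is retried until the last
        # 10 lines (length=10) are available.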
@test.idempotent_id('4b8867e6-fffa-4d54-b1d1-6fdda57be2f3')
@testtools.skipUnless(CONF.compute_feature_enabled.console_output,
'Console output not supported.')
def test_get_console_output(self):
        # Positive test: should be able to GET the console output
        # for a given server_id and number of lines
        # This reboot is necessary to produce some console log after an
        # instance backup has been created. When an instance is backed up,
        # the console log file is truncated and nothing can be retrieved
        # through the "console-log" API.
        # Details: https://bugs.launchpad.net/nova/+bug/1251920
self.client.reboot_server(self.server_id, type='HARD')
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
self.wait_for(self._get_output)
@test.idempotent_id('89104062-69d8-4b19-a71b-f47b7af093d7')
@testtools.skipUnless(CONF.compute_feature_enabled.console_output,
'Console output not supported.')
def test_get_console_output_with_unlimited_size(self):
server = self.create_test_server(wait_until='ACTIVE')
def _check_full_length_console_log():
output = self.client.get_console_output(server['id'])['output']
self.assertTrue(output, "Console output was empty.")
lines = len(output.split('\n'))
            # NOTE: This test tries to get the full-length console log, so the
            # length should be greater than that of test_get_console_output.
self.assertTrue(lines > 10, "Cannot get enough console log length."
" (lines: %s)" % lines)
self.wait_for(_check_full_length_console_log)
@test.idempotent_id('5b65d4e7-4ecd-437c-83c0-d6b79d927568')
@testtools.skipUnless(CONF.compute_feature_enabled.console_output,
'Console output not supported.')
def test_get_console_output_server_id_in_shutoff_status(self):
        # Positive test: should be able to GET the console output
        # for a given server_id in SHUTOFF status
        # NOTE: SHUTOFF is an irregular status. To avoid test instability,
        #       a dedicated server is created just for this test instead of
        #       reusing the server that was created in the class-level setup.
server = self.create_test_server(wait_until='ACTIVE')
temp_server_id = server['id']
self.client.stop_server(temp_server_id)
waiters.wait_for_server_status(self.client, temp_server_id, 'SHUTOFF')
        self.wait_for(lambda: self._get_output(temp_server_id))
@test.idempotent_id('bd61a9fd-062f-4670-972b-2d6c3e3b9e73')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
def test_pause_unpause_server(self):
self.client.pause_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'PAUSED')
self.client.unpause_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
@test.idempotent_id('0d8ee21e-b749-462d-83da-b85b41c86c7f')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
def test_suspend_resume_server(self):
self.client.suspend_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id,
'SUSPENDED')
self.client.resume_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
@test.idempotent_id('77eba8e0-036e-4635-944b-f7a8f3b78dc9')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
def test_shelve_unshelve_server(self):
compute.shelve_server(self.client, self.server_id,
force_shelve_offload=True)
server = self.client.show_server(self.server_id)['server']
image_name = server['name'] + '-shelved'
params = {'name': image_name}
images = self.compute_images_client.list_images(**params)['images']
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
self.client.unshelve_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
@test.idempotent_id('af8eafd4-38a7-4a4b-bdbc-75145a580560')
def test_stop_start_server(self):
self.client.stop_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
self.client.start_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
@test.idempotent_id('80a8094c-211e-440a-ab88-9e59d556c7ee')
def test_lock_unlock_server(self):
        # Lock the server, try to stop it (an exception is expected), then unlock it and retry
self.client.lock_server(self.server_id)
self.addCleanup(self.client.unlock_server, self.server_id)
server = self.client.show_server(self.server_id)['server']
self.assertEqual(server['status'], 'ACTIVE')
# Locked server is not allowed to be stopped by non-admin user
self.assertRaises(lib_exc.Conflict,
self.client.stop_server, self.server_id)
self.client.unlock_server(self.server_id)
self.client.stop_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
self.client.start_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
def _validate_url(self, url):
valid_scheme = ['http', 'https']
parsed_url = urlparse.urlparse(url)
self.assertNotEqual('None', parsed_url.port)
self.assertNotEqual('None', parsed_url.hostname)
self.assertIn(parsed_url.scheme, valid_scheme)
@test.idempotent_id('c6bc11bf-592e-4015-9319-1c98dc64daf5')
@testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
'VNC Console feature is disabled.')
def test_get_vnc_console(self):
# Get the VNC console of type 'novnc' and 'xvpvnc'
console_types = ['novnc', 'xvpvnc']
for console_type in console_types:
body = self.client.get_vnc_console(self.server_id,
type=console_type)['console']
self.assertEqual(console_type, body['type'])
self.assertNotEqual('', body['url'])
self._validate_url(body['url'])
| apache-2.0 |
maurofaccenda/ansible | lib/ansible/modules/cloud/ovirt/ovirt_affinity_labels.py | 5 | 6900 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_affinity_labels
short_description: Module to manage affinity labels in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "This module manage affinity labels in oVirt/RHV. It can also manage assignments
of those labels to hosts and VMs."
options:
name:
description:
- "Name of the affinity label to manage."
required: true
state:
description:
- "Should the affinity label be present or absent."
choices: ['present', 'absent']
default: present
cluster:
description:
- "Name of the cluster where vms and hosts resides."
vms:
description:
- "List of the VMs names, which should have assigned this affinity label."
hosts:
description:
- "List of the hosts names, which should have assigned this affinity label."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create (if it does not exist) the affinity label and assign it to VMs vm1 and vm2 and host host1
- ovirt_affinity_labels:
name: mylabel
cluster: mycluster
vms:
- vm1
- vm2
hosts:
- host1
# To detach all VMs from label
- ovirt_affinity_labels:
name: mylabel
cluster: mycluster
vms: []
# Remove affinity label
- ovirt_affinity_labels:
state: absent
name: mylabel
'''
RETURN = '''
id:
description: ID of the affinity label which is managed
returned: On success if affinity label is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
affinity_label:
description: "Dictionary of all the affinity label attributes. Affinity label attributes can be found on your oVirt/RHV instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/affinity_label."
type: dict
returned: On success if affinity label is found.
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from collections import defaultdict
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
ovirt_full_argument_spec,
)
class AffinityLabelsModule(BaseModule):
def build_entity(self):
return otypes.AffinityLabel(name=self._module.params['name'])
def post_create(self, entity):
self.update_check(entity)
def pre_remove(self, entity):
self._module.params['vms'] = []
self._module.params['hosts'] = []
self.update_check(entity)
def _update_label_assignments(self, entity, name, label_obj_type):
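        """Reconcile the label's assignments for `name` ('vms' or 'hosts'): objects
        listed in the module parameters but not yet labelled are attached, while
        labelled objects that are no longer listed are detached."""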
objs_service = getattr(self._connection.system_service(), '%s_service' % name)()
if self._module.params[name] is not None:
objs = self._connection.follow_link(getattr(entity, name))
objs_names = defaultdict(list)
for obj in objs:
labeled_entity = objs_service.service(obj.id).get()
if self._module.params['cluster'] is None:
objs_names[labeled_entity.name].append(obj.id)
elif self._connection.follow_link(labeled_entity.cluster).name == self._module.params['cluster']:
objs_names[labeled_entity.name].append(obj.id)
for obj in self._module.params[name]:
if obj not in objs_names:
for obj_id in objs_service.list(
search='name=%s and cluster=%s' % (obj, self._module.params['cluster'])
):
label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
if not self._module.check_mode:
label_service.add(**{
name[:-1]: label_obj_type(id=obj_id.id)
})
self.changed = True
for obj in objs_names:
if obj not in self._module.params[name]:
label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
if not self._module.check_mode:
for obj_id in objs_names[obj]:
label_service.service(obj_id).remove()
self.changed = True
def update_check(self, entity):
self._update_label_assignments(entity, 'vms', otypes.Vm)
self._update_label_assignments(entity, 'hosts', otypes.Host)
return True
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
cluster=dict(default=None),
name=dict(default=None, required=True),
vms=dict(default=None, type='list'),
hosts=dict(default=None, type='list'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
('state', 'present', ['cluster']),
],
)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
affinity_labels_service = connection.system_service().affinity_labels_service()
affinity_labels_module = AffinityLabelsModule(
connection=connection,
module=module,
service=affinity_labels_service,
)
state = module.params['state']
if state == 'present':
ret = affinity_labels_module.create()
elif state == 'absent':
ret = affinity_labels_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 |
godfreyy/tablib | tablib/formats/_dbf.py | 10 | 2640 | # -*- coding: utf-8 -*-
""" Tablib - DBF Support.
"""
import tempfile
import struct
import os
from tablib.compat import StringIO
from tablib.compat import dbfpy
from tablib.compat import is_py3
if is_py3:
from tablib.packages.dbfpy3 import dbf
from tablib.packages.dbfpy3 import dbfnew
from tablib.packages.dbfpy3 import record as dbfrecord
import io
else:
from tablib.packages.dbfpy import dbf
from tablib.packages.dbfpy import dbfnew
from tablib.packages.dbfpy import record as dbfrecord
title = 'dbf'
extensions = ('dbf',)
DEFAULT_ENCODING = 'utf-8'
def export_set(dataset):
"""Returns DBF representation of a Dataset"""
new_dbf = dbfnew.dbf_new()
temp_file, temp_uri = tempfile.mkstemp()
# create the appropriate fields based on the contents of the first row
first_row = dataset[0]
for fieldname, field_value in zip(dataset.headers, first_row):
if type(field_value) in [int, float]:
new_dbf.add_field(fieldname, 'N', 10, 8)
else:
new_dbf.add_field(fieldname, 'C', 80)
new_dbf.write(temp_uri)
dbf_file = dbf.Dbf(temp_uri, readOnly=0)
for row in dataset:
record = dbfrecord.DbfRecord(dbf_file)
for fieldname, field_value in zip(dataset.headers, row):
record[fieldname] = field_value
record.store()
dbf_file.close()
dbf_stream = open(temp_uri, 'rb')
if is_py3:
stream = io.BytesIO(dbf_stream.read())
else:
stream = StringIO(dbf_stream.read())
dbf_stream.close()
os.remove(temp_uri)
return stream.getvalue()
def import_set(dset, in_stream, headers=True):
"""Returns a dataset from a DBF stream."""
dset.wipe()
if is_py3:
_dbf = dbf.Dbf(io.BytesIO(in_stream))
else:
_dbf = dbf.Dbf(StringIO(in_stream))
dset.headers = _dbf.fieldNames
for record in range(_dbf.recordCount):
row = [_dbf[record][f] for f in _dbf.fieldNames]
dset.append(row)
def detect(stream):
"""Returns True if the given stream is valid DBF"""
#_dbf = dbf.Table(StringIO(stream))
try:
if is_py3:
if type(stream) is not bytes:
stream = bytes(stream, 'utf-8')
_dbf = dbf.Dbf(io.BytesIO(stream), readOnly=True)
else:
_dbf = dbf.Dbf(StringIO(stream), readOnly=True)
return True
except (ValueError, struct.error):
# When we try to open up a file that's not a DBF, dbfpy raises a
# ValueError.
# When unpacking a string argument with less than 8 chars, struct.error is
# raised.
return False
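# Usage sketch (illustrative; header and row values are placeholders): tablib exposes this
# module as its 'dbf' format, so the functions above are normally reached via a Dataset:
#   import tablib
#   data = tablib.Dataset(headers=['name', 'length'])
#   data.append(('contig_1', 4200))
#   raw = export_set(data)   # DBF bytes built from the dataset
#   detect(raw)              # True for valid DBF content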
| mit |
arank/mxnet | example/speech_recognition/stt_bi_graphemes_util.py | 16 | 1494 | import csv
from collections import Counter
def split_every(n, label):
index = 0
if index <= len(label) - 1 <= index + n - 1:
yield label[index:len(label)]
index = index + n
while index+n-1 <= len(label)-1:
yield label[index:index+n]
index = index + n
if index <= len(label)-1 <= index+n-1:
yield label[index:len(label)]
        index = index + n
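# Worked example (illustrative): list(split_every(2, 'hello')) -> ['he', 'll', 'o'];
# a trailing character that does not fill a full chunk is yielded on its own.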
def generate_bi_graphemes_label(label):
label_bi_graphemes = []
label = label.split(' ')
last_index = len(label) - 1
for label_index, item in enumerate(label):
for pair in split_every(2, item):
label_bi_graphemes.append(pair)
if label_index != last_index:
label_bi_graphemes.append(" ")
return label_bi_graphemes
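# Worked example (illustrative): generate_bi_graphemes_label('hello world')
# -> ['he', 'll', 'o', ' ', 'wo', 'rl', 'd']; each word is split into bi-graphemes
# and a single space token is inserted between words.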
def generate_bi_graphemes_dictionary(label_list):
freqs = Counter()
for label in label_list:
label = label.split(' ')
for i in label:
for pair in split_every(2, i):
if len(pair) == 2:
freqs[pair] += 1
with open('resources/unicodemap_en_baidu_bi_graphemes.csv', 'w') as bigram_label:
bigramwriter = csv.writer(bigram_label, delimiter = ',')
baidu_labels = list('\' abcdefghijklmnopqrstuvwxyz')
for index, key in enumerate(baidu_labels):
bigramwriter.writerow((key, index+1))
for index, key in enumerate(freqs.keys()):
bigramwriter.writerow((key, index+len(baidu_labels)+1))
| apache-2.0 |
nilbus/sublime-text-2-plugin | floobits.py | 1 | 16380 | # coding: utf-8
import os
import sys
import json
import threading
import traceback
import subprocess
import urllib2
import webbrowser
import sublime_plugin
import sublime
from floo import api
from floo import AgentConnection
from floo.listener import Listener
from floo import msg
from floo import shared as G
from floo import utils
settings = sublime.load_settings('Floobits.sublime-settings')
DATA = utils.get_persistent_data()
agent = None
ON_CONNECT = None
def update_recent_rooms(room):
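    """Put `room` at the front of the recent-room list, trim to the 25 most recent,
    drop duplicates by their JSON form while preserving order, and persist the result."""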
recent_rooms = DATA.get('recent_rooms', [])
recent_rooms.insert(0, room)
recent_rooms = recent_rooms[:25]
seen = set()
new = []
for r in recent_rooms:
stringified = json.dumps(r)
if stringified not in seen:
new.append(r)
seen.add(stringified)
DATA['recent_rooms'] = new
utils.update_persistent_data(DATA)
def load_floorc():
"""try to read settings out of the .floorc file"""
s = {}
try:
fd = open(os.path.expanduser('~/.floorc'), 'rb')
except IOError as e:
if e.errno == 2:
return s
raise
default_settings = fd.read().split('\n')
fd.close()
for setting in default_settings:
# TODO: this is horrible
if len(setting) == 0 or setting[0] == '#':
continue
try:
name, value = setting.split(' ', 1)
        except ValueError:
            # A line without a space cannot be unpacked into name/value, so skip it.
continue
s[name.upper()] = value
return s
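# Example ~/.floorc contents that load_floorc() parses (values are placeholders):
#   username your_floobits_username
#   secret your_floobits_api_secret
# Each non-comment line is split on the first space; keys are upper-cased and later
# copied onto the G settings module by reload_settings().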
def reload_settings():
global settings
print('Reloading settings...')
settings = sublime.load_settings('Floobits.sublime-settings')
G.ALERT_ON_MSG = settings.get('alert_on_msg', True)
G.DEBUG = settings.get('debug', False)
G.COLAB_DIR = settings.get('share_dir', '~/.floobits/share/')
G.COLAB_DIR = os.path.expanduser(G.COLAB_DIR)
G.COLAB_DIR = os.path.realpath(G.COLAB_DIR)
utils.mkdir(G.COLAB_DIR)
G.DEFAULT_HOST = settings.get('host', 'floobits.com')
G.DEFAULT_PORT = settings.get('port', 3448)
G.SECURE = settings.get('secure', True)
G.USERNAME = settings.get('username')
G.SECRET = settings.get('secret')
floorc_settings = load_floorc()
for name, val in floorc_settings.items():
setattr(G, name, val)
if agent and agent.is_ready():
msg.log('Reconnecting due to settings change')
agent.reconnect()
settings.add_on_change('', reload_settings)
reload_settings()
class FloobitsBaseCommand(sublime_plugin.WindowCommand):
def is_visible(self):
return self.is_enabled()
def is_enabled(self):
return agent and agent.is_ready()
class FloobitsShareDirCommand(sublime_plugin.WindowCommand):
def run(self, dir_to_share=''):
self.window.show_input_panel('Directory:', dir_to_share, self.on_input, None, None)
def on_input(self, dir_to_share):
global ON_CONNECT
dir_to_share = os.path.expanduser(dir_to_share)
dir_to_share = utils.unfuck_path(dir_to_share)
room_name = os.path.basename(dir_to_share)
floo_room_dir = os.path.join(G.COLAB_DIR, G.USERNAME, room_name)
print(G.COLAB_DIR, G.USERNAME, room_name, floo_room_dir)
if os.path.isfile(dir_to_share):
return sublime.error_message('give me a directory please')
try:
utils.mkdir(dir_to_share)
except Exception:
return sublime.error_message("The directory %s doesn't exist and I can't make it." % dir_to_share)
floo_file = os.path.join(dir_to_share, '.floo')
info = {}
try:
floo_info = open(floo_file, 'rb').read().decode('utf-8')
info = json.loads(floo_info)
except (IOError, OSError):
pass
except Exception:
print("couldn't read the floo_info file: %s" % floo_file)
room_url = info.get('url')
if room_url:
try:
result = utils.parse_url(room_url)
except Exception as e:
sublime.error_message(str(e))
else:
room_name = result['room']
floo_room_dir = os.path.join(G.COLAB_DIR, result['owner'], result['room'])
if os.path.realpath(floo_room_dir) == os.path.realpath(dir_to_share):
if result['owner'] == G.USERNAME:
try:
api.create_room(room_name)
print('Created room %s' % room_url)
except Exception as e:
print('Tried to create room' + str(e))
                # they wanted to share the dir, so always share it
return self.window.run_command('floobits_join_room', {'room_url': room_url})
        # create the symlink to the shared directory
try:
utils.mkdir(os.path.dirname(floo_room_dir))
os.symlink(dir_to_share, floo_room_dir)
except OSError as e:
if e.errno != 17:
raise
except Exception as e:
return sublime.error_message("Couldn't create symlink from %s to %s: %s" % (dir_to_share, floo_room_dir, str(e)))
# make & join room
ON_CONNECT = lambda x: Listener.create_buf(dir_to_share)
self.window.run_command('floobits_create_room', {
'room_name': room_name,
'ln_path': floo_room_dir,
})
def is_enabled(self):
return not bool(agent and agent.is_ready())
class FloobitsCreateRoomCommand(sublime_plugin.WindowCommand):
def run(self, room_name='', ln_path=None, prompt='Room name:'):
self.ln_path = ln_path
self.window.show_input_panel(prompt, room_name, self.on_input, None, None)
def on_input(self, room_name):
try:
api.create_room(room_name)
room_url = 'https://%s/r/%s/%s' % (G.DEFAULT_HOST, G.USERNAME, room_name)
print('Created room %s' % room_url)
except urllib2.HTTPError as e:
if e.code != 409:
raise
args = {
'room_name': room_name,
'prompt': 'Room %s already exists. Choose another name:' % room_name
}
if self.ln_path:
while True:
room_name = room_name + '1'
new_path = os.path.join(os.path.dirname(self.ln_path), room_name)
try:
os.rename(self.ln_path, new_path)
except OSError:
continue
args = {
'ln_path': new_path,
'room_name': room_name,
'prompt': 'Room %s already exists. Choose another name:' % room_name
}
break
return self.window.run_command('floobits_create_room', args)
except Exception as e:
sublime.error_message('Unable to create room: %s' % str(e))
return
webbrowser.open(room_url + '/settings', new=2, autoraise=True)
self.window.run_command('floobits_join_room', {
'room_url': room_url,
})
def is_enabled(self):
return not bool(agent and agent.is_ready())
class FloobitsPromptJoinRoomCommand(sublime_plugin.WindowCommand):
def run(self, room=''):
self.window.show_input_panel('Room URL:', room, self.on_input, None, None)
def on_input(self, room_url):
self.window.run_command('floobits_join_room', {
'room_url': room_url,
})
def is_enabled(self):
return not bool(agent and agent.is_ready())
class FloobitsJoinRoomCommand(sublime_plugin.WindowCommand):
def run(self, room_url):
def open_room_window(cb):
if sublime.platform() == 'linux':
subl = open('/proc/self/cmdline').read().split(chr(0))[0]
elif sublime.platform() == 'osx':
# TODO: totally explodes if you install ST2 somewhere else
subl = settings.get('sublime_executable', '/Applications/Sublime Text 2.app/Contents/SharedSupport/bin/subl')
elif sublime.platform() == 'windows':
subl = sys.executable
else:
raise Exception('WHAT PLATFORM ARE WE ON?!?!?')
command = [subl]
if utils.get_room_window() is None:
command.append('--new-window')
command.append('--add')
command.append(G.PROJECT_PATH)
print('command:', command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
poll_result = p.poll()
print('poll:', poll_result)
def create_chat_view():
with open(os.path.join(G.COLAB_DIR, 'msgs.floobits.log'), 'w') as msgs_fd:
msgs_fd.write('')
msg.get_or_create_chat(cb)
utils.set_room_window(create_chat_view)
def run_agent(owner, room, host, port, secure):
global agent
if agent:
agent.stop()
agent = None
try:
agent = AgentConnection(owner, room, host=host, port=port, secure=secure, on_connect=ON_CONNECT)
# owner and room name are slugfields so this should be safe
Listener.set_agent(agent)
agent.connect()
except Exception as e:
print(e)
tb = traceback.format_exc()
print(tb)
else:
joined_room = {'url': room_url}
update_recent_rooms(joined_room)
try:
result = utils.parse_url(room_url)
except Exception as e:
return sublime.error_message(str(e))
def run_thread(*args):
thread = threading.Thread(target=run_agent, kwargs=result)
thread.start()
def link_dir(d):
if d == '':
try:
utils.mkdir(G.PROJECT_PATH)
except Exception as e:
return sublime.error_message("Couldn't create directory %s: %s" % (G.PROJECT_PATH, str(e)))
return open_room_window(run_thread)
try:
utils.mkdir(os.path.dirname(G.PROJECT_PATH))
except Exception as e:
return sublime.error_message("Couldn't create directory %s: %s" % (os.path.dirname(G.PROJECT_PATH), str(e)))
d = os.path.realpath(os.path.expanduser(d))
if not os.path.isdir(d):
make_dir = sublime.ok_cancel_dialog('%s is not a directory. Create it?' % d)
if not make_dir:
return self.window.show_input_panel('%s is not a directory. Enter an existing path:' % d, d, link_dir, None, None)
try:
utils.mkdir(d)
except Exception as e:
return sublime.error_message("Could not create directory %s: %s" % (d, str(e)))
try:
os.symlink(d, G.PROJECT_PATH)
except Exception as e:
return sublime.error_message("Couldn't create symlink from %s to %s: %s" % (d, G.PROJECT_PATH, str(e)))
open_room_window(run_thread)
G.PROJECT_PATH = os.path.realpath(os.path.join(G.COLAB_DIR, result['owner'], result['room']))
if not os.path.isdir(G.PROJECT_PATH):
# TODO: really bad prompt here
return self.window.show_input_panel('Give me a directory to destructively dump data into (or just press enter):', '', link_dir, None, None)
open_room_window(run_thread)
class FloobitsLeaveRoomCommand(FloobitsBaseCommand):
def run(self):
global agent
if agent:
agent.stop()
agent = None
sublime.error_message('You have left the room.')
else:
sublime.error_message('You are not joined to any room.')
class FloobitsRejoinRoomCommand(FloobitsBaseCommand):
def run(self):
global agent
if agent:
room_url = utils.to_room_url({
'host': agent.host,
'owner': agent.owner,
'port': agent.port,
'room': agent.room,
'secure': agent.secure,
})
agent.stop()
agent = None
else:
try:
room_url = DATA['recent_rooms'][0]['url']
except Exception:
sublime.error_message('No recent room to rejoin.')
return
self.window.run_command('floobits_join_room', {
'room_url': room_url,
})
def is_visible(self):
return bool(self.is_enabled())
def is_enabled(self):
return True
class FloobitsPromptMsgCommand(FloobitsBaseCommand):
def run(self, msg=''):
print('msg', msg)
self.window.show_input_panel('msg:', msg, self.on_input, None, None)
def on_input(self, msg):
self.window.run_command('floobits_msg', {'msg': msg})
class FloobitsMsgCommand(FloobitsBaseCommand):
def run(self, msg):
if not msg:
return
if agent:
agent.send_msg(msg)
def description(self):
return 'Send a message to the floobits room you are in (join a room first)'
class FloobitsClearHighlightsCommand(FloobitsBaseCommand):
def run(self):
Listener.clear_highlights(self.window.active_view())
class FloobitsPingCommand(FloobitsBaseCommand):
# TODO: ghost this option if user doesn't have permissions
def run(self):
Listener.ping(self.window.active_view())
class FloobitsJoinRecentRoomCommand(sublime_plugin.WindowCommand):
def _get_recent_rooms(self):
return [x.get('url') for x in DATA['recent_rooms'] if x.get('url') is not None]
def run(self, *args):
rooms = self._get_recent_rooms()
self.window.show_quick_panel(rooms, self.on_done)
def on_done(self, item):
if item == -1:
return
room = DATA['recent_rooms'][item]
self.window.run_command('floobits_join_room', {'room_url': room['url']})
def is_enabled(self):
return not bool(agent and agent.is_ready() and len(self._get_recent_rooms()) > 0)
class FloobitsOpenMessageViewCommand(FloobitsBaseCommand):
def run(self, *args):
def print_msg(chat_view):
msg.log('Opened message view')
if not agent:
msg.log('Not joined to a room.')
msg.get_or_create_chat(print_msg)
def description(self):
return 'Open the floobits messages view.'
class FloobitsAddToRoomCommand(FloobitsBaseCommand):
def run(self, paths, current_file=False):
if not self.is_enabled():
return
if paths is None and current_file:
paths = [self.window.active_view().file_name()]
for path in paths:
Listener.create_buf(path)
def description(self):
return 'Add file or directory to currently-joined Floobits room.'
class FloobitsDeleteFromRoomCommand(FloobitsBaseCommand):
def run(self, paths, current_file=False):
if not self.is_enabled():
return
if paths is None and current_file:
paths = [self.window.active_view().file_name()]
for path in paths:
Listener.delete_buf(path)
def description(self):
        return 'Delete file or directory from the currently-joined Floobits room.'
class FloobitsEnableFollowModeCommand(FloobitsBaseCommand):
def run(self):
G.FOLLOW_MODE = True
# TODO: go to most recent highlight
def is_visible(self):
return bool(self.is_enabled())
def is_enabled(self):
return bool(agent and agent.is_ready() and not G.FOLLOW_MODE)
class FloobitsDisableFollowModeCommand(FloobitsBaseCommand):
def run(self):
G.FOLLOW_MODE = False
def is_visible(self):
return bool(self.is_enabled())
def is_enabled(self):
return bool(agent and agent.is_ready() and G.FOLLOW_MODE)
class FloobitsNotACommand(sublime_plugin.WindowCommand):
def run(self, *args, **kwargs):
pass
def is_visible(self):
return True
def is_enabled(self):
return False
def description(self):
return
Listener.push()
| apache-2.0 |
dyoung418/tensorflow | tensorflow/contrib/slim/python/slim/nets/inception_v1_test.py | 112 | 8960 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.slim.python.slim import model_analyzer
from tensorflow.contrib.slim.python.slim.nets import inception_v1
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InceptionV1Test(test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, end_points = inception_v1.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
    mixed_5c, end_points = inception_v1.inception_v1_base(inputs)
    self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
    self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = [
'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b',
'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
'Mixed_5b', 'Mixed_5c'
]
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = [
'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b',
'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
'Mixed_5b', 'Mixed_5c'
]
for index, endpoint in enumerate(endpoints):
with ops.Graph().as_default():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception_v1.inception_v1_base(
inputs, final_endpoint=endpoint)
self.assertTrue(
out_tensor.op.name.startswith('InceptionV1/' + endpoint))
self.assertItemsEqual(endpoints[:index + 1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v1.inception_v1_base(
inputs, final_endpoint='Mixed_5c')
endpoints_shapes = {
'Conv2d_1a_7x7': [5, 112, 112, 64],
'MaxPool_2a_3x3': [5, 56, 56, 64],
'Conv2d_2b_1x1': [5, 56, 56, 64],
'Conv2d_2c_3x3': [5, 56, 56, 192],
'MaxPool_3a_3x3': [5, 28, 28, 192],
'Mixed_3b': [5, 28, 28, 256],
'Mixed_3c': [5, 28, 28, 480],
'MaxPool_4a_3x3': [5, 14, 14, 480],
'Mixed_4b': [5, 14, 14, 512],
'Mixed_4c': [5, 14, 14, 512],
'Mixed_4d': [5, 14, 14, 512],
'Mixed_4e': [5, 14, 14, 528],
'Mixed_4f': [5, 14, 14, 832],
'MaxPool_5a_2x2': [5, 7, 7, 832],
'Mixed_5b': [5, 7, 7, 832],
'Mixed_5c': [5, 7, 7, 1024]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
with arg_scope(inception_v1.inception_v1_arg_scope()):
inception_v1.inception_v1_base(inputs)
total_params, _ = model_analyzer.analyze_vars(
variables_lib.get_model_variables())
self.assertAlmostEqual(5607184, total_params)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
inputs = random_ops.random_uniform((batch_size, height, width, 3))
mixed_5c, _ = inception_v1.inception_v1_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
ops.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = array_ops.placeholder(
dtypes.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception_v1.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
variables.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testUnknownBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = array_ops.placeholder(dtypes.float32, (None, height, width, 3))
logits, _ = inception_v1.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
images = random_ops.random_uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = inception_v1.inception_v1(
eval_inputs, num_classes, is_training=False)
predictions = math_ops.argmax(logits, 1)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 224, 224
num_classes = 1000
train_inputs = random_ops.random_uniform(
(train_batch_size, height, width, 3))
inception_v1.inception_v1(train_inputs, num_classes)
eval_inputs = random_ops.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception_v1.inception_v1(eval_inputs, num_classes, reuse=True)
predictions = math_ops.argmax(logits, 1)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = random_ops.random_uniform([1, 224, 224, 3])
logits, _ = inception_v1.inception_v1(
images, num_classes=num_classes, spatial_squeeze=False)
with self.test_session() as sess:
variables.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
test.main()
| apache-2.0 |
adamkoziol/sipprcommon | bowtie.py | 1 | 62181 | #!/usr/bin/env python
from Bio.Application import _Option, AbstractCommandline, _Switch, _Argument
import re
__author__ = 'mike knowles'
__doc__ = 'Wrapper for bowtie2'
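# Usage sketch for the Bowtie2CommandLine wrapper defined below (illustrative; the index
# name and read files are placeholders and bowtie2 is assumed to be on the PATH):
#   cmd = Bowtie2CommandLine(bt2='reference_index', m1='reads_R1.fastq',
#                            m2='reads_R2.fastq', S='aligned.sam')
#   stdout, stderr = cmd()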
class _PipeArgumentList(_Argument):
"""Represent a variable list of arguments for piping on a command line, e.g. sam to bam to sorted bam."""
def __str__(self):
assert isinstance(self.value, list), \
"Arguments should be a list"
assert self.value, "Requires at least one argument"
# A leading pipe is required so that commands following the last filename
# do not appear merged.
# e.g.: samtools view -bS - | samtools sort -o out.sorted.bam - [without leading pipe][Incorrect]
# | samtools view -bS - | samtools sort -o out.sorted.bam - [with leading pipe][Correct]
# if any(not isinstance(x, basestring) for x in self.value):
# Correct for non-string commands.
# e.g. command classes like Bio.Sequencing.Applications.SamtoolsViewCommandLine
self.value = map(str, self.value)
return "| " + " | ".join(self.value)
class _Bowtie2BaseCommandLine(AbstractCommandline):
"""Base bowtie wrapper"""
def __init__(self, cmd=None, **kwargs):
assert cmd is not None
extra_parameters = [
_Switch(["-h", "h"],
"Print USAGE and DESCRIPTION; ignore other arguments."),
_Switch(["--help", "help"],
"Print USAGE, DESCRIPTION and ARGUMENTS description; "
"ignore other arguments."),
_Switch(["--version", "version"],
"Print version number; ignore other arguments."),
]
try:
# Insert extra parameters - at the start just in case there
# are any arguments which must come last:
self.parameters = extra_parameters + self.parameters
except AttributeError:
# Should we raise an error? The subclass should have set this up!
self.parameters = extra_parameters
AbstractCommandline.__init__(self, cmd, **kwargs)
def _validate(self):
AbstractCommandline._validate(self)
def _validate_incompatibilities(self, incompatibles):
"""Used by the bowtie _validate method (PRIVATE)."""
for element in incompatibles:
if type(element) is list:
i = [a for a in element if self._get_parameter(a)]
if len(i) > 1:
raise ValueError("Options {} are incompatible".format(" and ".join(i)))
elif type(incompatibles) is dict:
if self._get_parameter(element):
for b in incompatibles[element]:
if self._get_parameter(b):
raise ValueError("Options %s and %s are incompatible."
% (element, b))
else:
for a in element:
if self._get_parameter(a):
for b in incompatibles[a]:
if self._get_parameter(b):
raise ValueError("Options %s and %s are incompatible."
% (a, b))
class Bowtie2CommandLine(_Bowtie2BaseCommandLine):
"""Base Bowtie2 wrapper"""
def __init__(self, cmd='bowtie2', **kwargs):
assert cmd is not None
self.parameters = [
_Option(["-x", "bt2"],
"The basename of the index for the reference genome. The basename is the name of any of the index "
"files up to but not including the final .1.bt2 / .rev.1.bt2 / etc. bowtie2 looks for the "
"specified index first in the current directory, then in the directory specified in the "
"BOWTIE2_INDEXES environment variable",
filename=True,
equate=False),
_Option(["-1", "m1"],
"Comma-separated list of files containing mate 1s (filename usually includes _1), "
"e.g. -1 flyA_1.fq,flyB_1.fq. Sequences specified with this option must correspond file-for-file "
"and read-for-read with those specified in <m2>. Reads may be a mix of different lengths. If - is "
"specified, bowtie2 will read the mate 1s from the standard in or stdin filehandle",
equate=False),
_Option(["-2", "m2"],
"Comma-separated list of files containing mate 2s (filename usually includes _2), "
"e.g. -2 flyA_2.fq,flyB_2.fq. Sequences specified with this option must correspond file-for-file "
"and read-for-read with those specified in <m1>. Reads may be a mix of different lengths. If - is "
"specified, bowtie2 will read the mate 2s from the standard in or stdin filehandle",
equate=False),
_Option(["-U", "U"],
"Comma-separated list of files containing unpaired reads to be aligned, e.g. lane1.fq,lane2.fq,"
"lane3.fq,lane4.fq. Reads may be a mix of different lengths. If - is specified, bowtie2 gets the "
"reads from the standard in or stdin filehandle",
equate=False),
_Option(['-S', 'S'],
"File to write SAM alignments to. By default, alignments are written to the standard out or "
"stdout filehandle (i.e. the console)",
filename=True,
equate=False)
]
extra_parameters = [
# Other options
_Option(["--seed", "seed"],
"Use <int> as the seed for pseudo-random number generator. Default: 0",
checker_function=lambda value: type(value) is int,
equate=False),
_Switch(["--non-deterministic", "non_deterministic"],
"Normally, Bowtie 2 re-initializes its pseudo-random generator for each read. It seeds the "
"generator with a number derived from (a) the read name, (b) the nucleotide sequence, "
"(c) the quality sequence, (d) the value of the --seed option. This means that if two reads are "
"identical (same name, same nucleotides, same qualities) Bowtie 2 will find and report the same "
"alignment(s) for both, even if there was ambiguity. When --non-deterministic is specified, "
"Bowtie 2 re-initializes its pseudo-random generator for each read using the current time. This "
"means that Bowtie 2 will not necessarily report the same alignment for two identical reads. This "
"is counter-intuitive for some users, but might be more appropriate in situations where the input "
"consists of many identical reads"),
_Switch(["--qc-filter", "qc_filter"],
"Filter out reads for which the QSEQ filter field is non-zero. Only has an effect when read "
"format is --qseq. Default: off"),
# Input Options
_Switch(["-q", "fastq"],
"Reads (specified with <m1>, <m2>, <s>) are FASTQ files. FASTQ files usually have "
"at. See also: --solexa-quals and --int-quals."),
_Switch(["--qseq", "qseq"],
"Reads (specified with <m1>, <m2>, <s>) are QSEQ files. QSEQ files usually end in s."),
_Switch(["-f", "fasta"],
"Reads (specified with <m1>, <m2>, <s>) are FASTA files. FASTA files usually have "
"ore-quals is also set."),
_Switch(["-r", "unformated"],
"Reads (specified with <m1>, <m2>, <s>) are unformated files. With one input sequence per "
"if --ignore-quals is also set."),
_Switch(["-c", "csv"],
"The read sequences are given on command line. I.e. <m1>, <m2> and <singles> are CSV files of "
"reads rather than lists of or qualities, so -c also implies --ignore-quals."),
_Switch(["--phred33", "phred33"],
"Input qualities are ASCII chars equal to the Phred quality plus 33. This is also called "
"the Phred+33 encoding, which is used by the very latest Illumina pipelines"),
_Switch(["--phred64", "phred64"],
"Input qualities are ASCII chars equal to the Phred quality plus 64. This is also called "
"the Phred+64 encoding"),
_Switch(["--solexa-quals", "solexa_quals"],
"Convert input qualities from Solexa (which can be negative) to Phred (which can't). This "
"scheme was used in older Illumina GA Pipeline versions (prior to 1.3). Default: off"),
_Switch(["--int-quals", "int_quals"],
"Quality values are represented in the read input file as space-separated ASCII integers, "
"e.g., 40 40 30 40..., rather than ASCII characters, e.g., II?I.... Integers are treated as "
"being on the Phred quality scale unless --solexa-quals is also specified. Default: off"),
# Preset options in --end-to-end mode
_Switch(["--very-fast", "very_fast"],
"Same as: -D 5 -R 1 -N 0 -L 22 -i S,0,2.50"),
_Switch(["--fast", "fast"],
"Same as: -D 10 -R 2 -N 0 -L 22 -i S,0,2.50"),
_Switch(["--sensitive", "sensitive"],
"Same as: -D 15 -R 2 -L 22 -i S,1,1.15 (default in --end-to-end mode)"),
_Switch(["--very-sensitive", "very_sensitive"],
"Same as: -D 20 -R 3 -N 0 -L 20 -i S,1,0.50"),
# Preset options in --local mode
_Switch(["--very-fast-local", "very_fast_local"],
"Same as: -D 5 -R 1 -N 0 -L 25 -i S,1,2.00"),
_Switch(["--fast-local", "fast_local"],
"Same as: -D 10 -R 2 -N 0 -L 22 -i S,1,1.75"),
_Switch(["--sensitive-local", "sensitive_local"],
"Same as: -D 15 -R 2 -N 0 -L 20 -i S,1,0.75 (default in --local mode)"),
_Switch(["--very-sensitive-local", "very_sensitive_local"],
"Same as: -D 20 -R 3 -N 0 -L 20 -i S,1,0.50"),
# Input configuration options
_Option(["--skip", "skip"],
"Skip (i.e. do not align) the first <int> reads or "
"pairs in the input",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--qupto", "qupto"],
"Align the first <int> reads or read pairs from the"
" input (after the -s/--skip reads or pairs have been skipped), then stop. Default: no limit",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--trim5", "trim5"],
"Trim <int> bases from 5' (left) end of each read before alignment (default: 0)",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--trim3", "trim3"],
"Trim <int> bases from 3' (right) end of each read before alignment (default: 0)",
checker_function=lambda value: type(value) is int,
equate=False),
# Alignment options
_Option(["-N", "num_mismatches"],
"Sets the number of mismatches to allowed in a seed alignment during multiseed "
"alignment. Can be set to 0 or 1. Setting this higher makes alignment slower (often much slower) "
"but increases sensitivity. Default: 0",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["-L", "seed_length"],
"Sets the length of the seed substrings to align during multiseed alignment. "
"Smaller values make alignment slower but more senstive. Default: the --sensitive preset is used "
"by default, which sets -L to 20 both in --end-to-end mode and in --local mode",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["-i", "i_func"],
"Sets a function governing the interval between seed substrings to use during multiseed alignment. "
"For instance, if the read has 30 characters, and seed length is 10, and the seed interval is 6, "
"the seeds extracted will be: Since it's best to use longer intervals for longer reads, this "
"parameter sets the interval as a function of the read length, rather than a single one-size-fits-"
"all number. For instance, specifying -i S,1,2.5 sets the interval "
"function f to f(x) = 1 + 2.5 * sqrt(x), where x is the read length. "
"See also: setting function options. If the function returns a result less than 1, it is rounded up"
" to 1. Default: the --sensitive preset is used by default, which sets -i to S,1,1.15 "
"in --end-to-end mode to -i S,1,0.75 in --local mode.",
checker_function=lambda value: re.match('^[CLSG],[-\d\.],[-\d\.]', value) is not None,
equate=False),
_Option(["--n-ceil", "n_ceil"],
"Sets a function governing the maximum number of ambiguous characters (usually Ns and/or .s) "
"allowed in a read as a function of read length. For instance, specifying -L,0,0.15 sets the "
"N-ceiling function f to f(x) = 0 + 0.15 * x, where x is the read length. See also: setting "
"function options. Reads exceeding this ceiling are filtered out. Default: L,0,0.15.",
checker_function=lambda value: re.match('^[CLSG],[-\d\.],[-\d\.]', value) is not None,
equate=False),
_Option(["--gbar", "gbar"],
"Disallow gaps within <int> positions of the beginning or end of the read. Default: 4.",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--dpad", "dpad"],
"Pads dynamic programming problems by <int> columns on either side to allow gaps. Default: 15.",
checker_function=lambda value: type(value) is int,
equate=False),
_Switch(["--ignore-quals", "ignore_quals"],
"When calculating a mismatch penalty, always consider the quality value at the mismatched position "
"to be the highest possible, regardless of the actual value. I.e. input is treated as though all "
"quality values are high. This is also the default behavior when the input doesn't specify quality "
"values (e.g. in -f, -r, or -c modes)"),
_Switch(["--nofw", "nofw"],
"If --nofw is specified, bowtie2 will not attempt to align unpaired reads to the forward (Watson) "
"reference strand. In paired-end mode, --nofw and --norc pertain to the fragments; i.e. specifying "
"--nofw causes bowtie2 to explore only those paired-end configurations corresponding to fragments "
"from the reverse-complement (Crick) strand. Default: both strands enabled"),
_Switch(["--norc", "norc"],
"If --norc is specified, bowtie2 will not attempt to align unpaired reads against the reverse-"
"complement Crick reference strand. In paired-end mode, --nofw and --norc pertain to the fragments;"
" i.e. specifying --nofw causes bowtie2 to explore only those paired-end configurations "
"corresponding to fragments from the reverse-complement (Crick) strand. Default: both strands"),
_Switch(["--no-1mm-upfront", "no_1mm_upfront"],
"By default, Bowtie 2 will attempt to find either an exact or a 1-mismatch end-to-end alignment"
" for the read before trying the multiseed heuristic. Such alignments can be found very quickly,"
" and many short read alignments have exact or near-exact end-to-end alignments. However, this can "
"lead to unexpected alignments when the user also sets options governing the multiseed heuristic, "
"like -L and -N. For instance, if the user specifies -N 0 and -L equal to the length of the read, "
"the user will be surprised to find 1-mismatch alignments reported. This option prevents Bowtie 2 "
"from searching for 1-mismatch end-to-end alignments before using the multiseed heuristic, which "
"leads to the expected behavior when combined with options such as -L and -N. This comes at the "
"expense of speed"),
_Switch(["--end-to-end", "end_to_end"],
"In this mode, Bowtie 2 requires that the entire read align from one end to the other, without any "
"trimming (or soft clipping) of characters from either end. The match bonus --ma always equals 0 in"
" this mode, so all alignment scores are less than or equal to 0, and the greatest possible "
"alignment score is 0. This is mutually exclusive with --local. --end-to-end is the default mode"),
_Switch(["--local", "local"],
"In this mode, Bowtie 2 does not require that the entire read align from one end to the other. "
"Rather, some characters may be omitted (soft clipped) from the ends in order to achieve the "
"greatest possible alignment score. The match bonus --ma is used in this mode, and the best "
"possible alignment score is equal to the match bonus (--ma) times the length of the read. "
"Specifying --local and one of the presets (e.g. --local --very-fast) is equivalent to specifying "
"the local version of the preset (--very-fast-local). This is mutually exclusive with --end-to-end."
" --end-to-end is the default mode"),
# Scoring Options
_Option(["--score-min", "score_min"],
"Sets a function governing the minimum alignment score needed for an alignment to be considered "
"valid (i.e. good enough to report). This is a function of read length. For instance, specifying "
"L,0,-0.6 sets the minimum-score function f to f(x) = 0 + -0.6 * x, where x is the read length."
" See also: setting function options. The default in --end-to-end mode is L,-0.6,-0.6 "
"and the default in --local mode is G,20,8.",
checker_function=lambda value: re.match('^[CLSG],[-\d\.],[-\d\.]', value) is not None,
equate=False),
_Option(["--ma", "ma"],
"Sets the match bonus. In --local mode <int> is added to the alignment score for each "
"position where a read character aligns to a reference character and the characters match. "
"Not used in --end-to-end mode. Default: 2.",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--np", "np"],
"Sets penalty for positions where the read, reference, or both, contain an ambiguous "
"character such as N. Default: 1.",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--rdg", "rdg"],
"Sets the read gap open (<int1>) and extend (<int2>) penalties. A read gap of length N gets"
" a penalty of <int1> + N * <int2>. Default: 5, 3.",
                    checker_function=lambda value: re.match('^[-\d\.]+,[-\d\.]+', value) is not None,
equate=False),
_Option(["--rfg", "rfg"],
"Sets the reference gap open (<int1>) and extend (<int2>) penalties. A reference gap of "
"length N gets a penalty of <int1> + N * <int2>. Default: 5, 3.",
                    checker_function=lambda value: re.match('^[-\d\.]+,[-\d\.]+', value) is not None,
equate=False),
_Option(["--mp", "mp"],
"Sets the maximum (MX) and minimum (MN) mismatch penalties, both integers. A number less "
"than or equal to MX and greater than or equal to MN is subtracted from the alignment score for "
"each position where a read character aligns to a reference character, the characters do not match,"
" and neither is an N. If --ignore-quals is specified, the number subtracted quals MX. "
"Otherwise, the number subtracted is MN + floor( (MX-MN)(MIN(Q, 40.0)/40.0) ) "
"where Q is the Phred quality value. Default: MX = 6, MN = 2.",
                    checker_function=lambda value: re.match('^[-\d\.]+,[-\d\.]+', value) is not None,
equate=False),
# Reporting Options
_Option(["-k", "k"],
"By default, bowtie2 searches for distinct, valid alignments for each read. When it finds a"
" valid alignment, it continues looking for alignments that are nearly as good or better. The best "
"alignment found is reported (randomly selected from among best if tied). Information about the "
"best alignments is used to estimate mapping quality and to set SAM optional fields, such as "
"AS:i and XS:i.",
checker_function=lambda value: type(value) is int,
equate=False),
_Switch(["-a", "a"],
"Like -k but with no upper limit on number of alignments to search for. "
"-a is mutually exclusive with -k."),
# Effort Options
_Option(["-D", "D"],
"Up to <int> consecutive seed extension attempts can fail before Bowtie 2 moves on, using"
" the alignments found so far. A seed extension fails if it does not yield a new best or a new "
"second-best alignment. This limit is automatically adjusted up when -k or -a are specified. "
"Default: 15.",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["-R", "R"],
"<int> is the maximum number of times Bowtie 2 will re-seed reads with repetitive seeds. "
"When re-seeding, Bowtie 2 simply chooses a new set of reads (same length, same number of "
"mismatches allowed) at different offsets and searches for more alignments. A read is considered "
"to have repetitive seeds if the total number of seed hits divided by the number of seeds that "
"aligned at least once is greater than 300. Default: 2.",
checker_function=lambda value: type(value) is int,
equate=False),
# Paired-end options
_Option(["--minins", "minins"],
"The minimum fragment length for valid paired-end alignments. E.g. if -I 60 is specified "
"and a paired-end alignment consists of two 20-bp alignments in the appropriate orientation with "
"a 20-bp gap between them, that alignment is considered valid (as long as -X is also satisfied). "
"A 19-bp gap would not be valid in that case. If trimming options -3 or -5 are also used, "
"the -I constraint is applied with respect to the untrimmed mates. The larger the difference "
"between -I and -X, the slower Bowtie 2 will run. This is because larger differences bewteen -I "
"and -X require that Bowtie 2 scan a larger window to determine if a concordant alignment exists. "
"For typical fragment length ranges (200 to 400 nucleotides), Bowtie 2 is very efficient. "
"Default: 0 (essentially imposing no minimum)",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--maxins", "maxins"],
"The maximum fragment length for valid paired-end alignments. E.g. if -X 100 is specified "
"and a paired-end alignment consists of two 20-bp alignments in the proper orientation with a "
"60-bp gap between them, that alignment is considered valid (as long as -I is also satisfied). "
"A 61-bp gap would not be valid in that case. If trimming options -3 or -5 are also used, the "
"-X constraint is applied with respect to the untrimmed mates, not the trimmed mates. The larger "
"the difference between -I and -X, the slower Bowtie 2 will run. This is because larger differences"
" bewteen -I and -X require that Bowtie 2 scan a larger window to determine if a concordant "
"alignment exists. For typical fragment length ranges (200 to 400 nucleotides), "
"Bowtie 2 is very efficient. Default: 500",
checker_function=lambda value: type(value) is int,
equate=False),
_Switch(["--fr", "fr"],
"The upstream/downstream mate orientations for a valid paired-end alignment against the "
"forward reference strand. E.g., if --fr is specified and there is a candidate paired-end "
"alignment where mate 1 appears upstream of the reverse complement of mate 2 and the fragment "
"length constraints (-I and -X) are met, that alignment is valid. Also, if mate 2 appears "
"upstream of the reverse complement of mate 1 and all other constraints are met, "
"that too is valid. --rf likewise requires that an upstream mate1 be reverse-complemented "
"and a downstream mate2 be forward-oriented. --ff requires both an upstream mate 1 and a "
"downstream mate 2 to be forward-oriented. "
"Default: --fr (appropriate for Illumina's Paired-end Sequencing Assay)."),
_Switch(["--rf", "rf"],
"The upstream/downstream mate orientations for a valid paired-end alignment against the "
"forward reference strand. E.g., if --fr is specified and there is a candidate paired-end "
"alignment where mate 1 appears upstream of the reverse complement of mate 2 and the fragment "
"length constraints (-I and -X) are met, that alignment is valid. Also, if mate 2 appears "
"upstream of the reverse complement of mate 1 and all other constraints are met, "
"that too is valid. --rf likewise requires that an upstream mate1 be reverse-complemented "
"and a downstream mate2 be forward-oriented. --ff requires both an upstream mate 1 and a "
"downstream mate 2 to be forward-oriented. "
"Default: --fr (appropriate for Illumina's Paired-end Sequencing Assay)."),
_Switch(["--ff", "ff"],
"The upstream/downstream mate orientations for a valid paired-end alignment against the "
"forward reference strand. E.g., if --fr is specified and there is a candidate paired-end "
"alignment where mate 1 appears upstream of the reverse complement of mate 2 and the fragment "
"length constraints (-I and -X) are met, that alignment is valid. Also, if mate 2 appears "
"upstream of the reverse complement of mate 1 and all other constraints are met, "
"that too is valid. --rf likewise requires that an upstream mate1 be reverse-complemented "
"and a downstream mate2 be forward-oriented. --ff requires both an upstream mate 1 and a "
"downstream mate 2 to be forward-oriented. "
"Default: --fr (appropriate for Illumina's Paired-end Sequencing Assay)."),
_Switch(["--no-mixed", "no_mixed"],
"By default, when bowtie2 cannot find a concordant or discordant alignment for a pair, it "
"then tries to find alignments for the individual mates. This option disables that behavior."),
_Switch(["--no-discordant", "no_discordant"],
"By default, bowtie2 looks for discordant alignments if it cannot find any concordant "
"alignments. A discordant alignment is an alignment where both mates align uniquely, "
"but that does not satisfy the paired-end constraints (--fr/--rf/--ff, -I, -X). "
"This option disables that behavior."),
_Switch(["--dovetail", "dovetail"],
"If the mates dovetail, that is if one mate alignment extends past the beginning of the "
"other such that the wrong mate begins upstream, consider that to be concordant. See also: "
"Mates can overlap, contain or dovetail each other. Default: mates cannot dovetail "
"in a concordant alignment."),
_Switch(["--no-contain", "no_contain"],
"If one mate alignment contains the other, consider that to be non-concordant. See also: "
"Mates can overlap, contain or dovetail each other. Default: a mate can contain "
"the other in a concordant alignment."),
_Switch(["--no-overlap", "no_overlap"],
"If one mate alignment overlaps the other at all, consider that to be non-concordant. See "
"also: Mates can overlap, contain or dovetail each other. Default: mates can overlap in "
"a concordant alignment."),
# SAM options
_Switch(["--no-unal", "no_unal"],
"Suppress SAM records for reads that failed to align"),
_Switch(["--no-hd", "no_hd"],
"Suppress SAM header lines (starting with"),
_Switch(["--no-sq", "no_sq"],
"Suppress @SQ SAM header lines"),
_Switch(["--omit-sec-seq", "omit_sec_seq"],
"When printing secondary alignments, Bowtie 2 by default will write out the SEQ and QUAL strings. "
"Specifying this option causes Bowtie 2 to print an asterix in those fields instead."),
_Option(["--rg-id", "rg_id"],
"Set the read group ID to <text>. This causes the SAM @RG header line to be printed, with <text> as"
" the value associated with the ID: tag. It also causes the RG:Z: extra field to be attached to "
"each SAM output record, with value set to <text>.",
checker_function=lambda value: type(value) is str,
equate=False),
_Option(["--rg", "rg"],
"Add <text> (usually of the form TAG:VAL, e.g. SM:Pool1) as a field on the @RG header line. Note: "
"in order for the @RG line to appear, --rg-id must also be specified. This is because the ID tag is"
" required by the SAM Spec. Specify --rg multiple times to set multiple fields. See the SAM "
"Spec for details about what fields are legal.",
checker_function=lambda value: type(value) is str,
equate=False),
# Output options
_Option(["--un", "un"],
"Write unpaired reads that fail to align to file at <path>. These reads correspond to the SAM "
"records with the FLAGS 0x4 bit set and neither the 0x40 nor 0x80 bits set. Reads written in this "
"way will appear exactly as they did in the input file, without any modification (same sequence, "
"same name, same quality string, same quality encoding). Reads will not necessarily appear in the "
"same order as they did in the input",
filename=True,
equate=False),
_Option(["--un-gz", "un_gz"],
"Write unpaired reads that fail to align to file at <path>. These reads correspond to the SAM "
"records with the FLAGS 0x4 bit set and neither the 0x40 nor 0x80 bits set. If --un-gz is "
"specified, output will be gzip compressed. Reads written in this way will appear exactly as they "
"did in the input file, without any modification (same sequence, same name, same quality string, "
"same quality encoding). Reads will not necessarily appear in the same order as they did in the "
"input",
filename=True,
equate=False),
_Option(["--un-bz2", "un_bz2"],
"Write unpaired reads that fail to align to file at <path>. These reads correspond to the SAM "
"records with the FLAGS 0x4 bit set and neither the 0x40 nor 0x80 bits set. If --un-bz2 is "
"specified, output will be bzip2 compressed. Reads written in this way will appear exactly as "
"they did in the input file, without any modification (same sequence, same name, same quality "
"string, same quality encoding). Reads will not necessarily appear in the same order as they did "
"in the input",
filename=True,
equate=False),
_Option(["--un-lz4", "un_lz4"],
"Write unpaired reads that fail to align to file at <path>. These reads correspond to the SAM "
"records with the FLAGS 0x4 bit set and neither the 0x40 nor 0x80 bits set. If --un-lz4 is "
"specified, output will be lz4 compressed. Reads written in this way will appear exactly as they "
"did in the input file, without any modification (same sequence, same name, same quality string, "
"same quality encoding). Reads will not necessarily appear in the same order as they did in the "
"input",
filename=True,
equate=False),
_Option(["--al", "al"],
"Write unpaired reads that align at least once to file at <path>. These reads correspond to the "
"SAM records with the FLAGS 0x4, 0x40, and 0x80 bits unset. Reads written in this way will appear "
"exactly as they did in the input file, without any modification (same sequence, same name, "
"same quality string, same quality encoding). Reads will not necessarily appear in the same order "
"as they did in the input",
filename=True,
equate=False),
_Option(["--al-gz", "al_gz"],
"Write unpaired reads that align at least once to file at <path>. These reads correspond to the "
"SAM records with the FLAGS 0x4, 0x40, and 0x80 bits unset. If --al-gz is specified, output will "
"be gzip compressed. Reads written in this way will appear exactly as they did in the input file, "
"without any modification (same sequence, same name, same quality string, same quality encoding). "
"Reads will not necessarily appear in the same order as they did in the input",
filename=True,
equate=False),
_Option(["--al-bz2", "al_bz2"],
"Write unpaired reads that align at least once to file at <path>. These reads correspond to the "
"SAM records with the FLAGS 0x4, 0x40, and 0x80 bits unset. If --al-bz2 is specified, output will "
"be bzip2 compressed. Reads written in this way will appear exactly as they did in the input "
"file, without any modification (same sequence, same name, same quality string, same quality "
"encoding). Reads will not necessarily appear in the same order as they did in the input",
filename=True,
equate=False),
_Option(["--al-lz4", "al_lz4"],
"Write unpaired reads that align at least once to file at <path>. These reads correspond to the "
"SAM records with the FLAGS 0x4, 0x40, and 0x80 bits unset. If --al-lz4 is specified, output will "
"be lz4 compressed. Reads written in this way will appear exactly as they did in the input file, "
"without any modification (same sequence, same name, same quality string, same quality encoding). "
"Reads will not necessarily appear in the same order as they did in the input",
filename=True,
equate=False),
_Option(["--un-conc", "un_conc"],
"Write paired-end reads that fail to align concordantly to file(s) at <path>. These reads "
"correspond to the SAM records with the FLAGS 0x4 bit set and either the 0x40 or 0x80 bit set ("
"depending on whether it's mate #1 or #2). .1 and .2 strings are added to the filename to "
"distinguish which file contains mate #1 and mate #2. If a percent symbol, %, is used in <path>, "
"the percent symbol is replaced with 1 or 2 to make the per-mate filenames. Otherwise, "
".1 or .2 are added before the final dot in <path> to make the per-mate filenames. Reads written "
"in this way will appear exactly as they did in the input files, without any modification (same "
"sequence, same name, same quality string, same quality encoding). Reads will not necessarily "
"appear in the same order as they did in the inputs",
filename=True,
equate=False),
_Option(["--un-conc-gz", "un_conc_gz"],
"Write paired-end reads that fail to align concordantly to file(s) at <path>. These reads "
"correspond to the SAM records with the FLAGS 0x4 bit set and either the 0x40 or 0x80 bit set ("
"depending on whether it's mate #1 or #2). .1 and .2 strings are added to the filename to "
"distinguish which file contains mate #1 and mate #2. If a percent symbol, %, is used in <path>, "
"the percent symbol is replaced with 1 or 2 to make the per-mate filenames. Otherwise, "
".1 or .2 are added before the final dot in <path> to make the per-mate filenames. Reads written "
"in this way will appear exactly as they did in the input files, without any modification (same "
"sequence, same name, same quality string, same quality encoding). Reads will not necessarily "
"appear in the same order as they did in the inputs",
filename=True,
equate=False),
_Option(["--un-conc-bz2", "un_conc_bz2"],
"Write paired-end reads that fail to align concordantly to file(s) at <path>. These reads "
"correspond to the SAM records with the FLAGS 0x4 bit set and either the 0x40 or 0x80 bit set ("
"depending on whether it's mate #1 or #2). .1 and .2 strings are added to the filename to "
"distinguish which file contains mate #1 and mate #2. If a percent symbol, %, is used in <path>, "
"the percent symbol is replaced with 1 or 2 to make the per-mate filenames. Otherwise, "
".1 or .2 are added before the final dot in <path> to make the per-mate filenames. Reads written "
"in this way will appear exactly as they did in the input files, without any modification (same "
"sequence, same name, same quality string, same quality encoding). Reads will not necessarily "
"appear in the same order as they did in the inputs",
filename=True,
equate=False),
_Option(["--un-conc-lz4", "un_conc_lz4"],
"Write paired-end reads that fail to align concordantly to file(s) at <path>. These reads "
"correspond to the SAM records with the FLAGS 0x4 bit set and either the 0x40 or 0x80 bit set ("
"depending on whether it's mate #1 or #2). .1 and .2 strings are added to the filename to "
"distinguish which file contains mate #1 and mate #2. If a percent symbol, %, is used in <path>, "
"the percent symbol is replaced with 1 or 2 to make the per-mate filenames. Otherwise, "
".1 or .2 are added before the final dot in <path> to make the per-mate filenames. Reads written "
"in this way will appear exactly as they did in the input files, without any modification (same "
"sequence, same name, same quality string, same quality encoding). Reads will not necessarily "
"appear in the same order as they did in the inputs",
filename=True,
equate=False),
_Option(["--al-conc", "al_conc"],
"Write paired-end reads that align concordantly at least once to file(s) at <path>. These reads "
"correspond to the SAM records with the FLAGS 0x4 bit unset and either the 0x40 or 0x80 bit set ("
"depending on whether it's mate #1 or #2). .1 and .2 strings are added to the filename to "
"distinguish which file contains mate #1 and mate #2. If a percent symbol, %, is used in <path>, "
"the percent symbol is replaced with 1 or 2 to make the per-mate filenames. Otherwise, "
".1 or .2 are added before the final dot in <path> to make the per-mate filenames. Reads written "
"in this way will appear exactly as they did in the input files, without any modification (same "
"sequence, same name, same quality string, same quality encoding). Reads will not necessarily "
"appear in the same order as they did in the inputs",
filename=True,
equate=False),
_Option(["--al-conc-gz", "al_conc_gz"],
"Write paired-end reads that align concordantly at least once to file(s) at <path>. These reads "
"correspond to the SAM records with the FLAGS 0x4 bit unset and either the 0x40 or 0x80 bit set ("
"depending on whether it's mate #1 or #2). .1 and .2 strings are added to the filename to "
"distinguish which file contains mate #1 and mate #2. If a percent symbol, %, is used in <path>, "
"the percent symbol is replaced with 1 or 2 to make the per-mate filenames. Otherwise, "
".1 or .2 are added before the final dot in <path> to make the per-mate filenames. Reads written "
"in this way will appear exactly as they did in the input files, without any modification (same "
"sequence, same name, same quality string, same quality encoding). Reads will not necessarily "
"appear in the same order as they did in the inputs",
filename=True,
equate=False),
_Option(["--al-conc-bz2", "al_conc_bz2"],
"Write paired-end reads that align concordantly at least once to file(s) at <path>. These reads "
"correspond to the SAM records with the FLAGS 0x4 bit unset and either the 0x40 or 0x80 bit set ("
"depending on whether it's mate #1 or #2). .1 and .2 strings are added to the filename to "
"distinguish which file contains mate #1 and mate #2. If a percent symbol, %, is used in <path>, "
"the percent symbol is replaced with 1 or 2 to make the per-mate filenames. Otherwise, "
".1 or .2 are added before the final dot in <path> to make the per-mate filenames. Reads written "
"in this way will appear exactly as they did in the input files, without any modification (same "
"sequence, same name, same quality string, same quality encoding). Reads will not necessarily "
"appear in the same order as they did in the inputs",
filename=True,
equate=False),
_Option(["--al-conc-lz4", "al_conc_lz4"],
"Write paired-end reads that align concordantly at least once to file(s) at <path>. These reads "
"correspond to the SAM records with the FLAGS 0x4 bit unset and either the 0x40 or 0x80 bit set ("
"depending on whether it's mate #1 or #2). .1 and .2 strings are added to the filename to "
"distinguish which file contains mate #1 and mate #2. If a percent symbol, %, is used in <path>, "
"the percent symbol is replaced with 1 or 2 to make the per-mate filenames. Otherwise, "
".1 or .2 are added before the final dot in <path> to make the per-mate filenames. Reads written "
"in this way will appear exactly as they did in the input files, without any modification (same "
"sequence, same name, same quality string, same quality encoding). Reads will not necessarily "
"appear in the same order as they did in the inputs",
filename=True,
equate=False),
_Option(["--met-file", "met_file"],
"Write bowtie2 metrics to file <path>. Having alignment metric can be useful for debugging "
"certain problems, especially performance issues. See also: --met. Default: metrics disabled",
filename=True,
equate=False),
_Option(["--met-stderr", "met_stderr"],
"Write bowtie2 metrics to the standard error (stderr) filehandle. This is not mutually exclusive "
"with --met-file. Having alignment metric can be useful for debugging certain problems, "
"especially performance issues. See also: --met. Default: metrics disabled",
filename=True,
equate=False),
_Option(["--met", "met"],
"Write a new bowtie2 metrics record every <int> seconds. Only matters if either --met-stderr or "
"--met-file are specified. Default: 1.",
checker_function=lambda value: type(value) is int,
equate=False),
_Switch(["--time", "time"],
"Print the wall-clock time required to load the index files and align the reads. This is printed "
"to the standard error (stderr) filehandle. Default: off"),
_Switch(["--quiet", "quiet"],
"Print nothing besides alignments and serious errors"),
# Performance Options
_Option(["--offrate", "offrate"],
"Override the offrate of the index with <int>. If <int> is greater than the offrate used to build "
"the index, then some row markings are discarded when the index is read into memory. This reduces "
"the memory footprint of the aligner but requires more time to calculate text offsets. <int> must "
"be greater than the value used to build the index",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--threads", "threads"],
"Launch NTHREADS parallel search threads (default: 1). Threads will run on separate "
"processors/cores and synchronize when parsing reads and outputting alignments. Searching for "
"alignments is highly parallel, and speedup is close to linear. Increasing -p increases Bowtie "
"2's memory footprint. E.g. when aligning to a human genome index, increasing -p from 1 to 8 "
"increases the memory footprint by a few hundred megabytes. This option is only available if "
"bowtie is linked with the pthreads library (i.e. if BOWTIE_PTHREADS=0 is not specified at build "
"time)",
checker_function=lambda value: type(value) is int,
equate=False),
_Switch(["--reorder", "reorder"],
"Guarantees that output SAM records are printed in an order corresponding to the order of the "
"reads in the original input file, even when -p is set greater than 1. Specifying --reorder and "
"setting -p greater than 1 causes Bowtie 2 to run somewhat slower and use somewhat more memory "
"then if --reorder were not specified. Has no effect if -p is set to 1, since output order will "
"naturally correspond to input order in that case"),
_Switch(["--mm", "mm"],
"Use memory-mapped I/O to load the index, rather than typical file I/O. Memory-mapping allows "
"many concurrent bowtie processes on the same computer to share the same memory image of the "
"index (i.e. you pay the memory overhead just once). This facilitates memory-efficient "
"parallelization of bowtie in situations where using -p is not possible or not preferable"),
]
pipe_parameters = [
_PipeArgumentList(["samcmds", "samtools"],
"Allow user to pipe bowtie2 output to samtools for bam output")
]
try:
# Insert extra parameters - at the start just in case there
# are any arguments which must come last:
# add pipe parameters to the end
self.parameters = extra_parameters + self.parameters + pipe_parameters
except AttributeError:
# Should we raise an error? The subclass should have set this up!
self.parameters = extra_parameters + pipe_parameters
_Bowtie2BaseCommandLine.__init__(self, cmd, **kwargs)
def _validate(self):
incompatibles = [["local", "end_to_end"],
["k", "a"],
["al", "al_gz", "al_bz2", "al_lz4"],
["un", "un_gz", "un_bz2", "un_lz4"],
["un_conc", "un_conc_gz", "un_conc_bz2", "un_conc_lz4"],
["al_conc", "al_conc_gz", "al_conc_bz2", "al_lz4"]]
self._validate_incompatibilities(incompatibles)
# TODO add incompatibilities
if self.bt2:
if (not self.m1 and not self.m2) and not self.U:
raise ValueError("Option bowtie2 requires input fastq.")
_Bowtie2BaseCommandLine._validate(self)
class _Bowtie2SeqBaseCommandLine(_Bowtie2BaseCommandLine):
"""Base bowtie wrapper"""
def __init__(self, cmd=None, **kwargs):
assert cmd is not None
self.parameters += [
_Argument(["bt2"],
"bt2 filename minus trailing .1.bt2/.2.bt2. bt2 data to files with this dir/basename")
]
extra_parameters = [
_Switch(["--large-index", "large_index"],
"Force bowtie2-build to build a large index, even if the reference is less than ~ 4 billion "
"nucleotides inlong."),
]
try:
# Insert extra parameters - at the start just in case there
# are any arguments which must come last:
self.parameters = extra_parameters + self.parameters
except AttributeError:
# Should we raise an error? The subclass should have set this up!
self.parameters = extra_parameters
_Bowtie2BaseCommandLine.__init__(self, cmd, **kwargs)
def _validate(self):
incompatibles = []
self._validate_incompatibilities(incompatibles)
_Bowtie2BaseCommandLine._validate(self)
class Bowtie2BuildCommandLine(_Bowtie2SeqBaseCommandLine):
"""Base bowtie2-build wrapper"""
def __init__(self, cmd='bowtie2-build', **kwargs):
assert cmd is not None
self.parameters = [
_Argument(["reference"],
"comma-separated list of files with ref sequences")
]
extra_parameters = [
# Other options
_Option(["--seed", "seed"],
"Use <int> as the seed for pseudo-random number generator. Default: 0",
checker_function=lambda value: type(value) is int,
equate=False),
_Switch(["--quiet", "quiet"],
"Print nothing besides alignments and serious errors"),
_Option(["--bmax", "bmax"],
"The maximum number of suffixes allowed in a block. Allowing more suffixes per block makes "
"indexing faster, but increases peak memory usage. Setting this option overrides any previous "
"setting for --bmax, or --bmaxdivn. Default (in terms of the --bmaxdivn parameter) is --bmaxdivn "
"4. This is configured automatically by default; use -a/--noauto to configure manually",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--bmaxdivn", "bmaxdivn"],
"The maximum number of suffixes allowed in a block, expressed as a fraction of the length of the "
"reference. Setting this option overrides any previous setting for --bmax, or --bmaxdivn. "
"Default: --bmaxdivn 4. This is configured automatically by default; use -a/--noauto to configure "
"manually",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--dcv", "dcv"],
"Use <int> as the period for the difference-cover sample. A larger period yields less memory "
"overhead, but may make suffix sorting slower, especially if repeats are present. Must be a power "
"of 2 no greater than 4096. Default: 1024. This is configured automatically by default; use "
"-a/--noauto to configure manually",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--offrate", "offrate"],
"To map alignments back to positions on the reference sequences, it's necessary to annotate ("
"mark) some or all of the Burrows-Wheeler rows with their corresponding location on the genome. "
"-o/--offrate governs how many rows get marked: the indexer will mark every 2^<int> rows. Marking "
"more rows makes reference-position lookups faster, but requires more memory to hold the "
"annotations at runtime. The default is 5 (every 32nd row is marked; for human genome, "
"annotations occupy about 340 megabytes)",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--ftabchars", "ftabchars"],
"The ftab is the lookup table used to calculate an initial Burrows-Wheeler range with respect to "
"the first <int> characters of the query. A larger <int> yields a larger lookup table but faster "
"query times. The ftab has size 4^(<int>+1) bytes. The default setting is 10 (ftab is 4MB)",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--cutoff", "cutoff"],
"Index only the first <int> bases of the reference sequences (cumulative across sequences) and "
"ignore the rest",
checker_function=lambda value: type(value) is int,
equate=False),
_Switch(["-f", "f"],
"The reference input files (specified as <reference_in>) are FASTA files (usually having "
"extension .fa, .mfa, .fna or similar)."),
_Switch(["-c", "c"],
"The reference sequences are given on the command line. I.e. <reference_in> is a comma-separated "
"list of sequences rather than a list of FASTA files."),
_Switch(["--noauto", "noauto"],
"Disable the default behavior whereby bowtie2-build automatically selects values for the --bmax, "
"--dcv and --packed parameters according to available memory. Instead, user may specify values "
"for those parameters. If memory is exhausted during indexing, an error message will be printed; "
"it is up to the user to try new parameters."),
_Switch(["--packed", "packed"],
"Use a packed (2-bits-per-nucleotide) representation for DNA strings. This saves memory but makes "
"indexing 2-3 times slower. Default: off. This is configured automatically by default; use "
"-a/--noauto to configure manually."),
_Switch(["--nodc", "nodc"],
"Disable use of the difference-cover sample. Suffix sorting becomes quadratic-time in the worst "
"case (where the worst case is an extremely repetitive reference). Default: off."),
_Switch(["--noref", "noref"],
"Do not build the NAME.3.bt2 and NAME.4.bt2 portions of the index, which contain a bitpacked "
"version of the reference sequences and are used for paired-end alignment."),
_Switch(["--justref", "justref"],
"Build only the NAME.3.bt2 and NAME.4.bt2 portions of the index, which contain a bitpacked "
"version of the reference sequences and are used for paired-end alignment."),
]
try:
# Insert extra parameters - at the start just in case there
# are any arguments which must come last:
self.parameters = extra_parameters + self.parameters
except AttributeError:
# Should we raise an error? The subclass should have set this up!
self.parameters = extra_parameters
_Bowtie2SeqBaseCommandLine.__init__(self, cmd, **kwargs)
class Bowtie2InspectCommandLine(_Bowtie2SeqBaseCommandLine):
"""Base bowtie2-inspoect wrapper"""
def __init__(self, cmd='bowtie2-inspect', **kwargs):
assert cmd is not None
self.parameters = list()
extra_parameters = [
_Option(["--across", "across"],
"When printing FASTA output, output a newline character every <int> bases (default: 60).",
checker_function=lambda value: type(value) is int,
equate=False),
_Switch(["--names", "names"],
"Print reference sequence names, one per line, and quit."),
_Switch(["--summary", "summary"],
"Print a summary that includes information about index settings, as well as the names and lengths "
"of the input sequences. Fields are separated by tabs. Colorspace is always set to 0 for Bowtie "
"2."),
_Switch(["--verbose", "verbose"],
"Print verbose output (for debugging)."),
]
try:
# Insert extra parameters - at the start just in case there
# are any arguments which must come last:
self.parameters = extra_parameters + self.parameters
except AttributeError:
# Should we raise an error? The subclass should have set this up!
self.parameters = extra_parameters
_Bowtie2SeqBaseCommandLine.__init__(self, cmd, **kwargs)
if __name__ == '__main__':
from Bio.Sequencing.Applications import SamtoolsViewCommandline, SamtoolsSortCommandline
ubam = "/data/2015-SEQ-1283/qualimap_results/2015-SEQ-1283.sorted.bam"
samsortt = SamtoolsSortCommandline(input_bam="-", out_prefix=ubam[:-4])
samtoolss = [SamtoolsViewCommandline(b=True, S=True, input_file="-"), samsortt]
# print samtools
# print Bowtie2CommandLine(bt2="test", m1="none", m2="yes", samtools=samtoolss)
# print Bowtie2InspectCommandLine(bt2="test")
pass
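# Illustrative sketch (not part of the original demo; left commented out so the
# module still imports cleanly without bowtie2 installed): combining the
# wrappers above to build an index and run a paired-end alignment piped through
# samtools. All paths and basenames below are hypothetical placeholders.
# build_cmd = Bowtie2BuildCommandLine(reference="ref.fa", bt2="ref_index")
# align_cmd = Bowtie2CommandLine(bt2="ref_index", m1="reads_1.fq",
#                                m2="reads_2.fq", samtools=samtoolss)
# print build_cmd
# print align_cmd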
| mit |
gramps-project/gramps | gramps/gen/datehandler/_date_ru.py | 6 | 6750 | # -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Russian-specific classes for parsing and displaying dates.
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import re
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..lib.date import Date
from ._dateparser import DateParser
from ._datedisplay import DateDisplay
from ._datehandler import register_datehandler
#-------------------------------------------------------------------------
#
# Russian parser
#
#-------------------------------------------------------------------------
class DateParserRU(DateParser):
modifier_to_int = {
'перед' : Date.MOD_BEFORE,
'по' : Date.MOD_BEFORE,
'до' : Date.MOD_BEFORE,
'после' : Date.MOD_AFTER,
'п.' : Date.MOD_AFTER,
'п' : Date.MOD_AFTER,
'с' : Date.MOD_AFTER,
'ок' : Date.MOD_ABOUT,
'ок.' : Date.MOD_ABOUT,
'около' : Date.MOD_ABOUT,
'примерно' : Date.MOD_ABOUT,
'прим' : Date.MOD_ABOUT,
'прим.' : Date.MOD_ABOUT,
'приблизительно' : Date.MOD_ABOUT,
'приб.' : Date.MOD_ABOUT,
'прибл.' : Date.MOD_ABOUT,
'приб' : Date.MOD_ABOUT,
'прибл' : Date.MOD_ABOUT,
}
quality_to_int = {
'оценено' : Date.QUAL_ESTIMATED,
'оцен.' : Date.QUAL_ESTIMATED,
'оц.' : Date.QUAL_ESTIMATED,
'оцен' : Date.QUAL_ESTIMATED,
'оц' : Date.QUAL_ESTIMATED,
'вычислено' : Date.QUAL_CALCULATED,
'вычисл.' : Date.QUAL_CALCULATED,
'выч.' : Date.QUAL_CALCULATED,
'вычисл' : Date.QUAL_CALCULATED,
'выч' : Date.QUAL_CALCULATED,
}
bce = [
'до нашей эры', 'до н. э.', 'до н.э.',
'до н э', 'до нэ'] + DateParser.bce
def init_strings(self):
DateParser.init_strings(self)
DateParser.calendar_to_int.update({
'персидский' : Date.CAL_PERSIAN,
'п' : Date.CAL_PERSIAN,
})
_span_1 = ['с', 'от']
#_span_2 = ['по', 'до'] # <-- clashes with bce parsing :-(
_span_2 = ['по']
_range_1 = ['между', r'меж\.', 'меж']
_range_2 = ['и']
self._span = re.compile(r"(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)"
% ('|'.join(_span_1), '|'.join(_span_2)),
re.IGNORECASE)
self._range = re.compile(r"(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)"
% ('|'.join(_range_1), '|'.join(_range_2)),
re.IGNORECASE)
#-------------------------------------------------------------------------
#
# Russian displayer
#
#-------------------------------------------------------------------------
class DateDisplayRU(DateDisplay):
"""
Russian language date display class.
"""
# TODO fix BUG 7064: non-Gregorian calendars wrongly use BCE notation for negative dates
# not refactoring _bce_str into base class because it'll be gone under #7064
_bce_str = "%s до н.э."
display = DateDisplay.display_formatted
def dd_dformat04(self, date_val, inflect, long_months):
"""
day month_name year -- for Russian locale
"""
year = self._slash_year(date_val[2], date_val[3])
if date_val[0] == 0:
if date_val[1] == 0:
return year
else:
return self.format_long_month_year(date_val[1], year,
inflect, long_months)
elif date_val[1] == 0: # month is zero but day is not (see 8477)
return self.display_iso(date_val)
elif not hasattr(long_months[date_val[1]], 'f'): # not a Lexeme
return "{day:d} {long_month} {year}".format(
day = date_val[0],
long_month = long_months[date_val[1]],
year = year)
else:
return "{day:d} {long_month.f[Р]} {year}".format(
day = date_val[0],
long_month = long_months[date_val[1]],
year = year)
def dd_dformat05(self, date_val, inflect, short_months):
"""
day month_abbreviation year -- for Russian locale
"""
year = self._slash_year(date_val[2], date_val[3])
if date_val[0] == 0:
if date_val[1] == 0:
return year
else:
return self.format_short_month_year(date_val[1], year,
inflect, short_months)
elif date_val[1] == 0: # month is zero but day is not (see 8477)
return self.display_iso(date_val)
elif not hasattr(short_months[date_val[1]], 'f'): # not a Lexeme
return "{day:d} {short_month} {year}".format(
day = date_val[0],
short_month = short_months[date_val[1]],
year = year)
else:
return "{day:d} {short_month.f[Р]} {year}".format(
day = date_val[0],
short_month = short_months[date_val[1]],
year = year)
#-------------------------------------------------------------------------
#
# Register classes
#
#-------------------------------------------------------------------------
register_datehandler(
('ru_RU', 'ru', 'russian', 'Russian', ('%d.%m.%Y',)),
DateParserRU, DateDisplayRU)
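# Illustrative note (an addition, not from the original module): once these
# handlers are registered, a Gregorian date such as 1 May 1881 is expected to
# display as "1 мая 1881" -- dd_dformat04/dd_dformat05 use the genitive ('Р')
# form when the month Lexeme provides one, and the plain month name otherwise.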
| gpl-2.0 |
noobies/f240l-kernel | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
ahmadiga/min_edx | openedx/core/djangoapps/content/course_overviews/admin.py | 25 | 1357 | """
Django admin page for CourseOverviews, the basic metadata about a course that
is used in user dashboard queries and other places where you need info like
name and start dates, but don't actually need to crawl into course content.
"""
from django.contrib import admin
from .models import CourseOverview
class CourseOverviewAdmin(admin.ModelAdmin):
"""
Simple, read-only list/search view of Course Overviews.
The detail view is broken because our primary key for this model are
course keys, which can have a number of chars that break admin URLs.
There's probably a way to make this work properly, but I don't have the
time to investigate. I would normally disable the links by setting
`list_display_links = None`, but that's not a valid value for that
field in Django 1.4. So I'm left with creating a page where the detail
view links are all broken for Split courses. Because I only created
this page to manually test a hotfix, the list view works for this
purpose, and that's all the yak I have time to shave today.
"""
list_display = [
'id',
'display_name',
'version',
'enrollment_start',
'enrollment_end',
'created',
'modified',
]
search_fields = ['id', 'display_name']
admin.site.register(CourseOverview, CourseOverviewAdmin)
| agpl-3.0 |
superdesk/superdesk-core | superdesk/data_updates.py | 1 | 1044 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk.resource import Resource
from superdesk.services import BaseService
import superdesk.metadata.utils
import datetime
import superdesk
class DataUpdatesResource(Resource):
schema = {"name": {"type": "string", "required": True}, "applied": {"type": "datetime", "required": True}}
internal_resource = True
item_url = superdesk.metadata.utils.item_url
class DataUpdatesService(BaseService):
def on_create(self, docs):
for doc in docs:
doc["applied"] = datetime.datetime.now()
def init_app(app) -> None:
endpoint_name = "data_updates"
service = DataUpdatesService(endpoint_name, backend=superdesk.get_backend())
DataUpdatesResource(endpoint_name, app=app, service=service)
| agpl-3.0 |
DanielTakeshi/rl_algorithms | dqn/atari_wrappers.py | 9 | 5290 | import cv2
import numpy as np
from collections import deque
import gym
from gym import spaces
class NoopResetEnv(gym.Wrapper):
def __init__(self, env=None, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
super(NoopResetEnv, self).__init__(env)
self.noop_max = noop_max
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def _reset(self):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset()
noops = np.random.randint(1, self.noop_max + 1)
for _ in range(noops):
obs, _, _, _ = self.env.step(0)
return obs
class FireResetEnv(gym.Wrapper):
def __init__(self, env=None):
"""Take action on reset for environments that are fixed until firing."""
super(FireResetEnv, self).__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def _reset(self):
self.env.reset()
obs, _, _, _ = self.env.step(1)
obs, _, _, _ = self.env.step(2)
return obs
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env=None):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
super(EpisodicLifeEnv, self).__init__(env)
self.lives = 0
self.was_real_done = True
self.was_real_reset = False
def _step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so its important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def _reset(self):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset()
self.was_real_reset = True
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.was_real_reset = False
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env=None, skip=4):
"""Return only every `skip`-th frame"""
super(MaxAndSkipEnv, self).__init__(env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = deque(maxlen=2)
self._skip = skip
def _step(self, action):
total_reward = 0.0
done = None
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
self._obs_buffer.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(np.stack(self._obs_buffer), axis=0)
return max_frame, total_reward, done, info
def _reset(self):
"""Clear past frame buffer and init. to first obs. from inner env."""
self._obs_buffer.clear()
obs = self.env.reset()
self._obs_buffer.append(obs)
return obs
def _process_frame84(frame):
img = np.reshape(frame, [210, 160, 3]).astype(np.float32)
img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
resized_screen = cv2.resize(img, (84, 110), interpolation=cv2.INTER_LINEAR)
x_t = resized_screen[18:102, :]
x_t = np.reshape(x_t, [84, 84, 1])
return x_t.astype(np.uint8)
class ProcessFrame84(gym.Wrapper):
def __init__(self, env=None):
super(ProcessFrame84, self).__init__(env)
self.observation_space = spaces.Box(low=0, high=255, shape=(84, 84, 1))
def _step(self, action):
obs, reward, done, info = self.env.step(action)
return _process_frame84(obs), reward, done, info
def _reset(self):
return _process_frame84(self.env.reset())
class ClippedRewardsWrapper(gym.Wrapper):
def _step(self, action):
obs, reward, done, info = self.env.step(action)
return obs, np.sign(reward), done, info
def wrap_deepmind_ram(env):
env = EpisodicLifeEnv(env)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = ClippedRewardsWrapper(env)
return env
def wrap_deepmind(env):
assert 'NoFrameskip' in env.spec.id
env = EpisodicLifeEnv(env)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = ProcessFrame84(env)
env = ClippedRewardsWrapper(env)
return env
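# Illustrative usage sketch (assumes a gym installation with the Atari
# environments available; the environment id below is only an example, not
# something this module requires):
#
# import gym
# env = wrap_deepmind(gym.make('PongNoFrameskip-v4'))
# obs = env.reset()                                   # 84x84x1 uint8 frame
# obs, reward, done, info = env.step(env.action_space.sample())  # reward in {-1, 0, 1}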
| mit |
Pluto-tv/chromium-crosswalk | tools/telemetry/telemetry/internal/actions/page_action.py | 29 | 4383 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from telemetry import decorators
GESTURE_SOURCE_DEFAULT = 'DEFAULT'
GESTURE_SOURCE_MOUSE = 'MOUSE'
GESTURE_SOURCE_TOUCH = 'TOUCH'
SUPPORTED_GESTURE_SOURCES = (GESTURE_SOURCE_DEFAULT,
GESTURE_SOURCE_MOUSE,
GESTURE_SOURCE_TOUCH)
class PageActionNotSupported(Exception):
pass
class PageActionFailed(Exception):
pass
class PageAction(object):
"""Represents an action that a user might try to perform to a page."""
def WillRunAction(self, tab):
"""Override to do action-specific setup before
Test.WillRunAction is called."""
pass
def RunAction(self, tab):
raise NotImplementedError()
def CleanUp(self, tab):
pass
def EvaluateCallbackWithElement(
tab, callback_js, selector=None, text=None, element_function=None,
wait=False, timeout_in_seconds=60):
"""Evaluates the JavaScript callback with the given element.
The element may be selected via selector, text, or element_function.
Only one of these arguments must be specified.
Returns:
The callback's return value, if any. The return value must be
convertible to JSON.
Args:
tab: A telemetry.core.Tab object.
callback_js: The JavaScript callback to call (as string).
The callback receive 2 parameters: the element, and information
string about what method was used to retrieve the element.
Example: '''
function(element, info) {
if (!element) {
throw Error('Can not find element: ' + info);
}
element.click()
}'''
selector: A CSS selector describing the element.
text: The element must contains this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'(function() { return foo.element; })()'.
wait: Whether to wait for the return value to be true.
timeout_in_seconds: The timeout for wait (if waiting).
"""
count = 0
info_msg = ''
if element_function is not None:
count = count + 1
info_msg = 'using element_function "%s"' % re.escape(element_function)
if selector is not None:
count = count + 1
info_msg = 'using selector "%s"' % _EscapeSelector(selector)
element_function = 'document.querySelector(\'%s\')' % _EscapeSelector(
selector)
if text is not None:
count = count + 1
info_msg = 'using exact text match "%s"' % re.escape(text)
element_function = '''
(function() {
function _findElement(element, text) {
if (element.innerHTML == text) {
return element;
}
var childNodes = element.childNodes;
for (var i = 0, len = childNodes.length; i < len; ++i) {
var found = _findElement(childNodes[i], text);
if (found) {
return found;
}
}
return null;
}
return _findElement(document, '%s');
})()''' % text
if count != 1:
raise PageActionFailed(
'Must specify 1 way to retrieve element, but %s was specified.' % count)
code = '''
(function() {
var element = %s;
var callback = %s;
return callback(element, '%s');
})()''' % (element_function, callback_js, info_msg)
if wait:
tab.WaitForJavaScriptExpression(code, timeout_in_seconds)
return True
else:
return tab.EvaluateJavaScript(code)
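# Illustrative sketch (hypothetical tab and selector values): clicking the
# first element matching a CSS selector via the callback contract documented
# above.
#
# EvaluateCallbackWithElement(
#     tab,
#     '''function(element, info) {
#          if (!element) { throw Error('Cannot find element: ' + info); }
#          element.click();
#        }''',
#     selector='#submit-button')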
def _EscapeSelector(selector):
return selector.replace('\'', '\\\'')
@decorators.Cache
def IsGestureSourceTypeSupported(tab, gesture_source_type):
# TODO(dominikg): remove once support for
# 'chrome.gpuBenchmarking.gestureSourceTypeSupported' has
# been rolled into reference build.
if tab.EvaluateJavaScript("""
typeof chrome.gpuBenchmarking.gestureSourceTypeSupported ===
'undefined'"""):
return (tab.browser.platform.GetOSName() != 'mac' or
gesture_source_type.lower() != 'touch')
return tab.EvaluateJavaScript("""
chrome.gpuBenchmarking.gestureSourceTypeSupported(
chrome.gpuBenchmarking.%s_INPUT)"""
% (gesture_source_type.upper()))
| bsd-3-clause |
Mitchkoens/sympy | examples/advanced/relativity.py | 89 | 4384 | #!/usr/bin/env python
"""
This example calculates the Ricci tensor from the metric and does this
on the example of Schwarzschild solution.
If you want to derive this by hand, follow the wiki page here:
http://en.wikipedia.org/wiki/Deriving_the_Schwarzschild_solution
Also read the above wiki and follow the references from there if
something is not clear, like what the Ricci tensor is, etc.
"""
from sympy import (exp, Symbol, sin, Rational, Derivative, dsolve, Function,
Matrix, Eq, pprint, Pow, classify_ode, solve)
def grad(f, X):
a = []
for x in X:
a.append(f.diff(x))
return a
def d(m, x):
return grad(m[0, 0], x)
class MT(object):
def __init__(self, m):
self.gdd = m
self.guu = m.inv()
def __str__(self):
return "g_dd =\n" + str(self.gdd)
def dd(self, i, j):
return self.gdd[i, j]
def uu(self, i, j):
return self.guu[i, j]
class G(object):
def __init__(self, g, x):
self.g = g
self.x = x
def udd(self, i, k, l):
g = self.g
x = self.x
r = 0
for m in [0, 1, 2, 3]:
r += g.uu(i, m)/2 * (g.dd(m, k).diff(x[l]) + g.dd(m, l).diff(x[k])
- g.dd(k, l).diff(x[m]))
return r
class Riemann(object):
def __init__(self, G, x):
self.G = G
self.x = x
def uddd(self, rho, sigma, mu, nu):
G = self.G
x = self.x
r = G.udd(rho, nu, sigma).diff(x[mu]) - G.udd(rho, mu, sigma).diff(x[nu])
for lam in [0, 1, 2, 3]:
r += G.udd(rho, mu, lam)*G.udd(lam, nu, sigma) \
- G.udd(rho, nu, lam)*G.udd(lam, mu, sigma)
return r
class Ricci(object):
def __init__(self, R, x):
self.R = R
self.x = x
self.g = R.G.g
def dd(self, mu, nu):
R = self.R
x = self.x
r = 0
for lam in [0, 1, 2, 3]:
r += R.uddd(lam, mu, lam, nu)
return r
def ud(self, mu, nu):
r = 0
for lam in [0, 1, 2, 3]:
r += self.g.uu(mu, lam)*self.dd(lam, nu)
return r.expand()
def curvature(Rmn):
return Rmn.ud(0, 0) + Rmn.ud(1, 1) + Rmn.ud(2, 2) + Rmn.ud(3, 3)
nu = Function("nu")
lam = Function("lambda")
t = Symbol("t")
r = Symbol("r")
theta = Symbol(r"theta")
phi = Symbol(r"phi")
# general, spherically symmetric metric
gdd = Matrix((
(-exp(nu(r)), 0, 0, 0),
(0, exp(lam(r)), 0, 0),
(0, 0, r**2, 0),
(0, 0, 0, r**2*sin(theta)**2)
))
g = MT(gdd)
X = (t, r, theta, phi)
Gamma = G(g, X)
Rmn = Ricci(Riemann(Gamma, X), X)
def pprint_Gamma_udd(i, k, l):
pprint(Eq(Symbol('Gamma^%i_%i%i' % (i, k, l)), Gamma.udd(i, k, l)))
def pprint_Rmn_dd(i, j):
pprint(Eq(Symbol('R_%i%i' % (i, j)), Rmn.dd(i, j)))
# from Differential Equations example
def eq1():
r = Symbol("r")
e = Rmn.dd(0, 0)
e = e.subs(nu(r), -lam(r))
pprint(dsolve(e, lam(r)))
def eq2():
r = Symbol("r")
e = Rmn.dd(1, 1)
C = Symbol("CC")
e = e.subs(nu(r), -lam(r))
pprint(dsolve(e, lam(r)))
def eq3():
r = Symbol("r")
e = Rmn.dd(2, 2)
e = e.subs(nu(r), -lam(r))
pprint(dsolve(e, lam(r)))
def eq4():
r = Symbol("r")
e = Rmn.dd(3, 3)
e = e.subs(nu(r), -lam(r))
pprint(dsolve(e, lam(r)))
pprint(dsolve(e, lam(r), 'best'))
def main():
print("Initial metric:")
pprint(gdd)
print("-"*40)
print("Christoffel symbols:")
pprint_Gamma_udd(0, 1, 0)
pprint_Gamma_udd(0, 0, 1)
print()
pprint_Gamma_udd(1, 0, 0)
pprint_Gamma_udd(1, 1, 1)
pprint_Gamma_udd(1, 2, 2)
pprint_Gamma_udd(1, 3, 3)
print()
pprint_Gamma_udd(2, 2, 1)
pprint_Gamma_udd(2, 1, 2)
pprint_Gamma_udd(2, 3, 3)
print()
pprint_Gamma_udd(3, 2, 3)
pprint_Gamma_udd(3, 3, 2)
pprint_Gamma_udd(3, 1, 3)
pprint_Gamma_udd(3, 3, 1)
print("-"*40)
print("Ricci tensor:")
pprint_Rmn_dd(0, 0)
e = Rmn.dd(1, 1)
pprint_Rmn_dd(1, 1)
pprint_Rmn_dd(2, 2)
pprint_Rmn_dd(3, 3)
print("-"*40)
print("Solve Einstein's equations:")
e = e.subs(nu(r), -lam(r)).doit()
l = dsolve(e, lam(r))
pprint(l)
lamsol = solve(l, lam(r))[0]
metric = gdd.subs(lam(r), lamsol).subs(nu(r), -lamsol) # .combine()
print("metric:")
pprint(metric)
if __name__ == "__main__":
main()
| bsd-3-clause |
Venturi/cms | env/lib/python2.7/site-packages/filer/admin/patched/admin_utils.py | 5 | 3016 | # -*- coding: utf-8 -*-
"""
Copy of ``django.contrib.admin.utils.get_deleted_objects`` and a subclass of
``django.contrib.admin.utils.NestedObjects`` that work with django_polymorphic querysets.
Ultimately these should go directly into django_polymorphic or, in a more generic way, into django itself.
This code has been copied from Django 1.4.
At all locations where something has been changed, there are inline comments in the code.
"""
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
try:
from django.utils.encoding import force_text
except ImportError:
# Django < 1.5
from django.utils.encoding import force_unicode as force_text
from filer.utils.compatibility import (get_delete_permission, NestedObjects,
quote)
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
# --- begin patch ---
collector = PolymorphicAwareNestedObjects(using=using)
# --- end patch ---
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
if has_admin:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.object_name.lower()),
None, (quote(obj._get_pk_val()),))
p = get_delete_permission(opts)
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return mark_safe('%s: <a href="%s">%s</a>' %
(escape(capfirst(opts.verbose_name)),
admin_url,
escape(obj)))
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, perms_needed, protected
class PolymorphicAwareNestedObjects(NestedObjects):
def collect(self, objs, source_attr=None, **kwargs):
if hasattr(objs, 'non_polymorphic'):
# .filter() is needed, because there may already be cached
# polymorphic results in the queryset
objs = objs.non_polymorphic().filter()
return super(PolymorphicAwareNestedObjects, self).collect(
objs, source_attr=source_attr, **kwargs)
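# Illustrative sketch (hypothetical queryset/request values): a ModelAdmin
# delete view would call the patched helper the same way Django's stock
# delete_confirmation view calls its own get_deleted_objects.
#
# to_delete, perms_needed, protected = get_deleted_objects(
#     queryset, queryset.model._meta, request.user, admin_site, using='default')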
| gpl-2.0 |
crew/metrics-api | doc/source/conf.py | 1 | 7185 | # -*- coding: utf-8 -*-
#
# Crew Metrics API documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 21 19:18:52 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Crew Metrics API'
copyright = u'2010, Alex Lee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CrewMetricsAPIdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CrewMetricsAPI.tex', u'Crew Metrics API Documentation',
u'Alex Lee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'crewmetricsapi', u'Crew Metrics API Documentation',
[u'Alex Lee'], 1)
]
| mit |
google/contentbox | main/models.py | 4 | 6204 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from main.managers import PublishedBoxesManager
from uuslug import uuslug
from django.utils.translation import ugettext_lazy as _
class Box(models.Model):
CODELAB = "codelab"
TRACKSESSION = "tracksession"
KEYNOTE = "keynote"
WORKSHOP = "workshop"
CONTENT_TYPE_CHOICES = (
(CODELAB, _("CodeLab")),
(TRACKSESSION, _("Track Session")),
(KEYNOTE, _("Keynote")),
(WORKSHOP, _("Workshop"))
)
title = models.CharField(max_length=100)
slug = models.SlugField(max_length = 80)
image = models.FileField(upload_to='box_images')
back_image = models.FileField(upload_to='box_back_images')
type = models.CharField(max_length=15, choices=CONTENT_TYPE_CHOICES)
description = models.TextField(max_length=1000)
github_url = models.URLField(blank=True)
drive_folder_url = models.URLField(blank=True)
published = models.BooleanField(default=False)
instructor = models.ForeignKey('main.Instructor')
language = models.ForeignKey('main.Language')
creator = models.ForeignKey(User, related_name="boxes", null=True, blank=True, on_delete=models.SET_NULL)
objects = models.Manager()
published_boxes = PublishedBoxesManager()
class Meta:
verbose_name_plural = "boxes"
def __unicode__(self):
return u"%s" % self.title
def save(self, *args, **kwargs):
self.slug = uuslug(self.title, instance=self, max_length=80, word_boundary=True)
super(Box, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('main:box_detail', args=[self.slug])
def register_user(self, user):
try:
BoxRegistration.objects.get(user=user, box=self)
except BoxRegistration.DoesNotExist:
BoxRegistration.objects.create(user=user, box=self)
def unregister_user(self, user):
BoxRegistration.objects.filter(user=user, box=self).delete()
def is_registered(self, user):
try:
BoxRegistration.objects.get(user=user.id,box=self)
return True
except BoxRegistration.DoesNotExist:
return False
def get_tags(self):
tags = []
box_tags = self.boxtag_set.all()
for boxtag in box_tags:
tags.append(boxtag.tag)
return tags
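# Illustrative usage sketch (not part of the original models module): a view
# could register the signed-in user for a published box and check membership.
# `request` and `slug` are hypothetical view-local names.
#
#   box = Box.published_boxes.get(slug=slug)
#   box.register_user(request.user)
#   assert box.is_registered(request.user)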
class Unit(models.Model):
title = models.CharField(max_length=300)
video_link = models.URLField()
order = models.IntegerField()
box = models.ForeignKey(Box, related_name='units')
def __unicode__(self):
return u"%s - %s" % (self.box, self.video_link)
class Meta:
ordering = ['order']
class Tag(models.Model):
title = models.CharField(max_length=50)
icon = models.FileField(upload_to='tag_images')
slug = models.SlugField(max_length = 40)
description = models.TextField(max_length=500, blank=True)
class Meta:
ordering = ['title']
def __unicode__(self):
return u"%s" % self.title
def save(self, *args, **kwargs):
self.slug = uuslug(self.title, instance=self, max_length=40, word_boundary=True)
super(Tag, self).save(*args, **kwargs)
class Link(models.Model):
title = models.CharField(max_length=50)
url = models.URLField()
box = models.ForeignKey(Box, related_name="links")
def __unicode__(self):
return u"%s" % self.title
class BoxTag(models.Model):
box = models.ForeignKey(Box)
tag = models.ForeignKey(Tag)
def __unicode__(self):
return u"%s - %s" % (self.box, self.tag)
class BoxRegistration(models.Model):
box = models.ForeignKey(Box)
user = models.ForeignKey(User)
def __unicode__(self):
return u"%s - %s" % (self.box, self.user)
class UserTag(models.Model):
user = models.ForeignKey(User)
tag = models.ForeignKey(Tag)
def __unicode__(self):
return u"%s - %s" % (self.user, self.tag)
class Instructor(models.Model):
PLATFORM = "platform"
ENGINEERING = "engineering"
GDE = "gde"
GDG = "gdg"
TECHMAKERS = "techmakers"
OTHERS = "others"
AFFILIATION_CHOICES = (
(PLATFORM, _("Google Developer Platform")),
(ENGINEERING, _("Google Engineering")),
(GDE, _("Google Developer Experts")),
(GDG, _("Google Developers Groups")),
(TECHMAKERS, _("Women Techmakers")),
(OTHERS, _("Google Others"))
)
name = models.CharField(max_length=100)
affiliation = models.CharField(max_length=15, choices=AFFILIATION_CHOICES)
description = models.TextField(max_length=100)
image = models.FileField(upload_to='instructor_images')
plus_page_id = models.CharField(max_length=100, blank=True)
def __unicode__(self):
return u"%s" % self.name
class Language(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField(max_length = 80)
short_name = models.CharField(max_length=300, blank=True)
image = models.FileField(upload_to='language_images')
def __unicode__(self):
return u"%s" % self.name
def save(self, *args, **kwargs):
self.slug = uuslug(self.name, instance=self, max_length=80, word_boundary=True)
super(Language, self).save(*args, **kwargs)
| apache-2.0 |
davidcusatis/horizon | openstack_dashboard/static_settings.py | 7 | 6577 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This file contains configuration for the locations of all the static file
libraries, such as JavaScript and CSS libraries. Packagers for individual
distributions can edit or replace this file, in order to change the paths
to match their distribution's standards.
"""
import os
import xstatic.main
import xstatic.pkg.angular
import xstatic.pkg.angular_bootstrap
import xstatic.pkg.angular_gettext
import xstatic.pkg.angular_lrdragndrop
import xstatic.pkg.angular_smart_table
import xstatic.pkg.bootstrap_datepicker
import xstatic.pkg.bootstrap_scss
import xstatic.pkg.bootswatch
import xstatic.pkg.d3
import xstatic.pkg.font_awesome
import xstatic.pkg.hogan
import xstatic.pkg.jasmine
import xstatic.pkg.jquery
import xstatic.pkg.jquery_migrate
import xstatic.pkg.jquery_quicksearch
import xstatic.pkg.jquery_tablesorter
import xstatic.pkg.jquery_ui
import xstatic.pkg.jsencrypt
import xstatic.pkg.mdi
import xstatic.pkg.rickshaw
import xstatic.pkg.roboto_fontface
import xstatic.pkg.spin
import xstatic.pkg.termjs
from horizon.utils import file_discovery
def get_staticfiles_dirs(webroot='/'):
STATICFILES_DIRS = [
('horizon/lib/angular',
xstatic.main.XStatic(xstatic.pkg.angular,
root_url=webroot).base_dir),
('horizon/lib/angular',
xstatic.main.XStatic(xstatic.pkg.angular_bootstrap,
root_url=webroot).base_dir),
('horizon/lib/angular',
xstatic.main.XStatic(xstatic.pkg.angular_gettext,
root_url=webroot).base_dir),
('horizon/lib/angular',
xstatic.main.XStatic(xstatic.pkg.angular_lrdragndrop,
root_url=webroot).base_dir),
('horizon/lib/angular',
xstatic.main.XStatic(xstatic.pkg.angular_smart_table,
root_url=webroot).base_dir),
('horizon/lib/bootstrap_datepicker',
xstatic.main.XStatic(xstatic.pkg.bootstrap_datepicker,
root_url=webroot).base_dir),
('bootstrap',
xstatic.main.XStatic(xstatic.pkg.bootstrap_scss,
root_url=webroot).base_dir),
('horizon/lib/bootswatch',
xstatic.main.XStatic(xstatic.pkg.bootswatch,
root_url=webroot).base_dir),
('horizon/lib',
xstatic.main.XStatic(xstatic.pkg.d3,
root_url=webroot).base_dir),
('horizon/lib',
xstatic.main.XStatic(xstatic.pkg.hogan,
root_url=webroot).base_dir),
('horizon/lib/font-awesome',
xstatic.main.XStatic(xstatic.pkg.font_awesome,
root_url=webroot).base_dir),
('horizon/lib/jasmine',
xstatic.main.XStatic(xstatic.pkg.jasmine,
root_url=webroot).base_dir),
('horizon/lib/jquery',
xstatic.main.XStatic(xstatic.pkg.jquery,
root_url=webroot).base_dir),
('horizon/lib/jquery',
xstatic.main.XStatic(xstatic.pkg.jquery_migrate,
root_url=webroot).base_dir),
('horizon/lib/jquery',
xstatic.main.XStatic(xstatic.pkg.jquery_quicksearch,
root_url=webroot).base_dir),
('horizon/lib/jquery',
xstatic.main.XStatic(xstatic.pkg.jquery_tablesorter,
root_url=webroot).base_dir),
('horizon/lib/jsencrypt',
xstatic.main.XStatic(xstatic.pkg.jsencrypt,
root_url=webroot).base_dir),
('horizon/lib/mdi',
xstatic.main.XStatic(xstatic.pkg.mdi,
root_url=webroot).base_dir),
('horizon/lib',
xstatic.main.XStatic(xstatic.pkg.rickshaw,
root_url=webroot).base_dir),
('horizon/lib/roboto_fontface',
xstatic.main.XStatic(xstatic.pkg.roboto_fontface,
root_url=webroot).base_dir),
('horizon/lib',
xstatic.main.XStatic(xstatic.pkg.spin,
root_url=webroot).base_dir),
('horizon/lib',
xstatic.main.XStatic(xstatic.pkg.termjs,
root_url=webroot).base_dir),
]
if xstatic.main.XStatic(xstatic.pkg.jquery_ui,
root_url=webroot).version.startswith('1.10.'):
# The 1.10.x versions already contain the 'ui' directory.
STATICFILES_DIRS.append(
('horizon/lib/jquery-ui',
xstatic.main.XStatic(xstatic.pkg.jquery_ui,
root_url=webroot).base_dir))
else:
# Newer versions dropped the directory, add it to keep the path the
# same.
STATICFILES_DIRS.append(
('horizon/lib/jquery-ui/ui',
xstatic.main.XStatic(xstatic.pkg.jquery_ui,
root_url=webroot).base_dir))
return STATICFILES_DIRS
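# Hedged wiring sketch (not part of the original module): a Django settings
# module would typically feed the computed list straight into STATICFILES_DIRS
# so collectstatic can find the xstatic packages. `WEBROOT` is an assumed
# settings value.
#
#   STATICFILES_DIRS = get_staticfiles_dirs(WEBROOT)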
def find_static_files(HORIZON_CONFIG):
import horizon
import openstack_dashboard
os_dashboard_home_dir = openstack_dashboard.__path__[0]
horizon_home_dir = horizon.__path__[0]
# note the path must end in a '/' or the resultant file paths will have a
# leading "/"
file_discovery.populate_horizon_config(
HORIZON_CONFIG,
os.path.join(horizon_home_dir, 'static/')
)
# filter out non-angular javascript code and lib
HORIZON_CONFIG['js_files'] = ([f for f in HORIZON_CONFIG['js_files']
if not f.startswith('horizon/')])
# note the path must end in a '/' or the resultant file paths will have a
# leading "/"
file_discovery.populate_horizon_config(
HORIZON_CONFIG,
os.path.join(os_dashboard_home_dir, 'static/'),
sub_path='app/'
)
| apache-2.0 |
Kingclove/lab5info3180 | main/lib/werkzeug/contrib/profiler.py | 315 | 4920 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.profiler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides a simple WSGI profiler middleware for finding
    bottlenecks in web applications. It uses the :mod:`profile` or
:mod:`cProfile` module to do the profiling and writes the stats to the
    stream provided (defaults to stdout).
Example usage::
from werkzeug.contrib.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app)
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys, time, os.path
try:
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from pstats import Stats
available = True
except ImportError:
available = False
class MergeStream(object):
"""An object that redirects `write` calls to multiple streams.
Use this to log to both `sys.stdout` and a file::
f = open('profiler.log', 'w')
stream = MergeStream(sys.stdout, f)
profiler = ProfilerMiddleware(app, stream)
"""
def __init__(self, *streams):
if not streams:
raise TypeError('at least one stream must be given')
self.streams = streams
def write(self, data):
for stream in self.streams:
stream.write(data)
class ProfilerMiddleware(object):
"""Simple profiler middleware. Wraps a WSGI application and profiles
a request. This intentionally buffers the response so that timings are
more exact.
By giving the `profile_dir` argument, pstat.Stats files are saved to that
directory, one file per request. Without it, a summary is printed to
`stream` instead.
For the exact meaning of `sort_by` and `restrictions` consult the
:mod:`profile` documentation.
.. versionadded:: 0.9
Added support for `restrictions` and `profile_dir`.
:param app: the WSGI application to profile.
    :param stream: the stream for the profiled stats. defaults to stdout.
:param sort_by: a tuple of columns to sort the result by.
    :param restrictions: a tuple of profiling restrictions, not used if dumping
to `profile_dir`.
:param profile_dir: directory name to save pstat files
"""
def __init__(self, app, stream=None,
sort_by=('time', 'calls'), restrictions=(), profile_dir=None):
if not available:
raise RuntimeError('the profiler is not available because '
'profile or pstat is not installed.')
self._app = app
self._stream = stream or sys.stdout
self._sort_by = sort_by
self._restrictions = restrictions
self._profile_dir = profile_dir
def __call__(self, environ, start_response):
response_body = []
def catching_start_response(status, headers, exc_info=None):
start_response(status, headers, exc_info)
return response_body.append
def runapp():
appiter = self._app(environ, catching_start_response)
response_body.extend(appiter)
if hasattr(appiter, 'close'):
appiter.close()
p = Profile()
start = time.time()
p.runcall(runapp)
body = ''.join(response_body)
elapsed = time.time() - start
if self._profile_dir is not None:
prof_filename = os.path.join(self._profile_dir,
'%s.%s.%06dms.%d.prof' % (
environ['REQUEST_METHOD'],
environ.get('PATH_INFO').strip('/').replace('/', '.') or 'root',
elapsed * 1000.0,
time.time()
))
p.dump_stats(prof_filename)
else:
stats = Stats(p, stream=self._stream)
stats.sort_stats(*self._sort_by)
self._stream.write('-' * 80)
self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
stats.print_stats(*self._restrictions)
self._stream.write('-' * 80 + '\n\n')
return [body]
def make_action(app_factory, hostname='localhost', port=5000,
threaded=False, processes=1, stream=None,
sort_by=('time', 'calls'), restrictions=()):
"""Return a new callback for :mod:`werkzeug.script` that starts a local
server with the profiler enabled.
::
from werkzeug.contrib import profiler
action_profile = profiler.make_action(make_app)
"""
def action(hostname=('h', hostname), port=('p', port),
threaded=threaded, processes=processes):
"""Start a new development server."""
from werkzeug.serving import run_simple
app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
run_simple(hostname, port, app, False, None, threaded, processes)
return action
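# Minimal usage sketch (illustrative; `make_app` is an assumed WSGI app factory,
# not part of this module): per-request stats go both to stdout and to a log
# file. Passing `profile_dir` instead would dump one pstats file per request.
#
#   import sys
#   f = open('profiler.log', 'w')
#   app = ProfilerMiddleware(make_app(), stream=MergeStream(sys.stdout, f),
#                            sort_by=('cumulative', 'calls'), restrictions=(10,))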
| mit |
flipjack/django | oscar/apps/payment/migrations/0001_initial.py | 51 | 4813 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields.autoslugfield
from django.conf import settings
from decimal import Decimal
class Migration(migrations.Migration):
dependencies = [
('order', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Bankcard',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('card_type', models.CharField(max_length=128, verbose_name='Card Type')),
('name', models.CharField(max_length=255, verbose_name='Name', blank=True)),
('number', models.CharField(max_length=32, verbose_name='Number')),
('expiry_date', models.DateField(verbose_name='Expiry Date')),
('partner_reference', models.CharField(max_length=255, verbose_name='Partner Reference', blank=True)),
('user', models.ForeignKey(verbose_name='User', related_name='bankcards', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Bankcards',
'verbose_name': 'Bankcard',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Source',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('currency', models.CharField(default='GBP', max_length=12, verbose_name='Currency')),
('amount_allocated', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Amount Allocated')),
('amount_debited', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Amount Debited')),
('amount_refunded', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Amount Refunded')),
('reference', models.CharField(max_length=128, verbose_name='Reference', blank=True)),
('label', models.CharField(max_length=128, verbose_name='Label', blank=True)),
('order', models.ForeignKey(verbose_name='Order', related_name='sources', to='order.Order')),
],
options={
'verbose_name_plural': 'Sources',
'verbose_name': 'Source',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SourceType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='Name')),
('code', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Code', editable=False, max_length=128, help_text='This is used within forms to identify this source type', blank=True)),
],
options={
'verbose_name_plural': 'Source Types',
'verbose_name': 'Source Type',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('txn_type', models.CharField(max_length=128, verbose_name='Type', blank=True)),
('amount', models.DecimalField(max_digits=12, decimal_places=2, verbose_name='Amount')),
('reference', models.CharField(max_length=128, verbose_name='Reference', blank=True)),
('status', models.CharField(max_length=128, verbose_name='Status', blank=True)),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('source', models.ForeignKey(verbose_name='Source', related_name='transactions', to='payment.Source')),
],
options={
'ordering': ['-date_created'],
'verbose_name_plural': 'Transactions',
'verbose_name': 'Transaction',
'abstract': False,
},
bases=(models.Model,),
),
migrations.AddField(
model_name='source',
name='source_type',
field=models.ForeignKey(verbose_name='Source Type', related_name='sources', to='payment.SourceType'),
preserve_default=True,
),
]
| bsd-3-clause |
CLOUGH/info3180-project-2 | lib/flask/__init__.py | 345 | 1672 | # -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.10'
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as public interface.
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from jinja2 import Markup, escape
from .app import Flask, Request, Response
from .config import Config
from .helpers import url_for, flash, send_file, send_from_directory, \
get_flashed_messages, get_template_attribute, make_response, safe_join, \
stream_with_context
from .globals import current_app, g, request, session, _request_ctx_stack, \
_app_ctx_stack
from .ctx import has_request_context, has_app_context, \
after_this_request, copy_current_request_context
from .module import Module
from .blueprints import Blueprint
from .templating import render_template, render_template_string
# the signals
from .signals import signals_available, template_rendered, request_started, \
request_finished, got_request_exception, request_tearing_down, \
appcontext_tearing_down, appcontext_pushed, \
appcontext_popped, message_flashed
# We're not exposing the actual json module but a convenient wrapper around
# it.
from . import json
# This was the only thing that flask used to export at one point and it had
# a more generic name.
jsonify = json.jsonify
# backwards compat, goes away in 1.0
from .sessions import SecureCookieSession as Session
json_available = True
| apache-2.0 |
dennis-sheil/commandergenius | project/jni/python/src/Demo/turtle/tdemo_tree.py | 32 | 1406 | #!/usr/bin/python
""" turtle-example-suite:
tdemo_tree.py
Displays a 'breadth-first-tree' - in contrast
to the classical Logo tree drawing programs,
which use a depth-first-algorithm.
Uses:
(1) a tree-generator, where the drawing is
quasi the side-effect, whereas the generator
always yields None.
(2) Turtle-cloning: At each branching point the
current pen is cloned. So in the end there
are 1024 turtles.
"""
from turtle import Turtle, mainloop
from time import clock
def tree(plist, l, a, f):
""" plist is list of pens
l is length of branch
a is half of the angle between 2 branches
f is factor by which branch is shortened
from level to level."""
if l > 3:
lst = []
for p in plist:
p.forward(l)
q = p.clone()
p.left(a)
q.right(a)
lst.append(p)
lst.append(q)
for x in tree(lst, l*f, a, f):
yield None
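# Illustrative note (not in the original demo): because every pen is cloned at
# each branching point, level k of the tree is drawn by 2**k turtles moving in
# lock-step; with the default arguments (l=200, f=0.6375) there are ten
# branchings, hence the 2**10 = 1024 turtles that maketree() prints below.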
def maketree():
p = Turtle()
p.setundobuffer(None)
p.hideturtle()
p.speed(0)
p.tracer(30,0)
p.left(90)
p.penup()
p.forward(-210)
p.pendown()
t = tree([p], 200, 65, 0.6375)
for x in t:
pass
print len(p.getscreen().turtles())
def main():
a=clock()
maketree()
b=clock()
return "done: %.2f sec." % (b-a)
if __name__ == "__main__":
msg = main()
print msg
mainloop()
| lgpl-2.1 |
frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/OpenGL/raw/GL/AMD/performance_monitor.py | 1 | 2250 | '''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p, constants as _cs, arrays
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_AMD_performance_monitor'
def _f( function ):
return _p.createFunction( function,_p.GL,'GL_AMD_performance_monitor',False)
_p.unpack_constants( """GL_COUNTER_TYPE_AMD 0x8BC0
GL_COUNTER_RANGE_AMD 0x8BC1
GL_UNSIGNED_INT64_AMD 0x8BC2
GL_PERCENTAGE_AMD 0x8BC3
GL_PERFMON_RESULT_AVAILABLE_AMD 0x8BC4
GL_PERFMON_RESULT_SIZE_AMD 0x8BC5
GL_PERFMON_RESULT_AMD 0x8BC6""", globals())
@_f
@_p.types(None,arrays.GLintArray,_cs.GLsizei,arrays.GLuintArray)
def glGetPerfMonitorGroupsAMD( numGroups,groupsSize,groups ):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLintArray,arrays.GLintArray,_cs.GLsizei,arrays.GLuintArray)
def glGetPerfMonitorCountersAMD( group,numCounters,maxActiveCounters,counterSize,counters ):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetPerfMonitorGroupStringAMD( group,bufSize,length,groupString ):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLsizei,arrays.GLsizeiArray,arrays.GLcharArray)
def glGetPerfMonitorCounterStringAMD( group,counter,bufSize,length,counterString ):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum,ctypes.c_void_p)
def glGetPerfMonitorCounterInfoAMD( group,counter,pname,data ):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glGenPerfMonitorsAMD( n,monitors ):pass
@_f
@_p.types(None,_cs.GLsizei,arrays.GLuintArray)
def glDeletePerfMonitorsAMD( n,monitors ):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLboolean,_cs.GLuint,_cs.GLint,arrays.GLuintArray)
def glSelectPerfMonitorCountersAMD( monitor,enable,group,numCounters,counterList ):pass
@_f
@_p.types(None,_cs.GLuint)
def glBeginPerfMonitorAMD( monitor ):pass
@_f
@_p.types(None,_cs.GLuint)
def glEndPerfMonitorAMD( monitor ):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLsizei,arrays.GLuintArray,arrays.GLintArray)
def glGetPerfMonitorCounterDataAMD( monitor,pname,dataSize,data,bytesWritten ):pass
def glInitPerformanceMonitorAMD():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
| bsd-2-clause |
zerkrx/zerkbox | lib/pip/_vendor/requests/packages/chardet/chardistribution.py | 2755 | 9226 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
            # we only care about 2-byte characters in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
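    # Worked example (illustrative, not part of the original module): with
    # _mTotalChars = 1000, _mFreqChars = 600 and a typical distribution
    # ratio of 3.0, the confidence is 600 / ((1000 - 600) * 3.0) = 0.5.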
def got_enough_data(self):
        # It is not necessary to receive all data to draw a conclusion.
        # For charset detection, a certain amount of data is enough.
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
| gpl-3.0 |
amrdraz/kodr | app/brython/www/src/Lib/multiprocessing/util.py | 696 | 9917 | #
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import sys
import functools
import os
import itertools
import weakref
import atexit
import threading # we want threading to install its
# cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, next(_finalizer_counter))
self._pid = os.getpid()
_finalizer_registry[self._key] = self
def __call__(self, wr=None,
# Need to bind these locally because the globals can have
# been cleared at shutdown
_finalizer_registry=_finalizer_registry,
sub_debug=sub_debug, getpid=os.getpid):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
if self._pid != getpid():
sub_debug('finalizer ignored because different process')
res = None
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
            x += ', exitpriority=' + str(self._key[0])
return x + '>'
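# Minimal usage sketch (illustrative; `res` is a hypothetical object owning the
# temp directory): the callback runs when `res` is garbage collected or, since
# exitpriority is set, at interpreter shutdown at the latest -- the same
# pattern get_temp_dir() uses above.
#
#   import shutil, tempfile
#   d = tempfile.mkdtemp()
#   Finalize(res, shutil.rmtree, args=[d], exitpriority=0)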
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if _finalizer_registry is None:
# This function may be called after this module's globals are
# destroyed. See the _exit_function function in this module for more
# notes.
return
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in list(_finalizer_registry.items()) if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
active_children=active_children,
current_process=current_process):
# We hold on to references to functions in the arglist due to the
# situation described below, where this function is called after this
# module's globals are destroyed.
global _exiting
if not _exiting:
_exiting = True
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
if current_process() is not None:
# We check if the current process is None here because if
# it's None, any call to ``active_children()`` will raise
# an AttributeError (active_children winds up trying to
# get attributes from util._current_process). One
# situation where this can happen is if someone has
# manipulated sys.modules, causing this module to be
# garbage collected. The destructor for the module type
# then replaces all values in the module dict with None.
# For instance, after setuptools runs a test it replaces
# sys.modules with a copy created earlier. See issues
# #9775 and #15881. Also related: #4106, #9205, and
# #9207.
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._reset()
register_after_fork(self, ForkAwareThreadLock._reset)
def _reset(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
| mit |
prodromou87/gem5 | util/stats/profile.py | 87 | 15976 | # Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from orderdict import orderdict
import output
class FileData(dict):
def __init__(self, filename):
self.filename = filename
fd = file(filename)
current = []
for line in fd:
line = line.strip()
if line.startswith('>>>'):
current = []
self[line[3:]] = current
else:
current.append(line)
fd.close()
class RunData(dict):
def __init__(self, filename):
self.filename = filename
def __getattribute__(self, attr):
if attr == 'total':
total = 0.0
for value in self.itervalues():
total += value
return total
if attr == 'filedata':
return FileData(self.filename)
if attr == 'maxsymlen':
return max([ len(sym) for sym in self.iterkeys() ])
return super(RunData, self).__getattribute__(attr)
def display(self, output=None, limit=None, maxsymlen=None):
if not output:
import sys
output = sys.stdout
elif isinstance(output, str):
output = file(output, 'w')
total = float(self.total)
# swap (string,count) order so we can sort on count
symbols = [ (count,name) for name,count in self.iteritems() ]
symbols.sort(reverse=True)
if limit is not None:
symbols = symbols[:limit]
if not maxsymlen:
maxsymlen = self.maxsymlen
symbolf = "%-" + str(maxsymlen + 1) + "s %.2f%%"
for number,name in symbols:
print >>output, symbolf % (name, 100.0 * (float(number) / total))
class PCData(RunData):
def __init__(self, filename=None, categorize=None, showidle=True):
super(PCData, self).__init__(self, filename)
filedata = self.filedata['PC data']
for line in filedata:
(symbol, count) = line.split()
if symbol == "0x0":
continue
count = int(count)
if categorize is not None:
category = categorize(symbol)
if category is None:
category = 'other'
elif category == 'idle' and not showidle:
continue
self[category] = count
class FuncNode(object):
def __new__(cls, filedata=None):
if filedata is None:
return super(FuncNode, cls).__new__(cls)
nodes = {}
for line in filedata['function data']:
data = line.split(' ')
node_id = long(data[0], 16)
node = FuncNode()
node.symbol = data[1]
if node.symbol == '':
node.symbol = 'unknown'
node.count = long(data[2])
node.children = [ long(child, 16) for child in data[3:] ]
nodes[node_id] = node
for node in nodes.itervalues():
children = []
for cid in node.children:
child = nodes[cid]
children.append(child)
child.parent = node
node.children = tuple(children)
if not nodes:
print filedata.filename
print nodes
return nodes[0]
def total(self):
total = self.count
for child in self.children:
total += child.total()
return total
def aggregate(self, dict, categorize, incategory):
category = None
if categorize:
category = categorize(self.symbol)
total = self.count
for child in self.children:
total += child.aggregate(dict, categorize, category or incategory)
if category:
dict[category] = dict.get(category, 0) + total
return 0
elif not incategory:
dict[self.symbol] = dict.get(self.symbol, 0) + total
return total
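    # Behavioural note (illustrative, not original): aggregate() walks the tree
    # once; a node whose symbol maps to a category charges its subtree to
    # dict[category] (minus any descendant that names its own category), a node
    # inside a category with no category of its own is absorbed by that
    # ancestor, and a node outside any category keeps a per-symbol entry.
    # FuncData.__init__ below relies on exactly this behaviour.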
def dump(self):
kids = [ child.symbol for child in self.children]
print '%s %d <%s>' % (self.symbol, self.count, ', '.join(kids))
for child in self.children:
child.dump()
def _dot(self, dot, threshold, categorize, total):
from pydot import Dot, Edge, Node
self.dot_node = None
value = self.total() * 100.0 / total
if value < threshold:
return
if categorize:
category = categorize(self.symbol)
if category and category != 'other':
return
label = '%s %.2f%%' % (self.symbol, value)
self.dot_node = Node(self, label=label)
dot.add_node(self.dot_node)
for child in self.children:
child._dot(dot, threshold, categorize, total)
if child.dot_node is not None:
dot.add_edge(Edge(self, child))
def _cleandot(self):
for child in self.children:
child._cleandot()
self.dot_node = None
del self.__dict__['dot_node']
def dot(self, dot, threshold=0.1, categorize=None):
self._dot(dot, threshold, categorize, self.total())
self._cleandot()
class FuncData(RunData):
def __init__(self, filename, categorize=None):
super(FuncData, self).__init__(filename)
tree = self.tree
tree.aggregate(self, categorize, incategory=False)
self.total = tree.total()
def __getattribute__(self, attr):
if attr == 'tree':
return FuncNode(self.filedata)
return super(FuncData, self).__getattribute__(attr)
def displayx(self, output=None, maxcount=None):
if output is None:
import sys
output = sys.stdout
items = [ (val,key) for key,val in self.iteritems() ]
items.sort(reverse=True)
for val,key in items:
if maxcount is not None:
if maxcount == 0:
return
maxcount -= 1
percent = val * 100.0 / self.total
print >>output, '%-30s %8s' % (key, '%3.2f%%' % percent)
class Profile(object):
# This list controls the order of values in stacked bar data output
default_categories = [ 'interrupt',
'driver',
'stack',
'buffer',
'copy',
'syscall',
'user',
'other',
'idle']
def __init__(self, datatype, categorize=None):
categories = Profile.default_categories
self.datatype = datatype
self.categorize = categorize
self.data = {}
self.categories = categories[:]
self.rcategories = categories[:]
self.rcategories.reverse()
self.cpu = 0
# Read in files
def inputdir(self, directory):
import os, os.path, re
from os.path import expanduser, join as joinpath
directory = expanduser(directory)
label_ex = re.compile(r'profile\.(.*).dat')
for root,dirs,files in os.walk(directory):
for name in files:
match = label_ex.match(name)
if not match:
continue
filename = joinpath(root, name)
prefix = os.path.commonprefix([root, directory])
dirname = root[len(prefix)+1:]
data = self.datatype(filename, self.categorize)
self.setdata(dirname, match.group(1), data)
def setdata(self, run, cpu, data):
if run not in self.data:
self.data[run] = {}
if cpu in self.data[run]:
raise AttributeError, \
'data already stored for run %s and cpu %s' % (run, cpu)
self.data[run][cpu] = data
def getdata(self, run, cpu):
try:
return self.data[run][cpu]
except KeyError:
print run, cpu
return None
def alldata(self):
for run,cpus in self.data.iteritems():
for cpu,data in cpus.iteritems():
yield run,cpu,data
def get(self, job, stat, system=None):
        if system is None and hasattr(job, 'system'):
system = job.system
if system is None:
raise AttributeError, 'The job must have a system set'
cpu = '%s.run%d' % (system, self.cpu)
data = self.getdata(str(job), cpu)
if not data:
return None
values = []
for category in self.categories:
val = float(data.get(category, 0.0))
if val < 0.0:
raise ValueError, 'value is %f' % val
values.append(val)
total = sum(values)
return [ v / total * 100.0 for v in values ]
def dump(self):
for run,cpu,data in self.alldata():
print 'run %s, cpu %s' % (run, cpu)
data.dump()
print
def write_dot(self, threshold, jobfile=None, jobs=None):
import pydot
if jobs is None:
jobs = [ job for job in jobfile.jobs() ]
for job in jobs:
cpu = '%s.run%d' % (job.system, self.cpu)
symbols = self.getdata(job.name, cpu)
if not symbols:
continue
dot = pydot.Dot()
symbols.tree.dot(dot, threshold=threshold)
dot.write(symbols.filename[:-3] + 'dot')
def write_txt(self, jobfile=None, jobs=None, limit=None):
if jobs is None:
jobs = [ job for job in jobfile.jobs() ]
for job in jobs:
cpu = '%s.run%d' % (job.system, self.cpu)
symbols = self.getdata(job.name, cpu)
if not symbols:
continue
output = file(symbols.filename[:-3] + 'txt', 'w')
symbols.display(output, limit)
def display(self, jobfile=None, jobs=None, limit=None):
if jobs is None:
jobs = [ job for job in jobfile.jobs() ]
maxsymlen = 0
thejobs = []
for job in jobs:
cpu = '%s.run%d' % (job.system, self.cpu)
symbols = self.getdata(job.name, cpu)
if symbols:
thejobs.append(job)
maxsymlen = max(maxsymlen, symbols.maxsymlen)
for job in thejobs:
cpu = '%s.run%d' % (job.system, self.cpu)
symbols = self.getdata(job.name, cpu)
print job.name
symbols.display(limit=limit, maxsymlen=maxsymlen)
print
from categories import func_categorize, pc_categorize
class PCProfile(Profile):
def __init__(self, categorize=pc_categorize):
super(PCProfile, self).__init__(PCData, categorize)
class FuncProfile(Profile):
def __init__(self, categorize=func_categorize):
super(FuncProfile, self).__init__(FuncData, categorize)
def usage(exitcode = None):
print '''\
Usage: %s [-bc] [-g <dir>] [-j <jobfile>] [-n <num>]
-c groups symbols into categories
-b dumps data for bar charts
-d generate dot output
-g <d> draw graphs and send output to <d>
-j <jobfile> specify a different jobfile (default is Test.py)
-n <n> selects number of top symbols to print (default 10)
''' % sys.argv[0]
if exitcode is not None:
sys.exit(exitcode)
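# Example invocations (hedged; the file and directory names are illustrative):
#
#   python profile.py -f profile.0.dat -c -n 10    # one dump, categorized
#   python profile.py -j Test.py -c -g graphs/     # whole jobfile, graphed
#
# Flags not listed in usage() above (-C, -D, -f, -i, -p, -T, -t) are parsed by
# the getopt string in __main__ below.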
if __name__ == '__main__':
import getopt, re, sys
from os.path import expanduser
from output import StatOutput
# default option values
numsyms = 10
graph = None
cpus = [ 0 ]
categorize = False
showidle = True
funcdata = True
jobfilename = 'Test.py'
dodot = False
dotfile = None
textout = False
threshold = 0.01
inputfile = None
try:
opts, args = getopt.getopt(sys.argv[1:], 'C:cdD:f:g:ij:n:pT:t')
except getopt.GetoptError:
usage(2)
for o,a in opts:
if o == '-C':
cpus = [ int(x) for x in a.split(',') ]
elif o == '-c':
categorize = True
elif o == '-D':
dotfile = a
elif o == '-d':
dodot = True
elif o == '-f':
inputfile = expanduser(a)
elif o == '-g':
graph = a
elif o == '-i':
showidle = False
elif o == '-j':
jobfilename = a
elif o == '-n':
numsyms = int(a)
elif o == '-p':
funcdata = False
elif o == '-T':
threshold = float(a)
elif o == '-t':
textout = True
if args:
print "'%s'" % args, len(args)
usage(1)
if inputfile:
catfunc = None
if categorize:
catfunc = func_categorize
data = FuncData(inputfile, categorize=catfunc)
if dodot:
import pydot
dot = pydot.Dot()
data.tree.dot(dot, threshold=threshold)
#dot.orientation = 'landscape'
#dot.ranksep='equally'
#dot.rank='samerank'
dot.write(dotfile, format='png')
else:
data.display(limit=numsyms)
else:
from jobfile import JobFile
jobfile = JobFile(jobfilename)
if funcdata:
profile = FuncProfile()
else:
profile = PCProfile()
if not categorize:
profile.categorize = None
profile.inputdir(jobfile.rootdir)
if graph:
for cpu in cpus:
profile.cpu = cpu
if funcdata:
name = 'funcstacks%d' % cpu
else:
name = 'stacks%d' % cpu
output = StatOutput(jobfile, info=profile)
output.xlabel = 'System Configuration'
output.ylabel = '% CPU utilization'
output.stat = name
output.graph(name, graph)
if dodot:
for cpu in cpus:
profile.cpu = cpu
profile.write_dot(jobfile=jobfile, threshold=threshold)
if textout:
for cpu in cpus:
profile.cpu = cpu
profile.write_txt(jobfile=jobfile)
if not graph and not textout and not dodot:
for cpu in cpus:
if not categorize:
profile.categorize = None
profile.cpu = cpu
profile.display(jobfile=jobfile, limit=numsyms)
| bsd-3-clause |
gentunian/tellapic | src/client/python/pyqt/ToolBoxUi.py | 2 | 30199 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'toolBox.ui'
#
# Created: Mon Nov 7 17:01:59 2011
# by: PyQt4 UI code generator 4.8.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_ToolBox(object):
def setupUi(self, ToolBox):
ToolBox.setObjectName(_fromUtf8("ToolBox"))
ToolBox.resize(438, 636)
ToolBox.setWindowTitle(QtGui.QApplication.translate("ToolBox", "ToolBox", None, QtGui.QApplication.UnicodeUTF8))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/app-icons/logo_small.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
ToolBox.setWindowIcon(icon)
ToolBox.setAutoFillBackground(True)
ToolBox.setStyleSheet(QtGui.QApplication.translate("ToolBox", "0", None, QtGui.QApplication.UnicodeUTF8))
ToolBox.setFrameShape(QtGui.QFrame.StyledPanel)
self.drawingPage = QtGui.QWidget()
self.drawingPage.setGeometry(QtCore.QRect(0, 0, 436, 389))
self.drawingPage.setObjectName(_fromUtf8("drawingPage"))
self.verticalLayout_14 = QtGui.QVBoxLayout(self.drawingPage)
self.verticalLayout_14.setMargin(1)
self.verticalLayout_14.setObjectName(_fromUtf8("verticalLayout_14"))
self.scrollArea_6 = QtGui.QScrollArea(self.drawingPage)
self.scrollArea_6.setStyleSheet(_fromUtf8("QScrollArea::title { \n"
" background-color: transparent;\n"
" subcontrol-position: top left; /* position at the top left*/ \n"
" padding:2 13px;\n"
" } \n"
"\n"
"QScrollArea { \n"
" border: 2px solid gray;\n"
" background-color: lightgray;\n"
" border-radius: 5px; \n"
" } \n"
""))
self.scrollArea_6.setWidgetResizable(True)
self.scrollArea_6.setObjectName(_fromUtf8("scrollArea_6"))
self.scrollAreaWidgetContents_6 = QtGui.QWidget()
self.scrollAreaWidgetContents_6.setGeometry(QtCore.QRect(0, 0, 430, 383))
self.scrollAreaWidgetContents_6.setObjectName(_fromUtf8("scrollAreaWidgetContents_6"))
self.verticalLayout_15 = QtGui.QVBoxLayout(self.scrollAreaWidgetContents_6)
self.verticalLayout_15.setMargin(1)
self.verticalLayout_15.setObjectName(_fromUtf8("verticalLayout_15"))
self.drawingPropertiesWidget = DrawingPropertiesWidget(self.scrollAreaWidgetContents_6)
self.drawingPropertiesWidget.setToolTip(QtGui.QApplication.translate("ToolBox", "Custom widget", None, QtGui.QApplication.UnicodeUTF8))
self.drawingPropertiesWidget.setWhatsThis(QtGui.QApplication.translate("ToolBox", "You don\'t care", None, QtGui.QApplication.UnicodeUTF8))
self.drawingPropertiesWidget.setObjectName(_fromUtf8("drawingPropertiesWidget"))
self.verticalLayout_15.addWidget(self.drawingPropertiesWidget)
self.scrollArea_6.setWidget(self.scrollAreaWidgetContents_6)
self.verticalLayout_14.addWidget(self.scrollArea_6)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/app-icons/property.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
ToolBox.addItem(self.drawingPage, icon1, _fromUtf8(""))
self.strokeStylePage = QtGui.QWidget()
self.strokeStylePage.setGeometry(QtCore.QRect(0, 0, 436, 389))
self.strokeStylePage.setObjectName(_fromUtf8("strokeStylePage"))
self.verticalLayout = QtGui.QVBoxLayout(self.strokeStylePage)
self.verticalLayout.setMargin(1)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.scrollArea = QtGui.QScrollArea(self.strokeStylePage)
self.scrollArea.setMinimumSize(QtCore.QSize(235, 0))
self.scrollArea.setAutoFillBackground(False)
self.scrollArea.setStyleSheet(_fromUtf8("QScrollArea::title { \n"
" background-color: transparent;\n"
" subcontrol-position: top left; /* position at the top left*/ \n"
" padding:2 13px;\n"
" } \n"
"\n"
"QScrollArea { \n"
" border: 2px solid gray;\n"
" background-color: lightgray;\n"
" border-radius: 5px; \n"
" } "))
self.scrollArea.setFrameShape(QtGui.QFrame.NoFrame)
self.scrollArea.setFrameShadow(QtGui.QFrame.Plain)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 430, 383))
self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(200, 181))
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout_2.setMargin(1)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.strokeStyleWidget = StrokeStyleWidget(self.scrollAreaWidgetContents)
self.strokeStyleWidget.setToolTip(QtGui.QApplication.translate("ToolBox", "Custom widget", None, QtGui.QApplication.UnicodeUTF8))
self.strokeStyleWidget.setWhatsThis(QtGui.QApplication.translate("ToolBox", "You don\'t care", None, QtGui.QApplication.UnicodeUTF8))
self.strokeStyleWidget.setObjectName(_fromUtf8("strokeStyleWidget"))
self.verticalLayout_2.addWidget(self.strokeStyleWidget)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout.addWidget(self.scrollArea)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/tool-icons/drawings.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
ToolBox.addItem(self.strokeStylePage, icon2, _fromUtf8(""))
self.strokeColorPage = QtGui.QWidget()
self.strokeColorPage.setGeometry(QtCore.QRect(0, 0, 436, 389))
self.strokeColorPage.setObjectName(_fromUtf8("strokeColorPage"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.strokeColorPage)
self.verticalLayout_3.setMargin(1)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.scrollArea_2 = QtGui.QScrollArea(self.strokeColorPage)
self.scrollArea_2.setStyleSheet(_fromUtf8("QScrollArea::title { \n"
" background-color: transparent;\n"
" subcontrol-position: top left; /* position at the top left*/ \n"
" padding:2 13px;\n"
" } \n"
"\n"
"QScrollArea { \n"
" border: 2px solid gray;\n"
" background-color: lightgray;\n"
" border-radius: 5px; \n"
" } "))
self.scrollArea_2.setFrameShape(QtGui.QFrame.NoFrame)
self.scrollArea_2.setFrameShadow(QtGui.QFrame.Plain)
self.scrollArea_2.setWidgetResizable(True)
self.scrollArea_2.setObjectName(_fromUtf8("scrollArea_2"))
self.scrollAreaWidgetContents_2 = QtGui.QWidget()
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 430, 383))
self.scrollAreaWidgetContents_2.setObjectName(_fromUtf8("scrollAreaWidgetContents_2"))
self.verticalLayout_13 = QtGui.QVBoxLayout(self.scrollAreaWidgetContents_2)
self.verticalLayout_13.setMargin(1)
self.verticalLayout_13.setObjectName(_fromUtf8("verticalLayout_13"))
self.verticalLayout_4 = QtGui.QVBoxLayout()
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.strokeColor = ColorWidget(self.scrollAreaWidgetContents_2)
self.strokeColor.setToolTip(QtGui.QApplication.translate("ToolBox", "Custom color widget", None, QtGui.QApplication.UnicodeUTF8))
self.strokeColor.setWhatsThis(QtGui.QApplication.translate("ToolBox", "You don\'t care", None, QtGui.QApplication.UnicodeUTF8))
self.strokeColor.setObjectName(_fromUtf8("strokeColor"))
self.verticalLayout_4.addWidget(self.strokeColor)
self.formLayout_2 = QtGui.QFormLayout()
self.formLayout_2.setContentsMargins(9, 0, 9, 9)
self.formLayout_2.setHorizontalSpacing(22)
self.formLayout_2.setVerticalSpacing(9)
self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
self.label_14 = QtGui.QLabel(self.scrollAreaWidgetContents_2)
self.label_14.setText(QtGui.QApplication.translate("ToolBox", "Current Color:", None, QtGui.QApplication.UnicodeUTF8))
self.label_14.setObjectName(_fromUtf8("label_14"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_14)
self.strokeColorLabel = QtGui.QLabel(self.scrollAreaWidgetContents_2)
self.strokeColorLabel.setStyleSheet(_fromUtf8("QLabel {\n"
" border: 1px solid \n"
"}"))
self.strokeColorLabel.setText(_fromUtf8(""))
self.strokeColorLabel.setObjectName(_fromUtf8("strokeColorLabel"))
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.strokeColorLabel)
self.label_6 = QtGui.QLabel(self.scrollAreaWidgetContents_2)
self.label_6.setText(QtGui.QApplication.translate("ToolBox", "Stroke Enabled:", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_6)
self.shouldStrokeCheckbox = QtGui.QCheckBox(self.scrollAreaWidgetContents_2)
self.shouldStrokeCheckbox.setLayoutDirection(QtCore.Qt.RightToLeft)
self.shouldStrokeCheckbox.setText(_fromUtf8(""))
self.shouldStrokeCheckbox.setObjectName(_fromUtf8("shouldStrokeCheckbox"))
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.shouldStrokeCheckbox)
self.verticalLayout_4.addLayout(self.formLayout_2)
self.verticalLayout_13.addLayout(self.verticalLayout_4)
self.scrollArea_2.setWidget(self.scrollAreaWidgetContents_2)
self.verticalLayout_3.addWidget(self.scrollArea_2)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/tool-icons/color1.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
ToolBox.addItem(self.strokeColorPage, icon3, _fromUtf8(""))
self.strokeFillPage = QtGui.QWidget()
self.strokeFillPage.setGeometry(QtCore.QRect(0, 0, 436, 389))
self.strokeFillPage.setAutoFillBackground(True)
self.strokeFillPage.setObjectName(_fromUtf8("strokeFillPage"))
self.verticalLayout_9 = QtGui.QVBoxLayout(self.strokeFillPage)
self.verticalLayout_9.setMargin(1)
self.verticalLayout_9.setObjectName(_fromUtf8("verticalLayout_9"))
self.scrollArea_3 = QtGui.QScrollArea(self.strokeFillPage)
self.scrollArea_3.setAutoFillBackground(True)
self.scrollArea_3.setStyleSheet(_fromUtf8("QScrollArea::title { \n"
" background-color: transparent;\n"
" subcontrol-position: top left; /* position at the top left*/ \n"
" padding:2 13px;\n"
" } \n"
"\n"
"QScrollArea { \n"
" border: 2px solid gray;\n"
" background-color: lightgray;\n"
" border-radius: 5px; \n"
" } \n"
""))
self.scrollArea_3.setFrameShape(QtGui.QFrame.NoFrame)
self.scrollArea_3.setFrameShadow(QtGui.QFrame.Plain)
self.scrollArea_3.setWidgetResizable(True)
self.scrollArea_3.setObjectName(_fromUtf8("scrollArea_3"))
self.scrollAreaWidgetContents_3 = QtGui.QWidget()
self.scrollAreaWidgetContents_3.setGeometry(QtCore.QRect(0, 0, 430, 383))
self.scrollAreaWidgetContents_3.setMinimumSize(QtCore.QSize(0, 0))
self.scrollAreaWidgetContents_3.setAutoFillBackground(True)
self.scrollAreaWidgetContents_3.setObjectName(_fromUtf8("scrollAreaWidgetContents_3"))
self.verticalLayout_12 = QtGui.QVBoxLayout(self.scrollAreaWidgetContents_3)
self.verticalLayout_12.setMargin(1)
self.verticalLayout_12.setObjectName(_fromUtf8("verticalLayout_12"))
self.verticalLayout_5 = QtGui.QVBoxLayout()
self.verticalLayout_5.setSpacing(0)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.fillColor = ColorWidget(self.scrollAreaWidgetContents_3)
self.fillColor.setToolTip(QtGui.QApplication.translate("ToolBox", "Custom color widget", None, QtGui.QApplication.UnicodeUTF8))
self.fillColor.setWhatsThis(QtGui.QApplication.translate("ToolBox", "You don\'t care", None, QtGui.QApplication.UnicodeUTF8))
self.fillColor.setObjectName(_fromUtf8("fillColor"))
self.verticalLayout_5.addWidget(self.fillColor)
self.formLayout_3 = QtGui.QFormLayout()
self.formLayout_3.setSizeConstraint(QtGui.QLayout.SetMaximumSize)
self.formLayout_3.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout_3.setLabelAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.formLayout_3.setFormAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTop|QtCore.Qt.AlignTrailing)
self.formLayout_3.setContentsMargins(9, 0, 9, 9)
self.formLayout_3.setHorizontalSpacing(22)
self.formLayout_3.setVerticalSpacing(9)
self.formLayout_3.setObjectName(_fromUtf8("formLayout_3"))
self.label_8 = QtGui.QLabel(self.scrollAreaWidgetContents_3)
self.label_8.setText(QtGui.QApplication.translate("ToolBox", "Current Color:", None, QtGui.QApplication.UnicodeUTF8))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_8)
self.fillColorLabel = QtGui.QLabel(self.scrollAreaWidgetContents_3)
self.fillColorLabel.setMinimumSize(QtCore.QSize(0, 0))
self.fillColorLabel.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.fillColorLabel.setLayoutDirection(QtCore.Qt.LeftToRight)
self.fillColorLabel.setStyleSheet(_fromUtf8("QLabel {\n"
" border: 1px solid black;\n"
"}"))
self.fillColorLabel.setText(_fromUtf8(""))
self.fillColorLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.fillColorLabel.setObjectName(_fromUtf8("fillColorLabel"))
self.formLayout_3.setWidget(0, QtGui.QFormLayout.FieldRole, self.fillColorLabel)
self.label_9 = QtGui.QLabel(self.scrollAreaWidgetContents_3)
self.label_9.setAutoFillBackground(False)
self.label_9.setText(QtGui.QApplication.translate("ToolBox", "Fill Enabled:", None, QtGui.QApplication.UnicodeUTF8))
self.label_9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_9)
self.shouldFillCheckbox = QtGui.QCheckBox(self.scrollAreaWidgetContents_3)
self.shouldFillCheckbox.setLayoutDirection(QtCore.Qt.RightToLeft)
self.shouldFillCheckbox.setAutoFillBackground(False)
self.shouldFillCheckbox.setText(_fromUtf8(""))
self.shouldFillCheckbox.setObjectName(_fromUtf8("shouldFillCheckbox"))
self.formLayout_3.setWidget(1, QtGui.QFormLayout.FieldRole, self.shouldFillCheckbox)
self.verticalLayout_5.addLayout(self.formLayout_3)
self.verticalLayout_12.addLayout(self.verticalLayout_5)
self.scrollArea_3.setWidget(self.scrollAreaWidgetContents_3)
self.verticalLayout_9.addWidget(self.scrollArea_3)
ToolBox.addItem(self.strokeFillPage, icon3, _fromUtf8(""))
self.fontPage = QtGui.QWidget()
self.fontPage.setGeometry(QtCore.QRect(0, 0, 436, 389))
self.fontPage.setObjectName(_fromUtf8("fontPage"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.fontPage)
self.verticalLayout_6.setMargin(1)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.scrollArea_4 = QtGui.QScrollArea(self.fontPage)
self.scrollArea_4.setMinimumSize(QtCore.QSize(0, 0))
self.scrollArea_4.setStyleSheet(_fromUtf8("QScrollArea::title { \n"
" background-color: transparent;\n"
" subcontrol-position: top left; /* position at the top left*/ \n"
" padding:2 13px;\n"
" } \n"
"\n"
"QScrollArea { \n"
" border: 2px solid gray;\n"
" background-color: lightgray;\n"
" border-radius: 5px; \n"
" } \n"
""))
self.scrollArea_4.setWidgetResizable(True)
self.scrollArea_4.setObjectName(_fromUtf8("scrollArea_4"))
self.scrollAreaWidgetContents_4 = QtGui.QWidget()
self.scrollAreaWidgetContents_4.setGeometry(QtCore.QRect(0, 0, 430, 383))
self.scrollAreaWidgetContents_4.setMinimumSize(QtCore.QSize(180, 0))
self.scrollAreaWidgetContents_4.setObjectName(_fromUtf8("scrollAreaWidgetContents_4"))
self.verticalLayout_8 = QtGui.QVBoxLayout(self.scrollAreaWidgetContents_4)
self.verticalLayout_8.setMargin(1)
self.verticalLayout_8.setObjectName(_fromUtf8("verticalLayout_8"))
self.fontWidget = FontWidget(self.scrollAreaWidgetContents_4)
self.fontWidget.setToolTip(QtGui.QApplication.translate("ToolBox", "Custom widget", None, QtGui.QApplication.UnicodeUTF8))
self.fontWidget.setWhatsThis(QtGui.QApplication.translate("ToolBox", "You don\'t care", None, QtGui.QApplication.UnicodeUTF8))
self.fontWidget.setObjectName(_fromUtf8("fontWidget"))
self.verticalLayout_8.addWidget(self.fontWidget)
self.scrollArea_4.setWidget(self.scrollAreaWidgetContents_4)
self.verticalLayout_6.addWidget(self.scrollArea_4)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/tool-icons/text.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
ToolBox.addItem(self.fontPage, icon4, _fromUtf8(""))
self.usersPage = QtGui.QWidget()
self.usersPage.setGeometry(QtCore.QRect(0, 0, 436, 389))
self.usersPage.setObjectName(_fromUtf8("usersPage"))
self.verticalLayout_10 = QtGui.QVBoxLayout(self.usersPage)
self.verticalLayout_10.setSpacing(6)
self.verticalLayout_10.setMargin(2)
self.verticalLayout_10.setObjectName(_fromUtf8("verticalLayout_10"))
self.scrollArea_5 = QtGui.QScrollArea(self.usersPage)
self.scrollArea_5.setWidgetResizable(True)
self.scrollArea_5.setObjectName(_fromUtf8("scrollArea_5"))
self.scrollAreaWidgetContents_5 = QtGui.QWidget()
self.scrollAreaWidgetContents_5.setGeometry(QtCore.QRect(0, 0, 430, 383))
self.scrollAreaWidgetContents_5.setObjectName(_fromUtf8("scrollAreaWidgetContents_5"))
self.verticalLayout_11 = QtGui.QVBoxLayout(self.scrollAreaWidgetContents_5)
self.verticalLayout_11.setMargin(2)
self.verticalLayout_11.setObjectName(_fromUtf8("verticalLayout_11"))
self.treeView = QtGui.QTreeView(self.scrollAreaWidgetContents_5)
self.treeView.setSortingEnabled(True)
self.treeView.setAnimated(True)
self.treeView.setObjectName(_fromUtf8("treeView"))
self.treeView.header().setDefaultSectionSize(0)
self.verticalLayout_11.addWidget(self.treeView)
self.scrollArea_5.setWidget(self.scrollAreaWidgetContents_5)
self.verticalLayout_10.addWidget(self.scrollArea_5)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/app-icons/users.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
ToolBox.addItem(self.usersPage, icon5, _fromUtf8(""))
self.chatPage = QtGui.QWidget()
self.chatPage.setGeometry(QtCore.QRect(0, 0, 436, 389))
self.chatPage.setObjectName(_fromUtf8("chatPage"))
self.verticalLayout_16 = QtGui.QVBoxLayout(self.chatPage)
self.verticalLayout_16.setMargin(1)
self.verticalLayout_16.setObjectName(_fromUtf8("verticalLayout_16"))
self.dockWidget = QtGui.QDockWidget(self.chatPage)
self.dockWidget.setObjectName(_fromUtf8("dockWidget"))
self.dockWidgetContents = QtGui.QWidget()
self.dockWidgetContents.setObjectName(_fromUtf8("dockWidgetContents"))
self.verticalLayout_7 = QtGui.QVBoxLayout(self.dockWidgetContents)
self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7"))
self.chatWidget = ChatWidget(self.dockWidgetContents)
self.chatWidget.setToolTip(QtGui.QApplication.translate("ToolBox", "Custom widget", None, QtGui.QApplication.UnicodeUTF8))
self.chatWidget.setWhatsThis(QtGui.QApplication.translate("ToolBox", "You don\'t care", None, QtGui.QApplication.UnicodeUTF8))
self.chatWidget.setObjectName(_fromUtf8("chatWidget"))
self.verticalLayout_7.addWidget(self.chatWidget)
self.dockWidget.setWidget(self.dockWidgetContents)
self.verticalLayout_16.addWidget(self.dockWidget)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/app-icons/chat.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
ToolBox.addItem(self.chatPage, icon6, _fromUtf8(""))
self.actionCapSquare = QtGui.QAction(ToolBox)
self.actionCapSquare.setCheckable(True)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/app-icons/capsquare.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionCapSquare.setIcon(icon7)
self.actionCapSquare.setText(QtGui.QApplication.translate("ToolBox", "capSquare", None, QtGui.QApplication.UnicodeUTF8))
self.actionCapSquare.setObjectName(_fromUtf8("actionCapSquare"))
self.actionCapRound = QtGui.QAction(ToolBox)
self.actionCapRound.setCheckable(True)
icon8 = QtGui.QIcon()
icon8.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/app-icons/capround.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionCapRound.setIcon(icon8)
self.actionCapRound.setText(QtGui.QApplication.translate("ToolBox", "capRound", None, QtGui.QApplication.UnicodeUTF8))
self.actionCapRound.setObjectName(_fromUtf8("actionCapRound"))
self.actionCapFlat = QtGui.QAction(ToolBox)
self.actionCapFlat.setCheckable(True)
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/app-icons/capbutt.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionCapFlat.setIcon(icon9)
self.actionCapFlat.setText(QtGui.QApplication.translate("ToolBox", "capFlat", None, QtGui.QApplication.UnicodeUTF8))
self.actionCapFlat.setObjectName(_fromUtf8("actionCapFlat"))
self.actionJoinRound = QtGui.QAction(ToolBox)
self.actionJoinRound.setCheckable(True)
icon10 = QtGui.QIcon()
icon10.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/app-icons/joinround.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionJoinRound.setIcon(icon10)
self.actionJoinRound.setText(QtGui.QApplication.translate("ToolBox", "joinRound", None, QtGui.QApplication.UnicodeUTF8))
self.actionJoinRound.setObjectName(_fromUtf8("actionJoinRound"))
self.actionJoinMiter = QtGui.QAction(ToolBox)
self.actionJoinMiter.setCheckable(True)
icon11 = QtGui.QIcon()
icon11.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/app-icons/joinmiter.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionJoinMiter.setIcon(icon11)
self.actionJoinMiter.setText(QtGui.QApplication.translate("ToolBox", "joinMiter", None, QtGui.QApplication.UnicodeUTF8))
self.actionJoinMiter.setObjectName(_fromUtf8("actionJoinMiter"))
self.actionJoinBevel = QtGui.QAction(ToolBox)
self.actionJoinBevel.setCheckable(True)
icon12 = QtGui.QIcon()
icon12.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/app-icons/joinbevel.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionJoinBevel.setIcon(icon12)
self.actionJoinBevel.setText(QtGui.QApplication.translate("ToolBox", "joinBevel", None, QtGui.QApplication.UnicodeUTF8))
self.actionJoinBevel.setObjectName(_fromUtf8("actionJoinBevel"))
self.actionStrokeColor = QtGui.QAction(ToolBox)
self.actionStrokeColor.setIcon(icon3)
self.actionStrokeColor.setText(QtGui.QApplication.translate("ToolBox", "strokeColor", None, QtGui.QApplication.UnicodeUTF8))
self.actionStrokeColor.setObjectName(_fromUtf8("actionStrokeColor"))
self.actionFillColor = QtGui.QAction(ToolBox)
self.actionFillColor.setIcon(icon3)
self.actionFillColor.setText(QtGui.QApplication.translate("ToolBox", "fillColor", None, QtGui.QApplication.UnicodeUTF8))
self.actionFillColor.setObjectName(_fromUtf8("actionFillColor"))
self.actionShouldFill = QtGui.QAction(ToolBox)
self.actionShouldFill.setCheckable(True)
self.actionShouldFill.setText(QtGui.QApplication.translate("ToolBox", "shouldFill", None, QtGui.QApplication.UnicodeUTF8))
self.actionShouldFill.setObjectName(_fromUtf8("actionShouldFill"))
self.actionShouldStroke = QtGui.QAction(ToolBox)
self.actionShouldStroke.setCheckable(True)
self.actionShouldStroke.setText(QtGui.QApplication.translate("ToolBox", "shouldStroke", None, QtGui.QApplication.UnicodeUTF8))
self.actionShouldStroke.setObjectName(_fromUtf8("actionShouldStroke"))
self.actionWidthChange = QtGui.QAction(ToolBox)
self.actionWidthChange.setText(QtGui.QApplication.translate("ToolBox", "widthChange", None, QtGui.QApplication.UnicodeUTF8))
self.actionWidthChange.setObjectName(_fromUtf8("actionWidthChange"))
self.actionDashSet = QtGui.QAction(ToolBox)
self.actionDashSet.setText(QtGui.QApplication.translate("ToolBox", "dashSet", None, QtGui.QApplication.UnicodeUTF8))
self.actionDashSet.setObjectName(_fromUtf8("actionDashSet"))
self.actionDashPhaseChange = QtGui.QAction(ToolBox)
self.actionDashPhaseChange.setText(QtGui.QApplication.translate("ToolBox", "dashPhaseChange", None, QtGui.QApplication.UnicodeUTF8))
self.actionDashPhaseChange.setObjectName(_fromUtf8("actionDashPhaseChange"))
self.actionFontStyleBold = QtGui.QAction(ToolBox)
self.actionFontStyleBold.setCheckable(True)
icon13 = QtGui.QIcon()
icon13.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/app-icons/bold.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionFontStyleBold.setIcon(icon13)
self.actionFontStyleBold.setText(QtGui.QApplication.translate("ToolBox", "fontStyleBold", None, QtGui.QApplication.UnicodeUTF8))
self.actionFontStyleBold.setObjectName(_fromUtf8("actionFontStyleBold"))
self.actionFontStyleItalic = QtGui.QAction(ToolBox)
self.actionFontStyleItalic.setCheckable(True)
icon14 = QtGui.QIcon()
icon14.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/icons/app-icons/italic.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionFontStyleItalic.setIcon(icon14)
self.actionFontStyleItalic.setText(QtGui.QApplication.translate("ToolBox", "fontStyleItalic", None, QtGui.QApplication.UnicodeUTF8))
self.actionFontStyleItalic.setObjectName(_fromUtf8("actionFontStyleItalic"))
self.actionFontFamilySet = QtGui.QAction(ToolBox)
self.actionFontFamilySet.setIcon(icon4)
self.actionFontFamilySet.setText(QtGui.QApplication.translate("ToolBox", "fontFamilySet", None, QtGui.QApplication.UnicodeUTF8))
self.actionFontFamilySet.setObjectName(_fromUtf8("actionFontFamilySet"))
self.actionFontSizeSet = QtGui.QAction(ToolBox)
self.actionFontSizeSet.setText(QtGui.QApplication.translate("ToolBox", "fontSizeSet", None, QtGui.QApplication.UnicodeUTF8))
self.actionFontSizeSet.setObjectName(_fromUtf8("actionFontSizeSet"))
self.actionChangeCharCounter = QtGui.QAction(ToolBox)
self.actionChangeCharCounter.setText(QtGui.QApplication.translate("ToolBox", "changeCharCounter", None, QtGui.QApplication.UnicodeUTF8))
self.actionChangeCharCounter.setObjectName(_fromUtf8("actionChangeCharCounter"))
self.retranslateUi(ToolBox)
ToolBox.setCurrentIndex(6)
QtCore.QObject.connect(self.shouldFillCheckbox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.actionShouldFill.toggle)
QtCore.QObject.connect(self.strokeColor, QtCore.SIGNAL(_fromUtf8("colorChanged(QColor)")), ToolBox.strokeColorChanged)
QtCore.QObject.connect(self.fillColor, QtCore.SIGNAL(_fromUtf8("colorChanged(QColor)")), ToolBox.fillColorChanged)
QtCore.QObject.connect(self.shouldStrokeCheckbox, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.actionShouldStroke.toggle)
QtCore.QMetaObject.connectSlotsByName(ToolBox)
def retranslateUi(self, ToolBox):
ToolBox.setItemText(ToolBox.indexOf(self.drawingPage), QtGui.QApplication.translate("ToolBox", "Drawing Properties", None, QtGui.QApplication.UnicodeUTF8))
ToolBox.setItemText(ToolBox.indexOf(self.strokeStylePage), QtGui.QApplication.translate("ToolBox", "Stroke Style", None, QtGui.QApplication.UnicodeUTF8))
ToolBox.setItemText(ToolBox.indexOf(self.strokeColorPage), QtGui.QApplication.translate("ToolBox", "Stroke Color", None, QtGui.QApplication.UnicodeUTF8))
ToolBox.setItemText(ToolBox.indexOf(self.strokeFillPage), QtGui.QApplication.translate("ToolBox", "Fill Color", None, QtGui.QApplication.UnicodeUTF8))
ToolBox.setItemText(ToolBox.indexOf(self.fontPage), QtGui.QApplication.translate("ToolBox", "Font", None, QtGui.QApplication.UnicodeUTF8))
ToolBox.setItemText(ToolBox.indexOf(self.usersPage), QtGui.QApplication.translate("ToolBox", "Users", None, QtGui.QApplication.UnicodeUTF8))
ToolBox.setItemText(ToolBox.indexOf(self.chatPage), QtGui.QApplication.translate("ToolBox", "Chat", None, QtGui.QApplication.UnicodeUTF8))
from StrokeStyleWidget import StrokeStyleWidget
from ChatWidget import ChatWidget
from DrawingPropertiesWidget import DrawingPropertiesWidget
from ColorWidget import ColorWidget
from FontWidget import FontWidget
import rsrc_rc
class ToolBox(QtGui.QToolBox, Ui_ToolBox):
def __init__(self, parent=None, f=QtCore.Qt.WindowFlags()):
QtGui.QToolBox.__init__(self, parent, f)
self.setupUi(self)
| gpl-3.0 |
qpxu007/Flask-AppBuilder | examples/quickfiles/app/views.py | 3 | 1312 | from flask.ext.appbuilder.models.sqla.interface import SQLAInterface
from flask.ext.appbuilder.views import ModelView, CompactCRUDMixin
from app.models import Project, ProjectFiles
from app import appbuilder, db
class ProjectFilesModelView(ModelView):
datamodel = SQLAInterface(ProjectFiles)
label_columns = {'file_name': 'File Name', 'download': 'Download'}
add_columns = ['file', 'description','project']
edit_columns = ['file', 'description','project']
list_columns = ['file_name', 'download']
show_columns = ['file_name', 'download']
class ProjectModelView(CompactCRUDMixin, ModelView):
datamodel = SQLAInterface(Project)
related_views = [ProjectFilesModelView]
show_template = 'appbuilder/general/model/show_cascade.html'
edit_template = 'appbuilder/general/model/edit_cascade.html'
add_columns = ['name']
edit_columns = ['name']
list_columns = ['name', 'created_by', 'created_on', 'changed_by', 'changed_on']
show_fieldsets = [
('Info', {'fields': ['name']}),
('Audit', {'fields': ['created_by', 'created_on', 'changed_by', 'changed_on'], 'expanded': False})
]
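# Create the database tables and register the views (the files view gets no menu entry).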
db.create_all()
appbuilder.add_view(ProjectModelView, "List Projects", icon="fa-table", category="Projects")
appbuilder.add_view_no_menu(ProjectFilesModelView)
| bsd-3-clause |
SmartArduino/Arduino-1 | arduino-core/src/processing/app/i18n/python/requests/packages/charade/euctwprober.py | 2994 | 1676 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-TW"
| lgpl-2.1 |
crackhopper/TFS-toolbox | tests/dataset/dataset_test.py | 1 | 1408 | import pytest
import tensorflow as tf
import numpy as np
from tfs.dataset.skdata import *
from tfs.dataset.predefined import *
import tfs.dataset.data_tool as dtool
import shutil
@pytest.fixture
def data():
return MakeBlobs(test_percent=0.3,n_samples=100)
class TestDataTool:
def test_split(self,capsys):
d=np.arange(10)
ds=dtool.split_n(d,5)
assert len(ds)==5
for dd in ds:
assert len(dd)==2
class TestDataset:
def test_empty(self):
d=Dataset()
def test_dataset(self,data):
assert data.train.shape[0]==70
def test_cv(self,data):
i=0
for train,test in data.train.cross_validation_loop(7):
i=i+1
assert train.shape[0]==60
assert test.shape[0]==10
assert i==7
def test_batch(self,data):
first_data=data.train.data[0]
for i in range(8):
x,y = data.train.next_batch(10,False)
assert x.shape[0]==10 and y.shape[0]==10
assert data.train.epochs_completed==1
assert (x[0] == first_data).all()
def test_one_hot(self,data):
lbls = data.train.labels.copy()
data.to_one_hot()
assert data.train.labels.ndim==2
data.to_raw_label()
assert data.train.labels.ndim==1
np.testing.assert_array_equal(lbls,data.train.labels)
def test_cifar10(self,capsys):
with capsys.disabled():
data = Cifar10()
def test_mnist(self,capsys):
with capsys.disabled():
data = Mnist()
| mit |
WatanabeYasumasa/edx-platform | lms/djangoapps/courseware/migrations/0004_add_field_studentmodule_course_id.py | 194 | 9027 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'StudentModule.course_id'
db.add_column('courseware_studentmodule', 'course_id',
self.gf('django.db.models.fields.CharField')(default="", max_length=255, db_index=True),
keep_default=False)
# Removing unique constraint on 'StudentModule', fields ['module_id', 'student']
db.delete_unique('courseware_studentmodule', ['module_id', 'student_id'])
# NOTE: manually remove this constraint (from 0001)--0003 tries, but fails for sqlite.
# Removing unique constraint on 'StudentModule', fields ['module_id', 'module_type', 'student']
if db.backend_name == "sqlite3":
db.delete_unique('courseware_studentmodule', ['student_id', 'module_id', 'module_type'])
# Adding unique constraint on 'StudentModule', fields ['course_id', 'module_state_key', 'student']
db.create_unique('courseware_studentmodule', ['student_id', 'module_id', 'course_id'])
def backwards(self, orm):
# Removing unique constraint on 'StudentModule', fields ['student_id', 'module_state_key', 'course_id']
db.delete_unique('courseware_studentmodule', ['student_id', 'module_id', 'course_id'])
# Deleting field 'StudentModule.course_id'
db.delete_column('courseware_studentmodule', 'course_id')
# Adding unique constraint on 'StudentModule', fields ['module_id', 'student']
db.create_unique('courseware_studentmodule', ['module_id', 'student_id'])
# Adding unique constraint on 'StudentModule', fields ['module_id', 'module_type', 'student']
db.create_unique('courseware_studentmodule', ['student_id', 'module_id', 'module_type'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'courseware.studentmodule': {
'Meta': {'unique_together': "(('course_id', 'student', 'module_state_key'),)", 'object_name': 'StudentModule'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'done': ('django.db.models.fields.CharField', [], {'default': "'na'", 'max_length': '8', 'db_index': 'True'}),
'grade': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_grade': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'module_state_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'module_id'", 'db_index': 'True'}),
'module_type': ('django.db.models.fields.CharField', [], {'default': "'problem'", 'max_length': '32', 'db_index': 'True'}),
'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['courseware']
| agpl-3.0 |
fernandoacorreia/DjangoWAWSLogging | DjangoWAWSLogging/env/Lib/site-packages/pywin32-218-py2.7-win32.egg/Demos/BackupSeek_streamheaders.py | 34 | 3454 | ## demonstrates using BackupSeek to enumerate data streams for a file
import win32file, win32api, win32con
from win32com import storagecon
import pythoncom, pywintypes
import struct, traceback
stream_types={
win32con.BACKUP_DATA:"Standard data",
win32con.BACKUP_EA_DATA:"Extended attribute data",
win32con.BACKUP_SECURITY_DATA:"Security descriptor data",
win32con.BACKUP_ALTERNATE_DATA:"Alternative data streams",
win32con.BACKUP_LINK:"Hard link information",
win32con.BACKUP_PROPERTY_DATA:"Property data",
win32con.BACKUP_OBJECT_ID:"Objects identifiers",
win32con.BACKUP_REPARSE_DATA:"Reparse points",
win32con.BACKUP_SPARSE_BLOCK:"Sparse file"
}
tempdir=win32api.GetTempPath()
tempfile=win32api.GetTempFileName(tempdir,'bkr')[0]
print 'Filename:',tempfile
f=open(tempfile,'w')
f.write('some random junk'+'x'*100)
f.close()
f=open(tempfile+':streamdata','w')
f.write('data written to alternate stream'+'y'*100)
f.close()
f=open(tempfile+':anotherstream','w')
f.write('z'*200)
f.close()
## add Summary Information, which is stored as a separate stream
m=storagecon.STGM_READWRITE | storagecon.STGM_SHARE_EXCLUSIVE |storagecon.STGM_DIRECT
pss=pythoncom.StgOpenStorageEx(tempfile, m, storagecon.STGFMT_FILE, 0 , pythoncom.IID_IPropertySetStorage,None)
ps=pss.Create(pythoncom.FMTID_SummaryInformation,pythoncom.IID_IPropertyStorage,0,storagecon.STGM_READWRITE|storagecon.STGM_SHARE_EXCLUSIVE)
ps.WriteMultiple((storagecon.PIDSI_KEYWORDS,storagecon.PIDSI_COMMENTS),('keywords','comments'))
ps=None
pss=None
sa=pywintypes.SECURITY_ATTRIBUTES()
sa.bInheritHandle=False
h=win32file.CreateFile(tempfile, win32con.GENERIC_ALL ,win32con.FILE_SHARE_READ,
sa, win32con.OPEN_EXISTING, win32file.FILE_FLAG_BACKUP_SEMANTICS , None)
""" stream header:
typedef struct _WIN32_STREAM_ID {
DWORD dwStreamId; DWORD dwStreamAttributes; LARGE_INTEGER Size;
DWORD dwStreamNameSize; WCHAR cStreamName[ANYSIZE_ARRAY];
}
"""
win32_stream_id_format="LLQL"
win32_stream_id_size=struct.calcsize(win32_stream_id_format)
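## Unpack one fixed-size WIN32_STREAM_ID header; if dwStreamNameSize > 0, the
## variable-length stream name follows and is fetched with another BackupRead.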
def parse_stream_header(h,ctxt,data):
stream_type, stream_attributes, stream_size, stream_name_size=struct.unpack(win32_stream_id_format,data)
print '\nType:',stream_type,stream_types[stream_type], 'Attributes:', stream_attributes, 'Size:', stream_size, 'Name len:',stream_name_size
if stream_name_size>0:
## ??? sdk says this size is in characters, but it appears to be number of bytes ???
bytes_read, stream_name_buf, ctxt=win32file.BackupRead(h, stream_name_size, None, False, True, ctxt)
stream_name=pywintypes.UnicodeFromRaw(stream_name_buf[:])
else:
stream_name='Unnamed'
print 'Name:'+stream_name
return ctxt, stream_type, stream_attributes, stream_size, stream_name_size, stream_name
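## Enumerate every stream in the file: read each header, then use BackupSeek to
## skip past the stream data rather than reading it.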
ctxt=0
win32_stream_id_buf=None ## gets rebound to a writable buffer on first call and reused
while 1:
bytes_read, win32_stream_id_buf, ctxt=win32file.BackupRead(h, win32_stream_id_size, win32_stream_id_buf, False, True, ctxt)
if bytes_read==0:
break
ctxt, stream_type, stream_attributes, stream_size, stream_name_size, stream_name=\
parse_stream_header(h, ctxt, win32_stream_id_buf[:])
if stream_size>0:
bytes_moved=win32file.BackupSeek(h, stream_size, ctxt)
print 'Moved: ',bytes_moved
win32file.BackupRead(h, win32_stream_id_size, win32_stream_id_buf, True, True, ctxt)
win32file.CloseHandle(h)
| mit |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python2.7/test/test_httplib.py | 8 | 26664 | import httplib
import array
import StringIO
import socket
import errno
import os
import unittest
TestCase = unittest.TestCase
from test import test_support
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Self-signed cert file for self-signed.pythontest.net
CERT_selfsigned_pythontestdotnet = os.path.join(here, 'selfsigned_pythontestdotnet.pem')
HOST = test_support.HOST
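# Minimal socket stand-in: records data passed to sendall() and serves the
# canned response text through makefile().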
class FakeSocket:
def __init__(self, text, fileclass=StringIO.StringIO, host=None, port=None):
self.text = text
self.fileclass = fileclass
self.data = ''
self.host = host
self.port = port
def sendall(self, data):
self.data += ''.join(data)
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise httplib.UnimplementedFileMode()
return self.fileclass(self.text)
def close(self):
pass
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise socket.error(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
class NoEOFStringIO(StringIO.StringIO):
"""Like StringIO, but raises AssertionError on EOF.
This is used below to test that httplib doesn't try to read
more from the underlying file than it should.
"""
def read(self, n=-1):
data = StringIO.StringIO.read(self, n)
if data == '':
raise AssertionError('caller tried to read past EOF')
return data
def readline(self, length=None):
data = StringIO.StringIO.readline(self, length)
if data == '':
raise AssertionError('caller tried to read past EOF')
return data
class HeaderTests(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = httplib.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_content_length_0(self):
class ContentLengthChecker(list):
def __init__(self):
list.__init__(self)
self.content_length = None
def append(self, item):
kv = item.split(':', 1)
if len(kv) > 1 and kv[0].lower() == 'content-length':
self.content_length = kv[1].strip()
list.append(self, item)
# POST with empty body
conn = httplib.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request('POST', '/', '')
self.assertEqual(conn._buffer.content_length, '0',
'Header Content-Length not set')
# PUT request with empty body
conn = httplib.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request('PUT', '/', '')
self.assertEqual(conn._buffer.content_length, '0',
'Header Content-Length not set')
def test_putheader(self):
conn = httplib.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length',42)
self.assertIn('Content-length: 42', conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should be wrapped by [] if
# it is an actual IPv6 address
expected = 'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
'Accept-Encoding: identity\r\n\r\n'
conn = httplib.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = 'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
'Accept-Encoding: identity\r\n\r\n'
conn = httplib.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
class BasicTest(TestCase):
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(0), '') # Issue #20007
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(), 'Text')
self.assertTrue(resp.isclosed())
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
self.assertRaises(httplib.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = httplib.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("\'\'",)''')
def test_partial_reads(self):
# if we have a length, the system knows when to close itself
# same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), 'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), 'xt')
self.assertTrue(resp.isclosed())
def test_partial_reads_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), 'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), 'xt')
self.assertEqual(resp.read(1), '')
self.assertTrue(resp.isclosed())
def test_partial_reads_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), 'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), 'xt')
self.assertEqual(resp.read(1), '')
self.assertTrue(resp.isclosed())
def test_host_port(self):
# Check invalid host_port
# Note that httplib does not accept user:password@ in the host-port.
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(httplib.InvalidURL, httplib.HTTP, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000", "fe80::207:e9ff:fe9b",
8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("www.python.org:", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80)):
http = httplib.HTTP(hp)
c = http._conn
if h != c.host:
self.fail("Host incorrectly parsed: %s != %s" % (h, c.host))
if p != c.port:
self.fail("Port incorrectly parsed: %s != %s" % (p, c.host))
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE";'
' Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = httplib.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
if cookies != hdr:
self.fail("multiple headers not combined properly")
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFStringIO)
resp = httplib.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read() != "":
self.fail("Did not expect response from HEAD request")
def test_too_many_headers(self):
headers = '\r\n'.join('Header%d: foo' % i for i in xrange(200)) + '\r\n'
text = ('HTTP/1.1 200 OK\r\n' + headers)
s = FakeSocket(text)
r = httplib.HTTPResponse(s)
self.assertRaises(httplib.HTTPException, r.begin)
def test_send_file(self):
expected = 'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
'Accept-Encoding: identity\r\nContent-Length:'
body = open(__file__, 'rb')
conn = httplib.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected))
def test_send(self):
expected = 'this is a test this is only a test'
conn = httplib.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = ''
conn.send(array.array('c', expected))
self.assertEqual(expected, sock.data)
sock.data = ''
conn.send(StringIO.StringIO(expected))
self.assertEqual(expected, sock.data)
def test_chunked(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + '0\r\n')
resp = httplib.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), 'hello world')
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = httplib.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except httplib.IncompleteRead, i:
self.assertEqual(i.partial, 'hello world')
self.assertEqual(repr(i),'IncompleteRead(11 bytes read)')
self.assertEqual(str(i),'IncompleteRead(11 bytes read)')
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + '0\r\n')
resp = httplib.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), '')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
def test_negative_content_length(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\n'
'Content-Length: -1\r\n\r\nHello\r\n')
resp = httplib.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), 'Hello\r\n')
self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = httplib.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except httplib.IncompleteRead as i:
self.assertEqual(i.partial, 'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = httplib.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(socket.error,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
def test_filenoattr(self):
# Just test the fileno attribute in the HTTPResponse Object.
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
self.assertTrue(hasattr(resp,'fileno'),
'HTTPResponse should expose a fileno attribute')
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
self.skipTest("disabled for HTTP 0.9 support")
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = httplib.HTTPResponse(FakeSocket(body))
self.assertRaises((httplib.LineTooLong, httplib.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = httplib.HTTPResponse(FakeSocket(body))
self.assertRaises(httplib.LineTooLong, resp.begin)
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
)
resp = httplib.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(httplib.LineTooLong, resp.read)
def test_early_eof(self):
# Test HTTPResponse with no \r\n termination.
body = "HTTP/1.1 200 Ok"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), '')
self.assertTrue(resp.isclosed())
class OfflineTest(TestCase):
def test_responses(self):
self.assertEqual(httplib.responses[httplib.NOT_FOUND], "Not Found")
class SourceAddressTest(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = test_support.bind_port(self.serv)
self.source_port = test_support.find_unused_port()
self.serv.listen(5)
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = httplib.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(httplib, 'HTTPSConnection'),
'httplib.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = httplib.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
# We don't test anything here other than the constructor not barfing, as
# this code doesn't deal with setting up an active running SSL server
# for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
PORT = None
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TimeoutTest.PORT = test_support.bind_port(self.serv)
self.serv.listen(5)
def tearDown(self):
self.serv.close()
self.serv = None
def testTimeoutAttribute(self):
'''This will prove that the timeout gets through
HTTPConnection and into the socket.
'''
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
# no timeout -- do not use global socket default
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT,
timeout=None)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), None)
httpConn.close()
# a value
httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
httpConn.connect()
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
class HTTPSTest(TestCase):
def setUp(self):
if not hasattr(httplib, 'HTTPSConnection'):
self.skipTest('ssl support required')
def make_server(self, certfile):
from test.ssl_servers import make_https_server
return make_https_server(self, certfile=certfile)
def test_attributes(self):
# simple test to check it's storing the timeout
h = httplib.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
def test_networked(self):
# Default settings: requires a valid cert from a trusted CA
import ssl
test_support.requires('network')
with test_support.transient_internet('self-signed.pythontest.net'):
h = httplib.HTTPSConnection('self-signed.pythontest.net', 443)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_networked_noverification(self):
# Switch off cert verification
import ssl
test_support.requires('network')
with test_support.transient_internet('self-signed.pythontest.net'):
context = ssl._create_stdlib_context()
h = httplib.HTTPSConnection('self-signed.pythontest.net', 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
self.assertIn('nginx', resp.getheader('server'))
@test_support.system_must_validate_cert
def test_networked_trusted_by_default_cert(self):
# Default settings: requires a valid cert from a trusted CA
test_support.requires('network')
with test_support.transient_internet('www.python.org'):
h = httplib.HTTPSConnection('www.python.org', 443)
h.request('GET', '/')
resp = h.getresponse()
content_type = resp.getheader('content-type')
self.assertIn('text/html', content_type)
def test_networked_good_cert(self):
# We feed the server's cert as a validating cert
import ssl
test_support.requires('network')
with test_support.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_selfsigned_pythontestdotnet)
h = httplib.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
h.request('GET', '/')
resp = h.getresponse()
server_string = resp.getheader('server')
self.assertIn('nginx', server_string)
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
import ssl
test_support.requires('network')
with test_support.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_localhost)
h = httplib.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_unknown_cert(self):
# The custom cert isn't known to the default trust bundle
import ssl
server = self.make_server(CERT_localhost)
h = httplib.HTTPSConnection('localhost', server.port)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_good_hostname(self):
# The (valid) cert validates the HTTP hostname
import ssl
server = self.make_server(CERT_localhost)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_localhost)
h = httplib.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
def test_local_bad_hostname(self):
# The (valid) cert doesn't validate the HTTP hostname
import ssl
server = self.make_server(CERT_fakehostname)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(CERT_fakehostname)
h = httplib.HTTPSConnection('localhost', server.port, context=context)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
h.close()
# With context.check_hostname=False, the mismatching is ignored
context.check_hostname = False
h = httplib.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(httplib.InvalidURL, httplib.HTTPSConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:443", "www.python.org", 443),
("www.python.org:", "www.python.org", 443),
("www.python.org", "www.python.org", 443),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b",
443)):
c = httplib.HTTPSConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
class TunnelTests(TestCase):
def test_connect(self):
response_text = (
'HTTP/1.0 200 OK\r\n\r\n' # Reply to CONNECT
'HTTP/1.1 200 OK\r\n' # Reply to HEAD
'Content-Length: 42\r\n\r\n'
)
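        # Fake replacement for socket.create_connection: every "connection" is a
        # FakeSocket pre-loaded with the canned CONNECT/HEAD responses above.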
def create_connection(address, timeout=None, source_address=None):
return FakeSocket(response_text, host=address[0], port=address[1])
conn = httplib.HTTPConnection('proxy.com')
conn._create_connection = create_connection
# Once connected, we should not be able to tunnel anymore
conn.connect()
self.assertRaises(RuntimeError, conn.set_tunnel, 'destination.com')
# But if close the connection, we are good.
conn.close()
conn.set_tunnel('destination.com')
conn.request('HEAD', '/', '')
self.assertEqual(conn.sock.host, 'proxy.com')
self.assertEqual(conn.sock.port, 80)
self.assertTrue('CONNECT destination.com' in conn.sock.data)
self.assertTrue('Host: destination.com' in conn.sock.data)
self.assertTrue('Host: proxy.com' not in conn.sock.data)
conn.close()
conn.request('PUT', '/', '')
self.assertEqual(conn.sock.host, 'proxy.com')
self.assertEqual(conn.sock.port, 80)
self.assertTrue('CONNECT destination.com' in conn.sock.data)
self.assertTrue('Host: destination.com' in conn.sock.data)
@test_support.reap_threads
def test_main(verbose=None):
test_support.run_unittest(HeaderTests, OfflineTest, BasicTest, TimeoutTest,
HTTPSTest, SourceAddressTest, TunnelTests)
if __name__ == '__main__':
test_main()
| gpl-2.0 |
thnee/ansible | lib/ansible/modules/network/aci/aci_aep.py | 8 | 7111 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_aep
short_description: Manage attachable Access Entity Profile (AEP) objects (infra:AttEntityP, infra:ProvAcc)
description:
- Connect to external virtual and physical domains by using
attachable Access Entity Profiles (AEP) on Cisco ACI fabrics.
version_added: '2.4'
options:
aep:
description:
- The name of the Attachable Access Entity Profile.
type: str
required: yes
aliases: [ aep_name, name ]
description:
description:
- Description for the AEP.
type: str
aliases: [ descr ]
infra_vlan:
description:
- Enable infrastructure VLAN.
- The hypervisor functions of the AEP.
- C(no) will disable the infrastructure vlan if it is enabled.
type: bool
aliases: [ infrastructure_vlan ]
version_added: '2.5'
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
default: present
choices: [ absent, present, query ]
extends_documentation_fragment: aci
seealso:
- module: aci_aep_to_domain
- name: APIC Management Information Model reference
description: More information about the internal APIC classes B(infra:AttEntityP) and B(infra:ProvAcc).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Swetha Chunduri (@schunduri)
'''
EXAMPLES = r'''
- name: Add a new AEP
aci_aep:
host: apic
username: admin
password: SomeSecretPassword
aep: ACI-AEP
description: default
state: present
delegate_to: localhost
- name: Remove an existing AEP
aci_aep:
host: apic
username: admin
password: SomeSecretPassword
aep: ACI-AEP
state: absent
delegate_to: localhost
- name: Query all AEPs
aci_aep:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
- name: Query a specific AEP
aci_aep:
host: apic
username: admin
password: SomeSecretPassword
aep: ACI-AEP
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
aep=dict(type='str', aliases=['name', 'aep_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
infra_vlan=dict(type='bool', aliases=['infrastructure_vlan']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['aep']],
['state', 'present', ['aep']],
],
)
aep = module.params.get('aep')
description = module.params.get('description')
infra_vlan = module.params.get('infra_vlan')
state = module.params.get('state')
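    # An enabled infrastructure VLAN is expressed as a child infra:ProvAcc object named
    # 'provacc'; infra_vlan=no instead marks that child as deleted, and leaving the
    # option unset touches nothing.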
if infra_vlan:
child_configs = [dict(infraProvAcc=dict(attributes=dict(name='provacc')))]
elif infra_vlan is False:
child_configs = [dict(infraProvAcc=dict(attributes=dict(name='provacc', status='deleted')))]
else:
child_configs = []
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='infraAttEntityP',
aci_rn='infra/attentp-{0}'.format(aep),
module_object=aep,
target_filter={'name': aep},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='infraAttEntityP',
class_config=dict(
name=aep,
descr=description,
),
child_configs=child_configs,
)
aci.get_diff(aci_class='infraAttEntityP')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
HelloAWorld/NoahGameFrame | Dependencies/protobuf-2.5.0/python/mox.py | 603 | 38237 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occurred. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import re
import types
import unittest
import stubout
class Error(AssertionError):
"""Base exception for this module."""
pass
class ExpectedMethodCallsError(Error):
"""Raised when Verify() is called before all expected methods have been called
"""
def __init__(self, expected_methods):
"""Init exception.
Args:
# expected_methods: A sequence of MockMethod objects that should have been
# called.
expected_methods: [MockMethod]
Raises:
ValueError: if expected_methods contains no methods.
"""
if not expected_methods:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_methods = expected_methods
def __str__(self):
calls = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_methods)])
return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
"""Raised when an unexpected method is called.
This can occur if a method is called with incorrect parameters, or out of the
specified order.
"""
def __init__(self, unexpected_method, expected):
"""Init exception.
Args:
# unexpected_method: MockMethod that was called but was not at the head of
# the expected_method queue.
# expected: MockMethod or UnorderedGroup the method should have
# been in.
unexpected_method: MockMethod
expected: MockMethod or UnorderedGroup
"""
Error.__init__(self)
self._unexpected_method = unexpected_method
self._expected = expected
def __str__(self):
return "Unexpected method call: %s. Expecting: %s" % \
(self._unexpected_method, self._expected)
class UnknownMethodCallError(Error):
"""Raised if an unknown method is requested of the mock object."""
def __init__(self, unknown_method_name):
"""Init exception.
Args:
# unknown_method_name: Method call that is not part of the mocked class's
# public interface.
unknown_method_name: str
"""
Error.__init__(self)
self._unknown_method_name = unknown_method_name
def __str__(self):
return "Method called is not a member of the object: %s" % \
self._unknown_method_name
class Mox(object):
"""Mox: a factory for creating mock objects."""
# A list of types that should be stubbed out with MockObjects (as
# opposed to MockAnythings).
_USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType,
types.ObjectType, types.TypeType]
def __init__(self):
"""Initialize a new Mox."""
self._mock_objects = []
self.stubs = stubout.StubOutForTesting()
def CreateMock(self, class_to_mock):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
"""
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock
def CreateMockAnything(self):
"""Create a mock that will accept any method calls.
This does not enforce an interface.
"""
new_mock = MockAnything()
self._mock_objects.append(new_mock)
return new_mock
def ReplayAll(self):
"""Set all mock objects to replay mode."""
for mock_obj in self._mock_objects:
mock_obj._Replay()
def VerifyAll(self):
"""Call verify on all mock objects created."""
for mock_obj in self._mock_objects:
mock_obj._Verify()
def ResetAll(self):
"""Call reset on all mock objects. This does not unset stubs."""
for mock_obj in self._mock_objects:
mock_obj._Reset()
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub)
def UnsetStubs(self):
"""Restore stubs to their original state."""
self.stubs.UnsetAll()
def Replay(*args):
"""Put mocks into Replay mode.
Args:
# args is any number of mocks to put into replay mode.
"""
for mock in args:
mock._Replay()
def Verify(*args):
"""Verify mocks.
Args:
# args is any number of mocks to be verified.
"""
for mock in args:
mock._Verify()
def Reset(*args):
"""Reset mocks.
Args:
# args is any number of mocks to be reset.
"""
for mock in args:
mock._Reset()
class MockAnything:
"""A mock that can be used to mock anything.
This is helpful for mocking classes that do not provide a public interface.
"""
def __init__(self):
""" """
self._Reset()
def __getattr__(self, method_name):
"""Intercept method calls on this object.
A new MockMethod is returned that is aware of the MockAnything's
state (record or replay). The call will be recorded or replayed
by the MockMethod's __call__.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return self._CreateMockMethod(method_name)
def _CreateMockMethod(self, method_name):
"""Create a new mock method call and return it.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return MockMethod(method_name, self._expected_calls_queue,
self._replay_mode)
def __nonzero__(self):
"""Return 1 for nonzero so the mock can be used as a conditional."""
return 1
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __ne__(self, rhs):
"""Provide custom logic to compare objects."""
return not self == rhs
def _Replay(self):
"""Start replaying expected method calls."""
self._replay_mode = True
def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue)
def _Reset(self):
"""Reset the state of this mock to record mode with an empty queue."""
# Maintain a list of method calls we are expecting
self._expected_calls_queue = deque()
# Make sure we are in setup mode, not replay mode
self._replay_mode = False
class MockObject(MockAnything, object):
"""A mock object that simulates the public/protected interface of a class."""
def __init__(self, class_to_mock):
"""Initialize a mock object.
This determines the methods and properties of the class and stores them.
Args:
# class_to_mock: class to be mocked
class_to_mock: class
"""
# This is used to hack around the mixin/inheritance of MockAnything, which
# is not a proper object (it can be anything. :-)
MockAnything.__dict__['__init__'](self)
# Get a list of all the public and special methods we should mock.
self._known_methods = set()
self._known_vars = set()
self._class_to_mock = class_to_mock
for method in dir(class_to_mock):
if callable(getattr(class_to_mock, method)):
self._known_methods.add(method)
else:
self._known_vars.add(method)
def __getattr__(self, name):
"""Intercept attribute request on this object.
If the attribute is a public class variable, it will be returned and not
recorded as a call.
If the attribute is not a variable, it is handled like a method
call. The method name is checked against the set of mockable
methods, and a new MockMethod is returned that is aware of the
MockObject's state (record or replay). The call will be recorded
or replayed by the MockMethod's __call__.
Args:
# name: the name of the attribute being requested.
name: str
Returns:
Either a class variable or a new MockMethod that is aware of the state
of the mock (record or replay).
Raises:
UnknownMethodCallError if the MockObject does not mock the requested
method.
"""
if name in self._known_vars:
return getattr(self._class_to_mock, name)
if name in self._known_methods:
return self._CreateMockMethod(name)
raise UnknownMethodCallError(name)
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockObject) and
self._class_to_mock == rhs._class_to_mock and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __setitem__(self, key, value):
"""Provide custom logic for mocking classes that support item assignment.
Args:
key: Key to set the value for.
value: Value to set.
Returns:
Expected return value in replay mode. A MockMethod object for the
__setitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not support item assignment.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
setitem = self._class_to_mock.__dict__.get('__setitem__', None)
# Verify the class supports item assignment.
if setitem is None:
raise TypeError('object does not support item assignment')
# If we are in replay mode then simply call the mock __setitem__ method.
if self._replay_mode:
return MockMethod('__setitem__', self._expected_calls_queue,
self._replay_mode)(key, value)
# Otherwise, create a mock method __setitem__.
return self._CreateMockMethod('__setitem__')(key, value)
def __getitem__(self, key):
"""Provide custom logic for mocking classes that are subscriptable.
Args:
key: Key to return the value for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__getitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not subscriptable.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
getitem = self._class_to_mock.__dict__.get('__getitem__', None)
# Verify the class supports item assignment.
if getitem is None:
raise TypeError('unsubscriptable object')
# If we are in replay mode then simply call the mock __getitem__ method.
if self._replay_mode:
return MockMethod('__getitem__', self._expected_calls_queue,
self._replay_mode)(key)
# Otherwise, create a mock method __getitem__.
return self._CreateMockMethod('__getitem__')(key)
def __call__(self, *params, **named_params):
"""Provide custom logic for mocking classes that are callable."""
# Verify the class we are mocking is callable
callable = self._class_to_mock.__dict__.get('__call__', None)
if callable is None:
raise TypeError('Not callable')
# Because the call is happening directly on this object instead of a method,
# the call on the mock method is made right here
mock_method = self._CreateMockMethod('__call__')
return mock_method(*params, **named_params)
@property
def __class__(self):
"""Return the class that is being mocked."""
return self._class_to_mock
class MockMethod(object):
"""Callable mock method.
A MockMethod should act exactly like the method it mocks, accepting parameters
and returning a value, or throwing an exception (as specified). When this
method is called, it can optionally verify whether the called method (name and
signature) matches the expected method.
"""
def __init__(self, method_name, call_queue, replay_mode):
"""Construct a new mock method.
Args:
# method_name: the name of the method
# call_queue: deque of calls, verify this call against the head, or add
# this call to the queue.
# replay_mode: False if we are recording, True if we are verifying calls
# against the call queue.
method_name: str
call_queue: list or deque
replay_mode: bool
"""
self._name = method_name
self._call_queue = call_queue
if not isinstance(call_queue, deque):
self._call_queue = deque(self._call_queue)
self._replay_mode = replay_mode
self._params = None
self._named_params = None
self._return_value = None
self._exception = None
self._side_effects = None
def __call__(self, *params, **named_params):
"""Log parameters and return the specified return value.
If the Mock(Anything/Object) associated with this call is in record mode,
this MockMethod will be pushed onto the expected call queue. If the mock
is in replay mode, this will pop a MockMethod off the top of the queue and
verify this call is equal to the expected call.
Raises:
UnexpectedMethodCall if this call is supposed to match an expected method
call and it does not.
"""
self._params = params
self._named_params = named_params
if not self._replay_mode:
self._call_queue.append(self)
return self
expected_method = self._VerifyMethodCall()
if expected_method._side_effects:
expected_method._side_effects(*params, **named_params)
if expected_method._exception:
raise expected_method._exception
return expected_method._return_value
def __getattr__(self, name):
"""Raise an AttributeError with a helpful message."""
raise AttributeError('MockMethod has no attribute "%s". '
'Did you remember to put your mocks in replay mode?' % name)
def _PopNextMethod(self):
"""Pop the next method from our call queue."""
try:
return self._call_queue.popleft()
except IndexError:
raise UnexpectedMethodCallError(self, None)
def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected
def __str__(self):
params = ', '.join(
[repr(p) for p in self._params or []] +
['%s=%r' % x for x in sorted((self._named_params or {}).items())])
desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
return desc
def __eq__(self, rhs):
"""Test whether this MockMethod is equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return (isinstance(rhs, MockMethod) and
self._name == rhs._name and
self._params == rhs._params and
self._named_params == rhs._named_params)
def __ne__(self, rhs):
"""Test whether this MockMethod is not equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return not self == rhs
def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are on the stack.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self
def InAnyOrder(self, group_name="default"):
"""Move this method into a group of unordered calls.
A group of unordered calls must be defined together, and must be executed
in full before the next expected method can be called. There can be
multiple groups that are expected serially, if they are given
different group names. The same group name can be reused if there is a
standard method call, or a group with a different name, spliced between
usages.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
def MultipleTimes(self, group_name="default"):
"""Move this method into group of calls which may be called multiple times.
A group of repeating calls must be defined together, and must be executed in
    full before the next expected method can be called.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
def AndReturn(self, return_value):
"""Set the value to return when this method is called.
Args:
# return_value can be anything.
"""
self._return_value = return_value
return return_value
def AndRaise(self, exception):
"""Set the exception to raise when this method is called.
Args:
# exception: the exception to raise when this method is called.
exception: Exception
"""
self._exception = exception
def WithSideEffects(self, side_effects):
"""Set the side effects that are simulated when this method is called.
Args:
side_effects: A callable which modifies the parameters or other relevant
state which a given test case depends on.
Returns:
Self for chaining with AndReturn and AndRaise.
"""
self._side_effects = side_effects
return self
class Comparator:
"""Base class for all Mox comparators.
A Comparator can be used as a parameter to a mocked method when the exact
value is not known. For example, the code you are testing might build up a
long SQL string that is passed to your mock DAO. You're only interested that
the IN clause contains the proper primary keys, so you can set your mock
up as follows:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
A Comparator may replace one or more parameters, for example:
# return at most 10 rows
mock_dao.RunQuery(StrContains('SELECT'), 10)
or
# Return some non-deterministic number of rows
mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
"""
def equals(self, rhs):
"""Special equals method that all comparators must implement.
Args:
rhs: any python object
"""
    raise NotImplementedError('method must be implemented by a subclass.')
def __eq__(self, rhs):
return self.equals(rhs)
def __ne__(self, rhs):
return not self.equals(rhs)
class IsA(Comparator):
"""This class wraps a basic Python type or class. It is used to verify
that a parameter is of the given type or class.
Example:
mock_dao.Connect(IsA(DbConnectInfo))
"""
def __init__(self, class_name):
"""Initialize IsA
Args:
class_name: basic python type or a class
"""
self._class_name = class_name
def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name)
def __repr__(self):
return str(self._class_name)
class IsAlmost(Comparator):
"""Comparison class used to check whether a parameter is nearly equal
to a given value. Generally useful for floating point numbers.
Example mock_dao.SetTimeout((IsAlmost(3.9)))
"""
def __init__(self, float_value, places=7):
"""Initialize IsAlmost.
Args:
float_value: The value for making the comparison.
places: The number of decimal places to round to.
"""
self._float_value = float_value
self._places = places
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False
def __repr__(self):
return str(self._float_value)
class StrContains(Comparator):
"""Comparison class used to check whether a substring exists in a
string parameter. This can be useful in mocking a database with SQL
passed in as a string parameter, for example.
Example:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
"""
def __init__(self, search_string):
"""Initialize.
Args:
# search_string: the string you are searching for
search_string: str
"""
self._search_string = search_string
def equals(self, rhs):
"""Check to see if the search_string is contained in the rhs string.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return rhs.find(self._search_string) > -1
except Exception:
return False
def __repr__(self):
return '<str containing \'%s\'>' % self._search_string
class Regex(Comparator):
"""Checks if a string matches a regular expression.
This uses a given regular expression to determine equality.
"""
def __init__(self, pattern, flags=0):
"""Initialize.
Args:
# pattern is the regular expression to search for
pattern: str
# flags passed to re.compile function as the second argument
flags: int
"""
self.regex = re.compile(pattern, flags=flags)
def equals(self, rhs):
"""Check to see if rhs matches regular expression pattern.
Returns:
bool
"""
return self.regex.search(rhs) is not None
def __repr__(self):
s = '<regular expression \'%s\'' % self.regex.pattern
if self.regex.flags:
s += ', flags=%d' % self.regex.flags
s += '>'
return s
class In(Comparator):
"""Checks whether an item (or key) is in a list (or dict) parameter.
Example:
mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
"""
def __init__(self, key):
"""Initialize.
Args:
# key is any thing that could be in a list or a key in a dict
"""
self._key = key
def equals(self, rhs):
"""Check to see whether key is in rhs.
Args:
rhs: dict
Returns:
bool
"""
return self._key in rhs
def __repr__(self):
return '<sequence or map containing \'%s\'>' % self._key
class ContainsKeyValue(Comparator):
"""Checks whether a key/value pair is in a dict parameter.
Example:
mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: a key in a dict
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False
def __repr__(self):
return '<map containing the entry \'%s: %s\'>' % (self._key, self._value)
class SameElementsAs(Comparator):
"""Checks whether iterables contain the same elements (ignoring order).
Example:
mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki'))
"""
def __init__(self, expected_seq):
"""Initialize.
Args:
expected_seq: a sequence
"""
self._expected_seq = expected_seq
def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual
def __repr__(self):
return '<sequence with same elements as \'%s\'>' % self._expected_seq
class And(Comparator):
"""Evaluates one or more Comparators on RHS and returns an AND of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Comparator
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if not comparator.equals(rhs):
return False
return True
def __repr__(self):
return '<AND %s>' % str(self._comparators)
class Or(Comparator):
"""Evaluates one or more Comparators on RHS and returns an OR of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Mox comparators
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False
def __repr__(self):
return '<OR %s>' % str(self._comparators)
class Func(Comparator):
"""Call a function that should verify the parameter passed in is correct.
You may need the ability to perform more advanced operations on the parameter
in order to validate it. You can use this to have a callable validate any
parameter. The callable should return either True or False.
Example:
def myParamValidator(param):
# Advanced logic here
return True
    mock_dao.DoSomething(Func(myParamValidator), True)
"""
def __init__(self, func):
"""Initialize.
Args:
func: callable that takes one parameter and returns a bool
"""
self._func = func
def equals(self, rhs):
"""Test whether rhs passes the function test.
rhs is passed into func.
Args:
rhs: any python object
Returns:
the result of func(rhs)
"""
return self._func(rhs)
def __repr__(self):
return str(self._func)
class IgnoreArg(Comparator):
"""Ignore an argument.
This can be used when we don't care about an argument of a method call.
Example:
# Check if CastMagic is called with 3 as first arg and 'disappear' as third.
mymock.CastMagic(3, IgnoreArg(), 'disappear')
"""
def equals(self, unused_rhs):
"""Ignores arguments and returns True.
Args:
unused_rhs: any python object
Returns:
always returns True
"""
return True
def __repr__(self):
return '<IgnoreArg>'
class MethodGroup(object):
"""Base class containing common behaviour for MethodGroups."""
def __init__(self, group_name):
self._group_name = group_name
def group_name(self):
return self._group_name
def __str__(self):
return '<%s "%s">' % (self.__class__.__name__, self._group_name)
def AddMethod(self, mock_method):
raise NotImplementedError
def MethodCalled(self, mock_method):
raise NotImplementedError
def IsSatisfied(self):
raise NotImplementedError
class UnorderedGroup(MethodGroup):
"""UnorderedGroup holds a set of method calls that may occur in any order.
This construct is helpful for non-deterministic events, such as iterating
over the keys of a dict.
"""
def __init__(self, group_name):
super(UnorderedGroup, self).__init__(group_name)
self._methods = []
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.append(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if there are not any methods in this group."""
return len(self._methods) == 0
class MultipleTimesGroup(MethodGroup):
"""MultipleTimesGroup holds methods that may be called any number of times.
Note: Each method must be called at least once.
  This is helpful if you don't know or care how many times a method is called.
"""
def __init__(self, group_name):
super(MultipleTimesGroup, self).__init__(group_name)
self._methods = set()
self._methods_called = set()
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.add(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_called.add(mock_method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
      next_method = mock_method._PopNextMethod()
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False
class MoxMetaTestBase(type):
"""Metaclass to add mox cleanup and verification to every test.
As the mox unit testing class is being constructed (MoxTestBase or a
subclass), this metaclass will modify all test functions to call the
CleanUpMox method of the test class after they finish. This means that
unstubbing and verifying will happen for every test with no additional code,
and any failures will result in test failures as opposed to errors.
"""
def __init__(cls, name, bases, d):
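    """Wrap every test* method of the class so stubs are unset and mocks verified afterwards."""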
type.__init__(cls, name, bases, d)
# also get all the attributes from the base classes to account
# for a case when test class is not the immediate child of MoxTestBase
for base in bases:
for attr_name in dir(base):
d[attr_name] = getattr(base, attr_name)
for func_name, func in d.items():
if func_name.startswith('test') and callable(func):
setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
@staticmethod
def CleanUpTest(cls, func):
"""Adds Mox cleanup code to any MoxTestBase method.
Always unsets stubs after a test. Will verify all mocks for tests that
otherwise pass.
Args:
cls: MoxTestBase or subclass; the class whose test method we are altering.
func: method; the method of the MoxTestBase test class we wish to alter.
Returns:
The modified method.
"""
def new_method(self, *args, **kwargs):
mox_obj = getattr(self, 'mox', None)
cleanup_mox = False
if mox_obj and isinstance(mox_obj, Mox):
cleanup_mox = True
try:
func(self, *args, **kwargs)
finally:
if cleanup_mox:
mox_obj.UnsetStubs()
if cleanup_mox:
mox_obj.VerifyAll()
new_method.__name__ = func.__name__
new_method.__doc__ = func.__doc__
new_method.__module__ = func.__module__
return new_method
class MoxTestBase(unittest.TestCase):
"""Convenience test class to make stubbing easier.
Sets up a "mox" attribute which is an instance of Mox - any mox tests will
want this. Also automatically unsets any stubs and verifies that all mock
methods have been called at the end of each test, eliminating boilerplate
code.
"""
__metaclass__ = MoxMetaTestBase
def setUp(self):
self.mox = Mox()
| apache-2.0 |
gauthierm/bedrock | bedrock/grants/views.py | 38 | 1352 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from operator import attrgetter
from django.http import Http404
from lib import l10n_utils
import bleach
from grants_db import GRANTS
grant_labels = {
'': 'All',
'open-source-technology': 'Open Source Technology',
'learning-webmaking': 'Learning & Webmaking',
'user-sovereignty': 'User Sovereignty',
'free-culture-community': 'Free Culture & Community'
}
def grant_info(request, slug):
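    # Find the grant whose url attribute matches the requested slug; 404 if none does.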
grant_data = filter(lambda k: k.url == slug, GRANTS)
if not grant_data:
raise Http404
return l10n_utils.render(request, "grants/info.html", {
'grant': grant_data[0],
'grant_labels': grant_labels
})
def grants(request):
type_filter = bleach.clean(request.GET.get('type', ''))
if type_filter and type_filter not in grant_labels:
raise Http404
if type_filter:
grants = filter(lambda k: k.type == type_filter, GRANTS)
else:
grants = GRANTS
grants.sort(key=attrgetter('grantee'))
return l10n_utils.render(request, "grants/index.html", {
'filter': type_filter,
'grants': grants,
'grant_labels': grant_labels
})
| mpl-2.0 |
krafczyk/spack | var/spack/repos/builtin/packages/linkphase3/package.py | 2 | 1744 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Linkphase3(Package):
"""Haplotype reconstruction in pedigreed populations."""
homepage = "https://github.com/tdruet/LINKPHASE3"
git = "https://github.com/tdruet/LINKPHASE3.git"
version('2017-06-14', commit='559913593fc818bb1adb29796a548cf5bf323827')
def install(self, spec, prefix):
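        # Upstream is a single Fortran source file, so compile it directly with the
        # configured Fortran compiler and copy the resulting binary into the prefix.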
fortran = Executable(self.compiler.fc)
fortran('LINKPHASE3.f90', '-o', 'LINKPHASE3')
mkdirp(prefix.bin)
install('LINKPHASE3', prefix.bin)
| lgpl-2.1 |
repotvsupertuga/repo | script.video.F4mProxy/lib/f4mUtils/cryptomath.py | 135 | 8356 | # Authors:
# Trevor Perrin
# Martin von Loewis - python 3 port
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
# See the LICENSE file for legal information regarding use of this file.
"""cryptomath module
This module has basic math/crypto code."""
from __future__ import print_function
import os
import math
import base64
import binascii
from .compat import *
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# Try to load M2Crypto/OpenSSL
try:
from M2Crypto import m2
m2cryptoLoaded = True
except ImportError:
m2cryptoLoaded = False
#Try to load GMPY
try:
import gmpy
gmpyLoaded = True
except ImportError:
gmpyLoaded = False
#Try to load pycrypto
try:
import Crypto.Cipher.AES
pycryptoLoaded = True
except ImportError:
pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Check that os.urandom works
import zlib
length = len(zlib.compress(os.urandom(1000)))
assert(length > 900)
def getRandomBytes(howMany):
b = bytearray(os.urandom(howMany))
assert(len(b) == howMany)
return b
prngName = "os.urandom"
# **************************************************************************
# Simple hash functions
# **************************************************************************
import hmac
import hashlib
def MD5(b):
return bytearray(hashlib.md5(compat26Str(b)).digest())
def SHA1(b):
return bytearray(hashlib.sha1(compat26Str(b)).digest())
def HMAC_MD5(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.md5).digest())
def HMAC_SHA1(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.sha1).digest())
def HMAC_SHA256(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.sha256).digest())
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(b):
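    """Interpret a byte sequence as an unsigned big-endian integer."""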
total = 0
multiplier = 1
for count in range(len(b)-1, -1, -1):
byte = b[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToByteArray(n, howManyBytes=None):
"""Convert an integer into a bytearray, zero-pad to howManyBytes.
The returned bytearray may be smaller than howManyBytes, but will
not be larger. The returned bytearray will contain a big-endian
encoding of the input integer (n).
"""
if howManyBytes == None:
howManyBytes = numBytes(n)
b = bytearray(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
b[count] = int(n % 256)
n >>= 8
return b
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
    if (ord(mpi[4]) & 0x80) != 0: #Make sure this is a positive number
raise AssertionError()
b = bytearray(mpi[4:])
return bytesToNumber(b)
def numberToMPI(n):
b = numberToByteArray(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
b = bytearray(4+ext) + b
b[0] = (length >> 24) & 0xFF
b[1] = (length >> 16) & 0xFF
b[2] = (length >> 8) & 0xFF
b[3] = length & 0xFF
return bytes(b)
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBits(n):
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
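    """Return a random integer n with low <= n < high, by rejection sampling over random bytes."""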
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
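    """Greatest common divisor of a and b (Euclidean algorithm)."""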
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
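    """Least common multiple of a and b."""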
return (a * b) // gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
q = d // c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
if gmpyLoaded:
def powMod(base, power, modulus):
base = gmpy.mpz(base)
power = gmpy.mpz(power)
modulus = gmpy.mpz(modulus)
result = pow(base, power, modulus)
return long(result)
else:
def powMod(base, power, modulus):
if power < 0:
result = pow(base, power*-1, modulus)
result = invMod(result, modulus)
return result
else:
return pow(base, power, modulus)
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
sieve = list(range(n))
for count in range(2, int(math.sqrt(n))+1):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print("*", end=' ')
s, t = n-1, 0
while s % 2 == 0:
s, t = s//2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = ((2 ** (bits-1)) * 3) // 2
high = 2 ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print(".", end=' ')
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3//2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print(".", end=' ')
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
| gpl-2.0 |
bitkeeper/python-opcua | opcua/server/standard_address_space/standard_address_space.py | 1 | 1407 |
import os.path
import opcua
from opcua.server.standard_address_space.standard_address_space_part3 import create_standard_address_space_Part3
from opcua.server.standard_address_space.standard_address_space_part4 import create_standard_address_space_Part4
from opcua.server.standard_address_space.standard_address_space_part5 import create_standard_address_space_Part5
from opcua.server.standard_address_space.standard_address_space_part8 import create_standard_address_space_Part8
from opcua.server.standard_address_space.standard_address_space_part9 import create_standard_address_space_Part9
from opcua.server.standard_address_space.standard_address_space_part10 import create_standard_address_space_Part10
from opcua.server.standard_address_space.standard_address_space_part11 import create_standard_address_space_Part11
from opcua.server.standard_address_space.standard_address_space_part13 import create_standard_address_space_Part13
def fill_address_space(nodeservice):
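    """Populate the given node service with the standard OPC UA address space from the generated part modules."""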
create_standard_address_space_Part3(nodeservice)
create_standard_address_space_Part4(nodeservice)
create_standard_address_space_Part5(nodeservice)
create_standard_address_space_Part8(nodeservice)
create_standard_address_space_Part9(nodeservice)
create_standard_address_space_Part10(nodeservice)
create_standard_address_space_Part11(nodeservice)
create_standard_address_space_Part13(nodeservice)
| lgpl-3.0 |
singlebrook/AWS-ElasticBeanstalk-CLI | eb/macosx/python3/lib/aws/requests/packages/charade/hebrewprober.py | 2929 | 13359 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contains special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letter scores maintained and both
# model probers' scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
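#
# Illustrative wiring sketch (not part of this module): roughly how the
# group prober is expected to hook the two model probers up to a
# HebrewProber via set_model_probers() defined below. SBCharSetProber and
# the Win1255Model object are assumed to be importable from this package's
# sibling modules; treat this as a hedged sketch rather than the
# authoritative SBCSGroupProber code.
#
#   hebrew_prober = HebrewProber()
#   logical_prober = SBCharSetProber(Win1255Model, False, hebrew_prober)
#   visual_prober = SBCharSetProber(Win1255Model, True, hebrew_prober)
#   hebrew_prober.set_model_probers(logical_prober, visual_prober)
#
# All three are then fed the same filtered buffer; when a model prober is
# asked for its charset name it defers to the HebrewProber's decision.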
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
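    # Worked example of the decision above (illustrative numbers only): with
    # _mFinalCharLogicalScore = 9 and _mFinalCharVisualScore = 2, finalsub is
    # 7 >= MIN_FINAL_CHAR_DISTANCE (5), so "windows-1255" is returned without
    # consulting the model probers. With scores 4 and 2, finalsub is only 2,
    # so the gap between the two model probers' confidences (compared against
    # MIN_MODEL_DISTANCE = 0.01) decides instead; only if that is also
    # inconclusive does the sign of finalsub break the tie, defaulting to
    # Logical.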
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
| apache-2.0 |
rppowell-lasfs/loscon2py | models/db.py | 12 | 3887 | # -*- coding: utf-8 -*-
#########################################################################
## This scaffolding model makes your app work on Google App Engine too
## This file is released into the public domain and you can use it without limitations
#########################################################################
## if SSL/HTTPS is properly configured and you want all HTTP requests to
## be redirected to HTTPS, uncomment the line below:
# request.requires_https()
## app configuration made easy. Look inside private/appconfig.ini
from gluon.contrib.appconfig import AppConfig
## once in production, remove reload=True to gain full speed
myconf = AppConfig(reload=True)
if not request.env.web2py_runtime_gae:
## if NOT running on Google App Engine use SQLite or other DB
db = DAL(myconf.take('db.uri'), pool_size=myconf.take('db.pool_size', cast=int), check_reserved=['all'])
else:
## connect to Google BigTable (optional 'google:datastore://namespace')
db = DAL('google:datastore+ndb')
## store sessions and tickets there
session.connect(request, response, db=db)
## or store session in Memcache, Redis, etc.
## from gluon.contrib.memdb import MEMDB
## from google.appengine.api.memcache import Client
## session.connect(request, response, db = MEMDB(Client()))
## by default give a view/generic.extension to all actions from localhost
## none otherwise. a pattern can be 'controller/function.extension'
response.generic_patterns = ['*'] if request.is_local else []
## choose a style for forms
response.formstyle = myconf.take('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other
response.form_label_separator = myconf.take('forms.separator')
## (optional) optimize handling of static files
# response.optimize_css = 'concat,minify,inline'
# response.optimize_js = 'concat,minify,inline'
## (optional) static assets folder versioning
# response.static_version = '0.0.0'
#########################################################################
## Here is sample code if you need for
## - email capabilities
## - authentication (registration, login, logout, ... )
## - authorization (role based authorization)
## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
## - old style crud actions
## (more options discussed in gluon/tools.py)
#########################################################################
from gluon.tools import Auth, Service, PluginManager
auth = Auth(db)
service = Service()
plugins = PluginManager()
## create all tables needed by auth if not custom tables
auth.define_tables(username=False, signature=False)
## configure email
mail = auth.settings.mailer
mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.server')
mail.settings.sender = myconf.take('smtp.sender')
mail.settings.login = myconf.take('smtp.login')
## configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
#########################################################################
## Define your tables below (or better in another model file) for example
##
## >>> db.define_table('mytable',Field('myfield','string'))
##
## Fields can be 'string','text','password','integer','double','boolean'
## 'date','time','datetime','blob','upload', 'reference TABLENAME'
## There is an implicit 'id integer autoincrement' field
## Consult manual for more options, validators, etc.
##
## More API examples for controllers:
##
## >>> db.mytable.insert(myfield='value')
## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)
## >>> for row in rows: print row.id, row.myfield
#########################################################################
## after defining tables, uncomment below to enable auditing
# auth.enable_record_versioning(db)
| mit |
Godiyos/python-for-android | python-build/python-libs/gdata/tests/gdata_tests/contacts/service_test.py | 87 | 10530 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import getpass
import re
import unittest
import urllib
import atom
import gdata.contacts.service
import gdata.test_config as conf
class ContactsServiceTest(unittest.TestCase):
def setUp(self):
self.gd_client = gdata.contacts.service.ContactsService()
conf.configure_service(self.gd_client, conf.settings.ContactsConfig,
'ContactsServiceTest')
self.gd_client.email = conf.settings.ContactsConfig.email()
def tearDown(self):
conf.close_service(self.gd_client)
def testGetContactsFeed(self):
if not conf.settings.RUN_LIVE_TESTS:
return
conf.configure_service_cache(self.gd_client, 'testGetContactsFeed')
feed = self.gd_client.GetContactsFeed()
self.assert_(isinstance(feed, gdata.contacts.ContactsFeed))
def testDefaultContactList(self):
self.assertEquals('default', self.gd_client.contact_list)
def testCustomContactList(self):
if not conf.settings.RUN_LIVE_TESTS:
return
conf.configure_service_cache(self.gd_client, 'testCustomContactList')
self.gd_client.contact_list = conf.settings.ContactsConfig.email()
feed = self.gd_client.GetContactsFeed()
self.assert_(isinstance(feed, gdata.contacts.ContactsFeed))
def testGetFeedUriDefault(self):
self.gd_client.contact_list = 'domain.com'
self.assertEquals('/m8/feeds/contacts/domain.com/full',
self.gd_client.GetFeedUri())
def testGetFeedUriCustom(self):
uri = self.gd_client.GetFeedUri(kind='groups',
contact_list='example.com',
projection='base/batch',
scheme='https')
self.assertEquals(
'https://www.google.com/m8/feeds/groups/example.com/base/batch', uri)
def testCreateUpdateDeleteContactAndUpdatePhoto(self):
if not conf.settings.RUN_LIVE_TESTS:
return
conf.configure_service_cache(self.gd_client, 'testCreateUpdateDeleteContactAndUpdatePhoto')
DeleteTestContact(self.gd_client)
# Create a new entry
new_entry = gdata.contacts.ContactEntry()
new_entry.title = atom.Title(text='Elizabeth Bennet')
new_entry.content = atom.Content(text='Test Notes')
new_entry.email.append(gdata.contacts.Email(
rel='http://schemas.google.com/g/2005#work',
primary='true',
address='liz@gmail.com'))
new_entry.phone_number.append(gdata.contacts.PhoneNumber(
rel='http://schemas.google.com/g/2005#work', text='(206)555-1212'))
new_entry.organization = gdata.contacts.Organization(
org_name=gdata.contacts.OrgName(text='TestCo.'),
rel='http://schemas.google.com/g/2005#work')
entry = self.gd_client.CreateContact(new_entry)
# Generate and parse the XML for the new entry.
self.assertEquals(entry.title.text, new_entry.title.text)
self.assertEquals(entry.content.text, 'Test Notes')
self.assertEquals(len(entry.email), 1)
self.assertEquals(entry.email[0].rel, new_entry.email[0].rel)
self.assertEquals(entry.email[0].address, 'liz@gmail.com')
self.assertEquals(len(entry.phone_number), 1)
self.assertEquals(entry.phone_number[0].rel,
new_entry.phone_number[0].rel)
self.assertEquals(entry.phone_number[0].text, '(206)555-1212')
self.assertEquals(entry.organization.org_name.text, 'TestCo.')
# Edit the entry.
entry.phone_number[0].text = '(555)555-1212'
updated = self.gd_client.UpdateContact(entry.GetEditLink().href, entry)
self.assertEquals(updated.content.text, 'Test Notes')
self.assertEquals(len(updated.phone_number), 1)
self.assertEquals(updated.phone_number[0].rel,
entry.phone_number[0].rel)
self.assertEquals(updated.phone_number[0].text, '(555)555-1212')
# Change the contact's photo.
updated_photo = self.gd_client.ChangePhoto(
conf.settings.ContactsConfig.get_image_location(), updated,
content_type='image/jpeg')
# Refetch the contact so that it has the new photo link
updated = self.gd_client.GetContact(updated.GetSelfLink().href)
self.assert_(updated.GetPhotoLink() is not None)
# Fetch the photo data.
hosted_image = self.gd_client.GetPhoto(updated)
self.assert_(hosted_image is not None)
# Delete the entry.
self.gd_client.DeleteContact(updated.GetEditLink().href)
def testCreateAndDeleteContactUsingBatch(self):
if not conf.settings.RUN_LIVE_TESTS:
return
conf.configure_service_cache(self.gd_client, 'testCreateAndDeleteContactUsingBatch')
# Get random data for creating contact
random_contact_number = 'notRandom5'
random_contact_title = 'Random Contact %s' % (
random_contact_number)
# Set contact data
contact = gdata.contacts.ContactEntry()
contact.title = atom.Title(text=random_contact_title)
contact.email = gdata.contacts.Email(
address='user%s@example.com' % random_contact_number,
primary='true',
rel=gdata.contacts.REL_WORK)
contact.content = atom.Content(text='Contact created by '
'gdata-python-client automated test '
'suite.')
# Form a batch request
batch_request = gdata.contacts.ContactsFeed()
batch_request.AddInsert(entry=contact)
# Execute the batch request to insert the contact.
default_batch_url = gdata.contacts.service.DEFAULT_BATCH_URL
batch_result = self.gd_client.ExecuteBatch(batch_request,
default_batch_url)
self.assertEquals(len(batch_result.entry), 1)
self.assertEquals(batch_result.entry[0].title.text,
random_contact_title)
self.assertEquals(batch_result.entry[0].batch_operation.type,
gdata.BATCH_INSERT)
self.assertEquals(batch_result.entry[0].batch_status.code,
'201')
expected_batch_url = re.compile('default').sub(
urllib.quote(self.gd_client.email),
gdata.contacts.service.DEFAULT_BATCH_URL)
self.failUnless(batch_result.GetBatchLink().href,
expected_batch_url)
# Create a batch request to delete the newly created entry.
batch_delete_request = gdata.contacts.ContactsFeed()
batch_delete_request.AddDelete(entry=batch_result.entry[0])
batch_delete_result = self.gd_client.ExecuteBatch(
batch_delete_request,
batch_result.GetBatchLink().href)
self.assertEquals(len(batch_delete_result.entry), 1)
self.assertEquals(batch_delete_result.entry[0].batch_operation.type,
gdata.BATCH_DELETE)
self.assertEquals(batch_result.entry[0].batch_status.code,
'201')
def testCleanUriNeedsCleaning(self):
self.assertEquals('/relative/uri', self.gd_client._CleanUri(
'http://www.google.com/relative/uri'))
def testCleanUriDoesNotNeedCleaning(self):
self.assertEquals('/relative/uri', self.gd_client._CleanUri(
'/relative/uri'))
class ContactsQueryTest(unittest.TestCase):
def testConvertToStringDefaultFeed(self):
query = gdata.contacts.service.ContactsQuery()
self.assertEquals(str(query), '/m8/feeds/contacts/default/full')
query.max_results = 10
self.assertEquals(query.ToUri(),
'/m8/feeds/contacts/default/full?max-results=10')
def testConvertToStringCustomFeed(self):
query = gdata.contacts.service.ContactsQuery('/custom/feed/uri')
self.assertEquals(str(query), '/custom/feed/uri')
query.max_results = '10'
self.assertEquals(query.ToUri(), '/custom/feed/uri?max-results=10')
def testGroupQueryParameter(self):
query = gdata.contacts.service.ContactsQuery()
query.group = 'http://google.com/m8/feeds/groups/liz%40gmail.com/full/270f'
self.assertEquals(query.ToUri(), '/m8/feeds/contacts/default/full'
'?group=http%3A%2F%2Fgoogle.com%2Fm8%2Ffeeds%2Fgroups'
'%2Fliz%2540gmail.com%2Ffull%2F270f')
class ContactsGroupsTest(unittest.TestCase):
def setUp(self):
self.gd_client = gdata.contacts.service.ContactsService()
conf.configure_service(self.gd_client, conf.settings.ContactsConfig,
'ContactsServiceTest')
def tearDown(self):
conf.close_service(self.gd_client)
def testCreateUpdateDeleteGroup(self):
if not conf.settings.RUN_LIVE_TESTS:
return
conf.configure_service_cache(self.gd_client,
'testCreateUpdateDeleteGroup')
test_group = gdata.contacts.GroupEntry(title=atom.Title(
text='test group py'))
new_group = self.gd_client.CreateGroup(test_group)
self.assert_(isinstance(new_group, gdata.contacts.GroupEntry))
self.assertEquals(new_group.title.text, 'test group py')
# Change the group's title
new_group.title.text = 'new group name py'
updated_group = self.gd_client.UpdateGroup(new_group.GetEditLink().href,
new_group)
self.assertEquals(updated_group.title.text, new_group.title.text)
# Remove the group
self.gd_client.DeleteGroup(updated_group.GetEditLink().href)
# Utility methods.
def DeleteTestContact(client):
# Get test contact
feed = client.GetContactsFeed()
for entry in feed.entry:
if (entry.title.text == 'Elizabeth Bennet' and
entry.content.text == 'Test Notes' and
entry.email[0].address == 'liz@gmail.com'):
client.DeleteContact(entry.GetEditLink().href)
def suite():
return unittest.TestSuite((unittest.makeSuite(ContactsServiceTest, 'test'),
unittest.makeSuite(ContactsQueryTest, 'test'),
unittest.makeSuite(ContactsGroupsTest, 'test'),))
if __name__ == '__main__':
print ('Contacts Tests\nNOTE: Please run these tests only with a test '
'account. The tests may delete or update your data.')
unittest.main()
| apache-2.0 |
KontorConsulting/odoo | addons/account/wizard/account_fiscalyear_close.py | 222 | 15660 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_fiscalyear_close(osv.osv_memory):
"""
Closes Account Fiscalyear and Generate Opening entries for New Fiscalyear
"""
_name = "account.fiscalyear.close"
_description = "Fiscalyear Close"
_columns = {
'fy_id': fields.many2one('account.fiscalyear', \
'Fiscal Year to close', required=True, help="Select a Fiscal year to close"),
'fy2_id': fields.many2one('account.fiscalyear', \
'New Fiscal Year', required=True),
'journal_id': fields.many2one('account.journal', 'Opening Entries Journal', domain="[('type','=','situation')]", required=True, help='The best practice here is to use a journal dedicated to contain the opening entries of all fiscal years. Note that you should define it with default debit/credit accounts, of type \'situation\' and with a centralized counterpart.'),
'period_id': fields.many2one('account.period', 'Opening Entries Period', required=True),
'report_name': fields.char('Name of new entries', required=True, help="Give name of the new entries"),
}
_defaults = {
'report_name': lambda self, cr, uid, context: _('End of Fiscal Year Entry'),
}
def data_save(self, cr, uid, ids, context=None):
"""
This function close account fiscalyear and create entries in new fiscalyear
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Account fiscalyear close state’s IDs
"""
def _reconcile_fy_closing(cr, uid, ids, context=None):
"""
            This private function manually does the reconciliation on the account_move_line records given as `ids`,
            directly through psql. It's necessary to do it this way because the usual `reconcile()` method on the
            account.move.line object is really resource greedy (not supposed to work on reconciliation between
            thousands of records) and it does a lot of computation that is useless in this particular case.
"""
            #check that the reconciliation concerns journal entries from only one company
cr.execute('select distinct(company_id) from account_move_line where id in %s',(tuple(ids),))
if len(cr.fetchall()) > 1:
raise osv.except_osv(_('Warning!'), _('The entries to reconcile should belong to the same company.'))
r_id = self.pool.get('account.move.reconcile').create(cr, uid, {'type': 'auto', 'opening_reconciliation': True})
cr.execute('update account_move_line set reconcile_id = %s where id in %s',(r_id, tuple(ids),))
# reconcile_ref deptends from reconcile_id but was not recomputed
obj_acc_move_line._store_set_values(cr, uid, ids, ['reconcile_ref'], context=context)
obj_acc_move_line.invalidate_cache(cr, uid, ['reconcile_id'], ids, context=context)
return r_id
obj_acc_period = self.pool.get('account.period')
obj_acc_fiscalyear = self.pool.get('account.fiscalyear')
obj_acc_journal = self.pool.get('account.journal')
obj_acc_move = self.pool.get('account.move')
obj_acc_move_line = self.pool.get('account.move.line')
obj_acc_account = self.pool.get('account.account')
obj_acc_journal_period = self.pool.get('account.journal.period')
currency_obj = self.pool.get('res.currency')
data = self.browse(cr, uid, ids, context=context)
if context is None:
context = {}
fy_id = data[0].fy_id.id
cr.execute("SELECT id FROM account_period WHERE date_stop < (SELECT date_start FROM account_fiscalyear WHERE id = %s)", (str(data[0].fy2_id.id),))
fy_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
cr.execute("SELECT id FROM account_period WHERE date_start > (SELECT date_stop FROM account_fiscalyear WHERE id = %s)", (str(fy_id),))
fy2_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
if not fy_period_set or not fy2_period_set:
raise osv.except_osv(_('User Error!'), _('The periods to generate opening entries cannot be found.'))
period = obj_acc_period.browse(cr, uid, data[0].period_id.id, context=context)
new_fyear = obj_acc_fiscalyear.browse(cr, uid, data[0].fy2_id.id, context=context)
old_fyear = obj_acc_fiscalyear.browse(cr, uid, fy_id, context=context)
new_journal = data[0].journal_id.id
new_journal = obj_acc_journal.browse(cr, uid, new_journal, context=context)
company_id = new_journal.company_id.id
if not new_journal.default_credit_account_id or not new_journal.default_debit_account_id:
raise osv.except_osv(_('User Error!'),
_('The journal must have default credit and debit account.'))
if (not new_journal.centralisation) or new_journal.entry_posted:
raise osv.except_osv(_('User Error!'),
_('The journal must have centralized counterpart without the Skipping draft state option checked.'))
#delete existing move and move lines if any
move_ids = obj_acc_move.search(cr, uid, [
('journal_id', '=', new_journal.id), ('period_id', '=', period.id)])
if move_ids:
move_line_ids = obj_acc_move_line.search(cr, uid, [('move_id', 'in', move_ids)])
obj_acc_move_line._remove_move_reconcile(cr, uid, move_line_ids, opening_reconciliation=True, context=context)
obj_acc_move_line.unlink(cr, uid, move_line_ids, context=context)
obj_acc_move.unlink(cr, uid, move_ids, context=context)
cr.execute("SELECT id FROM account_fiscalyear WHERE date_stop < %s", (str(new_fyear.date_start),))
result = cr.dictfetchall()
fy_ids = [x['id'] for x in result]
query_line = obj_acc_move_line._query_get(cr, uid,
obj='account_move_line', context={'fiscalyear': fy_ids})
#create the opening move
vals = {
'name': '/',
'ref': '',
'period_id': period.id,
'date': period.date_start,
'journal_id': new_journal.id,
}
move_id = obj_acc_move.create(cr, uid, vals, context=context)
        #1. report of the accounts with deferral method == 'unreconciled'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'unreconciled', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + '''
AND reconcile_id IS NULL)''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
#We have also to consider all move_lines that were reconciled
#on another fiscal year, and report them too
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT
b.name, b.create_uid, b.create_date, b.write_uid, b.write_date,
b.statement_id, %s, b.currency_id, b.date_maturity,
b.partner_id, b.blocked, b.credit, 'draft', b.debit,
b.ref, b.account_id, %s, (%s) AS date, %s, b.amount_currency,
b.quantity, b.product_id, b.company_id
FROM account_move_line b
WHERE b.account_id IN %s
AND b.reconcile_id IS NOT NULL
AND b.period_id IN ('''+fy_period_set+''')
AND b.reconcile_id IN (SELECT DISTINCT(reconcile_id)
FROM account_move_line a
WHERE a.period_id IN ('''+fy2_period_set+''')))''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
        #2. report of the accounts with deferral method == 'detail'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'detail', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + ''')
''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
        #3. report of the accounts with deferral method == 'balance'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'balance', ))
account_ids = map(lambda x: x[0], cr.fetchall())
query_1st_part = """
INSERT INTO account_move_line (
debit, credit, name, date, move_id, journal_id, period_id,
account_id, currency_id, amount_currency, company_id, state) VALUES
"""
query_2nd_part = ""
query_2nd_part_args = []
for account in obj_acc_account.browse(cr, uid, account_ids, context={'fiscalyear': fy_id}):
company_currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id
if not currency_obj.is_zero(cr, uid, company_currency_id, abs(account.balance)):
if query_2nd_part:
query_2nd_part += ','
query_2nd_part += "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
query_2nd_part_args += (account.balance > 0 and account.balance or 0.0,
account.balance < 0 and -account.balance or 0.0,
data[0].report_name,
period.date_start,
move_id,
new_journal.id,
period.id,
account.id,
account.currency_id and account.currency_id.id or None,
account.foreign_balance if account.currency_id else 0.0,
account.company_id.id,
'draft')
if query_2nd_part:
cr.execute(query_1st_part + query_2nd_part, tuple(query_2nd_part_args))
self.invalidate_cache(cr, uid, context=context)
#validate and centralize the opening move
obj_acc_move.validate(cr, uid, [move_id], context=context)
#reconcile all the move.line of the opening move
ids = obj_acc_move_line.search(cr, uid, [('journal_id', '=', new_journal.id),
('period_id.fiscalyear_id','=',new_fyear.id)])
if ids:
reconcile_id = _reconcile_fy_closing(cr, uid, ids, context=context)
            #set the creation date of the reconciliation to the first day of the new fiscalyear, in order to have good figures in the aged trial balance
self.pool.get('account.move.reconcile').write(cr, uid, [reconcile_id], {'create_date': new_fyear.date_start}, context=context)
#create the journal.period object and link it to the old fiscalyear
new_period = data[0].period_id.id
ids = obj_acc_journal_period.search(cr, uid, [('journal_id', '=', new_journal.id), ('period_id', '=', new_period)])
if not ids:
ids = [obj_acc_journal_period.create(cr, uid, {
'name': (new_journal.name or '') + ':' + (period.code or ''),
'journal_id': new_journal.id,
'period_id': period.id
})]
cr.execute('UPDATE account_fiscalyear ' \
'SET end_journal_period_id = %s ' \
'WHERE id = %s', (ids[0], old_fyear.id))
obj_acc_fiscalyear.invalidate_cache(cr, uid, ['end_journal_period_id'], [old_fyear.id], context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kvar/ansible | lib/ansible/plugins/cliconf/nos.py | 31 | 3829 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
cliconf: nos
short_description: Use nos cliconf to run command on Extreme NOS platform
description:
- This nos plugin provides low level abstraction apis for
sending and receiving CLI commands from Extreme NOS network devices.
version_added: "2.7"
"""
import re
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase
class Cliconf(CliconfBase):
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'nos'
reply = self.get('show version')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'Network Operating System Version: (\S+)', data)
if match:
device_info['network_os_version'] = match.group(1)
reply = self.get('show chassis')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'^Chassis Name:(\s+)(\S+)', data, re.M)
if match:
device_info['network_os_model'] = match.group(2)
reply = self.get('show running-config | inc "switch-attributes host-name"')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'switch-attributes host-name (\S+)', data, re.M)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
def get_config(self, source='running', flags=None):
        if source != 'running':
raise ValueError("fetching configuration from %s is not supported" % source)
if source == 'running':
cmd = 'show running-config'
flags = [] if flags is None else flags
        cmd += ' ' + ' '.join(flags)
cmd = cmd.strip()
return self.send_command(cmd)
def edit_config(self, command):
resp = {}
results = []
requests = []
self.send_command('configure terminal')
for cmd in to_list(command):
if isinstance(cmd, dict):
command = cmd['command']
prompt = cmd['prompt']
answer = cmd['answer']
newline = cmd.get('newline', True)
else:
command = cmd
prompt = None
answer = None
newline = True
if cmd != 'end' and cmd[0] != '!':
results.append(self.send_command(command, prompt, answer, False, newline))
requests.append(cmd)
self.send_command('end')
resp['request'] = requests
resp['response'] = results
return resp
def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
def get_capabilities(self):
result = super(Cliconf, self).get_capabilities()
return json.dumps(result)
| gpl-3.0 |
DucQuang1/youtube-dl | youtube_dl/extractor/vevo.py | 104 | 8366 | from __future__ import unicode_literals
import re
import xml.etree.ElementTree
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
)
from ..utils import (
ExtractorError,
int_or_none,
)
class VevoIE(InfoExtractor):
"""
Accepts urls from vevo.com or in the format 'vevo:{id}'
(currently used by MTVIE and MySpaceIE)
"""
_VALID_URL = r'''(?x)
(?:https?://www\.vevo\.com/watch/(?:[^/]+/(?:[^/]+/)?)?|
https?://cache\.vevo\.com/m/html/embed\.html\?video=|
https?://videoplayer\.vevo\.com/embed/embedded\?videoId=|
vevo:)
(?P<id>[^&?#]+)'''
_TESTS = [{
'url': 'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
"md5": "95ee28ee45e70130e3ab02b0f579ae23",
'info_dict': {
'id': 'GB1101300280',
'ext': 'mp4',
"upload_date": "20130624",
"uploader": "Hurts",
"title": "Somebody to Die For",
"duration": 230.12,
"width": 1920,
"height": 1080,
# timestamp and upload_date are often incorrect; seem to change randomly
'timestamp': int,
}
}, {
'note': 'v3 SMIL format',
'url': 'http://www.vevo.com/watch/cassadee-pope/i-wish-i-could-break-your-heart/USUV71302923',
'md5': 'f6ab09b034f8c22969020b042e5ac7fc',
'info_dict': {
'id': 'USUV71302923',
'ext': 'mp4',
'upload_date': '20140219',
'uploader': 'Cassadee Pope',
'title': 'I Wish I Could Break Your Heart',
'duration': 226.101,
'age_limit': 0,
'timestamp': int,
}
}, {
'note': 'Age-limited video',
'url': 'https://www.vevo.com/watch/justin-timberlake/tunnel-vision-explicit/USRV81300282',
'info_dict': {
'id': 'USRV81300282',
'ext': 'mp4',
'age_limit': 18,
'title': 'Tunnel Vision (Explicit)',
'uploader': 'Justin Timberlake',
'upload_date': 're:2013070[34]',
'timestamp': int,
},
'params': {
'skip_download': 'true',
}
}]
_SMIL_BASE_URL = 'http://smil.lvl3.vevo.com/'
def _real_initialize(self):
req = compat_urllib_request.Request(
'http://www.vevo.com/auth', data=b'')
webpage = self._download_webpage(
req, None,
note='Retrieving oauth token',
errnote='Unable to retrieve oauth token',
fatal=False)
if webpage is False:
self._oauth_token = None
else:
self._oauth_token = self._search_regex(
r'access_token":\s*"([^"]+)"',
webpage, 'access token', fatal=False)
def _formats_from_json(self, video_info):
last_version = {'version': -1}
for version in video_info['videoVersions']:
# These are the HTTP downloads, other types are for different manifests
if version['sourceType'] == 2:
if version['version'] > last_version['version']:
last_version = version
if last_version['version'] == -1:
raise ExtractorError('Unable to extract last version of the video')
renditions = xml.etree.ElementTree.fromstring(last_version['data'])
formats = []
# Already sorted from worst to best quality
for rend in renditions.findall('rendition'):
attr = rend.attrib
format_note = '%(videoCodec)s@%(videoBitrate)4sk, %(audioCodec)s@%(audioBitrate)3sk' % attr
formats.append({
'url': attr['url'],
'format_id': attr['name'],
'format_note': format_note,
'height': int(attr['frameheight']),
'width': int(attr['frameWidth']),
})
return formats
def _formats_from_smil(self, smil_xml):
formats = []
smil_doc = xml.etree.ElementTree.fromstring(smil_xml.encode('utf-8'))
els = smil_doc.findall('.//{http://www.w3.org/2001/SMIL20/Language}video')
for el in els:
src = el.attrib['src']
m = re.match(r'''(?xi)
(?P<ext>[a-z0-9]+):
(?P<path>
[/a-z0-9]+ # The directory and main part of the URL
_(?P<cbr>[0-9]+)k
_(?P<width>[0-9]+)x(?P<height>[0-9]+)
_(?P<vcodec>[a-z0-9]+)
_(?P<vbr>[0-9]+)
_(?P<acodec>[a-z0-9]+)
_(?P<abr>[0-9]+)
\.[a-z0-9]+ # File extension
)''', src)
if not m:
continue
format_url = self._SMIL_BASE_URL + m.group('path')
formats.append({
'url': format_url,
'format_id': 'SMIL_' + m.group('cbr'),
'vcodec': m.group('vcodec'),
'acodec': m.group('acodec'),
'vbr': int(m.group('vbr')),
'abr': int(m.group('abr')),
'ext': m.group('ext'),
'width': int(m.group('width')),
'height': int(m.group('height')),
})
return formats
def _download_api_formats(self, video_id):
if not self._oauth_token:
self._downloader.report_warning(
'No oauth token available, skipping API HLS download')
return []
api_url = 'https://apiv2.vevo.com/video/%s/streams/hls?token=%s' % (
video_id, self._oauth_token)
api_data = self._download_json(
api_url, video_id,
note='Downloading HLS formats',
errnote='Failed to download HLS format list', fatal=False)
if api_data is None:
return []
m3u8_url = api_data[0]['url']
return self._extract_m3u8_formats(
m3u8_url, video_id, entry_protocol='m3u8_native', ext='mp4',
preference=0)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
json_url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s' % video_id
response = self._download_json(json_url, video_id)
video_info = response['video']
if not video_info:
if 'statusMessage' in response:
raise ExtractorError('%s said: %s' % (self.IE_NAME, response['statusMessage']), expected=True)
raise ExtractorError('Unable to extract videos')
formats = self._formats_from_json(video_info)
is_explicit = video_info.get('isExplicit')
if is_explicit is True:
age_limit = 18
elif is_explicit is False:
age_limit = 0
else:
age_limit = None
# Download via HLS API
formats.extend(self._download_api_formats(video_id))
# Download SMIL
smil_blocks = sorted((
f for f in video_info['videoVersions']
if f['sourceType'] == 13),
key=lambda f: f['version'])
smil_url = '%s/Video/V2/VFILE/%s/%sr.smil' % (
self._SMIL_BASE_URL, video_id, video_id.lower())
if smil_blocks:
smil_url_m = self._search_regex(
r'url="([^"]+)"', smil_blocks[-1]['data'], 'SMIL URL',
default=None)
if smil_url_m is not None:
smil_url = smil_url_m
if smil_url:
smil_xml = self._download_webpage(
smil_url, video_id, 'Downloading SMIL info', fatal=False)
if smil_xml:
formats.extend(self._formats_from_smil(smil_xml))
self._sort_formats(formats)
timestamp_ms = int_or_none(self._search_regex(
r'/Date\((\d+)\)/',
video_info['launchDate'], 'launch date', fatal=False))
return {
'id': video_id,
'title': video_info['title'],
'formats': formats,
'thumbnail': video_info['imageUrl'],
'timestamp': timestamp_ms // 1000,
'uploader': video_info['mainArtists'][0]['artistName'],
'duration': video_info['duration'],
'age_limit': age_limit,
}
| unlicense |