repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
HHS/CoECI-CMS-Healthcare-Fraud-Prevention | partnerclient/hfppnetwork/partner/conversion/test/test_entities.py | 3 | 3134 | ##
# Copyright (C) 2013 TopCoder Inc., All Rights Reserved.
##
__author__ = 'Easyhard'
__version__ = '1.0'
import entities
from schema import entity
import unittest
from datetime import date
from exception import ConfigurationException
class TestEntities(unittest.TestCase):
"""Testcases for entities.py"""
def setUp(self):
pass
def test_entity_class(self):
"""Testing Entity class's method"""
testing = entities.Testing()
self.assertEqual(testing.get_field_list(), entity['Testing']['fields'])
self.assertEqual(testing.typeof('f1'), 'string')
self.assertEqual(testing.typeof('f2'), 'date')
self.assertEqual(testing.typeof('f3'), 'int')
self.assertEqual(testing.typeof('f4'), 'decimal')
def test_propertylist(self):
"""Checker if generated all properties."""
for class_name, data in entity.items():
instance = getattr(entities, class_name)()
for name, stype, doc in data['fields']:
a = getattr(instance, name)
self.assertIsNone(a)
self.assertIsNotNone(instance.typeof)
def test_property_setget(self):
"""Checker all properties' getter and setter"""
for class_name, data in entity.items():
instance = getattr(entities, class_name)()
for name, stype, doc in data['fields']:
if stype == 'date':
setattr(instance, name, date.min)
self.assertEqual(getattr(instance, name), date.min)
if stype == 'string':
setattr(instance, name, 'testing')
self.assertEqual(getattr(instance, name), 'testing')
setattr(instance, name, 123)
# automatically converted to string
self.assertEqual(getattr(instance, name), '123')
if stype == 'int':
setattr(instance, name, 232)
self.assertEqual(getattr(instance, name), 232)
setattr(instance, name, 12.3)
# automatically converted to int
self.assertEqual(getattr(instance, name), 12)
# raises an exception if it cannot convert
with self.assertRaises(ValueError):
setattr(instance, name, 'abc')
if stype == 'decimal':
setattr(instance, name, 232)
self.assertEqual(getattr(instance, name), 232.0)
setattr(instance, name, 12.3)
# automatically converted to float
self.assertEqual(getattr(instance, name), 12.3)
# raises an exception if it cannot convert
with self.assertRaises(ValueError):
setattr(instance, name, 'abc')
def test_exceptions(self):
"""Testing raising of ConfigurationException"""
class Foo(object):
pass
with self.assertRaises(ConfigurationException):
Foo = entities.entity_Class(Foo)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
chunywang/crosswalk-test-suite | stability/stability-iterative-android-tests/iterative/Pause_Resume_Webapp_Repeatedly.py | 18 | 2868 | #!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Li, Hao<haox.li@intel.com>
import unittest
import os
import sys
import commands
import shutil
import time
import subprocess
from TestApp import *
reload(sys)
sys.setdefaultencoding('utf-8')
SCRIPT_PATH = os.path.realpath(__file__)
ConstPath = os.path.dirname(SCRIPT_PATH)
def setUp():
global device
#device = 'E6OKCY411012'
device = os.environ.get('DEVICE_ID')
if not device:
print 'Get env error\n'
sys.exit(1)
class TestPauseResumeWebappRepeatedly(unittest.TestCase):
def test_pause_resume_webapp_repeatedly(self):
setUp()
testapp = TestApp(device, ConstPath + "/../iterative.apk",
"org.xwalk.iterative", "IterativeActivity")
try:
if not testapp.isInstalled():
testapp.install()
testapp.launch()
# Pause and Resume 50 times
for i in range(50):
time.sleep(2)
# switch to background
self.assertTrue(testapp.switch())
time.sleep(2)
# switch to foreground
self.assertTrue(testapp.switch())
testapp.stop()
except Exception as e:
print "Error: %s" % e
testapp.stop()
self.assertTrue(False)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
jostep/tensorflow | tensorflow/contrib/specs/python/params_ops.py | 186 | 3104 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators for concise TensorFlow parameter specifications.
This module is used as an environment for evaluating expressions
in the "params" DSL.
Specifications are intended to assign simple numerical
values. Examples:
--params "n=64; d=5" --spec "(Cr(n) | Mp([2, 2])) ** d | Fm"
The random parameter primitives are useful for running large numbers
of experiments with randomly distributed parameters:
--params "n=Li(5,500); d=Ui(1,5)" --spec "(Cr(n) | Mp([2, 2])) ** d | Fm"
Internally, this might be implemented as follows:
params = specs.create_params(FLAGS.params, {})
logging.info(repr(params))
net = specs.create_net(FLAGS.spec, inputs, params)
Note that separating the specifications into parameters and network
creation allows us to log the random parameter values easily.
The implementation of this will change soon in order to support
hyperparameter tuning with steering. Instead of returning a number,
the primitives below will return a class instance that is then
used to generate a random number by the framework.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Lint disabled because these are operators in the DSL, not regular
# Python functions.
# pylint: disable=invalid-name
# pylint: disable=wildcard-import,unused-wildcard-import,redefining-builtin
# pylint: disable=redefined-builtin,g-importing-member,no-member
# make available all math expressions
import math
from math import *
import random
# pylint: enable=wildcard-import,unused-wildcard-import,redefining-builtin
# pylint: enable=redefined-builtin,g-importing-member,no-member
def Uf(lo=0.0, hi=1.0):
"""Uniformly distributed floating number."""
return random.uniform(lo, hi)
def Ui(lo, hi):
"""Uniformly distributed integer, inclusive limits."""
return random.randint(lo, hi)
def Lf(lo, hi):
"""Log-uniform distributed floatint point number."""
return math.exp(random.uniform(math.log(lo), math.log(hi)))
def Li(lo, hi):
"""Log-uniform distributed integer, inclusive limits."""
return int(math.floor(math.exp(random.uniform(math.log(lo),
math.log(hi+1-1e-5)))))
def Nt(mu, sigma, limit=3.0):
"""Normally distributed floating point number with truncation."""
return min(max(random.gauss(mu, sigma), mu-limit*sigma), mu+limit*sigma)
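# A minimal sketch of how the primitives above might be combined to draw one
# random hyperparameter assignment; the parameter names n, d and lr are
# hypothetical and chosen only for illustration.
def _example_random_params():
    return dict(
        n=Li(5, 500),       # log-uniform integer, e.g. a layer width
        d=Ui(1, 5),         # uniform integer, e.g. a network depth
        lr=Lf(1e-4, 1e-1),  # log-uniform float, e.g. a learning rate
    )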
# pylint: enable=invalid-name
| apache-2.0 |
MartinDelzant/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population > n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
# number of permutations. However, it works with sampling algorithms
# that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
ufieeehw/IEEE2017 | ieee2017_tf_broadcaster/nodes/tf_broadcaster.py | 1 | 3127 | #!/usr/bin/env python
'''
TF Broadcaster: Keeps track of the various coordinate frames on the vehicle in
relation to the map and each other.
'''
from __future__ import division
import rospy
import tf
from nav_msgs.msg import Odometry
from std_msgs.msg import Float64
__author__ = "Anthony Olive"
__maintainer__ = "Anthony Olive"
__email__ = "anthony@iris-systems.net"
__copyright__ = "Copyright 2017, UF IEEE"
__license__ = "MIT"
class TFBroadcaster():
def __init__(self):
self.__broadcaster = tf.TransformBroadcaster()
# Subscribe to topics for the positions of dynamic frames
self.__odom_subscriber = rospy.Subscriber("/odom", Odometry, self.__update_odom, queue_size=2)
# Publish the transformations at a frequency specified by the rate parameter
publishing_rate = rospy.get_param("~publishing_rate", 30)
rospy.Timer(rospy.Duration(1.0 / publishing_rate), self.__publish_static, oneshot=False)
def __update_odom(self, msg):
msg = msg.pose.pose
self.__broadcaster.sendTransform((msg.position.x, msg.position.y, 0),
(msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w),
rospy.Time.now(), "odom", "map")
def __publish_static(self, event):
'''
Transformations between coordinate frames that are at fixed
positions relative to each other.
'''
# Transformation from odom to base_link
# The origin of the base_link frame is at the center of the top plate
# This provides planar coordinates of base_link on the map plus its elevation
self.__broadcaster.sendTransform((0, 0, 0.119),
tf.transformations.quaternion_from_euler(0, 0, 0),
rospy.Time.now(), "base_link", "odom")
# Transformations from base_link to each LiDAR
# All frames share a planar center, but the lidar_fused frame is lower
# Each LiDAR is rotated such that the beam is emitted away from the vehicle
self.__broadcaster.sendTransform((0, 0.125368, -0.0775),
tf.transformations.quaternion_from_euler(0, 3.14159, -1.590796),
rospy.Time.now(), "lidar_left", "base_link")
self.__broadcaster.sendTransform((0.1, 0, -0.0775),
tf.transformations.quaternion_from_euler(3.14159, 0, 0),
rospy.Time.now(), "lidar_front", "base_link")
self.__broadcaster.sendTransform((0, -0.125368, -0.0775),
tf.transformations.quaternion_from_euler(0, 3.14159, 1.570796),
rospy.Time.now(), "lidar_right", "base_link")
self.__broadcaster.sendTransform((-0.1, 0, -0.0775),
tf.transformations.quaternion_from_euler(3.14159, 0, 3.14159),
rospy.Time.now(), "lidar_back", "base_link")
# Transformations from base_link to the fused LiDAR pointcloud
# Both frames share a planar center, but the lidar_fused frame is lower
self.__broadcaster.sendTransform((0,0,-0.0775),
tf.transformations.quaternion_from_euler(0,0,0),
rospy.Time.now(), "lidar_fused", "base_link")
# Transformations from base_link to the IMU
#self.__broadcaster.sendTransform((0,0,0),
# tf.transformations.quaternion_from_euler(0,0,0),
# rospy.Time.now(), "imu", "base_link")
if __name__ == "__main__":
rospy.init_node('tf_broadcaster')
tf_broadcaster = TFBroadcaster()
rospy.spin()
| mit |
mozillazg/Unidecode | unidecode/x014.py | 252 | 4300 | data = (
'[?]', # 0x00
'e', # 0x01
'aai', # 0x02
'i', # 0x03
'ii', # 0x04
'o', # 0x05
'oo', # 0x06
'oo', # 0x07
'ee', # 0x08
'i', # 0x09
'a', # 0x0a
'aa', # 0x0b
'we', # 0x0c
'we', # 0x0d
'wi', # 0x0e
'wi', # 0x0f
'wii', # 0x10
'wii', # 0x11
'wo', # 0x12
'wo', # 0x13
'woo', # 0x14
'woo', # 0x15
'woo', # 0x16
'wa', # 0x17
'wa', # 0x18
'waa', # 0x19
'waa', # 0x1a
'waa', # 0x1b
'ai', # 0x1c
'w', # 0x1d
'\'', # 0x1e
't', # 0x1f
'k', # 0x20
'sh', # 0x21
's', # 0x22
'n', # 0x23
'w', # 0x24
'n', # 0x25
'[?]', # 0x26
'w', # 0x27
'c', # 0x28
'?', # 0x29
'l', # 0x2a
'en', # 0x2b
'in', # 0x2c
'on', # 0x2d
'an', # 0x2e
'pe', # 0x2f
'paai', # 0x30
'pi', # 0x31
'pii', # 0x32
'po', # 0x33
'poo', # 0x34
'poo', # 0x35
'hee', # 0x36
'hi', # 0x37
'pa', # 0x38
'paa', # 0x39
'pwe', # 0x3a
'pwe', # 0x3b
'pwi', # 0x3c
'pwi', # 0x3d
'pwii', # 0x3e
'pwii', # 0x3f
'pwo', # 0x40
'pwo', # 0x41
'pwoo', # 0x42
'pwoo', # 0x43
'pwa', # 0x44
'pwa', # 0x45
'pwaa', # 0x46
'pwaa', # 0x47
'pwaa', # 0x48
'p', # 0x49
'p', # 0x4a
'h', # 0x4b
'te', # 0x4c
'taai', # 0x4d
'ti', # 0x4e
'tii', # 0x4f
'to', # 0x50
'too', # 0x51
'too', # 0x52
'dee', # 0x53
'di', # 0x54
'ta', # 0x55
'taa', # 0x56
'twe', # 0x57
'twe', # 0x58
'twi', # 0x59
'twi', # 0x5a
'twii', # 0x5b
'twii', # 0x5c
'two', # 0x5d
'two', # 0x5e
'twoo', # 0x5f
'twoo', # 0x60
'twa', # 0x61
'twa', # 0x62
'twaa', # 0x63
'twaa', # 0x64
'twaa', # 0x65
't', # 0x66
'tte', # 0x67
'tti', # 0x68
'tto', # 0x69
'tta', # 0x6a
'ke', # 0x6b
'kaai', # 0x6c
'ki', # 0x6d
'kii', # 0x6e
'ko', # 0x6f
'koo', # 0x70
'koo', # 0x71
'ka', # 0x72
'kaa', # 0x73
'kwe', # 0x74
'kwe', # 0x75
'kwi', # 0x76
'kwi', # 0x77
'kwii', # 0x78
'kwii', # 0x79
'kwo', # 0x7a
'kwo', # 0x7b
'kwoo', # 0x7c
'kwoo', # 0x7d
'kwa', # 0x7e
'kwa', # 0x7f
'kwaa', # 0x80
'kwaa', # 0x81
'kwaa', # 0x82
'k', # 0x83
'kw', # 0x84
'keh', # 0x85
'kih', # 0x86
'koh', # 0x87
'kah', # 0x88
'ce', # 0x89
'caai', # 0x8a
'ci', # 0x8b
'cii', # 0x8c
'co', # 0x8d
'coo', # 0x8e
'coo', # 0x8f
'ca', # 0x90
'caa', # 0x91
'cwe', # 0x92
'cwe', # 0x93
'cwi', # 0x94
'cwi', # 0x95
'cwii', # 0x96
'cwii', # 0x97
'cwo', # 0x98
'cwo', # 0x99
'cwoo', # 0x9a
'cwoo', # 0x9b
'cwa', # 0x9c
'cwa', # 0x9d
'cwaa', # 0x9e
'cwaa', # 0x9f
'cwaa', # 0xa0
'c', # 0xa1
'th', # 0xa2
'me', # 0xa3
'maai', # 0xa4
'mi', # 0xa5
'mii', # 0xa6
'mo', # 0xa7
'moo', # 0xa8
'moo', # 0xa9
'ma', # 0xaa
'maa', # 0xab
'mwe', # 0xac
'mwe', # 0xad
'mwi', # 0xae
'mwi', # 0xaf
'mwii', # 0xb0
'mwii', # 0xb1
'mwo', # 0xb2
'mwo', # 0xb3
'mwoo', # 0xb4
'mwoo', # 0xb5
'mwa', # 0xb6
'mwa', # 0xb7
'mwaa', # 0xb8
'mwaa', # 0xb9
'mwaa', # 0xba
'm', # 0xbb
'm', # 0xbc
'mh', # 0xbd
'm', # 0xbe
'm', # 0xbf
'ne', # 0xc0
'naai', # 0xc1
'ni', # 0xc2
'nii', # 0xc3
'no', # 0xc4
'noo', # 0xc5
'noo', # 0xc6
'na', # 0xc7
'naa', # 0xc8
'nwe', # 0xc9
'nwe', # 0xca
'nwa', # 0xcb
'nwa', # 0xcc
'nwaa', # 0xcd
'nwaa', # 0xce
'nwaa', # 0xcf
'n', # 0xd0
'ng', # 0xd1
'nh', # 0xd2
'le', # 0xd3
'laai', # 0xd4
'li', # 0xd5
'lii', # 0xd6
'lo', # 0xd7
'loo', # 0xd8
'loo', # 0xd9
'la', # 0xda
'laa', # 0xdb
'lwe', # 0xdc
'lwe', # 0xdd
'lwi', # 0xde
'lwi', # 0xdf
'lwii', # 0xe0
'lwii', # 0xe1
'lwo', # 0xe2
'lwo', # 0xe3
'lwoo', # 0xe4
'lwoo', # 0xe5
'lwa', # 0xe6
'lwa', # 0xe7
'lwaa', # 0xe8
'lwaa', # 0xe9
'l', # 0xea
'l', # 0xeb
'l', # 0xec
'se', # 0xed
'saai', # 0xee
'si', # 0xef
'sii', # 0xf0
'so', # 0xf1
'soo', # 0xf2
'soo', # 0xf3
'sa', # 0xf4
'saa', # 0xf5
'swe', # 0xf6
'swe', # 0xf7
'swi', # 0xf8
'swi', # 0xf9
'swii', # 0xfa
'swii', # 0xfb
'swo', # 0xfc
'swo', # 0xfd
'swoo', # 0xfe
'swoo', # 0xff
)
| gpl-2.0 |
Changaco/oh-mainline | vendor/packages/gdata/tests/atom_tests/mock_client_test.py | 128 | 2421 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.mock_http
import atom.http
class MockHttpClientUnitTest(unittest.TestCase):
def setUp(self):
self.client = atom.mock_http.MockHttpClient()
def testRepondToGet(self):
mock_response = atom.http_interface.HttpResponse(body='Hooray!',
status=200, reason='OK')
self.client.add_response(mock_response, 'GET',
'http://example.com/hooray')
response = self.client.request('GET', 'http://example.com/hooray')
self.assertEquals(len(self.client.recordings), 1)
self.assertEquals(response.status, 200)
self.assertEquals(response.read(), 'Hooray!')
def testRecordResponse(self):
# Turn on pass-through record mode.
self.client.real_client = atom.http.ProxiedHttpClient()
live_response = self.client.request('GET',
'http://www.google.com/base/feeds/snippets?max-results=1')
live_response_body = live_response.read()
self.assertEquals(live_response.status, 200)
self.assertEquals(live_response_body.startswith('<?xml'), True)
# Requery for the now canned data.
self.client.real_client = None
canned_response = self.client.request('GET',
'http://www.google.com/base/feeds/snippets?max-results=1')
# The canned response should be the stored response.
canned_response_body = canned_response.read()
self.assertEquals(canned_response.status, 200)
self.assertEquals(canned_response_body, live_response_body)
def testUnrecordedRequest(self):
try:
self.client.request('POST', 'http://example.org')
self.fail()
except atom.mock_http.NoRecordingFound:
pass
def suite():
return unittest.TestSuite(
(unittest.makeSuite(MockHttpClientUnitTest,'test'),))
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
muzili/repo | main.py | 18 | 6629 | #!/bin/sh
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
magic='--calling-python-from-/bin/sh--'
"""exec" python -E "$0" "$@" """#$magic"
if __name__ == '__main__':
import sys
if sys.argv[-1] == '#%s' % magic:
del sys.argv[-1]
del magic
import optparse
import os
import re
import sys
from trace import SetTrace
from git_config import init_ssh, close_ssh
from command import InteractiveCommand
from command import MirrorSafeCommand
from command import PagedCommand
from error import ManifestInvalidRevisionError
from error import NoSuchProjectError
from error import RepoChangedException
from pager import RunPager
from subcmds import all as all_commands
global_options = optparse.OptionParser(
usage="repo [-p|--paginate|--no-pager] COMMAND [ARGS]"
)
global_options.add_option('-p', '--paginate',
dest='pager', action='store_true',
help='display command output in the pager')
global_options.add_option('--no-pager',
dest='no_pager', action='store_true',
help='disable the pager')
global_options.add_option('--trace',
dest='trace', action='store_true',
help='trace git command execution')
global_options.add_option('--version',
dest='show_version', action='store_true',
help='display this version of repo')
class _Repo(object):
def __init__(self, repodir):
self.repodir = repodir
self.commands = all_commands
# add 'branch' as an alias for 'branches'
all_commands['branch'] = all_commands['branches']
def _Run(self, argv):
name = None
glob = []
for i in xrange(0, len(argv)):
if not argv[i].startswith('-'):
name = argv[i]
if i > 0:
glob = argv[:i]
argv = argv[i + 1:]
break
if not name:
glob = argv
name = 'help'
argv = []
gopts, gargs = global_options.parse_args(glob)
if gopts.trace:
SetTrace()
if gopts.show_version:
if name == 'help':
name = 'version'
else:
print >>sys.stderr, 'fatal: invalid usage of --version'
sys.exit(1)
try:
cmd = self.commands[name]
except KeyError:
print >>sys.stderr,\
"repo: '%s' is not a repo command. See 'repo help'."\
% name
sys.exit(1)
cmd.repodir = self.repodir
if not isinstance(cmd, MirrorSafeCommand) and cmd.manifest.IsMirror:
print >>sys.stderr, \
"fatal: '%s' requires a working directory"\
% name
sys.exit(1)
copts, cargs = cmd.OptionParser.parse_args(argv)
if not gopts.no_pager and not isinstance(cmd, InteractiveCommand):
config = cmd.manifest.globalConfig
if gopts.pager:
use_pager = True
else:
use_pager = config.GetBoolean('pager.%s' % name)
if use_pager is None:
use_pager = cmd.WantPager(copts)
if use_pager:
RunPager(config)
try:
cmd.Execute(copts, cargs)
except ManifestInvalidRevisionError, e:
print >>sys.stderr, 'error: %s' % str(e)
sys.exit(1)
except NoSuchProjectError, e:
if e.name:
print >>sys.stderr, 'error: project %s not found' % e.name
else:
print >>sys.stderr, 'error: no project in current directory'
sys.exit(1)
def _MyWrapperPath():
return os.path.join(os.path.dirname(__file__), 'repo')
def _CurrentWrapperVersion():
VERSION = None
pat = re.compile(r'^VERSION *=')
fd = open(_MyWrapperPath())
for line in fd:
if pat.match(line):
fd.close()
exec line
return VERSION
raise NameError, 'No VERSION in repo script'
def _CheckWrapperVersion(ver, repo_path):
if not repo_path:
repo_path = '~/bin/repo'
if not ver:
print >>sys.stderr, 'no --wrapper-version argument'
sys.exit(1)
exp = _CurrentWrapperVersion()
ver = tuple(map(lambda x: int(x), ver.split('.')))
if len(ver) == 1:
ver = (0, ver[0])
if exp[0] > ver[0] or ver < (0, 4):
exp_str = '.'.join(map(lambda x: str(x), exp))
print >>sys.stderr, """
!!! A new repo command (%5s) is available. !!!
!!! You must upgrade before you can continue: !!!
cp %s %s
""" % (exp_str, _MyWrapperPath(), repo_path)
sys.exit(1)
if exp > ver:
exp_str = '.'.join(map(lambda x: str(x), exp))
print >>sys.stderr, """
... A new repo command (%5s) is available.
... You should upgrade soon:
cp %s %s
""" % (exp_str, _MyWrapperPath(), repo_path)
def _CheckRepoDir(dir):
if not dir:
print >>sys.stderr, 'no --repo-dir argument'
sys.exit(1)
def _PruneOptions(argv, opt):
i = 0
while i < len(argv):
a = argv[i]
if a == '--':
break
if a.startswith('--'):
eq = a.find('=')
if eq > 0:
a = a[0:eq]
if not opt.has_option(a):
del argv[i]
continue
i += 1
def _Main(argv):
opt = optparse.OptionParser(usage="repo wrapperinfo -- ...")
opt.add_option("--repo-dir", dest="repodir",
help="path to .repo/")
opt.add_option("--wrapper-version", dest="wrapper_version",
help="version of the wrapper script")
opt.add_option("--wrapper-path", dest="wrapper_path",
help="location of the wrapper script")
_PruneOptions(argv, opt)
opt, argv = opt.parse_args(argv)
_CheckWrapperVersion(opt.wrapper_version, opt.wrapper_path)
_CheckRepoDir(opt.repodir)
repo = _Repo(opt.repodir)
try:
try:
init_ssh()
repo._Run(argv)
finally:
close_ssh()
except KeyboardInterrupt:
sys.exit(1)
except RepoChangedException, rce:
# If repo changed, re-exec ourselves.
#
argv = list(sys.argv)
argv.extend(rce.extra_args)
try:
os.execv(__file__, argv)
except OSError, e:
print >>sys.stderr, 'fatal: cannot restart repo after upgrade'
print >>sys.stderr, 'fatal: %s' % e
sys.exit(128)
if __name__ == '__main__':
_Main(sys.argv[1:])
| apache-2.0 |
juanshishido/okcupid | utils/permutation.py | 1 | 2439 | import numpy as np
from scipy.stats import ttest_ind
from sklearn.metrics import accuracy_score
def _diff_means(m, arr):
"""Calculate the difference-in-means statistic.
This is based on an input array, `arr`, where the first
`m` observations correspond to a particular class.
Parameters
----------
m : int
Number of samples in the first class
arr : np.ndarray
Data for both classes
Returns
-------
float
"""
return np.mean(arr[:m]) - np.mean(arr[m:])
def _permute(a, b, comparison='predictions', permutations=10000):
"""Estimate of the permutation-based p-value
Parameters
----------
a : np.ndarray
Data for one class or
ground truth (correct) labels
b : np.ndarray
Data for another class or
predicted labels, as returned by a classifier
comparison : str
{'predictions', 'means'}
permutations : int, optional
Number of permutations
Returns
-------
p_value : float
The proportion of times a value as extreme
as the observed estimate is seen
Notes
-----
This calculates the two-tailed p-value
"""
assert comparison in ('predictions', 'means')
np.random.seed(42)
if comparison == 'predictions':
c = b.copy()
compare = accuracy_score
else:
c = np.append(a, b)
a = a.shape[0]
compare = _diff_means
baseline = compare(a, c)
v = []
for _ in range(permutations):
np.random.shuffle(c)
v.append(compare(a, c))
p_value = (np.abs(np.array(v)) >= np.abs(baseline)).sum() / permutations
return p_value
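# A minimal sketch of how _permute might be called for a difference-in-means
# test; the two arrays below are hypothetical sample data, not values taken
# from this module.
def _example_permutation_test():
    a = np.array([1.1, 2.3, 1.9, 2.8, 2.2])  # observations for one class
    b = np.array([1.0, 1.4, 1.2, 1.6, 1.1])  # observations for the other class
    # two-tailed permutation p-value for the observed difference in means
    return _permute(a, b, comparison='means', permutations=1000)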
def print_pvalues(a, b):
"""Wrapper function for printing meand and p-values
both permutation-based and classical
Parameters
----------
a : np.ndarray
Data for one class or
ground truth (correct) labels
b : np.ndarray
Data for another class or
predicted labels, as returned by a classifier
Returns
-------
None
"""
assert isinstance(a, np.ndarray) and isinstance(b, np.ndarray)
rnd = lambda x: np.round(x, 8)
permutation = _permute(a, b, 'means')
classical = ttest_ind(a, b, equal_var=False)[1]
print("[means] 'a':", rnd(a.mean()), "'b':", rnd(b.mean()))
print("p-values:")
print(" [permutation]:", rnd(permutation))
print(" [classical]: ", rnd(classical))
| mit |
geligaoli/shadowsocks-1 | shadowsocks/common.py | 9 | 10391 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
def compat_ord(s):
if type(s) == int:
return s
return _ord(s)
def compat_chr(d):
if bytes == str:
return _chr(d)
return bytes([d])
_ord = ord
_chr = chr
ord = compat_ord
chr = compat_chr
def to_bytes(s):
if bytes != str:
if type(s) == str:
return s.encode('utf-8')
return s
def to_str(s):
if bytes != str:
if type(s) == bytes:
return s.decode('utf-8')
return s
def inet_ntop(family, ipstr):
if family == socket.AF_INET:
return to_bytes(socket.inet_ntoa(ipstr))
elif family == socket.AF_INET6:
import re
v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
for i, j in zip(ipstr[::2], ipstr[1::2]))
v6addr = re.sub('::+', '::', v6addr, count=1)
return to_bytes(v6addr)
def inet_pton(family, addr):
addr = to_str(addr)
if family == socket.AF_INET:
return socket.inet_aton(addr)
elif family == socket.AF_INET6:
if '.' in addr: # a v4 addr
v4addr = addr[addr.rindex(':') + 1:]
v4addr = socket.inet_aton(v4addr)
v4addr = map(lambda x: ('%02X' % ord(x)), v4addr)
v4addr.insert(2, ':')
newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
return inet_pton(family, newaddr)
dbyts = [0] * 8 # 8 groups
grps = addr.split(':')
for i, v in enumerate(grps):
if v:
dbyts[i] = int(v, 16)
else:
for j, w in enumerate(grps[::-1]):
if w:
dbyts[7 - j] = int(w, 16)
else:
break
break
return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
else:
raise RuntimeError("What family?")
def is_ip(address):
for family in (socket.AF_INET, socket.AF_INET6):
try:
if type(address) != str:
address = address.decode('utf8')
inet_pton(family, address)
return family
except (TypeError, ValueError, OSError, IOError):
pass
return False
def patch_socket():
if not hasattr(socket, 'inet_pton'):
socket.inet_pton = inet_pton
if not hasattr(socket, 'inet_ntop'):
socket.inet_ntop = inet_ntop
patch_socket()
ADDRTYPE_IPV4 = 1
ADDRTYPE_IPV6 = 4
ADDRTYPE_HOST = 3
def pack_addr(address):
address_str = to_str(address)
for family in (socket.AF_INET, socket.AF_INET6):
try:
r = socket.inet_pton(family, address_str)
if family == socket.AF_INET6:
return b'\x04' + r
else:
return b'\x01' + r
except (TypeError, ValueError, OSError, IOError):
pass
if len(address) > 255:
address = address[:255] # TODO
return b'\x03' + chr(len(address)) + address
def pre_parse_header(data):
datatype = ord(data[0])
if datatype == 0x80:
if len(data) <= 2:
return None
rand_data_size = ord(data[1])
if rand_data_size + 2 >= len(data):
logging.warn('header too short, maybe wrong password or '
'encryption method')
return None
data = data[rand_data_size + 2:]
elif datatype == 0x81:
data = data[1:]
elif datatype == 0x82:
if len(data) <= 3:
return None
rand_data_size = struct.unpack('>H', data[1:3])[0]
if rand_data_size + 3 >= len(data):
logging.warn('header too short, maybe wrong password or '
'encryption method')
return None
data = data[rand_data_size + 3:]
elif datatype == 0x88:
if len(data) <= 7 + 7:
return None
data_size = struct.unpack('>H', data[1:3])[0]
ogn_data = data
data = data[:data_size]
crc = binascii.crc32(data) & 0xffffffff
if crc != 0xffffffff:
logging.warn('incorrect CRC32, maybe wrong password or '
'encryption method')
return None
start_pos = 3 + ord(data[3])
data = data[start_pos:-4]
if data_size < len(ogn_data):
data += ogn_data[data_size:]
return data
def parse_header(data):
addrtype = ord(data[0])
dest_addr = None
dest_port = None
header_length = 0
connecttype = (addrtype & 0x10) and 1 or 0
addrtype &= ~0x10
if addrtype == ADDRTYPE_IPV4:
if len(data) >= 7:
dest_addr = socket.inet_ntoa(data[1:5])
dest_port = struct.unpack('>H', data[5:7])[0]
header_length = 7
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_HOST:
if len(data) > 2:
addrlen = ord(data[1])
if len(data) >= 2 + addrlen:
dest_addr = data[2:2 + addrlen]
dest_port = struct.unpack('>H', data[2 + addrlen:4 +
addrlen])[0]
header_length = 4 + addrlen
else:
logging.warn('header is too short')
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_IPV6:
if len(data) >= 19:
dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
dest_port = struct.unpack('>H', data[17:19])[0]
header_length = 19
else:
logging.warn('header is too short')
else:
logging.warn('unsupported addrtype %d, maybe wrong password or '
'encryption method' % addrtype)
if dest_addr is None:
return None
return connecttype, to_bytes(dest_addr), dest_port, header_length
class IPNetwork(object):
ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}
def __init__(self, addrs):
self._network_list_v4 = []
self._network_list_v6 = []
if type(addrs) == str:
addrs = addrs.split(',')
list(map(self.add_network, addrs))
def add_network(self, addr):
if addr is "":
return
block = addr.split('/')
addr_family = is_ip(block[0])
addr_len = IPNetwork.ADDRLENGTH[addr_family]
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(block[0]))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
ip = (hi << 64) | lo
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if len(block) is 1:
prefix_size = 0
while (ip & 1) == 0 and ip is not 0:
ip >>= 1
prefix_size += 1
logging.warn("You did't specify CIDR routing prefix size for %s, "
"implicit treated as %s/%d" % (addr, addr, addr_len))
elif block[1].isdigit() and int(block[1]) <= addr_len:
prefix_size = addr_len - int(block[1])
ip >>= prefix_size
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if addr_family is socket.AF_INET:
self._network_list_v4.append((ip, prefix_size))
else:
self._network_list_v6.append((ip, prefix_size))
def __contains__(self, addr):
addr_family = is_ip(addr)
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(addr))
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v4))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
ip = (hi << 64) | lo
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v6))
else:
return False
def test_inet_conv():
ipv4 = b'8.8.4.4'
b = inet_pton(socket.AF_INET, ipv4)
assert inet_ntop(socket.AF_INET, b) == ipv4
ipv6 = b'2404:6800:4005:805::1011'
b = inet_pton(socket.AF_INET6, ipv6)
assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
(3, b'www.google.com', 80, 18)
assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
(1, b'8.8.8.8', 53, 7)
assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
b'\x00\x10\x11\x00\x50')) == \
(4, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
assert pack_addr(b'2404:6800:4005:805::1011') == \
b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
assert '127.0.0.1' in ip_network
assert '127.0.1.1' not in ip_network
assert ':ff:ffff' in ip_network
assert '::ffff:1' not in ip_network
assert '::1' in ip_network
assert '::2' not in ip_network
assert '192.168.1.1' in ip_network
assert '192.168.1.2' not in ip_network
assert '192.0.2.1' in ip_network
assert '192.0.3.1' in ip_network # 192.0.2.0 is treated as 192.0.2.0/23
assert 'www.google.com' not in ip_network
if __name__ == '__main__':
test_inet_conv()
test_parse_header()
test_pack_header()
test_ip_network()
| apache-2.0 |
Juniper/nova | nova/tests/functional/wsgi/test_flavor_manage.py | 4 | 9812 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from nova import context
from nova import db
from nova import exception as ex
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers as helper
from nova.tests.unit import policy_fixture
def rand_flavor(**kwargs):
flav = {
'name': 'name-%s' % helper.generate_random_alphanumeric(10),
'id': helper.generate_random_alphanumeric(10),
'ram': int(helper.generate_random_numeric(2)) + 1,
'disk': int(helper.generate_random_numeric(3)),
'vcpus': int(helper.generate_random_numeric(1)) + 1,
}
flav.update(kwargs)
return flav
class FlavorManageFullstack(testscenarios.WithScenarios, test.TestCase):
"""Tests for flavors manage administrative command.
Extension: os-flavors-manage
os-flavors-manage adds a set of admin functions to the flavors
resource for the creation and deletion of flavors.
POST /v2/flavors:
::
{
'name': NAME, # string, required unique
'id': ID, # string, required unique
'ram': RAM, # in MB, required
'vcpus': VCPUS, # int value, required
'disk': DISK, # in GB, required
'OS-FLV-EXT-DATA:ephemeral', # in GB, ephemeral disk size
'is_public': IS_PUBLIC, # boolean
'swap': SWAP, # in GB?
'rxtx_factor': RXTX, # ???
}
Returns Flavor
DELETE /v2/flavors/ID
Functional Test Scope:
This test starts the wsgi stack for the nova api services, uses an
in memory database to ensure the path through the wsgi layer to
the database.
"""
_additional_fixtures = []
scenarios = [
# test v2.1 base microversion
('v2_1', {
'api_major_version': 'v2.1'}),
]
def setUp(self):
super(FlavorManageFullstack, self).setUp()
# load any additional fixtures specified by the scenario
for fix in self._additional_fixtures:
self.useFixture(fix())
self.useFixture(policy_fixture.RealPolicyFixture())
api_fixture = self.useFixture(
nova_fixtures.OSAPIFixture(
api_version=self.api_major_version))
# NOTE(sdague): because this test is primarily an admin API
# test default self.api to the admin api.
self.api = api_fixture.admin_api
self.user_api = api_fixture.api
def assertFlavorDbEqual(self, flav, flavdb):
# a mapping of the REST params to the db fields
mapping = {
'name': 'name',
'disk': 'root_gb',
'ram': 'memory_mb',
'vcpus': 'vcpus',
'id': 'flavorid',
'swap': 'swap'
}
for k, v in mapping.items():
if k in flav:
self.assertEqual(flav[k], flavdb[v],
"%s != %s" % (flav, flavdb))
def assertFlavorAPIEqual(self, flav, flavapi):
# for all keys in the flavor, ensure they are correctly set in
# flavapi response.
for k in flav:
if k in flavapi:
self.assertEqual(flav[k], flavapi[k],
"%s != %s" % (flav, flavapi))
else:
self.fail("Missing key: %s in flavor: %s" % (k, flavapi))
def assertFlavorInList(self, flav, flavlist):
for item in flavlist['flavors']:
if flav['id'] == item['id']:
self.assertEqual(flav['name'], item['name'])
return
self.fail("%s not found in %s" % (flav, flavlist))
def assertFlavorNotInList(self, flav, flavlist):
for item in flavlist['flavors']:
if flav['id'] == item['id']:
self.fail("%s found in %s" % (flav, flavlist))
def test_flavor_manage_func_negative(self):
"""Test flavor manage edge conditions.
- Bogus body is a 400
- Unknown flavor is a 404
- Deleting unknown flavor is a 404
"""
# Test for various API failure conditions
# bad body is 400
resp = self.api.api_post('flavors', '', check_response_status=False)
self.assertEqual(400, resp.status)
# get unknown flavor is 404
resp = self.api.api_delete('flavors/foo', check_response_status=False)
self.assertEqual(404, resp.status)
# delete unknown flavor is 404
resp = self.api.api_delete('flavors/foo', check_response_status=False)
self.assertEqual(404, resp.status)
ctx = context.get_admin_context()
# bounds conditions - invalid vcpus
flav = {'flavor': rand_flavor(vcpus=0)}
resp = self.api.api_post('flavors', flav, check_response_status=False)
self.assertEqual(400, resp.status, resp)
# ... and ensure that we didn't leak it into the db
self.assertRaises(ex.FlavorNotFound,
objects.Flavor.get_by_flavor_id,
ctx, flav['flavor']['id'])
# bounds conditions - invalid ram
flav = {'flavor': rand_flavor(ram=0)}
resp = self.api.api_post('flavors', flav, check_response_status=False)
self.assertEqual(400, resp.status)
# ... and ensure that we didn't leak it into the db
self.assertRaises(ex.FlavorNotFound,
objects.Flavor.get_by_flavor_id,
ctx, flav['flavor']['id'])
# NOTE(sdague): if there are other bounds conditions that
# should be checked, stack them up here.
def test_flavor_manage_deleted(self):
"""Ensure the behavior around a deleted flavor is stable.
- Fetching a deleted flavor works, and returns the flavor info.
- Listings should not contain deleted flavors
"""
# create a deleted flavor
new_flav = {'flavor': rand_flavor()}
self.api.api_post('flavors', new_flav)
self.api.api_delete('flavors/%s' % new_flav['flavor']['id'])
# deleted flavor should not show up in a list
resp = self.api.api_get('flavors')
self.assertFlavorNotInList(new_flav['flavor'], resp.body)
def test_flavor_create_frozen(self):
ctx = context.get_admin_context()
db.flavor_create(ctx, {
'name': 'foo', 'memory_mb': 512, 'vcpus': 1,
'root_gb': 1, 'ephemeral_gb': 0, 'flavorid': 'foo',
'swap': 0, 'rxtx_factor': 1.0, 'vcpu_weight': 1,
'disabled': False, 'is_public': True,
})
new_flav = {'flavor': rand_flavor()}
resp = self.api.api_post('flavors', new_flav,
check_response_status=False)
self.assertEqual(409, resp.status)
def test_flavor_manage_func(self):
"""Basic flavor creation lifecycle testing.
- Creating a flavor
- Ensure it's in the database
- Ensure it's in the listing
- Delete it
- Ensure it's hidden in the database
"""
ctx = context.get_admin_context()
flav1 = {
'flavor': rand_flavor(),
}
# Create flavor and ensure it made it to the database
self.api.api_post('flavors', flav1)
flav1db = objects.Flavor.get_by_flavor_id(ctx, flav1['flavor']['id'])
self.assertFlavorDbEqual(flav1['flavor'], flav1db)
# Ensure new flavor is seen in the listing
resp = self.api.api_get('flavors')
self.assertFlavorInList(flav1['flavor'], resp.body)
# Delete flavor and ensure it was removed from the database
self.api.api_delete('flavors/%s' % flav1['flavor']['id'])
self.assertRaises(ex.FlavorNotFound,
objects.Flavor.get_by_flavor_id,
ctx, flav1['flavor']['id'])
resp = self.api.api_delete('flavors/%s' % flav1['flavor']['id'],
check_response_status=False)
self.assertEqual(404, resp.status)
def test_flavor_manage_permissions(self):
"""Ensure that regular users can't create or delete flavors.
"""
ctx = context.get_admin_context()
flav1 = {'flavor': rand_flavor()}
# Ensure user can't create flavor
resp = self.user_api.api_post('flavors', flav1,
check_response_status=False)
self.assertEqual(403, resp.status)
# ... and that it didn't leak through
self.assertRaises(ex.FlavorNotFound,
objects.Flavor.get_by_flavor_id,
ctx, flav1['flavor']['id'])
# Create the flavor as the admin user
self.api.api_post('flavors', flav1)
# Ensure user can't delete flavors from our cloud
resp = self.user_api.api_delete('flavors/%s' % flav1['flavor']['id'],
check_response_status=False)
self.assertEqual(403, resp.status)
# ... and ensure that we didn't actually delete the flavor,
# this will throw an exception if we did.
objects.Flavor.get_by_flavor_id(ctx, flav1['flavor']['id'])
| apache-2.0 |
willharris/django | django/core/serializers/__init__.py | 121 | 8167 | """
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_queryset)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv": "path.to.csv.serializer",
"txt": "path.to.txt.serializer",
}
"""
import importlib
from django.apps import apps
from django.conf import settings
from django.utils import six
from django.core.serializers.base import SerializerDoesNotExist
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml": "django.core.serializers.xml_serializer",
"python": "django.core.serializers.python",
"json": "django.core.serializers.json",
"yaml": "django.core.serializers.pyyaml",
}
_serializers = {}
class BadSerializer(object):
"""
Stub serializer to hold exception raised during registration
This allows the serializer registration to cache serializers and if there
is an error raised in the process of creating a serializer it will be
raised and passed along to the caller when the serializer is used.
"""
internal_use_only = False
def __init__(self, exception):
self.exception = exception
def __call__(self, *args, **kwargs):
raise self.exception
def register_serializer(format, serializer_module, serializers=None):
"""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
if serializers is None and not _serializers:
_load_serializers()
try:
module = importlib.import_module(serializer_module)
except ImportError as exc:
bad_serializer = BadSerializer(exc)
module = type('BadSerializerModule', (object,), {
'Deserializer': bad_serializer,
'Serializer': bad_serializer,
})
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return list(_serializers)
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in six.iteritems(_serializers) if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string, **options):
"""
Deserialize a stream or a string. Returns an iterator that yields ``(obj,
m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string, **options)
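# A minimal sketch of the serialize/deserialize round trip described in the
# module docstring; it assumes a configured Django project and a hypothetical
# queryset, and is not part of this module's public API.
def _example_round_trip(queryset):
    payload = serialize("json", queryset)
    for deserialized in deserialize("json", payload):
        # each item is a DeserializedObject wrapping an unsaved model instance
        deserialized.save()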
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
def sort_dependencies(app_list):
"""Sort a list of (app_config, models) pairs into a single list of models.
The single list of models is sorted so that any model with a natural key
is serialized before a normal model, and any model with a natural key
dependency has it's dependencies serialized first.
"""
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app_config, model_list in app_list:
if model_list is None:
model_list = app_config.get_models()
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [apps.get_model(dep) for dep in deps]
else:
deps = []
# Now add a dependency for any FK relation with a model that
# defines a natural key
for field in model._meta.fields:
if hasattr(field.rel, 'to'):
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
# Also add a dependency for any simple M2M relation with a model
# that defines a natural key. M2M relations with explicit through
# models don't count as dependencies.
for field in model._meta.many_to_many:
if field.rel.through._meta.auto_created:
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
# then we've found another model with all its dependencies satisfied.
found = True
for candidate in ((d not in models or d in model_list) for d in deps):
if not candidate:
found = False
if found:
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
raise RuntimeError("Can't resolve dependencies for %s in serialized app list." %
', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
)
model_dependencies = skipped
return model_list
| bsd-3-clause |
cruzegoodin/TSC-ShippingDetails | flask/lib/python2.7/site-packages/pbr/git.py | 25 | 9311 | # Copyright 2011 OpenStack LLC.
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import distutils.errors
from distutils import log
import io
import os
import re
import subprocess
import pkg_resources
from pbr import options
def _run_shell_command(cmd, throw_on_error=False, buffer=True, env=None):
if buffer:
out_location = subprocess.PIPE
err_location = subprocess.PIPE
else:
out_location = None
err_location = None
newenv = os.environ.copy()
if env:
newenv.update(env)
output = subprocess.Popen(cmd,
stdout=out_location,
stderr=err_location,
env=newenv)
out = output.communicate()
if output.returncode and throw_on_error:
raise distutils.errors.DistutilsError(
"%s returned %d" % (cmd, output.returncode))
if len(out) == 0 or not out[0] or not out[0].strip():
return ''
return out[0].strip().decode('utf-8')
def _run_git_command(cmd, git_dir, **kwargs):
if not isinstance(cmd, (list, tuple)):
cmd = [cmd]
return _run_shell_command(
['git', '--git-dir=%s' % git_dir] + cmd, **kwargs)
def _get_git_directory():
return _run_shell_command(['git', 'rev-parse', '--git-dir'])
def _git_is_installed():
try:
# We cannot use 'which git' as it may not be available
# in some distributions, So just try 'git --version'
# to see if we run into trouble
_run_shell_command(['git', '--version'])
except OSError:
return False
return True
def _get_highest_tag(tags):
"""Find the highest tag from a list.
Pass in a list of tag strings and this will return the highest
(latest) as sorted by the pkg_resources version parser.
"""
return max(tags, key=pkg_resources.parse_version)
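# Illustrative example (added): pkg_resources orders versions numerically,
# so _get_highest_tag(['1.2.0', '1.10.0', '1.9.1']) returns '1.10.0',
# whereas a plain lexicographic max() would have picked '1.9.1'.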
def _find_git_files(dirname='', git_dir=None):
"""Behave like a file finder entrypoint plugin.
We don't actually use the entrypoints system for this because it runs
at absurd times. We only want to do this when we are building an sdist.
"""
file_list = []
if git_dir is None:
git_dir = _run_git_functions()
if git_dir:
log.info("[pbr] In git context, generating filelist from git")
file_list = _run_git_command(['ls-files', '-z'], git_dir)
file_list = file_list.split(b'\x00'.decode('utf-8'))
return [f for f in file_list if f]
def _get_raw_tag_info(git_dir):
describe = _run_git_command(['describe', '--always'], git_dir)
if "-" in describe:
return describe.rsplit("-", 2)[-2]
if "." in describe:
return 0
return None
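# Illustrative note (added): for a describe string such as
# '1.8.1-45-g3b0f286' the function returns '45' (the number of commits since
# the last tag); an exact tag like '1.8.1' yields 0; an untagged short sha
# such as '3b0f286' yields None.  get_is_release() below treats only the
# exact-tag case (0) as a release.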
def get_is_release(git_dir):
return _get_raw_tag_info(git_dir) == 0
def _run_git_functions():
git_dir = None
if _git_is_installed():
git_dir = _get_git_directory()
return git_dir or None
def get_git_short_sha(git_dir=None):
"""Return the short sha for this repo, if it exists."""
if not git_dir:
git_dir = _run_git_functions()
if git_dir:
return _run_git_command(
['log', '-n1', '--pretty=format:%h'], git_dir)
return None
def _iter_changelog(changelog):
"""Convert a oneline log iterator to formatted strings.
:param changelog: An iterator of one line log entries like
that given by _iter_log_oneline.
:return: An iterator over (release, formatted changelog) tuples.
"""
first_line = True
current_release = None
yield current_release, "CHANGES\n=======\n\n"
for hash, tags, msg in changelog:
if tags:
current_release = _get_highest_tag(tags)
underline = len(current_release) * '-'
if not first_line:
yield current_release, '\n'
yield current_release, (
"%(tag)s\n%(underline)s\n\n" %
dict(tag=current_release, underline=underline))
if not msg.startswith("Merge "):
if msg.endswith("."):
msg = msg[:-1]
yield current_release, "* %(msg)s\n" % dict(msg=msg)
first_line = False
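# Illustrative sketch (added; hashes and tags are hypothetical): feeding the
# entries ('a1b2c3', set(['1.1.0']), 'Add feature') and
# ('d4e5f6', set(), 'Fix bug') yields the "CHANGES" header first, then a
# "1.1.0" section header, then one "* ..." bullet per commit, each paired
# with the release '1.1.0' so callers can group the output per release.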
def _iter_log_oneline(git_dir=None, option_dict=None):
"""Iterate over --oneline log entries if possible.
This parses the output into a structured form but does not apply
presentation logic to the output - making it suitable for different
uses.
:return: An iterator of (hash, tags_set, 1st_line) tuples, or None if
changelog generation is disabled / not available.
"""
if not option_dict:
option_dict = {}
should_skip = options.get_boolean_option(option_dict, 'skip_changelog',
'SKIP_WRITE_GIT_CHANGELOG')
if should_skip:
return
if git_dir is None:
git_dir = _get_git_directory()
if not git_dir:
return
return _iter_log_inner(git_dir)
def _iter_log_inner(git_dir):
"""Iterate over --oneline log entries.
    This parses the output into a structured form but does not apply
presentation logic to the output - making it suitable for different
uses.
:return: An iterator of (hash, tags_set, 1st_line) tuples.
"""
log.info('[pbr] Generating ChangeLog')
log_cmd = ['log', '--oneline', '--decorate']
changelog = _run_git_command(log_cmd, git_dir)
for line in changelog.split('\n'):
line_parts = line.split()
if len(line_parts) < 2:
continue
# Tags are in a list contained in ()'s. If a commit
# subject that is tagged happens to have ()'s in it
# this will fail
if line_parts[1].startswith('(') and ')' in line:
msg = line.split(')')[1].strip()
else:
msg = " ".join(line_parts[1:])
if "tag:" in line:
tags = set([
tag.split(",")[0]
for tag in line.split(")")[0].split("tag: ")[1:]])
else:
tags = set()
yield line_parts[0], tags, msg
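# Illustrative example (added; the commits are hypothetical): a log line such
# as '3b0f286 (HEAD, tag: 1.8.1, origin/master) Release 1.8.1' is yielded as
# ('3b0f286', set(['1.8.1']), 'Release 1.8.1'), while an untagged line like
# '9c2d1ab Fix typo' becomes ('9c2d1ab', set(), 'Fix typo').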
def write_git_changelog(git_dir=None, dest_dir=os.path.curdir,
option_dict=dict(), changelog=None):
"""Write a changelog based on the git changelog."""
if not changelog:
changelog = _iter_log_oneline(git_dir=git_dir, option_dict=option_dict)
if changelog:
changelog = _iter_changelog(changelog)
if not changelog:
return
log.info('[pbr] Writing ChangeLog')
new_changelog = os.path.join(dest_dir, 'ChangeLog')
# If there's already a ChangeLog and it's not writable, just use it
if (os.path.exists(new_changelog)
and not os.access(new_changelog, os.W_OK)):
return
with io.open(new_changelog, "w", encoding="utf-8") as changelog_file:
for release, content in changelog:
changelog_file.write(content)
def generate_authors(git_dir=None, dest_dir='.', option_dict=dict()):
"""Create AUTHORS file using git commits."""
should_skip = options.get_boolean_option(option_dict, 'skip_authors',
'SKIP_GENERATE_AUTHORS')
if should_skip:
return
old_authors = os.path.join(dest_dir, 'AUTHORS.in')
new_authors = os.path.join(dest_dir, 'AUTHORS')
# If there's already an AUTHORS file and it's not writable, just use it
if (os.path.exists(new_authors)
and not os.access(new_authors, os.W_OK)):
return
log.info('[pbr] Generating AUTHORS')
ignore_emails = '(jenkins@review|infra@lists|jenkins@openstack)'
if git_dir is None:
git_dir = _get_git_directory()
if git_dir:
authors = []
# don't include jenkins email address in AUTHORS file
git_log_cmd = ['log', '--format=%aN <%aE>']
authors += _run_git_command(git_log_cmd, git_dir).split('\n')
authors = [a for a in authors if not re.search(ignore_emails, a)]
# get all co-authors from commit messages
co_authors_out = _run_git_command('log', git_dir)
co_authors = re.findall('Co-authored-by:.+', co_authors_out,
re.MULTILINE)
co_authors = [signed.split(":", 1)[1].strip()
for signed in co_authors if signed]
authors += co_authors
authors = sorted(set(authors))
with open(new_authors, 'wb') as new_authors_fh:
if os.path.exists(old_authors):
with open(old_authors, "rb") as old_authors_fh:
new_authors_fh.write(old_authors_fh.read())
new_authors_fh.write(('\n'.join(authors) + '\n')
.encode('utf-8'))
| bsd-3-clause |
NexusIS/libcloud | libcloud/test/storage/test_atmos.py | 46 | 32319 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os.path
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import b
import libcloud.utils.files
from libcloud.common.types import LibcloudError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerAlreadyExistsError, \
ContainerDoesNotExistError, \
ContainerIsNotEmptyError, \
ObjectDoesNotExistError
from libcloud.storage.drivers.atmos import AtmosConnection, AtmosDriver
from libcloud.storage.drivers.dummy import DummyIterator
from libcloud.test import StorageMockHttp, MockRawResponse
from libcloud.test.file_fixtures import StorageFileFixtures
class AtmosTests(unittest.TestCase):
def setUp(self):
AtmosDriver.connectionCls.conn_classes = (None, AtmosMockHttp)
AtmosDriver.connectionCls.rawResponseCls = AtmosMockRawResponse
AtmosDriver.path = ''
AtmosMockHttp.type = None
AtmosMockHttp.upload_created = False
AtmosMockRawResponse.type = None
self.driver = AtmosDriver('dummy', base64.b64encode(b('dummy')))
self._remove_test_file()
def tearDown(self):
self._remove_test_file()
def _remove_test_file(self):
file_path = os.path.abspath(__file__) + '.temp'
try:
os.unlink(file_path)
except OSError:
pass
def test_list_containers(self):
AtmosMockHttp.type = 'EMPTY'
containers = self.driver.list_containers()
self.assertEqual(len(containers), 0)
AtmosMockHttp.type = None
containers = self.driver.list_containers()
self.assertEqual(len(containers), 6)
def test_list_container_objects(self):
container = Container(name='test_container', extra={},
driver=self.driver)
AtmosMockHttp.type = 'EMPTY'
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 0)
AtmosMockHttp.type = None
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 2)
obj = [o for o in objects if o.name == 'not-a-container1'][0]
self.assertEqual(obj.meta_data['object_id'],
'651eae32634bf84529c74eabd555fda48c7cead6')
self.assertEqual(obj.container.name, 'test_container')
def test_get_container(self):
container = self.driver.get_container(container_name='test_container')
self.assertEqual(container.name, 'test_container')
self.assertEqual(container.extra['object_id'],
'b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9')
def test_get_container_escaped(self):
container = self.driver.get_container(
container_name='test & container')
self.assertEqual(container.name, 'test & container')
self.assertEqual(container.extra['object_id'],
'b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9')
def test_get_container_not_found(self):
try:
self.driver.get_container(container_name='not_found')
except ContainerDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_create_container_success(self):
container = self.driver.create_container(
container_name='test_create_container')
self.assertTrue(isinstance(container, Container))
self.assertEqual(container.name, 'test_create_container')
self.assertEqual(container.extra['object_id'],
'31a27b593629a3fe59f887fd973fd953e80062ce')
def test_create_container_already_exists(self):
AtmosMockHttp.type = 'ALREADY_EXISTS'
try:
self.driver.create_container(
container_name='test_create_container')
except ContainerAlreadyExistsError:
pass
else:
self.fail(
'Container already exists but an exception was not thrown')
def test_delete_container_success(self):
container = Container(name='foo_bar_container', extra={}, driver=self)
result = self.driver.delete_container(container=container)
self.assertTrue(result)
def test_delete_container_not_found(self):
AtmosMockHttp.type = 'NOT_FOUND'
container = Container(name='foo_bar_container', extra={}, driver=self)
try:
self.driver.delete_container(container=container)
except ContainerDoesNotExistError:
pass
else:
self.fail(
'Container does not exist but an exception was not thrown')
def test_delete_container_not_empty(self):
AtmosMockHttp.type = 'NOT_EMPTY'
container = Container(name='foo_bar_container', extra={}, driver=self)
try:
self.driver.delete_container(container=container)
except ContainerIsNotEmptyError:
pass
else:
self.fail('Container is not empty but an exception was not thrown')
def test_get_object_success(self):
obj = self.driver.get_object(container_name='test_container',
object_name='test_object')
self.assertEqual(obj.container.name, 'test_container')
self.assertEqual(obj.size, 555)
self.assertEqual(obj.hash, '6b21c4a111ac178feacf9ec9d0c71f17')
self.assertEqual(obj.extra['object_id'],
'322dce3763aadc41acc55ef47867b8d74e45c31d6643')
self.assertEqual(
obj.extra['last_modified'], 'Tue, 25 Jan 2011 22:01:49 GMT')
self.assertEqual(obj.meta_data['foo-bar'], 'test 1')
self.assertEqual(obj.meta_data['bar-foo'], 'test 2')
def test_get_object_escaped(self):
obj = self.driver.get_object(container_name='test & container',
object_name='test & object')
self.assertEqual(obj.container.name, 'test & container')
self.assertEqual(obj.size, 555)
self.assertEqual(obj.hash, '6b21c4a111ac178feacf9ec9d0c71f17')
self.assertEqual(obj.extra['object_id'],
'322dce3763aadc41acc55ef47867b8d74e45c31d6643')
self.assertEqual(
obj.extra['last_modified'], 'Tue, 25 Jan 2011 22:01:49 GMT')
self.assertEqual(obj.meta_data['foo-bar'], 'test 1')
self.assertEqual(obj.meta_data['bar-foo'], 'test 2')
def test_get_object_not_found(self):
try:
self.driver.get_object(container_name='test_container',
object_name='not_found')
except ObjectDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_delete_object_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
status = self.driver.delete_object(obj=obj)
self.assertTrue(status)
def test_delete_object_escaped_success(self):
container = Container(name='foo & bar_container', extra={},
driver=self.driver)
obj = Object(name='foo & bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
status = self.driver.delete_object(obj=obj)
self.assertTrue(status)
def test_delete_object_not_found(self):
AtmosMockHttp.type = 'NOT_FOUND'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
try:
self.driver.delete_object(obj=obj)
except ObjectDoesNotExistError:
pass
else:
self.fail('Object does not exist but an exception was not thrown')
def test_download_object_success(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertTrue(result)
def test_download_object_escaped_success(self):
container = Container(name='foo & bar_container', extra={},
driver=self.driver)
obj = Object(name='foo & bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
destination_path = os.path.abspath(__file__) + '.temp'
result = self.driver.download_object(obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
self.assertTrue(result)
def test_download_object_success_not_found(self):
AtmosMockRawResponse.type = 'NOT_FOUND'
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container,
meta_data=None,
driver=self.driver)
destination_path = os.path.abspath(__file__) + '.temp'
try:
self.driver.download_object(
obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True)
except ObjectDoesNotExistError:
pass
else:
self.fail('Object does not exist but an exception was not thrown')
def test_download_object_as_stream(self):
container = Container(name='foo_bar_container', extra={},
driver=self.driver)
obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
stream = self.driver.download_object_as_stream(
obj=obj, chunk_size=None)
self.assertTrue(hasattr(stream, '__iter__'))
def test_download_object_as_stream_escaped(self):
container = Container(name='foo & bar_container', extra={},
driver=self.driver)
obj = Object(name='foo & bar_object', size=1000, hash=None, extra={},
container=container, meta_data=None,
driver=self.driver)
stream = self.driver.download_object_as_stream(
obj=obj, chunk_size=None)
self.assertTrue(hasattr(stream, '__iter__'))
def test_upload_object_success(self):
def upload_file(self, response, file_path, chunked=False,
calculate_hash=True):
return True, 'hash343hhash89h932439jsaa89', 1000
old_func = AtmosDriver._upload_file
AtmosDriver._upload_file = upload_file
path = os.path.abspath(__file__)
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftu'
extra = {'meta_data': {'some-value': 'foobar'}}
obj = self.driver.upload_object(file_path=path, container=container,
extra=extra, object_name=object_name)
self.assertEqual(obj.name, 'ftu')
self.assertEqual(obj.size, 1000)
self.assertTrue('some-value' in obj.meta_data)
AtmosDriver._upload_file = old_func
def test_upload_object_no_content_type(self):
def no_content_type(name):
return None, None
old_func = libcloud.utils.files.guess_file_mime_type
libcloud.utils.files.guess_file_mime_type = no_content_type
file_path = os.path.abspath(__file__)
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftu'
obj = self.driver.upload_object(file_path=file_path,
container=container,
object_name=object_name)
# Just check that the file was uploaded OK, as the fallback
# Content-Type header should be set (application/octet-stream).
self.assertEqual(obj.name, object_name)
libcloud.utils.files.guess_file_mime_type = old_func
def test_upload_object_error(self):
def dummy_content_type(name):
return 'application/zip', None
def send(instance):
raise Exception('')
old_func1 = libcloud.utils.files.guess_file_mime_type
libcloud.utils.files.guess_file_mime_type = dummy_content_type
old_func2 = AtmosMockHttp.send
AtmosMockHttp.send = send
file_path = os.path.abspath(__file__)
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftu'
try:
self.driver.upload_object(
file_path=file_path,
container=container,
object_name=object_name)
except LibcloudError:
pass
else:
self.fail(
'Timeout while uploading but an exception was not thrown')
finally:
libcloud.utils.files.guess_file_mime_type = old_func1
AtmosMockHttp.send = old_func2
def test_upload_object_nonexistent_file(self):
def dummy_content_type(name):
return 'application/zip', None
old_func = libcloud.utils.files.guess_file_mime_type
libcloud.utils.files.guess_file_mime_type = dummy_content_type
file_path = os.path.abspath(__file__ + '.inexistent')
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftu'
try:
self.driver.upload_object(
file_path=file_path,
container=container,
object_name=object_name)
except OSError:
pass
else:
            self.fail('Nonexistent file, but an exception was not thrown')
finally:
libcloud.utils.files.guess_file_mime_type = old_func
def test_upload_object_via_stream_new_object(self):
def dummy_content_type(name):
return 'application/zip', None
old_func = libcloud.storage.drivers.atmos.guess_file_mime_type
libcloud.storage.drivers.atmos.guess_file_mime_type = dummy_content_type
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftsdn'
iterator = DummyIterator(data=['2', '3', '5'])
try:
self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator)
finally:
libcloud.storage.drivers.atmos.guess_file_mime_type = old_func
def test_upload_object_via_stream_existing_object(self):
def dummy_content_type(name):
return 'application/zip', None
old_func = libcloud.storage.drivers.atmos.guess_file_mime_type
libcloud.storage.drivers.atmos.guess_file_mime_type = dummy_content_type
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftsde'
iterator = DummyIterator(data=['2', '3', '5'])
try:
self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator)
finally:
libcloud.storage.drivers.atmos.guess_file_mime_type = old_func
def test_upload_object_via_stream_no_content_type(self):
def no_content_type(name):
return None, None
old_func = libcloud.storage.drivers.atmos.guess_file_mime_type
libcloud.storage.drivers.atmos.guess_file_mime_type = no_content_type
container = Container(name='fbc', extra={}, driver=self)
object_name = 'ftsdct'
iterator = DummyIterator(data=['2', '3', '5'])
try:
self.driver.upload_object_via_stream(container=container,
object_name=object_name,
iterator=iterator)
except AttributeError:
pass
else:
self.fail(
'File content type not provided'
' but an exception was not thrown')
finally:
libcloud.storage.drivers.atmos.guess_file_mime_type = old_func
def test_signature_algorithm(self):
test_uid = 'fredsmagicuid'
test_key = base64.b64encode(b('ssssshhhhhmysecretkey'))
test_date = 'Mon, 04 Jul 2011 07:39:19 GMT'
test_values = [
('GET', '/rest/namespace/foo', '', {},
'WfSASIA25TuqO2n0aO9k/dtg6S0='),
('GET', '/rest/namespace/foo%20%26%20bar', '', {},
'vmlqXqcInxxoP4YX5mR09BonjX4='),
('POST', '/rest/namespace/foo', '', {},
'oYKdsF+1DOuUT7iX5CJCDym2EQk='),
('PUT', '/rest/namespace/foo', '', {},
'JleF9dpSWhaT3B2swZI3s41qqs4='),
('DELETE', '/rest/namespace/foo', '', {},
'2IX+Bd5XZF5YY+g4P59qXV1uLpo='),
('GET', '/rest/namespace/foo?metata/system', '', {},
'zuHDEAgKM1winGnWn3WBsqnz4ks='),
('POST', '/rest/namespace/foo?metadata/user', '', {
'x-emc-meta': 'fakemeta=fake, othermeta=faketoo'
}, '7sLx1nxPIRAtocfv02jz9h1BjbU='),
]
class FakeDriver(object):
path = ''
for method, action, api_path, headers, expected in test_values:
c = AtmosConnection(test_uid, test_key)
c.method = method
c.action = action
d = FakeDriver()
d.path = api_path
c.driver = d
headers = c.add_default_headers(headers)
headers['Date'] = headers['x-emc-date'] = test_date
self.assertEqual(c._calculate_signature({}, headers),
b(expected).decode('utf-8'))
class AtmosMockHttp(StorageMockHttp, unittest.TestCase):
fixtures = StorageFileFixtures('atmos')
upload_created = False
upload_stream_created = False
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self)
if kwargs.get('host', None) and kwargs.get('port', None):
StorageMockHttp.__init__(self, *args, **kwargs)
self._upload_object_via_stream_first_request = True
def runTest(self):
pass
def request(self, method, url, body=None, headers=None, raw=False):
headers = headers or {}
parsed = urlparse.urlparse(url)
if parsed.query.startswith('metadata/'):
parsed = list(parsed)
parsed[2] = parsed[2] + '/' + parsed[4]
parsed[4] = ''
url = urlparse.urlunparse(parsed)
return super(AtmosMockHttp, self).request(method, url, body, headers,
raw)
def _rest_namespace_EMPTY(self, method, url, body, headers):
body = self.fixtures.load('empty_directory_listing.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_namespace(self, method, url, body, headers):
body = self.fixtures.load('list_containers.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_namespace_test_container_EMPTY(self, method, url, body, headers):
body = self.fixtures.load('empty_directory_listing.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_namespace_test_container(self, method, url, body, headers):
body = self.fixtures.load('list_containers.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_namespace_test_container__metadata_system(
self, method, url, body,
headers):
headers = {
'x-emc-meta': 'objectid=b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9'
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_test_20_26_20container__metadata_system(
self, method, url, body,
headers):
headers = {
'x-emc-meta': 'objectid=b21cb59a2ba339d1afdd4810010b0a5aba2ab6b9'
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_not_found__metadata_system(self, method, url, body,
headers):
body = self.fixtures.load('not_found.xml')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def _rest_namespace_test_create_container(self, method, url, body, headers):
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_test_create_container__metadata_system(self, method,
url, body,
headers):
headers = {
'x-emc-meta': 'objectid=31a27b593629a3fe59f887fd973fd953e80062ce'
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_test_create_container_ALREADY_EXISTS(self, method, url,
body, headers):
body = self.fixtures.load('already_exists.xml')
return (httplib.BAD_REQUEST, body, {},
httplib.responses[httplib.BAD_REQUEST])
def _rest_namespace_foo_bar_container(self, method, url, body, headers):
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_foo_bar_container_NOT_FOUND(self, method, url, body,
headers):
body = self.fixtures.load('not_found.xml')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def _rest_namespace_foo_bar_container_NOT_EMPTY(self, method, url, body,
headers):
body = self.fixtures.load('not_empty.xml')
return (httplib.BAD_REQUEST, body, {},
httplib.responses[httplib.BAD_REQUEST])
def _rest_namespace_test_container_test_object_metadata_system(
self, method,
url, body,
headers):
meta = {
'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643',
'size': '555',
'mtime': '2011-01-25T22:01:49Z'
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_test_20_26_20container_test_20_26_20object_metadata_system(
self, method,
url, body,
headers):
meta = {
'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643',
'size': '555',
'mtime': '2011-01-25T22:01:49Z'
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_test_container_test_object_metadata_user(self, method,
url, body,
headers):
meta = {
'md5': '6b21c4a111ac178feacf9ec9d0c71f17',
'foo-bar': 'test 1',
'bar-foo': 'test 2',
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_test_20_26_20container_test_20_26_20object_metadata_user(
self, method,
url, body,
headers):
meta = {
'md5': '6b21c4a111ac178feacf9ec9d0c71f17',
'foo-bar': 'test 1',
'bar-foo': 'test 2',
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_test_container_not_found_metadata_system(self, method,
url, body,
headers):
body = self.fixtures.load('not_found.xml')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def _rest_namespace_foo_bar_container_foo_bar_object(self, method, url,
body, headers):
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_foo_20_26_20bar_container_foo_20_26_20bar_object(
self, method, url,
body, headers):
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_foo_bar_container_foo_bar_object_NOT_FOUND(
self, method,
url, body,
headers):
body = self.fixtures.load('not_found.xml')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def _rest_namespace_fbc_ftu_metadata_system(self, method, url, body,
headers):
if not self.upload_created:
self.__class__.upload_created = True
body = self.fixtures.load('not_found.xml')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
self.__class__.upload_created = False
meta = {
'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643',
'size': '555',
'mtime': '2011-01-25T22:01:49Z'
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftu_metadata_user(self, method, url, body, headers):
self.assertTrue('x-emc-meta' in headers)
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsdn_metadata_system(self, method, url, body,
headers):
if not self.upload_stream_created:
self.__class__.upload_stream_created = True
body = self.fixtures.load('not_found.xml')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
self.__class__.upload_stream_created = False
meta = {
'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643',
'size': '555',
'mtime': '2011-01-25T22:01:49Z'
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsdn(self, method, url, body, headers):
if self._upload_object_via_stream_first_request:
self.assertTrue('Range' not in headers)
self.assertEqual(method, 'POST')
self._upload_object_via_stream_first_request = False
else:
self.assertTrue('Range' in headers)
self.assertEqual(method, 'PUT')
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsdn_metadata_user(self, method, url, body,
headers):
self.assertTrue('x-emc-meta' in headers)
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsde_metadata_system(self, method, url, body,
headers):
meta = {
'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643',
'size': '555',
'mtime': '2011-01-25T22:01:49Z'
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsde(self, method, url, body, headers):
if self._upload_object_via_stream_first_request:
self.assertTrue('Range' not in headers)
self._upload_object_via_stream_first_request = False
else:
self.assertTrue('Range' in headers)
self.assertEqual(method, 'PUT')
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsde_metadata_user(self, method, url, body,
headers):
self.assertTrue('x-emc-meta' in headers)
return (httplib.OK, '', {}, httplib.responses[httplib.OK])
def _rest_namespace_fbc_ftsd_metadata_system(self, method, url, body,
headers):
meta = {
'objectid': '322dce3763aadc41acc55ef47867b8d74e45c31d6643',
'size': '555',
'mtime': '2011-01-25T22:01:49Z'
}
headers = {
'x-emc-meta': ', '.join([k + '=' + v for k, v in list(meta.items())])
}
return (httplib.OK, '', headers, httplib.responses[httplib.OK])
class AtmosMockRawResponse(MockRawResponse):
fixtures = StorageFileFixtures('atmos')
def _rest_namespace_foo_bar_container_foo_bar_object(self, method, url,
body, headers):
body = self._generate_random_data(1000)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_namespace_foo_20_26_20bar_container_foo_20_26_20bar_object(
self, method, url,
body, headers):
body = self._generate_random_data(1000)
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _rest_namespace_foo_bar_container_foo_bar_object_NOT_FOUND(
self, method,
url, body,
headers):
body = self.fixtures.load('not_found.xml')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def _rest_namespace_fbc_ftu(self, method, url, body, headers):
return (httplib.CREATED, '', {}, httplib.responses[httplib.CREATED])
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
BogusCurry/BlocksCAD | closure-library/closure/bin/calcdeps.py | 223 | 18544 | #!/usr/bin/env python
#
# Copyright 2006 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculates JavaScript dependencies without requiring Google's build system.
This tool is deprecated and is provided for legacy users.
See build/closurebuilder.py and build/depswriter.py for the current tools.
It iterates over a number of search paths and builds a dependency tree. With
the inputs provided, it walks the dependency tree and outputs all the files
required for compilation.
"""
try:
import distutils.version
except ImportError:
# distutils is not available in all environments
distutils = None
import logging
import optparse
import os
import re
import subprocess
import sys
_BASE_REGEX_STRING = '^\s*goog\.%s\(\s*[\'"](.+)[\'"]\s*\)'
req_regex = re.compile(_BASE_REGEX_STRING % 'require')
prov_regex = re.compile(_BASE_REGEX_STRING % 'provide')
ns_regex = re.compile('^ns:((\w+\.)*(\w+))$')
version_regex = re.compile('[\.0-9]+')
def IsValidFile(ref):
"""Returns true if the provided reference is a file and exists."""
return os.path.isfile(ref)
def IsJsFile(ref):
"""Returns true if the provided reference is a Javascript file."""
return ref.endswith('.js')
def IsNamespace(ref):
"""Returns true if the provided reference is a namespace."""
return re.match(ns_regex, ref) is not None
def IsDirectory(ref):
"""Returns true if the provided reference is a directory."""
return os.path.isdir(ref)
def ExpandDirectories(refs):
"""Expands any directory references into inputs.
Description:
Looks for any directories in the provided references. Found directories
are recursively searched for .js files, which are then added to the result
list.
Args:
refs: a list of references such as files, directories, and namespaces
Returns:
A list of references with directories removed and replaced by any
.js files that are found in them. Also, the paths will be normalized.
"""
result = []
for ref in refs:
if IsDirectory(ref):
# Disable 'Unused variable' for subdirs
# pylint: disable=unused-variable
for (directory, subdirs, filenames) in os.walk(ref):
for filename in filenames:
if IsJsFile(filename):
result.append(os.path.join(directory, filename))
else:
result.append(ref)
return map(os.path.normpath, result)
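# Illustrative example (added; the layout is hypothetical): given
# src/a.js, src/ui/b.js and src/ui/readme.txt on disk,
# ExpandDirectories(['src', 'extra.js']) returns normalized paths for
# src/a.js, src/ui/b.js and extra.js; the .txt file is skipped because only
# .js files are collected from directories.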
class DependencyInfo(object):
"""Represents a dependency that is used to build and walk a tree."""
def __init__(self, filename):
self.filename = filename
self.provides = []
self.requires = []
def __str__(self):
return '%s Provides: %s Requires: %s' % (self.filename,
repr(self.provides),
repr(self.requires))
def BuildDependenciesFromFiles(files):
"""Build a list of dependencies from a list of files.
Description:
Takes a list of files, extracts their provides and requires, and builds
out a list of dependency objects.
Args:
files: a list of files to be parsed for goog.provides and goog.requires.
Returns:
A list of dependency objects, one for each file in the files argument.
"""
result = []
filenames = set()
for filename in files:
if filename in filenames:
continue
# Python 3 requires the file encoding to be specified
if (sys.version_info[0] < 3):
file_handle = open(filename, 'r')
else:
file_handle = open(filename, 'r', encoding='utf8')
try:
dep = CreateDependencyInfo(filename, file_handle)
result.append(dep)
finally:
file_handle.close()
filenames.add(filename)
return result
def CreateDependencyInfo(filename, source):
"""Create dependency info.
Args:
filename: Filename for source.
source: File-like object containing source.
Returns:
A DependencyInfo object with provides and requires filled.
"""
dep = DependencyInfo(filename)
for line in source:
if re.match(req_regex, line):
dep.requires.append(re.search(req_regex, line).group(1))
if re.match(prov_regex, line):
dep.provides.append(re.search(prov_regex, line).group(1))
return dep
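# Illustrative example (added; names are hypothetical): for a source file
# containing the lines
#     goog.provide('myapp.Main');
#     goog.require('goog.dom');
# CreateDependencyInfo('myapp/main.js', source) returns a DependencyInfo
# with provides == ['myapp.Main'] and requires == ['goog.dom'].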
def BuildDependencyHashFromDependencies(deps):
"""Builds a hash for searching dependencies by the namespaces they provide.
Description:
Dependency objects can provide multiple namespaces. This method enumerates
the provides of each dependency and adds them to a hash that can be used
to easily resolve a given dependency by a namespace it provides.
Args:
deps: a list of dependency objects used to build the hash.
Raises:
    Exception: If multiple files try to provide the same namespace.
Returns:
A hash table { namespace: dependency } that can be used to resolve a
dependency by a namespace it provides.
"""
dep_hash = {}
for dep in deps:
for provide in dep.provides:
if provide in dep_hash:
raise Exception('Duplicate provide (%s) in (%s, %s)' % (
provide,
dep_hash[provide].filename,
dep.filename))
dep_hash[provide] = dep
return dep_hash
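# Illustrative example (added): if a single dependency provides both
# 'myapp.Main' and 'myapp.Main.Helper', the returned hash maps both
# namespaces to that same DependencyInfo, so a goog.require() of either name
# resolves to the same file; two files providing one namespace trigger the
# duplicate-provide exception above.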
def CalculateDependencies(paths, inputs):
"""Calculates the dependencies for given inputs.
Description:
This method takes a list of paths (files, directories) and builds a
searchable data structure based on the namespaces that each .js file
provides. It then parses through each input, resolving dependencies
against this data structure. The final output is a list of files,
including the inputs, that represent all of the code that is needed to
compile the given inputs.
Args:
paths: the references (files, directories) that are used to build the
dependency hash.
inputs: the inputs (files, directories, namespaces) that have dependencies
that need to be calculated.
Raises:
Exception: if a provided input is invalid.
Returns:
A list of all files, including inputs, that are needed to compile the given
inputs.
"""
deps = BuildDependenciesFromFiles(paths + inputs)
search_hash = BuildDependencyHashFromDependencies(deps)
result_list = []
seen_list = []
for input_file in inputs:
if IsNamespace(input_file):
namespace = re.search(ns_regex, input_file).group(1)
if namespace not in search_hash:
raise Exception('Invalid namespace (%s)' % namespace)
input_file = search_hash[namespace].filename
if not IsValidFile(input_file) or not IsJsFile(input_file):
raise Exception('Invalid file (%s)' % input_file)
seen_list.append(input_file)
file_handle = open(input_file, 'r')
try:
for line in file_handle:
if re.match(req_regex, line):
require = re.search(req_regex, line).group(1)
ResolveDependencies(require, search_hash, result_list, seen_list)
finally:
file_handle.close()
result_list.append(input_file)
# All files depend on base.js, so put it first.
base_js_path = FindClosureBasePath(paths)
if base_js_path:
result_list.insert(0, base_js_path)
else:
logging.warning('Closure Library base.js not found.')
return result_list
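# Illustrative example (added; file names are hypothetical): with
# inputs=['app.js'] where app.js requires 'goog.dom', the returned list is
# roughly [<closure base.js>, <the files providing goog.dom and whatever
# they require>, 'app.js']; dependencies come before dependants, each input
# comes last, and base.js is forced to the front.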
def FindClosureBasePath(paths):
"""Given a list of file paths, return Closure base.js path, if any.
Args:
paths: A list of paths.
Returns:
The path to Closure's base.js file including filename, if found.
"""
for path in paths:
pathname, filename = os.path.split(path)
if filename == 'base.js':
f = open(path)
is_base = False
# Sanity check that this is the Closure base file. Check that this
# is where goog is defined. This is determined by the @provideGoog
# flag.
for line in f:
if '@provideGoog' in line:
is_base = True
break
f.close()
if is_base:
return path
def ResolveDependencies(require, search_hash, result_list, seen_list):
"""Takes a given requirement and resolves all of the dependencies for it.
Description:
A given requirement may require other dependencies. This method
recursively resolves all dependencies for the given requirement.
Raises:
Exception: when require does not exist in the search_hash.
Args:
require: the namespace to resolve dependencies for.
search_hash: the data structure used for resolving dependencies.
result_list: a list of filenames that have been calculated as dependencies.
This variable is the output for this function.
seen_list: a list of filenames that have been 'seen'. This is required
for the dependency->dependant ordering.
"""
if require not in search_hash:
raise Exception('Missing provider for (%s)' % require)
dep = search_hash[require]
if not dep.filename in seen_list:
seen_list.append(dep.filename)
for sub_require in dep.requires:
ResolveDependencies(sub_require, search_hash, result_list, seen_list)
result_list.append(dep.filename)
def GetDepsLine(dep, base_path):
"""Returns a JS string for a dependency statement in the deps.js file.
Args:
dep: The dependency that we're printing.
base_path: The path to Closure's base.js including filename.
"""
return 'goog.addDependency("%s", %s, %s);' % (
GetRelpath(dep.filename, base_path), dep.provides, dep.requires)
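# Illustrative output (added; paths are hypothetical): for a dependency at
# /proj/myapp/main.js that provides ['myapp.Main'] and requires
# ['goog.dom'], with base_path /proj/closure/goog/base.js, the emitted line is
#     goog.addDependency("../../myapp/main.js", ['myapp.Main'], ['goog.dom']);
# with the path written relative to base.js using forward slashes.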
def GetRelpath(path, start):
"""Return a relative path to |path| from |start|."""
# NOTE: Python 2.6 provides os.path.relpath, which has almost the same
# functionality as this function. Since we want to support 2.4, we have
# to implement it manually. :(
path_list = os.path.abspath(os.path.normpath(path)).split(os.sep)
start_list = os.path.abspath(
os.path.normpath(os.path.dirname(start))).split(os.sep)
common_prefix_count = 0
for i in range(0, min(len(path_list), len(start_list))):
if path_list[i] != start_list[i]:
break
common_prefix_count += 1
# Always use forward slashes, because this will get expanded to a url,
# not a file path.
return '/'.join(['..'] * (len(start_list) - common_prefix_count) +
path_list[common_prefix_count:])
def PrintLine(msg, out):
out.write(msg)
out.write('\n')
def PrintDeps(source_paths, deps, out):
"""Print out a deps.js file from a list of source paths.
Args:
source_paths: Paths that we should generate dependency info for.
deps: Paths that provide dependency info. Their dependency info should
not appear in the deps file.
out: The output file.
Returns:
True on success, false if it was unable to find the base path
to generate deps relative to.
"""
base_path = FindClosureBasePath(source_paths + deps)
if not base_path:
return False
PrintLine('// This file was autogenerated by calcdeps.py', out)
excludesSet = set(deps)
for dep in BuildDependenciesFromFiles(source_paths + deps):
if not dep.filename in excludesSet:
PrintLine(GetDepsLine(dep, base_path), out)
return True
def PrintScript(source_paths, out):
for index, dep in enumerate(source_paths):
PrintLine('// Input %d' % index, out)
f = open(dep, 'r')
PrintLine(f.read(), out)
f.close()
def GetJavaVersion():
"""Returns the string for the current version of Java installed."""
proc = subprocess.Popen(['java', '-version'], stderr=subprocess.PIPE)
proc.wait()
version_line = proc.stderr.read().splitlines()[0]
return version_regex.search(version_line).group()
def FilterByExcludes(options, files):
"""Filters the given files by the exlusions specified at the command line.
Args:
options: The flags to calcdeps.
files: The files to filter.
Returns:
A list of files.
"""
excludes = []
if options.excludes:
excludes = ExpandDirectories(options.excludes)
excludesSet = set(excludes)
return [i for i in files if not i in excludesSet]
def GetPathsFromOptions(options):
"""Generates the path files from flag options.
Args:
options: The flags to calcdeps.
Returns:
A list of files in the specified paths. (strings).
"""
search_paths = options.paths
if not search_paths:
search_paths = ['.'] # Add default folder if no path is specified.
search_paths = ExpandDirectories(search_paths)
return FilterByExcludes(options, search_paths)
def GetInputsFromOptions(options):
"""Generates the inputs from flag options.
Args:
options: The flags to calcdeps.
Returns:
A list of inputs (strings).
"""
inputs = options.inputs
if not inputs: # Parse stdin
logging.info('No inputs specified. Reading from stdin...')
inputs = filter(None, [line.strip('\n') for line in sys.stdin.readlines()])
logging.info('Scanning files...')
inputs = ExpandDirectories(inputs)
return FilterByExcludes(options, inputs)
def Compile(compiler_jar_path, source_paths, out, flags=None):
"""Prepares command-line call to Closure compiler.
Args:
compiler_jar_path: Path to the Closure compiler .jar file.
source_paths: Source paths to build, in order.
flags: A list of additional flags to pass on to Closure compiler.
"""
args = ['java', '-jar', compiler_jar_path]
for path in source_paths:
args += ['--js', path]
if flags:
args += flags
logging.info('Compiling with the following command: %s', ' '.join(args))
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
(stdoutdata, stderrdata) = proc.communicate()
if proc.returncode != 0:
logging.error('JavaScript compilation failed.')
sys.exit(1)
else:
out.write(stdoutdata)
def main():
"""The entrypoint for this script."""
logging.basicConfig(format='calcdeps.py: %(message)s', level=logging.INFO)
usage = 'usage: %prog [options] arg'
parser = optparse.OptionParser(usage)
parser.add_option('-i',
'--input',
dest='inputs',
action='append',
help='The inputs to calculate dependencies for. Valid '
'values can be files, directories, or namespaces '
'(ns:goog.net.XhrIo). Only relevant to "list" and '
'"script" output.')
parser.add_option('-p',
'--path',
dest='paths',
action='append',
help='The paths that should be traversed to build the '
'dependencies.')
parser.add_option('-d',
'--dep',
dest='deps',
action='append',
help='Directories or files that should be traversed to '
'find required dependencies for the deps file. '
'Does not generate dependency information for names '
'provided by these files. Only useful in "deps" mode.')
parser.add_option('-e',
'--exclude',
dest='excludes',
action='append',
help='Files or directories to exclude from the --path '
'and --input flags')
parser.add_option('-o',
'--output_mode',
dest='output_mode',
action='store',
default='list',
help='The type of output to generate from this script. '
'Options are "list" for a list of filenames, "script" '
'for a single script containing the contents of all the '
'file, "deps" to generate a deps.js file for all '
'paths, or "compiled" to produce compiled output with '
'the Closure compiler.')
parser.add_option('-c',
'--compiler_jar',
dest='compiler_jar',
action='store',
help='The location of the Closure compiler .jar file.')
parser.add_option('-f',
'--compiler_flag',
                    '--compiler_flags',  # for backwards compatibility
dest='compiler_flags',
action='append',
help='Additional flag to pass to the Closure compiler. '
'May be specified multiple times to pass multiple flags.')
parser.add_option('--output_file',
dest='output_file',
action='store',
help=('If specified, write output to this path instead of '
'writing to standard output.'))
(options, args) = parser.parse_args()
search_paths = GetPathsFromOptions(options)
if options.output_file:
out = open(options.output_file, 'w')
else:
out = sys.stdout
if options.output_mode == 'deps':
result = PrintDeps(search_paths, ExpandDirectories(options.deps or []), out)
if not result:
logging.error('Could not find Closure Library in the specified paths')
sys.exit(1)
return
inputs = GetInputsFromOptions(options)
logging.info('Finding Closure dependencies...')
deps = CalculateDependencies(search_paths, inputs)
output_mode = options.output_mode
if output_mode == 'script':
PrintScript(deps, out)
elif output_mode == 'list':
# Just print out a dep per line
for dep in deps:
PrintLine(dep, out)
elif output_mode == 'compiled':
# Make sure a .jar is specified.
if not options.compiler_jar:
logging.error('--compiler_jar flag must be specified if --output is '
'"compiled"')
sys.exit(1)
# User friendly version check.
if distutils and not (distutils.version.LooseVersion(GetJavaVersion()) >
distutils.version.LooseVersion('1.6')):
logging.error('Closure Compiler requires Java 1.6 or higher.')
logging.error('Please visit http://www.java.com/getjava')
sys.exit(1)
Compile(options.compiler_jar, deps, out, options.compiler_flags)
else:
logging.error('Invalid value for --output flag.')
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-3.0 |
rikiji/gnome-wsname-applet | wsname.py | 1 | 6204 | #!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import gtk
import gnomeapplet
import gobject
import wnck
import sys
import codecs
import random
# debugging
# import os
# new_stdout = open ("/tmp/debug.stdout", "w")
# new_stderr = open ("/tmp/debug.stderr", "w")
# os.dup2(new_stdout.fileno(), sys.stdout.fileno())
# os.dup2(new_stderr.fileno(), sys.stderr.fileno())
class AlignedWindow(gtk.Window):
def __init__(self, widgetToAlignWith):
gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
self.set_decorated(False)
self.widgetToAlignWith = widgetToAlignWith
def positionWindow(self):
# Get our own dimensions & position
self.realize()
gtk.gdk.flush()
#print self.window.get_geometry()
ourWidth = (self.window.get_geometry())[2]
ourHeight = (self.window.get_geometry())[3]
# Skip the taskbar, and the pager, stick and stay on top
self.stick()
# not wrapped self.set_skip_taskbar_hint(True)
# not wrapped self.set_skip_pager_hint(True)
self.set_type_hint (gtk.gdk.WINDOW_TYPE_HINT_DOCK)
# Get the dimensions/position of the widgetToAlignWith
self.widgetToAlignWith.realize()
entryX, entryY = self.widgetToAlignWith.window.get_origin()
entryWidth = (self.widgetToAlignWith.window.get_geometry())[2]
entryHeight = (self.widgetToAlignWith.window.get_geometry())[3]
# Get the screen dimensions
screenHeight = gtk.gdk.screen_height()
screenWidth = gtk.gdk.screen_width()
if entryX + ourWidth < screenWidth:
# Align to the left of the entry
newX = entryX
else:
# Align to the right of the entry
newX = (entryX + entryWidth) - ourWidth
if entryY + entryHeight + ourHeight < screenHeight:
# Align to the bottom of the entry
newY = entryY + entryHeight
else:
newY = entryY - ourHeight
# -"Coordinates locked in captain."
# -"Engage."
self.move(newX, newY)
self.show()
class WSNameEntryWindow(AlignedWindow):
def __init__(self, widget, app):
AlignedWindow.__init__(self, widget)
self.app = app
frame = gtk.Frame()
#frame.set_shadow_type(gtk.SHADOW_OUT)
self.entry = gtk.Entry()
frame.add(self.entry)
self.add(frame)
self.set_default_size(0,0)
self.entry.connect("activate", self._on_activate)
self.entry.connect("key-release-event", self._on_key_release)
self.entry.connect("leave-notify-event", self._on_activate)
def _on_activate(self, event):
self.app.workspace.change_name(self.entry.get_text())
        self.app.entryvisible = False
self.hide()
def _on_key_release(self, widget, event):
if event.keyval == gtk.keysyms.Escape:
self.app.entryvisible = False
            self.hide()
class WSNameApplet(gnomeapplet.Applet):
_name_change_handler_id = None
workspace = None
entryvisible = False
def __init__(self,applet,iid):
self.applet = applet
self.menu = gtk.MenuBar()
self.menuitem = gtk.MenuItem()
self.label = gtk.Label()
self.label.set_size_request(120,-1)
self.menuitem.connect("button-press-event", self._on_button_press)
self.applet.connect("change-background", self._on_change_background)
self.applet.add(self.menu)
self.menu.add(self.menuitem)
self.menuitem.add(self.label)
self.screen = wnck.screen_get_default()
self.screen.connect("active_workspace_changed", self._on_workspace_changed)
self.entry_window = WSNameEntryWindow(self.applet, self)
self.workspace = really_get_active_workspace(self.screen)
self.show_workspace_name()
self._name_change_handler_id = None
def toggle_entry(self):
if self.entryvisible == True:
self.entryvisible = False
self.entry_window.hide()
else:
self.entryvisible = True
self.entry_window.positionWindow()
self.entry_window.show_all()
self.entry_window.present()
self.entry_window.entry.set_text(self.workspace.get_name())
gobject.timeout_add(0, self.entry_window.entry.grab_focus)
def _on_button_press(self, menuitem, event):
if event.button != 1:
menuitem.stop_emission("button-press-event")
self.toggle_entry()
def _on_workspace_changed(self, event, old_workspace):
if (self._name_change_handler_id):
self.workspace.disconnect(self._name_change_handler_id)
self.workspace = really_get_active_workspace(self.screen)
self._name_change_handler_id = self.workspace.connect("name-changed", self._on_workspace_name_changed)
self.show_workspace_name()
def _on_workspace_name_changed(self, event):
self.show_workspace_name()
def show_workspace_name(self):
self.label.set_text(self.workspace.get_name())
self.applet.show_all()
def _on_change_background(self, applet, type, color, pixmap):
applet.set_style(None)
rc_style = gtk.RcStyle()
applet.modify_style(rc_style)
if (type == gnomeapplet.COLOR_BACKGROUND):
applet.modify_bg(gtk.STATE_NORMAL, color)
elif (type == gnomeapplet.PIXMAP_BACKGROUND):
style = applet.style
style.bg_pixmap[gtk.STATE_NORMAL] = pixmap
self.applet.set_style(style)
def really_get_active_workspace(screen):
# This bit is needed because wnck is asynchronous.
while gtk.events_pending():
gtk.main_iteration()
return screen.get_active_workspace()
gobject.type_register(WSNameApplet)
def app_factory(applet,iid):
return WSNameApplet(applet,iid)
if len(sys.argv) == 2:
main_window = gtk.Window(gtk.WINDOW_TOPLEVEL)
main_window.connect("destroy", gtk.main_quit)
main_window.set_title("Applet")
app = gnomeapplet.Applet()
app_factory(app,None)
app.reparent(main_window)
main_window.show_all()
gtk.main()
sys.exit()
gnomeapplet.bonobo_factory("OAFIID:GNOME_wsname_Factory", WSNameApplet.__gtype__, "ws-name", "0", app_factory)
| gpl-2.0 |
robin900/sqlalchemy | test/ext/test_indexable.py | 2 | 10585 | from sqlalchemy.testing import assert_raises
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, Text
from sqlalchemy.sql.sqltypes import ARRAY, JSON
from sqlalchemy.testing.schema import Column
from sqlalchemy.orm import Session
from sqlalchemy.testing import fixtures
from sqlalchemy.ext.indexable import index_property
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.testing import eq_, ne_, is_, in_, not_in_
from sqlalchemy import inspect
class IndexPropertyTest(fixtures.TestBase):
def test_array(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column('id', Integer, primary_key=True)
array = Column('_array', ARRAY(Integer),
default=[])
first = index_property('array', 0)
tenth = index_property('array', 9)
a = A(array=[1, 2, 3])
eq_(a.first, 1)
assert_raises(AttributeError, lambda: a.tenth)
a.first = 100
eq_(a.first, 100)
eq_(a.array, [100, 2, 3])
del a.first
eq_(a.first, 2)
a2 = A(first=5)
eq_(a2.first, 5)
eq_(a2.array, [5])
def test_array_longinit(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column('id', Integer, primary_key=True)
array = Column('_array', ARRAY(Integer),
default=[])
first = index_property('array', 0)
fifth = index_property('array', 4)
a1 = A(fifth=10)
a2 = A(first=5)
eq_(a1.array, [None, None, None, None, 10])
eq_(a2.array, [5])
assert_raises(IndexError, setattr, a2, "fifth", 10)
def test_json(self):
Base = declarative_base()
class J(Base):
__tablename__ = 'j'
id = Column('id', Integer, primary_key=True)
json = Column('_json', JSON, default={})
field = index_property('json', 'field')
j = J(json={'a': 1, 'b': 2})
assert_raises(AttributeError, lambda: j.field)
j.field = 'test'
eq_(j.field, 'test')
eq_(j.json, {'a': 1, 'b': 2, 'field': 'test'})
j2 = J(field='test')
eq_(j2.json, {"field": "test"})
eq_(j2.field, "test")
def test_value_is_none_attributeerror(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column('id', Integer, primary_key=True)
array = Column('_array', ARRAY(Integer))
first = index_property('array', 1)
a = A()
assert_raises(AttributeError, getattr, a, "first")
assert_raises(AttributeError, delattr, a, "first")
def test_get_attribute_error(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column('id', Integer, primary_key=True)
array = Column('_array', ARRAY(Integer))
first = index_property('array', 1)
a = A(array=[])
assert_raises(AttributeError, lambda: a.first)
def test_set_immutable(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'a'
id = Column(Integer, primary_key=True)
array = Column(ARRAY(Integer))
first = index_property('array', 1, mutable=False)
a = A()
def set_():
a.first = 10
assert_raises(AttributeError, set_)
def test_set_mutable_dict(self):
Base = declarative_base()
class J(Base):
__tablename__ = 'j'
id = Column(Integer, primary_key=True)
json = Column(JSON, default={})
field = index_property('json', 'field')
j = J()
j.field = 10
j.json = {}
assert_raises(AttributeError, lambda: j.field)
assert_raises(AttributeError, delattr, j, "field")
j.field = 10
eq_(j.field, 10)
def test_get_default_value(self):
Base = declarative_base()
class J(Base):
__tablename__ = 'j'
id = Column(Integer, primary_key=True)
json = Column(JSON, default={})
default = index_property('json', 'field', default='default')
none = index_property('json', 'field', default=None)
j = J()
assert j.json is None
assert j.default == 'default'
assert j.none is None
j.json = {}
assert j.default == 'default'
assert j.none is None
j.default = None
assert j.default is None
assert j.none is None
j.none = 10
        assert j.default == 10
assert j.none == 10
class IndexPropertyArrayTest(fixtures.DeclarativeMappedTest):
__requires__ = ('array_type',)
__backend__ = True
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Array(fixtures.ComparableEntity, Base):
__tablename__ = "array"
id = Column(sa.Integer, primary_key=True,
test_needs_autoincrement=True)
array = Column(ARRAY(Integer), default=[])
array0 = Column(ARRAY(Integer, zero_indexes=True), default=[])
first = index_property('array', 0)
first0 = index_property('array0', 0, onebased=False)
def test_query(self):
Array = self.classes.Array
s = Session(testing.db)
s.add_all([
Array(),
Array(array=[1, 2, 3], array0=[1, 2, 3]),
Array(array=[4, 5, 6], array0=[4, 5, 6])])
s.commit()
a1 = s.query(Array).filter(Array.array == [1, 2, 3]).one()
a2 = s.query(Array).filter(Array.first == 1).one()
eq_(a1.id, a2.id)
a3 = s.query(Array).filter(Array.first == 4).one()
ne_(a1.id, a3.id)
a4 = s.query(Array).filter(Array.first0 == 1).one()
eq_(a1.id, a4.id)
a5 = s.query(Array).filter(Array.first0 == 4).one()
ne_(a1.id, a5.id)
def test_mutable(self):
Array = self.classes.Array
s = Session(testing.db)
a = Array(array=[1, 2, 3])
s.add(a)
s.commit()
a.first = 42
eq_(a.first, 42)
s.commit()
eq_(a.first, 42)
del a.first
eq_(a.first, 2)
s.commit()
eq_(a.first, 2)
def test_modified(self):
from sqlalchemy import inspect
Array = self.classes.Array
s = Session(testing.db)
a = Array(array=[1, 2, 3])
s.add(a)
s.commit()
i = inspect(a)
is_(i.modified, False)
in_('array', i.unmodified)
a.first = 10
is_(i.modified, True)
not_in_('array', i.unmodified)
class IndexPropertyJsonTest(fixtures.DeclarativeMappedTest):
# TODO: remove reliance on "astext" for these tests
__requires__ = ('json_type',)
__only_on__ = 'postgresql'
__backend__ = True
@classmethod
def setup_classes(cls):
from sqlalchemy.dialects.postgresql import JSON
Base = cls.DeclarativeBasic
class json_property(index_property):
def __init__(self, attr_name, index, cast_type):
super(json_property, self).__init__(attr_name, index)
self.cast_type = cast_type
def expr(self, model):
expr = super(json_property, self).expr(model)
return expr.astext.cast(self.cast_type)
class Json(fixtures.ComparableEntity, Base):
__tablename__ = "json"
id = Column(sa.Integer, primary_key=True,
test_needs_autoincrement=True)
json = Column(JSON, default={})
field = index_property('json', 'field')
json_field = index_property('json', 'field')
int_field = json_property('json', 'field', Integer)
text_field = json_property('json', 'field', Text)
other = index_property('json', 'other')
subfield = json_property('other', 'field', Text)
def test_query(self):
Json = self.classes.Json
s = Session(testing.db)
s.add_all([
Json(),
Json(json={'field': 10}),
Json(json={'field': 20})])
s.commit()
a1 = s.query(Json).filter(Json.json['field'].astext.cast(Integer) == 10)\
.one()
a2 = s.query(Json).filter(Json.field.astext == '10').one()
eq_(a1.id, a2.id)
a3 = s.query(Json).filter(Json.field.astext == '20').one()
ne_(a1.id, a3.id)
a4 = s.query(Json).filter(Json.json_field.astext == '10').one()
eq_(a2.id, a4.id)
a5 = s.query(Json).filter(Json.int_field == 10).one()
eq_(a2.id, a5.id)
a6 = s.query(Json).filter(Json.text_field == '10').one()
eq_(a2.id, a6.id)
def test_mutable(self):
Json = self.classes.Json
s = Session(testing.db)
j = Json(json={})
s.add(j)
s.commit()
j.other = 42
eq_(j.other, 42)
s.commit()
eq_(j.other, 42)
def test_modified(self):
Json = self.classes.Json
s = Session(testing.db)
j = Json(json={})
s.add(j)
s.commit()
i = inspect(j)
is_(i.modified, False)
in_('json', i.unmodified)
j.other = 42
is_(i.modified, True)
not_in_('json', i.unmodified)
def test_cast_type(self):
Json = self.classes.Json
s = Session(testing.db)
j = Json(json={'field': 10})
s.add(j)
s.commit()
jq = s.query(Json).filter(Json.int_field == 10).one()
eq_(j.id, jq.id)
jq = s.query(Json).filter(Json.text_field == '10').one()
eq_(j.id, jq.id)
jq = s.query(Json).filter(Json.json_field.astext == '10').one()
eq_(j.id, jq.id)
jq = s.query(Json).filter(Json.text_field == 'wrong').first()
is_(jq, None)
j.json = {'field': True}
s.commit()
jq = s.query(Json).filter(Json.text_field == 'true').one()
eq_(j.id, jq.id)
def test_multi_dimension(self):
Json = self.classes.Json
s = Session(testing.db)
j = Json(json={'other': {'field': 'multi'}})
s.add(j)
s.commit()
eq_(j.other, {'field': 'multi'})
eq_(j.subfield, 'multi')
jq = s.query(Json).filter(Json.subfield == 'multi').first()
eq_(j.id, jq.id)
| mit |
brianwoo/django-tutorial | build/Django/django/contrib/auth/forms.py | 93 | 13799 | from __future__ import unicode_literals
from collections import OrderedDict
from django import forms
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth.hashers import (
UNUSABLE_PASSWORD_PREFIX, identify_hasher,
)
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMultiAlternatives
from django.forms.utils import flatatt
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.html import format_html, format_html_join
from django.utils.http import urlsafe_base64_encode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext, ugettext_lazy as _
class ReadOnlyPasswordHashWidget(forms.Widget):
def render(self, name, value, attrs):
encoded = value
final_attrs = self.build_attrs(attrs)
if not encoded or encoded.startswith(UNUSABLE_PASSWORD_PREFIX):
summary = mark_safe("<strong>%s</strong>" % ugettext("No password set."))
else:
try:
hasher = identify_hasher(encoded)
except ValueError:
summary = mark_safe("<strong>%s</strong>" % ugettext(
"Invalid password format or unknown hashing algorithm."))
else:
summary = format_html_join('',
"<strong>{}</strong>: {} ",
((ugettext(key), value)
for key, value in hasher.safe_summary(encoded).items())
)
return format_html("<div{}>{}</div>", flatatt(final_attrs), summary)
class ReadOnlyPasswordHashField(forms.Field):
widget = ReadOnlyPasswordHashWidget
def __init__(self, *args, **kwargs):
kwargs.setdefault("required", False)
super(ReadOnlyPasswordHashField, self).__init__(*args, **kwargs)
def bound_data(self, data, initial):
# Always return initial because the widget doesn't
# render an input field.
return initial
def has_changed(self, initial, data):
return False
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as above, for verification."))
class Meta:
model = User
fields = ("username",)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
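# Illustrative usage sketch, not part of the original module: how the
# UserCreationForm defined above is typically driven from calling code. The
# field names ('username', 'password1', 'password2') come from the form
# definition; the helper itself is hypothetical and never runs at import time.
def _example_create_user(username, password):
    form = UserCreationForm(data={'username': username,
                                  'password1': password,
                                  'password2': password})
    if form.is_valid():
        # is_valid() has already run clean_password2(); save() hashes the
        # password via set_password() before writing the user.
        return form.save()
    return form.errors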
class UserChangeForm(forms.ModelForm):
password = ReadOnlyPasswordHashField(label=_("Password"),
help_text=_("Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"password/\">this form</a>."))
class Meta:
model = User
fields = '__all__'
def __init__(self, *args, **kwargs):
super(UserChangeForm, self).__init__(*args, **kwargs)
f = self.fields.get('user_permissions', None)
if f is not None:
f.queryset = f.queryset.select_related('content_type')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = forms.CharField(max_length=254)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct %(username)s and password. "
"Note that both fields may be case-sensitive."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
The 'request' parameter is set for custom auth use by subclasses.
The form data comes in via the standard 'data' kwarg.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
UserModel = get_user_model()
self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
if self.fields['username'].label is None:
self.fields['username'].label = capfirst(self.username_field.verbose_name)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
else:
self.confirm_login_allowed(self.user_cache)
return self.cleaned_data
def confirm_login_allowed(self, user):
"""
Controls whether the given User may log in. This is a policy setting,
independent of end-user authentication. This default behavior is to
allow login by active users, and reject login by inactive users.
If the given user cannot log in, this method should raise a
``forms.ValidationError``.
If the given user may log in, this method should return None.
"""
if not user.is_active:
raise forms.ValidationError(
self.error_messages['inactive'],
code='inactive',
)
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
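# Illustrative sketch, not part of the original module: confirm_login_allowed()
# above is the intended hook for custom login policies. This hypothetical
# subclass reuses the base check for inactive users and additionally rejects
# users carrying an assumed 'is_bot' attribute.
class _ExampleStrictAuthenticationForm(AuthenticationForm):
    def confirm_login_allowed(self, user):
        super(_ExampleStrictAuthenticationForm, self).confirm_login_allowed(user)
        if getattr(user, 'is_bot', False):
            raise forms.ValidationError(
                _("Automated accounts cannot log in."),
                code='no_bots',
            )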
class PasswordResetForm(forms.Form):
email = forms.EmailField(label=_("Email"), max_length=254)
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name=None):
"""
Sends a django.core.mail.EmailMultiAlternatives to `to_email`.
"""
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
body = loader.render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
if html_email_template_name is not None:
html_email = loader.render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, 'text/html')
email_message.send()
def get_users(self, email):
"""Given an email, return matching user(s) who should receive a reset.
This allows subclasses to more easily customize the default policies
that prevent inactive users and users with unusable passwords from
resetting their password.
"""
active_users = get_user_model()._default_manager.filter(
email__iexact=email, is_active=True)
return (u for u in active_users if u.has_usable_password())
def save(self, domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=default_token_generator,
from_email=None, request=None, html_email_template_name=None):
"""
Generates a one-use only link for resetting password and sends to the
user.
"""
email = self.cleaned_data["email"]
for user in self.get_users(email):
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
context = {
'email': user.email,
'domain': domain,
'site_name': site_name,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'user': user,
'token': token_generator.make_token(user),
'protocol': 'https' if use_https else 'http',
}
self.send_mail(subject_template_name, email_template_name,
context, from_email, user.email,
html_email_template_name=html_email_template_name)
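# Illustrative sketch, not part of the original module: get_users() above is the
# place to customise who may receive a reset email. This hypothetical subclass
# also includes active users whose passwords are unusable (for example, accounts
# created through an external login provider).
class _ExampleLenientPasswordResetForm(PasswordResetForm):
    def get_users(self, email):
        return get_user_model()._default_manager.filter(
            email__iexact=email, is_active=True)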
class SetPasswordForm(forms.Form):
"""
    A form that lets a user set their password without entering the old
password
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
new_password1 = forms.CharField(label=_("New password"),
widget=forms.PasswordInput)
new_password2 = forms.CharField(label=_("New password confirmation"),
widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
self.user = user
super(SetPasswordForm, self).__init__(*args, **kwargs)
def clean_new_password2(self):
password1 = self.cleaned_data.get('new_password1')
password2 = self.cleaned_data.get('new_password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['new_password1'])
if commit:
self.user.save()
return self.user
class PasswordChangeForm(SetPasswordForm):
"""
A form that lets a user change their password by entering their old
password.
"""
error_messages = dict(SetPasswordForm.error_messages, **{
'password_incorrect': _("Your old password was entered incorrectly. "
"Please enter it again."),
})
old_password = forms.CharField(label=_("Old password"),
widget=forms.PasswordInput)
def clean_old_password(self):
"""
Validates that the old_password field is correct.
"""
old_password = self.cleaned_data["old_password"]
if not self.user.check_password(old_password):
raise forms.ValidationError(
self.error_messages['password_incorrect'],
code='password_incorrect',
)
return old_password
PasswordChangeForm.base_fields = OrderedDict(
(k, PasswordChangeForm.base_fields[k])
for k in ['old_password', 'new_password1', 'new_password2']
)
class AdminPasswordChangeForm(forms.Form):
"""
A form used to change the password of a user in the admin interface.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
required_css_class = 'required'
password1 = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput,
)
password2 = forms.CharField(
label=_("Password (again)"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as above, for verification."),
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def save(self, commit=True):
"""
Saves the new password.
"""
self.user.set_password(self.cleaned_data["password1"])
if commit:
self.user.save()
return self.user
def _get_changed_data(self):
data = super(AdminPasswordChangeForm, self).changed_data
for name in self.fields.keys():
if name not in data:
return []
return ['password']
changed_data = property(_get_changed_data)
| gpl-3.0 |
smalls257/VRvisu | Library/External.LCA_RESTRICTED/Languages/CPython/27/Lib/ctypes/test/test_struct_fields.py | 264 | 1503 | import unittest
from ctypes import *
class StructFieldsTestCase(unittest.TestCase):
# Structure/Union classes must get 'finalized' sooner or
# later, when one of these things happen:
#
# 1. _fields_ is set.
# 2. An instance is created.
# 3. The type is used as field of another Structure/Union.
# 4. The type is subclassed
#
# When they are finalized, assigning _fields_ is no longer allowed.
def test_1_A(self):
class X(Structure):
pass
self.assertEqual(sizeof(X), 0) # not finalized
X._fields_ = [] # finalized
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_1_B(self):
class X(Structure):
_fields_ = [] # finalized
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_2(self):
class X(Structure):
pass
X()
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_3(self):
class X(Structure):
pass
class Y(Structure):
_fields_ = [("x", X)] # finalizes X
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_4(self):
class X(Structure):
pass
class Y(X):
pass
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
Y._fields_ = []
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/tensorflow/contrib/labeled_tensor/python/ops/_typecheck.py | 52 | 10165 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Minimal runtime type checking library.
This module should not be considered public API.
"""
# TODO(ericmc,shoyer): Delete this in favor of using pytype or mypy
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import inspect
import re
# used for register_type_abbreviation and _type_repr below.
_TYPE_ABBREVIATIONS = {}
class Type(object):
"""Base class for type checker types.
The custom types defined in this module are based on types in the standard
library's typing module (in Python 3.5):
https://docs.python.org/3/library/typing.html
The only difference should be that we use actual instances of Type classes to
represent custom types rather than the metaclass magic typing uses to create
new class objects. In practice, all this should mean is that we use
`List(int)` rather than `List[int]`.
Custom types should implement __instancecheck__ and inherit from Type. Every
argument in the constructor must be a type or Type instance, and these
arguments must be stored as a tuple on the `_types` attribute.
"""
def __init__(self, *types):
self._types = types
def __repr__(self):
args_repr = ", ".join(repr(t) for t in self._types)
return "typecheck.%s(%s)" % (type(self).__name__, args_repr)
class _SingleArgumentType(Type):
"""Use this subclass for parametric types that accept only one argument."""
def __init__(self, tpe):
super(_SingleArgumentType, self).__init__(tpe)
@property
def _type(self):
tpe, = self._types # pylint: disable=unbalanced-tuple-unpacking
return tpe
class _TwoArgumentType(Type):
"""Use this subclass for parametric types that accept two arguments."""
def __init__(self, first_type, second_type):
super(_TwoArgumentType, self).__init__(first_type, second_type)
class Union(Type):
"""A sum type.
A correct type is any of the types provided.
"""
def __instancecheck__(self, instance):
return isinstance(instance, self._types)
class Optional(_SingleArgumentType):
"""An optional type.
A correct type is either the provided type or NoneType.
"""
def __instancecheck__(self, instance):
# types.NoneType does not exist in Python 3
return isinstance(instance, (self._type, type(None)))
class List(_SingleArgumentType):
"""A typed list.
A correct type is a list where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, list)
and all(isinstance(x, self._type) for x in instance))
class Sequence(_SingleArgumentType):
"""A typed sequence.
A correct type is a sequence where each element has the single provided type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Sequence)
and all(isinstance(x, self._type) for x in instance))
class Collection(_SingleArgumentType):
"""A sized, iterable container.
A correct type is an iterable and container with known size where each element
has the single provided type.
We use this in preference to Iterable because we check each instance of the
iterable at runtime, and hence need to avoid iterables that could be
exhausted.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, collections.Iterable)
and isinstance(instance, collections.Sized)
and isinstance(instance, collections.Container)
and all(isinstance(x, self._type) for x in instance))
class Tuple(Type):
"""A typed tuple.
A correct type is a tuple with the correct length where each element has
the correct type.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, tuple)
and len(instance) == len(self._types)
and all(isinstance(x, t) for x, t in zip(instance, self._types)))
class Mapping(_TwoArgumentType):
"""A typed mapping.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
key_type, value_type = self._types # pylint: disable=unbalanced-tuple-unpacking
return (isinstance(instance, collections.Mapping)
and all(isinstance(k, key_type) for k in instance.keys())
            and all(isinstance(v, value_type) for v in instance.values()))
class Dict(Mapping):
"""A typed dict.
A correct type has the correct parametric types for keys and values.
"""
def __instancecheck__(self, instance):
return (isinstance(instance, dict)
and super(Dict, self).__instancecheck__(instance))
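def _example_parametric_types():
  """Hedged, illustrative sketch that is not part of the original module: the
  parametric types above are plain instances meant to be used directly with
  isinstance(), which is exactly how accepts()/returns() below apply them."""
  assert isinstance([1, 2, 3], List(int))
  assert isinstance({'a': 1}, Mapping(str, int))
  assert isinstance((1, 'x'), Tuple(int, str))
  assert not isinstance([1, 'x'], List(int))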
def _replace_forward_references(t, context):
"""Replace forward references in the given type."""
if isinstance(t, str):
return context[t]
elif isinstance(t, Type):
return type(t)(*[_replace_forward_references(t, context) for t in t._types]) # pylint: disable=protected-access
else:
return t
def register_type_abbreviation(name, alias):
"""Register an abbreviation for a type in typecheck tracebacks.
This makes otherwise very long typecheck errors much more readable.
Example:
typecheck.register_type_abbreviation(tf.Dimension, 'tf.Dimension')
Args:
name: type or class to abbreviate.
alias: string alias to substitute.
"""
_TYPE_ABBREVIATIONS[name] = alias
def _type_repr(t):
"""A more succinct repr for typecheck tracebacks."""
string = repr(t)
for type_, alias in _TYPE_ABBREVIATIONS.items():
string = string.replace(repr(type_), alias)
string = re.sub(r"<(class|type) '([\w.]+)'>", r"\2", string)
string = re.sub(r"typecheck\.(\w+)", r"\1", string)
return string
class Error(TypeError):
"""Exception for typecheck failures."""
def accepts(*types):
"""A decorator which checks the input types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types.
Returns:
A function to use as a decorator.
"""
def check_accepts(f):
"""Check the types."""
spec = inspect.getargspec(f)
num_function_arguments = len(spec.args)
if len(types) != num_function_arguments:
raise Error(
"Function %r has %d arguments but only %d types were provided in the "
"annotation." % (f, num_function_arguments, len(types)))
if spec.defaults:
num_defaults = len(spec.defaults)
for (name, a, t) in zip(spec.args[-num_defaults:],
spec.defaults,
types[-num_defaults:]):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("default argument value %r of type %r is not an instance "
"of the allowed type %s for the %s argument to %r"
% (a, type(a), _type_repr(allowed_type), name, f))
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
for (a, t) in zip(args, types):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(a, allowed_type):
raise Error("%r of type %r is not an instance of the allowed type %s "
"for %r" % (a, type(a), _type_repr(allowed_type), f))
return f(*args, **kwds)
return new_f
return check_accepts
def returns(*types):
"""A decorator which checks the return types of a function.
Based on:
http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
The above draws from:
https://www.python.org/dev/peps/pep-0318/
Args:
*types: A list of Python types.
A list of one element corresponds to a single return value.
A list of several elements corresponds to several return values.
Note that a function with no explicit return value has an implicit
NoneType return and should be annotated correspondingly.
Returns:
A function to use as a decorator.
"""
def check_returns(f):
"""Check the types."""
if not types:
raise TypeError("A return type annotation must contain at least one type")
@functools.wraps(f)
def new_f(*args, **kwds):
"""A helper function."""
return_value = f(*args, **kwds)
if len(types) == 1:
# The function has a single return value.
allowed_type = _replace_forward_references(types[0], f.__globals__)
if not isinstance(return_value, allowed_type):
raise Error("%r of type %r is not an instance of the allowed type %s "
"for %r"
% (return_value, type(return_value),
_type_repr(allowed_type), f))
else:
if len(return_value) != len(types):
raise Error(
"Function %r has %d return values but only %d types were "
"provided in the annotation." %
(f, len(return_value), len(types)))
for (r, t) in zip(return_value, types):
allowed_type = _replace_forward_references(t, f.__globals__)
if not isinstance(r, allowed_type):
raise Error("%r of type %r is not an instance of allowed type %s "
"for %r" % (r, type(r), _type_repr(allowed_type), f))
return return_value
return new_f
return check_returns
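# Hedged usage sketch, not part of the original module: a hypothetical function
# annotated with the decorators above. accepts() is applied closest to the
# function so that inspect.getargspec() still sees the real signature.
@returns(bool)
@accepts(int, Optional(str))
def _example_is_named(count, name=None):
  """Return True when a name was supplied for the given count."""
  return name is not None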
| apache-2.0 |
python-ning/blog | blogchen/blog/migrations/0006_auto_20150831_2257.py | 1 | 1727 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20150830_2306'),
]
operations = [
migrations.AddField(
model_name='comment',
name='email',
field=models.EmailField(max_length=50, null=True, verbose_name=b'\xe9\x82\xae\xe7\xae\xb1\xe5\x9c\xb0\xe5\x9d\x80', blank=True),
),
migrations.AddField(
model_name='comment',
name='url',
field=models.URLField(max_length=100, null=True, verbose_name=b'\xe4\xb8\xaa\xe4\xba\xba\xe7\xbd\x91\xe9\xa1\xb5\xe5\x9c\xb0\xe5\x9d\x80', blank=True),
),
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(verbose_name=b'\xe7\x94\xa8\xe6\x88\xb7', blank=True, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='comment',
name='username',
field=models.CharField(max_length=30, null=True, verbose_name=b'\xe7\x94\xa8\xe6\x88\xb7\xe5\x90\x8d', blank=True),
),
migrations.AddField(
model_name='user',
name='url',
field=models.URLField(max_length=100, null=True, verbose_name=b'\xe4\xb8\xaa\xe4\xba\xba\xe7\xbd\x91\xe9\xa1\xb5\xe5\x9c\xb0\xe5\x9d\x80', blank=True),
),
migrations.AlterField(
model_name='article',
name='is_recommend',
field=models.BooleanField(default=1, verbose_name=b'\xe6\x98\xaf\xe5\x90\xa6\xe6\x8e\xa8\xe8\x8d\x90'),
),
]
| mpl-2.0 |
alisw/AliPhysics | PWGMM/MC/aligenqa/aligenqa/utils.py | 37 | 7697 | import os
import random
import string
import subprocess
import re
from rootpy import asrootpy
from rootpy.plotting import Graph
def gen_random_name():
"""Generate a random name for temp hists"""
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(25))
def get_est_dirs(sums, considered_ests):
return (somedir for somedir in sums if somedir.GetName() in considered_ests)
def make_estimator_title(name):
if name == 'EtaLt05':
return '|#eta|#leq0.5'
elif name == 'EtaLt08':
return '|#eta|#leq0.8'
elif name == 'EtaLt15':
return '|#eta|#leq1.5'
elif name == 'Eta08_15':
return '0.8#leq|#eta|#leq1.5'
else:
return name
def remap_x_values(hist, corr_hist):
"""
    Map the x values of hist to the y values of corr_hist.
    In order to do so, it is necessary that the x values of hist are also present as x-values in corr_hist.
Parameters
----------
hist : Hist1D
corr_hist : Hist2D
Correlations between the quantity on hist's x-axis (also corr_hist's xaxis) and the new
        quantity to plot against (on corr_hist's y-axis).
Returns
-------
Graph
Graph of the remapped hist. Errors are ??? TODO
"""
hist = asrootpy(hist)
corr_hist = asrootpy(corr_hist)
profx = asrootpy(corr_hist.ProfileX(gen_random_name()))
rt_graph = Graph()
for i, (nch_ref_bin, counter_bin) in enumerate(zip(profx.bins(), hist.bins())):
rt_graph.SetPoint(i, nch_ref_bin.value, counter_bin.value)
xerr, yerr = nch_ref_bin.error / 2.0, counter_bin.error / 2.0
rt_graph.SetPointError(i, xerr, xerr, yerr, yerr)
return rt_graph
def remove_zero_value_points(g):
# Remove the points backwards, since the index would change if we do it forwards
# The first point has index 0!
points_to_remove = []
for i, (x, y) in enumerate(g):
if not y > 0.0:
points_to_remove.append(i)
for p in points_to_remove[::-1]:
g.RemovePoint(p)
def remove_points_with_equal_x(g):
"""Remove all points which are on already occupied x values. Ie. The first point is kept, all later ones removed"""
points_to_remove = []
seen_x = []
for i, (x, y) in enumerate(g):
if x in seen_x:
points_to_remove.append(i)
else:
seen_x.append(x)
for p in points_to_remove[::-1]:
g.RemovePoint(p)
def remove_points_with_x_err_gt_1NchRef(g):
npoints = g.GetN()
points_to_remove = []
for idx in xrange(0, npoints):
if g.GetErrorX(idx) > 1:
points_to_remove.append(idx)
for p in points_to_remove[::-1]:
g.RemovePoint(p)
def remove_non_mutual_points(g1, g2):
"""Remove all points with do no have a corresponding point at the same x-value in the other hist"""
points_to_remove1 = []
points_to_remove2 = []
xs1 = [p[0] for p in g1]
xs2 = [p[0] for p in g2]
for i, x in enumerate(xs1):
if x not in xs2:
points_to_remove1.append(i)
for i, x in enumerate(xs2):
if x not in xs1:
points_to_remove2.append(i)
for p in points_to_remove1[::-1]:
g1.RemovePoint(p)
for p in points_to_remove2[::-1]:
g2.RemovePoint(p)
def percentile_bin_to_binidx_bin(percentile_bin, event_counter):
"""
Converts a given percentile interval (eg. (.5, .4)) to an interval of bin numbers of the given
event_counter histogram.
Parameters
----------
percentile_bin : tuple
        Two percentiles, each within 0-1. Needs to be decreasing
event_counter : Hist1D
Distribution of events over a classifier value
Returns
-------
tuple :
two bin numbers representing the given percentile. The first bin is inclusive, the second exclusive.
Ie. The bin numbers can be used directly in SetRange
Raises
------
ValueError :
The percentile specifies a range which is not found in the given event_counter histogram. It might be too
narrow.
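    Example
    -------
    Worked example with illustrative numbers (not from the original docstring):
    for a 10-bin event_counter in which every bin holds 10% of the events, the
    per-bin "fraction of events with >= classifier value" runs 1.0, 0.9, ..., 0.1,
    so percentile_bin=(0.5, 0.4) matches the bins with fractions 0.5 and 0.4 and
    the function returns the ROOT bin indices (6, 7).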
"""
nbins = event_counter.GetXaxis().GetNbins()
ntotal_events = event_counter.Integral(1, nbins) # .Integral is a closed interval, as far as I can tell...
# fraction of events with greater or equal classifier values; hence decreasing values
frac_events_with_geq_classifier_value = [event_counter.Integral(binidx, nbins) / float(ntotal_events)
for binidx in range(1, nbins + 1)]
# small checks:
if frac_events_with_geq_classifier_value[0] != 1:
assert(0)
if len(frac_events_with_geq_classifier_value) != nbins:
assert(0)
# produce a list of bools, the first and last True are the first and last bin index
fraction_is_in_percentile_interval = lambda fraction: percentile_bin[0] >= fraction >= percentile_bin[1]
bin_is_in_percentile_interval = map(fraction_is_in_percentile_interval, frac_events_with_geq_classifier_value)
# get the indices of the elements that are True, sorry, this is a bit ugly
indices_of_bins_in_percentile_interval = [i for i, b in enumerate(bin_is_in_percentile_interval) if b]
# return the first and last binidx of the bins in the percentile interval; +1 for root binidx shit
try:
return (indices_of_bins_in_percentile_interval[0] + 1, indices_of_bins_in_percentile_interval[-1] + 1)
except IndexError:
# print "percentiles: "
# print frac_events_with_geq_classifier_value
raise ValueError("The given percentile interval did not match any bins in the given event_counter histogram")
def download_file(alien_path, local_path):
"""
    Download a file from `alien_path` to `local_path`
Parameters
----------
alien_path, local_path : string
Full path to files
"""
if os.path.isfile(local_path):
raise ValueError("Local file exists")
try:
os.makedirs(os.path.dirname(local_path))
except OSError:
pass
alien_path = "alien:/" + alien_path
cp_cmd = ['alien_cp', '-v', '-s', alien_path, local_path]
p = subprocess.Popen(cp_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.wait()
if p.returncode != 0:
print "\n", p.stdout.read()
print("An error occued while downloading {0}; "
"The broken file was deleted.".format(local_path))
try:
os.remove(local_path)
except OSError:
pass
def get_generator_name_from_train(alien_path):
"""
Extract the generator name for an `AnalysisResults.root` file on alien_path.
Parameters
----------
alien_path :
Alien path to `AnalysisResults.root`
Returns
-------
str :
Generator name as stated in the train's `env.sh` file
"""
if not alien_path.startswith("alien:"):
alien_path = "alien:/" + alien_path
path_to_env = os.path.join(os.path.split(alien_path)[0], "..", "env.sh")
cp_cmd = ['alien_cp', '-v', '-s', path_to_env, ".env.sh"]
print "copying with: %s"%cp_cmd
subprocess.check_call(cp_cmd)
with open(".env.sh") as f:
for line in f.readlines():
if "PERIOD_NAME" in line:
gen_name = re.match(".*'(.+)'", line).groups()[-1]
break
return gen_name
def get_generator_name_from_filename(fname):
"""
    Deduce the generator name from the file name as assigned when the
    file was downloaded. Underscores are replaced with spaces.
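    Example (hypothetical file name, for illustration only):
    '123_20150102-0930-Pythia8_Monash.root' -> 'Pythia8 Monash'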
"""
name = re.match(r'.*\d+_\d{8}-\d{4}-(.+)\.root$', fname).groups()[-1]
return name.replace("_", " ")
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/zmq/tests/test_future.py | 9 | 7520 | # coding: utf-8
# Copyright (c) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from datetime import timedelta
import os
import pytest
gen = pytest.importorskip('tornado.gen')
import zmq
from zmq.eventloop import future
from zmq.eventloop.ioloop import IOLoop
from zmq.utils.strtypes import u
from zmq.tests import BaseZMQTestCase
class TestFutureSocket(BaseZMQTestCase):
Context = future.Context
def setUp(self):
self.loop = IOLoop()
self.loop.make_current()
super(TestFutureSocket, self).setUp()
def tearDown(self):
super(TestFutureSocket, self).tearDown()
self.loop.close(all_fds=True)
def test_socket_class(self):
s = self.context.socket(zmq.PUSH)
assert isinstance(s, future.Socket)
s.close()
def test_recv_multipart(self):
@gen.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_multipart()
assert not f.done()
yield a.send(b'hi')
recvd = yield f
self.assertEqual(recvd, [b'hi'])
self.loop.run_sync(test)
def test_recv(self):
@gen.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f1 = b.recv()
f2 = b.recv()
assert not f1.done()
assert not f2.done()
yield a.send_multipart([b'hi', b'there'])
recvd = yield f2
assert f1.done()
self.assertEqual(f1.result(), b'hi')
self.assertEqual(recvd, b'there')
self.loop.run_sync(test)
def test_recv_cancel(self):
@gen.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f1 = b.recv()
f2 = b.recv_multipart()
assert f1.cancel()
assert f1.done()
assert not f2.done()
yield a.send_multipart([b'hi', b'there'])
recvd = yield f2
assert f1.cancelled()
assert f2.done()
self.assertEqual(recvd, [b'hi', b'there'])
self.loop.run_sync(test)
@pytest.mark.skipif(not hasattr(zmq, 'RCVTIMEO'), reason="requires RCVTIMEO")
def test_recv_timeout(self):
@gen.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
b.rcvtimeo = 100
f1 = b.recv()
b.rcvtimeo = 1000
f2 = b.recv_multipart()
with pytest.raises(zmq.Again):
yield f1
yield a.send_multipart([b'hi', b'there'])
recvd = yield f2
assert f2.done()
self.assertEqual(recvd, [b'hi', b'there'])
self.loop.run_sync(test)
@pytest.mark.skipif(not hasattr(zmq, 'SNDTIMEO'), reason="requires SNDTIMEO")
def test_send_timeout(self):
@gen.coroutine
def test():
s = self.socket(zmq.PUSH)
s.sndtimeo = 100
with pytest.raises(zmq.Again):
yield s.send(b'not going anywhere')
self.loop.run_sync(test)
@pytest.mark.now
def test_send_noblock(self):
@gen.coroutine
def test():
s = self.socket(zmq.PUSH)
with pytest.raises(zmq.Again):
yield s.send(b'not going anywhere', flags=zmq.NOBLOCK)
self.loop.run_sync(test)
@pytest.mark.now
def test_send_multipart_noblock(self):
@gen.coroutine
def test():
s = self.socket(zmq.PUSH)
with pytest.raises(zmq.Again):
yield s.send_multipart([b'not going anywhere'], flags=zmq.NOBLOCK)
self.loop.run_sync(test)
def test_recv_string(self):
@gen.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_string()
assert not f.done()
msg = u('πøøπ')
yield a.send_string(msg)
recvd = yield f
assert f.done()
self.assertEqual(f.result(), msg)
self.assertEqual(recvd, msg)
self.loop.run_sync(test)
def test_recv_json(self):
@gen.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_json()
assert not f.done()
obj = dict(a=5)
yield a.send_json(obj)
recvd = yield f
assert f.done()
self.assertEqual(f.result(), obj)
self.assertEqual(recvd, obj)
self.loop.run_sync(test)
def test_recv_json_cancelled(self):
@gen.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_json()
assert not f.done()
f.cancel()
# cycle eventloop to allow cancel events to fire
yield gen.sleep(0)
obj = dict(a=5)
yield a.send_json(obj)
with pytest.raises(future.CancelledError):
recvd = yield f
assert f.done()
# give it a chance to incorrectly consume the event
events = yield b.poll(timeout=5)
assert events
yield gen.sleep(0)
# make sure cancelled recv didn't eat up event
recvd = yield gen.with_timeout(timedelta(seconds=5), b.recv_json())
assert recvd == obj
self.loop.run_sync(test)
def test_recv_pyobj(self):
@gen.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.recv_pyobj()
assert not f.done()
obj = dict(a=5)
yield a.send_pyobj(obj)
recvd = yield f
assert f.done()
self.assertEqual(f.result(), obj)
self.assertEqual(recvd, obj)
self.loop.run_sync(test)
def test_poll(self):
@gen.coroutine
def test():
a, b = self.create_bound_pair(zmq.PUSH, zmq.PULL)
f = b.poll(timeout=0)
self.assertEqual(f.result(), 0)
f = b.poll(timeout=1)
assert not f.done()
evt = yield f
self.assertEqual(evt, 0)
f = b.poll(timeout=1000)
assert not f.done()
yield a.send_multipart([b'hi', b'there'])
evt = yield f
self.assertEqual(evt, zmq.POLLIN)
recvd = yield b.recv_multipart()
self.assertEqual(recvd, [b'hi', b'there'])
self.loop.run_sync(test)
def test_poll_raw(self):
@gen.coroutine
def test():
p = future.Poller()
# make a pipe
r, w = os.pipe()
r = os.fdopen(r, 'rb')
w = os.fdopen(w, 'wb')
# POLLOUT
p.register(r, zmq.POLLIN)
p.register(w, zmq.POLLOUT)
evts = yield p.poll(timeout=1)
evts = dict(evts)
assert r.fileno() not in evts
assert w.fileno() in evts
assert evts[w.fileno()] == zmq.POLLOUT
# POLLIN
p.unregister(w)
w.write(b'x')
w.flush()
evts = yield p.poll(timeout=1000)
evts = dict(evts)
assert r.fileno() in evts
assert evts[r.fileno()] == zmq.POLLIN
assert r.read(1) == b'x'
r.close()
w.close()
self.loop.run_sync(test)
| mit |
jiezhu2007/scrapy | tests/test_utils_deprecate.py | 140 | 10526 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import inspect
import unittest
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.deprecate import create_deprecated_class, update_classpath
from tests import mock
class MyWarning(UserWarning):
pass
class SomeBaseClass(object):
pass
class NewName(SomeBaseClass):
pass
class WarnWhenSubclassedTest(unittest.TestCase):
def _mywarnings(self, w, category=MyWarning):
return [x for x in w if x.category is MyWarning]
def test_no_warning_on_definition(self):
with warnings.catch_warnings(record=True) as w:
Deprecated = create_deprecated_class('Deprecated', NewName)
w = self._mywarnings(w)
self.assertEqual(w, [])
def test_subclassing_warning_message(self):
Deprecated = create_deprecated_class('Deprecated', NewName,
warn_category=MyWarning)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
w = self._mywarnings(w)
self.assertEqual(len(w), 1)
self.assertEqual(
str(w[0].message),
"tests.test_utils_deprecate.UserClass inherits from "
"deprecated class tests.test_utils_deprecate.Deprecated, "
"please inherit from tests.test_utils_deprecate.NewName."
" (warning only on first subclass, there may be others)"
)
self.assertEqual(w[0].lineno, inspect.getsourcelines(UserClass)[1])
def test_custom_class_paths(self):
Deprecated = create_deprecated_class('Deprecated', NewName,
new_class_path='foo.NewClass',
old_class_path='bar.OldClass',
warn_category=MyWarning)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
_ = Deprecated()
w = self._mywarnings(w)
self.assertEqual(len(w), 2)
self.assertIn('foo.NewClass', str(w[0].message))
self.assertIn('bar.OldClass', str(w[0].message))
self.assertIn('foo.NewClass', str(w[1].message))
self.assertIn('bar.OldClass', str(w[1].message))
def test_subclassing_warns_only_on_direct_childs(self):
Deprecated = create_deprecated_class('Deprecated', NewName,
warn_once=False,
warn_category=MyWarning)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
class NoWarnOnMe(UserClass):
pass
w = self._mywarnings(w)
self.assertEqual(len(w), 1)
self.assertIn('UserClass', str(w[0].message))
def test_subclassing_warns_once_by_default(self):
Deprecated = create_deprecated_class('Deprecated', NewName,
warn_category=MyWarning)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
class FooClass(Deprecated):
pass
class BarClass(Deprecated):
pass
w = self._mywarnings(w)
self.assertEqual(len(w), 1)
self.assertIn('UserClass', str(w[0].message))
def test_warning_on_instance(self):
Deprecated = create_deprecated_class('Deprecated', NewName,
warn_category=MyWarning)
# ignore subclassing warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', ScrapyDeprecationWarning)
class UserClass(Deprecated):
pass
with warnings.catch_warnings(record=True) as w:
_, lineno = Deprecated(), inspect.getlineno(inspect.currentframe())
_ = UserClass() # subclass instances don't warn
w = self._mywarnings(w)
self.assertEqual(len(w), 1)
self.assertEqual(
str(w[0].message),
"tests.test_utils_deprecate.Deprecated is deprecated, "
"instantiate tests.test_utils_deprecate.NewName instead."
)
self.assertEqual(w[0].lineno, lineno)
def test_warning_auto_message(self):
with warnings.catch_warnings(record=True) as w:
Deprecated = create_deprecated_class('Deprecated', NewName)
class UserClass2(Deprecated):
pass
msg = str(w[0].message)
self.assertIn("tests.test_utils_deprecate.NewName", msg)
self.assertIn("tests.test_utils_deprecate.Deprecated", msg)
def test_issubclass(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', ScrapyDeprecationWarning)
DeprecatedName = create_deprecated_class('DeprecatedName', NewName)
class UpdatedUserClass1(NewName):
pass
class UpdatedUserClass1a(NewName):
pass
class OutdatedUserClass1(DeprecatedName):
pass
class OutdatedUserClass1a(DeprecatedName):
pass
class UnrelatedClass(object):
pass
class OldStyleClass:
pass
assert issubclass(UpdatedUserClass1, NewName)
assert issubclass(UpdatedUserClass1a, NewName)
assert issubclass(UpdatedUserClass1, DeprecatedName)
assert issubclass(UpdatedUserClass1a, DeprecatedName)
assert issubclass(OutdatedUserClass1, DeprecatedName)
assert not issubclass(UnrelatedClass, DeprecatedName)
assert not issubclass(OldStyleClass, DeprecatedName)
assert not issubclass(OldStyleClass, DeprecatedName)
assert not issubclass(OutdatedUserClass1, OutdatedUserClass1a)
assert not issubclass(OutdatedUserClass1a, OutdatedUserClass1)
self.assertRaises(TypeError, issubclass, object(), DeprecatedName)
def test_isinstance(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', ScrapyDeprecationWarning)
DeprecatedName = create_deprecated_class('DeprecatedName', NewName)
class UpdatedUserClass2(NewName):
pass
class UpdatedUserClass2a(NewName):
pass
class OutdatedUserClass2(DeprecatedName):
pass
class OutdatedUserClass2a(DeprecatedName):
pass
class UnrelatedClass(object):
pass
class OldStyleClass:
pass
assert isinstance(UpdatedUserClass2(), NewName)
assert isinstance(UpdatedUserClass2a(), NewName)
assert isinstance(UpdatedUserClass2(), DeprecatedName)
assert isinstance(UpdatedUserClass2a(), DeprecatedName)
assert isinstance(OutdatedUserClass2(), DeprecatedName)
assert isinstance(OutdatedUserClass2a(), DeprecatedName)
assert not isinstance(OutdatedUserClass2a(), OutdatedUserClass2)
assert not isinstance(OutdatedUserClass2(), OutdatedUserClass2a)
assert not isinstance(UnrelatedClass(), DeprecatedName)
assert not isinstance(OldStyleClass(), DeprecatedName)
def test_clsdict(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', ScrapyDeprecationWarning)
Deprecated = create_deprecated_class('Deprecated', NewName, {'foo': 'bar'})
self.assertEqual(Deprecated.foo, 'bar')
def test_deprecate_a_class_with_custom_metaclass(self):
Meta1 = type('Meta1', (type,), {})
New = Meta1('New', (), {})
Deprecated = create_deprecated_class('Deprecated', New)
def test_deprecate_subclass_of_deprecated_class(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Deprecated = create_deprecated_class('Deprecated', NewName,
warn_category=MyWarning)
AlsoDeprecated = create_deprecated_class('AlsoDeprecated', Deprecated,
new_class_path='foo.Bar',
warn_category=MyWarning)
w = self._mywarnings(w)
self.assertEqual(len(w), 0, str(map(str, w)))
with warnings.catch_warnings(record=True) as w:
AlsoDeprecated()
class UserClass(AlsoDeprecated):
pass
w = self._mywarnings(w)
self.assertEqual(len(w), 2)
self.assertIn('AlsoDeprecated', str(w[0].message))
self.assertIn('foo.Bar', str(w[0].message))
self.assertIn('AlsoDeprecated', str(w[1].message))
self.assertIn('foo.Bar', str(w[1].message))
def test_inspect_stack(self):
with mock.patch('inspect.stack', side_effect=IndexError):
with warnings.catch_warnings(record=True) as w:
DeprecatedName = create_deprecated_class('DeprecatedName', NewName)
class SubClass(DeprecatedName):
pass
self.assertIn("Error detecting parent module", str(w[0].message))
@mock.patch('scrapy.utils.deprecate.DEPRECATION_RULES',
[('scrapy.contrib.pipeline.', 'scrapy.pipelines.'),
('scrapy.contrib.', 'scrapy.extensions.')])
class UpdateClassPathTest(unittest.TestCase):
def test_old_path_gets_fixed(self):
with warnings.catch_warnings(record=True) as w:
output = update_classpath('scrapy.contrib.debug.Debug')
self.assertEqual(output, 'scrapy.extensions.debug.Debug')
self.assertEqual(len(w), 1)
self.assertIn("scrapy.contrib.debug.Debug", str(w[0].message))
self.assertIn("scrapy.extensions.debug.Debug", str(w[0].message))
def test_sorted_replacement(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', ScrapyDeprecationWarning)
output = update_classpath('scrapy.contrib.pipeline.Pipeline')
self.assertEqual(output, 'scrapy.pipelines.Pipeline')
def test_unmatched_path_stays_the_same(self):
with warnings.catch_warnings(record=True) as w:
output = update_classpath('scrapy.unmatched.Path')
self.assertEqual(output, 'scrapy.unmatched.Path')
self.assertEqual(len(w), 0)
| bsd-3-clause |
nguyenfilip/subscription-manager | src/subscription_manager/isodate.py | 3 | 1113 | #
# find a reasonable iso8601 date parser
#
# Copyright (c) 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import dateutil.parser
import logging
log = logging.getLogger('rhsm-app.' + __name__)
def _parse_date_dateutil(date):
# see comment for _parse_date_pyxml
try:
dt = dateutil.parser.parse(date)
except ValueError:
log.warning("Date overflow: %s, using 9999-09-06 instead." % date)
return dateutil.parser.parse("9999-09-06T00:00:00.000+0000")
return dt
parse_date = _parse_date_dateutil
parse_date_impl_name = 'dateutil'
| gpl-2.0 |
Crystalnix/BitPop | chrome/test/functional/chromeos_browser.py | 76 | 1981 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import pyauto_functional # pyauto_functional must come before pyauto.
import pyauto
import test_utils
class ChromeosBrowserTest(pyauto.PyUITest):
def testCannotCloseLastIncognito(self):
"""Verify that last incognito window cannot be closed if it's the
last window"""
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.assertTrue(self.GetBrowserInfo()['windows'][1]['incognito'],
msg='Incognito window is not displayed')
self.CloseBrowserWindow(0)
info = self.GetBrowserInfo()['windows']
self.assertEqual(1, len(info))
url = info[0]['tabs'][0]['url']
self.assertEqual('chrome://newtab/', url,
msg='Unexpected URL: %s' % url)
self.assertTrue(info[0]['incognito'],
msg='Incognito window is not displayed.')
def testCrashBrowser(self):
"""Verify that after broswer crash is recovered, user can still navigate
to other URL."""
test_utils.CrashBrowser(self)
self.RestartBrowser(clear_profile=False)
url = self.GetHttpURLForDataPath('english_page.html')
self.NavigateToURL(url)
self.assertEqual('This page is in English', self.GetActiveTabTitle())
def testFullScreen(self):
"""Verify that a browser window can enter and exit full screen mode."""
self.ApplyAccelerator(pyauto.IDC_FULLSCREEN)
self.assertTrue(self.WaitUntil(lambda:
self.GetBrowserInfo()['windows'][0]['fullscreen']),
msg='Full Screen is not displayed.')
self.ApplyAccelerator(pyauto.IDC_FULLSCREEN)
self.assertTrue(self.WaitUntil(lambda: not
self.GetBrowserInfo()['windows'][0]['fullscreen']),
msg='Normal screen is not displayed.')
if __name__ == '__main__':
pyauto_functional.Main()
| bsd-3-clause |
sebmarchand/syzygy | third_party/numpy/files/numpy/core/tests/test_arrayprint.py | 22 | 2816 | import numpy as np
from numpy.testing import *
class TestArrayRepr(object):
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
assert_equal(repr(x), 'array([ nan, inf])')
class TestComplexArray(TestCase):
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
dtypes = [np.complex64, np.cdouble, np.clongdouble]
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
wanted = [
'[ 0.+0.j]', '[ 0.+0.j]', '[ 0.0+0.0j]',
'[ 0.+1.j]', '[ 0.+1.j]', '[ 0.0+1.0j]',
'[ 0.-1.j]', '[ 0.-1.j]', '[ 0.0-1.0j]',
'[ 0.+infj]', '[ 0.+infj]', '[ 0.0+infj]',
'[ 0.-infj]', '[ 0.-infj]', '[ 0.0-infj]',
'[ 0.+nanj]', '[ 0.+nanj]', '[ 0.0+nanj]',
'[ 1.+0.j]', '[ 1.+0.j]', '[ 1.0+0.0j]',
'[ 1.+1.j]', '[ 1.+1.j]', '[ 1.0+1.0j]',
'[ 1.-1.j]', '[ 1.-1.j]', '[ 1.0-1.0j]',
'[ 1.+infj]', '[ 1.+infj]', '[ 1.0+infj]',
'[ 1.-infj]', '[ 1.-infj]', '[ 1.0-infj]',
'[ 1.+nanj]', '[ 1.+nanj]', '[ 1.0+nanj]',
'[-1.+0.j]', '[-1.+0.j]', '[-1.0+0.0j]',
'[-1.+1.j]', '[-1.+1.j]', '[-1.0+1.0j]',
'[-1.-1.j]', '[-1.-1.j]', '[-1.0-1.0j]',
'[-1.+infj]', '[-1.+infj]', '[-1.0+infj]',
'[-1.-infj]', '[-1.-infj]', '[-1.0-infj]',
'[-1.+nanj]', '[-1.+nanj]', '[-1.0+nanj]',
'[ inf+0.j]', '[ inf+0.j]', '[ inf+0.0j]',
'[ inf+1.j]', '[ inf+1.j]', '[ inf+1.0j]',
'[ inf-1.j]', '[ inf-1.j]', '[ inf-1.0j]',
'[ inf+infj]', '[ inf+infj]', '[ inf+infj]',
'[ inf-infj]', '[ inf-infj]', '[ inf-infj]',
'[ inf+nanj]', '[ inf+nanj]', '[ inf+nanj]',
'[-inf+0.j]', '[-inf+0.j]', '[-inf+0.0j]',
'[-inf+1.j]', '[-inf+1.j]', '[-inf+1.0j]',
'[-inf-1.j]', '[-inf-1.j]', '[-inf-1.0j]',
'[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
'[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
'[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
'[ nan+0.j]', '[ nan+0.j]', '[ nan+0.0j]',
'[ nan+1.j]', '[ nan+1.j]', '[ nan+1.0j]',
'[ nan-1.j]', '[ nan-1.j]', '[ nan-1.0j]',
'[ nan+infj]', '[ nan+infj]', '[ nan+infj]',
'[ nan-infj]', '[ nan-infj]', '[ nan-infj]',
'[ nan+nanj]', '[ nan+nanj]', '[ nan+nanj]']
for res, val in zip(actual, wanted):
assert_(res == val)
if __name__ == "__main__":
run_module_suite()
| apache-2.0 |
Bismarrck/tensorflow | tensorflow/python/data/experimental/kernel_tests/dense_to_sparse_batch_test.py | 17 | 4431 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.dense_to_sparse_batch()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class DenseToSparseBatchTest(test_base.DatasetTestBase):
def testDenseToSparseBatchDataset(self):
components = np.random.randint(12, size=(100,)).astype(np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices(
components).map(lambda x: array_ops.fill([x], x)).apply(
batching.dense_to_sparse_batch(4, [12]))
get_next = self.getNext(dataset)
for start in range(0, len(components), 4):
results = self.evaluate(get_next())
self.assertAllEqual([[i, j]
for i, c in enumerate(components[start:start + 4])
for j in range(c)], results.indices)
self.assertAllEqual(
[c for c in components[start:start + 4] for _ in range(c)],
results.values)
self.assertAllEqual([min(4,
len(components) - start), 12],
results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testDenseToSparseBatchDatasetWithUnknownShape(self):
components = np.random.randint(5, size=(40,)).astype(np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices(
components).map(lambda x: array_ops.fill([x, x], x)).apply(
batching.dense_to_sparse_batch(4, [5, None]))
get_next = self.getNext(dataset)
for start in range(0, len(components), 4):
results = self.evaluate(get_next())
self.assertAllEqual([[i, j, z]
for i, c in enumerate(components[start:start + 4])
for j in range(c)
for z in range(c)], results.indices)
self.assertAllEqual([
c for c in components[start:start + 4] for _ in range(c)
for _ in range(c)
], results.values)
self.assertAllEqual([
min(4,
len(components) - start), 5,
np.max(components[start:start + 4])
], results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testDenseToSparseBatchDatasetWithInvalidShape(self):
input_tensor = array_ops.constant([[1]])
with self.assertRaisesRegexp(ValueError, "Dimension -2 must be >= 0"):
dataset_ops.Dataset.from_tensors(input_tensor).apply(
batching.dense_to_sparse_batch(4, [-2]))
def testDenseToSparseBatchDatasetShapeErrors(self):
def dataset_fn(input_tensor):
return dataset_ops.Dataset.from_tensors(input_tensor).apply(
batching.dense_to_sparse_batch(4, [12]))
# Initialize with an input tensor of incompatible rank.
get_next = self.getNext(dataset_fn([[1]]))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"incompatible with the row shape"):
self.evaluate(get_next())
# Initialize with an input tensor that is larger than `row_shape`.
get_next = self.getNext(dataset_fn(np.int32(range(13))))
with self.assertRaisesRegexp(errors.DataLossError,
"larger than the row shape"):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
| apache-2.0 |
jrbourbeau/cr-composition | processing/legacy/anisotropy/random_trials/process_kstest.py | 2 | 7627 | #!/usr/bin/env python
import os
import argparse
import numpy as np
import pandas as pd
import pycondor
import comptools as comp
if __name__ == "__main__":
p = argparse.ArgumentParser(
        description='Creates condor jobs for the anisotropy KS-test random trials')
p.add_argument('-c', '--config', dest='config',
default='IC86.2012',
choices=['IC79', 'IC86.2012', 'IC86.2013', 'IC86.2014', 'IC86.2015'],
help='Detector configuration')
p.add_argument('--low_energy', dest='low_energy',
default=False, action='store_true',
help='Only use events with energy < 10**6.75 GeV')
p.add_argument('--n_side', dest='n_side', type=int,
default=64,
help='Number of times to split the DataFrame')
p.add_argument('--chunksize', dest='chunksize', type=int,
default=1000,
help='Number of lines used when reading in DataFrame')
p.add_argument('--n_batches', dest='n_batches', type=int,
default=50,
                   help='Number of batches running in parallel for each ks-test trial')
p.add_argument('--ks_trials', dest='ks_trials', type=int,
default=100,
help='Number of random maps to generate')
p.add_argument('--overwrite', dest='overwrite',
default=False, action='store_true',
help='Option to overwrite reference map file, '
                        'if it already exists')
p.add_argument('--test', dest='test',
default=False, action='store_true',
help='Option to run small test version')
args = p.parse_args()
if args.test:
args.ks_trials = 20
args.n_batches = 10000
args.chunksize = 100
# Define output directories
error = comp.paths.condor_data_dir + '/ks_test_{}/error'.format(args.config)
output = comp.paths.condor_data_dir + '/ks_test_{}/output'.format(args.config)
log = comp.paths.condor_scratch_dir + '/ks_test_{}/log'.format(args.config)
submit = comp.paths.condor_scratch_dir + '/ks_test_{}/submit'.format(args.config)
# Define path to executables
make_maps_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'make_maps.py')
merge_maps_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'merge_maps.py')
save_pvals_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'save_pvals.py')
# Create Dagman instance
dag_name = 'anisotropy_kstest_{}'.format(args.config)
if args.test:
dag_name += '_test'
dagman = pycondor.Dagman(dag_name, submit=submit, verbose=1)
# Create Job for saving ks-test p-values for each trial
save_pvals_name = 'save_pvals_{}'.format(args.config)
if args.low_energy:
save_pvals_name += '_lowenergy'
save_pvals_job = pycondor.Job(save_pvals_name, save_pvals_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
save_pvals_infiles_0 = []
save_pvals_infiles_1 = []
dagman.add_job(save_pvals_job)
outdir = os.path.join(comp.paths.comp_data_dir, args.config + '_data',
'anisotropy', 'random_splits')
if args.test:
outdir = os.path.join(outdir, 'test')
for trial_num in range(args.ks_trials):
# Create map_maps jobs for this ks_trial
make_maps_name = 'make_maps_{}_trial-{}'.format(args.config, trial_num)
if args.low_energy:
make_maps_name += '_lowenergy'
make_maps_job = pycondor.Job(make_maps_name, make_maps_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
dagman.add_job(make_maps_job)
merge_maps_infiles_0 = []
merge_maps_infiles_1 = []
for batch_idx in range(args.n_batches):
if args.test and batch_idx > 2:
break
outfile_sample_1 = os.path.join(outdir,
'random_split_1_trial-{}_batch-{}.fits'.format(trial_num, batch_idx))
outfile_sample_0 = os.path.join(outdir,
'random_split_0_trial-{}_batch-{}.fits'.format(trial_num, batch_idx))
make_maps_arg_list = []
make_maps_arg_list.append('--config {}'.format(args.config))
make_maps_arg_list.append('--n_side {}'.format(args.n_side))
make_maps_arg_list.append('--chunksize {}'.format(args.chunksize))
make_maps_arg_list.append('--n_batches {}'.format(args.n_batches))
make_maps_arg_list.append('--batch_idx {}'.format(batch_idx))
make_maps_arg_list.append('--outfile_sample_0 {}'.format(outfile_sample_0))
make_maps_arg_list.append('--outfile_sample_1 {}'.format(outfile_sample_1))
make_maps_arg = ' '.join(make_maps_arg_list)
if args.low_energy:
make_maps_arg += ' --low_energy'
make_maps_job.add_arg(make_maps_arg)
# Add this outfile to the list of infiles for merge_maps_job
merge_maps_infiles_0.append(outfile_sample_0)
merge_maps_infiles_1.append(outfile_sample_1)
for sample_idx, input_file_list in enumerate([merge_maps_infiles_0,
merge_maps_infiles_1]):
merge_maps_name = 'merge_maps_{}_trial-{}_split-{}'.format(args.config, trial_num, sample_idx)
if args.low_energy:
merge_maps_name += '_lowenergy'
merge_maps_job = pycondor.Job(merge_maps_name, merge_maps_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
# Ensure that make_maps_job completes before merge_maps_job begins
make_maps_job.add_child(merge_maps_job)
merge_maps_job.add_child(save_pvals_job)
dagman.add_job(merge_maps_job)
merge_infiles_str = ' '.join(input_file_list)
# Assemble merged output file path
merge_outfile = os.path.join(outdir, 'random_split_{}_trial-{}.fits'.format(sample_idx, trial_num))
merge_maps_arg = '--infiles {} --outfile {}'.format(merge_infiles_str, merge_outfile)
merge_maps_job.add_arg(merge_maps_arg)
if sample_idx == 0:
save_pvals_infiles_0.append(merge_outfile)
else:
save_pvals_infiles_1.append(merge_outfile)
save_pvals_infiles_0_str = ' '.join(save_pvals_infiles_0)
save_pvals_infiles_1_str = ' '.join(save_pvals_infiles_1)
if args.low_energy:
outfile_basename = 'ks_test_dataframe_lowenergy.hdf'
else:
outfile_basename = 'ks_test_dataframe.hdf'
outfile = os.path.join(outdir, outfile_basename)
save_pvals_arg = '--infiles_sample_0 {} --infiles_sample_1 {} ' \
'--outfile {}'.format(save_pvals_infiles_0_str, save_pvals_infiles_1_str, outfile)
save_pvals_job.add_arg(save_pvals_arg)
dagman.build_submit(fancyname=True)
| mit |
ar45/django | django/core/files/utils.py | 395 | 1338 | from django.utils import six
class FileProxyMixin(object):
"""
    A mixin class used to forward file methods to an underlying file
object. The internal file object has to be called "file"::
class FileProxy(FileProxyMixin):
def __init__(self, file):
self.file = file
"""
encoding = property(lambda self: self.file.encoding)
fileno = property(lambda self: self.file.fileno)
flush = property(lambda self: self.file.flush)
isatty = property(lambda self: self.file.isatty)
newlines = property(lambda self: self.file.newlines)
read = property(lambda self: self.file.read)
readinto = property(lambda self: self.file.readinto)
readline = property(lambda self: self.file.readline)
readlines = property(lambda self: self.file.readlines)
seek = property(lambda self: self.file.seek)
softspace = property(lambda self: self.file.softspace)
tell = property(lambda self: self.file.tell)
truncate = property(lambda self: self.file.truncate)
write = property(lambda self: self.file.write)
writelines = property(lambda self: self.file.writelines)
xreadlines = property(lambda self: self.file.xreadlines)
if six.PY3:
seekable = property(lambda self: self.file.seekable)
def __iter__(self):
return iter(self.file)
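# Illustrative sketch (not part of the original module): a minimal proxy built
# on FileProxyMixin. The wrapped object must be stored as ``self.file``;
# attribute access such as ``proxy.read`` then resolves to the wrapped file's
# bound method. The class and variable names below are made up for the example.
def _file_proxy_example():
    from io import BytesIO
    class SimpleFileProxy(FileProxyMixin):
        def __init__(self, file):
            self.file = file
    proxy = SimpleFileProxy(BytesIO(b"hello"))
    return proxy.read()  # b'hello'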
| bsd-3-clause |
chacoroot/planetary | addons/crm_partner_assign/__openerp__.py | 244 | 2369 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Partner Assignation & Geolocation',
'version': '1.0',
'category': 'Customer Relationship Management',
'description': """
This is the module used by OpenERP SA to redirect customers to its partners, based on geolocation.
======================================================================================================
This module lets you geolocate Leads, Opportunities and Partners based on their address.
Once the coordinates of the Lead/Opportunity are known, it can be automatically assigned
to an appropriate local partner, based on the distance and the weight that was assigned to the partner.
""",
'author': 'OpenERP SA',
'depends': ['base_geolocalize', 'crm', 'account', 'portal'],
'data': [
'security/ir.model.access.csv',
'res_partner_view.xml',
'wizard/crm_forward_to_partner_view.xml',
'wizard/crm_channel_interested_view.xml',
'crm_lead_view.xml',
'crm_partner_assign_data.xml',
'crm_portal_view.xml',
'portal_data.xml',
'report/crm_lead_report_view.xml',
'report/crm_partner_report_view.xml',
],
'demo': [
'res_partner_demo.xml',
'crm_lead_demo.xml'
],
'test': ['test/partner_assign.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tadebayo/myedge | myvenv/Lib/site-packages/django/dispatch/dispatcher.py | 55 | 11693 | import sys
import threading
import warnings
import weakref
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.inspect import func_accepts_kwargs
from django.utils.six.moves import range
if six.PY2:
from .weakref_backports import WeakMethod
else:
from weakref import WeakMethod
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
NONE_ID = _make_id(None)
# A marker for caching
NO_RECEIVERS = object()
class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
a Python object, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.configured and settings.DEBUG:
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
if not func_accepts_kwargs(receiver):
raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if six.PY3:
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, weak=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if weak is not None:
warnings.warn("Passing `weak` to disconnect has no effect.", RemovedInDjango20Warning, stacklevel=2)
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
for receiver in self._live_receivers(sender):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver. The traceback is always attached to the error at
``__traceback__``.
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
if not hasattr(err, '__traceback__'):
err.__traceback__ = sys.exc_info()[2]
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
new_receivers = []
for r in self.receivers:
if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
continue
new_receivers.append(r)
self.receivers = new_receivers
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
        This checks for weak references and resolves them, then returns only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
...
"""
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator
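# Illustrative sketch (not part of the original module): wiring a Signal by
# hand. Receivers must accept **kwargs, and send() returns a list of
# (receiver, response) pairs. The names below are made up for the example.
def _signal_example():
    demo_signal = Signal(providing_args=["value"])
    def on_demo(sender, **kwargs):
        return kwargs.get("value")
    demo_signal.connect(on_demo)
    # [(on_demo, 42)] as long as ``on_demo`` is still strongly referenced.
    return demo_signal.send(sender=None, value=42)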
| mit |
hitsthings/node-gyp | gyp/test/mac/gyptest-rebuild.py | 299 | 1260 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that app bundles are rebuilt correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'rebuild'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', 'test_app', chdir=CHDIR)
# Touch a source file, rebuild, and check that the app target is up-to-date.
test.touch('rebuild/main.c')
test.build('test.gyp', 'test_app', chdir=CHDIR)
test.up_to_date('test.gyp', 'test_app', chdir=CHDIR)
# Xcode runs postbuilds on every build, so targets with postbuilds are
# never marked as up_to_date.
if test.format != 'xcode':
# Same for a framework bundle.
test.build('test.gyp', 'test_framework_postbuilds', chdir=CHDIR)
test.up_to_date('test.gyp', 'test_framework_postbuilds', chdir=CHDIR)
# Test that an app bundle with a postbuild that touches the app binary needs
# to be built only once.
test.build('test.gyp', 'test_app_postbuilds', chdir=CHDIR)
test.up_to_date('test.gyp', 'test_app_postbuilds', chdir=CHDIR)
test.pass_test()
| mit |
tobes/raspberry-DMX | dmx2.py | 1 | 2777 | # -*- coding: utf-8 -*-
# http://www.jonshouse.co.uk/rpidmx512.cgi
from twisted.internet import protocol, reactor, endpoints
from pigpio import pi, pulse as pig_pulse, OUTPUT
PORT = 50007
# pins
PIN_DI = 18 # Driver Input
PIN_DE = 23 # Driver output enable (high to enable)
PIN_RE = 24 # Receive output enable (low to enable)
PIN_NULL = 0
DI = 1 << PIN_DI
# timings in µ seconds
BREAK = 88
MAB = 8
BIT = 4
MTBP = 50 # Time between packets
SLEEP_TIME = 5
pulses = []
pig = None
def high(duration=BIT):
return pig_pulse(0, DI, duration)
def low(duration=BIT):
return pig_pulse(DI, 0, duration)
def create_value(value):
# start (low for one bit)
# 8 data bits
# stop (high for two bits)
out = []
def write_pulse():
if bits:
if current:
out.append(high(BIT * bits))
else:
out.append(low(BIT * bits))
value = value << 1 | 1536
bits = 0
current = None
for bit in range(11):
bit_value = bool(1 << bit & value)
if bit_value == current:
bits += 1
continue
write_pulse()
current = bit_value
bits = 1
write_pulse()
return out
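# Worked example (added for clarity): for a channel value of 1, the frame word
# is 1 << 1 | 1536 == 1538 == 0b11000000010. Read LSB first over the 11 loop
# iterations, that is: one start bit (0, driven low), the 8 data bits LSB-first
# (only bit 1 set), then two stop bits (1, driven high), matching the frame
# layout described in create_value() above.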
# precalculate pulses
pulse_values = [create_value(x) for x in range(256)]
def build_waveform(values):
# clear pulses
pulses = []
# Break (low)
pulses.append(low(BREAK))
# Mark after break (high)
pulses.append(high(MAB))
# Channel data
for value in values:
pulses += pulse_values[value]
# End of data (leave high)
pulses.append(high(MTBP))
return pulses
# set up gpio
if True:
pig = pi()
pig.set_mode(PIN_RE, OUTPUT)
pig.set_mode(PIN_DE, OUTPUT)
pig.set_mode(PIN_DI, OUTPUT)
pig.write(PIN_RE, 0) # disable Receive Enable
pig.write(PIN_DE, 1) # enable Driver Enable
pig.write(PIN_DI, 1) # high is the rest state
def send(values):
pig.wave_clear() # clear any existing waveforms
pig.wave_add_generic(build_waveform(values))
wave = pig.wave_create()
pig.wave_send_once(wave)
class Dmx(protocol.Protocol):
def connectionMade(self):
print "Client Connected Detected!"
# enable keepalive if supported
try:
self.transport.setTcpKeepAlive(1)
except AttributeError:
pass
def connectionLost(self, reason):
print "Client Connection Lost!"
def dataReceived(self, data):
data = [int(data[i:i + 2], 16) for i in range(0, len(data), 2)]
send(data)
class DmxFactory(protocol.Factory):
def buildProtocol(self, addr):
return Dmx()
if __name__ == '__main__':
endpoints.serverFromString(reactor, "tcp:%s" % PORT).listen(DmxFactory())
reactor.run()
| gpl-2.0 |
rest-of/the-deck | lambda/lib/python2.7/site-packages/pip/_vendor/cachecontrol/filewrapper.py | 619 | 2168 | from io import BytesIO
class CallbackFileWrapper(object):
"""
Small wrapper around a fp object which will tee everything read into a
buffer, and when that file is closed it will execute a callback with the
contents of that buffer.
All attributes are proxied to the underlying file object.
This class uses members with a double underscore (__) leading prefix so as
not to accidentally shadow an attribute.
"""
def __init__(self, fp, callback):
self.__buf = BytesIO()
self.__fp = fp
self.__callback = callback
def __getattr__(self, name):
        # The vagaries of garbage collection mean that self.__fp is
        # not always set. By using __getattribute__ and the private
        # name[0] allows looking up the attribute value and raising an
        # AttributeError when it doesn't exist. This stops things from
        # infinitely recursing calls to getattr in the case where
# self.__fp hasn't been set.
#
# [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
fp = self.__getattribute__('_CallbackFileWrapper__fp')
return getattr(fp, name)
def __is_fp_closed(self):
try:
return self.__fp.fp is None
except AttributeError:
pass
try:
return self.__fp.closed
except AttributeError:
pass
# We just don't cache it then.
# TODO: Add some logging here...
return False
def read(self, amt=None):
data = self.__fp.read(amt)
self.__buf.write(data)
if self.__is_fp_closed():
if self.__callback:
self.__callback(self.__buf.getvalue())
# We assign this to None here, because otherwise we can get into
            # really tricky problems where the CPython interpreter deadlocks
            # because the callback is holding a reference to something which
            # has a __del__ method. Setting this to None breaks the cycle
            # and allows the garbage collector to do its thing normally.
self.__callback = None
return data
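# Illustrative sketch (not part of the original module): the callback fires on
# the first read() after the wrapped object reports itself closed. The tiny
# fake response class below is made up for the example.
def _callback_wrapper_example():
    class FakeResponse(object):
        def __init__(self, data):
            self._data = data
            self.closed = False
        def read(self, amt=None):
            chunk, self._data = self._data, b""
            if not chunk:
                self.closed = True
            return chunk
    captured = []
    wrapped = CallbackFileWrapper(FakeResponse(b"cached body"), captured.append)
    while wrapped.read():
        pass
    return captured  # [b'cached body']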
| mit |
paulfurley/festival-weather | festivalweather/parser.py | 1 | 3354 | from __future__ import unicode_literals
import datetime
import pytz
import lxml.etree
from .two_hourly_forecast import TwoHourlyForecast
"""
The Norwegian weather service API 1.9 returns a pretty cool data format, albeit
encoded in XML. It contains two types of forecasts:
1. point-in-time (from="2014-08-03T19:00:00Z" to="2014-08-03T19:00:00Z")
2. range (from="2014-08-03T18:00:00Z" to="2014-08-03T19:00:00Z"
The point-in-time forecasts it gives lots of data like this:
<temperature id="TTT" unit="celsius" value="15.5"/>
<windDirection id="dd" deg="217.1" name="SW"/>
<windSpeed id="ff" mps="4.2" beaufort="3" name="Lett bris"/>
<humidity value="76.0" unit="percent"/>
<pressure id="pr" unit="hPa" value="1010.7"/>
<cloudiness id="NN" percent="17.7"/>
<fog id="FOG" percent="0.0"/>
<lowClouds id="LOW" percent="17.7"/>
<mediumClouds id="MEDIUM" percent="0.0"/>
<highClouds id="HIGH" percent="0.0"/>
<dewpointTemperature id="TD" unit="celsius" value="11.4"/>
Whereas for ranges it just gives:
<precipitation unit="mm" value="0.0" minvalue="0.0" maxvalue="0.0"/>
<symbol id="LightCloud" number="2"/>
For your convenience, it seems, 1, 2, 3, 4 and 6 hour ranges are available.
"""
def get_two_hourly_forecasts(xml_f, utc_startfrom, timezone, count):
root = lxml.etree.fromstring(xml_f.read())
point_time_forecasts = parse_point_time_forecasts(root)
two_hour_range_forecasts = parse_two_hour_range_forecasts(root)
for hour_offset in range(0, count * 2, 2):
forecast_time = utc_startfrom + datetime.timedelta(hours=hour_offset)
temperature = parse_temperature(point_time_forecasts[forecast_time])
min_rain, max_rain = parse_precipitation(
two_hour_range_forecasts[forecast_time])
yield make_two_hourly_forecast(forecast_time, timezone, temperature,
min_rain, max_rain)
def parse_point_time_forecasts(lxml):
result = {}
xpath = '//weatherdata/product[@class="pointData"]/time[@from=@to]'
for node in lxml.xpath(xpath):
result[parse_datetime(node.get('from'))] = node
return result
def parse_two_hour_range_forecasts(lxml):
result = {}
xpath = '//weatherdata/product[@class="pointData"]/time[@from]'
for node in lxml.xpath(xpath):
from_ = parse_datetime(node.get('from'))
to = parse_datetime(node.get('to'))
if to - from_ == datetime.timedelta(hours=2):
result[from_] = node
return result
def parse_datetime(string):
return datetime.datetime.strptime(string, '%Y-%m-%dT%H:%M:%SZ').replace(
tzinfo=pytz.UTC)
def parse_temperature(time_node):
temp = time_node.xpath('./location/temperature[@unit="celsius"]/@value')[0]
return float(temp)
def parse_precipitation(time_node):
min_ = time_node.xpath('./location/precipitation[@unit="mm"]/@minvalue')[0]
max_ = time_node.xpath('./location/precipitation[@unit="mm"]/@maxvalue')[0]
return float(min_), float(max_)
def make_two_hourly_forecast(utc_time, timezone, temperature, min_rain,
max_rain):
return TwoHourlyForecast(
start_datetime=utc_time.astimezone(pytz.timezone(timezone)),
temperature=temperature,
min_rain=min_rain,
max_rain=max_rain)
| mit |
dask-image/dask-ndfourier | dask_ndfourier/_compat.py | 1 | 1574 | # -*- coding: utf-8 -*-
"""
Content here is borrowed from our contributions to Dask.
"""
import numpy
import dask.array
def _fftfreq_block(i, n, d):
r = i.copy()
r[i >= (n + 1) // 2] -= n
r /= n * d
return r
def _fftfreq(n, d=1.0, chunks=None):
"""
Return the Discrete Fourier Transform sample frequencies.
The returned float array `f` contains the frequency bin centers in cycles
per unit of the sample spacing (with zero at the start). For instance, if
the sample spacing is in seconds, then the frequency unit is cycles/second.
Given a window length `n` and a sample spacing `d`::
f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even
f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd
Parameters
----------
n : int
Window length.
d : scalar, optional
Sample spacing (inverse of the sampling rate). Defaults to 1.
Returns
-------
grid : dask array
Examples
--------
>>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
>>> fourier = np.fft.fft(signal)
>>> n = signal.size
>>> timestep = 0.1
>>> freq = np.fft.fftfreq(n, d=timestep)
>>> freq
array([ 0. , 1.25, 2.5 , 3.75, -5. , -3.75, -2.5 , -1.25])
Notes
-----
Borrowed from my Dask Array contribution.
"""
n = int(n)
d = float(d)
r = dask.array.arange(n, dtype=float, chunks=chunks)
return r.map_blocks(_fftfreq_block, dtype=float, n=n, d=d)
_sinc = dask.array.ufunc.wrap_elemwise(numpy.sinc)
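# Illustrative sketch (not part of the original module): the chunked frequency
# grid agrees with numpy.fft.fftfreq for the same arguments.
def _fftfreq_example():
    freqs = _fftfreq(8, d=0.1, chunks=4)
    # array([ 0.  ,  1.25,  2.5 ,  3.75, -5.  , -3.75, -2.5 , -1.25])
    return freqs.compute()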
| bsd-3-clause |
nkgilley/home-assistant | homeassistant/components/anthemav/media_player.py | 21 | 5438 | """Support for Anthem Network Receivers and Processors."""
import logging
import anthemav
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "anthemav"
DEFAULT_PORT = 14999
SUPPORT_ANTHEMAV = (
SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up our socket to the AVR."""
host = config[CONF_HOST]
port = config[CONF_PORT]
name = config.get(CONF_NAME)
device = None
_LOGGER.info("Provisioning Anthem AVR device at %s:%d", host, port)
@callback
def async_anthemav_update_callback(message):
"""Receive notification from transport that new data exists."""
_LOGGER.debug("Received update callback from AVR: %s", message)
async_dispatcher_send(hass, DOMAIN)
avr = await anthemav.Connection.create(
host=host, port=port, update_callback=async_anthemav_update_callback
)
device = AnthemAVR(avr, name)
_LOGGER.debug("dump_devicedata: %s", device.dump_avrdata)
_LOGGER.debug("dump_conndata: %s", avr.dump_conndata)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, device.avr.close)
async_add_entities([device])
class AnthemAVR(MediaPlayerEntity):
"""Entity reading values from Anthem AVR protocol."""
def __init__(self, avr, name):
"""Initialize entity with transport."""
super().__init__()
self.avr = avr
self._name = name
def _lookup(self, propname, dval=None):
return getattr(self.avr.protocol, propname, dval)
async def async_added_to_hass(self):
"""When entity is added to hass."""
self.async_on_remove(
async_dispatcher_connect(self.hass, DOMAIN, self.async_write_ha_state)
)
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_ANTHEMAV
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return name of device."""
return self._name or self._lookup("model")
@property
def state(self):
"""Return state of power on/off."""
pwrstate = self._lookup("power")
if pwrstate is True:
return STATE_ON
if pwrstate is False:
return STATE_OFF
return None
@property
def is_volume_muted(self):
"""Return boolean reflecting mute state on device."""
return self._lookup("mute", False)
@property
def volume_level(self):
"""Return volume level from 0 to 1."""
return self._lookup("volume_as_percentage", 0.0)
@property
def media_title(self):
"""Return current input name (closest we have to media title)."""
return self._lookup("input_name", "No Source")
@property
def app_name(self):
"""Return details about current video and audio stream."""
return (
f"{self._lookup('video_input_resolution_text', '')} "
f"{self._lookup('audio_input_name', '')}"
)
@property
def source(self):
"""Return currently selected input."""
return self._lookup("input_name", "Unknown")
@property
def source_list(self):
"""Return all active, configured inputs."""
return self._lookup("input_list", ["Unknown"])
async def async_select_source(self, source):
"""Change AVR to the designated source (by name)."""
self._update_avr("input_name", source)
async def async_turn_off(self):
"""Turn AVR power off."""
self._update_avr("power", False)
async def async_turn_on(self):
"""Turn AVR power on."""
self._update_avr("power", True)
async def async_set_volume_level(self, volume):
"""Set AVR volume (0 to 1)."""
self._update_avr("volume_as_percentage", volume)
async def async_mute_volume(self, mute):
"""Engage AVR mute."""
self._update_avr("mute", mute)
def _update_avr(self, propname, value):
"""Update a property in the AVR."""
_LOGGER.info("Sending command to AVR: set %s to %s", propname, str(value))
setattr(self.avr.protocol, propname, value)
@property
def dump_avrdata(self):
"""Return state of avr object for debugging forensics."""
attrs = vars(self)
        items_string = ", ".join(f"{key}: {value}" for key, value in attrs.items())
return f"dump_avrdata: {items_string}"
| apache-2.0 |
katiecheng/Bombolone | env/lib/python2.7/site-packages/Crypto/Cipher/blockalgo.py | 133 | 12596 | # -*- coding: utf-8 -*-
#
# Cipher/blockalgo.py
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Module with definitions common to all block ciphers."""
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
from Crypto.Util.py3compat import *
#: *Electronic Code Book (ECB)*.
#: This is the simplest encryption mode. Each of the plaintext blocks
#: is directly encrypted into a ciphertext block, independently of
#: any other block. This mode exposes frequency of symbols
#: in your plaintext. Other modes (e.g. *CBC*) should be used instead.
#:
#: See `NIST SP800-38A`_ , Section 6.1 .
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_ECB = 1
#: *Cipher-Block Chaining (CBC)*. Each of the ciphertext blocks depends
#: on the current and all previous plaintext blocks. An Initialization Vector
#: (*IV*) is required.
#:
#: The *IV* is a data block to be transmitted to the receiver.
#: The *IV* can be made public, but it must be authenticated by the receiver and
#: it should be picked randomly.
#:
#: See `NIST SP800-38A`_ , Section 6.2 .
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_CBC = 2
#: *Cipher FeedBack (CFB)*. This mode is similar to CBC, but it transforms
#: the underlying block cipher into a stream cipher. Plaintext and ciphertext
#: are processed in *segments* of **s** bits. The mode is therefore sometimes
#: labelled **s**-bit CFB. An Initialization Vector (*IV*) is required.
#:
#: When encrypting, each ciphertext segment contributes to the encryption of
#: the next plaintext segment.
#:
#: This *IV* is a data block to be transmitted to the receiver.
#: The *IV* can be made public, but it should be picked randomly.
#: Reusing the same *IV* for encryptions done with the same key leads to
#: catastrophic cryptographic failures.
#:
#: See `NIST SP800-38A`_ , Section 6.3 .
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_CFB = 3
#: This mode should not be used.
MODE_PGP = 4
#: *Output FeedBack (OFB)*. This mode is very similar to CBC, but it
#: transforms the underlying block cipher into a stream cipher.
#: The keystream is the iterated block encryption of an Initialization Vector (*IV*).
#:
#: The *IV* is a data block to be transmitted to the receiver.
#: The *IV* can be made public, but it should be picked randomly.
#:
#: Reusing the same *IV* for encryptions done with the same key leads to
#: catastrophic cryptographic failures.
#:
#: See `NIST SP800-38A`_ , Section 6.4 .
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_OFB = 5
#: *CounTeR (CTR)*. This mode is very similar to ECB, in that
#: encryption of one block is done independently of all other blocks.
#: Unlike ECB, the block *position* contributes to the encryption and no
#: information leaks about symbol frequency.
#:
#: Each message block is associated to a *counter* which must be unique
#: across all messages that get encrypted with the same key (not just within
#: the same message). The counter is as big as the block size.
#:
#: Counters can be generated in several ways. The most straightforward one is
#: to choose an *initial counter block* (which can be made public, similarly
#: to the *IV* for the other modes) and increment its lowest **m** bits by
#: one (modulo *2^m*) for each block. In most cases, **m** is chosen to be half
#: the block size.
#:
#: Reusing the same *initial counter block* for encryptions done with the same
#: key leads to catastrophic cryptographic failures.
#:
#: See `NIST SP800-38A`_ , Section 6.5 (for the mode) and Appendix B (for how
#: to manage the *initial counter block*).
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_CTR = 6
#: OpenPGP. This mode is a variant of CFB, and it is only used in PGP and OpenPGP_ applications.
#: An Initialization Vector (*IV*) is required.
#:
#: Unlike CFB, the IV is not transmitted to the receiver. Instead, the *encrypted* IV is.
#: The IV is a random data block. Two of its bytes are duplicated to act as a checksum
#: for the correctness of the key. The encrypted IV is therefore 2 bytes longer than
#: the clean IV.
#:
#: .. _OpenPGP: http://tools.ietf.org/html/rfc4880
MODE_OPENPGP = 7
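# Illustrative sketch (not part of the original module): how the mode
# constants above are typically used through a concrete cipher such as AES.
# The key, IV and messages below are made-up example values. Imports are done
# inside the function to avoid a circular import at module load time.
def _mode_usage_example():
    from Crypto.Cipher import AES
    from Crypto.Util import Counter
    key = b('0123456789abcdef')                    # 16-byte AES key
    cbc = AES.new(key, MODE_CBC, b('16 byte init vec'))
    ct = cbc.encrypt(b('sixteen byte msg'))        # length must be a multiple of 16
    ctr = AES.new(key, MODE_CTR, counter=Counter.new(128))
    return ct, ctr.encrypt(b('any length is fine in CTR mode'))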
def _getParameter(name, index, args, kwargs, default=None):
"""Find a parameter in tuple and dictionary arguments a function receives"""
param = kwargs.get(name)
if len(args)>index:
if param:
raise ValueError("Parameter '%s' is specified twice" % name)
param = args[index]
return param or default
class BlockAlgo:
"""Class modelling an abstract block cipher."""
def __init__(self, factory, key, *args, **kwargs):
self.mode = _getParameter('mode', 0, args, kwargs, default=MODE_ECB)
self.block_size = factory.block_size
if self.mode != MODE_OPENPGP:
self._cipher = factory.new(key, *args, **kwargs)
self.IV = self._cipher.IV
else:
# OPENPGP mode. For details, see 13.9 in RCC4880.
#
# A few members are specifically created for this mode:
# - _encrypted_iv, set in this constructor
# - _done_first_block, set to True after the first encryption
# - _done_last_block, set to True after a partial block is processed
self._done_first_block = False
self._done_last_block = False
self.IV = _getParameter('iv', 1, args, kwargs)
if not self.IV:
raise ValueError("MODE_OPENPGP requires an IV")
# Instantiate a temporary cipher to process the IV
IV_cipher = factory.new(key, MODE_CFB,
b('\x00')*self.block_size, # IV for CFB
segment_size=self.block_size*8)
# The cipher will be used for...
if len(self.IV) == self.block_size:
# ... encryption
self._encrypted_IV = IV_cipher.encrypt(
self.IV + self.IV[-2:] + # Plaintext
b('\x00')*(self.block_size-2) # Padding
)[:self.block_size+2]
elif len(self.IV) == self.block_size+2:
# ... decryption
self._encrypted_IV = self.IV
self.IV = IV_cipher.decrypt(self.IV + # Ciphertext
b('\x00')*(self.block_size-2) # Padding
)[:self.block_size+2]
if self.IV[-2:] != self.IV[-4:-2]:
raise ValueError("Failed integrity check for OPENPGP IV")
self.IV = self.IV[:-2]
else:
raise ValueError("Length of IV must be %d or %d bytes for MODE_OPENPGP"
% (self.block_size, self.block_size+2))
# Instantiate the cipher for the real PGP data
self._cipher = factory.new(key, MODE_CFB,
self._encrypted_IV[-self.block_size:],
segment_size=self.block_size*8)
def encrypt(self, plaintext):
"""Encrypt data with the key and the parameters set at initialization.
The cipher object is stateful; encryption of a long block
of data can be broken up in two or more calls to `encrypt()`.
That is, the statement:
>>> c.encrypt(a) + c.encrypt(b)
is always equivalent to:
>>> c.encrypt(a+b)
That also means that you cannot reuse an object for encrypting
or decrypting other data with the same key.
This function does not perform any padding.
- For `MODE_ECB`, `MODE_CBC`, and `MODE_OFB`, *plaintext* length
(in bytes) must be a multiple of *block_size*.
- For `MODE_CFB`, *plaintext* length (in bytes) must be a multiple
of *segment_size*/8.
- For `MODE_CTR`, *plaintext* can be of any length.
- For `MODE_OPENPGP`, *plaintext* must be a multiple of *block_size*,
unless it is the last chunk of the message.
:Parameters:
plaintext : byte string
The piece of data to encrypt.
:Return:
the encrypted data, as a byte string. It is as long as
*plaintext* with one exception: when encrypting the first message
            chunk with `MODE_OPENPGP`, the encrypted IV is prepended to the
returned ciphertext.
"""
if self.mode == MODE_OPENPGP:
padding_length = (self.block_size - len(plaintext) % self.block_size) % self.block_size
if padding_length>0:
# CFB mode requires ciphertext to have length multiple of block size,
# but PGP mode allows the last block to be shorter
if self._done_last_block:
raise ValueError("Only the last chunk is allowed to have length not multiple of %d bytes",
self.block_size)
self._done_last_block = True
padded = plaintext + b('\x00')*padding_length
res = self._cipher.encrypt(padded)[:len(plaintext)]
else:
res = self._cipher.encrypt(plaintext)
if not self._done_first_block:
res = self._encrypted_IV + res
self._done_first_block = True
return res
return self._cipher.encrypt(plaintext)
def decrypt(self, ciphertext):
"""Decrypt data with the key and the parameters set at initialization.
The cipher object is stateful; decryption of a long block
of data can be broken up in two or more calls to `decrypt()`.
That is, the statement:
>>> c.decrypt(a) + c.decrypt(b)
is always equivalent to:
>>> c.decrypt(a+b)
That also means that you cannot reuse an object for encrypting
or decrypting other data with the same key.
This function does not perform any padding.
- For `MODE_ECB`, `MODE_CBC`, and `MODE_OFB`, *ciphertext* length
(in bytes) must be a multiple of *block_size*.
- For `MODE_CFB`, *ciphertext* length (in bytes) must be a multiple
of *segment_size*/8.
- For `MODE_CTR`, *ciphertext* can be of any length.
        - For `MODE_OPENPGP`, *ciphertext* must be a multiple of *block_size*,
unless it is the last chunk of the message.
:Parameters:
ciphertext : byte string
The piece of data to decrypt.
:Return: the decrypted data (byte string, as long as *ciphertext*).
"""
if self.mode == MODE_OPENPGP:
padding_length = (self.block_size - len(ciphertext) % self.block_size) % self.block_size
if padding_length>0:
# CFB mode requires ciphertext to have length multiple of block size,
# but PGP mode allows the last block to be shorter
if self._done_last_block:
raise ValueError("Only the last chunk is allowed to have length not multiple of %d bytes",
self.block_size)
self._done_last_block = True
padded = ciphertext + b('\x00')*padding_length
res = self._cipher.decrypt(padded)[:len(ciphertext)]
else:
res = self._cipher.decrypt(ciphertext)
return res
return self._cipher.decrypt(ciphertext)
| bsd-3-clause |
betoesquivel/fil2014 | filenv/lib/python2.7/site-packages/south/management/commands/syncdb.py | 119 | 4643 | """
Overridden syncdb command
"""
from __future__ import print_function
import sys
from optparse import make_option
from django.core.management.base import NoArgsCommand, BaseCommand
from django.core.management.color import no_style
from django.utils.datastructures import SortedDict
from django.core.management.commands import syncdb
from django.conf import settings
from django.db import models
from django.db.models.loading import cache
from django.core import management
from south.db import dbs
from south import migration
from south.exceptions import NoMigrations
def get_app_label(app):
return '.'.join( app.__name__.split('.')[0:-1] )
class Command(NoArgsCommand):
option_list = syncdb.Command.option_list + (
make_option('--migrate', action='store_true', dest='migrate', default=False,
help='Tells South to also perform migrations after the sync. Default for during testing, and other internal calls.'),
make_option('--all', action='store_true', dest='migrate_all', default=False,
help='Makes syncdb work on all apps, even migrated ones. Be careful!'),
)
if '--verbosity' not in [opt.get_opt_string() for opt in syncdb.Command.option_list]:
option_list += (
make_option('--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
)
help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created, except those which use migrations."
def handle_noargs(self, migrate_all=False, **options):
# Import the 'management' module within each installed app, to register
# dispatcher events.
# This is copied from Django, to fix bug #511.
try:
from django.utils.importlib import import_module
except ImportError:
pass # TODO: Remove, only for Django1.0
else:
for app_name in settings.INSTALLED_APPS:
try:
import_module('.management', app_name)
except ImportError as exc:
msg = exc.args[0]
if not msg.startswith('No module named') or 'management' not in msg:
raise
# Work out what uses migrations and so doesn't need syncing
apps_needing_sync = []
apps_migrated = []
for app in models.get_apps():
app_label = get_app_label(app)
if migrate_all:
apps_needing_sync.append(app_label)
else:
try:
migrations = migration.Migrations(app_label)
except NoMigrations:
# It needs syncing
apps_needing_sync.append(app_label)
else:
# This is a migrated app, leave it
apps_migrated.append(app_label)
verbosity = int(options.get('verbosity', 0))
# Run syncdb on only the ones needed
if verbosity:
print("Syncing...")
old_installed, settings.INSTALLED_APPS = settings.INSTALLED_APPS, apps_needing_sync
old_app_store, cache.app_store = cache.app_store, SortedDict([
(k, v) for (k, v) in cache.app_store.items()
if get_app_label(k) in apps_needing_sync
])
# This will allow the setting of the MySQL storage engine, for example.
for db in dbs.values():
db.connection_init()
# OK, run the actual syncdb
syncdb.Command().execute(**options)
settings.INSTALLED_APPS = old_installed
cache.app_store = old_app_store
# Migrate if needed
if options.get('migrate', True):
if verbosity:
print("Migrating...")
# convert from store_true to store_false
options['no_initial_data'] = not options.get('load_initial_data', True)
management.call_command('migrate', **options)
# Be obvious about what we did
if verbosity:
print("\nSynced:\n > %s" % "\n > ".join(apps_needing_sync))
if options.get('migrate', True):
if verbosity:
print("\nMigrated:\n - %s" % "\n - ".join(apps_migrated))
else:
if verbosity:
print("\nNot synced (use migrations):\n - %s" % "\n - ".join(apps_migrated))
print("(use ./manage.py migrate to migrate these)")
| mit |
RobertABT/heightmap | build/scipy/scipy/misc/tests/test_common.py | 9 | 2908 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal, \
assert_array_almost_equal, assert_equal
from scipy.misc import pade, logsumexp, face, ascent
def test_pade_trivial():
nump, denomp = pade([1.0], 0)
assert_array_equal(nump.c, [1.0])
assert_array_equal(denomp.c, [1.0])
def test_pade_4term_exp():
# First four Taylor coefficients of exp(x).
# Unlike poly1d, the first array element is the zero-order term.
an = [1.0, 1.0, 0.5, 1.0/6]
nump, denomp = pade(an, 0)
assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
assert_array_almost_equal(denomp.c, [1.0])
nump, denomp = pade(an, 1)
assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])
nump, denomp = pade(an, 2)
assert_array_almost_equal(nump.c, [1.0/3, 1.0])
assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])
nump, denomp = pade(an, 3)
assert_array_almost_equal(nump.c, [1.0])
assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])
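# Worked check (added for clarity, not part of the original tests): for the
# order-2 denominator case above, the approximant (x/3 + 1) / (x**2/6 - 2*x/3 + 1)
# evaluated at x = 0.1 gives ~1.10517, matching exp(0.1) to about five decimals.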
def test_logsumexp():
"""Test whether logsumexp() function correctly handles large inputs."""
a = np.arange(200)
desired = np.log(np.sum(np.exp(a)))
assert_almost_equal(logsumexp(a), desired)
# Now test with large numbers
b = [1000, 1000]
desired = 1000.0 + np.log(2.0)
assert_almost_equal(logsumexp(b), desired)
n = 1000
b = np.ones(n) * 10000
desired = 10000.0 + np.log(n)
assert_almost_equal(logsumexp(b), desired)
x = np.array([1e-40] * 1000000)
logx = np.log(x)
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX)), X.sum())
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
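def _naive_logsumexp(a):
    # Reference implementation (added for clarity, not part of the original
    # tests): shifting by the maximum keeps exp() from overflowing, which is
    # exactly what the large-input cases above exercise.
    a = np.asarray(a)
    a_max = a.max()
    return a_max + np.log(np.sum(np.exp(a - a_max)))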
def test_logsumexp_b():
a = np.arange(200)
b = np.arange(200, 0, -1)
desired = np.log(np.sum(b*np.exp(a)))
assert_almost_equal(logsumexp(a, b=b), desired)
a = [1000, 1000]
b = [1.2, 1.2]
desired = 1000 + np.log(2 * 1.2)
assert_almost_equal(logsumexp(a, b=b), desired)
x = np.array([1e-40] * 100000)
    b = np.linspace(1, 1000, 100000)
logx = np.log(x)
X = np.vstack((x, x))
logX = np.vstack((logx, logx))
B = np.vstack((b, b))
assert_array_almost_equal(np.exp(logsumexp(logX, b=B)), (B * X).sum())
assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=0)),
(B * X).sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=1)),
(B * X).sum(axis=1))
def test_face():
assert_equal(face().shape, (768, 1024, 3))
def test_ascent():
assert_equal(ascent().shape, (512, 512))
| mit |
ambikeshwar1991/gnuradio-3.7.4 | gr-atsc/python/atsc/__init__.py | 57 | 1161 | #
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# The presence of this file turns this directory into a Python package
'''
Blocks and utilities for ATSC (Advanced Television Systems Committee) module.
'''
import os
try:
from atsc_swig import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "..", "..", "swig"))
from atsc_swig import *
| gpl-3.0 |
skoppisetty/secret-bugfixes | lib/cherrypy/lib/caching.py | 88 | 17413 | """
CherryPy implements a simple caching system as a pluggable Tool. This tool tries
to be an (in-process) HTTP/1.1-compliant cache. It's not quite there yet, but
it's probably good enough for most sites.
In general, GET responses are cached (along with selecting headers) and, if
another request arrives for the same resource, the caching Tool will return 304
Not Modified if possible, or serve the cached response otherwise. It also sets
request.cached to True if serving a cached representation, and sets
request.cacheable to False (so it doesn't get cached again).
If POST, PUT, or DELETE requests are made for a cached resource, they invalidate
(delete) any cached response.
Usage
=====
Configuration file example::
[/]
tools.caching.on = True
tools.caching.delay = 3600
You may use a class other than the default
:class:`MemoryCache<cherrypy.lib.caching.MemoryCache>` by supplying the config
entry ``cache_class``; supply the full dotted name of the replacement class
as the config value. It must implement the basic methods ``get``, ``put``,
``delete``, and ``clear``.
You may set any attribute, including overriding methods, on the cache
instance by providing them in config. The above sets the
:attr:`delay<cherrypy.lib.caching.MemoryCache.delay>` attribute, for example.
"""
import datetime
import sys
import threading
import time
import cherrypy
from cherrypy.lib import cptools, httputil
from cherrypy._cpcompat import copyitems, ntob, set_daemon, sorted
class Cache(object):
"""Base class for Cache implementations."""
def get(self):
"""Return the current variant if in the cache, else None."""
raise NotImplemented
def put(self, obj, size):
"""Store the current variant in the cache."""
raise NotImplemented
def delete(self):
"""Remove ALL cached variants of the current resource."""
raise NotImplemented
def clear(self):
"""Reset the cache to its initial, empty state."""
raise NotImplemented
# ------------------------------- Memory Cache ------------------------------- #
class AntiStampedeCache(dict):
"""A storage system for cached items which reduces stampede collisions."""
def wait(self, key, timeout=5, debug=False):
"""Return the cached value for the given key, or None.
If timeout is not None, and the value is already
being calculated by another thread, wait until the given timeout has
elapsed. If the value is available before the timeout expires, it is
returned. If not, None is returned, and a sentinel placed in the cache
to signal other threads to wait.
If timeout is None, no waiting is performed nor sentinels used.
"""
value = self.get(key)
if isinstance(value, threading._Event):
if timeout is None:
# Ignore the other thread and recalc it ourselves.
if debug:
cherrypy.log('No timeout', 'TOOLS.CACHING')
return None
# Wait until it's done or times out.
if debug:
cherrypy.log('Waiting up to %s seconds' % timeout, 'TOOLS.CACHING')
value.wait(timeout)
if value.result is not None:
# The other thread finished its calculation. Use it.
if debug:
cherrypy.log('Result!', 'TOOLS.CACHING')
return value.result
# Timed out. Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
cherrypy.log('Timed out', 'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return None
elif value is None:
# Stick an Event in the slot so other threads wait
# on this one to finish calculating the value.
if debug:
cherrypy.log('Timed out', 'TOOLS.CACHING')
e = threading.Event()
e.result = None
dict.__setitem__(self, key, e)
return value
def __setitem__(self, key, value):
"""Set the cached value for the given key."""
existing = self.get(key)
dict.__setitem__(self, key, value)
if isinstance(existing, threading._Event):
# Set Event.result so other threads waiting on it have
# immediate access without needing to poll the cache again.
existing.result = value
existing.set()
class MemoryCache(Cache):
"""An in-memory cache for varying response content.
Each key in self.store is a URI, and each value is an AntiStampedeCache.
The response for any given URI may vary based on the values of
"selecting request headers"; that is, those named in the Vary
response header. We assume the list of header names to be constant
for each URI throughout the lifetime of the application, and store
that list in ``self.store[uri].selecting_headers``.
The items contained in ``self.store[uri]`` have keys which are tuples of
request header values (in the same order as the names in its
selecting_headers), and values which are the actual responses.
"""
maxobjects = 1000
"""The maximum number of cached objects; defaults to 1000."""
maxobj_size = 100000
"""The maximum size of each cached object in bytes; defaults to 100 KB."""
maxsize = 10000000
"""The maximum size of the entire cache in bytes; defaults to 10 MB."""
delay = 600
"""Seconds until the cached content expires; defaults to 600 (10 minutes)."""
antistampede_timeout = 5
"""Seconds to wait for other threads to release a cache lock."""
expire_freq = 0.1
"""Seconds to sleep between cache expiration sweeps."""
debug = False
def __init__(self):
self.clear()
# Run self.expire_cache in a separate daemon thread.
t = threading.Thread(target=self.expire_cache, name='expire_cache')
self.expiration_thread = t
set_daemon(t, True)
t.start()
def clear(self):
"""Reset the cache to its initial, empty state."""
self.store = {}
self.expirations = {}
self.tot_puts = 0
self.tot_gets = 0
self.tot_hist = 0
self.tot_expires = 0
self.tot_non_modified = 0
self.cursize = 0
def expire_cache(self):
"""Continuously examine cached objects, expiring stale ones.
This function is designed to be run in its own daemon thread,
referenced at ``self.expiration_thread``.
"""
# It's possible that "time" will be set to None
# arbitrarily, so we check "while time" to avoid exceptions.
# See tickets #99 and #180 for more information.
while time:
now = time.time()
# Must make a copy of expirations so it doesn't change size
# during iteration
for expiration_time, objects in copyitems(self.expirations):
if expiration_time <= now:
for obj_size, uri, sel_header_values in objects:
try:
del self.store[uri][tuple(sel_header_values)]
self.tot_expires += 1
self.cursize -= obj_size
except KeyError:
# the key may have been deleted elsewhere
pass
del self.expirations[expiration_time]
time.sleep(self.expire_freq)
def get(self):
"""Return the current variant if in the cache, else None."""
request = cherrypy.serving.request
self.tot_gets += 1
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
return None
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
variant = uricache.wait(key=tuple(sorted(header_values)),
timeout=self.antistampede_timeout,
debug=self.debug)
if variant is not None:
self.tot_hist += 1
return variant
def put(self, variant, size):
"""Store the current variant in the cache."""
request = cherrypy.serving.request
response = cherrypy.serving.response
uri = cherrypy.url(qs=request.query_string)
uricache = self.store.get(uri)
if uricache is None:
uricache = AntiStampedeCache()
uricache.selecting_headers = [
e.value for e in response.headers.elements('Vary')]
self.store[uri] = uricache
if len(self.store) < self.maxobjects:
total_size = self.cursize + size
# checks if there's space for the object
if (size < self.maxobj_size and total_size < self.maxsize):
# add to the expirations list
expiration_time = response.time + self.delay
bucket = self.expirations.setdefault(expiration_time, [])
bucket.append((size, uri, uricache.selecting_headers))
# add to the cache
header_values = [request.headers.get(h, '')
for h in uricache.selecting_headers]
uricache[tuple(sorted(header_values))] = variant
self.tot_puts += 1
self.cursize = total_size
def delete(self):
"""Remove ALL cached variants of the current resource."""
uri = cherrypy.url(qs=cherrypy.serving.request.query_string)
self.store.pop(uri, None)
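# Typical wiring sketch (assumption: this module is exposed through CherryPy's
# standard tools mechanism as `tools.caching`; the exact config keys below are
# illustrative, not taken from this file):
#
#     class Root(object):
#         _cp_config = {
#             'tools.caching.on': True,
#             'tools.caching.delay': 3600,  # extra kwargs are set on the Cache object by get()
#         }
#
# With the tool enabled, get() below runs before the page handler and serves a
# cached copy when one exists, while tee_output() stores fresh responses.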
def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
"""Try to obtain cached output. If fresh enough, raise HTTPError(304).
If POST, PUT, or DELETE:
* invalidates (deletes) any cached response for this resource
* sets request.cached = False
* sets request.cacheable = False
else if a cached copy exists:
* sets request.cached = True
* sets request.cacheable = False
* sets response.headers to the cached values
* checks the cached Last-Modified response header against the
current If-(Un)Modified-Since request headers; raises 304
if necessary.
* sets response.status and response.body to the cached values
* returns True
otherwise:
* sets request.cached = False
* sets request.cacheable = True
* returns False
"""
request = cherrypy.serving.request
response = cherrypy.serving.response
if not hasattr(cherrypy, "_cache"):
# Make a process-wide Cache object.
cherrypy._cache = kwargs.pop("cache_class", MemoryCache)()
# Take all remaining kwargs and set them on the Cache object.
for k, v in kwargs.items():
setattr(cherrypy._cache, k, v)
cherrypy._cache.debug = debug
# POST, PUT, DELETE should invalidate (delete) the cached copy.
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10.
if request.method in invalid_methods:
if debug:
cherrypy.log('request.method %r in invalid_methods %r' %
(request.method, invalid_methods), 'TOOLS.CACHING')
cherrypy._cache.delete()
request.cached = False
request.cacheable = False
return False
if 'no-cache' in [e.value for e in request.headers.elements('Pragma')]:
request.cached = False
request.cacheable = True
return False
cache_data = cherrypy._cache.get()
request.cached = bool(cache_data)
request.cacheable = not request.cached
if request.cached:
# Serve the cached copy.
max_age = cherrypy._cache.delay
for v in [e.value for e in request.headers.elements('Cache-Control')]:
atoms = v.split('=', 1)
directive = atoms.pop(0)
if directive == 'max-age':
if len(atoms) != 1 or not atoms[0].isdigit():
raise cherrypy.HTTPError(400, "Invalid Cache-Control header")
max_age = int(atoms[0])
break
elif directive == 'no-cache':
if debug:
cherrypy.log('Ignoring cache due to Cache-Control: no-cache',
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
if debug:
cherrypy.log('Reading response from cache', 'TOOLS.CACHING')
s, h, b, create_time = cache_data
age = int(response.time - create_time)
if (age > max_age):
if debug:
cherrypy.log('Ignoring cache due to age > %d' % max_age,
'TOOLS.CACHING')
request.cached = False
request.cacheable = True
return False
# Copy the response headers. See http://www.cherrypy.org/ticket/721.
response.headers = rh = httputil.HeaderMap()
for k in h:
dict.__setitem__(rh, k, dict.__getitem__(h, k))
# Add the required Age header
response.headers["Age"] = str(age)
try:
# Note that validate_since depends on a Last-Modified header;
# this was put into the cached copy, and should have been
# resurrected just above (response.headers = cache_data[1]).
cptools.validate_since()
except cherrypy.HTTPRedirect:
x = sys.exc_info()[1]
if x.status == 304:
cherrypy._cache.tot_non_modified += 1
raise
# serve it & get out from the request
response.status = s
response.body = b
else:
if debug:
cherrypy.log('request is not cached', 'TOOLS.CACHING')
return request.cached
def tee_output():
"""Tee response output to cache storage. Internal."""
# Used by CachingTool by attaching to request.hooks
request = cherrypy.serving.request
if 'no-store' in request.headers.values('Cache-Control'):
return
def tee(body):
"""Tee response.body into a list."""
if ('no-cache' in response.headers.values('Pragma') or
'no-store' in response.headers.values('Cache-Control')):
for chunk in body:
yield chunk
return
output = []
for chunk in body:
output.append(chunk)
yield chunk
# save the cache data
body = ntob('').join(output)
cherrypy._cache.put((response.status, response.headers or {},
body, response.time), len(body))
response = cherrypy.serving.response
response.body = tee(response.body)
def expires(secs=0, force=False, debug=False):
"""Tool for influencing cache mechanisms using the 'Expires' header.
secs
Must be either an int or a datetime.timedelta, and indicates the
number of seconds between response.time and when the response should
expire. The 'Expires' header will be set to response.time + secs.
If secs is zero, the 'Expires' header is set one year in the past, and
the following "cache prevention" headers are also set:
* Pragma: no-cache
* Cache-Control': no-cache, must-revalidate
force
If False, the following headers are checked:
* Etag
* Last-Modified
* Age
* Expires
If any are already present, none of the above response headers are set.
"""
response = cherrypy.serving.response
headers = response.headers
cacheable = False
if not force:
# some header names that indicate that the response can be cached
for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'):
if indicator in headers:
cacheable = True
break
if not cacheable and not force:
if debug:
cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES')
else:
if debug:
cherrypy.log('request is cacheable', 'TOOLS.EXPIRES')
if isinstance(secs, datetime.timedelta):
secs = (86400 * secs.days) + secs.seconds
if secs == 0:
if force or ("Pragma" not in headers):
headers["Pragma"] = "no-cache"
if cherrypy.serving.request.protocol >= (1, 1):
if force or "Cache-Control" not in headers:
headers["Cache-Control"] = "no-cache, must-revalidate"
# Set an explicit Expires date in the past.
expiry = httputil.HTTPDate(1169942400.0)
else:
expiry = httputil.HTTPDate(response.time + secs)
if force or "Expires" not in headers:
headers["Expires"] = expiry
| gpl-3.0 |
sekaiamber/thefuck | thefuck/rules/ssh_known_hosts.py | 11 | 1044 | import re
patterns = [
r'WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!',
r'WARNING: POSSIBLE DNS SPOOFING DETECTED!',
r"Warning: the \S+ host key for '([^']+)' differs from the key for the IP address '([^']+)'",
]
offending_pattern = re.compile(
r'(?:Offending (?:key for IP|\S+ key)|Matching host key) in ([^:]+):(\d+)',
re.MULTILINE)
commands = ['ssh', 'scp']
def match(command, settings):
if not command.script:
return False
if not command.script.split()[0] in commands:
return False
if not any([re.findall(pattern, command.stderr) for pattern in patterns]):
return False
return True
def get_new_command(command, settings):
return command.script
def side_effect(command, settings):
offending = offending_pattern.findall(command.stderr)
for filepath, lineno in offending:
with open(filepath, 'r') as fh:
lines = fh.readlines()
del lines[int(lineno) - 1]
with open(filepath, 'w') as fh:
fh.writelines(lines)
| mit |
opps/opps | opps/api/__init__.py | 4 | 4177 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.contrib.auth import authenticate
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from piston.handler import BaseHandler as Handler
from piston.emitters import JSONEmitter, Emitter
from opps.api.models import ApiKey
class UncachedEmitter(JSONEmitter):
""" In websites running under varnish or another cache
caching the api can mess the results and return the wrong data
this emmitter injects No-Cache headers in response"""
def render(self, request):
content = super(UncachedEmitter, self).render(request)
response = HttpResponse(content)
response['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response['Content-Type'] = 'application/json; charset=utf-8'
response['Pragma'] = 'no-cache'
response['Expires'] = 0
return response
Emitter.register('json', UncachedEmitter, 'application/json; charset=utf-8')
class BaseHandler(Handler):
limit = 20
limit_arg = 'paginate_limit'
meta = {}
blackfield = ['num_pages', 'page_range', 'total_objects', 'per_page',
'page', 'has_next', 'has_previous', 'has_other_pages',
'end_index', 'start_index', 'start_index']
def include_meta(self, d):
obj = {'meta': self.meta, 'objects': d}
return obj
def paginate_queryset(self, queryset, request):
limit = request.GET.get(self.limit_arg, self.meta.get(self.limit_arg))
paginator = Paginator(queryset, limit or self.limit)
self.meta['num_pages'] = paginator.num_pages
self.meta['page_range'] = paginator.page_range
self.meta['total_objects'] = paginator.count
self.meta['per_page'] = paginator.per_page
page = self.meta.get('page', request.GET.get('page', 1))
try:
results = paginator.page(page)
except PageNotAnInteger:
results = paginator.page(1)
except EmptyPage:
results = paginator.page(paginator.num_pages)
self.meta['has_next'] = results.has_next()
self.meta['has_previous'] = results.has_previous()
self.meta['has_other_pages'] = results.has_other_pages()
self.meta['end_index'] = results.end_index()
self.meta['start_index'] = results.start_index()
self.meta['page_number'] = results.number
return results
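# Illustrative request (parameter names come from `limit_arg` and the 'page'
# key used above; the URL path itself is hypothetical):
#     GET /some-endpoint/?page=2&paginate_limit=10
# returns the second page of ten objects, with the counters collected in
# self.meta exposed under the 'meta' key by include_meta().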
def read(self, request):
base = self.model.objects
if request.GET.items():
items = request.GET.dict()
self.meta[self.limit_arg] = items.pop(self.limit_arg, None)
self.meta['page'] = items.pop('page', 1)
qs = base.filter(**items)
else:
qs = base.all()
self.meta['total_objects'] = qs.count()
return qs
def _limit(self, request):
limit = request.GET.get(self.limit_arg, self.limit)
return int(limit) * int(request.GET.get('page', 1))
def _page(self, request):
page = int(request.GET.get('page', 1))
if page == 1:
return 0
limit = int(request.GET.get(self.limit_arg, self.limit))
return limit * page - page
def appendModel(Model, Filters):
m = Model.objects.filter(**Filters)
l = []
for i in m:
l.append(i.__dict__)
return l
class ApiKeyAuthentication(object):
def __init__(self, auth_func=authenticate, method=['GET']):
self.auth_func = auth_func
self.method = method
def is_authenticated(self, request):
if request.method == 'GET' and 'GET' in self.method:
return True
try:
method = getattr(request, request.method)
except:
method = request.GET
try:
ApiKey.objects.get(
user__username=method.get('api_username'),
key=method.get('api_key'))
except ApiKey.DoesNotExist:
return False
return True
def challenge(self):
resp = HttpResponse("Authorization Required")
resp.status_code = 401
return resp
| mit |
ShalY/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | ExamplesFromChapters/Chapter3/ClusteringWithGaussians.py | 90 | 1034 | import numpy as np
import pymc as pm
data = np.loadtxt("../../Chapter3_MCMC/data/mixture_data.csv", delimiter=",")
p = pm.Uniform("p", 0, 1)
assignment = pm.Categorical("assignment", [p, 1 - p], size=data.shape[0])
taus = 1.0 / pm.Uniform("stds", 0, 100, size=2) ** 2 # notice the size!
centers = pm.Normal("centers", [150, 150], [0.001, 0.001], size=2)
"""
The deterministic functions below map an assignment, in this case 0 or 1,
to a set of parameters, located in the (1,2) arrays `taus` and `centers`.
"""
@pm.deterministic
def center_i(assignment=assignment, centers=centers):
return centers[assignment]
@pm.deterministic
def tau_i(assignment=assignment, taus=taus):
return taus[assignment]
# and to combine it with the observations:
observations = pm.Normal("obs", center_i, tau_i,
value=data, observed=True)
# below we create a Model instance from the relevant variables
model = pm.Model([p, assignment, taus, centers])
map_ = pm.MAP(model)
map_.fit()
mcmc = pm.MCMC(model)
mcmc.sample(100000, 50000)
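# After sampling, posterior draws can be pulled from the MCMC object with the
# standard PyMC 2 trace accessor, e.g.:
#     center_trace = mcmc.trace("centers")[:]
#     p_trace = mcmc.trace("p")[:]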
| mit |
googleapis/python-container | google/cloud/container_v1beta1/services/cluster_manager/transports/__init__.py | 2 | 1194 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ClusterManagerTransport
from .grpc import ClusterManagerGrpcTransport
from .grpc_asyncio import ClusterManagerGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterManagerTransport]]
_transport_registry["grpc"] = ClusterManagerGrpcTransport
_transport_registry["grpc_asyncio"] = ClusterManagerGrpcAsyncIOTransport
__all__ = (
"ClusterManagerTransport",
"ClusterManagerGrpcTransport",
"ClusterManagerGrpcAsyncIOTransport",
)
| apache-2.0 |
lepistone/stock-logistics-warehouse | __unported__/stock_reserve_sale/model/stock_reserve.py | 4 | 1887 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class stock_reservation(orm.Model):
_inherit = 'stock.reservation'
_columns = {
'sale_line_id': fields.many2one(
'sale.order.line',
string='Sale Order Line',
ondelete='cascade'),
'sale_id': fields.related(
'sale_line_id', 'order_id',
type='many2one',
relation='sale.order',
string='Sale Order')
}
def release(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'sale_line_id': False}, context=context)
return super(stock_reservation, self).release(
cr, uid, ids, context=context)
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default['sale_line_id'] = False
return super(stock_reservation, self).copy_data(
cr, uid, id, default=default, context=context)
| agpl-3.0 |
rhjdjong/SlipLib | src/sliplib/slipsocket.py | 2 | 10133 | # Copyright (c) 2020. Ruud de Jong
# This file is part of the SlipLib project which is released under the MIT license.
# See https://github.com/rhjdjong/SlipLib for details.
"""
SlipSocket
----------
.. autoclass:: SlipSocket(sock)
:show-inheritance:
Class :class:`SlipSocket` offers the following methods in addition to the methods
offered by its base class :class:`SlipWrapper`:
.. automethod:: accept
.. automethod:: create_connection
.. note::
The :meth:`accept` and :meth:`create_connection` methods
do not magically turn the
socket at the remote address into a SlipSocket.
For the connection to work properly,
the remote socket must already
have been configured to use the SLIP protocol.
The following commonly used :class:`socket.socket` methods are exposed through
a :class:`SlipSocket` object.
These methods are simply delegated to the wrapped `socket` instance.
.. automethod:: bind
.. automethod:: close
.. automethod:: connect
.. automethod:: connect_ex
.. automethod:: getpeername
.. automethod:: getsockname
.. automethod:: listen([backlog])
.. automethod:: shutdown
Since the wrapped socket is available as the :attr:`socket` attribute,
any other :class:`socket.socket`
method can be invoked through that attribute.
.. warning::
Avoid using :class:`socket.socket`
methods that affect the bytes that are sent or received through the socket.
Doing so will invalidate the internal state of the enclosed :class:`Driver` instance,
resulting in corrupted SLIP messages.
In particular, do not use any of the :meth:`recv*` or :meth:`send*` methods
on the :attr:`socket` attribute.
A :class:`SlipSocket` instance has the following attributes in addition to the attributes
offered by its base class :class:`SlipWrapper`:
.. attribute:: socket
The wrapped `socket`.
This is actually just an alias for the :attr:`stream` attribute in the base class.
.. autoattribute:: family
.. autoattribute:: type
.. autoattribute:: proto
"""
import socket
import warnings
from typing import Optional, Tuple
from .slipwrapper import SlipWrapper
class SlipSocket(SlipWrapper):
"""Class that wraps a TCP :class:`socket` with a :class:`Driver`
:class:`SlipSocket` combines a :class:`Driver` instance with a
:class:`socket`.
The :class:`SlipStream` class has all the methods from its base class :class:`SlipWrapper`.
In addition it directly exposes all methods and attributes of
the contained :obj:`socket`, except for the following:
* :meth:`send*` and :meth:`recv*`. These methods are not
supported, because byte-oriented send and receive operations
would invalidate the internal state maintained by :class:`SlipSocket`.
* Similarly, :meth:`makefile` is not supported, because byte- or line-oriented
read and write operations would invalidate the internal state.
* :meth:`share` (Windows only) and :meth:`dup`. The internal state of
the :class:`SlipSocket` would have to be duplicated and shared to make these methods meaningful.
Because of the lack of a convincing use case for this, sharing and duplication is
not supported.
* The :meth:`accept` method is delegated to the contained :class:`socket`,
but the socket that is returned by the :class:`socket`'s :meth:`accept` method
is automatically wrapped in a :class:`SlipSocket` object.
In stead of the :class:`socket`'s :meth:`send*` and :meth:`recv*` methods
a :class:`SlipSocket` provides the method :meth:`send_msg` and :meth:`recv_msg`
to send and receive SLIP-encoded messages.
.. deprecated:: 0.6
Direct access to the methods and attributes of the contained :obj:`socket`
other than `family`, `type`, and `proto` will be removed in version 1.0
Only TCP sockets are supported. Using the SLIP protocol on
UDP sockets is not supported for the following reasons:
* UDP is datagram-based. Using SLIP with UDP therefore
introduces ambiguity: should SLIP packets be allowed to span
multiple UDP datagrams or not?
* UDP does not guarantee delivery, and does not guarantee that
datagrams are delivered in the correct order.
"""
_chunk_size = 4096
def __init__(self, sock: socket.SocketType):
# pylint: disable=missing-raises-doc
"""
To instantiate a :class:`SlipSocket`, the user must provide
a pre-constructed TCP `socket`.
An alternative way to instantiate s SlipSocket is to use the
class method :meth:`create_connection`.
Args:
sock (socket.socket): An existing TCP socket, i.e.
a socket with type :const:`socket.SOCK_STREAM`
"""
if not isinstance(sock, socket.socket) or sock.type != socket.SOCK_STREAM:
raise ValueError('Only sockets with type SOCK_STREAM are supported')
super().__init__(sock)
self.socket = self.stream
def send_bytes(self, packet: bytes) -> None:
"""See base class"""
self.socket.sendall(packet)
def recv_bytes(self) -> bytes:
"""See base class"""
return self.socket.recv(self._chunk_size)
def accept(self) -> Tuple['SlipSocket', Tuple]:
"""Accepts an incoming connection.
Returns:
Tuple[:class:`~SlipSocket`, Tuple]: A (`SlipSocket`, remote_address) pair.
The :class:`SlipSocket` object
can be used to exchange SLIP-encoded data with the socket at the `remote_address`.
See Also:
:meth:`socket.socket.accept`
"""
conn, address = self.socket.accept()
return self.__class__(conn), address
def bind(self, address: Tuple) -> None:
"""Bind the `SlipSocket` to `address`.
Args:
address: The IP address to bind to.
See Also:
:meth:`socket.socket.bind`
"""
self.socket.bind(address)
def close(self) -> None:
"""Close the `SlipSocket`.
See Also:
:meth:`socket.socket.close`
"""
self.socket.close()
def connect(self, address: Tuple) -> None:
"""Connect `SlipSocket` to a remote socket at `address`.
Args:
address: The IP address of the remote socket.
See Also:
:meth:`socket.socket.connect`
"""
self.socket.connect(address)
def connect_ex(self, address: Tuple) -> None:
"""Connect `SlipSocket` to a remote socket at `address`.
Args:
address: The IP address of the remote socket.
See Also:
:meth:`socket.socket.connect_ex`
"""
self.socket.connect_ex(address)
def getpeername(self) -> Tuple:
"""Get the IP address of the remote socket to which `SlipSocket` is connected.
Returns:
The remote IP address.
See Also:
:meth:`socket.socket.getpeername`
"""
return self.socket.getpeername()
def getsockname(self) -> Tuple:
"""Get `SlipSocket`'s own address.
Returns:
The local IP address.
See Also:
:meth:`socket.socket.getsockname`
"""
return self.socket.getsockname()
def listen(self, backlog: Optional[int] = None) -> None:
"""Enable a `SlipSocket` server to accept connections.
Args:
backlog (int): The maximum number of waiting connections.
See Also:
:meth:`socket.socket.listen`
"""
if backlog is None:
self.socket.listen()
else:
self.socket.listen(backlog)
def shutdown(self, how: int) -> None:
"""Shutdown the connection.
Args:
how: Flag to indicate which halves of the connection must be shut down.
See Also:
:meth:`socket.socket.shutdown`
"""
self.socket.shutdown(how)
@property
def family(self) -> int:
# pylint: disable=line-too-long
"""The wrapped socket's address family. Usually :const:`socket.AF_INET` (IPv4) or :const:`socket.AF_INET6` (IPv6)."""
return self.socket.family
@property
def type(self) -> int:
"""The wrapped socket's type. Always :const:`socket.SOCK_STREAM`."""
return self.socket.type
@property
def proto(self) -> int:
"""The wrapped socket's protocol number. Usually 0."""
return self.socket.proto
def __getattr__(self, attribute):
if attribute.startswith('recv') or attribute.startswith('send') or attribute in (
'makefile', 'share', 'dup',
):
raise AttributeError("'{}' object has no attribute '{}'".
format(self.__class__.__name__, attribute))
warnings.warn("Direct access to the enclosed socket attributes and methods will be removed in version 1.0",
DeprecationWarning, stacklevel=2)
return getattr(self.socket, attribute)
@classmethod
def create_connection(cls, address: Tuple, timeout: Optional[float] = None,
source_address: Optional[Tuple] = None) -> 'SlipSocket':
"""Create a SlipSocket connection.
This convenience method creates a connection to a socket at the specified address
using the :func:`socket.create_connection` function.
The socket that is returned from that call is automatically wrapped in
a :class:`SlipSocket` object.
Args:
address (Address): The remote address.
timeout (float): Optional timeout value.
source_address (Address): Optional local address for the near socket.
Returns:
:class:`~SlipSocket`: A `SlipSocket` that is connected to the socket at the remote address.
See Also:
:func:`socket.create_connection`
"""
sock = socket.create_connection(address[0:2], timeout, source_address) # type: ignore
return cls(sock)
| mit |
SublimeLinter/SublimeLinter3 | tests/test_loose_lintmatch.py | 1 | 5173 | from unittesting import DeferrableTestCase
from SublimeLinter.lint.linter import LintMatch
class TestLooseLintMatch(DeferrableTestCase):
def test_attribute_access(self):
m = object()
match = {
"match": m,
"line": 1,
"col": 2,
"error": "error_txt",
"warning": "warning_txt",
"message": "message_txt",
"near": "near_txt"
}
rv = LintMatch(**match)
self.assertEqual(rv.match, m)
self.assertEqual(rv.line, 1)
self.assertEqual(rv.col, 2)
self.assertEqual(rv.error, "error_txt")
self.assertEqual(rv.warning, "warning_txt")
self.assertEqual(rv.message, "message_txt")
self.assertEqual(rv.near, "near_txt")
def test_attribute_access_returns_defaults_for_missing_common_names(self):
rv = LintMatch()
for k in (
"match", "line", "col", "error", "warning", "message", "near",
"filename", "error_type", "code",
):
self.assertEqual(getattr(rv, k), '' if k == 'message' else None)
def test_unknown_keys_raise_on_attribute_access(self):
rv = LintMatch()
try:
rv.foo
except AttributeError as e:
self.assertEqual(str(e), "'LintMatch' object has no attribute 'foo'")
except Exception:
self.fail('Should have thrown AttributeError.')
else:
self.fail('Should have thrown AttributeError.')
def test_self_repr(self):
rv = LintMatch(foo='bar')
self.assertEqual(str(rv), "LintMatch({'foo': 'bar'})")
self.assertEqual(eval(repr(rv)), rv)
def test_copy_lint_match(self):
rv = LintMatch(foo='bar')
self.assertEqual(rv.copy(), rv)
self.assertEqual(type(rv.copy()), LintMatch)
def test_double_star_unpacking_to_dict(self):
m = object()
match = {
"match": m,
"line": 1,
"col": 2,
"error": "error_txt",
"warning": "warning_txt",
"message": "message_txt",
"near": "near_txt"
}
expected = LintMatch(match)
actual = dict(**expected)
self.assertEqual(actual, expected)
def test_tuple_like_unpacking(self):
m = object()
match = {
"match": m,
"line": 1,
"col": 2,
"error": "error_txt",
"warning": "warning_txt",
"message": "message_txt",
"near": "near_txt"
}
rv = LintMatch(**match)
match, line, col, error, warning, message, near = rv
self.assertEqual(match, m)
self.assertEqual(line, 1)
self.assertEqual(col, 2)
self.assertEqual(error, "error_txt")
self.assertEqual(warning, "warning_txt")
self.assertEqual(message, "message_txt")
self.assertEqual(near, "near_txt")
def test_tuple_like_index_access(self):
m = object()
match = {
"match": m,
"line": 1,
"col": 2,
"error": "error_txt",
"warning": "warning_txt",
"message": "message_txt",
"near": "near_txt"
}
rv = LintMatch(**match)
self.assertEqual(rv[0], m)
self.assertEqual(rv[1], 1)
self.assertEqual(rv[2], 2)
self.assertEqual(rv[3], "error_txt")
self.assertEqual(rv[4], "warning_txt")
self.assertEqual(rv[5], "message_txt")
self.assertEqual(rv[6], "near_txt")
self.assertRaises(IndexError, lambda: rv[7])
def test_namedtuple_like_mutating(self):
rv = LintMatch({'foo': 'bar'})
rv2 = rv._replace(foo='baz')
self.assertEqual(rv2.foo, 'baz')
# unlike namedtuple LintMatch is mutable
self.assertEqual(rv.foo, 'baz')
def test_standard_items_access(self):
m = object()
match = {
"match": m,
"line": 1,
"col": 2,
"error": "error_txt",
"warning": "warning_txt",
"message": "message_txt",
"near": "near_txt"
}
rv = LintMatch(**match)
self.assertEqual(rv['match'], m)
self.assertEqual(rv['line'], 1)
self.assertEqual(rv['col'], 2)
self.assertEqual(rv['error'], "error_txt")
self.assertEqual(rv['warning'], "warning_txt")
self.assertEqual(rv['message'], "message_txt")
self.assertEqual(rv['near'], "near_txt")
def test_standard_item_access_throws_on_unknown_keys(self):
rv = LintMatch()
self.assertRaises(KeyError, lambda: rv['line'])
def test_create_from_tuple(self):
m = object()
match = (m, 1, 2, "error_txt", "warning_txt", "message_txt", "near_txt")
actual = LintMatch(*match)
expected = LintMatch({
"match": m,
"line": 1,
"col": 2,
"error": "error_txt",
"warning": "warning_txt",
"message": "message_txt",
"near": "near_txt"
})
self.assertEqual(actual, expected)
| mit |
williamsjj/twisted_cql | examples/session_client_example.py | 1 | 1142 | # -*- coding: utf-8-*-
# ###################################################################
# FILENAME: examples/session_client_example.py
# PROJECT:
# DESCRIPTION: Cassandra session wrappers.
#
# ###################################################################
# (C)2015 DigiTar, All Rights Reserved
# ###################################################################
from twisted.internet import task
from twisted.internet.defer import inlineCallbacks
from cassandra import ConsistencyLevel
from twisted_cql import session as cql_session
@inlineCallbacks
def main_datastax(reactor):
session = cql_session.CassandraSession(["localhost"],
port=9042,
keyspace="testkeyspace",
username="someuser",
password="somepass")
rows = yield session.execute_query("SELECT * FROM testtable;",
consistency_level=ConsistencyLevel.ONE)
print repr(rows)
if __name__ == '__main__':
task.react(main_datastax) | bsd-2-clause |
overtherain/scriptfile | software/googleAppEngine/lib/django_1_4/tests/regressiontests/multiple_database/models.py | 43 | 2251 | from __future__ import absolute_import
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db import models
class Review(models.Model):
source = models.CharField(max_length=100)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
def __unicode__(self):
return self.source
class Meta:
ordering = ('source',)
class PersonManager(models.Manager):
def get_by_natural_key(self, name):
return self.get(name=name)
class Person(models.Model):
objects = PersonManager()
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Meta:
ordering = ('name',)
# This book manager doesn't do anything interesting; it just
# exists to strip out the 'extra_arg' argument to certain
# calls. This argument is used to establish that the BookManager
# is actually getting used when it should be.
class BookManager(models.Manager):
def create(self, *args, **kwargs):
kwargs.pop('extra_arg', None)
return super(BookManager, self).create(*args, **kwargs)
def get_or_create(self, *args, **kwargs):
kwargs.pop('extra_arg', None)
return super(BookManager, self).get_or_create(*args, **kwargs)
class Book(models.Model):
objects = BookManager()
title = models.CharField(max_length=100)
published = models.DateField()
authors = models.ManyToManyField(Person)
editor = models.ForeignKey(Person, null=True, related_name='edited')
reviews = generic.GenericRelation(Review)
pages = models.IntegerField(default=100)
def __unicode__(self):
return self.title
class Meta:
ordering = ('title',)
class Pet(models.Model):
name = models.CharField(max_length=100)
owner = models.ForeignKey(Person)
def __unicode__(self):
return self.name
class Meta:
ordering = ('name',)
class UserProfile(models.Model):
user = models.OneToOneField(User, null=True)
flavor = models.CharField(max_length=100)
class Meta:
ordering = ('flavor',)
| mit |
CEG-FYP-OpenStack/scheduler | nova/tests/unit/api/openstack/compute/test_attach_interfaces.py | 8 | 21320 | # Copyright 2012 SINA Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.api.openstack.compute import attach_interfaces \
as attach_interfaces_v21
from nova.api.openstack.compute.legacy_v2.contrib import attach_interfaces \
as attach_interfaces_v2
from nova.compute import api as compute_api
from nova import exception
from nova.network import api as network_api
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_network_cache_model
from webob import exc
FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_PORT_ID1 = '11111111-1111-1111-1111-111111111111'
FAKE_PORT_ID2 = '22222222-2222-2222-2222-222222222222'
FAKE_PORT_ID3 = '33333333-3333-3333-3333-333333333333'
FAKE_NOT_FOUND_PORT_ID = '00000000-0000-0000-0000-000000000000'
FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444'
FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555'
FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666'
FAKE_BAD_NET_ID = '00000000-0000-0000-0000-000000000000'
port_data1 = {
"id": FAKE_PORT_ID1,
"network_id": FAKE_NET_ID1,
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "aa:aa:aa:aa:aa:aa",
"fixed_ips": ["10.0.1.2"],
"device_id": FAKE_UUID1,
}
port_data2 = {
"id": FAKE_PORT_ID2,
"network_id": FAKE_NET_ID2,
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "bb:bb:bb:bb:bb:bb",
"fixed_ips": ["10.0.2.2"],
"device_id": FAKE_UUID1,
}
port_data3 = {
"id": FAKE_PORT_ID3,
"network_id": FAKE_NET_ID3,
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "bb:bb:bb:bb:bb:bb",
"fixed_ips": ["10.0.2.2"],
"device_id": '',
}
fake_networks = [FAKE_NET_ID1, FAKE_NET_ID2]
ports = [port_data1, port_data2, port_data3]
def fake_list_ports(self, *args, **kwargs):
result = []
for port in ports:
if port['device_id'] == kwargs['device_id']:
result.append(port)
return {'ports': result}
def fake_show_port(self, context, port_id, **kwargs):
for port in ports:
if port['id'] == port_id:
return {'port': port}
else:
raise exception.PortNotFound(port_id=port_id)
def fake_attach_interface(self, context, instance, network_id, port_id,
requested_ip='192.168.1.3'):
if not network_id:
# if no network_id is given when adding a port to an instance, use the
# first default network.
network_id = fake_networks[0]
if network_id == FAKE_BAD_NET_ID:
raise exception.NetworkNotFound(network_id=network_id)
if not port_id:
port_id = ports[fake_networks.index(network_id)]['id']
if port_id == FAKE_NOT_FOUND_PORT_ID:
raise exception.PortNotFound(port_id=port_id)
vif = fake_network_cache_model.new_vif()
vif['id'] = port_id
vif['network']['id'] = network_id
vif['network']['subnets'][0]['ips'][0]['address'] = requested_ip
return vif
def fake_detach_interface(self, context, instance, port_id):
for port in ports:
if port['id'] == port_id:
return
raise exception.PortNotFound(port_id=port_id)
def fake_get_instance(self, *args, **kwargs):
return objects.Instance(uuid=FAKE_UUID1)
class InterfaceAttachTestsV21(test.NoDBTestCase):
controller_cls = attach_interfaces_v21.InterfaceAttachmentController
validate_exc = exception.ValidationError
in_use_exc = exc.HTTPConflict
not_found_exc = exc.HTTPNotFound
not_usable_exc = exc.HTTPBadRequest
def setUp(self):
super(InterfaceAttachTestsV21, self).setUp()
self.flags(timeout=30, group='neutron')
self.stubs.Set(network_api.API, 'show_port', fake_show_port)
self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
self.stubs.Set(compute_api.API, 'get', fake_get_instance)
self.expected_show = {'interfaceAttachment':
{'net_id': FAKE_NET_ID1,
'port_id': FAKE_PORT_ID1,
'mac_addr': port_data1['mac_address'],
'port_state': port_data1['status'],
'fixed_ips': port_data1['fixed_ips'],
}}
self.attachments = self.controller_cls()
self.req = fakes.HTTPRequest.blank('')
@mock.patch.object(compute_api.API, 'get',
side_effect=exception.InstanceNotFound(instance_id=''))
def _test_instance_not_found(self, func, args, mock_get, kwargs=None):
if not kwargs:
kwargs = {}
self.assertRaises(exc.HTTPNotFound, func, self.req, *args, **kwargs)
def test_show_instance_not_found(self):
self._test_instance_not_found(self.attachments.show, ('fake', 'fake'))
def test_index_instance_not_found(self):
self._test_instance_not_found(self.attachments.index, ('fake', ))
def test_detach_interface_instance_not_found(self):
self._test_instance_not_found(self.attachments.delete,
('fake', 'fake'))
def test_attach_interface_instance_not_found(self):
self._test_instance_not_found(self.attachments.create, ('fake', ),
kwargs={'body': {'interfaceAttachment': {}}})
def test_show(self):
result = self.attachments.show(self.req, FAKE_UUID1, FAKE_PORT_ID1)
self.assertEqual(self.expected_show, result)
def test_show_with_port_not_found(self):
self.assertRaises(exc.HTTPNotFound,
self.attachments.show, self.req, FAKE_UUID2,
FAKE_PORT_ID1)
@mock.patch.object(network_api.API, 'show_port',
side_effect=exception.Forbidden)
def test_show_forbidden(self, show_port_mock):
self.assertRaises(exc.HTTPForbidden,
self.attachments.show, self.req, FAKE_UUID1,
FAKE_PORT_ID1)
def test_delete(self):
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface)
result = self.attachments.delete(self.req, FAKE_UUID1, FAKE_PORT_ID1)
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.attachments,
attach_interfaces_v21.InterfaceAttachmentController):
status_int = self.attachments.delete.wsgi_code
else:
status_int = result.status_int
self.assertEqual(202, status_int)
def test_detach_interface_instance_locked(self):
def fake_detach_interface_from_locked_server(self, context,
instance, port_id):
raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
self.stubs.Set(compute_api.API,
'detach_interface',
fake_detach_interface_from_locked_server)
self.assertRaises(exc.HTTPConflict,
self.attachments.delete,
self.req,
FAKE_UUID1,
FAKE_PORT_ID1)
def test_delete_interface_not_found(self):
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface)
self.assertRaises(exc.HTTPNotFound,
self.attachments.delete,
self.req,
FAKE_UUID1,
'invaid-port-id')
def test_attach_interface_instance_locked(self):
def fake_attach_interface_to_locked_server(self, context,
instance, network_id, port_id, requested_ip):
raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
self.stubs.Set(compute_api.API,
'attach_interface',
fake_attach_interface_to_locked_server)
body = {}
self.assertRaises(exc.HTTPConflict,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def test_attach_interface_without_network_id(self):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
body = {}
result = self.attachments.create(self.req, FAKE_UUID1, body=body)
self.assertEqual(result['interfaceAttachment']['net_id'],
FAKE_NET_ID1)
def test_attach_interface_with_network_id(self):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
body = {'interfaceAttachment': {'net_id': FAKE_NET_ID2}}
result = self.attachments.create(self.req, FAKE_UUID1, body=body)
self.assertEqual(result['interfaceAttachment']['net_id'],
FAKE_NET_ID2)
def _attach_interface_bad_request_case(self, body):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
self.assertRaises(exc.HTTPBadRequest,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def _attach_interface_not_found_case(self, body):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
self.assertRaises(self.not_found_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def test_attach_interface_with_port_and_network_id(self):
body = {
'interfaceAttachment': {
'port_id': FAKE_PORT_ID1,
'net_id': FAKE_NET_ID2
}
}
self._attach_interface_bad_request_case(body)
def test_attach_interface_with_not_found_network_id(self):
body = {
'interfaceAttachment': {
'net_id': FAKE_BAD_NET_ID
}
}
self._attach_interface_not_found_case(body)
def test_attach_interface_with_not_found_port_id(self):
body = {
'interfaceAttachment': {
'port_id': FAKE_NOT_FOUND_PORT_ID
}
}
self._attach_interface_not_found_case(body)
def test_attach_interface_with_invalid_state(self):
def fake_attach_interface_invalid_state(*args, **kwargs):
raise exception.InstanceInvalidState(
instance_uuid='', attr='', state='',
method='attach_interface')
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface_invalid_state)
body = {'interfaceAttachment': {'net_id': FAKE_NET_ID1}}
self.assertRaises(exc.HTTPConflict,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
@mock.patch.object(compute_api.API, 'attach_interface',
side_effect=NotImplementedError())
def test_attach_interface_with_not_implemented(self, _mock):
body = {'interfaceAttachment': {'net_id': FAKE_NET_ID1}}
self.assertRaises(exc.HTTPNotImplemented,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def test_detach_interface_with_invalid_state(self):
def fake_detach_interface_invalid_state(*args, **kwargs):
raise exception.InstanceInvalidState(
instance_uuid='', attr='', state='',
method='detach_interface')
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface_invalid_state)
self.assertRaises(exc.HTTPConflict,
self.attachments.delete,
self.req,
FAKE_UUID1,
FAKE_NET_ID1)
@mock.patch.object(compute_api.API, 'detach_interface',
side_effect=NotImplementedError())
def test_detach_interface_with_not_implemented(self, _mock):
self.assertRaises(exc.HTTPNotImplemented,
self.attachments.delete,
self.req, FAKE_UUID1, FAKE_NET_ID1)
def test_attach_interface_invalid_fixed_ip(self):
body = {
'interfaceAttachment': {
'net_id': FAKE_NET_ID1,
'fixed_ips': [{'ip_address': 'invalid_ip'}]
}
}
self.assertRaises(self.validate_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_fixed_ip_already_in_use(self,
attach_mock,
get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1)
get_mock.return_value = fake_instance
attach_mock.side_effect = exception.FixedIpAlreadyInUse(
address='10.0.2.2', instance_uuid=FAKE_UUID1)
body = {}
self.assertRaises(self.in_use_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_port_in_use(self,
attach_mock,
get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1)
get_mock.return_value = fake_instance
attach_mock.side_effect = exception.PortInUse(
port_id=FAKE_PORT_ID1)
body = {}
self.assertRaises(self.in_use_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_port_not_usable(self,
attach_mock,
get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1)
get_mock.return_value = fake_instance
attach_mock.side_effect = exception.PortNotUsable(
port_id=FAKE_PORT_ID1,
instance=fake_instance.uuid)
body = {}
self.assertRaises(self.not_usable_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_failed_no_network(self, attach_mock, get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1,
project_id=FAKE_UUID2)
get_mock.return_value = fake_instance
attach_mock.side_effect = (
exception.InterfaceAttachFailedNoNetwork(project_id=FAKE_UUID2))
self.assertRaises(exc.HTTPBadRequest, self.attachments.create,
self.req, FAKE_UUID1, body={})
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_no_more_fixed_ips(self,
attach_mock,
get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1)
get_mock.return_value = fake_instance
attach_mock.side_effect = exception.NoMoreFixedIps(
net=FAKE_NET_ID1)
body = {}
self.assertRaises(exc.HTTPBadRequest,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
def _test_attach_interface_with_invalid_parameter(self, param):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
body = {'interface_attachment': param}
self.assertRaises(exception.ValidationError,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def test_attach_interface_instance_with_non_uuid_net_id(self):
param = {'net_id': 'non_uuid'}
self._test_attach_interface_with_invalid_parameter(param)
def test_attach_interface_instance_with_non_uuid_port_id(self):
param = {'port_id': 'non_uuid'}
self._test_attach_interface_with_invalid_parameter(param)
def test_attach_interface_instance_with_non_array_fixed_ips(self):
param = {'fixed_ips': 'non_array'}
self._test_attach_interface_with_invalid_parameter(param)
class InterfaceAttachTestsV2(InterfaceAttachTestsV21):
controller_cls = attach_interfaces_v2.InterfaceAttachmentController
validate_exc = exc.HTTPBadRequest
in_use_exc = exc.HTTPBadRequest
def test_attach_interface_instance_with_non_uuid_net_id(self):
pass
def test_attach_interface_instance_with_non_uuid_port_id(self):
pass
def test_attach_interface_instance_with_non_array_fixed_ips(self):
pass
class AttachInterfacesPolicyEnforcementv21(test.NoDBTestCase):
def setUp(self):
super(AttachInterfacesPolicyEnforcementv21, self).setUp()
self.controller = \
attach_interfaces_v21.InterfaceAttachmentController()
self.req = fakes.HTTPRequest.blank('')
self.rule_name = "os_compute_api:os-attach-interfaces"
self.policy.set_rules({self.rule_name: "project:non_fake"})
def test_index_attach_interfaces_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
def test_show_attach_interfaces_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.show, self.req, fakes.FAKE_UUID, FAKE_PORT_ID1)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
def test_create_attach_interfaces_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.create, self.req, fakes.FAKE_UUID, body={})
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
def test_delete_attach_interfaces_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, fakes.FAKE_UUID, FAKE_PORT_ID1)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
| apache-2.0 |
marcelveldt/plugin.video.flix2kodi | lib/keyring/http.py | 13 | 1255 | """
urllib2.HTTPPasswordMgr object using the keyring, for use with the
urllib2.HTTPBasicAuthHandler.
usage:
import urllib2
handlers = [urllib2.HTTPBasicAuthHandler(PasswordMgr())]
urllib2.install_opener(handlers)
urllib2.urlopen(...)
This will prompt for a password if one is required and isn't already
in the keyring. Then, it adds it to the keyring for subsequent use.
"""
import getpass
from . import get_password, delete_password, set_password
class PasswordMgr(object):
def get_username(self, realm, authuri):
return getpass.getuser()
def add_password(self, realm, authuri, password):
user = self.get_username(realm, authuri)
set_password(realm, user, password)
def find_user_password(self, realm, authuri):
user = self.get_username(realm, authuri)
password = get_password(realm, user)
if password is None:
prompt = 'password for %(user)s@%(realm)s for '\
'%(authuri)s: ' % vars()
password = getpass.getpass(prompt)
set_password(realm, user, password)
return user, password
def clear_password(self, realm, authuri):
user = self.get_username(realm, authuri)
delete_password(realm, user)
| gpl-2.0 |
jmighion/ansible | lib/ansible/modules/network/nxos/nxos_static_route.py | 23 | 8490 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_static_route
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages static route configuration
description:
- Manages static route configuration
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- If no vrf is supplied, vrf is set to default.
- If C(state=absent), the route will be removed, regardless of the
non-required parameters.
options:
prefix:
description:
- Destination prefix of static route.
required: true
next_hop:
description:
- Next hop address or interface of static route.
If interface, it must be the fully-qualified interface name.
required: true
vrf:
description:
- VRF for static route.
required: false
default: default
tag:
description:
- Route tag value (numeric).
required: false
default: null
route_name:
description:
- Name of the route. Used with the name parameter on the CLI.
required: false
default: null
pref:
description:
- Preference or administrative difference of route (range 1-255).
required: false
default: null
state:
description:
- Manage the state of the resource.
required: true
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_static_route:
prefix: "192.168.20.64/24"
next_hop: "3.3.3.3"
route_name: testing
pref: 100
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["ip route 192.168.20.0/24 3.3.3.3 name testing 100"]
'''
import re
from ansible.module_utils.nxos import get_config, load_config
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
def reconcile_candidate(module, candidate, prefix):
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
state = module.params['state']
set_command = set_route_command(module, prefix)
remove_command = remove_route_command(module, prefix)
parents = []
commands = []
if module.params['vrf'] == 'default':
config = netcfg.get_section(set_command)
if config and state == 'absent':
commands = [remove_command]
elif not config and state == 'present':
commands = [set_command]
else:
parents = ['vrf context {0}'.format(module.params['vrf'])]
config = netcfg.get_section(parents)
if not isinstance(config, list):
config = config.split('\n')
config = [line.strip() for line in config]
if set_command in config and state == 'absent':
commands = [remove_command]
elif set_command not in config and state == 'present':
commands = [set_command]
if commands:
candidate.add(commands, parents=parents)
def fix_prefix_to_regex(prefix):
prefix = prefix.replace('.', r'\.').replace('/', r'\/')
return prefix
def get_existing(module, prefix, warnings):
key_map = ['tag', 'pref', 'route_name', 'next_hop']
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
parents = 'vrf context {0}'.format(module.params['vrf'])
prefix_to_regex = fix_prefix_to_regex(prefix)
route_regex = r'.*ip\sroute\s{0}\s(?P<next_hop>\S+)(\sname\s(?P<route_name>\S+))?(\stag\s(?P<tag>\d+))?(\s(?P<pref>\d+))?.*'.format(prefix_to_regex)
if module.params['vrf'] == 'default':
config = str(netcfg)
else:
config = netcfg.get_section(parents)
if config:
try:
match_route = re.match(route_regex, config, re.DOTALL)
group_route = match_route.groupdict()
for key in key_map:
if key not in group_route:
group_route[key] = ''
group_route['prefix'] = prefix
group_route['vrf'] = module.params['vrf']
except (AttributeError, TypeError):
group_route = {}
else:
group_route = {}
msg = ("VRF {0} didn't exist.".format(module.params['vrf']))
if msg not in warnings:
warnings.append(msg)
return group_route
def remove_route_command(module, prefix):
return 'no ip route {0} {1}'.format(prefix, module.params['next_hop'])
def set_route_command(module, prefix):
route_cmd = 'ip route {0} {1}'.format(prefix, module.params['next_hop'])
if module.params['route_name']:
route_cmd += ' name {0}'.format(module.params['route_name'])
if module.params['tag']:
route_cmd += ' tag {0}'.format(module.params['tag'])
if module.params['pref']:
route_cmd += ' {0}'.format(module.params['pref'])
return route_cmd
def get_dotted_mask(mask):
bits = 0
for i in range(32-mask, 32):
bits |= (1 << i)
mask = ("%d.%d.%d.%d" % ((bits & 0xff000000) >> 24, (bits & 0xff0000) >> 16, (bits & 0xff00) >> 8, (bits & 0xff)))
return mask
def get_network_start(address, netmask):
address = address.split('.')
netmask = netmask.split('.')
return [str(int(address[x]) & int(netmask[x])) for x in range(0, 4)]
def network_from_string(address, mask, module):
octects = address.split('.')
if len(octects) > 4:
module.fail_json(msg='Incorrect address format.', address=address)
for octect in octects:
try:
if int(octect) < 0 or int(octect) > 255:
module.fail_json(msg='Address may contain invalid values.',
address=address)
except ValueError:
module.fail_json(msg='Address may contain non-integer values.',
address=address)
try:
if int(mask) < 0 or int(mask) > 32:
module.fail_json(msg='Incorrect mask value.', mask=mask)
except ValueError:
module.fail_json(msg='Mask may contain non-integer values.', mask=mask)
netmask = get_dotted_mask(int(mask))
return '.'.join(get_network_start(address, netmask))
def normalize_prefix(module, prefix):
splitted_prefix = prefix.split('/')
address = splitted_prefix[0]
if len(splitted_prefix) > 2:
module.fail_json(msg='Incorrect address format.', address=address)
elif len(splitted_prefix) == 2:
mask = splitted_prefix[1]
network = network_from_string(address, mask, module)
normalized_prefix = str(network) + '/' + str(mask)
else:
normalized_prefix = prefix + '/' + str(32)
return normalized_prefix
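# For example (consistent with the EXAMPLES/RETURN samples above): a prefix of
# "192.168.20.64/24" normalizes to its network address "192.168.20.0/24", and a
# bare host address such as "10.0.0.1" becomes "10.0.0.1/32".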
def main():
argument_spec = dict(
prefix=dict(required=True, type='str'),
next_hop=dict(required=True, type='str'),
vrf=dict(type='str', default='default'),
tag=dict(type='str'),
route_name=dict(type='str'),
pref=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
prefix = normalize_prefix(module, module.params['prefix'])
candidate = CustomNetworkConfig(indent=3)
reconcile_candidate(module, candidate, prefix)
if candidate:
candidate = candidate.items_text()
load_config(module, candidate)
result['commands'] = candidate
result['changed'] = True
else:
result['commands'] = []
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
golismero/golismero | thirdparty_libs/geopy/format.py | 84 | 2758 | from geopy import units
# Unicode characters for symbols that appear in coordinate strings.
DEGREE = unichr(176)
PRIME = unichr(8242)
DOUBLE_PRIME = unichr(8243)
ASCII_DEGREE = ''
ASCII_PRIME = "'"
ASCII_DOUBLE_PRIME = '"'
LATIN1_DEGREE = chr(176)
HTML_DEGREE = '°'
HTML_PRIME = '′'
HTML_DOUBLE_PRIME = '″'
XML_DECIMAL_DEGREE = '°'
XML_DECIMAL_PRIME = '′'
XML_DECIMAL_DOUBLE_PRIME = '″'
XML_HEX_DEGREE = '&xB0;'
XML_HEX_PRIME = '&x2032;'
XML_HEX_DOUBLE_PRIME = '&x2033;'
ABBR_DEGREE = 'deg'
ABBR_ARCMIN = 'arcmin'
ABBR_ARCSEC = 'arcsec'
DEGREES_FORMAT = "%(degrees)d%(deg)s %(minutes)d%(arcmin)s %(seconds)s%(arcsec)s"
UNICODE_SYMBOLS = {'deg': DEGREE, 'arcmin': PRIME, 'arcsec': DOUBLE_PRIME}
ASCII_SYMBOLS = {'deg': ASCII_DEGREE, 'arcmin': ASCII_PRIME, 'arcsec': ASCII_DOUBLE_PRIME}
LATIN1_SYMBOLS = {'deg': LATIN1_DEGREE, 'arcmin': ASCII_PRIME, 'arcsec': ASCII_DOUBLE_PRIME}
HTML_SYMBOLS = {'deg': HTML_DEGREE, 'arcmin': HTML_PRIME, 'arcsec': HTML_DOUBLE_PRIME}
XML_SYMBOLS = {'deg': XML_DECIMAL_DEGREE, 'arcmin': XML_DECIMAL_PRIME, 'arcsec': XML_DECIMAL_DOUBLE_PRIME}
ABBR_SYMBOLS = {'deg': ABBR_DEGREE, 'arcmin': ABBR_ARCMIN, 'arcsec': ABBR_ARCSEC}
def format_degrees(degrees, format=DEGREES_FORMAT, symbols=ASCII_SYMBOLS):
arcminutes = units.arcminutes(degrees=degrees - int(degrees))
arcseconds = units.arcseconds(arcminutes=arcminutes - int(arcminutes))
format_dict = dict(
symbols,
degrees=degrees,
minutes=abs(arcminutes),
seconds=abs(arcseconds)
)
return format % format_dict
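# Rough example (assumes geopy.units converts 0.5 degrees to 30 arcminutes):
# format_degrees(49.5) yields something like 49 30' 0.0" with the default
# ASCII symbols, where the degree sign is an empty string.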
DISTANCE_FORMAT = "%(magnitude)s%(unit)s"
DISTANCE_UNITS = {
'km': lambda d: d,
'm': lambda d: units.meters(kilometers=d),
'mi': lambda d: units.miles(kilometers=d),
'ft': lambda d: units.feet(kilometers=d),
'nm': lambda d: units.nautical(kilometers=d),
'nmi': lambda d: units.nautical(kilometers=d)
}
def format_distance(kilometers, format=DISTANCE_FORMAT, unit='km'):
magnitude = DISTANCE_UNITS[unit](kilometers)
return format % {'magnitude': magnitude, 'unit': unit}
_DIRECTIONS = [
('north', 'N'),
('north by east', 'NbE'),
('north-northeast', 'NNE'),
('northeast by north', 'NEbN'),
('northeast', 'NE'),
('northeast by east', 'NEbE'),
('east-northeast', 'ENE'),
('east by north', 'EbN'),
('east', 'E'),
('east by south', 'EbS'),
('east-southeast', 'ESE'),
('southeast by east', 'SEbE'),
('southeast', 'SE'),
('southeast by south', 'SEbS'),
]
DIRECTIONS, DIRECTIONS_ABBR = zip(*_DIRECTIONS)
ANGLE_DIRECTIONS = dict((n * 11.25, d) for n, d in enumerate(DIRECTIONS))
ANGLE_DIRECTIONS_ABBR = dict((n * 11.25, d) for n, d in enumerate(DIRECTIONS_ABBR))
def format_direction(degrees):
    # Minimal sketch (assumption: the original leaves this unimplemented); snap the
    # bearing to the nearest 11.25-degree compass point known to ANGLE_DIRECTIONS.
    angle = round(degrees / 11.25) * 11.25 % 360
    return ANGLE_DIRECTIONS.get(angle, str(degrees)) | gpl-2.0 |
matmutant/sl4a | python/src/Demo/xml/roundtrip.py | 37 | 1201 | """
A simple demo that reads in an XML document and spits out an equivalent,
but not necessarily identical, document.
"""
import sys, string
from xml.sax import saxutils, handler, make_parser
# --- The ContentHandler
class ContentGenerator(handler.ContentHandler):
def __init__(self, out = sys.stdout):
handler.ContentHandler.__init__(self)
self._out = out
# ContentHandler methods
def startDocument(self):
self._out.write('<?xml version="1.0" encoding="iso-8859-1"?>\n')
def startElement(self, name, attrs):
self._out.write('<' + name)
for (name, value) in attrs.items():
self._out.write(' %s="%s"' % (name, saxutils.escape(value)))
self._out.write('>')
def endElement(self, name):
self._out.write('</%s>' % name)
def characters(self, content):
self._out.write(saxutils.escape(content))
def ignorableWhitespace(self, content):
self._out.write(content)
def processingInstruction(self, target, data):
self._out.write('<?%s %s?>' % (target, data))
# --- The main program
parser = make_parser()
parser.setContentHandler(ContentGenerator())
parser.parse(sys.argv[1])
| apache-2.0 |
mobify/python-driver | cassandra/decoder.py | 10 | 1632 | # Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
import warnings
import cassandra.query
import logging
log = logging.getLogger(__name__)
_have_warned = False
def warn_once(f):
@wraps(f)
def new_f(*args, **kwargs):
global _have_warned
if not _have_warned:
msg = "cassandra.decoder.%s has moved to cassandra.query.%s" % (f.__name__, f.__name__)
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
_have_warned = True
return f(*args, **kwargs)
return new_f
tuple_factory = warn_once(cassandra.query.tuple_factory)
"""
Deprecated: use :meth:`cassandra.query.tuple_factory()`
"""
named_tuple_factory = warn_once(cassandra.query.named_tuple_factory)
"""
Deprecated: use :meth:`cassandra.query.named_tuple_factory()`
"""
dict_factory = warn_once(cassandra.query.dict_factory)
"""
Deprecated: use :meth:`cassandra.query.dict_factory()`
"""
ordered_dict_factory = warn_once(cassandra.query.ordered_dict_factory)
"""
Deprecated: use :meth:`cassandra.query.ordered_dict_factory()`
"""
| apache-2.0 |
jmesteve/openerp | openerp/addons/base_report_designer/wizard/__init__.py | 421 | 1081 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_report_designer_modify
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
abhijitbangera/ecommerce | src/products/models.py | 1 | 4589 | from django.db import models
from django.core.urlresolvers import reverse
from django.db.models.signals import post_save
from django.utils.text import slugify
from django.utils.safestring import mark_safe
# Create your models here.
class ProductQuerySet(models.query.QuerySet):
def active(self):
return self.filter(active=True)
class ProductManager(models.Manager):
def get_queryset(self):
return ProductQuerySet(self.model, using=self._db)
def all(self,*args,**kwargs):
return self.get_queryset().active()
def get_related(self,instance):
products_one=self.get_queryset().filter(categories__in=instance.categories.all())
products_two=self.get_queryset().filter(default=instance.default)
qs=(products_one|products_two).exclude(id=instance.id).distinct()
return qs
class Product(models.Model):
title =models.CharField(max_length=120)
description=models.TextField(blank=True,null=True)
price=models.DecimalField(decimal_places=2,max_digits=20)
active=models.BooleanField(default=True)
categories=models.ManyToManyField('Category',blank=True)
default=models.ForeignKey('Category',related_name='default_category',null=True,blank=True)
objects=ProductManager()
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("product_details",kwargs={"pk":self.pk})
# def get_image_url(self):
# img=self.productimage_set.first()
# if img:
# return img.image.url
# return img
#slug
#inventory
class Variation(models.Model):
product=models.ForeignKey(Product)
title=models.CharField(max_length=120)
price=models.DecimalField(decimal_places=2,max_digits=20)
sale_price=models.DecimalField(decimal_places=2,max_digits=20,null=True,blank=True)
active=models.BooleanField(default=True)
    inventory=models.IntegerField(null=True,blank=True) # None means an unlimited amount
def __str__(self):
return self.title
def get_price(self):
if self.sale_price is not None:
return self.sale_price
else:
return self.price
def get_html_price(self):
if self.sale_price is not None:
html_text="<span class='sale-price'>%s</span> <span class='og-price'>%s</span>" %(self.sale_price,self.price)
else:
html_text="<span class='price'>%s</span>" %(self.price)
return mark_safe(html_text)
def get_absolute_url(self):
return self.product.get_absolute_url()
def add_to_cart(self):
return "%s?item=%s&qty=1" %(reverse("cart"),self.id)
def remove_from_cart(self):
return "%s?item=%s&qty=1&delete=True" %(reverse("cart"),self.id)
def product_post_saved_receiver(sender,instance,created,*args,**kwargs):
print(sender)
product=instance
    variations=product.variation_set.all() #variations=Variation.objects.filter(product=product)
if variations.count()==0:
new_var=Variation()
new_var.product=product
new_var.title="Default"
new_var.price=product.price
new_var.save()
print(created)
post_save.connect(product_post_saved_receiver,sender=Product)
def image_upload_to(instance,filename):
title=instance.product.title
slug=slugify(title)
file_extension=filename.split(".")[1]
new_filename="%s.%s" %(instance.id,file_extension)
return "products/%s/%s" %(slug,new_filename)
#Product Image
class ProductImage(models.Model):
product=models.ForeignKey(Product)
image=models.ImageField(upload_to=image_upload_to)
def __str__(self):
return self.product.title
class Category(models.Model):
title=models.CharField(max_length=120,unique=True)
slug=models.SlugField(unique=True)
description=models.TextField(null=True,blank=True)
active=models.BooleanField(default=True)
timestamp=models.DateTimeField(auto_now_add=True,auto_now=False)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("category_detail",kwargs={"slug": self.slug})
def image_upload_to_featured(instance,filename):
title=instance.product.title
slug=slugify(title)
file_extension=filename.split(".")[1]
new_filename="%s.%s" %(instance.id,file_extension)
return "products/%s/featured/%s" %(slug,new_filename)
class ProductFeatured(models.Model):
product=models.ForeignKey(Product)
image=models.ImageField(upload_to=image_upload_to_featured)
title=models.CharField(max_length=120,null=True,blank=True)
text=models.CharField(max_length=220,null=True,blank=True)
text_right=models.BooleanField(default=False)
text_css_color=models.CharField(max_length=6,null=True,blank=True)
show_price=models.BooleanField(default=False)
make_image_background=models.BooleanField(default=False)
active=models.BooleanField(default=True)
def __str__(self):
return self.product.title | mit |
elijah513/django | django/core/serializers/pyyaml.py | 439 | 2843 | """
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
import collections
import decimal
import sys
from io import StringIO
import yaml
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.db import models
from django.utils import six
# Use the C (faster) implementation if possible
try:
from yaml import CSafeLoader as SafeLoader
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
class DjangoSafeDumper(SafeDumper):
def represent_decimal(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', str(data))
def represent_ordered_dict(self, data):
return self.represent_mapping('tag:yaml.org,2002:map', data.items())
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
DjangoSafeDumper.add_representer(collections.OrderedDict, DjangoSafeDumper.represent_ordered_dict)
class Serializer(PythonSerializer):
"""
Convert a queryset to YAML.
"""
internal_use_only = False
def handle_field(self, obj, field):
# A nasty special case: base YAML doesn't support serialization of time
# types (as opposed to dates or datetimes, which it does support). Since
# we want to use the "safe" serializer for better interoperability, we
# need to do something with those pesky times. Converting 'em to strings
# isn't perfect, but it's better than a "!!python/time" type which would
# halt deserialization under any other language.
if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
self._current[field.name] = str(getattr(obj, field.name))
else:
super(Serializer, self).handle_field(obj, field)
def end_serialization(self):
yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of YAML data.
"""
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode('utf-8')
if isinstance(stream_or_string, six.string_types):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
try:
for obj in PythonDeserializer(yaml.load(stream, Loader=SafeLoader), **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
| bsd-3-clause |
suiyuan2009/tensorflow | tensorflow/python/debug/examples/debug_errors.py | 150 | 2655 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of debugging TensorFlow runtime errors using tfdbg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python import debug as tf_debug
def main(_):
sess = tf.Session()
# Construct the TensorFlow network.
ph_float = tf.placeholder(tf.float32, name="ph_float")
x = tf.transpose(ph_float, name="x")
v = tf.Variable(np.array([[-2.0], [-3.0], [6.0]], dtype=np.float32), name="v")
m = tf.constant(
np.array([[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]]),
dtype=tf.float32,
name="m")
y = tf.matmul(m, x, name="y")
z = tf.matmul(m, v, name="z")
if FLAGS.debug:
sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type=FLAGS.ui_type)
if FLAGS.error == "shape_mismatch":
print(sess.run(y, feed_dict={ph_float: np.array([[0.0], [1.0], [2.0]])}))
elif FLAGS.error == "uninitialized_variable":
print(sess.run(z))
elif FLAGS.error == "no_error":
print(sess.run(y, feed_dict={ph_float: np.array([[0.0, 1.0, 2.0]])}))
else:
raise ValueError("Unrecognized error type: " + FLAGS.error)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--error",
type=str,
default="shape_mismatch",
help="""\
Type of the error to generate (shape_mismatch | uninitialized_variable |
no_error).\
""")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--debug",
type="bool",
nargs="?",
const=True,
default=False,
help="Use debugger to track down bad values during training")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/shelve.py | 34 | 8527 | """Manage shelves of pickled objects.
A "shelf" is a persistent, dictionary-like object. The difference
with dbm databases is that the values (not the keys!) in a shelf can
be essentially arbitrary Python objects -- anything that the "pickle"
module can handle. This includes most class instances, recursive data
types, and objects containing lots of shared sub-objects. The keys
are ordinary strings.
To summarize the interface (key is a string, data is an arbitrary
object):
import shelve
d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
d[key] = data # store data at key (overwrites old data if
# using an existing key)
data = d[key] # retrieve a COPY of the data at key (raise
# KeyError if no such key) -- NOTE that this
# access returns a *copy* of the entry!
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # a list of all existing keys (slow!)
d.close() # close it
Dependent on the implementation, closing a persistent dictionary may
or may not be necessary to flush changes to disk.
Normally, d[key] returns a COPY of the entry. This needs care when
mutable entries are mutated: for example, if d[key] is a list,
d[key].append(anitem)
does NOT modify the entry d[key] itself, as stored in the persistent
mapping -- it only modifies the copy, which is then immediately
discarded, so that the append has NO effect whatsoever. To append an
item to d[key] in a way that will affect the persistent mapping, use:
data = d[key]
data.append(anitem)
d[key] = data
To avoid the problem with mutable entries, you may pass the keyword
argument writeback=True in the call to shelve.open. When you use:
d = shelve.open(filename, writeback=True)
then d keeps a cache of all entries you access, and writes them all back
to the persistent mapping when you call d.close(). This ensures that
such usage as d[key].append(anitem) works as intended.
However, using keyword argument writeback=True may consume vast amount
of memory for the cache, and it may make d.close() very slow, if you
access many of d's entries after opening it in this way: d has no way to
check which of the entries you access are mutable and/or which ones you
actually mutate, so it must cache, and write back at close, all of the
entries that you access. You can call d.sync() to write back all the
entries in the cache, and empty the cache (d.sync() also synchronizes
the persistent dictionary on disk, if feasible).
"""
from pickle import Pickler, Unpickler
from io import BytesIO
import collections.abc
__all__ = ["Shelf", "BsdDbShelf", "DbfilenameShelf", "open"]
class _ClosedDict(collections.abc.MutableMapping):
'Marker for a closed dict. Access attempts raise a ValueError.'
def closed(self, *args):
raise ValueError('invalid operation on closed shelf')
__iter__ = __len__ = __getitem__ = __setitem__ = __delitem__ = keys = closed
def __repr__(self):
return '<Closed Dictionary>'
class Shelf(collections.abc.MutableMapping):
"""Base class for shelf implementations.
This is initialized with a dictionary-like object.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
self.dict = dict
if protocol is None:
protocol = 3
self._protocol = protocol
self.writeback = writeback
self.cache = {}
self.keyencoding = keyencoding
def __iter__(self):
for k in self.dict.keys():
yield k.decode(self.keyencoding)
def __len__(self):
return len(self.dict)
def __contains__(self, key):
return key.encode(self.keyencoding) in self.dict
def get(self, key, default=None):
if key.encode(self.keyencoding) in self.dict:
return self[key]
return default
def __getitem__(self, key):
try:
value = self.cache[key]
except KeyError:
f = BytesIO(self.dict[key.encode(self.keyencoding)])
value = Unpickler(f).load()
if self.writeback:
self.cache[key] = value
return value
def __setitem__(self, key, value):
if self.writeback:
self.cache[key] = value
f = BytesIO()
p = Pickler(f, self._protocol)
p.dump(value)
self.dict[key.encode(self.keyencoding)] = f.getvalue()
def __delitem__(self, key):
del self.dict[key.encode(self.keyencoding)]
try:
del self.cache[key]
except KeyError:
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
if self.dict is None:
return
try:
self.sync()
try:
self.dict.close()
except AttributeError:
pass
finally:
# Catch errors that may happen when close is called from __del__
# because CPython is in interpreter shutdown.
try:
self.dict = _ClosedDict()
except:
self.dict = None
def __del__(self):
if not hasattr(self, 'writeback'):
# __init__ didn't succeed, so don't bother closing
# see http://bugs.python.org/issue1339007 for details
return
self.close()
def sync(self):
if self.writeback and self.cache:
self.writeback = False
for key, entry in self.cache.items():
self[key] = entry
self.writeback = True
self.cache = {}
if hasattr(self.dict, 'sync'):
self.dict.sync()
class BsdDbShelf(Shelf):
"""Shelf implementation using the "BSD" db interface.
This adds methods first(), next(), previous(), last() and
set_location() that have no counterpart in [g]dbm databases.
The actual database must be opened using one of the "bsddb"
modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
bsddb.rnopen) and passed to the constructor.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
Shelf.__init__(self, dict, protocol, writeback, keyencoding)
def set_location(self, key):
(key, value) = self.dict.set_location(key)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def next(self):
(key, value) = next(self.dict)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def previous(self):
(key, value) = self.dict.previous()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def first(self):
(key, value) = self.dict.first()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def last(self):
(key, value) = self.dict.last()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
class DbfilenameShelf(Shelf):
"""Shelf implementation using the "dbm" generic dbm interface.
This is initialized with the filename for the dbm database.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, filename, flag='c', protocol=None, writeback=False):
import dbm
Shelf.__init__(self, dbm.open(filename, flag), protocol, writeback)
def open(filename, flag='c', protocol=None, writeback=False):
"""Open a persistent dictionary for reading and writing.
The filename parameter is the base filename for the underlying
database. As a side-effect, an extension may be added to the
filename and more than one file may be created. The optional flag
parameter has the same interpretation as the flag parameter of
dbm.open(). The optional protocol parameter specifies the
version of the pickle protocol.
See the module's __doc__ string for an overview of the interface.
"""
return DbfilenameShelf(filename, flag, protocol, writeback)
| apache-2.0 |
837468220/python-for-android | python-modules/twisted/twisted/test/test_doc.py | 99 | 3683 | from twisted.trial import unittest
import inspect, glob, os
from os import path
from twisted.python import reflect
import twisted
def errorInFile(f, line=17, name=''):
"""Return a filename formatted so emacs will recognize it as an error point
@param line: Line number in file. Defaults to 17 because that's about how
long the copyright headers are.
"""
return '%s:%d:%s' % (f, line, name)
# return 'File "%s", line %d, in %s' % (f, line, name)
class DocCoverage(unittest.TestCase):
def setUp(self):
remove = len(os.path.dirname(os.path.dirname(twisted.__file__)))+1
def visit(dirlist, directory, files):
if '__init__.py' in files:
d = directory[remove:].replace('/','.')
dirlist.append(d)
self.packageNames = []
os.path.walk(os.path.dirname(twisted.__file__),
visit, self.packageNames)
def testModules(self):
"""Looking for docstrings in all modules."""
docless = []
for packageName in self.packageNames:
if packageName in ('twisted.test',):
# because some stuff in here behaves oddly when imported
continue
try:
package = reflect.namedModule(packageName)
except ImportError, e:
# This is testing doc coverage, not importability.
# (Really, I don't want to deal with the fact that I don't
# have pyserial installed.)
# print e
pass
else:
docless.extend(self.modulesInPackage(packageName, package))
self.failIf(docless, "No docstrings in module files:\n"
"%s" % ('\n'.join(map(errorInFile, docless)),))
def modulesInPackage(self, packageName, package):
docless = []
directory = path.dirname(package.__file__)
for modfile in glob.glob(path.join(directory, '*.py')):
moduleName = inspect.getmodulename(modfile)
if moduleName == '__init__':
# These are tested by test_packages.
continue
elif moduleName in ('spelunk_gnome','gtkmanhole'):
# argh special case pygtk evil argh. How does epydoc deal
# with this?
continue
try:
module = reflect.namedModule('.'.join([packageName,
moduleName]))
except Exception, e:
# print moduleName, "misbehaved:", e
pass
else:
if not inspect.getdoc(module):
docless.append(modfile)
return docless
def testPackages(self):
"""Looking for docstrings in all packages."""
docless = []
for packageName in self.packageNames:
try:
package = reflect.namedModule(packageName)
except Exception, e:
# This is testing doc coverage, not importability.
# (Really, I don't want to deal with the fact that I don't
# have pyserial installed.)
# print e
pass
else:
if not inspect.getdoc(package):
docless.append(package.__file__.replace('.pyc','.py'))
self.failIf(docless, "No docstrings for package files\n"
"%s" % ('\n'.join(map(errorInFile, docless),)))
# This test takes a while and doesn't come close to passing. :(
testModules.skip = "Activate me when you feel like writing docstrings, and fixing GTK crashing bugs."
| apache-2.0 |
tencrance/profiling | profiling/viewer.py | 5 | 20846 | # -*- coding: utf-8 -*-
"""
profiling.viewer
~~~~~~~~~~~~~~~~
A text user interface application which inspects statistics. To run it
easily do:
.. sourcecode:: console
$ python -m profiling view SOURCE
::
viewer = StatisticsViewer()
loop = viewer.loop()
loop.run()
"""
from __future__ import absolute_import
from collections import deque
import urwid
from urwid import connect_signal as on
from . import sortkeys
__all__ = ['StatisticsTable', 'StatisticsViewer']
class Formatter(object):
def _markup(get_string, get_attr=None):
def markup(self, *args, **kwargs):
string = get_string(self, *args, **kwargs)
if get_attr is None:
return string
attr = get_attr(self, *args, **kwargs)
return (attr, string)
return markup
_numeric = {'align': 'right', 'wrap': 'clip'}
def _make_text(get_markup, **text_kwargs):
def make_text(self, *args, **kwargs):
markup = get_markup(self, *args, **kwargs)
return urwid.Text(markup, **text_kwargs)
return make_text
# percent
def format_percent(self, ratio, denom=1):
try:
ratio /= denom
except ZeroDivisionError:
ratio = 0
ratio = round(ratio, 4)
if ratio >= 1:
precision = 0
elif ratio >= 0.1:
precision = 1
else:
precision = 2
return ('{:.' + str(precision) + '%}').format(ratio)
def attr_ratio(self, ratio, denom=1):
try:
ratio /= denom
except ZeroDivisionError:
ratio = 0
if ratio > 0.9:
return 'danger'
elif ratio > 0.7:
return 'caution'
elif ratio > 0.3:
return 'warning'
elif ratio > 0.1:
return 'notice'
elif ratio <= 0:
return 'zero'
markup_percent = _markup(format_percent, attr_ratio)
make_percent_text = _make_text(markup_percent, **_numeric)
# int
def format_int(self, num):
return '{:.0f}'.format(num)
def attr_int(self, num):
return None if num else 'zero'
markup_int = _markup(format_int, attr_int)
make_int_text = _make_text(markup_int, **_numeric)
# int or n/a
def format_int_or_na(self, num):
if not num:
return 'n/a'
return self.format_int(num)
markup_int_or_na = _markup(format_int_or_na, attr_int)
make_int_or_na_text = _make_text(markup_int_or_na, **_numeric)
# time
def format_time(self, sec):
if sec == 0:
return '0'
elif sec < 1:
return '{:,.0f}'.format(sec * 1e6).replace(',', '.')
else:
return '{:.2f}s'.format(sec)
def attr_time(self, sec):
if sec == 0:
return 'zero'
elif sec < 1:
return 'usec'
else:
return 'sec'
markup_time = _markup(format_time, attr_time)
make_time_text = _make_text(markup_time, **_numeric)
# stat
def markup_stat(self, stat):
if stat.name:
loc = '({0}:{1})'.format(stat.module or stat.filename, stat.lineno)
return [('name', stat.name), ' ', ('loc', loc)]
else:
return ('loc', stat.module or stat.filename)
make_stat_text = _make_text(markup_stat, wrap='clip')
del _markup
del _make_text
fmt = Formatter()
class StatWidget(urwid.TreeWidget):
signals = ['expanded', 'collapsed']
icon_chars = ('+', '-', ' ') # collapsed, expanded, leaf
def __init__(self, node):
super(StatWidget, self).__init__(node)
self._w = urwid.AttrWrap(self._w, None, StatisticsViewer.focus_map)
@property
def expanded(self):
return self._expanded
@expanded.setter
def expanded(self, expanded):
in_init = not hasattr(self, 'expanded')
self._expanded = expanded
if in_init:
return
if expanded:
urwid.emit_signal(self, 'expanded')
else:
urwid.emit_signal(self, 'collapsed')
def selectable(self):
return True
def load_inner_widget(self):
node = self.get_node()
stat = node.get_value()
stats = node.get_root().get_value()
return StatisticsTable.make_columns([
fmt.make_stat_text(stat),
fmt.make_percent_text(stat.total_time, stats.cpu_time),
fmt.make_percent_text(stat.own_time, stats.cpu_time),
fmt.make_int_or_na_text(stat.calls),
fmt.make_time_text(stat.total_time),
fmt.make_time_text(stat.total_time_per_call),
fmt.make_time_text(stat.own_time),
fmt.make_time_text(stat.own_time_per_call)])
def get_indented_widget(self):
icon = self.get_mark()
widget = self.get_inner_widget()
node = self.get_node()
widget = urwid.Columns([('fixed', 1, icon), widget], 1)
indent = (node.get_depth() - 1)
widget = urwid.Padding(widget, left=indent)
return widget
def get_mark(self):
if self.is_leaf:
char = self.icon_chars[2]
else:
char = self.icon_chars[int(self.expanded)]
return urwid.SelectableIcon(('mark', char), 0)
def update_mark(self):
widget = self._w.base_widget
try:
widget.widget_list[0] = self.get_mark()
except (AttributeError, TypeError):
pass
def update_expanded_icon(self):
self.update_mark()
def expand(self):
self.expanded = True
self.update_mark()
def collapse(self):
self.expanded = False
self.update_mark()
def keypress(self, size, key):
command = self._command_map[key]
if command == urwid.ACTIVATE:
key = '-' if self.expanded else '+'
elif command == urwid.CURSOR_RIGHT:
key = '+'
elif self.expanded and command == urwid.CURSOR_LEFT:
key = '-'
return super(StatWidget, self).keypress(size, key)
class EmptyWidget(urwid.Widget):
"""A widget which doesn't render anything."""
def __init__(self, rows=0):
super(EmptyWidget, self).__init__()
self._rows = rows
def rows(self, size, focus=False):
return self._rows
def render(self, size, focus=False):
return urwid.SolidCanvas(' ', size[0], self.rows(size, focus))
class StatisticsWidget(StatWidget):
def load_inner_widget(self):
return EmptyWidget()
def get_indented_widget(self):
return self.get_inner_widget()
def get_mark(self):
raise TypeError('Statistics widget has no mark')
def update(self):
pass
def unexpand(self):
pass
class StatNodeBase(urwid.TreeNode):
def __init__(self, stat=None, parent=None, key=None, depth=None,
table=None):
super(StatNodeBase, self).__init__(stat, parent, key, depth)
self.table = table
def get_focus(self):
widget, focus = super(StatNodeBase, self).get_focus()
if self.table is not None:
self.table.walker.set_focus(self)
return widget, focus
def get_widget(self, reload=False):
if self._widget is None or reload:
self._widget = self.load_widget()
self.setup_widget(self._widget)
return self._widget
def load_widget(self):
return self._widget_class(self)
def setup_widget(self, widget):
if self.table is None:
return
stat = self.get_value()
if hash(stat) in self.table._expanded_stat_hashes:
widget.expand()
class NullStatWidget(StatWidget):
def __init__(self, node):
urwid.TreeWidget.__init__(self, node)
def get_indented_widget(self):
widget = urwid.Text(('weak', '- Not Available -'), align='center')
widget = urwid.Filler(widget)
widget = urwid.BoxAdapter(widget, 3)
return widget
class NullStatNode(StatNodeBase):
_widget_class = NullStatWidget
class LeafStatNode(StatNodeBase):
_widget_class = StatWidget
class StatNode(StatNodeBase, urwid.ParentNode):
def total_usage(self):
stat = self.get_value()
stats = self.get_root().get_value()
try:
return stat.total_time / stats.cpu_time
except AttributeError:
return 0.0
def load_widget(self):
if self.is_root():
widget_class = StatisticsWidget
else:
widget_class = StatWidget
widget = widget_class(self)
widget.collapse()
return widget
def setup_widget(self, widget):
super(StatNode, self).setup_widget(widget)
if self.get_depth() == 0:
# just expand the root node
widget.expand()
return
table = self.table
if table is None:
return
on(widget, 'expanded', table._widget_expanded, widget)
on(widget, 'collapsed', table._widget_collapsed, widget)
def load_child_keys(self):
stat = self.get_value()
if stat is None:
return ()
return stat.sorted(self.table.order)
def load_child_node(self, stat):
depth = self.get_depth() + 1
node_class = StatNode if len(stat) else LeafStatNode
return node_class(stat, self, stat, depth, self.table)
class StatisticsListBox(urwid.TreeListBox):
signals = ['focus_changed']
def change_focus(self, *args, **kwargs):
super(StatisticsListBox, self).change_focus(*args, **kwargs)
focus = self.get_focus()
urwid.emit_signal(self, 'focus_changed', focus)
class StatisticsWalker(urwid.TreeWalker):
signals = ['focus_changed']
def set_focus(self, focus):
super(StatisticsWalker, self).set_focus(focus)
urwid.emit_signal(self, 'focus_changed', focus)
class StatisticsTable(urwid.WidgetWrap):
#: The column declarations.
columns = [
# name, align, width, order
('FUNCTION', 'left', ('weight', 1), sortkeys.by_function),
('TOTAL%', 'right', (6,), None),
('OWN%', 'right', (6,), None),
('CALLS', 'right', (6,), sortkeys.by_calls),
('TOTAL', 'right', (10,), sortkeys.by_total_time),
('/CALL', 'right', (6,), sortkeys.by_total_time_per_call),
('OWN', 'right', (10,), sortkeys.by_own_time),
('/CALL', 'right', (6,), sortkeys.by_own_time_per_call),
]
#: The initial order.
order = sortkeys.by_total_time
#: Whether the viewer is active.
active = False
#: Whether the viewer is paused.
paused = False
title = None
stats = None
time = None
def __init__(self):
cls = type(self)
self._expanded_stat_hashes = set()
self.walker = StatisticsWalker(NullStatNode())
on(self.walker, 'focus_changed', self._walker_focus_changed)
tbody = StatisticsListBox(self.walker)
thead = urwid.AttrMap(cls.make_columns([
urwid.Text(name, align, 'clip')
for name, align, __, __ in self.columns
]), None)
header = urwid.Columns([])
widget = urwid.Frame(tbody, urwid.Pile([header, thead]))
super(StatisticsTable, self).__init__(widget)
self.update_frame()
@classmethod
def make_columns(cls, column_widgets):
widget_list = []
widths = (width for __, __, width, __ in cls.columns)
for width, widget in zip(widths, column_widgets):
widget_list.append(width + (widget,))
return urwid.Columns(widget_list, 1)
@property
def tbody(self):
return self._w.body
@tbody.setter
def tbody(self, body):
self._w.body = body
@property
def thead(self):
return self._w.header.contents[1][0]
@thead.setter
def thead(self, thead):
self._w.header.contents[1] = (thead, ('pack', None))
@property
def header(self):
return self._w.header.contents[0][0]
@header.setter
def header(self, header):
self._w.header.contents[0] = (header, ('pack', None))
@property
def footer(self):
return self._w.footer
@footer.setter
def footer(self, footer):
self._w.footer = footer
def get_focus(self):
return self.tbody.get_focus()
def set_focus(self, focus):
self.tbody.set_focus(focus)
def get_path(self):
"""Gets the path to the focused statistic. Each step is a hash of
statistic object.
"""
path = deque()
__, node = self.get_focus()
while not node.is_root():
stat = node.get_value()
path.appendleft(hash(stat))
node = node.get_parent()
return path
def find_node(self, node, path):
"""Finds a node by the given path from the given node."""
for hash_value in path:
if isinstance(node, LeafStatNode):
break
for stat in node.get_child_keys():
if hash(stat) == hash_value:
node = node.get_child_node(stat)
break
else:
break
return node
def get_stats(self):
return self.stats
def set_stats(self, stats, title=None, time=None):
self.stats = stats
self.title = title
self.time = time
if not self.paused:
self.activate()
self.refresh()
def sort_stats(self, order=sortkeys.by_total_time):
assert callable(order)
self.order = order
self.refresh()
def shift_order(self, delta):
orders = [order for __, __, __, order in self.columns if order]
x = orders.index(self.order)
order = orders[(x + delta) % len(orders)]
self.sort_stats(order)
def pause(self):
self.paused = True
self.update_frame()
def resume(self):
self.paused = False
try:
stats, title, time = self._pending
except AttributeError:
self.activate()
else:
del self._pending
self.set_stats(stats, title, time)
def activate(self):
self.active = True
self.update_frame()
def inactivate(self):
self.active = False
self.update_frame()
def refresh(self):
stats = self.get_stats()
node = StatNode(stats, table=self)
path = self.get_path()
node = self.find_node(node, path)
self.set_focus(node)
def update_frame(self, focus=None):
# set thead attr
if self.paused:
thead_attr = 'thead.paused'
elif not self.active:
thead_attr = 'thead.inactive'
else:
thead_attr = 'thead'
self.thead.set_attr_map({None: thead_attr})
# set sorting column in thead attr
for x, (__, __, __, order) in enumerate(self.columns):
attr = thead_attr + '.sorted' if order is self.order else None
widget = self.thead.base_widget.contents[x][0]
text, __ = widget.get_text()
widget.set_text((attr, text))
if self.paused:
return
# update header
stats = self.get_stats()
if stats is None:
return
title = self.title
time = self.time
if title or time:
if time is not None:
time_string = '{:%H:%M:%S}'.format(time)
if title and time:
markup = [('weak', title), ' ', time_string]
elif title:
markup = title
else:
markup = time_string
meta_info = urwid.Text(markup, align='right')
else:
meta_info = None
fraction_string = '({0}/{1})'.format(
fmt.format_time(stats.cpu_time),
fmt.format_time(stats.wall_time))
cpu_info = urwid.Text([
'CPU ', fmt.markup_percent(stats.cpu_usage),
' ', ('weak', fraction_string)])
# set header columns
col_opts = ('weight', 1, False)
self.header.contents = \
[(w, col_opts) for w in [cpu_info, meta_info] if w]
def focus_hotspot(self, size):
widget, __ = self.tbody.get_focus()
while widget:
node = widget.get_node()
widget.expand()
widget = widget.first_child()
self.tbody.change_focus(size, node)
def defocus(self):
__, node = self.get_focus()
self.set_focus(node.get_root())
def keypress(self, size, key):
base = super(StatisticsTable, self)
command = self._command_map[key]
if key == ']':
self.shift_order(+1)
return True
elif key == '[':
self.shift_order(-1)
return True
elif key == '>':
self.focus_hotspot(size)
return True
elif command == self._command_map['esc']:
self.defocus()
return True
elif command == self._command_map['right']:
widget, node = self.tbody.get_focus()
if widget.expanded:
heavy_widget = widget.first_child()
if heavy_widget is not None:
heavy_node = heavy_widget.get_node()
self.tbody.change_focus(size, heavy_node)
return True
elif command == self._command_map['left']:
widget, node = self.tbody.get_focus()
if not widget.expanded:
parent_node = node.get_parent()
if not parent_node.is_root():
self.tbody.change_focus(size, parent_node)
return True
elif command == self._command_map[' ']:
if self.paused:
self.resume()
else:
self.pause()
return True
return base.keypress(size, key)
# signal handlers
def _walker_focus_changed(self, focus):
self.update_frame(focus)
def _widget_expanded(self, widget):
stat = widget.get_node().get_value()
self._expanded_stat_hashes.add(hash(stat))
def _widget_collapsed(self, widget):
stat = widget.get_node().get_value()
self._expanded_stat_hashes.discard(hash(stat))
class StatisticsViewer(object):
weak_color = 'light green'
palette = [
('weak', weak_color, ''),
('focus', 'standout', '', 'standout'),
# ui
('thead', 'dark cyan, standout', '', 'standout'),
('thead.paused', 'dark red, standout', '', 'standout'),
('thead.inactive', 'brown, standout', '', 'standout'),
('mark', 'dark cyan', ''),
# risk
('danger', 'dark red', '', 'blink'),
('caution', 'light red', '', 'blink'),
('warning', 'brown', '', 'blink'),
('notice', 'dark green', '', 'blink'),
# clock
('sec', 'brown', ''),
('msec', 'dark green', ''),
('usec', '', ''),
# etc
('zero', weak_color, ''),
('name', 'bold', ''),
('loc', 'dark blue', ''),
]
# add thead.*.sorted palette entries
for entry in palette[:]:
attr = entry[0]
if attr is None or not attr.startswith('thead'):
continue
palette.append((attr + '.sorted', entry[1] + ', underline',
entry[2], entry[3] + ', underline'))
focus_map = {None: 'focus'}
focus_map.update((x[0], 'focus') for x in palette)
def unhandled_input(self, key):
if key in ('q', 'Q'):
raise urwid.ExitMainLoop()
def __init__(self):
self.table = StatisticsTable()
self.widget = urwid.Padding(self.table, right=1)
def loop(self, *args, **kwargs):
kwargs.setdefault('unhandled_input', self.unhandled_input)
loop = urwid.MainLoop(self.widget, self.palette, *args, **kwargs)
return loop
def set_stats(self, stats, title=None, time=None):
self.table.set_stats(stats, title, time)
def activate(self):
return self.table.activate()
def inactivate(self):
return self.table.inactivate()
def use_vim_command_map(self):
urwid.command_map['h'] = urwid.command_map['left']
urwid.command_map['j'] = urwid.command_map['down']
urwid.command_map['k'] = urwid.command_map['up']
urwid.command_map['l'] = urwid.command_map['right']
def use_game_command_map(self):
urwid.command_map['a'] = urwid.command_map['left']
urwid.command_map['s'] = urwid.command_map['down']
urwid.command_map['w'] = urwid.command_map['up']
urwid.command_map['d'] = urwid.command_map['right']
| bsd-3-clause |
bradleyhd/netsim | nodes_vs_routing_speed.py | 1 | 2878 | import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.optimize import curve_fit
def linear(x, a, b):
return a * x + b
def quadratic(x, a, b, c):
return a * x**2 + b * x + c
def exponential(x, a, b, c):
    # note: despite the name, this fits a power-law model a * x**b + c
    return a * x**b + c
fig = plt.figure(num=None, figsize=(12, 8), dpi=300, facecolor='k', edgecolor='k')
xs = [[1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292]]
ys = [[0.00013309850001519408, 0.00059208550001699223, 0.002604027000003839, 0.004665461000030291, 0.014662985999962075, 0.023410306499954459, 0.041176939000251878], [0.00014861549998101964, 0.00055641999999522795, 0.002577900000005684, 0.0054275369999459144, 0.021226498000032734, 0.029786237500047719, 0.059782716000881919], [0.00012334000000180367, 0.00043368899999052246, 0.0020054734999632728, 0.005848614000001362, 0.014609930999995413, 0.019599954500336025, 0.028973604500606598], [0.00012613299999486571, 0.00044437049999146438, 0.0021501399999692694, 0.0055929929999933847, 0.019908546500118973, 0.039582631500252319, 0.054390303499531001]]
ys = np.array(ys) * 1000
def graph(i, label, color, marker, l_marker):
y = np.array(ys[i])
x = np.array(xs[i])
xl = np.linspace(np.min(x), np.max(x), 500)
popt, pcov = curve_fit(exponential, x, y)
plt.scatter(x, y, label=label, color=color, marker=marker)
plt.plot(xl, exponential(xl, *popt), color=color, linestyle=l_marker)
blue = '#5738FF'
purple = '#E747E7'
orange = '#E7A725'
green = '#A1FF47'
red = '#FF1E43'
gray = '#333333'
white = 'w'
graph(0, 'EDS5 - original graph', red, 'o', '--')
graph(1, 'N5 - original graph', purple, 's', '--')
graph(2, 'EDS5 - decision graph', blue, '^', '--')
graph(3, 'N5 - decision graph', white, 'D', '--')
ax = fig.gca()
plt.title('Effects of Node Ordering on Routing Speed', color=white)
plt.xlabel('Effective $\\vert V\/\\vert$')
plt.ylabel('Routing Time (ms)')
plt.axes().set_axis_bgcolor('black')
ax.xaxis.label.set_color(white)
ax.yaxis.label.set_color(white)
ax.tick_params(axis='x', colors=white)
ax.tick_params(axis='y', colors=white)
ax.spines['bottom'].set_color(white)
ax.spines['top'].set_color(white)
ax.spines['left'].set_color(white)
ax.spines['right'].set_color(white)
legend = plt.legend(loc=0, numpoints=1, framealpha=0.0)
legend.get_frame().set_facecolor('k')
max_x = np.max(np.array(xs))
max_y = np.max(np.array(ys))
min_x = np.min(np.array(xs))
min_y = 0 - (max_y * 0.01)
min_x = 0 - (max_x * 0.01)
max_x *= 1.01
max_y *= 1.01
plt.axes().set_xlim([min_x, max_x])
plt.axes().set_ylim([min_y, max_y])
for text in legend.get_texts():
text.set_color(white)
# plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.savefig('nodes_vs_routing_speed.png', transparent=True)
#plt.show() | gpl-3.0 |
esc/dask | dask/dataframe/shuffle.py | 4 | 2967 | from itertools import count
from collections import Iterator
from math import ceil
from toolz import merge, accumulate, merge_sorted
import toolz
from operator import getitem, setitem
import pandas as pd
import numpy as np
from pframe import pframe
from .. import threaded
from .core import DataFrame, Series, get, names
from ..compatibility import unicode
from ..utils import ignoring
tokens = ('-%d' % i for i in count(1))
def set_index(f, index, npartitions=None, **kwargs):
""" Set DataFrame index to new column
Sorts index and realigns Dataframe to new sorted order. This shuffles and
repartitions your data.
"""
npartitions = npartitions or f.npartitions
if not isinstance(index, Series):
index2 = f[index]
else:
index2 = index
divisions = (index2
.quantiles(np.linspace(0, 100, npartitions+1)[1:-1])
.compute())
return f.set_partition(index, divisions, **kwargs)
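# Illustrative use (hypothetical column name): set_index(df, 'timestamp', npartitions=8)
# samples quantiles of the new index to choose divisions, then shuffles each row into
# the partition whose division range contains its index value.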
partition_names = ('set_partition-%d' % i for i in count(1))
def set_partition(f, index, divisions, get=threaded.get, **kwargs):
""" Set new partitioning along index given divisions """
divisions = unique(divisions)
name = next(names)
if isinstance(index, Series):
assert index.divisions == f.divisions
dsk = dict(((name, i), (f._partition_type.set_index, block, ind))
for i, (block, ind) in enumerate(zip(f._keys(), index._keys())))
f2 = type(f)(merge(f.dask, index.dask, dsk), name,
f.column_info, f.divisions)
else:
dsk = dict(((name, i), (f._partition_type.set_index, block, index))
for i, block in enumerate(f._keys()))
f2 = type(f)(merge(f.dask, dsk), name, f.column_info, f.divisions)
head = f2.head()
pf = pframe(like=head, divisions=divisions, **kwargs)
def append(block):
pf.append(block)
return 0
f2.map_blocks(append).compute(get=get)
pf.flush()
return from_pframe(pf)
def from_pframe(pf):
""" Load dask.array from pframe """
name = next(names)
dsk = dict(((name, i), (pframe.get_partition, pf, i))
for i in range(pf.npartitions))
return DataFrame(dsk, name, pf.columns, pf.divisions)
def unique(divisions):
""" Polymorphic unique function
>>> list(unique([1, 2, 3, 1, 2, 3]))
[1, 2, 3]
>>> unique(np.array([1, 2, 3, 1, 2, 3]))
array([1, 2, 3])
>>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))
[Alice, Bob]
Categories (2, object): [Alice, Bob]
"""
if isinstance(divisions, np.ndarray):
return np.unique(divisions)
if isinstance(divisions, pd.Categorical):
return pd.Categorical.from_codes(np.unique(divisions.codes),
divisions.categories, divisions.ordered)
if isinstance(divisions, (tuple, list, Iterator)):
return tuple(toolz.unique(divisions))
raise NotImplementedError()
| bsd-3-clause |
marcydoty/geraldo | site/newsite/site-geraldo/appengine_django/serializer/python.py | 20 | 4799 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A Python "serializer", based on the default Django python serializer.
The only customisation is in the deserialization process which needs to take
special care to resolve the name and parent attributes of the key for each
entity and also recreate the keys for any references appropriately.
"""
from django.conf import settings
from django.core.serializers import base
from django.core.serializers import python
from django.db import models
from google.appengine.api import datastore_types
from google.appengine.ext import db
from django.utils.encoding import smart_unicode
Serializer = python.Serializer
class FakeParent(object):
"""Fake parent 'model' like object.
This class exists to allow a parent object to be provided to a new model
without having to load the parent instance itself.
"""
def __init__(self, parent_key):
self._entity = parent_key
def Deserializer(object_list, **options):
"""Deserialize simple Python objects back into Model instances.
It's expected that you pass the Python objects themselves (instead of a
stream or a string) to the constructor
"""
models.get_apps()
for d in object_list:
# Look up the model and starting build a dict of data for it.
Model = python._get_model(d["model"])
data = {}
key = resolve_key(Model._meta.module_name, d["pk"])
if key.name():
data["key_name"] = key.name()
parent = None
if key.parent():
parent = FakeParent(key.parent())
m2m_data = {}
# Handle each field
for (field_name, field_value) in d["fields"].iteritems():
if isinstance(field_value, str):
field_value = smart_unicode(
field_value, options.get("encoding",
settings.DEFAULT_CHARSET),
strings_only=True)
field = Model.properties()[field_name]
if isinstance(field, db.Reference):
# Resolve foreign key references.
data[field.name] = resolve_key(Model._meta.module_name, field_value)
if not data[field.name].name():
raise base.DeserializationError(u"Cannot load Reference with "
"unnamed key: '%s'" % field_value)
else:
data[field.name] = field.validate(field_value)
# Create the new model instance with all it's data, but no parent.
object = Model(**data)
# Now add the parent into the hidden attribute, bypassing the type checks
# in the Model's __init__ routine.
object._parent = parent
# When the deserialized object is saved our replacement DeserializedObject
# class will set object._parent to force the real parent model to be loaded
# the first time it is referenced.
yield base.DeserializedObject(object, m2m_data)
def resolve_key(model, key_data):
"""Creates a Key instance from a some data.
Args:
model: The name of the model this key is being resolved for. Only used in
the fourth case below (a plain key_name string).
key_data: The data to create a key instance from. May be in four formats:
* The str() output of a key instance. Eg. A base64 encoded string.
* The repr() output of a key instance. Eg. A string for eval().
* A list of arguments to pass to db.Key.from_path.
* A single string value, being the key_name of the instance. When this
format is used the resulting key has no parent, and is for the model
named in the model parameter.
Returns:
An instance of db.Key. If the data cannot be used to create a Key instance
an error will be raised.
"""
if isinstance(key_data, list):
# The key_data is a from_path sequence.
return db.Key.from_path(*key_data)
elif isinstance(key_data, basestring):
if key_data.find("from_path") != -1:
# key_data is encoded in repr(key) format
return eval(key_data)
else:
try:
# key_data encoded a str(key) format
return db.Key(key_data)
except datastore_types.datastore_errors.BadKeyError, e:
# Final try, assume it's a plain key name for the model.
return db.Key.from_path(model, key_data)
else:
raise base.DeserializationError(u"Invalid key data: '%s'" % key_data)
| lgpl-3.0 |
nnikhilsingh/terrence | tao/caching.py | 1 | 20893 | # -*- coding: utf-8 -*-
import os
import time
import hashlib
import gzip
import bz2
import re
from sqlalchemy.orm.exc import NoResultFound
from tao.database import SearchEngineResultsPage
from tao.parsing import parse_serp
from tao.outputconverter import store_serp_result
import logging
"""
GoogleScraper is a complex application and thus searching is error prone. While developing,
you may need to repeat the same searches several times and you might end up being banned by
the search engine providers. This is why all searches are cached by default.
Every SERP page is cached in a separate file. In the future, it might be more straightforward to
cache scraping jobs in archives (zip files).
What determines the uniqueness of a SERP result?
- The complete url (because in URLs search queries and params are included)
- The scrape mode: Raw Http might request different resources than a browser.
- Optionally the http headers (because different User-Agents yield different results)
Using these three pieces of information would guarantee that we cache only unique requests,
but then we couldn't read back the information of the cache files, since these parameters
are only available at runtime of the scrapers. So we have to be satisfied with the
keyword, search_engine and scrapemode as identifying params.
How does caching work on a higher level?
Assume the user interrupted a scrape job at 1000/2000 keywords, leaving quite a few
keywords still to scrape. The previously parsed 1000 results are already
stored in the database and shouldn't be added a second time.
"""
logger = logging.getLogger(__name__)
ALLOWED_COMPRESSION_ALGORITHMS = ('gz', 'bz2')
class InvalidConfigurationFileException(Exception):
"""
Used when the cache module cannot
determine the kind (compression for instance) of a
configuration file
"""
pass
class CompressedFile(object):
"""Read and write the data of a compressed file.
    Used to cache files for tao.
Supported algorithms: gz, bz2
>>> import os
>>> f = CompressedFile('/tmp/test.txt', algorithm='gz')
>>> f.write('hello world')
>>> assert os.path.exists('/tmp/test.txt.gz')
>>> f2 = CompressedFile('/tmp/test.txt.gz', algorithm='gz')
>>> assert f2.read() == 'hello world'
"""
def __init__(self, path, algorithm='gz'):
"""Create a new compressed file to read and write data to.
Args:
algorithm: Which algorithm to use.
path: A valid file path to the file to read/write. Depends
on the action called.
@todo: it would be a better approach to pass an Algorithm object instead of a string
"""
self.algorithm = algorithm
assert self.algorithm in ALLOWED_COMPRESSION_ALGORITHMS, \
'{algo} is not an supported compression algorithm'.format(algo=self.algorithm)
if path.endswith(self.algorithm):
self.path = path
else:
self.path = '{path}.{ext}'.format(path=path, ext=algorithm)
self.readers = {
'gz': self.read_gz,
'bz2': self.read_bz2
}
self.writers = {
'gz': self.write_gz,
'bz2': self.write_bz2
}
def read_gz(self):
with gzip.open(self.path, 'rb') as f:
return f.read().decode()
def read_bz2(self):
with bz2.open(self.path, 'rb') as f:
return f.read().decode()
def write_gz(self, data):
with gzip.open(self.path, 'wb') as f:
f.write(data)
def write_bz2(self, data):
with bz2.open(self.path, 'wb') as f:
f.write(data)
def read(self):
assert os.path.exists(self.path)
return self.readers[self.algorithm]()
def write(self, data):
if not isinstance(data, bytes):
data = data.encode()
return self.writers[self.algorithm](data)
class CacheManager():
"""
Manages caching for tao.
"""
def __init__(self, config):
self.config = config
self.maybe_create_cache_dir()
def maybe_create_cache_dir(self):
if self.config.get('do_caching', True):
cd = self.config.get('cachedir', '.scrapecache')
if not os.path.exists(cd):
os.mkdir(cd)
def maybe_clean_cache(self):
"""
Clean the cache.
Clean all cached searches (the obtained html code) in the cache directory iff
        the respective files are older than specified in the configuration. Defaults to 48 hours.
"""
cachedir = self.config.get('cachedir', '.scrapecache')
if os.path.exists(cachedir):
for fname in os.listdir(cachedir):
path = os.path.join(cachedir, fname)
if time.time() > os.path.getmtime(path) + (60 * 60 * int(self.config.get('clean_cache_after', 48))):
# Remove the whole directory if necessary
if os.path.isdir(path):
import shutil
shutil.rmtree(path)
else:
os.remove(os.path.join(cachedir, fname))
def cached_file_name(self, keyword, search_engine, scrape_mode, page_number):
"""Make a unique file name from the search engine search request.
Important! The order of the sequence is darn important! If search queries have the same
words but in a different order, they are unique searches.
Args:
keyword: The keyword that was used in the search.
search_engine: The search engine the keyword was scraped for.
scrapemode: The scrapemode that was used.
page_number: The number of the SERP page.
Returns:
A unique file name based on the parameters of the search request.
"""
assert isinstance(keyword, str), 'Keyword {} must be a string'.format(keyword)
assert isinstance(search_engine, str), 'Search engine {} must be a string'.format(search_engine)
assert isinstance(scrape_mode, str), 'Scrapemode {} needs to be a string'.format(scrape_mode)
assert isinstance(page_number, int), 'Page_number {} needs to be an int'.format(page_number)
unique = [keyword, search_engine, scrape_mode, page_number]
sha = hashlib.sha256()
sha.update(b''.join(str(s).encode() for s in unique))
return '{file_name}.{extension}'.format(file_name=sha.hexdigest(), extension='cache')
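    # Illustrative usage sketch (hypothetical values, not part of the original module):
    #
    #   manager = CacheManager({'do_caching': True, 'cachedir': '.scrapecache'})
    #   fname = manager.cached_file_name('some keyword', 'google', 'http', 1)
    #   # fname is the sha256 hexdigest of the four parameters plus '.cache'
    #
    # The same four parameters always yield the same file name, which is what
    # get_cached() relies on to look up a previously stored SERP page.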
def get_cached(self, keyword, search_engine, scrapemode, page_number):
"""Loads a cached SERP result.
Args:
keyword: The keyword that was used in the search.
search_engine: The search engine the keyword was scraped for.
scrapemode: The scrapemode that was used.
page_number: page_number
Returns:
The contents of the HTML that was shipped while searching. False if there couldn't
be found a file based on the above params.
"""
if self.config.get('do_caching', False):
fname = self.cached_file_name(keyword, search_engine, scrapemode, page_number)
cdir = self.config.get('cachedir', '.scrapecache')
if fname in os.listdir(cdir):
                # If the cached file is older than the configured 'clean_cache_after'
                # threshold (48 hours by default), return False and thus make a new fresh request.
try:
modtime = os.path.getmtime(os.path.join(cdir, fname))
except FileNotFoundError:
return False
                if (time.time() - modtime) / 60 / 60 > int(self.config.get('clean_cache_after', 48)):
return False
path = os.path.join(cdir, fname)
return self.read_cached_file(path)
else:
return False
def read_cached_file(self, path):
"""Read a compressed or uncompressed file.
The compressing schema is determined by the file extension. For example
a file that ends with .gz needs to be gunzipped.
Supported algorithms:
gzip and bzip2
Args:
path: The path to the cached file.
Returns:
The data of the cached file as a string.
Raises:
InvalidConfigurationFileException: When the type of the cached file
cannot be determined.
"""
if self.config.get('do_caching', False):
ext = path.split('.')[-1]
# The path needs to have an extension in any case.
# When uncompressed, ext is 'cache', else it is the
# compressing scheme file ending like .gz or .bz2 ...
assert ext in ALLOWED_COMPRESSION_ALGORITHMS or ext == 'cache', 'Invalid extension: {}'.format(ext)
if ext == 'cache':
with open(path, 'r') as fd:
try:
data = fd.read()
return data
except UnicodeDecodeError as e:
logger.warning(str(e))
# If we get this error, the cache files are probably
# compressed but the 'compress_cached_files' flag was
# set to False. Try to decompress them, but this may
                        # lead to an infinite recursion. This isn't proper coding,
# but convenient for the end user.
self.config['compress_cached_files'] = True
elif ext in ALLOWED_COMPRESSION_ALGORITHMS:
f = CompressedFile(path)
return f.read()
else:
                raise InvalidConfigurationFileException('"{}" is an invalid configuration file.'.format(path))
def cache_results(self, parser, query, search_engine, scrape_mode, page_number, db_lock=None):
"""Stores the html of an parser in a file.
The file name is determined by the parameters query, search_engine, scrape_mode and page_number.
See cached_file_name() for more information.
This will always write(overwrite) the cached file. If compress_cached_files is
True, the page is written in bytes (obviously).
Args:
parser: A parser with the data to cache.
query: The keyword that was used in the search.
search_engine: The search engine the keyword was scraped for.
scrape_mode: The scrapemode that was used.
page_number: The page number that the serp page is.
            db_lock: If a db_lock is given, all actions are wrapped in this lock.
"""
if self.config.get('do_caching', False):
if db_lock:
db_lock.acquire()
if self.config.get('minimize_caching_files', True):
html = parser.cleaned_html
else:
html = parser.html
fname = self.cached_file_name(query, search_engine, scrape_mode, page_number)
cachedir = self.config.get('cachedir', '.scrapecache')
path = os.path.join(cachedir, fname)
if self.config.get('compress_cached_files'):
algorithm = self.config.get('compressing_algorithm', 'gz')
f = CompressedFile(path, algorithm=algorithm)
f.write(html)
else:
with open(path, 'w') as fd:
if isinstance(html, bytes):
fd.write(html.decode())
else:
fd.write(html)
if db_lock:
db_lock.release()
def _get_all_cache_files(self):
"""Return all files found in the cachedir.
Returns:
All files that have the string "cache" in it within the cache directory.
Files are either uncompressed filename.cache or are compressed with a
compression algorithm: "filename.cache.zip"
"""
files = set()
for dirpath, dirname, filenames in os.walk(self.config.get('cachedir', '.scrapecache')):
for name in filenames:
if 'cache' in name:
files.add(os.path.join(dirpath, name))
return files
def _caching_is_one_to_one(self, keywords, search_engine, scrapemode, page_number):
"""Check whether all keywords map to a unique file name.
Args:
keywords: All keywords for which to check the uniqueness of the hash
search_engine: The search engine the keyword was scraped for.
scrapemode: The scrapemode that was used.
page_number: page_number
Returns:
True if all keywords map to a unique hash and False if not.
"""
mappings = {}
for kw in keywords:
file_hash = self.cached_file_name(kw, search_engine, scrapemode, page_number)
if file_hash not in mappings:
mappings.update({file_hash: [kw, ]})
else:
mappings[file_hash].append(kw)
duplicates = [v for k, v in mappings.items() if len(v) > 1]
if duplicates:
logger.info('Not one-to-one. {}'.format(duplicates))
return False
else:
logger.info('one-to-one')
return True
def parse_all_cached_files(self, scrape_jobs, session, scraper_search):
"""Walk recursively through the cachedir (as given by the Config) and parse all cached files.
Args:
session: An sql alchemy session to add the entities
scraper_search: Abstract object representing the current search.
Returns:
The scrape jobs that couldn't be parsed from the cache directory.
"""
files = self._get_all_cache_files()
num_cached = num_total = 0
mapping = {}
for job in scrape_jobs:
cache_name = self.cached_file_name(
job['query'],
job['search_engine'],
job['scrape_method'],
job['page_number']
)
mapping[cache_name] = job
num_total += 1
for path in files:
            # strip off the extension of the path if it has any
fname = os.path.split(path)[1]
clean_filename = fname
for ext in ALLOWED_COMPRESSION_ALGORITHMS:
if fname.endswith(ext):
                    # drop the '.<ext>' suffix; str.rstrip() strips characters, not a suffix
                    clean_filename = fname[:-(len(ext) + 1)]
job = mapping.get(clean_filename, None)
if job:
# We found a file that contains the keyword, search engine name and
# search mode that fits our description. Let's see if there is already
# an record in the database and link it to our new ScraperSearch object.
serp = self.get_serp_from_database(session, job['query'], job['search_engine'], job['scrape_method'],
job['page_number'])
if not serp:
serp = self.parse_again(fname, job['search_engine'], job['scrape_method'], job['query'])
serp.scraper_searches.append(scraper_search)
session.add(serp)
if num_cached % 200 == 0:
session.commit()
store_serp_result(serp, self.config)
num_cached += 1
scrape_jobs.remove(job)
logger.info('{} cache files found in {}'.format(len(files), self.config.get('cachedir')))
logger.info('{}/{} objects have been read from the cache. {} remain to get scraped.'.format(
num_cached, num_total, num_total - num_cached))
session.add(scraper_search)
session.commit()
return scrape_jobs
def parse_again(self, fname, search_engine, scrape_method, query):
"""
@todo: `scrape_method` is not used here -> check if scrape_method is passed to this function and remove it
"""
path = os.path.join(self.config.get('cachedir', '.scrapecache'), fname)
html = self.read_cached_file(path)
return parse_serp(
self.config,
html=html,
search_engine=search_engine,
query=query
)
def get_serp_from_database(self, session, query, search_engine, scrape_method, page_number):
try:
serp = session.query(SearchEngineResultsPage).filter(
SearchEngineResultsPage.query == query,
SearchEngineResultsPage.search_engine_name == search_engine,
SearchEngineResultsPage.scrape_method == scrape_method,
SearchEngineResultsPage.page_number == page_number).first()
return serp
except NoResultFound:
# that shouldn't happen
# we have a cache file that matches the above identifying information
# but it was never stored to the database.
return False
def clean_cachefiles(self):
"""Clean silly html from all cachefiles in the cachdir"""
if input(
'Do you really want to strip all cache files from bloating tags such as <script> and <style>? ').startswith(
'y'):
import lxml.html
from lxml.html.clean import Cleaner
cleaner = Cleaner()
cleaner.style = True
cleaner.scripts = True
cleaner.javascript = True
for file in self._get_all_cache_files():
cfile = CompressedFile(file)
data = cfile.read()
cleaned = lxml.html.tostring(cleaner.clean_html(lxml.html.fromstring(data)))
cfile.write(cleaned)
logger.info('Cleaned {}. Size before: {}, after {}'.format(file, len(data), len(cleaned)))
def fix_broken_cache_names(self, url, search_engine, scrapemode, page_number):
"""Fix broken cache names.
Args:
url: A list of strings to add to each cached_file_name() call.
        @todo: `url` is not used here -> check if url is passed to this function and remove it
"""
files = self._get_all_cache_files()
logger.debug('{} cache files found in {}'.format(len(files), self.config.get('cachedir', '.scrapecache')))
r = re.compile(r'<title>(?P<kw>.*?) - Google Search</title>')
i = 0
for path in files:
fname = os.path.split(path)[1].strip()
data = self.read_cached_file(path)
infilekws = r.search(data).group('kw')
realname = self.cached_file_name(infilekws, search_engine, scrapemode, page_number)
if fname != realname:
                logger.debug('The search query in the title element in file {} differs from the hash of its name. Fixing...'.format(path))
src = os.path.abspath(path)
dst = os.path.abspath(os.path.join(os.path.split(path)[0], realname))
logger.debug('Renamed from {} => {}'.format(src, dst))
os.rename(src, dst)
i += 1
logger.debug('Renamed {} files.'.format(i))
def cached(self, f, attr_to_cache=None):
"""Decorator that makes return value of functions cachable.
Any function that returns a value and that is decorated with
cached will be supplied with the previously calculated result of
an earlier call. The parameter name with the cached value may
be set with attr_to_cache.
Args:
attr_to_cache: The name of attribute whose data
is cachable.
Returns: The modified and wrapped function.
@todo: `attr_to_cache` is not used here -> check if scrape_method is passed to this function and remove it
"""
def wraps(*args, **kwargs):
cached_value = self.get_cached(*args, params=kwargs)
if cached_value:
f(*args, attr_to_cache=cached_value, **kwargs)
else:
# Nothing was cached for this attribute
value = f(*args, attr_to_cache=None, **kwargs)
self.cache_results(value, *args, params=kwargs)
return wraps
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
Shaps/ansible | test/lib/ansible_test/_internal/test.py | 11 | 14612 | """Classes for storing and processing test results."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import re
from . import types as t
from .util import (
display,
get_ansible_version,
)
from .util_common import (
write_text_test_results,
write_json_test_results,
ResultType,
)
from .config import (
TestConfig,
)
def calculate_best_confidence(choices, metadata):
"""
:type choices: tuple[tuple[str, int]]
:type metadata: Metadata
:rtype: int
"""
best_confidence = 0
for path, line in choices:
confidence = calculate_confidence(path, line, metadata)
best_confidence = max(confidence, best_confidence)
return best_confidence
def calculate_confidence(path, line, metadata):
"""
:type path: str
:type line: int
:type metadata: Metadata
:rtype: int
"""
ranges = metadata.changes.get(path)
# no changes were made to the file
if not ranges:
return 0
# changes were made to the same file and line
    if any(r[0] <= line <= r[1] for r in ranges):
return 100
# changes were made to the same file and the line number is unknown
if line == 0:
return 75
# changes were made to the same file and the line number is different
return 50
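# Worked example (hypothetical metadata, for illustration only): if metadata.changes
# maps 'lib/foo.py' to [(10, 20)], then calculate_confidence('lib/foo.py', 15, metadata)
# returns 100, calculate_confidence('lib/foo.py', 0, metadata) returns 75 (line unknown),
# calculate_confidence('lib/foo.py', 99, metadata) returns 50 (same file, different line),
# and any path without recorded changes returns 0.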
class TestResult:
"""Base class for test results."""
def __init__(self, command, test, python_version=None):
"""
:type command: str
:type test: str
:type python_version: str
"""
self.command = command
self.test = test
self.python_version = python_version
self.name = self.test or self.command
if self.python_version:
self.name += '-python-%s' % self.python_version
try:
import junit_xml
except ImportError:
junit_xml = None
self.junit = junit_xml
def write(self, args):
"""
:type args: TestConfig
"""
self.write_console()
self.write_bot(args)
if args.lint:
self.write_lint()
if args.junit:
if self.junit:
self.write_junit(args)
else:
display.warning('Skipping junit xml output because the `junit-xml` python package was not found.', unique=True)
def write_console(self):
"""Write results to console."""
def write_lint(self):
"""Write lint results to stdout."""
def write_bot(self, args):
"""
:type args: TestConfig
"""
def write_junit(self, args):
"""
:type args: TestConfig
"""
def create_result_name(self, extension):
"""
:type extension: str
:rtype: str
"""
name = 'ansible-test-%s' % self.command
if self.test:
name += '-%s' % self.test
if self.python_version:
name += '-python-%s' % self.python_version
name += extension
return name
def save_junit(self, args, test_case, properties=None):
"""
:type args: TestConfig
:type test_case: junit_xml.TestCase
:type properties: dict[str, str] | None
:rtype: str | None
"""
test_suites = [
self.junit.TestSuite(
name='ansible-test',
test_cases=[test_case],
timestamp=datetime.datetime.utcnow().replace(microsecond=0).isoformat(),
properties=properties,
),
]
report = self.junit.TestSuite.to_xml_string(test_suites=test_suites, prettyprint=True, encoding='utf-8')
if args.explain:
return
write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), report)
class TestTimeout(TestResult):
"""Test timeout."""
def __init__(self, timeout_duration):
"""
:type timeout_duration: int
"""
super(TestTimeout, self).__init__(command='timeout', test='')
self.timeout_duration = timeout_duration
def write(self, args):
"""
:type args: TestConfig
"""
message = 'Tests were aborted after exceeding the %d minute time limit.' % self.timeout_duration
# Include a leading newline to improve readability on Shippable "Tests" tab.
# Without this, the first line becomes indented.
output = '''
One or more of the following situations may be responsible:
- Code changes have resulted in tests that hang or run for an excessive amount of time.
- Tests have been added which exceed the time limit when combined with existing tests.
- Test infrastructure and/or external dependencies are operating slower than normal.'''
if args.coverage:
output += '\n- Additional overhead from collecting code coverage has resulted in tests exceeding the time limit.'
output += '\n\nConsult the console log for additional details on where the timeout occurred.'
timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
# hack to avoid requiring junit-xml, which isn't pre-installed on Shippable outside our test containers
xml = '''
<?xml version="1.0" encoding="utf-8"?>
<testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0">
\t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="ansible-test" skipped="0" tests="1" time="0" timestamp="%s" url="None">
\t\t<testcase classname="timeout" name="timeout">
\t\t\t<error message="%s" type="error">%s</error>
\t\t</testcase>
\t</testsuite>
</testsuites>
''' % (timestamp, message, output)
write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), xml.lstrip())
class TestSuccess(TestResult):
"""Test success."""
def write_junit(self, args):
"""
:type args: TestConfig
"""
test_case = self.junit.TestCase(classname=self.command, name=self.name)
self.save_junit(args, test_case)
class TestSkipped(TestResult):
"""Test skipped."""
def write_console(self):
"""Write results to console."""
display.info('No tests applicable.', verbosity=1)
def write_junit(self, args):
"""
:type args: TestConfig
"""
test_case = self.junit.TestCase(classname=self.command, name=self.name)
test_case.add_skipped_info('No tests applicable.')
self.save_junit(args, test_case)
class TestFailure(TestResult):
"""Test failure."""
def __init__(self, command, test, python_version=None, messages=None, summary=None):
"""
:type command: str
:type test: str
:type python_version: str | None
:type messages: list[TestMessage] | None
:type summary: unicode | None
"""
super(TestFailure, self).__init__(command, test, python_version)
if messages:
messages = sorted(messages)
else:
messages = []
self.messages = messages
self.summary = summary
def write(self, args):
"""
:type args: TestConfig
"""
if args.metadata.changes:
self.populate_confidence(args.metadata)
super(TestFailure, self).write(args)
def write_console(self):
"""Write results to console."""
if self.summary:
display.error(self.summary)
else:
if self.python_version:
specifier = ' on python %s' % self.python_version
else:
specifier = ''
display.error('Found %d %s issue(s)%s which need to be resolved:' % (len(self.messages), self.test or self.command, specifier))
for message in self.messages:
display.error(message.format(show_confidence=True))
doc_url = self.find_docs()
if doc_url:
display.info('See documentation for help: %s' % doc_url)
def write_lint(self):
"""Write lint results to stdout."""
if self.summary:
command = self.format_command()
message = 'The test `%s` failed. See stderr output for details.' % command
path = ''
message = TestMessage(message, path)
print(message)
else:
for message in self.messages:
print(message)
def write_junit(self, args):
"""
:type args: TestConfig
"""
title = self.format_title()
output = self.format_block()
test_case = self.junit.TestCase(classname=self.command, name=self.name)
# Include a leading newline to improve readability on Shippable "Tests" tab.
# Without this, the first line becomes indented.
test_case.add_failure_info(message=title, output='\n%s' % output)
self.save_junit(args, test_case)
def write_bot(self, args):
"""
:type args: TestConfig
"""
docs = self.find_docs()
message = self.format_title(help_link=docs)
output = self.format_block()
if self.messages:
verified = all((m.confidence or 0) >= 50 for m in self.messages)
else:
verified = False
bot_data = dict(
verified=verified,
docs=docs,
results=[
dict(
message=message,
output=output,
),
],
)
if args.explain:
return
write_json_test_results(ResultType.BOT, self.create_result_name('.json'), bot_data)
def populate_confidence(self, metadata):
"""
:type metadata: Metadata
"""
for message in self.messages:
if message.confidence is None:
message.confidence = calculate_confidence(message.path, message.line, metadata)
def format_command(self):
"""
:rtype: str
"""
command = 'ansible-test %s' % self.command
if self.test:
command += ' --test %s' % self.test
if self.python_version:
command += ' --python %s' % self.python_version
return command
def find_docs(self):
"""
:rtype: str
"""
if self.command != 'sanity':
return None # only sanity tests have docs links
        # Use the major.minor version for the URL only if this is a release that
# matches the pattern 2.4.0, otherwise, use 'devel'
ansible_version = get_ansible_version()
url_version = 'devel'
if re.search(r'^[0-9.]+$', ansible_version):
url_version = '.'.join(ansible_version.split('.')[:2])
testing_docs_url = 'https://docs.ansible.com/ansible/%s/dev_guide/testing' % url_version
url = '%s/%s/' % (testing_docs_url, self.command)
if self.test:
url += '%s.html' % self.test
return url
def format_title(self, help_link=None):
"""
:type help_link: str | None
:rtype: str
"""
command = self.format_command()
if self.summary:
reason = 'the error'
else:
reason = '1 error' if len(self.messages) == 1 else '%d errors' % len(self.messages)
if help_link:
help_link_markup = ' [[explain](%s)]' % help_link
else:
help_link_markup = ''
title = 'The test `%s`%s failed with %s:' % (command, help_link_markup, reason)
return title
def format_block(self):
"""
:rtype: str
"""
if self.summary:
block = self.summary
else:
block = '\n'.join(m.format() for m in self.messages)
message = block.strip()
# Hack to remove ANSI color reset code from SubprocessError messages.
message = message.replace(display.clear, '')
return message
class TestMessage:
"""Single test message for one file."""
def __init__(self, message, path, line=0, column=0, level='error', code=None, confidence=None):
"""
:type message: str
:type path: str
:type line: int
:type column: int
:type level: str
:type code: str | None
:type confidence: int | None
"""
self.__path = path
self.__line = line
self.__column = column
self.__level = level
self.__code = code
self.__message = message
self.confidence = confidence
@property
def path(self): # type: () -> str
"""Return the path."""
return self.__path
@property
def line(self): # type: () -> int
"""Return the line number, or 0 if none is available."""
return self.__line
@property
def column(self): # type: () -> int
"""Return the column number, or 0 if none is available."""
return self.__column
@property
def level(self): # type: () -> str
"""Return the level."""
return self.__level
@property
def code(self): # type: () -> t.Optional[str]
"""Return the code, if any."""
return self.__code
@property
def message(self): # type: () -> str
"""Return the message."""
return self.__message
@property
def tuple(self): # type: () -> t.Tuple[str, int, int, str, t.Optional[str], str]
"""Return a tuple with all the immutable values of this test message."""
return self.__path, self.__line, self.__column, self.__level, self.__code, self.__message
def __lt__(self, other):
return self.tuple < other.tuple
def __le__(self, other):
return self.tuple <= other.tuple
def __eq__(self, other):
return self.tuple == other.tuple
def __ne__(self, other):
return self.tuple != other.tuple
def __gt__(self, other):
return self.tuple > other.tuple
def __ge__(self, other):
return self.tuple >= other.tuple
def __hash__(self):
return hash(self.tuple)
def __str__(self):
return self.format()
def format(self, show_confidence=False):
"""
:type show_confidence: bool
:rtype: str
"""
if self.__code:
msg = '%s: %s' % (self.__code, self.__message)
else:
msg = self.__message
if show_confidence and self.confidence is not None:
msg += ' (%d%%)' % self.confidence
return '%s:%s:%s: %s' % (self.__path, self.__line, self.__column, msg)
| gpl-3.0 |
npuichigo/ttsflow | third_party/tensorflow/tensorflow/python/debug/wrappers/hooks.py | 32 | 13374 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tfdbg CLI as SessionRunHook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import stepper
from tensorflow.python.debug.wrappers import dumping_wrapper
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import grpc_wrapper
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.training import session_run_hook
# The prefix for GRPC endpoint URLs.
_GRPC_ENDPOINT_PREFIX = "grpc://"
class LocalCLIDebugHook(session_run_hook.SessionRunHook,
local_cli_wrapper.LocalCLIDebugWrapperSession):
"""Command-line-interface debugger hook.
Can be used as a monitor/hook for `tf.train.MonitoredSession`s and
`tf.contrib.learn`'s `Estimator`s and `Experiment`s.
"""
def __init__(self,
ui_type="curses",
dump_root=None,
thread_name_filter=None):
"""Create a local debugger command-line interface (CLI) hook.
Args:
ui_type: (str) user-interface type.
dump_root: (`str`) optional path to the dump root directory. Must be a
directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
`run()` calls and removed afterwards.
thread_name_filter: Regular-expression white list for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
"""
self._ui_type = ui_type
self._dump_root = dump_root
self._thread_name_filter = thread_name_filter
self._wrapper_initialized = False
self._pending_tensor_filters = {}
def add_tensor_filter(self, filter_name, tensor_filter):
"""Add a tensor filter.
See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()` for details.
Override default behavior to accommodate the possibility of this method being
called prior to the initialization of the underlying
`LocalCLIDebugWrapperSession` object.
Args:
filter_name: See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()`
for details.
tensor_filter: See doc of
`LocalCLIDebugWrapperSession.add_tensor_filter()` for details.
"""
if self._wrapper_initialized:
local_cli_wrapper.LocalCLIDebugWrapperSession.add_tensor_filter(
self, filter_name, tensor_filter)
else:
self._pending_tensor_filters[filter_name] = tensor_filter
def begin(self):
pass
def before_run(self, run_context):
if not self._wrapper_initialized:
local_cli_wrapper.LocalCLIDebugWrapperSession.__init__(
self,
run_context.session,
ui_type=self._ui_type,
dump_root=self._dump_root,
thread_name_filter=self._thread_name_filter)
# Actually register tensor filters registered prior to the construction
# of the underlying LocalCLIDebugWrapperSession object.
for filter_name in self._pending_tensor_filters:
local_cli_wrapper.LocalCLIDebugWrapperSession.add_tensor_filter(
self, filter_name, self._pending_tensor_filters[filter_name])
self._wrapper_initialized = True
# Increment run call counter.
self._run_call_count += 1
# Adapt run_context to an instance of OnRunStartRequest for invoking
# superclass on_run_start().
on_run_start_request = framework.OnRunStartRequest(
run_context.original_args.fetches, run_context.original_args.feed_dict,
None, None, self._run_call_count)
on_run_start_response = self.on_run_start(on_run_start_request)
self._performed_action = on_run_start_response.action
run_args = session_run_hook.SessionRunArgs(
None, feed_dict=None, options=config_pb2.RunOptions())
if self._performed_action == framework.OnRunStartAction.DEBUG_RUN:
self._decorate_options_for_debug(
run_args.options,
run_context.session.graph,
framework.WatchOptions(
node_name_regex_whitelist=(
on_run_start_response.node_name_regex_whitelist),
op_type_regex_whitelist=(
on_run_start_response.op_type_regex_whitelist),
tensor_dtype_regex_whitelist=(
on_run_start_response.tensor_dtype_regex_whitelist),
tolerate_debug_op_creation_failures=(
on_run_start_response.tolerate_debug_op_creation_failures)))
elif self._performed_action == framework.OnRunStartAction.PROFILE_RUN:
self._decorate_run_options_for_profile(run_args.options)
elif self._performed_action == framework.OnRunStartAction.INVOKE_STEPPER:
# The _finalized property must be set to False so that the NodeStepper
# can insert ops for retrieving TensorHandles.
# pylint: disable=protected-access
run_context.session.graph._finalized = False
# pylint: enable=protected-access
with stepper.NodeStepper(
run_context.session,
run_context.original_args.
fetches,
run_context.original_args.feed_dict) as node_stepper:
self.invoke_node_stepper(
node_stepper, restore_variable_values_on_exit=True)
return run_args
def after_run(self, run_context, run_values):
# Adapt run_context and run_values to OnRunEndRequest and invoke superclass
# on_run_end()
on_run_end_request = framework.OnRunEndRequest(self._performed_action,
run_values.run_metadata)
self.on_run_end(on_run_end_request)
def _decorate_options_for_debug(self, options, graph, watch_options):
"""Modify RunOptions.debug_options.debug_tensor_watch_opts for debugging."""
debug_utils.watch_graph(
options,
graph,
debug_urls=self._get_run_debug_urls(),
node_name_regex_whitelist=watch_options.node_name_regex_whitelist,
op_type_regex_whitelist=watch_options.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=watch_options.tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=(
watch_options.tolerate_debug_op_creation_failures))
options.output_partition_graphs = True
class DumpingDebugHook(session_run_hook.SessionRunHook,
dumping_wrapper.DumpingDebugWrapperSession):
"""A debugger hook that dumps debug data to filesystem.
Can be used as a monitor/hook for `tf.train.MonitoredSession`s and
`tf.contrib.learn`'s `Estimator`s and `Experiment`s.
"""
def __init__(self,
session_root,
watch_fn=None,
thread_name_filter=None,
log_usage=True):
"""Create a local debugger command-line interface (CLI) hook.
Args:
session_root: See doc of
`dumping_wrapper.DumpingDebugWrapperSession.__init__`.
watch_fn: See doc of
`dumping_wrapper.DumpingDebugWrapperSession.__init__`.
thread_name_filter: Regular-expression white list for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
log_usage: (bool) Whether usage is to be logged.
"""
self._session_root = session_root
self._watch_fn = watch_fn
self._thread_name_filter = thread_name_filter
self._log_usage = log_usage
self._wrapper_initialized = False
def begin(self):
pass
def before_run(self, run_context):
if not self._wrapper_initialized:
# TODO(cais): Make this hook have a DumpingDebugWrapperSession property
# instead of subclassing DumpingDebugWrapperSession.
dumping_wrapper.DumpingDebugWrapperSession.__init__(
self,
run_context.session,
self._session_root,
watch_fn=self._watch_fn,
thread_name_filter=self._thread_name_filter,
log_usage=self._log_usage)
self._wrapper_initialized = True
self._run_call_count += 1
debug_urls, watch_options = self._prepare_run_watch_config(
run_context.original_args.fetches, run_context.original_args.feed_dict)
run_options = config_pb2.RunOptions()
debug_utils.watch_graph(
run_options,
run_context.session.graph,
debug_urls=debug_urls,
debug_ops=watch_options.debug_ops,
node_name_regex_whitelist=watch_options.node_name_regex_whitelist,
op_type_regex_whitelist=watch_options.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=watch_options.tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=(
watch_options.tolerate_debug_op_creation_failures))
run_args = session_run_hook.SessionRunArgs(
None, feed_dict=None, options=run_options)
return run_args
def after_run(self, run_context, run_values):
pass
class GrpcDebugHook(session_run_hook.SessionRunHook):
"""A hook that streams debugger-related events to any grpc_debug_server.
For example, the debugger data server is a grpc_debug_server. The debugger
data server writes debugger-related events it receives via GRPC to logdir.
This enables debugging features in Tensorboard such as health pills.
  When the arguments of debug_utils.watch_graph change, strongly consider
changing arguments here too so that features are available to tflearn users.
Can be used as a monitor/hook for `tf.train.MonitoredSession`s and
`tf.contrib.learn`'s `Estimator`s and `Experiment`s.
"""
def __init__(self,
grpc_debug_server_addresses,
watch_fn=None,
thread_name_filter=None,
log_usage=True):
"""Constructs a GrpcDebugHook.
Args:
grpc_debug_server_addresses: (`list` of `str`) A list of the gRPC debug
server addresses, in the format of <host:port>, without the "grpc://"
prefix. For example: ["localhost:7000", "192.168.0.2:8000"]
watch_fn: A function that allows for customizing which ops to watch at
which specific steps. See doc of
`dumping_wrapper.DumpingDebugWrapperSession.__init__` for details.
thread_name_filter: Regular-expression white list for threads on which the
wrapper session will be active. See doc of `BaseDebugWrapperSession` for
more details.
log_usage: (bool) Whether usage is to be logged.
Raises:
ValueError: if any debugger server addresses start with grpc://.
"""
for address in grpc_debug_server_addresses:
if address.startswith(_GRPC_ENDPOINT_PREFIX):
raise ValueError(
("Debug server address %r starts with %r. It should not because "
"the hook already automatically adds the prefix.") % (
address, _GRPC_ENDPOINT_PREFIX))
# A wrapper session responsible for GRPC communication.
self._grpc_debug_wrapper_session = None
self._thread_name_filter = thread_name_filter
self._grpc_debug_server_addresses = grpc_debug_server_addresses
self._watch_fn = watch_fn
self._log_usage = log_usage
def before_run(self, run_context):
"""Called right before a session is run.
Args:
run_context: A session_run_hook.SessionRunContext. Encapsulates
information on the run.
Returns:
A session_run_hook.SessionRunArgs object.
"""
if not self._grpc_debug_wrapper_session:
self._grpc_debug_wrapper_session = grpc_wrapper.GrpcDebugWrapperSession(
run_context.session,
self._grpc_debug_server_addresses,
watch_fn=self._watch_fn,
thread_name_filter=self._thread_name_filter,
log_usage=self._log_usage)
fetches = run_context.original_args.fetches
feed_dict = run_context.original_args.feed_dict
watch_options = self._watch_fn(fetches, feed_dict)
run_options = config_pb2.RunOptions()
debug_utils.watch_graph(
run_options,
run_context.session.graph,
debug_urls=self._grpc_debug_wrapper_session.prepare_run_debug_urls(
fetches, feed_dict),
debug_ops=watch_options.debug_ops,
node_name_regex_whitelist=watch_options.node_name_regex_whitelist,
op_type_regex_whitelist=watch_options.op_type_regex_whitelist,
tensor_dtype_regex_whitelist=watch_options.tensor_dtype_regex_whitelist,
tolerate_debug_op_creation_failures=(
watch_options.tolerate_debug_op_creation_failures))
return session_run_hook.SessionRunArgs(
None, feed_dict=None, options=run_options)
| apache-2.0 |
KarelJakubec/pip | pip/_vendor/cachecontrol/adapter.py | 469 | 4196 | import functools
from pip._vendor.requests.adapters import HTTPAdapter
from .controller import CacheController
from .cache import DictCache
from .filewrapper import CallbackFileWrapper
class CacheControlAdapter(HTTPAdapter):
invalidating_methods = set(['PUT', 'DELETE'])
def __init__(self, cache=None,
cache_etags=True,
controller_class=None,
serializer=None,
heuristic=None,
*args, **kw):
super(CacheControlAdapter, self).__init__(*args, **kw)
self.cache = cache or DictCache()
self.heuristic = heuristic
controller_factory = controller_class or CacheController
self.controller = controller_factory(
self.cache,
cache_etags=cache_etags,
serializer=serializer,
)
def send(self, request, **kw):
"""
Send a request. Use the request information to see if it
exists in the cache and cache the response if we need to and can.
"""
if request.method == 'GET':
cached_response = self.controller.cached_request(request)
if cached_response:
return self.build_response(request, cached_response,
from_cache=True)
# check for etags and add headers if appropriate
request.headers.update(
self.controller.conditional_headers(request)
)
resp = super(CacheControlAdapter, self).send(request, **kw)
return resp
def build_response(self, request, response, from_cache=False):
"""
Build a response by making a request or using the cache.
This will end up calling send and returning a potentially
cached response
"""
if not from_cache and request.method == 'GET':
# apply any expiration heuristics
if response.status == 304:
# We must have sent an ETag request. This could mean
# that we've been expired already or that we simply
# have an etag. In either case, we want to try and
# update the cache if that is the case.
cached_response = self.controller.update_cached_response(
request, response
)
if cached_response is not response:
from_cache = True
# We are done with the server response, read a
# possible response body (compliant servers will
# not return one, but we cannot be 100% sure) and
# release the connection back to the pool.
response.read(decode_content=False)
response.release_conn()
response = cached_response
# We always cache the 301 responses
elif response.status == 301:
self.controller.cache_response(request, response)
else:
# Check for any heuristics that might update headers
# before trying to cache.
if self.heuristic:
response = self.heuristic.apply(response)
# Wrap the response file with a wrapper that will cache the
# response when the stream has been consumed.
response._fp = CallbackFileWrapper(
response._fp,
functools.partial(
self.controller.cache_response,
request,
response,
)
)
resp = super(CacheControlAdapter, self).build_response(
request, response
)
# See if we should invalidate the cache.
if request.method in self.invalidating_methods and resp.ok:
cache_url = self.controller.cache_url(request.url)
self.cache.delete(cache_url)
# Give the request a from_cache attr to let people use it
resp.from_cache = from_cache
return resp
def close(self):
self.cache.close()
super(CacheControlAdapter, self).close()
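# Illustrative usage sketch (assumes the `requests` package; the URL is a placeholder):
#
#   import requests
#   sess = requests.Session()
#   sess.mount('http://', CacheControlAdapter())
#   sess.get('http://example.com/')  # fetched over the network and cached
#   sess.get('http://example.com/')  # may now be served from the cache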
| mit |
cribster/cribster.github.io | bower_components/bootstrap/test-infra/s3_cache.py | 1700 | 3523 | #!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
BYTES_PER_MB = 1024 * 1024
try:
BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def _tarball_size(directory):
    mib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
    return "{} MiB".format(mib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
print("Creating tarball of {}...".format(directory))
run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
print("Extracting tarball of {}...".format(directory))
run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
try:
print("Downloading {} tarball from S3...".format(friendly_name))
key.get_contents_to_filename(_tarball_filename_for(directory))
except S3ResponseError as err:
open(NEED_TO_UPLOAD_MARKER, 'a').close()
print(err)
raise SystemExit("Cached {} download failed!".format(friendly_name))
print("Downloaded {}.".format(_tarball_size(directory)))
_extract_tarball(directory)
print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
_create_tarball(directory)
print("Uploading {} tarball to S3... ({})".format(friendly_name, _tarball_size(directory)))
key.set_contents_from_filename(_tarball_filename_for(directory))
print("{} cache successfully updated.".format(friendly_name))
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
# Uses environment variables:
# AWS_ACCESS_KEY_ID -- AWS Access Key ID
# AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
argv.pop(0)
if len(argv) != 4:
raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
mode, friendly_name, dependencies_file, directory = argv
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME, validate=False)
if bucket is None:
raise SystemExit("Could not access bucket!")
dependencies_file_hash = _sha256_of_file(dependencies_file)
key = Key(bucket, dependencies_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if isfile(NEED_TO_UPLOAD_MARKER): # FIXME
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode))
| mit |
afandria/sky_engine | third_party/android_testrunner/errors.py | 171 | 1340 | #!/usr/bin/python2.4
#
#
# Copyright 2008, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines common exception classes for this package."""
class MsgException(Exception):
"""Generic exception with an optional string msg."""
def __init__(self, msg=""):
self.msg = msg
class WaitForResponseTimedOutError(Exception):
"""We sent a command and had to wait too long for response."""
class DeviceUnresponsiveError(Exception):
"""Device is unresponsive to command."""
class InstrumentationError(Exception):
"""Failed to run instrumentation."""
class AbortError(MsgException):
"""Generic exception that indicates a fatal error has occurred and program
execution should be aborted."""
class ParseError(MsgException):
"""Raised when xml data to parse has unrecognized format."""
| bsd-3-clause |
ahmedjabbar/uor | tg/tg-test.py | 199 | 1450 | import tgl
import pprint
from functools import partial
our_id = 0
pp = pprint.PrettyPrinter(indent=4)
binlog_done = False;
def on_binlog_replay_end():
binlog_done = True;
def on_get_difference_end():
pass
def on_our_id(id):
our_id = id
return "Set ID: " + str(our_id)
def msg_cb(success, msg):
pp.pprint(success)
pp.pprint(msg)
HISTORY_QUERY_SIZE = 100
def history_cb(msg_list, peer, success, msgs):
print(len(msgs))
msg_list.extend(msgs)
print(len(msg_list))
if len(msgs) == HISTORY_QUERY_SIZE:
tgl.get_history(peer, len(msg_list), HISTORY_QUERY_SIZE, partial(history_cb, msg_list, peer));
def cb(success):
print(success)
def on_msg_receive(msg):
if msg.out and not binlog_done:
return;
if msg.dest.id == our_id: # direct message
peer = msg.src
else: # chatroom
peer = msg.dest
pp.pprint(msg)
if msg.text.startswith("!ping"):
peer.send_msg("PONG! google.com", preview=False, reply=msg.id)
def on_secret_chat_update(peer, types):
return "on_secret_chat_update"
def on_user_update():
pass
def on_chat_update():
pass
# Set callbacks
tgl.set_on_binlog_replay_end(on_binlog_replay_end)
tgl.set_on_get_difference_end(on_get_difference_end)
tgl.set_on_our_id(on_our_id)
tgl.set_on_msg_receive(on_msg_receive)
tgl.set_on_secret_chat_update(on_secret_chat_update)
tgl.set_on_user_update(on_user_update)
tgl.set_on_chat_update(on_chat_update)
| gpl-2.0 |
syci/OCB | addons/sale/tests/test_sale_order.py | 40 | 7670 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.exceptions import UserError, AccessError
from test_sale_common import TestSale
class TestSaleOrder(TestSale):
def test_sale_order(self):
""" Test the sale order flow (invoicing and quantity updates)
- Invoice repeatedly while varrying delivered quantities and check that invoice are always what we expect
"""
# DBO TODO: validate invoice and register payments
inv_obj = self.env['account.invoice']
so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
})
self.assertEqual(so.amount_total, sum([2 * p.list_price for (k, p) in self.products.iteritems()]), 'Sale: total amount is wrong')
# send quotation
so.force_quotation_send()
self.assertTrue(so.state == 'sent', 'Sale: state after sending is wrong')
# confirm quotation
so.action_confirm()
self.assertTrue(so.state == 'sale')
self.assertTrue(so.invoice_status == 'to invoice')
# create invoice: only 'invoice on order' products are invoiced
inv_id = so.action_invoice_create()
inv = inv_obj.browse(inv_id)
self.assertEqual(len(inv.invoice_line_ids), 2, 'Sale: invoice is missing lines')
self.assertEqual(inv.amount_total, sum([2 * p.list_price if p.invoice_policy == 'order' else 0 for (k, p) in self.products.iteritems()]), 'Sale: invoice total amount is wrong')
self.assertTrue(so.invoice_status == 'no', 'Sale: SO status after invoicing should be "nothing to invoice"')
self.assertTrue(len(so.invoice_ids) == 1, 'Sale: invoice is missing')
# deliver lines except 'time and material' then invoice again
for line in so.order_line:
line.qty_delivered = 2 if line.product_id.invoice_policy in ['order', 'delivery'] else 0
self.assertTrue(so.invoice_status == 'to invoice', 'Sale: SO status after delivery should be "to invoice"')
inv_id = so.action_invoice_create()
inv = inv_obj.browse(inv_id)
self.assertEqual(len(inv.invoice_line_ids), 2, 'Sale: second invoice is missing lines')
self.assertEqual(inv.amount_total, sum([2 * p.list_price if p.invoice_policy == 'delivery' else 0 for (k, p) in self.products.iteritems()]), 'Sale: second invoice total amount is wrong')
self.assertTrue(so.invoice_status == 'invoiced', 'Sale: SO status after invoicing everything should be "invoiced"')
self.assertTrue(len(so.invoice_ids) == 2, 'Sale: invoice is missing')
# go over the sold quantity
for line in so.order_line:
if line.product_id == self.products['serv_order']:
line.qty_delivered = 10
self.assertTrue(so.invoice_status == 'upselling', 'Sale: SO status after increasing delivered qty higher than ordered qty should be "upselling"')
# upsell and invoice
for line in so.order_line:
if line.product_id == self.products['serv_order']:
line.product_uom_qty = 10
inv_id = so.action_invoice_create()
inv = inv_obj.browse(inv_id)
self.assertEqual(len(inv.invoice_line_ids), 1, 'Sale: third invoice is missing lines')
self.assertEqual(inv.amount_total, 8 * self.products['serv_order'].list_price, 'Sale: second invoice total amount is wrong')
self.assertTrue(so.invoice_status == 'invoiced', 'Sale: SO status after invoicing everything (including the upsel) should be "invoiced"')
def test_unlink_cancel(self):
""" Test deleting and cancelling sale orders depending on their state and on the user's rights """
so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
})
# only quotations are deletable
with self.assertRaises(UserError):
so.action_confirm()
so.unlink()
so_copy = so.copy()
with self.assertRaises(AccessError):
so_copy.sudo(self.user).unlink()
self.assertTrue(so_copy.sudo(self.manager).unlink(), 'Sale: deleting a quotation should be possible')
# cancelling and setting to done, you should not be able to delete any SO ever
so.action_cancel()
self.assertTrue(so.state == 'cancel', 'Sale: cancelling SO should always be possible')
with self.assertRaises(UserError):
so.sudo(self.manager).unlink()
so.action_done()
self.assertTrue(so.state == 'done', 'Sale: SO not done')
def test_cost_invoicing(self):
""" Test confirming a vendor invoice to reinvoice cost on the so """
serv_cost = self.env.ref('product.product_product_1b')
prod_gap = self.env.ref('product.product_product_1')
so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': prod_gap.name, 'product_id': prod_gap.id, 'product_uom_qty': 2, 'product_uom': prod_gap.uom_id.id, 'price_unit': prod_gap.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
})
so.action_confirm()
so._create_analytic_account()
inv_partner = self.env.ref('base.res_partner_2')
company = self.env.ref('base.main_company')
journal = self.env['account.journal'].create({'name': 'Purchase Journal - Test', 'code': 'STPJ', 'type': 'purchase', 'company_id': company.id})
account_payable = self.env['account.account'].create({'code': 'X1111', 'name': 'Sale - Test Payable Account', 'user_type_id': self.env.ref('account.data_account_type_payable').id, 'reconcile': True})
account_income = self.env['account.account'].create({'code': 'X1112', 'name': 'Sale - Test Account', 'user_type_id': self.env.ref('account.data_account_type_direct_costs').id})
invoice_vals = {
'name': '',
'type': 'in_invoice',
'partner_id': inv_partner.id,
'invoice_line_ids': [(0, 0, {'name': serv_cost.name, 'product_id': serv_cost.id, 'quantity': 2, 'uom_id': serv_cost.uom_id.id, 'price_unit': serv_cost.standard_price, 'account_analytic_id': so.project_id.id, 'account_id': account_income.id})],
'account_id': account_payable.id,
'journal_id': journal.id,
'currency_id': company.currency_id.id,
}
inv = self.env['account.invoice'].create(invoice_vals)
inv.signal_workflow('invoice_open')
sol = so.order_line.filtered(lambda l: l.product_id == serv_cost)
self.assertTrue(sol, 'Sale: cost invoicing does not add lines when confirming vendor invoice')
self.assertTrue(sol.price_unit == 160 and sol.qty_delivered == 2 and sol.product_uom_qty == sol.qty_invoiced == 0, 'Sale: line is wrong after confirming vendor invoice')
| agpl-3.0 |
kartikp1995/gnuradio | gnuradio-runtime/python/pmt/qa_pmt_to_python.py | 48 | 1554 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import unittest
import pmt
import pmt_to_python as pmt2py
class test_pmt_to_python(unittest.TestCase):
def test_pmt_from_double(self):
b = pmt.from_double(123765)
self.assertEqual(pmt.to_python(b), 123765)
t = pmt.to_pmt(range(5))
def test_numpy_to_uvector_and_reverse(self):
import numpy as np
N = 100
narr = np.ndarray(N, dtype=np.complex128)
narr.real[:] = np.random.uniform(size=N)
narr.imag[:] = np.random.uniform(size=N)
uvector = pmt2py.numpy_to_uvector(narr)
nparr = pmt2py.uvector_to_numpy(uvector)
self.assertTrue(nparr.dtype==narr.dtype)
self.assertTrue(np.alltrue(nparr == narr))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
da1z/intellij-community | python/helpers/profiler/thriftpy/_compat.py | 20 | 3953 | # -*- coding: utf-8 -*-
"""
thriftpy._compat
~~~~~~~~~~~~~
py2/py3 compatibility support.
"""
from __future__ import absolute_import
import platform
import sys
import types
PY3 = sys.version_info[0] == 3
PYPY = "__pypy__" in sys.modules
UNIX = platform.system() in ("Linux", "Darwin")
CYTHON = False # Cython always disabled in pypy and windows
# only python2.7.9 and python 3.4 or above have true ssl context
MODERN_SSL = (2, 7, 9) <= sys.version_info < (3, 0, 0) or \
sys.version_info >= (3, 4)
if PY3:
text_type = str
string_types = (str,)
def u(s):
return s
else:
text_type = unicode # noqa
string_types = (str, unicode) # noqa
def u(s):
if not isinstance(s, text_type):
s = s.decode("utf-8")
return s
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass for py2 & py3
This code snippet is copied from six."""
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
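# Illustrative usage sketch (hypothetical metaclass and base class names):
#
#   class MyStruct(with_metaclass(SomeMeta, BaseStruct)):
#       pass
#
# behaves the same on Python 2 and Python 3, whereas the native
# `__metaclass__` attribute and `metaclass=` keyword are mutually incompatible.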
def init_func_generator(spec):
"""Generate `__init__` function based on TPayload.default_spec
For example::
spec = [('name', 'Alice'), ('number', None)]
will generate::
def __init__(self, name='Alice', number=None):
kwargs = locals()
kwargs.pop('self')
self.__dict__.update(kwargs)
    TODO: The `locals()` part may need refinement.
"""
if not spec:
def __init__(self):
pass
return __init__
varnames, defaults = zip(*spec)
varnames = ('self', ) + varnames
def init(self):
self.__dict__ = locals().copy()
del self.__dict__['self']
code = init.__code__
if PY3:
new_code = types.CodeType(len(varnames),
0,
len(varnames),
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
varnames,
code.co_filename,
"__init__",
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars)
else:
new_code = types.CodeType(len(varnames),
len(varnames),
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
varnames,
code.co_filename,
"__init__",
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars)
return types.FunctionType(new_code,
{"__builtins__": __builtins__},
argdefs=defaults)
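# Illustrative usage sketch (hypothetical class, mirroring the docstring example):
#
#   class Person(object):
#       pass
#   Person.__init__ = init_func_generator([('name', 'Alice'), ('number', None)])
#   p = Person(number=42)  # p.name == 'Alice', p.number == 42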
| apache-2.0 |
gangadhar-kadam/sapphire_app | hr/report/monthly_salary_register/monthly_salary_register.py | 2 | 4044 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import flt, cstr
from webnotes import msgprint, _
def execute(filters=None):
if not filters: filters = {}
salary_slips = get_salary_slips(filters)
columns, earning_types, ded_types = get_columns(salary_slips)
ss_earning_map = get_ss_earning_map(salary_slips)
ss_ded_map = get_ss_ded_map(salary_slips)
data = []
for ss in salary_slips:
row = [ss.employee, ss.employee_name, ss.branch, ss.department, ss.designation,
ss.company, ss.month, ss.leave_withut_pay, ss.payment_days]
for e in earning_types:
row.append(ss_earning_map.get(ss.name, {}).get(e))
row += [ss.arrear_amount, ss.leave_encashment_amount, ss.gross_pay]
for d in ded_types:
row.append(ss_ded_map.get(ss.name, {}).get(d))
row += [ss.total_deduction, ss.net_pay]
data.append(row)
return columns, data
def get_columns(salary_slips):
columns = [
"Employee:Link/Employee:120", "Employee Name::140", "Branch:Link/Branch:120",
"Department:Link/Department:120", "Designation:Link/Designation:120",
"Company:Link/Company:120", "Month::80", "Leave Without pay:Float:130",
"Payment Days:Float:120"
]
earning_types = webnotes.conn.sql_list("""select distinct e_type from `tabSalary Slip Earning`
where ifnull(e_modified_amount, 0) != 0 and parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))
ded_types = webnotes.conn.sql_list("""select distinct d_type from `tabSalary Slip Deduction`
where ifnull(d_modified_amount, 0) != 0 and parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))
columns = columns + [(e + ":Link/Earning Type:120") for e in earning_types] + \
["Arrear Amount:Currency:120", "Leave Encashment Amount:Currency:150",
"Gross Pay:Currency:120"] + [(d + ":Link/Deduction Type:120") for d in ded_types] + \
["Total Deduction:Currency:120", "Net Pay:Currency:120"]
return columns, earning_types, ded_types
def get_salary_slips(filters):
conditions, filters = get_conditions(filters)
salary_slips = webnotes.conn.sql("""select * from `tabSalary Slip` where docstatus = 1 %s""" %
conditions, filters, as_dict=1)
if not salary_slips:
msgprint(_("No salary slip found for month: ") + cstr(filters.get("month")) +
_(" and year: ") + cstr(filters.get("fiscal_year")), raise_exception=1)
return salary_slips
def get_conditions(filters):
conditions = ""
if filters.get("month"):
month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
"Dec"].index(filters["month"]) + 1
filters["month"] = month
conditions += " and month = %(month)s"
if filters.get("fiscal_year"): conditions += " and fiscal_year = %(fiscal_year)s"
if filters.get("company"): conditions += " and company = %(company)s"
if filters.get("employee"): conditions += " and employee = %(employee)s"
return conditions, filters
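# Illustrative example (not part of the original report): with
# filters = {"month": "Mar", "company": "Acme"} get_conditions() returns
# (" and month = %(month)s and company = %(company)s",
#  {"month": 3, "company": "Acme"}), which get_salary_slips() passes straight
# to webnotes.conn.sql().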
def get_ss_earning_map(salary_slips):
ss_earnings = webnotes.conn.sql("""select parent, e_type, e_modified_amount
from `tabSalary Slip Earning` where parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
ss_earning_map = {}
for d in ss_earnings:
ss_earning_map.setdefault(d.parent, webnotes._dict()).setdefault(d.e_type, [])
ss_earning_map[d.parent][d.e_type] = flt(d.e_modified_amount)
return ss_earning_map
def get_ss_ded_map(salary_slips):
ss_deductions = webnotes.conn.sql("""select parent, d_type, d_modified_amount
from `tabSalary Slip Deduction` where parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
ss_ded_map = {}
for d in ss_deductions:
ss_ded_map.setdefault(d.parent, webnotes._dict()).setdefault(d.d_type, [])
ss_ded_map[d.parent][d.d_type] = flt(d.d_modified_amount)
return ss_ded_map | agpl-3.0 |
goldenbull/grpc | src/python/grpcio/tests/unit/framework/interfaces/face/_3069_test_constant.py | 25 | 1848 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A test constant working around issue 3069."""
# test_constants is referenced from specification in this module.
from tests.unit.framework.common import test_constants # pylint: disable=unused-import
# TODO(issue 3069): Replace uses of this constant with
# test_constants.SHORT_TIMEOUT.
REALLY_SHORT_TIMEOUT = 0.1
| bsd-3-clause |
rruebner/odoo | addons/hw_escpos/escpos/escpos.py | 66 | 31141 | # -*- coding: utf-8 -*-
'''
@author: Manuel F Martinez <manpaz@bashlinux.com>
@organization: Bashlinux
@copyright: Copyright (c) 2012 Bashlinux
@license: GPL
'''
try:
import qrcode
except ImportError:
qrcode = None
import time
import copy
import io
import base64
import math
import md5
import re
import traceback
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
from PIL import Image
try:
import jcconv
except ImportError:
jcconv = None
print 'ESC/POS: please install jcconv for improved Japanese receipt printing:'
print ' # pip install jcconv'
from constants import *
from exceptions import *
def utfstr(stuff):
""" converts stuff to string and does without failing if stuff is a utf8 string """
if isinstance(stuff,basestring):
return stuff
else:
return str(stuff)
class StyleStack:
"""
The stylestack is used by the xml receipt serializer to compute the active styles along the xml
    document. Styles are just xml attributes, there is no css mechanism. But the styles applied by
the attributes are inherited by deeper nodes.
"""
def __init__(self):
self.stack = []
self.defaults = { # default style values
'align': 'left',
'underline': 'off',
'bold': 'off',
'size': 'normal',
'font' : 'a',
'width': 48,
'indent': 0,
'tabwidth': 2,
'bullet': ' - ',
'line-ratio':0.5,
'color': 'black',
'value-decimals': 2,
'value-symbol': '',
'value-symbol-position': 'after',
'value-autoint': 'off',
'value-decimals-separator': '.',
'value-thousands-separator': ',',
'value-width': 0,
}
self.types = { # attribute types, default is string and can be ommitted
'width': 'int',
'indent': 'int',
'tabwidth': 'int',
'line-ratio': 'float',
'value-decimals': 'int',
'value-width': 'int',
}
self.cmds = {
# translation from styles to escpos commands
# some style do not correspond to escpos command are used by
# the serializer instead
'align': {
'left': TXT_ALIGN_LT,
'right': TXT_ALIGN_RT,
'center': TXT_ALIGN_CT,
},
'underline': {
'off': TXT_UNDERL_OFF,
'on': TXT_UNDERL_ON,
'double': TXT_UNDERL2_ON,
},
'bold': {
'off': TXT_BOLD_OFF,
'on': TXT_BOLD_ON,
},
'font': {
'a': TXT_FONT_A,
'b': TXT_FONT_B,
},
'size': {
'normal': TXT_NORMAL,
'double-height': TXT_2HEIGHT,
'double-width': TXT_2WIDTH,
'double': TXT_DOUBLE,
},
'color': {
'black': TXT_COLOR_BLACK,
'red': TXT_COLOR_RED,
},
}
self.push(self.defaults)
def get(self,style):
""" what's the value of a style at the current stack level"""
level = len(self.stack) -1
while level >= 0:
if style in self.stack[level]:
return self.stack[level][style]
else:
level = level - 1
return None
def enforce_type(self, attr, val):
"""converts a value to the attribute's type"""
if not attr in self.types:
return utfstr(val)
elif self.types[attr] == 'int':
return int(float(val))
elif self.types[attr] == 'float':
return float(val)
else:
return utfstr(val)
def push(self, style={}):
"""push a new level on the stack with a style dictionnary containing style:value pairs"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
_style[attr] = self.enforce_type(attr, style[attr])
self.stack.append(_style)
def set(self, style={}):
"""overrides style values at the current stack level"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
self.stack[-1][attr] = self.enforce_type(attr, style[attr])
def pop(self):
""" pop a style stack level """
if len(self.stack) > 1 :
self.stack = self.stack[:-1]
def to_escpos(self):
""" converts the current style to an escpos command string """
cmd = ''
for style in self.cmds:
cmd += self.cmds[style][self.get(style)]
return cmd
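# Illustrative StyleStack usage (a sketch, not part of the original driver):
#
#   stack = StyleStack()
#   stack.push({'bold': 'on', 'size': 'double-height'})
#   escpos_cmds = stack.to_escpos()   # byte sequence enabling the active styles
#   stack.pop()                       # drop back to the default styles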
class XmlSerializer:
"""
Converts the xml inline / block tree structure to a string,
keeping track of newlines and spacings.
The string is outputted asap to the provided escpos driver.
"""
def __init__(self,escpos):
self.escpos = escpos
self.stack = ['block']
self.dirty = False
def start_inline(self,stylestack=None):
""" starts an inline entity with an optional style definition """
self.stack.append('inline')
if self.dirty:
self.escpos._raw(' ')
if stylestack:
self.style(stylestack)
def start_block(self,stylestack=None):
""" starts a block entity with an optional style definition """
if self.dirty:
self.escpos._raw('\n')
self.dirty = False
self.stack.append('block')
if stylestack:
self.style(stylestack)
def end_entity(self):
""" ends the entity definition. (but does not cancel the active style!) """
if self.stack[-1] == 'block' and self.dirty:
self.escpos._raw('\n')
self.dirty = False
if len(self.stack) > 1:
self.stack = self.stack[:-1]
def pre(self,text):
""" puts a string of text in the entity keeping the whitespace intact """
if text:
self.escpos.text(text)
self.dirty = True
def text(self,text):
""" puts text in the entity. Whitespace and newlines are stripped to single spaces. """
if text:
text = utfstr(text)
text = text.strip()
text = re.sub('\s+',' ',text)
if text:
self.dirty = True
self.escpos.text(text)
def linebreak(self):
""" inserts a linebreak in the entity """
self.dirty = False
self.escpos._raw('\n')
def style(self,stylestack):
""" apply a style to the entity (only applies to content added after the definition) """
self.raw(stylestack.to_escpos())
def raw(self,raw):
""" puts raw text or escpos command in the entity without affecting the state of the serializer """
self.escpos._raw(raw)
class XmlLineSerializer:
"""
This is used to convert a xml tree into a single line, with a left and a right part.
    The content is not output to escpos directly, and is intended to be fed back to the
XmlSerializer as the content of a block entity.
"""
def __init__(self, indent=0, tabwidth=2, width=48, ratio=0.5):
self.tabwidth = tabwidth
self.indent = indent
self.width = max(0, width - int(tabwidth*indent))
self.lwidth = int(self.width*ratio)
self.rwidth = max(0, self.width - self.lwidth)
self.clwidth = 0
self.crwidth = 0
self.lbuffer = ''
self.rbuffer = ''
self.left = True
def _txt(self,txt):
if self.left:
if self.clwidth < self.lwidth:
txt = txt[:max(0, self.lwidth - self.clwidth)]
self.lbuffer += txt
self.clwidth += len(txt)
else:
if self.crwidth < self.rwidth:
txt = txt[:max(0, self.rwidth - self.crwidth)]
self.rbuffer += txt
self.crwidth += len(txt)
def start_inline(self,stylestack=None):
if (self.left and self.clwidth) or (not self.left and self.crwidth):
self._txt(' ')
def start_block(self,stylestack=None):
self.start_inline(stylestack)
def end_entity(self):
pass
def pre(self,text):
if text:
self._txt(text)
def text(self,text):
if text:
text = utfstr(text)
text = text.strip()
text = re.sub('\s+',' ',text)
if text:
self._txt(text)
def linebreak(self):
pass
def style(self,stylestack):
pass
def raw(self,raw):
pass
def start_right(self):
self.left = False
def get_line(self):
return ' ' * self.indent * self.tabwidth + self.lbuffer + ' ' * (self.width - self.clwidth - self.crwidth) + self.rbuffer
class Escpos:
""" ESC/POS Printer object """
device = None
encoding = None
img_cache = {}
def _check_image_size(self, size):
""" Check and fix the size of the image to 32 bits """
if size % 32 == 0:
return (0, 0)
else:
image_border = 32 - (size % 32)
if (image_border % 2) == 0:
return (image_border / 2, image_border / 2)
else:
return (image_border / 2, (image_border / 2) + 1)
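    # Examples (illustrative): a 60 pixel wide image needs 4 columns of padding
    # to reach a multiple of 32, so _check_image_size(60) == (2, 2), while
    # _check_image_size(64) == (0, 0).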
def _print_image(self, line, size):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
self._raw(S_RASTER_N)
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
self._raw(buffer.decode('hex'))
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
self._raw(buffer.decode("hex"))
buffer = ""
cont = 0
def _raw_print_image(self, line, size, output=None ):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
raw = ""
def __raw(string):
if output:
output(string)
else:
self._raw(string)
raw += S_RASTER_N
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
raw += buffer.decode('hex')
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
raw += buffer.decode("hex")
buffer = ""
cont = 0
return raw
def _convert_image(self, im):
""" Parse image and prepare it to a printable format """
pixels = []
pix_line = ""
im_left = ""
im_right = ""
switch = 0
img_size = [ 0, 0 ]
if im.size[0] > 512:
print "WARNING: Image is wider than 512 and could be truncated at print time "
if im.size[1] > 255:
raise ImageSizeError()
im_border = self._check_image_size(im.size[0])
for i in range(im_border[0]):
im_left += "0"
for i in range(im_border[1]):
im_right += "0"
for y in range(im.size[1]):
img_size[1] += 1
pix_line += im_left
img_size[0] += im_border[0]
for x in range(im.size[0]):
img_size[0] += 1
RGB = im.getpixel((x, y))
im_color = (RGB[0] + RGB[1] + RGB[2])
im_pattern = "1X0"
pattern_len = len(im_pattern)
switch = (switch - 1 ) * (-1)
for x in range(pattern_len):
if im_color <= (255 * 3 / pattern_len * (x+1)):
if im_pattern[x] == "X":
pix_line += "%d" % switch
else:
pix_line += im_pattern[x]
break
elif im_color > (255 * 3 / pattern_len * pattern_len) and im_color <= (255 * 3):
pix_line += im_pattern[-1]
break
pix_line += im_right
img_size[0] += im_border[1]
return (pix_line, img_size)
def image(self,path_img):
""" Open image file """
im_open = Image.open(path_img)
im = im_open.convert("RGB")
# Convert the RGB image in printable image
pix_line, img_size = self._convert_image(im)
self._print_image(pix_line, img_size)
def print_base64_image(self,img):
print 'print_b64_img'
id = md5.new(img).digest()
if id not in self.img_cache:
print 'not in cache'
img = img[img.find(',')+1:]
            f = io.BytesIO()  # start from an empty buffer before writing the decoded image
f.write(base64.decodestring(img))
f.seek(0)
img_rgba = Image.open(f)
img = Image.new('RGB', img_rgba.size, (255,255,255))
img.paste(img_rgba, mask=img_rgba.split()[3])
print 'convert image'
pix_line, img_size = self._convert_image(img)
print 'print image'
buffer = self._raw_print_image(pix_line, img_size)
self.img_cache[id] = buffer
print 'raw image'
self._raw(self.img_cache[id])
def qr(self,text):
""" Print QR Code for the provided string """
qr_code = qrcode.QRCode(version=4, box_size=4, border=1)
qr_code.add_data(text)
qr_code.make(fit=True)
qr_img = qr_code.make_image()
im = qr_img._img.convert("RGB")
# Convert the RGB image in printable image
        pix_line, img_size = self._convert_image(im)
        self._print_image(pix_line, img_size)
def barcode(self, code, bc, width=255, height=2, pos='below', font='a'):
""" Print Barcode """
# Align Bar Code()
self._raw(TXT_ALIGN_CT)
# Height
        if height >= 2 and height <= 6:
self._raw(BARCODE_HEIGHT)
else:
raise BarcodeSizeError()
# Width
        if width >= 1 and width <= 255:
self._raw(BARCODE_WIDTH)
else:
raise BarcodeSizeError()
# Font
if font.upper() == "B":
self._raw(BARCODE_FONT_B)
else: # DEFAULT FONT: A
self._raw(BARCODE_FONT_A)
# Position
if pos.upper() == "OFF":
self._raw(BARCODE_TXT_OFF)
elif pos.upper() == "BOTH":
self._raw(BARCODE_TXT_BTH)
elif pos.upper() == "ABOVE":
self._raw(BARCODE_TXT_ABV)
else: # DEFAULT POSITION: BELOW
self._raw(BARCODE_TXT_BLW)
# Type
if bc.upper() == "UPC-A":
self._raw(BARCODE_UPC_A)
elif bc.upper() == "UPC-E":
self._raw(BARCODE_UPC_E)
elif bc.upper() == "EAN13":
self._raw(BARCODE_EAN13)
elif bc.upper() == "EAN8":
self._raw(BARCODE_EAN8)
elif bc.upper() == "CODE39":
self._raw(BARCODE_CODE39)
elif bc.upper() == "ITF":
self._raw(BARCODE_ITF)
elif bc.upper() == "NW7":
self._raw(BARCODE_NW7)
else:
raise BarcodeTypeError()
# Print Code
if code:
self._raw(code)
else:
            raise BarcodeCodeError()
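    # Illustrative call (a sketch, not from the original code; the EAN13 value
    # below is made up):
    #
    #   printer.barcode('4006381333931', 'EAN13', width=64, height=4,
    #                   pos='below', font='a')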
def receipt(self,xml):
"""
Prints an xml based receipt definition
"""
def strclean(string):
if not string:
string = ''
string = string.strip()
string = re.sub('\s+',' ',string)
return string
def format_value(value, decimals=3, width=0, decimals_separator='.', thousands_separator=',', autoint=False, symbol='', position='after'):
decimals = max(0,int(decimals))
width = max(0,int(width))
value = float(value)
if autoint and math.floor(value) == value:
decimals = 0
if width == 0:
width = ''
if thousands_separator:
formatstr = "{:"+str(width)+",."+str(decimals)+"f}"
else:
formatstr = "{:"+str(width)+"."+str(decimals)+"f}"
ret = formatstr.format(value)
ret = ret.replace(',','COMMA')
ret = ret.replace('.','DOT')
ret = ret.replace('COMMA',thousands_separator)
ret = ret.replace('DOT',decimals_separator)
if symbol:
if position == 'after':
ret = ret + symbol
else:
ret = symbol + ret
return ret
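        # Examples (illustrative): format_value(1234.5, decimals=2) returns
        # '1,234.50' and format_value(1234.5, decimals=2, symbol='$',
        # position='before') returns '$1,234.50'.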
def print_elem(stylestack, serializer, elem, indent=0):
elem_styles = {
'h1': {'bold': 'on', 'size':'double'},
'h2': {'size':'double'},
'h3': {'bold': 'on', 'size':'double-height'},
'h4': {'size': 'double-height'},
'h5': {'bold': 'on'},
'em': {'font': 'b'},
'b': {'bold': 'on'},
}
stylestack.push()
if elem.tag in elem_styles:
stylestack.set(elem_styles[elem.tag])
stylestack.set(elem.attrib)
if elem.tag in ('p','div','section','article','receipt','header','footer','li','h1','h2','h3','h4','h5'):
serializer.start_block(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag in ('span','em','b','left','right'):
serializer.start_inline(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag == 'value':
serializer.start_inline(stylestack)
serializer.pre(format_value(
elem.text,
decimals=stylestack.get('value-decimals'),
width=stylestack.get('value-width'),
decimals_separator=stylestack.get('value-decimals-separator'),
thousands_separator=stylestack.get('value-thousands-separator'),
autoint=(stylestack.get('value-autoint') == 'on'),
symbol=stylestack.get('value-symbol'),
position=stylestack.get('value-symbol-position')
))
serializer.end_entity()
elif elem.tag == 'line':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
lineserializer = XmlLineSerializer(stylestack.get('indent')+indent,stylestack.get('tabwidth'),width,stylestack.get('line-ratio'))
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'left':
print_elem(stylestack,lineserializer,child,indent=indent)
elif child.tag == 'right':
lineserializer.start_right()
print_elem(stylestack,lineserializer,child,indent=indent)
serializer.pre(lineserializer.get_line())
serializer.end_entity()
elif elem.tag == 'ul':
serializer.start_block(stylestack)
bullet = stylestack.get('bullet')
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + bullet)
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'ol':
cwidth = len(str(len(elem))) + 2
i = 1
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + ' ' + (str(i)+')').ljust(cwidth))
i = i + 1
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'pre':
serializer.start_block(stylestack)
serializer.pre(elem.text)
serializer.end_entity()
elif elem.tag == 'hr':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
serializer.start_block(stylestack)
serializer.text('-'*width)
serializer.end_entity()
elif elem.tag == 'br':
serializer.linebreak()
elif elem.tag == 'img':
if 'src' in elem.attrib and 'data:' in elem.attrib['src']:
self.print_base64_image(elem.attrib['src'])
elif elem.tag == 'barcode' and 'encoding' in elem.attrib:
serializer.start_block(stylestack)
self.barcode(strclean(elem.text),elem.attrib['encoding'])
serializer.end_entity()
elif elem.tag == 'cut':
self.cut()
elif elem.tag == 'partialcut':
self.cut(mode='part')
elif elem.tag == 'cashdraw':
self.cashdraw(2)
self.cashdraw(5)
stylestack.pop()
try:
stylestack = StyleStack()
serializer = XmlSerializer(self)
root = ET.fromstring(xml.encode('utf-8'))
self._raw(stylestack.to_escpos())
print_elem(stylestack,serializer,root)
if 'open-cashdrawer' in root.attrib and root.attrib['open-cashdrawer'] == 'true':
self.cashdraw(2)
self.cashdraw(5)
if not 'cut' in root.attrib or root.attrib['cut'] == 'true' :
self.cut()
except Exception as e:
errmsg = str(e)+'\n'+'-'*48+'\n'+traceback.format_exc() + '-'*48+'\n'
self.text(errmsg)
self.cut()
raise e
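    # A minimal receipt document accepted by receipt(), for illustration only
    # (the tag vocabulary comes from print_elem above, the values are made up):
    #
    #   printer.receipt("""
    #   <receipt>
    #     <h1>My Shop</h1>
    #     <line><left>Coffee</left><right><value>2.50</value></right></line>
    #     <hr />
    #     <line><left>Total</left><right><value>2.50</value></right></line>
    #   </receipt>
    #   """)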
def text(self,txt):
""" Print Utf8 encoded alpha-numeric text """
if not txt:
return
try:
txt = txt.decode('utf-8')
except:
try:
txt = txt.decode('utf-16')
except:
pass
self.extra_chars = 0
def encode_char(char):
"""
Encodes a single utf-8 character into a sequence of
esc-pos code page change instructions and character declarations
"""
char_utf8 = char.encode('utf-8')
encoded = ''
encoding = self.encoding # we reuse the last encoding to prevent code page switches at every character
encodings = {
# TODO use ordering to prevent useless switches
# TODO Support other encodings not natively supported by python ( Thai, Khazakh, Kanjis )
'cp437': TXT_ENC_PC437,
'cp850': TXT_ENC_PC850,
'cp852': TXT_ENC_PC852,
'cp857': TXT_ENC_PC857,
'cp858': TXT_ENC_PC858,
'cp860': TXT_ENC_PC860,
'cp863': TXT_ENC_PC863,
'cp865': TXT_ENC_PC865,
'cp866': TXT_ENC_PC866,
'cp862': TXT_ENC_PC862,
'cp720': TXT_ENC_PC720,
'iso8859_2': TXT_ENC_8859_2,
'iso8859_7': TXT_ENC_8859_7,
'iso8859_9': TXT_ENC_8859_9,
'cp1254' : TXT_ENC_WPC1254,
'cp1255' : TXT_ENC_WPC1255,
'cp1256' : TXT_ENC_WPC1256,
'cp1257' : TXT_ENC_WPC1257,
'cp1258' : TXT_ENC_WPC1258,
'katakana' : TXT_ENC_KATAKANA,
}
remaining = copy.copy(encodings)
if not encoding :
encoding = 'cp437'
while True: # Trying all encoding until one succeeds
try:
if encoding == 'katakana': # Japanese characters
if jcconv:
# try to convert japanese text to a half-katakanas
kata = jcconv.kata2half(jcconv.hira2kata(char_utf8))
if kata != char_utf8:
self.extra_chars += len(kata.decode('utf-8')) - 1
# the conversion may result in multiple characters
return encode_str(kata.decode('utf-8'))
else:
kata = char_utf8
if kata in TXT_ENC_KATAKANA_MAP:
encoded = TXT_ENC_KATAKANA_MAP[kata]
break
else:
raise ValueError()
else:
encoded = char.encode(encoding)
break
except ValueError: #the encoding failed, select another one and retry
if encoding in remaining:
del remaining[encoding]
if len(remaining) >= 1:
encoding = remaining.items()[0][0]
else:
encoding = 'cp437'
encoded = '\xb1' # could not encode, output error character
                        break
if encoding != self.encoding:
# if the encoding changed, remember it and prefix the character with
# the esc-pos encoding change sequence
self.encoding = encoding
encoded = encodings[encoding] + encoded
return encoded
def encode_str(txt):
buffer = ''
for c in txt:
buffer += encode_char(c)
return buffer
txt = encode_str(txt)
# if the utf-8 -> codepage conversion inserted extra characters,
# remove double spaces to try to restore the original string length
# and prevent printing alignment issues
while self.extra_chars > 0:
dspace = txt.find(' ')
if dspace > 0:
txt = txt[:dspace] + txt[dspace+1:]
self.extra_chars -= 1
else:
break
self._raw(txt)
def set(self, align='left', font='a', type='normal', width=1, height=1):
""" Set text properties """
# Align
if align.upper() == "CENTER":
self._raw(TXT_ALIGN_CT)
elif align.upper() == "RIGHT":
self._raw(TXT_ALIGN_RT)
elif align.upper() == "LEFT":
self._raw(TXT_ALIGN_LT)
# Font
if font.upper() == "B":
self._raw(TXT_FONT_B)
else: # DEFAULT FONT: A
self._raw(TXT_FONT_A)
# Type
if type.upper() == "B":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_OFF)
elif type.upper() == "U":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "U2":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL2_ON)
elif type.upper() == "BU":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "BU2":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL2_ON)
        elif type.upper() == "NORMAL":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_OFF)
# Width
if width == 2 and height != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2WIDTH)
elif height == 2 and width != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2HEIGHT)
elif height == 2 and width == 2:
self._raw(TXT_2WIDTH)
self._raw(TXT_2HEIGHT)
else: # DEFAULT SIZE: NORMAL
self._raw(TXT_NORMAL)
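    # Illustrative call (a sketch): centered, bold and underlined text at
    # double width and height:
    #
    #   printer.set(align='center', type='BU', width=2, height=2)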
def cut(self, mode=''):
""" Cut paper """
# Fix the size between last line and cut
# TODO: handle this with a line feed
self._raw("\n\n\n\n\n\n")
if mode.upper() == "PART":
self._raw(PAPER_PART_CUT)
else: # DEFAULT MODE: FULL CUT
self._raw(PAPER_FULL_CUT)
def cashdraw(self, pin):
""" Send pulse to kick the cash drawer """
if pin == 2:
self._raw(CD_KICK_2)
elif pin == 5:
self._raw(CD_KICK_5)
else:
raise CashDrawerError()
def hw(self, hw):
""" Hardware operations """
if hw.upper() == "INIT":
self._raw(HW_INIT)
elif hw.upper() == "SELECT":
self._raw(HW_SELECT)
elif hw.upper() == "RESET":
self._raw(HW_RESET)
else: # DEFAULT: DOES NOTHING
pass
def control(self, ctl):
""" Feed control sequences """
if ctl.upper() == "LF":
self._raw(CTL_LF)
elif ctl.upper() == "FF":
self._raw(CTL_FF)
elif ctl.upper() == "CR":
self._raw(CTL_CR)
elif ctl.upper() == "HT":
self._raw(CTL_HT)
elif ctl.upper() == "VT":
self._raw(CTL_VT)
| agpl-3.0 |
capriele/crazyflie-clients-python-move | lib/cflib/crtp/debugdriver.py | 9 | 35152 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Fake link driver used to debug the UI without using the Crazyflie.
The operation of this driver can be controlled in two ways, either by
connecting to different URIs or by sending messages to the DebugDriver port
through CRTP once connected.
For normal connections a console thread is also started that will send
generated console output via CRTP.
"""
__author__ = 'Bitcraze AB'
__all__ = ['DebugDriver']
from threading import Thread
from .crtpdriver import CRTPDriver
from .crtpstack import CRTPPacket, CRTPPort
from .exceptions import WrongUriType
import Queue
import re
import time
import struct
from datetime import datetime
from cflib.crazyflie.log import LogTocElement
from cflib.crazyflie.param import ParamTocElement
import random
import string
import errno
import logging
logger = logging.getLogger(__name__)
# This setup is used to debug raw memory logging
memlogging = {0x01: {"min": 0, "max": 255, "mod": 1, "vartype": 1},
0x02: {"min": 0, "max": 65000, "mod": 100, "vartype": 2},
0x03: {"min": 0, "max": 100000, "mod": 1000, "vartype": 3},
0x04: {"min":-100, "max": 100, "mod": 1, "vartype": 4},
0x05: {"min":-10000, "max": 10000, "mod": 2000, "vartype": 5},
0x06: {"min":-50000, "max": 50000, "mod": 1000, "vartype": 6},
0x07: {"min": 0, "max": 255, "mod": 1, "vartype": 1}}
class DebugDriver (CRTPDriver):
""" Debug driver used for debugging UI/communication without using a
Crazyflie"""
def __init__(self):
self.fakeLoggingThreads = []
# Fill up the fake logging TOC with values and data
self.fakeLogToc = []
self.fakeLogToc.append({"varid": 0, "vartype": 5, "vargroup": "imu",
"varname": "gyro_x", "min":-10000,
"max": 10000, "mod": 1000})
self.fakeLogToc.append({"varid": 1, "vartype": 5, "vargroup": "imu",
"varname": "gyro_y", "min":-10000,
"max": 10000, "mod": 150})
self.fakeLogToc.append({"varid": 2, "vartype": 5, "vargroup": "imu",
"varname": "gyro_z", "min":-10000,
"max": 10000, "mod": 200})
self.fakeLogToc.append({"varid": 3, "vartype": 5, "vargroup": "imu",
"varname": "acc_x", "min":-1000,
"max": 1000, "mod": 15})
self.fakeLogToc.append({"varid": 4, "vartype": 5, "vargroup": "imu",
"varname": "acc_y", "min":-1000,
"max": 1000, "mod": 10})
self.fakeLogToc.append({"varid": 5, "vartype": 5, "vargroup": "imu",
"varname": "acc_z", "min":-1000,
"max": 1000, "mod": 20})
self.fakeLogToc.append({"varid": 6, "vartype": 7,
"vargroup": "stabilizer", "varname": "roll",
"min":-90, "max": 90, "mod": 2})
self.fakeLogToc.append({"varid": 7, "vartype": 7,
"vargroup": "stabilizer", "varname": "pitch",
"min":-90, "max": 90, "mod": 1.5})
self.fakeLogToc.append({"varid": 8, "vartype": 7,
"vargroup": "stabilizer", "varname": "yaw",
"min":-90, "max": 90, "mod": 2.5})
self.fakeLogToc.append({"varid": 9, "vartype": 7, "vargroup": "pm",
"varname": "vbat", "min": 3.0,
"max": 4.2, "mod": 0.1})
self.fakeLogToc.append({"varid": 10, "vartype": 6, "vargroup": "motor",
"varname": "m1", "min": 0, "max": 65000,
"mod": 1000})
self.fakeLogToc.append({"varid": 11, "vartype": 6, "vargroup": "motor",
"varname": "m2", "min": 0, "max": 65000,
"mod": 1000})
self.fakeLogToc.append({"varid": 12, "vartype": 6, "vargroup": "motor",
"varname": "m3", "min": 0, "max": 65000,
"mod": 1000})
self.fakeLogToc.append({"varid": 13, "vartype": 6, "vargroup": "motor",
"varname": "m4", "min": 0, "max": 65000,
"mod": 1000})
self.fakeLogToc.append({"varid": 14, "vartype": 2,
"vargroup": "stabilizer", "varname": "thrust",
"min": 0, "max": 65000, "mod": 1000})
self.fakeLogToc.append({"varid": 15, "vartype": 7,
"vargroup": "baro", "varname": "asl",
"min": 540, "max": 545, "mod": 0.5})
self.fakeLogToc.append({"varid": 16, "vartype": 7,
"vargroup": "baro", "varname": "aslRaw",
"min": 540, "max": 545, "mod": 1.0})
self.fakeLogToc.append({"varid": 17, "vartype": 7,
"vargroup": "baro", "varname": "aslLong",
"min": 540, "max": 545, "mod": 0.5})
self.fakeLogToc.append({"varid": 18, "vartype": 7,
"vargroup": "baro", "varname": "temp",
"min": 26, "max": 38, "mod": 1.0})
self.fakeLogToc.append({"varid": 19, "vartype": 7,
"vargroup": "altHold", "varname": "target",
"min": 542, "max": 543, "mod": 0.1})
self.fakeLogToc.append({"varid": 20, "vartype": 6,
"vargroup": "gps", "varname": "lat",
"min": 556112190, "max": 556112790,
"mod": 10})
self.fakeLogToc.append({"varid": 21, "vartype": 6,
"vargroup": "gps", "varname": "lon",
"min": 129945110, "max": 129945710,
"mod": 10})
self.fakeLogToc.append({"varid": 22, "vartype": 6,
"vargroup": "gps", "varname": "hMSL",
"min": 0, "max": 100000,
"mod": 1000})
self.fakeLogToc.append({"varid": 23, "vartype": 6,
"vargroup": "gps", "varname": "heading",
"min": -10000000, "max": 10000000,
"mod": 100000})
self.fakeLogToc.append({"varid": 24, "vartype": 6,
"vargroup": "gps", "varname": "gSpeed",
"min": 0, "max": 1000,
"mod": 100})
self.fakeLogToc.append({"varid": 25, "vartype": 3,
"vargroup": "gps", "varname": "hAcc",
"min": 0, "max": 5000,
"mod": 100})
self.fakeLogToc.append({"varid": 26, "vartype": 1,
"vargroup": "gps", "varname": "fixType",
"min": 0, "max": 5,
"mod": 1})
# Fill up the fake logging TOC with values and data
self.fakeParamToc = []
self.fakeParamToc.append({"varid": 0, "vartype": 0x08,
"vargroup": "blah", "varname": "p",
"writable": True, "value": 100})
self.fakeParamToc.append({"varid": 1, "vartype": 0x0A,
"vargroup": "info", "varname": "cid",
"writable": False, "value": 1234})
self.fakeParamToc.append({"varid": 2, "vartype": 0x06,
"vargroup": "rpid", "varname": "prp",
"writable": True, "value": 1.5})
self.fakeParamToc.append({"varid": 3, "vartype": 0x06,
"vargroup": "rpid", "varname": "pyaw",
"writable": True, "value": 2.5})
self.fakeParamToc.append({"varid": 4, "vartype": 0x06,
"vargroup": "rpid", "varname": "irp",
"writable": True, "value": 3.5})
self.fakeParamToc.append({"varid": 5, "vartype": 0x06,
"vargroup": "rpid", "varname": "iyaw",
"writable": True, "value": 4.5})
self.fakeParamToc.append({"varid": 6, "vartype": 0x06,
"vargroup": "rpid", "varname": "drp",
"writable": True, "value": 5.5})
self.fakeParamToc.append({"varid": 7, "vartype": 0x06,
"vargroup": "rpid", "varname": "dyaw",
"writable": True, "value": 6.5})
self.fakeParamToc.append({"varid": 8, "vartype": 0x06,
"vargroup": "apid", "varname": "prp",
"writable": True, "value": 7.5})
self.fakeParamToc.append({"varid": 9, "vartype": 0x06,
"vargroup": "apid", "varname": "pyaw",
"writable": True, "value": 8.5})
self.fakeParamToc.append({"varid": 10, "vartype": 0x06,
"vargroup": "apid", "varname": "irp",
"writable": True, "value": 9.5})
self.fakeParamToc.append({"varid": 11, "vartype": 0x06,
"vargroup": "apid", "varname": "iyaw",
"writable": True, "value": 10.5})
self.fakeParamToc.append({"varid": 12, "vartype": 0x06,
"vargroup": "apid", "varname": "drp",
"writable": True, "value": 11.5})
self.fakeParamToc.append({"varid": 13, "vartype": 0x06,
"vargroup": "apid", "varname": "dyaw",
"writable": True, "value": 12.5})
self.fakeParamToc.append({"varid": 14, "vartype": 0x08,
"vargroup": "flightctrl",
"varname": "xmode", "writable": True,
"value": 1})
self.fakeParamToc.append({"varid": 15, "vartype": 0x08,
"vargroup": "flightctrl",
"varname": "ratepid", "writable": True,
"value": 1})
self.fakeParamToc.append({"varid": 16, "vartype": 0x08,
"vargroup": "imu_sensors",
"varname": "HMC5883L", "writable": False,
"value": 1})
self.fakeParamToc.append({"varid": 17, "vartype": 0x08,
"vargroup": "imu_sensors",
"varname": "MS5611", "writable": False,
"value": 1})
self.fakeParamToc.append({"varid": 18, "vartype": 0x0A,
"vargroup": "firmware",
"varname": "revision0", "writable": False,
"value": 0xdeb})
self.fakeParamToc.append({"varid": 19, "vartype": 0x09,
"vargroup": "firmware",
"varname": "revision1", "writable": False,
"value": 0x99})
self.fakeParamToc.append({"varid": 20, "vartype": 0x08,
"vargroup": "firmware",
"varname": "modified", "writable": False,
"value": 1})
self.fakeParamToc.append({"varid": 21, "vartype": 0x08,
"vargroup": "imu_tests",
"varname": "MPU6050", "writable": False,
"value": 1})
self.fakeParamToc.append({"varid": 22, "vartype": 0x08,
"vargroup": "imu_tests",
"varname": "HMC5883L", "writable": False,
"value": 1})
self.fakeParamToc.append({"varid": 23, "vartype": 0x08,
"vargroup": "imu_tests",
"varname": "MS5611", "writable": False,
"value": 1})
self.fakeflash = {}
self._random_answer_delay = True
self.queue = Queue.Queue()
self._packet_handler = _PacketHandlingThread(self.queue,
self.fakeLogToc,
self.fakeParamToc)
self._packet_handler.start()
def scan_interface(self):
return [["debug://0/0", "Normal connection"],
["debug://0/1", "Fail to connect"],
["debug://0/2", "Incomplete log TOC download"],
["debug://0/3", "Insert random delays on replies"],
["debug://0/4", "Insert random delays on replies and random TOC CRCs"],
["debug://0/5", "Normal but random TOC CRCs"]]
def get_status(self):
return "Ok"
def get_name(self):
return "debug"
def connect(self, uri, linkQualityCallback, linkErrorCallback):
if not re.search("^debug://", uri):
raise WrongUriType("Not a debug URI")
self._packet_handler.linkErrorCallback = linkErrorCallback
self._packet_handler.linkQualityCallback = linkQualityCallback
# Debug-options for this driver that
# is set by using different connection URIs
self._packet_handler.inhibitAnswers = False
self._packet_handler.doIncompleteLogTOC = False
self._packet_handler.bootloader = False
self._packet_handler._random_answer_delay = False
self._packet_handler._random_toc_crcs = False
if (re.search("^debug://.*/1\Z", uri)):
self._packet_handler.inhibitAnswers = True
if (re.search("^debug://.*/110\Z", uri)):
self._packet_handler.bootloader = True
if (re.search("^debug://.*/2\Z", uri)):
self._packet_handler.doIncompleteLogTOC = True
if (re.search("^debug://.*/3\Z", uri)):
self._packet_handler._random_answer_delay = True
if (re.search("^debug://.*/4\Z", uri)):
self._packet_handler._random_answer_delay = True
self._packet_handler._random_toc_crcs = True
if (re.search("^debug://.*/5\Z", uri)):
self._packet_handler._random_toc_crcs = True
self.fakeConsoleThread = None
if (not self._packet_handler.inhibitAnswers and not self._packet_handler.bootloader):
self.fakeConsoleThread = FakeConsoleThread(self.queue)
self.fakeConsoleThread.start()
if (self._packet_handler.linkQualityCallback is not None):
self._packet_handler.linkQualityCallback(0)
def receive_packet(self, time=0):
if time == 0:
try:
return self.queue.get(False)
except Queue.Empty:
return None
elif time < 0:
try:
return self.queue.get(True)
except Queue.Empty:
return None
else:
try:
return self.queue.get(True, time)
except Queue.Empty:
return None
def send_packet(self, pk):
self._packet_handler.handle_packet(pk)
def close(self):
logger.info("Closing debugdriver")
for f in self._packet_handler.fakeLoggingThreads:
f.stop()
if self.fakeConsoleThread:
self.fakeConsoleThread.stop()
class _PacketHandlingThread(Thread):
"""Thread for handling packets asynchronously"""
def __init__(self, out_queue, fake_log_toc, fake_param_toc):
Thread.__init__(self)
self.setDaemon(True)
self.queue = out_queue
self.fakeLogToc = fake_log_toc
self.fakeParamToc = fake_param_toc
self._in_queue = Queue.Queue()
self.inhibitAnswers = False
self.doIncompleteLogTOC = False
self.bootloader = False
self._random_answer_delay = False
self._random_toc_crcs = False
self.linkErrorCallback = None
self.linkQualityCallback = None
random.seed(None)
self.fakeLoggingThreads = []
self._added_blocks = []
self.nowAnswerCounter = 4
def handle_packet(self, pk):
self._in_queue.put(pk)
def run(self):
while (True):
pk = self._in_queue.get(True)
if (self.inhibitAnswers):
self.nowAnswerCounter = self.nowAnswerCounter - 1
logger.debug("Not answering with any data, will send link errori"
" in %d retries", self.nowAnswerCounter)
if (self.nowAnswerCounter == 0):
self.linkErrorCallback("Nothing is answering, and it"
" shouldn't")
else:
if (pk.port == 0xFF):
self._handle_bootloader(pk)
elif (pk.port == CRTPPort.DEBUGDRIVER):
self._handle_debugmessage(pk)
elif (pk.port == CRTPPort.COMMANDER):
pass
elif (pk.port == CRTPPort.LOGGING):
self._handle_logging(pk)
elif (pk.port == CRTPPort.PARAM):
self.handleParam(pk)
else:
logger.warning("Not handling incomming packets on port [%d]",
pk.port)
def _handle_bootloader(self, pk):
cmd = pk.datal[1]
if (cmd == 0x10): # Request info about copter
p = CRTPPacket()
p.set_header(0xFF, 0xFF)
pageSize = 1024
buffPages = 10
flashPages = 100
flashStart = 1
p.data = struct.pack('<BBHHHH', 0xFF, 0x10, pageSize, buffPages,
flashPages, flashStart)
            # 12-byte fake CPU id, packed byte by byte
            p.data += struct.pack('B' * 12, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5,
                                  0, 0, 0, 0, 0, 0)
self._send_packet(p)
logging.info("Bootloader: Sending info back info")
elif (cmd == 0x14): # Upload buffer
            [page, addr] = struct.unpack('<HH', pk.data[2:6])  # page/addr follow the 0xFF/cmd header bytes
elif (cmd == 0x18): # Flash page
p = CRTPPacket()
p.set_header(0xFF, 0xFF)
p.data = struct.pack('<BBH', 0xFF, 0x18, 1)
self._send_packet(p)
elif (cmd == 0xFF): # Reset to firmware
logger.info("Bootloader: Got reset command")
else:
logger.warning("Bootloader: Unknown command 0x%02X", cmd)
def _handle_debugmessage(self, pk):
if (pk.channel == 0):
cmd = struct.unpack("B", pk.data[0])[0]
if (cmd == 0): # Fake link quality
newLinkQuality = struct.unpack("B", pk.data[1])[0]
self.linkQualityCallback(newLinkQuality)
elif (cmd == 1):
self.linkErrorCallback("DebugDriver was forced to disconnect!")
else:
logger.warning("Debug port: Not handling cmd=%d on channel 0",
cmd)
else:
logger.warning("Debug port: Not handling channel=%d",
pk.channel)
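    # Illustrative sketch (not part of the original driver) of how a test can
    # poke this handler once connected, assuming `driver` is a connected
    # DebugDriver instance; cmd 0 fakes the link quality, cmd 1 forces a
    # disconnect:
    #
    #   pk = CRTPPacket()
    #   pk.set_header(CRTPPort.DEBUGDRIVER, 0)
    #   pk.data = struct.pack('<BB', 0, 42)   # report 42% link quality
    #   driver.send_packet(pk)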
def _handle_toc_access(self, pk):
chan = pk.channel
cmd = struct.unpack("B", pk.data[0])[0]
logger.info("TOC access on port %d", pk.port)
if (chan == 0): # TOC Access
cmd = struct.unpack("B", pk.data[0])[0]
if (cmd == 0): # Reqest variable info
p = CRTPPacket()
p.set_header(pk.port, 0)
varIndex = 0
if (len(pk.data) > 1):
varIndex = struct.unpack("B", pk.data[1])[0]
logger.debug("TOC[%d]: Requesting ID=%d", pk.port,
varIndex)
else:
logger.debug("TOC[%d]: Requesting first index..surprise,"
" it 0 !", pk.port)
if (pk.port == CRTPPort.LOGGING):
l = self.fakeLogToc[varIndex]
if (pk.port == CRTPPort.PARAM):
l = self.fakeParamToc[varIndex]
vartype = l["vartype"]
if (pk.port == CRTPPort.PARAM and l["writable"] is True):
vartype = vartype | (0x10)
p.data = struct.pack("<BBB", cmd, l["varid"], vartype)
for ch in l["vargroup"]:
p.data += ch
p.data += '\0'
for ch in l["varname"]:
p.data += ch
p.data += '\0'
if (self.doIncompleteLogTOC is False):
self._send_packet(p)
elif (varIndex < 5):
self._send_packet(p)
else:
logger.info("TOC: Doing incomplete TOC, stopping after"
" varIndex => 5")
if (cmd == 1): # TOC CRC32 request
fakecrc = 0
if (pk.port == CRTPPort.LOGGING):
tocLen = len(self.fakeLogToc)
fakecrc = 0xAAAAAAAA
if (pk.port == CRTPPort.PARAM):
tocLen = len(self.fakeParamToc)
fakecrc = 0xBBBBBBBB
if self._random_toc_crcs:
fakecrc = int(''.join(random.choice("ABCDEF" + string.digits) for x in range(8)), 16)
logger.debug("Generated random TOC CRC: 0x%x", fakecrc)
logger.info("TOC[%d]: Requesting TOC CRC, sending back fake"
" stuff: %d", pk.port, len(self.fakeLogToc))
p = CRTPPacket()
p.set_header(pk.port, 0)
p.data = struct.pack('<BBIBB', 1, tocLen, fakecrc, 16, 24)
self._send_packet(p)
def handleParam(self, pk):
chan = pk.channel
cmd = struct.unpack("B", pk.data[0])[0]
logger.debug("PARAM: Port=%d, Chan=%d, cmd=%d", pk.port,
chan, cmd)
if (chan == 0): # TOC Access
self._handle_toc_access(pk)
elif (chan == 2): # Settings access
varId = pk.datal[0]
formatStr = ParamTocElement.types[self.fakeParamToc
[varId]["vartype"]][1]
newvalue = struct.unpack(formatStr, pk.data[1:])[0]
self.fakeParamToc[varId]["value"] = newvalue
logger.info("PARAM: New value [%s] for param [%d]", newvalue,
varId)
# Send back the new value
p = CRTPPacket()
p.set_header(pk.port, 2)
p.data += struct.pack("<B", varId)
p.data += struct.pack(formatStr, self.fakeParamToc[varId]["value"])
self._send_packet(p)
elif (chan == 1):
p = CRTPPacket()
p.set_header(pk.port, 1)
varId = cmd
p.data += struct.pack("<B", varId)
formatStr = ParamTocElement.types[self.fakeParamToc
[varId]["vartype"]][1]
p.data += struct.pack(formatStr, self.fakeParamToc[varId]["value"])
logger.info("PARAM: Getting value for %d", varId)
self._send_packet(p)
def _handle_logging(self, pk):
chan = pk.channel
cmd = struct.unpack("B", pk.data[0])[0]
logger.debug("LOG: Chan=%d, cmd=%d", chan, cmd)
if (chan == 0): # TOC Access
self._handle_toc_access(pk)
elif (chan == 1): # Settings access
if (cmd == 0):
blockId = ord(pk.data[1])
if blockId not in self._added_blocks:
self._added_blocks.append(blockId)
logger.info("LOG:Adding block id=%d", blockId)
listofvars = pk.data[3:]
fakeThread = _FakeLoggingDataThread(self.queue, blockId,
listofvars,
self.fakeLogToc)
self.fakeLoggingThreads.append(fakeThread)
fakeThread.start()
                    # Answer that everything is ok
p = CRTPPacket()
p.set_header(5, 1)
p.data = struct.pack('<BBB', 0, blockId, 0x00)
self._send_packet(p)
else:
p = CRTPPacket()
p.set_header(5, 1)
p.data = struct.pack('<BBB', 0, blockId, errno.EEXIST)
self._send_packet(p)
if (cmd == 1):
logger.warning("LOG: Appending block not implemented!")
if (cmd == 2):
blockId = ord(pk.data[1])
logger.info("LOG: Should delete block %d", blockId)
success = False
for fb in self.fakeLoggingThreads:
if (fb.blockId == blockId):
fb._disable_logging()
fb.stop()
p = CRTPPacket()
p.set_header(5, 1)
p.data = struct.pack('<BBB', cmd, blockId, 0x00)
self._send_packet(p)
logger.info("LOG: Deleted block=%d", blockId)
success = True
if (success is False):
logger.warning("LOG: Could not delete block=%d, not found",
blockId)
# TODO: Send back error code
if (cmd == 3):
blockId = ord(pk.data[1])
period = ord(pk.data[2]) * 10 # Sent as multiple of 10 ms
logger.info("LOG:Starting block %d", blockId)
success = False
for fb in self.fakeLoggingThreads:
if (fb.blockId == blockId):
fb._enable_logging()
fb.period = period
p = CRTPPacket()
p.set_header(5, 1)
p.data = struct.pack('<BBB', cmd, blockId, 0x00)
self._send_packet(p)
logger.info("LOG:Started block=%d", blockId)
success = True
if (success is False):
logger.info("LOG:Could not start block=%d, not found",
blockId)
# TODO: Send back error code
if (cmd == 4):
blockId = ord(pk.data[1])
logger.info("LOG:Pausing block %d", blockId)
success = False
for fb in self.fakeLoggingThreads:
if (fb.blockId == blockId):
fb._disable_logging()
p = CRTPPacket()
p.set_header(5, 1)
p.data = struct.pack('<BBB', cmd, blockId, 0x00)
self._send_packet(p)
logger.info("LOG:Pause block=%d", blockId)
success = True
if (success is False):
logger.warning("LOG:Could not pause block=%d, not found",
blockId)
# TODO: Send back error code
if (cmd == 5):
logger.info("LOG: Reset logging, but doing nothing")
p = CRTPPacket()
p.set_header(5, 1)
p.data = struct.pack('<BBB', cmd, 0x00, 0x00)
self._send_packet(p)
import traceback
logger.info(traceback.format_exc())
elif (chan > 1):
logger.warning("LOG: Uplink packets with channes > 1 not"
" supported!")
def _send_packet(self, pk):
# Do not delay log data
if self._random_answer_delay and pk.port != 0x05 and pk.channel != 0x02:
# Calculate a delay between 0ms and 250ms
delay = random.randint(0, 250)/1000.0
logger.debug("Delaying answer %.2fms", delay*1000)
time.sleep(delay)
self.queue.put(pk)
class _FakeLoggingDataThread (Thread):
"""Thread that will send back fake logging data via CRTP"""
def __init__(self, outQueue, blockId, listofvars, fakeLogToc):
Thread.__init__(self)
self.starttime = datetime.now()
self.outQueue = outQueue
self.setDaemon(True)
self.mod = 0
self.blockId = blockId
self.period = 0
self.listofvars = listofvars
self.shouldLog = False
self.fakeLogToc = fakeLogToc
self.fakeLoggingData = []
self.setName("Fakelog block=%d" % blockId)
self.shouldQuit = False
logging.info("FakeDataLoggingThread created for blockid=%d", blockId)
i = 0
while (i < len(listofvars)):
varType = ord(listofvars[i])
var_stored_as = (varType >> 8)
var_fetch_as = (varType & 0xFF)
if (var_stored_as > 0):
addr = struct.unpack("<I", listofvars[i + 1:i + 5])
logger.debug("FakeLoggingThread: We should log a memory addr"
" 0x%04X", addr)
self.fakeLoggingData.append([memlogging[var_fetch_as],
memlogging[var_fetch_as]["min"],
1])
i = i + 5
else:
varId = ord(listofvars[i])
logger.debug("FakeLoggingThread: We sould log variable from"
" TOC: id=%d, type=0x%02X", varId, varType)
for t in self.fakeLogToc:
if (varId == t["varid"]):
# Each touple will have var data and current fake value
self.fakeLoggingData.append([t, t["min"], 1])
i = i + 2
def _enable_logging(self):
self.shouldLog = True
logging.info("_FakeLoggingDataThread: Enable thread [%s] at period %d",
self.getName(), self.period)
def _disable_logging(self):
self.shouldLog = False
logging.info("_FakeLoggingDataThread: Disable thread [%s]",
self.getName())
def stop(self):
self.shouldQuit = True
def run(self):
while(self.shouldQuit is False):
if (self.shouldLog is True):
p = CRTPPacket()
p.set_header(5, 2)
p.data = struct.pack('<B', self.blockId)
timestamp = int((datetime.now()-self.starttime).total_seconds()*1000)
p.data += struct.pack('BBB', timestamp&0xff, (timestamp>>8)&0x0ff, (timestamp>>16)&0x0ff) # Timestamp
for d in self.fakeLoggingData:
# Set new value
d[1] = d[1] + d[0]["mod"] * d[2]
                    # Obey the limitations
if (d[1] > d[0]["max"]):
d[1] = d[0]["max"] # Limit value
d[2] = -1 # Switch direction
if (d[1] < d[0]["min"]):
d[1] = d[0]["min"] # Limit value
d[2] = 1 # Switch direction
# Pack value
formatStr = LogTocElement.types[d[0]["vartype"]][1]
p.data += struct.pack(formatStr, d[1])
self.outQueue.put(p)
time.sleep(self.period / 1000.0) # Period in ms here
class FakeConsoleThread (Thread):
"""Thread that will send back fake console data via CRTP"""
def __init__(self, outQueue):
Thread.__init__(self)
self.outQueue = outQueue
self.setDaemon(True)
self._should_run = True
def stop(self):
        self._should_run = False
def run(self):
# Temporary hack to test GPS from firmware by sending NMEA string on
# console
long_val = 0
lat_val = 0
alt_val = 0
while(self._should_run):
long_val += 1
lat_val += 1
alt_val += 1.0
long_string = "5536.677%d" % (long_val % 99)
lat_string = "01259.645%d" % (lat_val % 99)
alt_string = "%.1f" % (alt_val % 100.0)
# Copy of what is sent from the module, but note that only
# the GPGGA message is being simulated, the others are fixed...
self._send_text("Time is now %s\n" % datetime.now())
self._send_text("$GPVTG,,T,,M,0.386,N,0.716,K,A*2E\n")
self._send_text("$GPGGA,135544.0")
self._send_text("0,%s,N,%s,E,1,04,2.62,3.6,M,%s,M,,*58\n" % (long_string, lat_string, alt_string))
self._send_text("$GPGSA,A,3,31,20,23,07,,,,,,,,,3.02,2.62,1.52*05\n")
self._send_text("$GPGSV,2,1,07,07,09,181,15,13,63,219,26,16,02,097,,17,05,233,20*7E\n")
self._send_text("$GPGSV,2,2,07,20,42,119,35,23,77,097,27,31,12,032,19*47\n")
self._send_text("$GPGLL,5536.67734,N,01259.64578,E,135544.00,A,A*68\n")
time.sleep(2)
def _send_text(self, message):
p = CRTPPacket()
p.set_header(0, 0)
us = "%is" % len(message)
# This might be done prettier ;-)
p.data = struct.pack(us, message)
self.outQueue.put(p)
| gpl-2.0 |
ehashman/oh-mainline | vendor/packages/gdata/samples/apps/adminsettings_example.py | 41 | 5677 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a Sample for Google Apps Admin Settings.
AdminSettingsSample: shows everything you ever wanted to know about
your Google Apps Domain but were afraid to ask.
"""
__author__ = 'jlee@pbu.edu'
import getopt
import getpass
import sys
import time
import gdata.apps.service
import gdata.apps.adminsettings.service
class AdminSettingsSample(object):
"""AdminSettingsSample object demos Admin Settings API."""
def __init__(self, email, password, domain):
"""Constructor for the AdminSettingsSample object.
    Takes an email and password corresponding to a Google Apps admin
    account to demo the Admin Settings API.
Args:
email: [string] The e-mail address of the account to use for the sample.
password: [string] The password corresponding to the account specified by
the email parameter.
domain: [string] The domain for the Profiles feed
"""
self.gd_client = gdata.apps.adminsettings.service.AdminSettingsService()
self.gd_client.domain = domain
self.gd_client.email = email
self.gd_client.password = password
self.gd_client.source = 'GoogleInc-AdminSettingsPythonSample-1'
self.gd_client.ProgrammaticLogin()
def Run(self):
#pause 1 sec inbetween calls to prevent quota warning
print 'Google Apps Domain: ', self.gd_client.domain
time.sleep(1)
print 'Default Language: ', self.gd_client.GetDefaultLanguage()
time.sleep(1)
print 'Organization Name: ', self.gd_client.GetOrganizationName()
time.sleep(1)
print 'Maximum Users: ', self.gd_client.GetMaximumNumberOfUsers()
time.sleep(1)
print 'Current Users: ', self.gd_client.GetCurrentNumberOfUsers()
time.sleep(1)
print 'Domain is Verified: ',self.gd_client.IsDomainVerified()
time.sleep(1)
print 'Support PIN: ',self.gd_client.GetSupportPIN()
time.sleep(1)
print 'Domain Edition: ', self.gd_client.GetEdition()
time.sleep(1)
print 'Customer PIN: ', self.gd_client.GetCustomerPIN()
time.sleep(1)
print 'Domain Creation Time: ', self.gd_client.GetCreationTime()
time.sleep(1)
print 'Domain Country Code: ', self.gd_client.GetCountryCode()
time.sleep(1)
print 'Admin Secondary Email: ', self.gd_client.GetAdminSecondaryEmail()
time.sleep(1)
cnameverificationstatus = self.gd_client.GetCNAMEVerificationStatus()
print 'CNAME Verification Record Name: ', cnameverificationstatus['recordName']
print 'CNAME Verification Verified: ', cnameverificationstatus['verified']
print 'CNAME Verification Method: ', cnameverificationstatus['verificationMethod']
time.sleep(1)
mxverificationstatus = self.gd_client.GetMXVerificationStatus()
print 'MX Verification Verified: ', mxverificationstatus['verified']
print 'MX Verification Method: ', mxverificationstatus['verificationMethod']
time.sleep(1)
ssosettings = self.gd_client.GetSSOSettings()
print 'SSO Enabled: ', ssosettings['enableSSO']
print 'SSO Signon Page: ', ssosettings['samlSignonUri']
print 'SSO Logout Page: ', ssosettings['samlLogoutUri']
print 'SSO Password Page: ', ssosettings['changePasswordUri']
print 'SSO Whitelist IPs: ', ssosettings['ssoWhitelist']
print 'SSO Use Domain Specific Issuer: ', ssosettings['useDomainSpecificIssuer']
time.sleep(1)
ssokey = self.gd_client.GetSSOKey()
print 'SSO Key Modulus: ', ssokey['modulus']
print 'SSO Key Exponent: ', ssokey['exponent']
print 'SSO Key Algorithm: ', ssokey['algorithm']
print 'SSO Key Format: ', ssokey['format']
print 'User Migration Enabled: ', self.gd_client.IsUserMigrationEnabled()
time.sleep(1)
outboundgatewaysettings = self.gd_client.GetOutboundGatewaySettings()
print 'Outbound Gateway Smart Host: ', outboundgatewaysettings['smartHost']
print 'Outbound Gateway Mode: ', outboundgatewaysettings['smtpMode']
def main():
"""Demonstrates use of the Admin Settings API using the AdminSettingsSample object."""
# Parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['user=', 'pw=', 'domain='])
except getopt.error, msg:
print 'python adminsettings_example.py --user [username] --pw [password]'
print ' --domain [domain]'
sys.exit(2)
user = ''
pw = ''
domain = ''
# Process options
for option, arg in opts:
if option == '--user':
user = arg
elif option == '--pw':
pw = arg
elif option == '--domain':
domain = arg
while not domain:
print 'NOTE: Please run these tests only with a test account.'
domain = raw_input('Please enter your apps domain: ')
while not user:
    user = raw_input('Please enter an administrator account: ')+'@'+domain
while not pw:
pw = getpass.getpass('Please enter password: ')
if not pw:
print 'Password cannot be blank.'
try:
sample = AdminSettingsSample(user, pw, domain)
except gdata.service.BadAuthentication:
print 'Invalid user credentials given.'
return
sample.Run()
if __name__ == '__main__':
main()
| agpl-3.0 |
waynesun09/virt-test | virttest/remote.py | 1 | 44950 | """
Functions and classes used for logging into guests and transferring files.
"""
import logging
import time
import re
import os
import shutil
import tempfile
import aexpect
import utils_misc
import rss_client
import base64
from remote_commander import remote_master
from remote_commander import messenger
from autotest.client.shared import error
from autotest.client import utils
import data_dir
class LoginError(Exception):
def __init__(self, msg, output):
Exception.__init__(self, msg, output)
self.msg = msg
self.output = output
def __str__(self):
return "%s (output: %r)" % (self.msg, self.output)
class LoginAuthenticationError(LoginError):
pass
class LoginTimeoutError(LoginError):
def __init__(self, output):
LoginError.__init__(self, "Login timeout expired", output)
class LoginProcessTerminatedError(LoginError):
def __init__(self, status, output):
LoginError.__init__(self, None, output)
self.status = status
def __str__(self):
return ("Client process terminated (status: %s, output: %r)" %
(self.status, self.output))
class LoginBadClientError(LoginError):
def __init__(self, client):
LoginError.__init__(self, None, None)
self.client = client
def __str__(self):
return "Unknown remote shell client: %r" % self.client
class SCPError(Exception):
def __init__(self, msg, output):
Exception.__init__(self, msg, output)
self.msg = msg
self.output = output
def __str__(self):
return "%s (output: %r)" % (self.msg, self.output)
class SCPAuthenticationError(SCPError):
pass
class SCPAuthenticationTimeoutError(SCPAuthenticationError):
def __init__(self, output):
SCPAuthenticationError.__init__(self, "Authentication timeout expired",
output)
class SCPTransferTimeoutError(SCPError):
def __init__(self, output):
SCPError.__init__(self, "Transfer timeout expired", output)
class SCPTransferFailedError(SCPError):
def __init__(self, status, output):
SCPError.__init__(self, None, output)
self.status = status
def __str__(self):
return ("SCP transfer failed (status: %s, output: %r)" %
(self.status, self.output))
def handle_prompts(session, username, password, prompt, timeout=10,
debug=False):
"""
    Connect to a remote host (guest) using SSH, Telnet or another client.
Wait for questions and provide answers. If timeout expires while
waiting for output from the child (e.g. a password prompt or
a shell prompt) -- fail.
:param session: An Expect or ShellSession instance to operate on
:param username: The username to send in reply to a login prompt
:param password: The password to send in reply to a password prompt
:param prompt: The shell prompt that indicates a successful login
:param timeout: The maximal time duration (in seconds) to wait for each
step of the login procedure (i.e. the "Are you sure" prompt, the
password prompt, the shell prompt, etc)
:raise LoginTimeoutError: If timeout expires
:raise LoginAuthenticationError: If authentication fails
:raise LoginProcessTerminatedError: If the client terminates during login
:raise LoginError: If some other error occurs
"""
password_prompt_count = 0
login_prompt_count = 0
while True:
try:
match, text = session.read_until_last_line_matches(
[r"[Aa]re you sure", r"[Pp]assword:\s*",
r"\(or (press|type) Control-D to continue\):\s*$", # Prompt of rescue mode for Red Hat.
r"[Gg]ive.*[Ll]ogin:\s*$", # Prompt of rescue mode for SUSE.
r"(?<![Ll]ast).*[Ll]ogin:\s*$", # Don't match "Last Login:"
r"[Cc]onnection.*closed", r"[Cc]onnection.*refused",
r"[Pp]lease wait", r"[Ww]arning", r"[Ee]nter.*username",
r"[Ee]nter.*password", prompt],
timeout=timeout, internal_timeout=0.5)
if match == 0: # "Are you sure you want to continue connecting"
if debug:
logging.debug("Got 'Are you sure...', sending 'yes'")
session.sendline("yes")
continue
elif match in [1, 2, 3, 10]: # "password:"
if password_prompt_count == 0:
if debug:
logging.debug("Got password prompt, sending '%s'",
password)
session.sendline(password)
password_prompt_count += 1
continue
else:
raise LoginAuthenticationError("Got password prompt twice",
text)
elif match == 4 or match == 9: # "login:"
if login_prompt_count == 0 and password_prompt_count == 0:
if debug:
logging.debug("Got username prompt; sending '%s'",
username)
session.sendline(username)
login_prompt_count += 1
continue
else:
if login_prompt_count > 0:
msg = "Got username prompt twice"
else:
msg = "Got username prompt after password prompt"
raise LoginAuthenticationError(msg, text)
elif match == 5: # "Connection closed"
raise LoginError("Client said 'connection closed'", text)
elif match == 6: # "Connection refused"
raise LoginError("Client said 'connection refused'", text)
elif match == 7: # "Please wait"
if debug:
logging.debug("Got 'Please wait'")
timeout = 30
continue
elif match == 8: # "Warning added RSA"
if debug:
logging.debug("Got 'Warning added RSA to known host list")
continue
elif match == 11: # prompt
if debug:
logging.debug("Got shell prompt -- logged in")
break
except aexpect.ExpectTimeoutError, e:
raise LoginTimeoutError(e.output)
except aexpect.ExpectProcessTerminatedError, e:
raise LoginProcessTerminatedError(e.status, e.output)
def remote_login(client, host, port, username, password, prompt, linesep="\n",
log_filename=None, timeout=10, interface=None):
"""
Log into a remote host (guest) using SSH/Telnet/Netcat.
:param client: The client to use ('ssh', 'telnet' or 'nc')
:param host: Hostname or IP address
:param port: Port to connect to
:param username: Username (if required)
:param password: Password (if required)
:param prompt: Shell prompt (regular expression)
:param linesep: The line separator to use when sending lines
(e.g. '\\n' or '\\r\\n')
:param log_filename: If specified, log all output to this file
:param timeout: The maximal time duration (in seconds) to wait for
each step of the login procedure (i.e. the "Are you sure" prompt
or the password prompt)
    :param interface: The interface the neighbours attach to (only used with
        an ipv6 linklocal address)
    :raise LoginError: If an ipv6 linklocal address is used without assigning
        the interface the neighbour attaches to
:raise LoginBadClientError: If an unknown client is requested
:raise: Whatever handle_prompts() raises
:return: A ShellSession object.
"""
if host and host.lower().startswith("fe80"):
if not interface:
raise LoginError("When using ipv6 linklocal an interface must "
"be assigned")
host = "%s%%%s" % (host, interface)
if client == "ssh":
cmd = ("ssh -o UserKnownHostsFile=/dev/null "
"-o StrictHostKeyChecking=no "
"-o PreferredAuthentications=password -p %s %s@%s" %
(port, username, host))
elif client == "telnet":
cmd = "telnet -l %s %s %s" % (username, host, port)
elif client == "nc":
cmd = "nc %s %s" % (host, port)
else:
raise LoginBadClientError(client)
logging.debug("Login command: '%s'", cmd)
session = aexpect.ShellSession(cmd, linesep=linesep, prompt=prompt)
try:
handle_prompts(session, username, password, prompt, timeout)
except Exception:
session.close()
raise
if log_filename:
session.set_output_func(utils_misc.log_line)
session.set_output_params((log_filename,))
session.set_log_file(log_filename)
return session
class AexpectIOWrapperOut(messenger.StdIOWrapperOutBase64):
"""
Basic implementation of IOWrapper for stdout
"""
def close(self):
self._obj.close()
def fileno(self):
return os.open(self._obj, os.O_RDWR)
def write(self, data):
self._obj.send(data)
def remote_commander(client, host, port, username, password, prompt,
linesep="\n", log_filename=None, timeout=10, path=None):
"""
Log into a remote host (guest) using SSH/Telnet/Netcat.
:param client: The client to use ('ssh', 'telnet' or 'nc')
:param host: Hostname or IP address
:param port: Port to connect to
:param username: Username (if required)
:param password: Password (if required)
:param prompt: Shell prompt (regular expression)
:param linesep: The line separator to use when sending lines
(e.g. '\\n' or '\\r\\n')
:param log_filename: If specified, log all output to this file
:param timeout: The maximal time duration (in seconds) to wait for
each step of the login procedure (i.e. the "Are you sure" prompt
or the password prompt)
:param path: The path to place where remote_runner.py is placed.
:raise LoginBadClientError: If an unknown client is requested
:raise: Whatever handle_prompts() raises
:return: A ShellSession object.
"""
if path is None:
path = "/tmp"
if client == "ssh":
cmd = ("ssh -o UserKnownHostsFile=/dev/null "
"-o PreferredAuthentications=password "
"-p %s %s@%s %s agent_base64" %
(port, username, host, os.path.join(path, "remote_runner.py")))
elif client == "telnet":
cmd = "telnet -l %s %s %s" % (username, host, port)
elif client == "nc":
cmd = "nc %s %s" % (host, port)
else:
raise LoginBadClientError(client)
logging.debug("Login command: '%s'", cmd)
session = aexpect.Expect(cmd, linesep=linesep)
try:
handle_prompts(session, username, password, prompt, timeout)
except Exception:
session.close()
raise
if log_filename:
session.set_output_func(utils_misc.log_line)
session.set_output_params((log_filename,))
session.set_log_file(log_filename)
session.send_ctrl("raw")
# Wrap io interfaces.
inw = messenger.StdIOWrapperInBase64(session._get_fd("tail"))
outw = AexpectIOWrapperOut(session)
# Create commander
cmd = remote_master.CommanderMaster(inw, outw, False)
return cmd
def wait_for_login(client, host, port, username, password, prompt,
linesep="\n", log_filename=None, timeout=240,
internal_timeout=10, interface=None):
"""
Make multiple attempts to log into a guest until one succeeds or timeouts.
:param timeout: Total time duration to wait for a successful login
:param internal_timeout: The maximum time duration (in seconds) to wait for
each step of the login procedure (e.g. the
"Are you sure" prompt or the password prompt)
    :param interface: The interface the neighbours attach to (only used with
        an ipv6 linklocal address)
:see: remote_login()
:raise: Whatever remote_login() raises
:return: A ShellSession object.
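    A usage sketch (illustrative only; host, credentials and prompt are
    hypothetical):
        session = wait_for_login("ssh", "10.0.0.1", "22", "root", "123456",
                                 r"[\#\$]\s*$", timeout=360)
        session.cmd("uname -a")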
"""
logging.debug("Attempting to log into %s:%s using %s (timeout %ds)",
host, port, client, timeout)
end_time = time.time() + timeout
while time.time() < end_time:
try:
return remote_login(client, host, port, username, password, prompt,
linesep, log_filename, internal_timeout,
interface)
except LoginError, e:
logging.debug(e)
time.sleep(2)
# Timeout expired; try one more time but don't catch exceptions
return remote_login(client, host, port, username, password, prompt,
linesep, log_filename, internal_timeout, interface)
def _remote_scp(session, password_list, transfer_timeout=600, login_timeout=20):
"""
Transfer files using SCP, given a command line.
Transfer file(s) to a remote host (guest) using SCP. Wait for questions
and provide answers. If login_timeout expires while waiting for output
from the child (e.g. a password prompt), fail. If transfer_timeout expires
while waiting for the transfer to complete, fail.
:param session: An Expect or ShellSession instance to operate on
:param password_list: Password list to send in reply to the password prompt
:param transfer_timeout: The time duration (in seconds) to wait for the
transfer to complete.
:param login_timeout: The maximal time duration (in seconds) to wait for
each step of the login procedure (i.e. the "Are you sure" prompt or
the password prompt)
:raise SCPAuthenticationError: If authentication fails
:raise SCPTransferTimeoutError: If the transfer fails to complete in time
:raise SCPTransferFailedError: If the process terminates with a nonzero
exit code
:raise SCPError: If some other error occurs
"""
password_prompt_count = 0
timeout = login_timeout
authentication_done = False
scp_type = len(password_list)
while True:
try:
match, text = session.read_until_last_line_matches(
[r"[Aa]re you sure", r"[Pp]assword:\s*$", r"lost connection"],
timeout=timeout, internal_timeout=0.5)
if match == 0: # "Are you sure you want to continue connecting"
logging.debug("Got 'Are you sure...', sending 'yes'")
session.sendline("yes")
continue
elif match == 1: # "password:"
if password_prompt_count == 0:
logging.debug("Got password prompt, sending '%s'" %
password_list[password_prompt_count])
session.sendline(password_list[password_prompt_count])
password_prompt_count += 1
timeout = transfer_timeout
if scp_type == 1:
authentication_done = True
continue
elif password_prompt_count == 1 and scp_type == 2:
logging.debug("Got password prompt, sending '%s'" %
password_list[password_prompt_count])
session.sendline(password_list[password_prompt_count])
password_prompt_count += 1
timeout = transfer_timeout
authentication_done = True
continue
else:
raise SCPAuthenticationError("Got password prompt twice",
text)
elif match == 2: # "lost connection"
raise SCPError("SCP client said 'lost connection'", text)
except aexpect.ExpectTimeoutError, e:
if authentication_done:
raise SCPTransferTimeoutError(e.output)
else:
raise SCPAuthenticationTimeoutError(e.output)
except aexpect.ExpectProcessTerminatedError, e:
if e.status == 0:
logging.debug("SCP process terminated with status 0")
break
else:
raise SCPTransferFailedError(e.status, e.output)
def remote_scp(command, password_list, log_filename=None, transfer_timeout=600,
login_timeout=20):
"""
Transfer files using SCP, given a command line.
:param command: The command to execute
(e.g. "scp -r foobar root@localhost:/tmp/").
:param password_list: Password list to send in reply to a password prompt.
:param log_filename: If specified, log all output to this file
:param transfer_timeout: The time duration (in seconds) to wait for the
transfer to complete.
:param login_timeout: The maximal time duration (in seconds) to wait for
each step of the login procedure (i.e. the "Are you sure" prompt
or the password prompt)
:raise: Whatever _remote_scp() raises
"""
logging.debug("Trying to SCP with command '%s', timeout %ss",
command, transfer_timeout)
if log_filename:
output_func = utils_misc.log_line
output_params = (log_filename,)
else:
output_func = None
output_params = ()
session = aexpect.Expect(command,
output_func=output_func,
output_params=output_params)
try:
_remote_scp(session, password_list, transfer_timeout, login_timeout)
finally:
session.close()
def scp_to_remote(host, port, username, password, local_path, remote_path,
limit="", log_filename=None, timeout=600, interface=None):
"""
Copy files to a remote host (guest) through scp.
:param host: Hostname or IP address
:param username: Username (if required)
:param password: Password (if required)
:param local_path: Path on the local machine where we are copying from
:param remote_path: Path on the remote machine where we are copying to
:param limit: Speed limit of file transfer.
:param log_filename: If specified, log all output to this file
:param timeout: The time duration (in seconds) to wait for the transfer
to complete.
    :param interface: The interface the neighbours attach to (only used with
        an ipv6 linklocal address)
:raise: Whatever remote_scp() raises
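    A usage sketch (illustrative only; host, credentials and paths are
    hypothetical):
        scp_to_remote("10.0.0.1", 22, "root", "123456",
                      "/tmp/image.qcow2", "/var/lib/images/image.qcow2")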
"""
if (limit):
limit = "-l %s" % (limit)
if host and host.lower().startswith("fe80"):
if not interface:
raise SCPError("When using ipv6 linklocal address must assign",
"the interface the neighbour attache")
host = "%s%%%s" % (host, interface)
command = ("scp -v -o UserKnownHostsFile=/dev/null "
"-o StrictHostKeyChecking=no "
"-o PreferredAuthentications=password -r %s "
"-P %s %s %s@\[%s\]:%s" %
(limit, port, local_path, username, host, remote_path))
password_list = []
password_list.append(password)
return remote_scp(command, password_list, log_filename, timeout)
def scp_from_remote(host, port, username, password, remote_path, local_path,
limit="", log_filename=None, timeout=600, interface=None):
"""
Copy files from a remote host (guest).
:param host: Hostname or IP address
:param username: Username (if required)
:param password: Password (if required)
:param local_path: Path on the local machine where we are copying from
:param remote_path: Path on the remote machine where we are copying to
:param limit: Speed limit of file transfer.
:param log_filename: If specified, log all output to this file
:param timeout: The time duration (in seconds) to wait for the transfer
to complete.
    :param interface: The interface the neighbours attach to (only used with
        an ipv6 linklocal address)
:raise: Whatever remote_scp() raises
"""
if (limit):
limit = "-l %s" % (limit)
if host and host.lower().startswith("fe80"):
if not interface:
raise SCPError("When using ipv6 linklocal address must assign, ",
"the interface the neighbour attache")
host = "%s%%%s" % (host, interface)
command = ("scp -v -o UserKnownHostsFile=/dev/null "
"-o StrictHostKeyChecking=no "
"-o PreferredAuthentications=password -r %s "
"-P %s %s@\[%s\]:%s %s" %
(limit, port, username, host, remote_path, local_path))
password_list = []
password_list.append(password)
remote_scp(command, password_list, log_filename, timeout)
def scp_between_remotes(src, dst, port, s_passwd, d_passwd, s_name, d_name,
s_path, d_path, limit="", log_filename=None,
timeout=600, src_inter=None, dst_inter=None):
"""
Copy files from a remote host (guest) to another remote host (guest).
:param src/dst: Hostname or IP address of src and dst
:param s_name/d_name: Username (if required)
:param s_passwd/d_passwd: Password (if required)
:param s_path/d_path: Path on the remote machine where we are copying
from/to
:param limit: Speed limit of file transfer.
:param log_filename: If specified, log all output to this file
:param timeout: The time duration (in seconds) to wait for the transfer
to complete.
    :param src_inter: The interface on the local host that the src neighbour
        attaches to
    :param dst_inter: The interface on the src host that the dst neighbour
        attaches to
:return: True on success and False on failure.
"""
if (limit):
limit = "-l %s" % (limit)
if src and src.lower().startswith("fe80"):
if not src_inter:
raise SCPError("When using ipv6 linklocal address must assign ",
"the interface the neighbour attache")
src = "%s%%%s" % (src, src_inter)
if dst and dst.lower().startswith("fe80"):
if not dst_inter:
raise SCPError("When using ipv6 linklocal address must assign ",
"the interface the neighbour attache")
dst = "%s%%%s" % (dst, dst_inter)
command = ("scp -v -o UserKnownHostsFile=/dev/null "
"-o StrictHostKeyChecking=no "
"-o PreferredAuthentications=password -r %s -P %s"
" %s@\[%s\]:%s %s@\[%s\]:%s" %
(limit, port, s_name, src, s_path, d_name, dst, d_path))
password_list = []
password_list.append(s_passwd)
password_list.append(d_passwd)
return remote_scp(command, password_list, log_filename, timeout)
def nc_copy_between_remotes(src, dst, s_port, s_passwd, d_passwd,
s_name, d_name, s_path, d_path,
c_type="ssh", c_prompt="\n",
d_port="8888", d_protocol="udp", timeout=10,
check_sum=True):
"""
Copy files from guest to guest using netcat.
This method only supports linux guest OS.
:param src/dst: Hostname or IP address of src and dst
:param s_name/d_name: Username (if required)
:param s_passwd/d_passwd: Password (if required)
    :param s_path/d_path: Path on the remote machine where we are copying
        from/to
:param c_type: Login method to remote host(guest).
:param c_prompt: command line prompt of remote host(guest)
:param d_port: the port data transfer
:param d_protocol: nc protocol use (tcp or udp)
:param timeout: If a connection and stdin are idle for more than timeout
seconds, then the connection is silently closed.
:return: True on success and False on failure.
"""
s_session = remote_login(c_type, src, s_port, s_name, s_passwd, c_prompt)
d_session = remote_login(c_type, dst, s_port, d_name, d_passwd, c_prompt)
s_session.cmd("iptables -I INPUT -p %s -j ACCEPT" % d_protocol)
d_session.cmd("iptables -I OUTPUT -p %s -j ACCEPT" % d_protocol)
logging.info("Transfer data using netcat from %s to %s" % (src, dst))
cmd = "nc"
if d_protocol == "udp":
cmd += " -u"
cmd += " -w %s" % timeout
s_session.sendline("%s -l %s < %s" % (cmd, d_port, s_path))
d_session.sendline("echo a | %s %s %s > %s" % (cmd, src, d_port, d_path))
if check_sum:
if (s_session.cmd("md5sum %s" % s_path).split()[0] !=
d_session.cmd("md5sum %s" % d_path).split()[0]):
return False
return True
def udp_copy_between_remotes(src, dst, s_port, s_passwd, d_passwd,
s_name, d_name, s_path, d_path,
c_type="ssh", c_prompt="\n",
d_port="9000", timeout=600):
"""
Copy files from guest to guest using udp.
:param src/dst: Hostname or IP address of src and dst
:param s_name/d_name: Username (if required)
:param s_passwd/d_passwd: Password (if required)
    :param s_path/d_path: Path on the remote machine where we are copying
        from/to
:param c_type: Login method to remote host(guest).
:param c_prompt: command line prompt of remote host(guest)
:param d_port: the port data transfer
:param timeout: data transfer timeout
"""
s_session = remote_login(c_type, src, s_port, s_name, s_passwd, c_prompt)
d_session = remote_login(c_type, dst, s_port, d_name, d_passwd, c_prompt)
def get_abs_path(session, filename, extension):
"""
return file path drive+path
"""
cmd_tmp = "wmic datafile where \"Filename='%s' and "
cmd_tmp += "extension='%s'\" get drive^,path"
cmd = cmd_tmp % (filename, extension)
info = session.cmd_output(cmd, timeout=360).strip()
drive_path = re.search(r'(\w):\s+(\S+)', info, re.M)
if not drive_path:
raise error.TestError("Not found file %s.%s in your guest"
% (filename, extension))
return ":".join(drive_path.groups())
def get_file_md5(session, file_path):
"""
Get files md5sums
"""
if c_type == "ssh":
md5_cmd = "md5sum %s" % file_path
md5_reg = r"(\w+)\s+%s.*" % file_path
else:
drive_path = get_abs_path(session, "md5sums", "exe")
filename = file_path.split("\\")[-1]
md5_reg = r"%s\s+(\w+)" % filename
md5_cmd = '%smd5sums.exe %s | find "%s"' % (drive_path, file_path,
filename)
o = session.cmd_output(md5_cmd)
file_md5 = re.findall(md5_reg, o)
if not o:
raise error.TestError("Get file %s md5sum error" % file_path)
return file_md5
def server_alive(session):
if c_type == "ssh":
check_cmd = "ps aux"
else:
check_cmd = "tasklist"
o = session.cmd_output(check_cmd)
if not o:
raise error.TestError("Can not get the server status")
if "sendfile" in o.lower():
return True
return False
def start_server(session):
if c_type == "ssh":
start_cmd = "sendfile %s &" % d_port
else:
drive_path = get_abs_path(session, "sendfile", "exe")
start_cmd = "start /b %ssendfile.exe %s" % (drive_path,
d_port)
session.cmd_output_safe(start_cmd)
if not server_alive(session):
raise error.TestError("Start udt server failed")
def start_client(session):
if c_type == "ssh":
client_cmd = "recvfile %s %s %s %s" % (src, d_port,
s_path, d_path)
else:
drive_path = get_abs_path(session, "recvfile", "exe")
client_cmd_tmp = "%srecvfile.exe %s %s %s %s"
client_cmd = client_cmd_tmp % (drive_path, src, d_port,
s_path.split("\\")[-1],
d_path.split("\\")[-1])
session.cmd_output_safe(client_cmd, timeout)
def stop_server(session):
if c_type == "ssh":
stop_cmd = "killall sendfile"
else:
stop_cmd = "taskkill /F /IM sendfile.exe"
if server_alive(session):
session.cmd_output_safe(stop_cmd)
try:
src_md5 = get_file_md5(s_session, s_path)
if not server_alive(s_session):
start_server(s_session)
start_client(d_session)
dst_md5 = get_file_md5(d_session, d_path)
if src_md5 != dst_md5:
err_msg = "Files md5sum mismatch, file %s md5sum is '%s', "
err_msg = "but the file %s md5sum is %s"
raise error.TestError(err_msg % (s_path, src_md5,
d_path, dst_md5))
finally:
stop_server(s_session)
s_session.close()
d_session.close()
def copy_files_to(address, client, username, password, port, local_path,
remote_path, limit="", log_filename=None,
verbose=False, timeout=600, interface=None):
"""
Copy files to a remote host (guest) using the selected client.
:param client: Type of transfer client
:param username: Username (if required)
    :param password: Password (if required)
:param local_path: Path on the local machine where we are copying from
:param remote_path: Path on the remote machine where we are copying to
:param address: Address of remote host(guest)
:param limit: Speed limit of file transfer.
:param log_filename: If specified, log all output to this file (SCP only)
:param verbose: If True, log some stats using logging.debug (RSS only)
:param timeout: The time duration (in seconds) to wait for the transfer to
complete.
    :param interface: The interface the neighbours attach to (only used with
        an ipv6 linklocal address)
:raise: Whatever remote_scp() raises
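    A usage sketch (illustrative only; host, credentials and paths are
    hypothetical):
        copy_files_to("10.0.0.1", "scp", "root", "123456", 22,
                      "/tmp/data.tar", "/tmp/data.tar")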
"""
if client == "scp":
scp_to_remote(address, port, username, password, local_path,
remote_path, limit, log_filename, timeout,
interface=interface)
elif client == "rss":
log_func = None
if verbose:
log_func = logging.debug
c = rss_client.FileUploadClient(address, port, log_func)
c.upload(local_path, remote_path, timeout)
c.close()
else:
raise error.TestError("No such file copy client: '%s', valid values"
"are scp and rss" % client)
def copy_files_from(address, client, username, password, port, remote_path,
local_path, limit="", log_filename=None,
verbose=False, timeout=600, interface=None):
"""
Copy files from a remote host (guest) using the selected client.
:param client: Type of transfer client
:param username: Username (if required)
    :param password: Password (if required)
:param remote_path: Path on the remote machine where we are copying from
:param local_path: Path on the local machine where we are copying to
:param address: Address of remote host(guest)
:param limit: Speed limit of file transfer.
:param log_filename: If specified, log all output to this file (SCP only)
:param verbose: If True, log some stats using ``logging.debug`` (RSS only)
:param timeout: The time duration (in seconds) to wait for the transfer to
complete.
    :param interface: The interface the neighbours attach to (only used with
        an ipv6 linklocal address)
:raise: Whatever ``remote_scp()`` raises
"""
if client == "scp":
scp_from_remote(address, port, username, password, remote_path,
local_path, limit, log_filename, timeout,
interface=interface)
elif client == "rss":
log_func = None
if verbose:
log_func = logging.debug
c = rss_client.FileDownloadClient(address, port, log_func)
c.download(remote_path, local_path, timeout)
c.close()
else:
raise error.TestError("No such file copy client: '%s', valid values"
"are scp and rss" % client)
class Remote_Package(object):
def __init__(self, address, client, username, password, port, remote_path):
"""
Initialization of Remote Package class.
:param address: Address of remote host(guest)
:param client: The client to use ('ssh', 'telnet' or 'nc')
:param username: Username (if required)
        :param password: Password (if required)
        :param port: Port to connect to
        :param remote_path: Remote package path
"""
self.address = address
self.client = client
self.port = port
self.username = username
self.password = password
self.remote_path = remote_path
if self.client == "nc":
self.cp_client = "rss"
self.cp_port = 10023
elif self.client == "ssh":
self.cp_client = "scp"
self.cp_port = 22
else:
raise LoginBadClientError(client)
def pull_file(self, local_path, timeout=600):
"""
Copy file from remote to local.
"""
logging.debug("Pull remote: '%s' to local: '%s'." % (self.remote_path,
local_path))
copy_files_from(self.address, self.cp_client, self.username,
self.password, self.cp_port, self.remote_path,
local_path, timeout=timeout)
def push_file(self, local_path, timeout=600):
"""
Copy file from local to remote.
"""
logging.debug("Push local: '%s' to remote: '%s'." % (local_path,
self.remote_path))
copy_files_to(self.address, self.cp_client, self.username,
self.password, self.cp_port, local_path,
self.remote_path, timeout=timeout)
class RemoteFile(object):
"""
Class to handle the operations of file on remote host or guest.
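    A usage sketch (illustrative only; host, credentials and path are
    hypothetical):
        remote_file = RemoteFile("10.0.0.1", "scp", "root", "123456", 22,
                                 "/etc/ntp.conf")
        remote_file.sub({r"^server .*": "server 10.0.0.2"})
        remote_file.add(["server 10.0.0.3"])
    The edited file is restored from its backup when the instance is destroyed.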
"""
def __init__(self, address, client, username, password, port,
remote_path, limit="", log_filename=None,
verbose=False, timeout=600):
"""
Initialization of RemoteFile class.
:param address: Address of remote host(guest)
:param client: Type of transfer client
:param username: Username (if required)
        :param password: Password (if required)
        :param remote_path: Path of the file which we want to edit on remote.
        :param limit: Speed limit of file transfer.
        :param log_filename: If specified, log all output to this file (SCP only)
        :param verbose: If True, log some stats using logging.debug (RSS only)
        :param timeout: The time duration (in seconds) to wait for the
            transfer to complete.
"""
self.address = address
self.client = client
self.username = username
self.password = password
self.port = port
self.remote_path = remote_path
self.limit = limit
self.log_filename = log_filename
self.verbose = verbose
self.timeout = timeout
        # Get a local_path; all actions are taken on it.
filename = os.path.basename(self.remote_path)
# Get a local_path.
tmp_dir = data_dir.get_tmp_dir()
local_file = tempfile.NamedTemporaryFile(prefix=("%s_" % filename),
dir=tmp_dir)
self.local_path = local_file.name
local_file.close()
# Get a backup_path.
backup_file = tempfile.NamedTemporaryFile(prefix=("%s_" % filename),
dir=tmp_dir)
self.backup_path = backup_file.name
backup_file.close()
# Get file from remote.
try:
self._pull_file()
except SCPTransferFailedError:
# Remote file doesn't exist, create empty file on local
self._write_local([])
# Save a backup.
shutil.copy(self.local_path, self.backup_path)
def __del__(self):
"""
Called when the instance is about to be destroyed.
"""
self._reset_file()
if os.path.exists(self.backup_path):
os.remove(self.backup_path)
if os.path.exists(self.local_path):
os.remove(self.local_path)
def _pull_file(self):
"""
Copy file from remote to local.
"""
if self.client == "test":
shutil.copy(self.remote_path, self.local_path)
else:
copy_files_from(self.address, self.client, self.username,
self.password, self.port, self.remote_path,
self.local_path, self.limit, self.log_filename,
self.verbose, self.timeout)
def _push_file(self):
"""
Copy file from local to remote.
"""
if self.client == "test":
shutil.copy(self.local_path, self.remote_path)
else:
copy_files_to(self.address, self.client, self.username,
self.password, self.port, self.local_path,
self.remote_path, self.limit, self.log_filename,
self.verbose, self.timeout)
def _reset_file(self):
"""
Copy backup from local to remote.
"""
if self.client == "test":
shutil.copy(self.backup_path, self.remote_path)
else:
copy_files_to(self.address, self.client, self.username,
self.password, self.port, self.backup_path,
self.remote_path, self.limit, self.log_filename,
self.verbose, self.timeout)
def _read_local(self):
"""
Read file on local_path.
:return: string list got from readlines().
"""
local_file = open(self.local_path, "r")
lines = local_file.readlines()
local_file.close()
return lines
def _write_local(self, lines):
"""
Write file on local_path. Call writelines method of File.
"""
local_file = open(self.local_path, "w")
local_file.writelines(lines)
local_file.close()
def add(self, line_list):
"""
Append lines in line_list into file on remote.
"""
lines = self._read_local()
for line in line_list:
lines.append("\n%s" % line)
self._write_local(lines)
self._push_file()
def sub(self, pattern2repl_dict):
"""
        Replace strings that match each pattern with the corresponding
        replacement value in pattern2repl_dict.
"""
lines = self._read_local()
for pattern, repl in pattern2repl_dict.items():
for index in range(len(lines)):
line = lines[index]
lines[index] = re.sub(pattern, repl, line)
self._write_local(lines)
self._push_file()
def truncate(self, length=0):
"""
        Truncate the content of the remote file to the assigned length (lines).
Content before
line 1
line 2
line 3
remote_file.truncate(length=1)
Content after
line 1
:param length: how many lines you want to keep
"""
lines = self._read_local()
lines = lines[0: length]
self._write_local(lines)
self._push_file()
def remove(self, pattern_list):
"""
        Remove the lines in the remote file which match a pattern
in pattern_list.
"""
lines = self._read_local()
for pattern in pattern_list:
for index in range(len(lines)):
line = lines[index]
if re.match(pattern, line):
lines.remove(line)
# Check this line is the last one or not.
if (not line.endswith('\n') and (index > 0)):
lines[index - 1] = lines[index - 1].rstrip("\n")
self._write_local(lines)
self._push_file()
def sub_else_add(self, pattern2repl_dict):
"""
        Replace strings that match the pattern.
        If no line matches, append the replacement value
        to the end of the file.
"""
lines = self._read_local()
for pattern, repl in pattern2repl_dict.items():
no_line_match = True
for index in range(len(lines)):
line = lines[index]
if re.match(pattern, line):
no_line_match = False
lines[index] = re.sub(pattern, repl, line)
if no_line_match:
lines.append("\n%s" % repl)
self._write_local(lines)
self._push_file()
class RemoteRunner(object):
"""
    Class to provide a utils.run-like method to execute commands on a
    remote host or guest. It provides an interface similar to utils.run
    on the local host.
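    A usage sketch (illustrative only; host and credentials are hypothetical):
        runner = RemoteRunner(host="10.0.0.1", username="root",
                              password="123456")
        result = runner.run("uname -a")
        logging.info(result.stdout)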
"""
def __init__(self, client="ssh", host=None, port="22", username="root",
password=None, prompt=r"[\#\$]\s*$", linesep="\n",
log_filename=None, timeout=240, internal_timeout=10,
session=None):
"""
Initialization of RemoteRunner. Init a session login to remote host or
guest.
:param client: The client to use ('ssh', 'telnet' or 'nc')
:param host: Hostname or IP address
:param port: Port to connect to
:param username: Username (if required)
:param password: Password (if required)
:param prompt: Shell prompt (regular expression)
:param linesep: The line separator to use when sending lines
(e.g. '\\n' or '\\r\\n')
:param log_filename: If specified, log all output to this file
:param timeout: Total time duration to wait for a successful login
:param internal_timeout: The maximal time duration (in seconds) to wait
for each step of the login procedure (e.g. the "Are you sure"
prompt or the password prompt)
:param session: An existing session
:see: wait_for_login()
:raise: Whatever wait_for_login() raises
"""
if session is None:
if host is None:
raise error.TestError("Neither host, nor session was defined!")
self.session = wait_for_login(client, host, port, username,
password, prompt, linesep,
log_filename, timeout,
internal_timeout)
else:
self.session = session
# Init stdout pipe and stderr pipe.
self.stdout_pipe = tempfile.mktemp()
self.stderr_pipe = tempfile.mktemp()
def run(self, command, timeout=60, ignore_status=False):
"""
Method to provide a utils.run-like interface to execute command on
remote host or guest.
:param timeout: Total time duration to wait for command return.
:param ignore_status: If ignore_status=True, do not raise an exception,
no matter what the exit code of the command is.
Else, raise CmdError if exit code of command is not
zero.
"""
        # Redirect stdout and stderr to files, dividing the error messages
        # from the output and stripping color codes, so that we return the
        # same result as the utils.run() function.
command = "%s 1>%s 2>%s" % (command, self.stdout_pipe, self.stderr_pipe)
status, _ = self.session.cmd_status_output(command, timeout=timeout)
output = self.session.cmd_output("cat %s;rm -f %s" %
(self.stdout_pipe, self.stdout_pipe))
errput = self.session.cmd_output("cat %s;rm -f %s" %
(self.stderr_pipe, self.stderr_pipe))
cmd_result = utils.CmdResult(command=command, exit_status=status,
stdout=output, stderr=errput)
if (status and (not ignore_status)):
raise error.CmdError(command, cmd_result)
return cmd_result
| gpl-2.0 |
jgoclawski/django | django/test/utils.py | 14 | 20900 | import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from unittest import skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.signals import request_started
from django.core.urlresolvers import get_script_prefix, set_script_prefix
from django.db import reset_queries
from django.http import request
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.utils import six
from django.utils.decorators import ContextDecorator
from django.utils.encoding import force_str
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
__all__ = (
'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
'modify_settings', 'override_settings',
'requires_tz_support',
'setup_test_environment', 'teardown_test_environment',
)
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate(object):
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
if self.val == other:
return True
return round(abs(self.val - other), self.places) == 0
class ContextList(list):
"""A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, six.string_types):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super(ContextList, self).__getitem__(key)
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
return True
def keys(self):
"""
Flattened keys of subcontexts.
"""
keys = set()
for subcontext in self:
for dict in subcontext:
keys |= set(dict.keys())
return keys
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
def setup_test_environment():
"""Perform any global pre-test setup. This involves:
- Installing the instrumented test renderer
- Set the email backend to the locmem email backend.
- Setting the active locale to match the LANGUAGE_CODE setting.
"""
Template._original_render = Template._render
Template._render = instrumented_test_render
# Storing previous values in the settings module itself is problematic.
# Store them in arbitrary (but related) modules instead. See #20636.
mail._original_email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
request._original_allowed_hosts = settings.ALLOWED_HOSTS
settings.ALLOWED_HOSTS = ['*']
mail.outbox = []
deactivate()
def teardown_test_environment():
"""Perform any global post-test teardown. This involves:
- Restoring the original test renderer
- Restoring the email sending functions
"""
Template._render = Template._original_render
del Template._original_render
settings.EMAIL_BACKEND = mail._original_email_backend
del mail._original_email_backend
settings.ALLOWED_HOSTS = request._original_allowed_hosts
del request._original_allowed_hosts
del mail.outbox
def get_runner(settings, test_runner_class=None):
if not test_runner_class:
test_runner_class = settings.TEST_RUNNER
test_path = test_runner_class.split('.')
# Allow for Python 2.5 relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, force_str(test_path[-1]))
test_runner = getattr(test_module, test_path[-1])
return test_runner
class override_settings(object):
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
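    For example (an illustrative sketch; LOGIN_URL and USE_TZ are just
    arbitrary settings):
        @override_settings(LOGIN_URL='/other/login/')
        def test_login(self):
            ...
        with override_settings(USE_TZ=True):
            ...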
"""
def __init__(self, **kwargs):
self.options = kwargs
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import SimpleTestCase
if isinstance(test_func, type):
if not issubclass(test_func, SimpleTestCase):
raise Exception(
"Only subclasses of Django SimpleTestCase can be decorated "
"with override_settings")
self.save_options(test_func)
return test_func
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def save_options(self, test_func):
if test_func._overridden_settings is None:
test_func._overridden_settings = self.options
else:
# Duplicate dict to prevent subclasses from altering their parent.
test_func._overridden_settings = dict(
test_func._overridden_settings, **self.options)
def enable(self):
# Keep this code at the beginning to leave the settings unchanged
# in case it raises an exception because INSTALLED_APPS is invalid.
if 'INSTALLED_APPS' in self.options:
try:
apps.set_installed_apps(self.options['INSTALLED_APPS'])
except Exception:
apps.unset_installed_apps()
raise
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
self.wrapped = settings._wrapped
settings._wrapped = override
for key, new_value in self.options.items():
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=True)
def disable(self):
if 'INSTALLED_APPS' in self.options:
apps.unset_installed_apps()
settings._wrapped = self.wrapped
del self.wrapped
for key in self.options:
new_value = getattr(settings, key, None)
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=False)
class modify_settings(override_settings):
"""
Like override_settings, but makes it possible to append, prepend or remove
items instead of redefining the entire list.
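    For example (an illustrative sketch; the middleware names are arbitrary):
        @modify_settings(MIDDLEWARE_CLASSES={
            'append': 'django.middleware.cache.FetchFromCacheMiddleware',
            'remove': 'django.middleware.common.CommonMiddleware',
        })
        def test_cached_view(self):
            ...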
"""
def __init__(self, *args, **kwargs):
if args:
# Hack used when instantiating from SimpleTestCase.setUpClass.
assert not kwargs
self.operations = args[0]
else:
assert not args
self.operations = list(kwargs.items())
def save_options(self, test_func):
if test_func._modified_settings is None:
test_func._modified_settings = self.operations
else:
# Duplicate list to prevent subclasses from altering their parent.
test_func._modified_settings = list(
test_func._modified_settings) + self.operations
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase.setUpClass, values may be
# overridden several times; cumulate changes.
value = self.options[name]
except KeyError:
value = list(getattr(settings, name, []))
for action, items in operations.items():
                # items may be a single value or an iterable.
if isinstance(items, six.string_types):
items = [items]
if action == 'append':
value = value + [item for item in items if item not in value]
elif action == 'prepend':
value = [item for item in items if item not in value] + value
elif action == 'remove':
value = [item for item in value if item not in items]
else:
raise ValueError("Unsupported action: %s" % action)
self.options[name] = value
super(modify_settings, self).enable()
def override_system_checks(new_checks, deployment_checks=None):
""" Acts as a decorator. Overrides list of registered system checks.
Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
you also need to exclude its system checks. """
from django.core.checks.registry import registry
def outer(test_func):
@wraps(test_func)
def inner(*args, **kwargs):
old_checks = registry.registered_checks
registry.registered_checks = new_checks
old_deployment_checks = registry.deployment_checks
if deployment_checks is not None:
registry.deployment_checks = deployment_checks
try:
return test_func(*args, **kwargs)
finally:
registry.registered_checks = old_checks
registry.deployment_checks = old_deployment_checks
return inner
return outer
def compare_xml(want, got):
"""Tries to do a 'xml-comparison' of want and got. Plain string
comparison doesn't always work because, for example, attribute
ordering should not be important. Comment nodes are not considered in the
comparison.
Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
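    For example (an illustrative sketch of the intended behaviour; attribute
    order is ignored):
        compare_xml('<a b="1" c="2"/>', '<a c="2" b="1"/>')  # True
        compare_xml('<a b="1"/>', '<a b="2"/>')              # False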
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join(c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE)
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
for want, got in zip(want_children, got_children):
if not check_element(want, got):
return False
return True
def first_node(document):
for node in document.childNodes:
if node.nodeType != Node.COMMENT_NODE:
return node
want, got = strip_quotes(want, got)
want = want.replace('\\n', '\n')
got = got.replace('\\n', '\n')
# If the string is not a complete xml document, we may need to add a
    # root element. This allows us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
def strip_quotes(want, got):
"""
Strip quotes of doctests output values:
>>> strip_quotes("'foo'")
"foo"
>>> strip_quotes('"foo"')
"foo"
"""
def is_quoted_string(s):
s = s.strip()
return (len(s) >= 2
and s[0] == s[-1]
and s[0] in ('"', "'"))
def is_quoted_unicode(s):
s = s.strip()
return (len(s) >= 3
and s[0] == 'u'
and s[1] == s[-1]
and s[1] in ('"', "'"))
if is_quoted_string(want) and is_quoted_string(got):
want = want.strip()[1:-1]
got = got.strip()[1:-1]
elif is_quoted_unicode(want) and is_quoted_unicode(got):
want = want.strip()[2:-1]
got = got.strip()[2:-1]
return want, got
def str_prefix(s):
return s % {'_': '' if six.PY3 else 'u'}
class CaptureQueriesContext(object):
"""
Context manager that captures queries executed by the specified connection.
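    A usage sketch (illustrative; ``connection`` is django.db.connection and
    ``SomeModel`` is a hypothetical model):
        with CaptureQueriesContext(connection) as ctx:
            list(SomeModel.objects.all())
        # ctx.captured_queries now holds the SQL executed inside the block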
"""
def __init__(self, connection):
self.connection = connection
def __iter__(self):
return iter(self.captured_queries)
def __getitem__(self, index):
return self.captured_queries[index]
def __len__(self):
return len(self.captured_queries)
@property
def captured_queries(self):
return self.connection.queries[self.initial_queries:self.final_queries]
def __enter__(self):
self.force_debug_cursor = self.connection.force_debug_cursor
self.connection.force_debug_cursor = True
self.initial_queries = len(self.connection.queries_log)
self.final_queries = None
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.force_debug_cursor = self.force_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
self.final_queries = len(self.connection.queries_log)
class ignore_warnings(object):
def __init__(self, **kwargs):
self.ignore_kwargs = kwargs
if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs:
self.filter_func = warnings.filterwarnings
else:
self.filter_func = warnings.simplefilter
def __call__(self, decorated):
if isinstance(decorated, type):
# A class is decorated
saved_setUp = decorated.setUp
saved_tearDown = decorated.tearDown
def setUp(inner_self):
self.catch_warnings = warnings.catch_warnings()
self.catch_warnings.__enter__()
self.filter_func('ignore', **self.ignore_kwargs)
saved_setUp(inner_self)
def tearDown(inner_self):
saved_tearDown(inner_self)
self.catch_warnings.__exit__(*sys.exc_info())
decorated.setUp = setUp
decorated.tearDown = tearDown
return decorated
else:
@wraps(decorated)
def inner(*args, **kwargs):
with warnings.catch_warnings():
self.filter_func('ignore', **self.ignore_kwargs)
return decorated(*args, **kwargs)
return inner
@contextmanager
def patch_logger(logger_name, log_level):
"""
Context manager that takes a named logger and the logging level
and provides a simple mock-like list of messages received
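    A usage sketch (illustrative; the logger name and the call that logs are
    hypothetical):
        with patch_logger('django.security', 'warning') as calls:
            do_something_that_warns()
        # calls is a list of the formatted warning messages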
"""
calls = []
def replacement(msg, *args, **kwargs):
calls.append(msg % args)
logger = logging.getLogger(logger_name)
orig = getattr(logger, log_level)
setattr(logger, log_level, replacement)
try:
yield calls
finally:
setattr(logger, log_level, orig)
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that.")
@contextmanager
def extend_sys_path(*paths):
"""Context manager to temporarily add paths to sys.path."""
_orig_sys_path = sys.path[:]
sys.path.extend(paths)
try:
yield
finally:
sys.path = _orig_sys_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
"""Clear the cache of an LRU cache object on entering and exiting."""
lru_cache_object.cache_clear()
try:
yield
finally:
lru_cache_object.cache_clear()
@contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Note: This function and the following ``captured_std*`` are copied
from CPython's ``test.support`` module."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, six.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
def reset_warning_registry():
"""
Clear warning registry for all modules. This is required in some tests
because of a bug in Python that prevents warnings.simplefilter("always")
from always making warnings appear: http://bugs.python.org/issue4180
The bug was fixed in Python 3.4.2.
"""
key = "__warningregistry__"
for mod in sys.modules.values():
if hasattr(mod, key):
getattr(mod, key).clear()
@contextmanager
def freeze_time(t):
"""
Context manager to temporarily freeze time.time(). This temporarily
modifies the time function of the time module. Modules which import the
    time function directly (e.g. `from time import time`) won't be affected.
This isn't meant as a public API, but helps reduce some repetitive code in
Django's test suite.
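    For example:
        with freeze_time(123456789.0):
            assert time.time() == 123456789.0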
"""
_real_time = time.time
time.time = lambda: t
try:
yield
finally:
time.time = _real_time
def require_jinja2(test_func):
"""
Decorator to enable a Jinja2 template engine in addition to the regular
Django template engine for a test or skip it if Jinja2 isn't available.
"""
test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
test_func = override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}, {
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'APP_DIRS': True,
'OPTIONS': {'keep_trailing_newline': True},
}])(test_func)
return test_func
class ScriptPrefix(ContextDecorator):
def __enter__(self):
set_script_prefix(self.prefix)
def __exit__(self, exc_type, exc_val, traceback):
set_script_prefix(self.old_prefix)
def __init__(self, prefix):
self.prefix = prefix
self.old_prefix = get_script_prefix()
def override_script_prefix(prefix):
"""
    Decorator or context manager to temporarily override the script prefix.
"""
return ScriptPrefix(prefix)
class LoggingCaptureMixin(object):
"""
Capture the output from the 'django' logger and store it on the class's
logger_output attribute.
"""
def setUp(self):
self.logger = logging.getLogger('django')
self.old_stream = self.logger.handlers[0].stream
self.logger_output = six.StringIO()
self.logger.handlers[0].stream = self.logger_output
def tearDown(self):
self.logger.handlers[0].stream = self.old_stream
| bsd-3-clause |
culots/meld | meld/undo.py | 5 | 8280 | # Copyright (C) 2002-2006 Stephen Kennedy <stevek@gnome.org>
# Copyright (C) 2010-2011 Kai Willadsen <kai.willadsen@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Module to help implement undo functionality.
Usage:
t = TextWidget()
s = UndoSequence()
def on_textwidget_text_inserted():
s.begin_group()
if not t.is_modified():
s.add_action( TextWidgetModifiedAction() )
s.add_action( InsertionAction() )
s.end_group()
def on_undo_button_pressed():
s.undo()
"""
from gi.repository import GObject
class GroupAction(object):
"""A group action combines several actions into one logical action.
"""
def __init__(self, seq):
self.seq = seq
# TODO: If a GroupAction affects more than one sequence, our logic
# breaks. Currently, this isn't a problem.
self.buffer = seq.actions[0].buffer
def undo(self):
while self.seq.can_undo():
self.seq.undo()
def redo(self):
while self.seq.can_redo():
self.seq.redo()
class UndoSequence(GObject.GObject):
"""A manager class for operations which can be undone/redone.
"""
__gsignals__ = {
'can-undo': (GObject.SignalFlags.RUN_FIRST, None, (GObject.TYPE_BOOLEAN,)),
'can-redo': (GObject.SignalFlags.RUN_FIRST, None, (GObject.TYPE_BOOLEAN,)),
'checkpointed': (GObject.SignalFlags.RUN_FIRST, None, (GObject.TYPE_OBJECT, GObject.TYPE_BOOLEAN,)),
}
def __init__(self):
"""Create an empty UndoSequence.
"""
GObject.GObject.__init__(self)
self.actions = []
self.next_redo = 0
self.checkpoints = {}
self.group = None
self.busy = False
def clear(self):
"""Remove all undo and redo actions from this sequence
If the sequence was previously able to undo and/or redo, the
'can-undo' and 'can-redo' signals are emitted.
Raises an AssertionError if a group is in progress.
"""
assert self.group is None
if self.can_undo():
self.emit('can-undo', 0)
if self.can_redo():
self.emit('can-redo', 0)
self.actions = []
self.next_redo = 0
self.checkpoints = {}
def can_undo(self):
"""Return if an undo is possible.
"""
return self.next_redo > 0
def can_redo(self):
"""Return if a redo is possible.
"""
return self.next_redo < len(self.actions)
def add_action(self, action):
"""Add an action to the undo list.
Arguments:
action -- A class with two callable attributes: 'undo' and 'redo'
which are called by this sequence during an undo or redo.
"""
if self.busy:
return
if self.group is None:
if self.checkpointed(action.buffer):
self.checkpoints[action.buffer][1] = self.next_redo
self.emit('checkpointed', action.buffer, False)
else:
# If we go back in the undo stack before the checkpoint starts,
# and then modify the buffer, we lose the checkpoint altogether
start, end = self.checkpoints.get(action.buffer, (None, None))
if start is not None and start > self.next_redo:
self.checkpoints[action.buffer] = (None, None)
could_undo = self.can_undo()
could_redo = self.can_redo()
self.actions[self.next_redo:] = []
self.actions.append(action)
self.next_redo += 1
if not could_undo:
self.emit('can-undo', 1)
if could_redo:
self.emit('can-redo', 0)
else:
self.group.add_action(action)
def undo(self):
"""Undo an action.
Raises an AssertionError if the sequence is not undoable.
"""
assert self.next_redo > 0
self.busy = True
buf = self.actions[self.next_redo - 1].buffer
if self.checkpointed(buf):
self.emit('checkpointed', buf, False)
could_redo = self.can_redo()
self.next_redo -= 1
self.actions[self.next_redo].undo()
self.busy = False
if not self.can_undo():
self.emit('can-undo', 0)
if not could_redo:
self.emit('can-redo', 1)
if self.checkpointed(buf):
self.emit('checkpointed', buf, True)
def redo(self):
"""Redo an action.
        Raises an AssertionError if the sequence is not redoable.
"""
assert self.next_redo < len(self.actions)
self.busy = True
buf = self.actions[self.next_redo].buffer
if self.checkpointed(buf):
self.emit('checkpointed', buf, False)
could_undo = self.can_undo()
a = self.actions[self.next_redo]
self.next_redo += 1
a.redo()
self.busy = False
if not could_undo:
self.emit('can-undo', 1)
if not self.can_redo():
self.emit('can-redo', 0)
if self.checkpointed(buf):
self.emit('checkpointed', buf, True)
def checkpoint(self, buf):
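        """
        Record the current undo-stack position as the saved (clean) state
        for buf, so that checkpointed() can later report whether the buffer
        has been modified relative to this point.
        """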
start = self.next_redo
while start > 0 and self.actions[start - 1].buffer != buf:
start -= 1
end = self.next_redo
while (end < len(self.actions) - 1 and
self.actions[end + 1].buffer != buf):
end += 1
if end == len(self.actions):
end = None
self.checkpoints[buf] = [start, end]
self.emit('checkpointed', buf, True)
def checkpointed(self, buf):
# While the main undo sequence should always have checkpoints
# recorded, grouped subsequences won't.
start, end = self.checkpoints.get(buf, (None, None))
if start is None:
return False
if end is None:
end = len(self.actions)
return start <= self.next_redo <= end
def begin_group(self):
"""Group several actions into a single logical action.
When you wrap several calls to add_action() inside begin_group()
and end_group(), all the intervening actions are considered
one logical action. For instance a 'replace' action may be
implemented as a pair of 'delete' and 'create' actions, but
undoing should undo both of them.
"""
if self.busy:
return
if self.group:
self.group.begin_group()
else:
self.group = UndoSequence()
def end_group(self):
"""End a logical group action. See also begin_group().
Raises an AssertionError if there was not a matching call to
begin_group().
"""
if self.busy:
return
assert self.group is not None
if self.group.group is not None:
self.group.end_group()
else:
group = self.group
self.group = None
# Collapse single action groups
if len(group.actions) == 1:
self.add_action(group.actions[0])
elif len(group.actions) > 1:
self.add_action(GroupAction(group))
def abort_group(self):
"""Revert the sequence to the state before begin_group() was called.
        Raises an AssertionError if there was not a matching call to begin_group().
"""
if self.busy:
return
assert self.group is not None
if self.group.group is not None:
self.group.abort_group()
else:
self.group = None
def in_grouped_action(self):
return self.group is not None
| gpl-2.0 |
hehongliang/tensorflow | tensorflow/tools/common/public_api.py | 24 | 4920 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Visitor restricting traversal to only the public tensorflow API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.util import tf_inspect
class PublicAPIVisitor(object):
"""Visitor to use with `traverse` to visit exactly the public TF API."""
def __init__(self, visitor):
"""Constructor.
`visitor` should be a callable suitable as a visitor for `traverse`. It will
be called only for members of the public TensorFlow API.
Args:
visitor: A visitor to call for the public API.
"""
self._visitor = visitor
self._root_name = 'tf'
# Modules/classes we want to suppress entirely.
self._private_map = {
# Some implementations have this internal module that we shouldn't
# expose.
'tf.flags': ['cpp_flags'],
}
# Modules/classes we do not want to descend into if we hit them. Usually,
# system modules exposed through platforms for compatibility reasons.
# Each entry maps a module path to a name to ignore in traversal.
self._do_not_descend_map = {
'tf': [
'compiler',
'core',
'examples',
'flags', # Don't add flags
# TODO(drpng): This can be removed once sealed off.
'platform',
# TODO(drpng): This can be removed once sealed.
'pywrap_tensorflow',
# TODO(drpng): This can be removed once sealed.
'user_ops',
'python',
'tools',
'tensorboard',
],
## Everything below here is legitimate.
# It'll stay, but it's not officially part of the API.
'tf.app': ['flags'],
# Imported for compatibility between py2/3.
'tf.test': ['mock'],
# Externalized modules of the Keras API.
'tf.keras': ['applications', 'preprocessing']
}
@property
def private_map(self):
"""A map from parents to symbols that should not be included at all.
This map can be edited, but it should not be edited once traversal has
begun.
Returns:
The map marking symbols to not include.
"""
return self._private_map
@property
def do_not_descend_map(self):
"""A map from parents to symbols that should not be descended into.
This map can be edited, but it should not be edited once traversal has
begun.
Returns:
The map marking symbols to not explore.
"""
return self._do_not_descend_map
def set_root_name(self, root_name):
"""Override the default root name of 'tf'."""
self._root_name = root_name
def _is_private(self, path, name, obj=None):
"""Return whether a name is private."""
# TODO(wicke): Find out what names to exclude.
del obj # Unused.
return ((path in self._private_map and
name in self._private_map[path]) or
(name.startswith('_') and not re.match('__.*__$', name) or
name in ['__base__', '__class__']))
def _do_not_descend(self, path, name):
"""Safely queries if a specific fully qualified name should be excluded."""
return (path in self._do_not_descend_map and
name in self._do_not_descend_map[path])
def __call__(self, path, parent, children):
"""Visitor interface, see `traverse` for details."""
# Avoid long waits in cases of pretty unambiguous failure.
if tf_inspect.ismodule(parent) and len(path.split('.')) > 10:
raise RuntimeError('Modules nested too deep:\n%s.%s\n\nThis is likely a '
'problem with an accidental public import.' %
(self._root_name, path))
# Includes self._root_name
full_path = '.'.join([self._root_name, path]) if path else self._root_name
# Remove things that are not visible.
for name, child in list(children):
if self._is_private(full_path, name, child):
children.remove((name, child))
self._visitor(path, parent, children)
# Remove things that are visible, but which should not be descended into.
for name, child in list(children):
if self._do_not_descend(full_path, name):
children.remove((name, child))
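# --- Illustrative usage sketch added by the editor; not part of the original
# file. PublicAPIVisitor wraps a plain visitor callable and is normally handed
# to the companion `traverse` utility; the import below is assumed from that
# convention rather than taken from this module.
def _example_print_public_api():
  import tensorflow as tf
  from tensorflow.tools.common import traverse  # assumed companion module

  def print_visitor(path, parent, children):
    # Invoked only for symbols that survive the filtering implemented above.
    print(path, sorted(name for name, _ in children))

  traverse.traverse(tf, PublicAPIVisitor(print_visitor))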
| apache-2.0 |
c-o-m-m-a-n-d-e-r/CouchPotatoServer | libs/pyutil/hashexpand.py | 106 | 2890 | # Copyright (c) 2002-2012 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import warnings
"""
Cryptographically strong pseudo-random number generator based on SHA256.
"""
class SHA256Expander:
"""
Provide a cryptographically strong pseudo-random number generator based on
SHA256. Hopefully this means that no attacker will be able to predict any
bit of output that he hasn't seen, given that he doesn't know anything about
the seed and given that he can see as many bits of output as he desires
except for the bit that he is trying to predict. Further it is hoped that
an attacker will not even be able to determine whether a given stream of
random bytes was generated by this PRNG or by flipping a coin repeatedly.
The safety of this technique has not been verified by a Real Cryptographer.
... but it is similar to the PRNG in FIPS-186...
The seed and counter are encoded in DJB's netstring format so that I
don't have to think about the possibility of ambiguity.
Note: I've since learned more about the theory of secure hash functions
and the above is a strong assumption about a secure hash function. Use
of this class should be considered deprecated and you should use a more
well-analyzed KDF (such as the nascent standard HKDF) or stream cipher or
whatever it is that you need.
"""
def __init__(self, seed=None):
warnings.warn("deprecated", DeprecationWarning)
if seed is not None:
self.seed(seed)
def seed(self, seed):
import hashlib
self.starth = hashlib.sha256('24:pyutil hash expansion v2,10:algorithm:,6:SHA256,6:value:,')
seedlen = len(seed)
seedlenstr = str(seedlen)
self.starth.update(seedlenstr)
self.starth.update(':')
self.starth.update(seed)
self.starth.update(',')
self.avail = ""
self.counter = 0
def get(self, bytes):
bytesleft = bytes
res = []
while bytesleft > 0:
if len(self.avail) == 0:
h = self.starth.copy()
counterstr = str(self.counter)
counterstrlen = len(counterstr)
counterstrlenstr = str(counterstrlen)
h.update(counterstrlenstr)
h.update(':')
h.update(counterstr)
h.update(',')
self.avail = h.digest()
self.counter += 1
numb = min(len(self.avail), bytesleft)
(chunk, self.avail,) = (self.avail[:numb], self.avail[numb:],)
res.append(chunk)
bytesleft = bytesleft - numb
resstr = ''.join(res)
assert len(resstr) == bytes
return resstr
def sha256expand(inpstr, expbytes):
return SHA256Expander(inpstr).get(expbytes)
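# --- Illustrative usage sketch added by the editor; not part of the original
# pyutil module. It only exercises the (deprecated) API defined above.
def _example_sha256expand():
    # The expansion is deterministic: same seed, same byte stream.
    a = sha256expand('my seed', 64)
    b = sha256expand('my seed', 64)
    assert a == b and len(a) == 64
    # Reading the stream in chunks from one expander yields the same bytes.
    exp = SHA256Expander('my seed')
    assert exp.get(10) + exp.get(54) == a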
| gpl-3.0 |
BartoszCichecki/onlinepython | onlinepython/pypy-2.4.0-win32/lib-python/2.7/test/test_code.py | 8 | 3307 | """This module includes tests of the code object representation.
>>> def f(x):
... def g(y):
... return x + y
... return g
...
>>> dump(f.func_code)
name: f
argcount: 1
names: ()
varnames: ('x', 'g')
cellvars: ('x',)
freevars: ()
nlocals: 2
flags: 3
consts: ('None', '<code object g>')
>>> dump(f(4).func_code)
name: g
argcount: 1
names: ()
varnames: ('y',)
cellvars: ()
freevars: ('x',)
nlocals: 1
flags: 19
consts: ('None',)
>>> def h(x, y):
... a = x + y
... b = x - y
... c = a * b
... return c
...
>>> dump(h.func_code)
name: h
argcount: 2
names: ()
varnames: ('x', 'y', 'a', 'b', 'c')
cellvars: ()
freevars: ()
nlocals: 5
flags: 67
consts: ('None',)
>>> def attrs(obj):
... print obj.attr1
... print obj.attr2
... print obj.attr3
>>> dump(attrs.func_code)
name: attrs
argcount: 1
names: ('attr1', 'attr2', 'attr3')
varnames: ('obj',)
cellvars: ()
freevars: ()
nlocals: 1
flags: 67
consts: ('None',)
>>> def optimize_away():
... 'doc string'
... 'not a docstring'
... 53
... 53L
>>> dump(optimize_away.func_code)
name: optimize_away
argcount: 0
names: ()
varnames: ()
cellvars: ()
freevars: ()
nlocals: 0
flags: 1048643
consts: ("'doc string'", 'None')
"""
import unittest
import weakref
from test.test_support import run_doctest, run_unittest, cpython_only
from test.test_support import gc_collect
def consts(t):
"""Yield a doctest-safe sequence of object reprs."""
for elt in t:
r = repr(elt)
if r.startswith("<code object"):
yield "<code object %s>" % elt.co_name
else:
yield r
def dump(co):
"""Print out a text representation of a code object."""
for attr in ["name", "argcount", "names", "varnames", "cellvars",
"freevars", "nlocals", "flags"]:
print "%s: %s" % (attr, getattr(co, "co_" + attr))
print "consts:", tuple(consts(co.co_consts))
class CodeTest(unittest.TestCase):
@cpython_only
def test_newempty(self):
import _testcapi
co = _testcapi.code_newempty("filename", "funcname", 15)
self.assertEqual(co.co_filename, "filename")
self.assertEqual(co.co_name, "funcname")
self.assertEqual(co.co_firstlineno, 15)
class CodeWeakRefTest(unittest.TestCase):
def test_basic(self):
# Create a code object in a clean environment so that we know we have
# the only reference to it left.
namespace = {}
exec "def f(): pass" in globals(), namespace
f = namespace["f"]
del namespace
self.called = False
def callback(code):
self.called = True
# f is now the last reference to the function, and through it, the code
# object. While we hold it, check that we can create a weakref and
# deref it. Then delete it, and check that the callback gets called and
# the reference dies.
coderef = weakref.ref(f.__code__, callback)
self.assertTrue(bool(coderef()))
del f
gc_collect()
self.assertFalse(bool(coderef()))
self.assertTrue(self.called)
def test_main(verbose=None):
from test import test_code
run_doctest(test_code, verbose)
run_unittest(CodeTest, CodeWeakRefTest)
if __name__ == "__main__":
test_main()
| gpl-2.0 |
fdecourcelle/meanfde | node_modules/meanio/node_modules/mean-cli/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/common.py | 497 | 17406 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
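# --- Illustrative usage sketch added by the editor; not part of the original
# gyp source. `_example_expensive_lookup` is hypothetical; memoize caches on
# the positional-argument tuple, so arguments must be hashable.
@memoize
def _example_expensive_lookup(name):
  return name.upper()  # imagine something costly; repeat calls hit the cache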
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
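# --- Illustrative round-trip sketch added by the editor; not part of the
# original gyp source. The path, target and toolset names are made up.
def _example_qualified_target_roundtrip():
  qualified = QualifiedTarget('dir/app.gyp', 'app', 'host')
  assert qualified == 'dir/app.gyp:app#host'
  assert ParseQualifiedTarget(qualified) == ['dir/app.gyp', 'app', 'host']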
@memoize
def RelativePath(path, relative_to):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# Convert to normalized (and therefore absolute paths).
path = os.path.realpath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
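# --- Illustrative sketch added by the editor; not part of the original gyp
# source. Arguments containing shell metacharacters get double-quoted, while
# plain words pass through untouched.
def _example_posix_shell_encoding():
  assert EncodePOSIXShellArgument('hello world') == '"hello world"'
  assert EncodePOSIXShellList(['ls', '-l', 'My Documents']) == 'ls -l "My Documents"'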
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer:
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
    ['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
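# --- Illustrative sketch added by the editor; not part of the original gyp
# source. WriteOnDiff only touches the target when the generated content
# changed, which keeps downstream build timestamps stable. The path is made
# up and its parent directory is assumed to exist.
def _example_write_on_diff():
  out = WriteOnDiff('out/generated.ninja')
  out.write('# generated file\n')
  out.close()  # compares against any existing file and renames only on change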
| mit |
GoogleCloudPlatform/dataflow-sample-applications | timeseries-streaming/timeseries-python-applications/ml_pipeline/timeseries/encoder_decoder/transforms/process_encdec_inf_rtn.py | 1 | 9368 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from datetime import datetime
from typing import Dict, Text, Any
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow_serving.apis import prediction_log_pb2
import apache_beam as beam
from apache_beam.utils.windowed_value import WindowedValue
from ml_pipeline.timeseries.utils import timeseries_transform_utils
class ProcessReturn(beam.DoFn):
"""
    We need to match each input to its output so that the original example can
    be compared with its encoded-decoded reconstruction.
    The Transform component's preprocessing_fn puts the features in scope for
    the model into lexical order; this function mimics that structure.
"""
def __init__(self, config: Dict[Text, Any], batching_size: int = 1000):
beam.DoFn.__init__(self)
self.tf_transform_graph_dir = config['tf_transform_graph_dir']
self.model_config = config['model_config']
self.batching_size = batching_size
def setup(self):
self.transform_output = tft.TFTransformOutput(self.tf_transform_graph_dir)
self.tft_layer = self.transform_output.transform_features_layer()
def start_bundle(self):
self.batch: [WindowedValue] = []
def finish_bundle(self):
for prediction in self.process_result(self.batch):
yield prediction
def process(
self,
element: prediction_log_pb2.PredictionLog,
window=beam.DoFn.WindowParam,
timestamp=beam.DoFn.TimestampParam):
if len(element.predict_log.request.inputs['examples'].string_val) > 1:
raise Exception("Only support single input string.")
if len(self.batch) > self.batching_size:
for k in self.process_result(self.batch):
yield k
self.batch.clear()
else:
self.batch.append(WindowedValue(element, timestamp, [window]))
def process_result(self, element: [WindowedValue]):
"""
        An input example has shape [timesteps, all_features]; all_features is
        not always equal to the set of features used in the model.
        An output example has shape [timesteps, model_features].
        In order to compare these we need to match the (timestep, feature) from
        (timestep, all_features) to (timestep, model_features).
        There are also Metadata fields which provide context.
"""
element_value = [k.value for k in element]
processed_inputs = []
request_inputs = []
request_outputs = []
for k in element_value:
request_inputs.append(
k.predict_log.request.inputs['examples'].string_val[0])
request_outputs.append(k.predict_log.response.outputs['output_0'])
        # The output of tf.io.parse_example is a dict of feature tensors; the
        # non-metadata features have shape [batch, timestep].
batched_example = tf.io.parse_example(
request_inputs, self.transform_output.raw_feature_spec())
# The tft layer gives us two labels 'FLOAT32' and 'LABEL' which have
# shape [batch, timestep, model_features]
inputs = self.tft_layer(batched_example)
# Determine which of the features was used in the model
feature_labels = timeseries_transform_utils.create_feature_list_from_list(
features=batched_example.keys(), config=self.model_config)
# The outer loop gives us the batch label which has
# shape [timestep, model_features]
# For the metadata the shape is [timestep, 1]
metadata_span_start_timestamp = tf.sparse.to_dense(
batched_example['METADATA_SPAN_START_TS']).numpy()
metadata_span_end_timestamp = tf.sparse.to_dense(
batched_example['METADATA_SPAN_END_TS']).numpy()
batch_pos = 0
for batch_input in inputs['LABEL'].numpy():
# Get the Metadata from the original request
span_start_timestamp = datetime.fromtimestamp(
metadata_span_start_timestamp[batch_pos][0] / 1000)
span_end_timestamp = datetime.fromtimestamp(
metadata_span_end_timestamp[batch_pos][0] / 1000)
# Add the metadata to the result
result = {
'span_start_timestamp': span_start_timestamp,
'span_end_timestamp': span_end_timestamp
}
# In this loop we need to compare the last timestep
# [timestep , model_feature] for the input and the output.
# Get the output that matches this input
results = tf.io.parse_tensor(
request_outputs[batch_pos].SerializeToString(),
tf.float32).numpy()[0]
# The last time step is the last value in the input batch,
# ordinal pos starts from 0
last_timestep_pos = len(batch_input) - 1
# From the input batch get the last time step
last_timestep_input = batch_input[last_timestep_pos]
# Get the last timestep from the results
last_timestep_output = results[last_timestep_pos]
feature_results = {}
for model_feature_pos in range(len(last_timestep_output)):
label = (feature_labels[model_feature_pos])
# The num of features should == number of results
if len(feature_labels) != len(last_timestep_input):
raise ValueError(f'Features list {feature_labels} in config is '
f'len {len(feature_labels)} which '
f'does not match output length '
f'{len(last_timestep_output)} '
f' This normally is a result of using a configuration '
f'file that does not match '
f'tf_transform dir / saved model dir.')
# The num of features should == number of results
if len(last_timestep_output) != len(last_timestep_input):
raise ValueError(f'Input len {len(last_timestep_input)} does not '
f'match output length {len(last_timestep_output)} '
f' This normally is a result of mis matched tf_transform dir and saved model dir.')
feature_results[label] = {
'input_value': last_timestep_input[model_feature_pos],
'output_value': last_timestep_output[model_feature_pos]
}
if not str(label).endswith('-TIMESTAMP'):
feature_results[label].update({
                        # Outliers will affect the head of their array, so we need
                        # to keep the array to show in the outlier detection.
'raw_data_array': str(
tf.sparse.to_dense(
batched_example[label]).numpy()
[batch_pos])
})
result.update({'feature_results': feature_results})
processed_inputs.append(result)
batch_pos += 1
# Add back windows
windowed_value = []
for input_pos in range(len(processed_inputs) - 1):
windowed_value.append(
element[input_pos].with_value(processed_inputs[input_pos]))
return windowed_value
class CheckAnomalous(beam.DoFn):
"""
Naive threshold based entirely on % difference cutoff value.
"""
# TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
def __init__(self, threshold: float = 0.05):
beam.DoFn.__init__(self)
self.threshold = threshold
def process(self, element: Dict[Text, Any], *unused_args, **unused_kwargs):
result = {
'span_start_timestamp': element['span_start_timestamp'],
'span_end_timestamp': element['span_end_timestamp']
}
for key, value in element['feature_results'].items():
input_value = value['input_value']
output_value = value['output_value']
diff = abs(input_value - output_value)
value.update({'diff': diff})
if not key.endswith('-TIMESTAMP'):
value.update({'anomaly': diff > self.threshold})
result.update({key: value})
yield result
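# --- Illustrative wiring sketch added by the editor; not part of the original
# pipeline code. `predictions` (the RunInference output PCollection) and the
# config dict are assumptions, not taken from this repository.
def _example_postprocess(predictions, config):
    return (
        predictions
        | 'MatchInputToOutput' >> beam.ParDo(ProcessReturn(config=config))
        | 'FlagAnomalies' >> beam.ParDo(CheckAnomalous(threshold=0.05)))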
| apache-2.0 |
jonyachen/hearboi | record_2.py | 1 | 1234 | """
PyAudio example: Record a few seconds of audio and save to a WAVE
file.
"""
import pyaudio
import wave
import sys
from dejavu import Dejavu
CHUNK = 8192
FORMAT = pyaudio.paInt16
CHANNELS = 1L
RATE = 48000
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "/sounds/output.wav"
if sys.platform == 'darwin':
CHANNELS = 1
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=1,
frames_per_buffer=CHUNK,
rate=RATE,
input=True,
input_device_index= 4)
print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
config = {
"database": {
"host": "127.0.0.1",
"user": "root",
"passwd": "rasp",
"db": "sound_db",
}
}
# create a Dejavu instance
djv = Dejavu(config)
# Fingerprint all the mp3's in the directory we give it
djv.fingerprint_directory("sounds", [".wav"])
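# --- Illustrative follow-up sketch added by the editor; not part of the
# original script. It matches the freshly recorded clip back against the
# fingerprint database; the FileRecognizer import path follows the upstream
# dejavu project and is an assumption here.
from dejavu.recognize import FileRecognizer
match = djv.recognize(FileRecognizer, WAVE_OUTPUT_FILENAME)
print(match)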
| mit |
benoitc/dj-revproxy | revproxy/store.py | 1 | 4409 | # -*- coding: utf-8 -
#
# This file is part of dj-revproxy released under the MIT license.
# See the NOTICE for more information.
import os
import types
import uuid
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from restkit.tee import TeeInput, ResponseTeeInput
from restkit.client import USER_AGENT
from .filters import Filter
from .models import RequestSession
class RequestBodyWrapper(TeeInput):
def __init__(self, request, fobject):
self.fobject = fobject
super(RequestBodyWrapper, self).__init__(request.body)
def _tee(self, length):
data = super(RequestBodyWrapper, self)._tee(length)
if not data:
return data
self.fobject.write(data)
return data
def _finalize(self):
self.fobject.close()
return super(RequestBodyWrapper, self)._finalize()
class ResponseBodyWrapper(ResponseTeeInput):
def __init__(self, response, fobject):
self.fobject = fobject
super(ResponseBodyWrapper, self).__init__(response,
response.connection, response.should_close)
def _tee(self, length):
data = super(ResponseBodyWrapper, self)._tee(length)
if not data:
return data
self.fobject.write(data)
return data
def _finalize(self):
self.fobject.close()
return super(ResponseBodyWrapper, self)._finalize()
class RequestStore(Filter):
def __init__(self, request, **kwargs):
proxy_sid = kwargs.get("proxy_sid")
store_path = kwargs.get("store_path", "/tmp")
request_id = uuid.uuid4().hex
dirs = os.path.join(*request_id[0:8])
fdir = os.path.join(store_path, dirs)
self.fprefix = os.path.join(fdir, request_id[8:])
if not os.path.exists(fdir):
os.makedirs(fdir)
self.freq = None
self.frep = None
# save session
session = RequestSession(
sid = proxy_sid,
request_id = request_id,
store_path = store_path)
session.save()
super(RequestStore, self).__init__(request, **kwargs)
def on_request(self, request):
self.freq = open("%s.req" % self.fprefix, "w+")
headers_str = headers_request_str(request)
self.freq.write(headers_str)
if request.body is None:
self.freq.close()
self.freq = None
else:
request.body = RequestBodyWrapper(request,
self.freq)
def on_response(self, response, request):
if self.freq is not None:
try:
self.freq.close()
except OSError:
pass
self.frep = open("%s.rep" % self.fprefix, "w+")
headers_str = headers_response_str(response)
self.frep.write(headers_str)
if request.method == "HEAD":
self.frep.close()
else:
response._body = ResponseBodyWrapper(response,
self.frep)
def headers_request_str(request, extra_headers=None):
""" create final header string """
headers = request.headers.copy()
if extra_headers is not None:
for k, v in extra_headers:
headers[k] = v
if not request.body and request.method in ('POST', 'PUT',):
headers['Content-Length'] = 0
httpver = "HTTP/1.1"
ua = headers.iget('user_agent')
if not ua:
ua = USER_AGENT
host = request.host
accept_encoding = headers.iget('accept-encoding')
if not accept_encoding:
accept_encoding = 'identity'
lheaders = [
"%s %s %s\r\n" % (request.method, request.path, httpver),
"Host: %s\r\n" % host,
"User-Agent: %s\r\n" % ua,
"Accept-Encoding: %s\r\n" % accept_encoding
]
lheaders.extend(["%s: %s\r\n" % (k, str(v)) for k, v in \
headers.items() if k.lower() not in \
('user-agent', 'host', 'accept-encoding',)])
return "%s\r\n" % "".join(lheaders)
def headers_response_str(response):
version_str = "HTTP/%s.%s" % response.version
headers = ["%s %s\r\n" % (version_str, response.status)]
headers.extend(["%s: %s\r\n" % (k, str(v)) for k, v in \
response.headers.items()])
return "%s\r\n" % "".join(headers)
| mit |
addition-it-solutions/project-all | addons/mrp/wizard/change_production_qty.py | 7 | 4789 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.exceptions import UserError
class change_production_qty(osv.osv_memory):
_name = 'change.production.qty'
_description = 'Change Quantity of Products'
_columns = {
'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
}
def default_get(self, cr, uid, fields, context=None):
""" To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
        @return: A dictionary of fields with their default values.
"""
if context is None:
context = {}
res = super(change_production_qty, self).default_get(cr, uid, fields, context=context)
prod_obj = self.pool.get('mrp.production')
prod = prod_obj.browse(cr, uid, context.get('active_id'), context=context)
if 'product_qty' in fields:
res.update({'product_qty': prod.product_qty})
return res
def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
move_lines_obj = self.pool.get('stock.move')
for m in prod.move_created_ids:
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})
def change_prod_qty(self, cr, uid, ids, context=None):
"""
Changes the Quantity of Product.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return:
"""
record_id = context and context.get('active_id',False)
assert record_id, _('Active Id not found')
prod_obj = self.pool.get('mrp.production')
bom_obj = self.pool.get('mrp.bom')
move_obj = self.pool.get('stock.move')
for wiz_qty in self.browse(cr, uid, ids, context=context):
prod = prod_obj.browse(cr, uid, record_id, context=context)
prod_obj.write(cr, uid, [prod.id], {'product_qty': wiz_qty.product_qty})
prod_obj.action_compute(cr, uid, [prod.id])
for move in prod.move_lines:
bom_point = prod.bom_id
bom_id = prod.bom_id.id
if not bom_point:
bom_id = bom_obj._bom_find(cr, uid, product_id=prod.product_id.id, context=context)
if not bom_id:
raise UserError(_("Cannot find bill of material for this product."))
prod_obj.write(cr, uid, [prod.id], {'bom_id': bom_id})
bom_point = bom_obj.browse(cr, uid, [bom_id])[0]
if not bom_id:
raise UserError(_("Cannot find bill of material for this product."))
factor = prod.product_qty * prod.product_uom.factor / bom_point.product_uom.factor
product_details, workcenter_details = \
bom_obj._bom_explode(cr, uid, bom_point, prod.product_id, factor / bom_point.product_qty, [], context=context)
for r in product_details:
if r['product_id'] == move.product_id.id:
move_obj.write(cr, uid, [move.id], {'product_uom_qty': r['product_qty']})
if prod.move_prod_id:
move_obj.write(cr, uid, [prod.move_prod_id.id], {'product_uom_qty' : wiz_qty.product_qty})
self._update_product_to_produce(cr, uid, prod, wiz_qty.product_qty, context=context)
return {}
| agpl-3.0 |
rohitwaghchaure/erpnext_develop | erpnext/shopping_cart/test_shopping_cart.py | 2 | 6744 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import nowdate, add_months
from erpnext.shopping_cart.cart import _get_cart_quotation, update_cart, get_party
from erpnext.tests.utils import create_test_contact_and_address
test_dependencies = ['Payment Terms Template']
class TestShoppingCart(unittest.TestCase):
"""
Note:
Shopping Cart == Quotation
"""
def setUp(self):
frappe.set_user("Administrator")
create_test_contact_and_address()
self.enable_shopping_cart()
def tearDown(self):
frappe.set_user("Administrator")
self.disable_shopping_cart()
def test_get_cart_new_user(self):
self.login_as_new_user()
# test if lead is created and quotation with new lead is fetched
quotation = _get_cart_quotation()
self.assertEquals(quotation.quotation_to, "Customer")
self.assertEquals(quotation.contact_person,
frappe.db.get_value("Contact", dict(email_id="test_cart_user@example.com")))
self.assertEquals(quotation.lead, None)
self.assertEquals(quotation.contact_email, frappe.session.user)
return quotation
def test_get_cart_customer(self):
self.login_as_customer()
# test if quotation with customer is fetched
quotation = _get_cart_quotation()
self.assertEquals(quotation.quotation_to, "Customer")
self.assertEquals(quotation.customer, "_Test Customer")
self.assertEquals(quotation.lead, None)
self.assertEquals(quotation.contact_email, frappe.session.user)
return quotation
def test_add_to_cart(self):
self.login_as_customer()
# remove from cart
self.remove_all_items_from_cart()
# add first item
update_cart("_Test Item", 1)
quotation = self.test_get_cart_customer()
self.assertEquals(quotation.get("items")[0].item_code, "_Test Item")
self.assertEquals(quotation.get("items")[0].qty, 1)
self.assertEquals(quotation.get("items")[0].amount, 10)
# add second item
update_cart("_Test Item 2", 1)
quotation = self.test_get_cart_customer()
self.assertEquals(quotation.get("items")[1].item_code, "_Test Item 2")
self.assertEquals(quotation.get("items")[1].qty, 1)
self.assertEquals(quotation.get("items")[1].amount, 20)
self.assertEquals(len(quotation.get("items")), 2)
def test_update_cart(self):
# first, add to cart
self.test_add_to_cart()
# update first item
update_cart("_Test Item", 5)
quotation = self.test_get_cart_customer()
self.assertEquals(quotation.get("items")[0].item_code, "_Test Item")
self.assertEquals(quotation.get("items")[0].qty, 5)
self.assertEquals(quotation.get("items")[0].amount, 50)
self.assertEquals(quotation.net_total, 70)
self.assertEquals(len(quotation.get("items")), 2)
def test_remove_from_cart(self):
# first, add to cart
self.test_add_to_cart()
# remove first item
update_cart("_Test Item", 0)
quotation = self.test_get_cart_customer()
self.assertEquals(quotation.get("items")[0].item_code, "_Test Item 2")
self.assertEquals(quotation.get("items")[0].qty, 1)
self.assertEquals(quotation.get("items")[0].amount, 20)
self.assertEquals(quotation.net_total, 20)
self.assertEquals(len(quotation.get("items")), 1)
def test_tax_rule(self):
self.login_as_customer()
quotation = self.create_quotation()
from erpnext.accounts.party import set_taxes
tax_rule_master = set_taxes(quotation.customer, "Customer", \
quotation.transaction_date, quotation.company, None, None, \
quotation.customer_address, quotation.shipping_address_name, 1)
self.assertEquals(quotation.taxes_and_charges, tax_rule_master)
self.assertEquals(quotation.total_taxes_and_charges, 1000.0)
self.remove_test_quotation(quotation)
def create_quotation(self):
quotation = frappe.new_doc("Quotation")
values = {
"doctype": "Quotation",
"quotation_to": "Customer",
"order_type": "Shopping Cart",
"customer": get_party(frappe.session.user).name,
"docstatus": 0,
"contact_email": frappe.session.user,
"selling_price_list": "_Test Price List Rest of the World",
"currency": "USD",
"taxes_and_charges" : "_Test Tax 1",
"conversion_rate":1,
"transaction_date" : nowdate(),
"valid_till" : add_months(nowdate(), 1),
"items": [{
"item_code": "_Test Item",
"qty": 1
}],
"taxes": frappe.get_doc("Sales Taxes and Charges Template", "_Test Tax 1").taxes,
"company": "_Test Company"
}
quotation.update(values)
quotation.insert(ignore_permissions=True)
return quotation
def remove_test_quotation(self, quotation):
frappe.set_user("Administrator")
quotation.delete()
# helper functions
def enable_shopping_cart(self):
settings = frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings")
settings.update({
"enabled": 1,
"company": "_Test Company",
"default_customer_group": "_Test Customer Group",
"quotation_series": "_T-Quotation-",
"price_list": "_Test Price List India"
})
# insert item price
if not frappe.db.get_value("Item Price", {"price_list": "_Test Price List India",
"item_code": "_Test Item"}):
frappe.get_doc({
"doctype": "Item Price",
"price_list": "_Test Price List India",
"item_code": "_Test Item",
"price_list_rate": 10
}).insert()
frappe.get_doc({
"doctype": "Item Price",
"price_list": "_Test Price List India",
"item_code": "_Test Item 2",
"price_list_rate": 20
}).insert()
settings.save()
frappe.local.shopping_cart_settings = None
def disable_shopping_cart(self):
settings = frappe.get_doc("Shopping Cart Settings", "Shopping Cart Settings")
settings.enabled = 0
settings.save()
frappe.local.shopping_cart_settings = None
def login_as_new_user(self):
self.create_user_if_not_exists("test_cart_user@example.com")
frappe.set_user("test_cart_user@example.com")
def login_as_customer(self):
self.create_user_if_not_exists("test_contact_customer@example.com",
"_Test Contact For _Test Customer")
frappe.set_user("test_contact_customer@example.com")
def remove_all_items_from_cart(self):
quotation = _get_cart_quotation()
quotation.flags.ignore_permissions=True
quotation.delete()
def create_user_if_not_exists(self, email, first_name = None):
if frappe.db.exists("User", email):
return
frappe.get_doc({
"doctype": "User",
"user_type": "Website User",
"email": email,
"send_welcome_email": 0,
"first_name": first_name or email.split("@")[0]
}).insert(ignore_permissions=True)
test_dependencies = ["Sales Taxes and Charges Template", "Price List", "Item Price", "Shipping Rule", "Currency Exchange",
"Customer Group", "Lead", "Customer", "Contact", "Address", "Item", "Tax Rule"]
| gpl-3.0 |
mavit/ansible | lib/ansible/modules/network/avi/avi_vrfcontext.py | 20 | 4674 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.2
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_vrfcontext
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of VrfContext Avi RESTful Object
description:
- This module is used to configure VrfContext object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
bgp_profile:
description:
- Bgp local and peer info.
cloud_ref:
description:
- It is a reference to an object of type cloud.
debugvrfcontext:
description:
- Configure debug flags for vrf.
- Field introduced in 17.1.1.
description:
description:
- User defined description for the object.
gateway_mon:
description:
- Configure ping based heartbeat check for gateway in service engines of vrf.
internal_gateway_monitor:
description:
- Configure ping based heartbeat check for all default gateways in service engines of vrf.
- Field introduced in 17.1.1.
name:
description:
- Name of the object.
required: true
static_routes:
description:
- List of staticroute.
system_default:
description:
- Boolean flag to set system_default.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create VrfContext object
avi_vrfcontext:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_vrfcontext
"""
RETURN = '''
obj:
description: VrfContext (api/vrfcontext) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
bgp_profile=dict(type='dict',),
cloud_ref=dict(type='str',),
debugvrfcontext=dict(type='dict',),
description=dict(type='str',),
gateway_mon=dict(type='list',),
internal_gateway_monitor=dict(type='dict',),
name=dict(type='str', required=True),
static_routes=dict(type='list',),
system_default=dict(type='bool',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'vrfcontext',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
ZhuangER/robot_path_planning | gui/pyqtgraph/python2_3.py | 4 | 1727 | """
Helper functions which smooth out the differences between python 2 and 3.
"""
import sys
def asUnicode(x):
if sys.version_info[0] == 2:
if isinstance(x, unicode):
return x
elif isinstance(x, str):
return x.decode('UTF-8')
else:
return unicode(x)
else:
return str(x)
def cmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
def sortList(l, cmpFunc):
if sys.version_info[0] == 2:
l.sort(cmpFunc)
else:
l.sort(key=cmpToKey(cmpFunc))
if sys.version_info[0] == 3:
import builtins
builtins.basestring = str
#builtins.asUnicode = asUnicode
#builtins.sortList = sortList
basestring = str
def cmp(a,b):
if a>b:
return 1
elif b > a:
return -1
else:
return 0
builtins.cmp = cmp
builtins.xrange = range
#else: ## don't use __builtin__ -- this confuses things like pyshell and ActiveState's lazy import recipe
#import __builtin__
#__builtin__.asUnicode = asUnicode
#__builtin__.sortList = sortList
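# --- Illustrative sketch added by the editor; not part of the original
# pyqtgraph file. sortList() lets one cmp-style comparator work under both
# Python 2 and Python 3; `pairs` is a hypothetical list of (name, value) tuples.
def _example_sort_by_value(pairs):
    sortList(pairs, lambda a, b: cmp(a[1], b[1]))
    return pairs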
| mit |
jmighion/ansible | lib/ansible/modules/cloud/amazon/elasticache.py | 26 | 20767 | #!/usr/bin/python
#
# Copyright (c) 2017 Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: elasticache
short_description: Manage cache clusters in Amazon Elasticache.
description:
- Manage cache clusters in Amazon Elasticache.
- Returns information about the specified cache cluster.
version_added: "1.4"
author: "Jim Dalton (@jsdalton)"
options:
state:
description:
- C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster,
resulting in a momentary outage.
choices: ['present', 'absent', 'rebooted']
required: true
name:
description:
- The cache cluster identifier
required: true
engine:
description:
- Name of the cache engine to be used.
required: false
default: memcached
choices: ['redis', 'memcached']
cache_engine_version:
description:
- The version number of the cache engine
required: false
default: None
node_type:
description:
- The compute and memory capacity of the nodes in the cache cluster
required: false
default: cache.m1.small
num_nodes:
description:
- The initial number of cache nodes that the cache cluster will have. Required when state=present.
required: false
cache_port:
description:
- The port number on which each of the cache nodes will accept connections
required: false
default: None
cache_parameter_group:
description:
- The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group
for the specified engine will be used.
required: false
default: None
version_added: "2.0"
aliases: [ 'parameter_group' ]
cache_subnet_group:
description:
- The subnet group name to associate with. Only use if inside a vpc. Required if inside a vpc
required: false
default: None
version_added: "2.0"
security_group_ids:
description:
- A list of vpc security group names to associate with this cache cluster. Only use if inside a vpc
required: false
default: None
version_added: "1.6"
cache_security_groups:
description:
- A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a vpc
required: false
default: None
zone:
description:
- The EC2 Availability Zone in which the cache cluster will be created
required: false
default: None
wait:
description:
- Wait for cache cluster result before returning
required: false
default: yes
choices: [ "yes", "no" ]
hard_modify:
description:
- Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state
required: false
default: no
choices: [ "yes", "no" ]
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic example
- elasticache:
name: "test-please-delete"
state: present
engine: memcached
cache_engine_version: 1.4.14
node_type: cache.m1.small
num_nodes: 1
cache_port: 11211
cache_security_groups:
- default
zone: us-east-1d
# Ensure cache cluster is gone
- elasticache:
name: "test-please-delete"
state: absent
# Reboot cache cluster
- elasticache:
name: "test-please-delete"
state: rebooted
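# Illustrative VPC example (not from the original docs): the subnet group and
# security group id below are placeholders that must already exist in your account
- elasticache:
    name: "test-redis-vpc"
    state: present
    engine: redis
    node_type: cache.t2.small
    num_nodes: 1
    cache_subnet_group: "my-cache-subnet-group"
    security_group_ids:
      - sg-0123456789abcdef0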
"""
from time import sleep
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3, camel_dict_to_snake_dict
try:
import boto3
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
class ElastiCacheManager(object):
"""Handles elasticache creation and destruction"""
EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
def __init__(self, module, name, engine, cache_engine_version, node_type,
num_nodes, cache_port, cache_parameter_group, cache_subnet_group,
cache_security_groups, security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs):
self.module = module
self.name = name
self.engine = engine.lower()
self.cache_engine_version = cache_engine_version
self.node_type = node_type
self.num_nodes = num_nodes
self.cache_port = cache_port
self.cache_parameter_group = cache_parameter_group
self.cache_subnet_group = cache_subnet_group
self.cache_security_groups = cache_security_groups
self.security_group_ids = security_group_ids
self.zone = zone
self.wait = wait
self.hard_modify = hard_modify
self.region = region
self.aws_connect_kwargs = aws_connect_kwargs
self.changed = False
self.data = None
self.status = 'gone'
self.conn = self._get_elasticache_connection()
self._refresh_data()
def ensure_present(self):
"""Ensure cache cluster exists or create it if not"""
if self.exists():
self.sync()
else:
self.create()
def ensure_absent(self):
"""Ensure cache cluster is gone or delete it if not"""
self.delete()
def ensure_rebooted(self):
"""Ensure cache cluster is gone or delete it if not"""
self.reboot()
def exists(self):
"""Check if cache cluster exists"""
return self.status in self.EXIST_STATUSES
def create(self):
"""Create an ElastiCache cluster"""
if self.status == 'available':
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
else:
msg = "'%s' is currently deleting. Cannot create."
self.module.fail_json(msg=msg % self.name)
kwargs = dict(CacheClusterId=self.name,
NumCacheNodes=self.num_nodes,
CacheNodeType=self.node_type,
Engine=self.engine,
EngineVersion=self.cache_engine_version,
CacheSecurityGroupNames=self.cache_security_groups,
SecurityGroupIds=self.security_group_ids,
CacheParameterGroupName=self.cache_parameter_group,
CacheSubnetGroupName=self.cache_subnet_group,
PreferredAvailabilityZone=self.zone)
if self.cache_port is not None:
kwargs['Port'] = self.cache_port
try:
self.conn.create_cache_cluster(**kwargs)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
self._refresh_data()
self.changed = True
if self.wait:
self._wait_for_status('available')
return True
def delete(self):
"""Destroy an ElastiCache cluster"""
if self.status == 'gone':
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
msg = "'%s' is currently %s. Cannot delete."
self.module.fail_json(msg=msg % (self.name, self.status))
try:
response = self.conn.delete_cache_cluster(CacheClusterId=self.name)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
cache_cluster_data = response['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('gone')
def sync(self):
"""Sync settings to cluster if required"""
if not self.exists():
msg = "'%s' is %s. Cannot sync."
self.module.fail_json(msg=msg % (self.name, self.status))
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
# Cluster can only be synced if available. If we can't wait
# for this, then just be done.
return
if self._requires_destroy_and_create():
if not self.hard_modify:
msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed."
self.module.fail_json(msg=msg % self.name)
if not self.wait:
msg = "'%s' requires destructive modification. 'wait' must be set to true."
self.module.fail_json(msg=msg % self.name)
self.delete()
self.create()
return
if self._requires_modification():
self.modify()
def modify(self):
"""Modify the cache cluster. Note it's only possible to modify a few select options."""
nodes_to_remove = self._get_nodes_to_remove()
try:
self.conn.modify_cache_cluster(CacheClusterId=self.name,
NumCacheNodes=self.num_nodes,
CacheNodeIdsToRemove=nodes_to_remove,
CacheSecurityGroupNames=self.cache_security_groups,
CacheParameterGroupName=self.cache_parameter_group,
SecurityGroupIds=self.security_group_ids,
ApplyImmediately=True,
EngineVersion=self.cache_engine_version)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
self._refresh_data()
self.changed = True
if self.wait:
self._wait_for_status('available')
def reboot(self):
"""Reboot the cache cluster"""
if not self.exists():
msg = "'%s' is %s. Cannot reboot."
self.module.fail_json(msg=msg % (self.name, self.status))
if self.status == 'rebooting':
return
if self.status in ['creating', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
msg = "'%s' is currently %s. Cannot reboot."
self.module.fail_json(msg=msg % (self.name, self.status))
# Collect ALL nodes for reboot
cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
try:
self.conn.reboot_cache_cluster(CacheClusterId=self.name,
CacheNodeIdsToReboot=cache_node_ids)
except botocore.exceptions.ClientError as e:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
self._refresh_data()
self.changed = True
if self.wait:
self._wait_for_status('available')
def get_info(self):
"""Return basic info about the cache cluster"""
info = {
'name': self.name,
'status': self.status
}
if self.data:
info['data'] = self.data
return info
def _wait_for_status(self, awaited_status):
"""Wait for status to change from present status to awaited_status"""
status_map = {
'creating': 'available',
'rebooting': 'available',
'modifying': 'available',
'deleting': 'gone'
}
if self.status == awaited_status:
# No need to wait, we're already done
return
if status_map[self.status] != awaited_status:
msg = "Invalid awaited status. '%s' cannot transition to '%s'"
self.module.fail_json(msg=msg % (self.status, awaited_status))
if awaited_status not in set(status_map.values()):
msg = "'%s' is not a valid awaited status."
self.module.fail_json(msg=msg % awaited_status)
while True:
sleep(1)
self._refresh_data()
if self.status == awaited_status:
break
def _requires_modification(self):
"""Check if cluster requires (nondestructive) modification"""
# Check modifiable data attributes
modifiable_data = {
'NumCacheNodes': self.num_nodes,
'EngineVersion': self.cache_engine_version
}
for key, value in modifiable_data.items():
if value is not None and self.data[key] != value:
return True
# Check cache security groups
cache_security_groups = []
for sg in self.data['CacheSecurityGroups']:
cache_security_groups.append(sg['CacheSecurityGroupName'])
if set(cache_security_groups) != set(self.cache_security_groups):
return True
# check vpc security groups
if self.security_group_ids:
vpc_security_groups = []
security_groups = self.data['SecurityGroups'] or []
for sg in security_groups:
vpc_security_groups.append(sg['SecurityGroupId'])
if set(vpc_security_groups) != set(self.security_group_ids):
return True
return False
def _requires_destroy_and_create(self):
"""
Check whether a destroy and create is required to synchronize cluster.
"""
unmodifiable_data = {
'node_type': self.data['CacheNodeType'],
'engine': self.data['Engine'],
'cache_port': self._get_port()
}
# Only check for modifications if zone is specified
if self.zone is not None:
unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone']
for key, value in unmodifiable_data.items():
if getattr(self, key) is not None and getattr(self, key) != value:
return True
return False
def _get_elasticache_connection(self):
"""Get an elasticache connection"""
region, ec2_url, aws_connect_params = get_aws_connection_info(self.module, boto3=True)
if region:
return boto3_conn(self.module, conn_type='client', resource='elasticache',
region=region, endpoint=ec2_url, **aws_connect_params)
else:
self.module.fail_json(msg="region must be specified")
def _get_port(self):
"""Get the port. Where this information is retrieved from is engine dependent."""
if self.data['Engine'] == 'memcached':
return self.data['ConfigurationEndpoint']['Port']
elif self.data['Engine'] == 'redis':
# Redis only supports a single node (presently) so just use
# the first and only
return self.data['CacheNodes'][0]['Endpoint']['Port']
def _refresh_data(self, cache_cluster_data=None):
"""Refresh data about this cache cluster"""
if cache_cluster_data is None:
try:
response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'CacheClusterNotFound':
self.data = None
self.status = 'gone'
return
else:
self.module.fail_json(msg=e.message, exception=format_exc(),
**camel_dict_to_snake_dict(e.response))
cache_cluster_data = response['CacheClusters'][0]
self.data = cache_cluster_data
self.status = self.data['CacheClusterStatus']
# The documentation for elasticache lies -- status on rebooting is set
# to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it
# here to make status checks etc. more sane.
if self.status == 'rebooting cache cluster nodes':
self.status = 'rebooting'
def _get_nodes_to_remove(self):
"""If there are nodes to remove, it figures out which need to be removed"""
num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes
if num_nodes_to_remove <= 0:
return []
if not self.hard_modify:
msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
self.module.fail_json(msg=msg % self.name)
cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
return cache_node_ids[-num_nodes_to_remove:]
def main():
""" elasticache ansible module """
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent', 'rebooted']),
name=dict(required=True),
engine=dict(default='memcached'),
cache_engine_version=dict(default=""),
node_type=dict(default='cache.t2.small'),
num_nodes=dict(default=1, type='int'),
# alias for compat with the original PR 1950
cache_parameter_group=dict(default="", aliases=['parameter_group']),
cache_port=dict(type='int'),
cache_subnet_group=dict(default=""),
cache_security_groups=dict(default=[], type='list'),
security_group_ids=dict(default=[], type='list'),
zone=dict(default=""),
wait=dict(default=True, type='bool'),
hard_modify=dict(type='bool')
))
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
name = module.params['name']
state = module.params['state']
engine = module.params['engine']
cache_engine_version = module.params['cache_engine_version']
node_type = module.params['node_type']
num_nodes = module.params['num_nodes']
cache_port = module.params['cache_port']
cache_subnet_group = module.params['cache_subnet_group']
cache_security_groups = module.params['cache_security_groups']
security_group_ids = module.params['security_group_ids']
zone = module.params['zone']
wait = module.params['wait']
hard_modify = module.params['hard_modify']
cache_parameter_group = module.params['cache_parameter_group']
if cache_subnet_group and cache_security_groups:
module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups")
if state == 'present' and not num_nodes:
module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")
elasticache_manager = ElastiCacheManager(module, name, engine,
cache_engine_version, node_type,
num_nodes, cache_port,
cache_parameter_group,
cache_subnet_group,
cache_security_groups,
security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs)
if state == 'present':
elasticache_manager.ensure_present()
elif state == 'absent':
elasticache_manager.ensure_absent()
elif state == 'rebooted':
elasticache_manager.ensure_rebooted()
facts_result = dict(changed=elasticache_manager.changed,
elasticache=elasticache_manager.get_info())
module.exit_json(**facts_result)
if __name__ == '__main__':
main()
| gpl-3.0 |
t3dev/odoo | addons/website_sale_digital/controllers/main.py | 5 | 4569 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import io
from werkzeug.utils import redirect
from odoo import http
from odoo.http import request
from odoo.addons.sale.controllers.portal import CustomerPortal
from odoo.addons.website_sale.controllers.main import WebsiteSale
class WebsiteSaleDigitalConfirmation(WebsiteSale):
@http.route([
'/shop/confirmation',
], type='http', auth="public", website=True)
def payment_confirmation(self, **post):
response = super(WebsiteSaleDigitalConfirmation, self).payment_confirmation(**post)
order_lines = response.qcontext['order'].order_line
digital_content = any(x.product_id.type == 'digital' for x in order_lines)
response.qcontext.update(digital=digital_content)
return response
class WebsiteSaleDigital(CustomerPortal):
orders_page = '/my/orders'
@http.route([
'/my/orders/<int:order_id>',
], type='http', auth='public', website=True)
def portal_order_page(self, order_id=None, **post):
response = super(WebsiteSaleDigital, self).portal_order_page(order_id=order_id, **post)
        if 'sale_order' not in response.qcontext:
return response
order = response.qcontext['sale_order']
invoiced_lines = request.env['account.invoice.line'].sudo().search([('invoice_id', 'in', order.invoice_ids.ids), ('invoice_id.state', '=', 'paid')])
products = invoiced_lines.mapped('product_id') | order.order_line.filtered(lambda r: not r.price_subtotal).mapped('product_id')
if not order.amount_total:
# in that case, we should add all download links to the products
# since there is nothing to pay, so we shouldn't wait for an invoice
products = order.order_line.mapped('product_id')
purchased_products_attachments = {}
for product in products:
# Search for product attachments
Attachment = request.env['ir.attachment']
product_id = product.id
template = product.product_tmpl_id
att = Attachment.search_read(
domain=['|', '&', ('res_model', '=', product._name), ('res_id', '=', product_id), '&', ('res_model', '=', template._name), ('res_id', '=', template.id), ('product_downloadable', '=', True)],
fields=['name', 'write_date'],
order='write_date desc',
)
# Ignore products with no attachments
if not att:
continue
purchased_products_attachments[product_id] = att
response.qcontext.update({
'digital_attachments': purchased_products_attachments,
})
return response
@http.route([
'/my/download',
], type='http', auth='public')
def download_attachment(self, attachment_id):
# Check if this is a valid attachment id
attachment = request.env['ir.attachment'].sudo().search_read(
[('id', '=', int(attachment_id))],
["name", "datas", "file_type", "res_model", "res_id", "type", "url"]
)
if attachment:
attachment = attachment[0]
else:
return redirect(self.orders_page)
# Check if the user has bought the associated product
res_model = attachment['res_model']
res_id = attachment['res_id']
purchased_products = request.env['account.invoice.line'].get_digital_purchases()
if res_model == 'product.product':
if res_id not in purchased_products:
return redirect(self.orders_page)
# Also check for attachments in the product templates
elif res_model == 'product.template':
template_ids = request.env['product.product'].sudo().browse(purchased_products).mapped('product_tmpl_id').ids
if res_id not in template_ids:
return redirect(self.orders_page)
else:
return redirect(self.orders_page)
# The client has bought the product, otherwise it would have been blocked by now
if attachment["type"] == "url":
if attachment["url"]:
return redirect(attachment["url"])
else:
return request.not_found()
elif attachment["datas"]:
data = io.BytesIO(base64.standard_b64decode(attachment["datas"]))
return http.send_file(data, filename=attachment['name'], as_attachment=True)
else:
return request.not_found()
| gpl-3.0 |
doheekim/chuizonetest | lib/sqlalchemy/dialects/sqlite/pysqlite.py | 10 | 13249 | # sqlite/pysqlite.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sqlite+pysqlite
:name: pysqlite
:dbapi: sqlite3
:connectstring: sqlite+pysqlite:///file_path
:url: http://docs.python.org/library/sqlite3.html
Note that ``pysqlite`` is the same driver as the ``sqlite3``
module included with the Python distribution.
Driver
------
When using Python 2.5 and above, the built in ``sqlite3`` driver is
already installed and no additional installation is needed. Otherwise,
the ``pysqlite2`` driver needs to be present. This is the same driver as
``sqlite3``, just with a different name.
The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3``
is loaded. This allows an explicitly installed pysqlite driver to take
precedence over the built in one. As with all dialects, a specific
DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
this explicitly::
from sqlite3 import dbapi2 as sqlite
e = create_engine('sqlite+pysqlite:///file.db', module=sqlite)
Connect Strings
---------------
The file specification for the SQLite database is taken as the "database"
portion of the URL. Note that the format of a SQLAlchemy url is::
driver://user:pass@host/database
This means that the actual filename to be used starts with the characters to
the **right** of the third slash. So connecting to a relative filepath
looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
An absolute path, which is denoted by starting with a slash, means you
need **four** slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
To use a Windows path, regular drive specifications and backslashes can be
used. Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\\\path\\\\to\\\\database.db')
The sqlite ``:memory:`` identifier is the default if no filepath is
present. Specify ``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect of any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
connect_args={'detect_types':
sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
With this flag enabled, the DATE and TIMESTAMP types (but note - not the
DATETIME or TIME types...confused yet ?) will not perform any bind parameter
or result processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result
processing.
.. _pysqlite_threading_pooling:
Threading/Pooling Behavior
---------------------------
Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread. This is originally intended to work with older
versions of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
Pysqlite does include a now-undocumented flag known as
``check_same_thread`` which will disable this check, however note that
pysqlite connections are still not safe to use concurrently in multiple
threads. In particular, any statement execution calls would need to be
externally mutexed, as Pysqlite does not provide for thread-safe propagation
of error messages among other things. So while even ``:memory:`` databases
can be shared among threads in modern SQLite, Pysqlite doesn't provide enough
thread-safety to make this usage worth it.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
* When a ``:memory:`` SQLite database is specified, the dialect by default
will use :class:`.SingletonThreadPool`. This pool maintains a single
connection per thread, so that all access to the engine within the current
thread use the same ``:memory:`` database - other threads would access a
different ``:memory:`` database.
* When a file-based database is specified, the dialect will use
:class:`.NullPool` as the source of connections. This pool closes and
discards connections which are returned to the pool immediately. SQLite
file-based connections have extremely low overhead, so pooling is not
necessary. The scheme also prevents a connection from being used again in
a different thread and works best with SQLite's coarse-grained file locking.
.. versionchanged:: 0.7
Default selection of :class:`.NullPool` for SQLite file-based databases.
Previous versions select :class:`.SingletonThreadPool` by
default for all SQLite databases.
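If this default selection needs to be overridden, a pool class can be passed
explicitly to :func:`.create_engine` (a minimal sketch; not required for
typical usage)::
    from sqlalchemy.pool import NullPool
    engine = create_engine('sqlite:///myfile.db', poolclass=NullPool)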
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use a ``:memory:`` database in a multithreaded scenario, the same
connection object must be shared among threads, since the database exists
only within the scope of that connection. The
:class:`.StaticPool` implementation will maintain a single connection
globally, and the ``check_same_thread`` flag can be passed to Pysqlite
as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Due to the way SQLite deals with temporary tables, if you wish to use a
temporary table in a file-based SQLite database across multiple checkouts
from the connection pool, such as when using an ORM :class:`.Session` where
the temporary table should continue to remain after :meth:`.Session.commit` or
:meth:`.Session.rollback` is called, a pool which maintains a single
connection must be used. Use :class:`.SingletonThreadPool` if the scope is
only needed within the current thread, or :class:`.StaticPool` if the scope is
needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
engine = create_engine('sqlite:///mydb.db',
poolclass=SingletonThreadPool)
# maintain the same connection across all threads
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite:///mydb.db',
poolclass=StaticPool)
Note that :class:`.SingletonThreadPool` should be configured for the number
of threads that are to be used; beyond that number, connections will be
closed out in a non-deterministic way.
Unicode
-------
The pysqlite driver only returns Python ``unicode`` objects in result sets,
never plain strings, and accommodates ``unicode`` objects within bound
parameter values in all cases. Regardless of the SQLAlchemy string type in
use, string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
is passed containing non-ASCII characters.
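As a minimal illustration (an assumption about typical usage, not taken from
the original documentation), a column expected to hold non-ASCII text would be
declared with :class:`.Unicode`::
    from sqlalchemy import Table, Column, Integer, MetaData, Unicode
    metadata = MetaData()
    documents = Table('documents', metadata,
                      Column('id', Integer, primary_key=True),
                      Column('title', Unicode(200)))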
.. _pysqlite_serializable:
Serializable Transaction Isolation
----------------------------------
The pysqlite DBAPI driver has a long-standing bug in which transactional
state is not begun until the first DML statement, that is INSERT, UPDATE
or DELETE, is emitted. A SELECT statement will not cause transactional
state to begin. While this mode of usage is fine for typical situations
and has the advantage that the SQLite database file is not prematurely
locked, it breaks serializable transaction isolation, which requires
that the database file be locked upon any SQL being emitted.
To work around this issue, the ``BEGIN`` keyword can be emitted
at the start of each transaction. The following recipe establishes
a :meth:`.ConnectionEvents.begin` handler to achieve this::
from sqlalchemy import create_engine, event
engine = create_engine("sqlite:///myfile.db",
isolation_level='SERIALIZABLE')
@event.listens_for(engine, "begin")
def do_begin(conn):
conn.execute("BEGIN")
"""
from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE
from sqlalchemy import exc, pool
from sqlalchemy import types as sqltypes
from sqlalchemy import util
import os
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATETIME.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATETIME.result_processor(self, dialect, coltype)
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = 'qmark'
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
sqltypes.Date: _SQLite_pysqliteDate,
sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp,
}
)
if not util.py2k:
description_encoding = None
driver = 'pysqlite'
def __init__(self, **kwargs):
SQLiteDialect.__init__(self, **kwargs)
if self.dbapi is not None:
sqlite_ver = self.dbapi.version_info
if sqlite_ver < (2, 1, 3):
util.warn(
("The installed version of pysqlite2 (%s) is out-dated "
"and will cause errors in some cases. Version 2.1.3 "
"or greater is recommended.") %
'.'.join([str(subver) for subver in sqlite_ver]))
@classmethod
def dbapi(cls):
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError as e:
try:
from sqlite3 import dbapi2 as sqlite # try 2.5+ stdlib name.
except ImportError:
raise e
return sqlite
@classmethod
def get_pool_class(cls, url):
if url.database and url.database != ':memory:':
return pool.NullPool
else:
return pool.SingletonThreadPool
def _get_server_version_info(self, connection):
return self.dbapi.sqlite_version_info
def create_connect_args(self, url):
if url.username or url.password or url.host or url.port:
raise exc.ArgumentError(
"Invalid SQLite URL: %s\n"
"Valid SQLite URL forms are:\n"
" sqlite:///:memory: (or, sqlite://)\n"
" sqlite:///relative/path/to/file.db\n"
" sqlite:////absolute/path/to/file.db" % (url,))
filename = url.database or ':memory:'
if filename != ':memory:':
filename = os.path.abspath(filename)
opts = url.query.copy()
util.coerce_kw_type(opts, 'timeout', float)
util.coerce_kw_type(opts, 'isolation_level', str)
util.coerce_kw_type(opts, 'detect_types', int)
util.coerce_kw_type(opts, 'check_same_thread', bool)
util.coerce_kw_type(opts, 'cached_statements', int)
return ([filename], opts)
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.ProgrammingError) and \
"Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
| apache-2.0 |