from __future__ import print_function, absolute_import, division, unicode_literals
try:
import numpy
except ImportError:
numpy = None
def Xtest_numpy():
import srsly.ruamel_yaml
if numpy is None:
return
data = numpy.arange(10)
print("data", type(data), data)
yaml_str = srsly.ruamel_yaml.dump(data)
datb = srsly.ruamel_yaml.load(yaml_str)
print("datb", type(datb), datb)
print("\nYAML", yaml_str)
assert data == datb
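# Illustrative sketch (added; not part of the original test file): converting
# the array to a plain Python list before dumping sidesteps the YAML
# representation of ndarrays and avoids the ambiguous truth value of
# `data == datb` asserted above. The helper name below is hypothetical.
def Xtest_numpy_roundtrip_via_list():
    import srsly.ruamel_yaml
    if numpy is None:
        return
    data = numpy.arange(10)
    # Dump a list, then rebuild the array from the loaded list.
    yaml_str = srsly.ruamel_yaml.dump(data.tolist())
    datb = numpy.asarray(srsly.ruamel_yaml.load(yaml_str))
    assert numpy.array_equal(data, datb)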
{
"content_hash": "c5cd502eb5fcbf036f10993798eea1d9",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 82,
"avg_line_length": 21.40909090909091,
"alnum_prop": 0.6411889596602972,
"repo_name": "explosion/srsly",
"id": "2c21854408f94aea8f9a0fd86a562e6e0f59b012",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "srsly/tests/ruamel_yaml/test_numpy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "137170"
},
{
"name": "C++",
"bytes": "16926"
},
{
"name": "Cython",
"bytes": "33068"
},
{
"name": "Python",
"bytes": "981631"
},
{
"name": "Shell",
"bytes": "346"
}
],
"symlink_target": ""
}
"""Tests for tf.keras models using tf.distribute.Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import test
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import loss_reduction
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
_RANDOM_SEED = 1337
_TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2
# Note: Please make sure the tests in this file are also covered in
# keras_backward_compat_test for features that are supported with both APIs.
# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as
# part of the tf.keras unit tests suite.
def simple_sequential_model():
model = keras.models.Sequential()
model.add(keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
model.add(keras.layers.Dropout(0.1))
model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))
return model
def simple_subclassed_model(num_labels=_NUM_CLASS):
class _SimpleMLP(keras.Model):
def __init__(self, num_labels):
super(_SimpleMLP, self).__init__()
self.dense = keras.layers.Dense(num_labels)
def call(self, inputs):
return self.dense(inputs)
return _SimpleMLP(num_labels)
def simple_multi_inputs_multi_outputs_model():
input_a = keras.layers.Input(shape=(16,), name='input_a')
input_b = keras.layers.Input(shape=(16,), name='input_b')
merged = keras.layers.concatenate([input_a, input_b], name='merge')
output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
model = keras.models.Model(
inputs=[input_a, input_b], outputs=[output_c, output_d])
return model
def get_multi_inputs_multi_outputs_data():
(a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=3,
random_seed=_RANDOM_SEED)
(b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(16,),
num_classes=2,
random_seed=_RANDOM_SEED)
(m_train, _), (m_test, _) = testing_utils.get_test_data(
train_samples=_TRAIN_SIZE,
test_samples=50,
input_shape=(8,),
num_classes=2,
random_seed=_RANDOM_SEED)
c_train = keras.utils.to_categorical(c_train)
c_test = keras.utils.to_categorical(c_test)
d_train = keras.utils.to_categorical(d_train)
d_test = keras.utils.to_categorical(d_test)
train_data = {
'input_a': a_train,
'input_b': b_train,
'input_m': m_train,
'output_c': c_train,
'output_d': d_train
}
test_data = {
'input_a': a_test,
'input_b': b_test,
'input_m': m_test,
'output_c': c_test,
'output_d': d_test
}
return (train_data, test_data)
def batch_wrapper(dataset, batch_size, distribution, repeat=None):
if repeat:
dataset = dataset.repeat(repeat)
# TPUs currently require fully defined input shapes; drop_remainder ensures
# the input will have fully defined shapes.
if isinstance(distribution, (tpu_strategy.TPUStrategy,
tpu_strategy.TPUStrategyV1)):
return dataset.batch(batch_size, drop_remainder=True)
else:
return dataset.batch(batch_size)
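# Minimal sketch (added; not part of the original file): `drop_remainder=True`
# gives every batch a statically known leading dimension, which is what the
# TPU branch of `batch_wrapper` above relies on; plain `batch()` leaves the
# batch dimension unknown because the final batch may be smaller.
def _static_vs_dynamic_batch_sketch():
    ds = dataset_ops.Dataset.from_tensor_slices(np.zeros((10, 3), np.float32))
    static_batches = ds.batch(4, drop_remainder=True)  # shape (4, 3) batches
    dynamic_batches = ds.batch(4)  # shape (None, 3); the last batch has 2 rows
    return static_batches, dynamic_batches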
def get_model():
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
model = keras.Model(x, y)
return model
def get_sample_weights_model():
x = keras.layers.Input(shape=(1,), name='input')
y = keras.layers.Dense(
1, kernel_initializer='ones', bias_initializer='zeros', name='dense')(x)
model = keras.Model(x, y)
return model
def get_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def get_predict_dataset(distribution):
inputs = np.zeros((10, 3), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
dataset = dataset.repeat(100)
dataset = batch_wrapper(dataset, 10, distribution)
return dataset
def convert_numpy_to_dataset_with_unknown_cardinality(inputs,
targets=None):
if targets is not None:
input_slices = (inputs, targets)
dummy_op = (lambda inp, target: True)
else:
input_slices = inputs
dummy_op = (lambda inp: True)
original_dataset = (dataset_ops.Dataset.from_tensor_slices(
input_slices))
ds_with_unknown_cardinality = (original_dataset.filter(dummy_op).
batch(10, drop_remainder=True))
return ds_with_unknown_cardinality
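# Brief sketch (added; not part of the original file): the always-true `filter`
# above is what makes the cardinality statically unknown, because tf.data
# cannot count how many elements pass an arbitrary predicate.
def _unknown_cardinality_sketch():
    ds = convert_numpy_to_dataset_with_unknown_cardinality(
        np.zeros((100, 3), dtype=np.float32))
    return cardinality.cardinality(ds)  # evaluates to cardinality.UNKNOWN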
def multi_input_output_model():
a = keras.layers.Input(shape=(3,), name='input_a')
b = keras.layers.Input(shape=(5,), name='input_b')
# TODO(anjalisridhar): Change the output dimension of the second Dense layer
# once the iterator output validation issue has been fixed.
dense_1 = keras.layers.Dense(7, name='dense_1')
dense_2 = keras.layers.Dense(7, name='dense_2')
c = dense_1(a)
d = dense_2(b)
e = keras.layers.Dropout(0.5, name='dropout')(c)
model = keras.models.Model([a, b], [d, e])
return model
strategies_minus_default_minus_tpu = [
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus
]
strategies_minus_tpu = [
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus
]
tpu_strategies = [
strategy_combinations.tpu_strategy, # steps_per_run=2
strategy_combinations.tpu_strategy_one_step
]
def strategy_minus_tpu_combinations():
return combinations.combine(distribution=strategies_minus_tpu,
mode=['graph', 'eager'])
def tpu_strategy_combinations():
return combinations.combine(distribution=tpu_strategies,
mode=['graph', 'eager'])
def tpu_strategy_combinations_graph_only():
return combinations.combine(distribution=tpu_strategies,
mode=['graph'])
def all_strategy_combinations():
return strategy_minus_tpu_combinations() + tpu_strategy_combinations()
def all_strategy_combinations_plus_cloning():
return (
combinations.combine(
distribution=strategies_minus_tpu,
mode=['graph', 'eager'],
cloning=[True, False]) +
combinations.combine(
distribution=tpu_strategies,
mode=['graph', 'eager'],
cloning=[False]))
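# Note (added commentary, not in the original file): as used in these tests,
# `cloning=True` in `compile()` selects the older code path that clones the
# model onto each replica, while `cloning=False` uses the newer single-model
# path; the TODO(b/130808953) comments reflect that the v1 optimizers
# currently require the cloning path.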
def all_strategy_minus_default_and_tpu_combinations():
return combinations.combine(
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode=['graph', 'eager'])
def all_strategy_combinations_minus_default():
return (all_strategy_minus_default_and_tpu_combinations() +
tpu_strategy_combinations())
def strategy_and_optimizer_combinations():
non_tpu_strategies = combinations.times(
strategy_minus_tpu_combinations(),
# TODO(b/130808953): Simplify when optimizers v1 work with cloning=False.
combinations.combine(
optimizer=[
strategy_combinations.adagrad_optimizer_v1_fn,
strategy_combinations.adam_optimizer_v1_fn,
strategy_combinations.gradient_descent_optimizer_v1_fn,
strategy_combinations.rmsprop_optimizer_v1_fn,
],
cloning=True) +
combinations.combine(
optimizer=[
strategy_combinations.adagrad_optimizer_keras_v2_fn,
strategy_combinations.adam_optimizer_keras_v2_fn,
strategy_combinations.gradient_descent_optimizer_keras_v2_fn,
strategy_combinations.rmsprop_optimizer_keras_v2_fn
],
cloning=[True, False]))
# TODO(b/130808953): Simplify when optimizers v1 work with cloning=False.
tpu_strategies_graph = combinations.combine(
distribution=tpu_strategies,
mode=['graph'],
cloning=[True],
optimizer=[
strategy_combinations.adagrad_optimizer_v1_fn,
strategy_combinations.adam_optimizer_v1_fn,
strategy_combinations.gradient_descent_optimizer_v1_fn,
strategy_combinations.rmsprop_optimizer_v1_fn,
strategy_combinations.adagrad_optimizer_keras_v2_fn,
strategy_combinations.adam_optimizer_keras_v2_fn,
strategy_combinations.gradient_descent_optimizer_keras_v2_fn,
strategy_combinations.rmsprop_optimizer_keras_v2_fn
])
tpu_strategies_eager = combinations.combine(
distribution=tpu_strategies,
mode=['eager'],
cloning=[False],
optimizer=[
strategy_combinations.adagrad_optimizer_keras_v2_fn,
strategy_combinations.adam_optimizer_keras_v2_fn,
strategy_combinations.gradient_descent_optimizer_keras_v2_fn,
strategy_combinations.rmsprop_optimizer_keras_v2_fn
])
return non_tpu_strategies + tpu_strategies_eager + tpu_strategies_graph
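# Note (added commentary, not in the original file): `combinations.combine`
# expands its keyword arguments into a list of parameter dictionaries, and
# `combinations.times` takes the cross product of such lists, so the helpers
# above enumerate every (distribution, mode, cloning/optimizer) case consumed
# by `@combinations.generate` below.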
class TestDistributionStrategyWithNumpyArrays(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations())
def test_calculating_input_params_no_steps_no_batch_size(self, distribution):
# Calculate the per_replica_batch_size scaling factor for strategies
# that use per_core_batch_size
replica_scale_factor = 1.0
if not distributed_training_utils.global_batch_size_supported(distribution):
replica_scale_factor = distribution.num_replicas_in_sync
with self.cached_session():
# Input samples of different sizes
input_20_samples = np.zeros((20, 3), dtype=np.float32)
input_64_samples = np.zeros((64, 3), dtype=np.float32)
# Default global batch size 32 for input with 64 samples run in 2 steps
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=None, batch_size=None)
self.assertEqual(batch_size, 32 // replica_scale_factor)
self.assertEqual(steps, 2)
# Computed global batch size 20 is lower than 32 if we pass fewer samples.
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_20_samples, steps=None, batch_size=None)
self.assertEqual(batch_size, 20 // replica_scale_factor)
self.assertEqual(steps, 1)
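# Worked example (added commentary, not in the original file): for a strategy
# that uses a per-core batch size with 2 replicas in sync,
# replica_scale_factor is 2, so the 64-sample case above expects
# batch_size = 32 // 2 = 16 per replica (still 2 steps) and the 20-sample
# case expects 20 // 2 = 10 (1 step).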
@combinations.generate(all_strategy_combinations())
def test_calculating_input_params_with_steps_no_batch_size(self,
distribution):
# Calculate the per_replica_batch_size scaling factor for strategies
# that use per_core_batch_size
replica_scale_factor = 1.0
if not distributed_training_utils.global_batch_size_supported(distribution):
replica_scale_factor = distribution.num_replicas_in_sync
with self.cached_session():
# Input samples of different sizes
input_63_samples = np.zeros((63, 3), dtype=np.float32)
input_64_samples = np.zeros((64, 3), dtype=np.float32)
# Computed global batch size is correct when 1 step is specified
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=1, batch_size=None)
self.assertEqual(batch_size, 64 // replica_scale_factor)
self.assertEqual(steps, 1)
# Computed global batch size is correct when 2 steps are specified
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=2, batch_size=None)
self.assertEqual(batch_size, 32 // replica_scale_factor)
self.assertEqual(steps, 2)
# All samples cannot be consumed in the specified number of steps
with self.assertRaisesRegexp(ValueError, 'not divisible by steps'):
distributed_training_utils.get_input_params(
distribution, input_63_samples, steps=2, batch_size=None)
# This case differs across strategies because the supported batch size is
# either global or per-replica.
if replica_scale_factor == 1:
# Computed global batch size is correct even if not shardable
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_63_samples, steps=3, batch_size=None)
self.assertEqual(batch_size, 21)
self.assertEqual(steps, 3)
else:
# Computed global batch size cannot be sharded across replicas
with self.assertRaisesRegexp(ValueError, 'could not be sharded evenly '
'across the sync replicas'):
distributed_training_utils.get_input_params(
distribution, input_63_samples, steps=1, batch_size=None)
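# Worked example (added commentary, not in the original file): when the batch
# size is global, 63 samples over 3 steps gives 63 / 3 = 21 per step; with a
# per-replica batch size and 2 replicas, 63 samples in a single step would
# need a global batch of 63, which cannot be sharded evenly across replicas.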
@combinations.generate(all_strategy_combinations())
def test_calculating_input_params_no_steps_with_batch_size(self,
distribution):
# Calculate the per_replica_batch_size scaling factor for strategies
# that use per_core_batch_size
replica_scale_factor = 1.0
if not distributed_training_utils.global_batch_size_supported(distribution):
replica_scale_factor = distribution.num_replicas_in_sync
with self.cached_session():
input_64_samples = np.zeros((64, 3), dtype=np.float32)
# Computed number of steps is correct for the specified batch size
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=None, batch_size=16)
self.assertEqual(batch_size, 16)
self.assertEqual(steps, 4 // replica_scale_factor)
# Computed number of steps is correct for the specified batch size
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=None, batch_size=32)
self.assertEqual(batch_size, 32)
self.assertEqual(steps, 2 // replica_scale_factor)
@combinations.generate(all_strategy_combinations())
def test_calculating_input_params_with_steps_with_batch_size(self,
distribution):
with self.cached_session():
input_64_samples = np.zeros((64, 3), dtype=np.float32)
# No change to steps and batch size if both specified and feasible
steps, batch_size = distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=5, batch_size=3)
self.assertEqual(batch_size, 3)
self.assertEqual(steps, 5)
# Number of samples is less than global batch size * steps
with self.assertRaisesRegexp(ValueError, 'less than samples required'):
distributed_training_utils.get_input_params(
distribution, input_64_samples, steps=10, batch_size=13)
@combinations.generate(all_strategy_combinations_plus_cloning())
def test_calling_model_with_numpy_arrays(self, distribution, cloning):
with self.cached_session():
with distribution.scope():
# TODO(b/130808953): Re-enable the V1 optimizer after iterations is
# mirrored.
optimizer_fn = (
gradient_descent.GradientDescentOptimizer
if cloning or not distribution_strategy_context.has_strategy()
else gradient_descent_keras.SGD)
optimizer = optimizer_fn(0.001)
model = get_model()
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics, cloning=cloning)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0,
validation_data=(inputs, targets))
# TODO(anjalisridhar): We need tests for when the batch size and steps
# are smaller and result in a 0 batch_size and steps value.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(all_strategy_combinations_plus_cloning())
def test_calling_model_with_nested_numpy_arrays(self, distribution, cloning):
with self.cached_session():
with distribution.scope():
# TODO(b/130808953): Re-enable the V1 optimizer after iterations is
# mirrored.
optimizer_fn = (
gradient_descent.GradientDescentOptimizer
if cloning else gradient_descent_keras.SGD)
optimizer = optimizer_fn(learning_rate=0.001)
model = multi_input_output_model()
loss = 'mse'
model.compile(optimizer, loss, cloning=cloning)
input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
targets = [output_d_np, output_e_np]
# Call fit with validation data
model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)
# TODO(anjalisridhar): We need tests for when the batch size and steps are
# smaller and result in a 0 batch_size and steps value.
model.evaluate(inputs, targets)
# with steps
model.evaluate(inputs, targets, steps=2)
# with batch_size
model.evaluate(inputs, targets, batch_size=8)
model.predict(inputs)
# with steps
model.predict(inputs, steps=2)
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(
combinations.combine(distribution=strategies_minus_tpu,
mode=['graph', 'eager']))
def test_numpy_with_sample_weights(self, distribution):
with self.cached_session(), distribution.scope():
model = get_sample_weights_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss)
inputs = np.array([[0], [1], [2], [3]], np.float32)
targets = np.array([[2], [4], [6], [8]], np.float32)
sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
result = model.evaluate(inputs, targets, batch_size=2,
sample_weight=sample_weights, verbose=1)
# The per-sample loss is multiplied by the corresponding sample weight. The
# average of these weighted losses is the return value of the `evaluate`
# call. For example, in the test above the average weighted loss is
# calculated in the following manner:
# batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 = 2.75
# batch_2 = (((6-2)^2 * 0.75) + ((8-3)^2 * 1)) / 2 = 37 / 2 = 18.5
# final result = (batch_1 + batch_2) / 2 = 10.625.
# The first time we divide by number of input samples and the second time
# we divide by number of steps/batches that the loss is aggregated over.
self.assertAllClose(result, 10.625)
# We now test without passing sample_weights:
# batch_1 = (((2-0)^2) + ((4-1)^2)) / 2 = 13 / 2 = 6.5
# batch_2 = (((6-2)^2) + ((8-3)^2)) / 2 = 41 / 2 = 20.5
# final result = (batch_1 + batch_2) / 2 = 27 / 2 = 13.5
result = model.evaluate(inputs, targets, batch_size=2, verbose=1)
self.assertAllClose(result, 13.5)
@combinations.generate(
combinations.combine(distribution=strategies_minus_default_minus_tpu,
mode=['eager']))
def test_numpy_with_sample_weights_eager_with_cloning(self, distribution):
with self.cached_session(), distribution.scope():
model = get_sample_weights_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, cloning=True)
inputs = np.array([[0], [1], [2], [3]], np.float32)
targets = np.array([[2], [4], [6], [8]], np.float32)
sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
with self.assertRaisesRegexp(NotImplementedError,
'`sample_weight` is not supported when '
'using tf.distribute.Strategy in '):
model.evaluate(inputs, targets, batch_size=2,
sample_weight=sample_weights, verbose=1)
@combinations.generate(all_strategy_combinations_plus_cloning())
def test_flatten_predict_outputs(self, distribution, cloning):
with self.cached_session():
with distribution.scope():
model = multi_input_output_model()
# TODO(b/130808953): Re-enable the V1 optimizer after iterations is
# mirrored.
optimizer_fn = (
gradient_descent.GradientDescentOptimizer
if cloning else gradient_descent_keras.SGD)
optimizer = optimizer_fn(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, cloning=cloning)
# We take 6 input samples with each input having a dimension of 3 or 5.
input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32)
inputs = [input_a_np, input_b_np]
outs = model.predict(inputs, steps=1)
# `predict` returns a list equal in length to the number of model outputs.
# In this test our model has two outputs and each element of `outs`
# corresponds to all the samples of one of the model outputs.
self.assertLen(outs, 2)
# Each of the output samples has a dimension of 7. We should process all
# the available input samples (6).
self.assertAllEqual([6, 7], outs[0].shape)
self.assertAllEqual([6, 7], outs[1].shape)
@combinations.generate(
combinations.times(tpu_strategy_combinations_graph_only(),
combinations.combine(batch_size=[4, 6])))
def test_evaluate_with_partial_batch(self, distribution, batch_size):
with self.cached_session():
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
with distribution.scope():
model_with_ds_strategy = get_model()
model_with_ds_strategy.compile(optimizer, loss, metrics=metrics)
cpu_model = get_model()
cpu_model.compile(optimizer, loss, metrics=metrics)
x = np.random.random((10, 3)).astype('float32')
y = np.random.random((10, 4)).astype('float32')
# As the sample size is 10, batching by 4 or 6 makes the last batch
# a partial batch. Also, `evaluate()` with numpy arrays as inputs and no
# distribution strategy uses the entire sample set as a single batch, so
# we omit the `batch_size` and `steps` parameters for the CPU model.
cpu_model.set_weights(model_with_ds_strategy.get_weights())
evaluate_ground_truth = cpu_model.evaluate(x, y)
# We don't compare the loss because loss is currently not computed as a
# metric in Keras; the loss value for the last partial batch is inaccurate
# because its samples are weighted more heavily in the aggregate.
steps = np.ceil(10.0 / batch_size)
self.assertAllClose(
model_with_ds_strategy.evaluate(
x, y, batch_size=batch_size, steps=steps)[1:],
evaluate_ground_truth[1:],
atol=1e-5,
rtol=1e-5)
# Test that `steps` is inferred correctly when final partial batch exists.
self.assertAllClose(
model_with_ds_strategy.evaluate(x, y, batch_size=batch_size)[1:],
evaluate_ground_truth[1:],
atol=1e-5,
rtol=1e-5)
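# Worked example (added commentary, not in the original file): with 10 samples
# and batch_size=4 the batches are 4, 4 and 2, so steps = ceil(10 / 4) = 3;
# with batch_size=6 they are 6 and 4 and steps = ceil(10 / 6) = 2. The partial
# final batch is exactly what the comparisons above exercise.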
@combinations.generate(
combinations.times(tpu_strategy_combinations_graph_only(),
combinations.combine(cloning=[True, False])))
def test_predict_with_partial_batch(self, distribution, cloning):
with self.cached_session():
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
with distribution.scope():
model_with_ds_strategy = get_model()
model_with_ds_strategy.compile(optimizer, loss, cloning=cloning)
cpu_model = get_model()
cpu_model.compile(optimizer, loss)
inputs = np.random.random((10, 3)).astype(np.float32)
# As the sample size is 10, we batch by 4 so that the last batch is
# a partial batch. Also, `predict()` with numpy arrays as inputs and no
# distribution strategy uses the entire sample set as a single batch, so
# we omit the `batch_size` and `steps` parameters for the CPU model.
cpu_model.set_weights(model_with_ds_strategy.get_weights())
predict_ground_truth = cpu_model.predict(inputs)
self.assertAllClose(
model_with_ds_strategy.predict(inputs, batch_size=4, steps=3),
predict_ground_truth,
atol=1e-5,
rtol=1e-5)
# Test that `steps` is inferred correctly when final partial batch exists.
self.assertAllClose(
model_with_ds_strategy.predict(inputs, batch_size=4),
predict_ground_truth,
atol=1e-5,
rtol=1e-5)
@combinations.generate(tpu_strategy_combinations_graph_only())
def test_no_target_model(self, distribution):
with self.cached_session():
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
class MyLayer(keras.layers.Layer):
def call(self, inputs, training=None):
self.add_loss(math_ops.reduce_sum(inputs), inputs=True)
return inputs
with distribution.scope():
model = keras.models.Sequential()
model.add(keras.layers.Dense(16, activation='relu',
input_shape=_INPUT_SIZE))
model.add(MyLayer())
model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))
model.compile(optimizer)
inputs = np.zeros((20, 10), np.float32)
model.fit(inputs, epochs=1, steps_per_epoch=2)
model.predict(inputs, steps=1)
model.evaluate(inputs, steps=1)
@combinations.generate(
combinations.times(tpu_strategy_combinations_graph_only(),
combinations.combine(cloning=[True, False])))
def test_predict_multi_output_model_with_partial_batch(
self, distribution, cloning):
with self.cached_session():
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
with distribution.scope():
model_with_ds_strategy = simple_multi_inputs_multi_outputs_model()
model_with_ds_strategy.compile(optimizer, loss, cloning=cloning)
cpu_model = simple_multi_inputs_multi_outputs_model()
cpu_model.compile(optimizer, loss)
input_data, _ = get_multi_inputs_multi_outputs_data()
input_dict = {
'input_a': input_data['input_a'],
'input_b': input_data['input_b'],
}
# As the sample size is 200, we batch by 18 so that the last batch is
# a partial batch. Also, `predict()` with numpy arrays as inputs and no
# distribution strategy uses the entire sample set as a single batch, so
# we omit the `batch_size` and `steps` parameters for the CPU model.
cpu_model.set_weights(model_with_ds_strategy.get_weights())
self.assertAllClose(
model_with_ds_strategy.predict(input_dict, batch_size=18, steps=12),
cpu_model.predict(input_dict),
atol=1e-4, rtol=1e-4)
class TestDistributionStrategyWithDatasets(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations_plus_cloning())
def test_calling_model_on_same_dataset(self, distribution, cloning):
with self.cached_session():
with distribution.scope():
# TODO(b/130808953): Re-enable the V1 optimizer after iterations is
# mirrored.
optimizer_fn = (
gradient_descent.GradientDescentOptimizer
if cloning else gradient_descent_keras.SGD)
optimizer = optimizer_fn(0.001)
model = get_model()
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, cloning=cloning)
dataset = get_dataset(distribution)
# Call fit with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(all_strategy_combinations_plus_cloning())
def test_model_interleaved_eval_same_as_direct_eval(self, distribution,
cloning):
with self.cached_session():
with distribution.scope():
# TODO(b/130808953): Re-enable the V1 optimizer after iterations is
# mirrored.
optimizer_fn = (
gradient_descent.GradientDescentOptimizer
if cloning else gradient_descent_keras.SGD)
user_controlled_model = get_model()
user_controlled_model.compile(
optimizer_fn(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
cloning=cloning)
interleaved_model = get_model()
interleaved_model.set_weights(user_controlled_model.get_weights())
interleaved_model.compile(
optimizer_fn(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
cloning=cloning)
dataset = get_dataset(distribution)
# Call fit with validation interleaved
interleaved_output = interleaved_model.fit(
dataset, epochs=2, steps_per_epoch=2, verbose=1,
validation_data=dataset, validation_steps=2, shuffle=False)
# Manually control the validation running after each epoch.
user_controlled_output = []
for _ in range(2):
user_controlled_model.fit(
dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)
user_controlled_output.append(
user_controlled_model.evaluate(dataset, steps=2))
self.assertEqual(interleaved_output.history['val_loss'],
[x[0] for x in user_controlled_output])
val_mean_absolute_error = interleaved_output.history.get(
'val_mean_absolute_error')
if not val_mean_absolute_error:
# The name of the metric changed in TF2.0
val_mean_absolute_error = interleaved_output.history['val_mae']
self.assertEqual(val_mean_absolute_error,
[x[1] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
[x[2] for x in user_controlled_output])
# TODO(priyag): Enable this test for TPU. Currently tuples/dicts don't work
# as clone_model's input_tensors argument only seems to accept lists, not
# tuples or dicts.
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=['graph', 'eager'], cloning=[True, False]))
def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution, cloning):
with self.cached_session():
with distribution.scope():
# TODO(b/130808953): Re-enable the V1 optimizer after iterations is
# mirrored.
optimizer_fn = (
gradient_descent.GradientDescentOptimizer
if cloning else gradient_descent_keras.SGD)
optimizer = optimizer_fn(learning_rate=0.001)
model = multi_input_output_model()
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, cloning=cloning)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 5))
output_d_np = np.random.random((10, 7))
output_e_np = np.random.random((10, 7))
# Test with tuples
dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
(input_a_np, input_b_np), (output_d_np, output_e_np)))
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
# Test with dict
dataset_dict = dataset_ops.Dataset.from_tensor_slices((
{'input_a': input_a_np, 'input_b': input_b_np},
(output_d_np, output_e_np)))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
@combinations.generate(all_strategy_combinations_plus_cloning())
def test_fit_eval_and_predict_methods_on_dataset_without_steps(
self, distribution, cloning):
with self.cached_session():
with distribution.scope():
# TODO(b/130808953): Re-enable the V1 optimizer after iterations is
# mirrored.
optimizer_fn = (
gradient_descent.GradientDescentOptimizer
if cloning else gradient_descent_keras.SGD)
optimizer = optimizer_fn(0.001)
model = get_model()
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, cloning=cloning)
inputs = np.zeros((1000, 3), dtype=np.float32)
targets = np.zeros((1000, 4), dtype=np.float32)
# steps/steps_per_epoch are calculated when using numpy arrays as
# input data.
fit_with_numpy = model.fit(inputs, targets, epochs=1,
batch_size=10).history
eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
predict_with_numpy = model.predict(inputs, batch_size=10)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.batch(10, drop_remainder=True)
fit_with_ds = model.fit(dataset, epochs=1).history
eval_with_ds = model.evaluate(dataset)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
predict_dataset = predict_dataset.batch(10, drop_remainder=True)
predict_with_ds = model.predict(predict_dataset)
self.assertAllClose(
fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4)
self.assertAllClose(
eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4)
self.assertAllClose(
predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)
@combinations.generate(
combinations.times(strategy_minus_tpu_combinations(),
combinations.combine(cloning=[True, False])))
def test_on_dataset_with_unknown_cardinality_without_steps(
self, distribution, cloning):
with self.cached_session():
with distribution.scope():
# TODO(b/130808953): Re-enable the V1 optimizer after iterations is
# mirrored.
optimizer_fn = (
gradient_descent.GradientDescentOptimizer
if cloning else gradient_descent_keras.SGD)
optimizer = optimizer_fn(0.001)
model = get_model()
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, cloning=cloning)
inputs = np.zeros((1000, 3), dtype=np.float32)
targets = np.zeros((1000, 4), dtype=np.float32)
# steps/steps_per_epoch are calculated when using numpy arrays as
# input data.
fit_with_numpy = model.fit(inputs, targets, epochs=1,
batch_size=10).history
fit_with_numpy_multiple_epochs = model.fit(
inputs, targets, epochs=2, batch_size=10).history
eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
predict_with_numpy = model.predict(inputs, batch_size=10)
dataset = convert_numpy_to_dataset_with_unknown_cardinality(
inputs, targets)
predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
inputs)
self.assertEqual(keras.backend.get_value(cardinality.cardinality(
dataset)), cardinality.UNKNOWN)
self.assertEqual(keras.backend.get_value(cardinality.cardinality(
predict_dataset)), cardinality.UNKNOWN)
eval_with_ds = model.evaluate(dataset)
predict_with_ds = model.predict(predict_dataset)
self.assertAllClose(
eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4)
self.assertAllClose(
predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)
fit_with_ds = model.fit(dataset,
epochs=1).history
fit_with_ds_multiple_epochs = model.fit(dataset,
epochs=2).history
self.assertAllClose(
fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4)
self.assertAllClose(
fit_with_numpy_multiple_epochs,
fit_with_ds_multiple_epochs, atol=1e-4, rtol=1e-4)
@combinations.generate(
combinations.times(tpu_strategy_combinations(),
combinations.combine(cloning=[True, False])))
def test_on_dataset_with_unknown_cardinality(self, distribution, cloning):
with self.cached_session():
with distribution.scope():
model = get_model()
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
gradient_descent.GradientDescentOptimizer(0.001),
loss,
metrics=metrics,
cloning=cloning)
inputs = np.zeros((1000, 3), dtype=np.float32)
targets = np.zeros((1000, 4), dtype=np.float32)
# steps/steps_per_epoch are calculated when using numpy arrays as
# input data.
eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
predict_with_numpy = model.predict(inputs, batch_size=10)
dataset = convert_numpy_to_dataset_with_unknown_cardinality(
inputs, targets)
predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
inputs)
self.assertEqual(
keras.backend.get_value(cardinality.cardinality(dataset)),
cardinality.UNKNOWN)
self.assertEqual(
keras.backend.get_value(cardinality.cardinality(predict_dataset)),
cardinality.UNKNOWN)
eval_with_ds = model.evaluate(dataset, steps=100)
predict_with_ds = model.predict(predict_dataset, steps=100)
self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4)
self.assertAllClose(
predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)
with self.assertRaisesRegexp(ValueError,
'Number of steps could not be infered'):
model.fit(dataset, epochs=1)
@combinations.generate(all_strategy_combinations_plus_cloning())
def test_fit_eval_and_predict_methods_on_dataset(self, distribution, cloning):
with self.cached_session():
with distribution.scope():
# TODO(b/130808953): Re-enable the V1 optimizer after iterations is
# mirrored.
optimizer_fn = (
gradient_descent.GradientDescentOptimizer
if cloning else gradient_descent_keras.SGD)
optimizer = optimizer_fn(0.001)
model = get_model()
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics, cloning=cloning)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_and_optimizer_combinations())
def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer,
cloning):
with self.cached_session():
with distribution.scope():
model = get_model()
loss = 'mse'
model.compile(optimizer(), loss, cloning=cloning)
dataset = get_dataset(distribution)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.one_device_strategy
],
mode=['graph', 'eager'], cloning=[True, False]))
def test_dataset_wrong_input_shape(self, distribution, cloning, mode):
if cloning or mode == 'graph':
self.skipTest('TODO(b/120943676, b/120957836): Re-enable for cloning=True'
' once the validation code is restored.')
with self.cached_session():
with distribution.scope():
# TODO(b/130808953): Re-enable the V1 optimizer after iterations is
# mirrored.
optimizer_fn = (
rmsprop.RMSPropOptimizer
if cloning else gradient_descent_keras.SGD)
optimizer = optimizer_fn(learning_rate=0.001)
model = get_model()
loss = 'mse'
model.compile(optimizer, loss, cloning=cloning)
# Wrong input shape
inputs = np.zeros((10, 5), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegexp(ValueError,
'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=['graph', 'eager'],
cloning=[True, False]))
def test_dataset_no_batch_input_validation(self, distribution,
cloning, mode):
if cloning or mode == 'graph':
self.skipTest('TODO(b/120943676, b/120957836): Re-enable for cloning=True'
' once the validation code is restored.')
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, cloning=cloning)
# User forgets to batch the dataset
inputs = np.zeros((10, 6), dtype=np.float32)
targets = np.zeros((10, 4), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
with self.assertRaisesRegexp(ValueError, 'Call.*batch.*on.*Dataset'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus
],
mode=['graph', 'eager'], cloning=[True, False]))
def test_learning_phase_value(self, distribution, cloning):
# TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
# meaningful values. Currently we don't pass the learning phase if the
# Lambda layer uses the learning phase.
with self.cached_session():
with distribution.scope():
x = keras.layers.Input(shape=(1,), name='input')
y = keras.layers.Dense(1, kernel_initializer='ones')(x)
z = keras.layers.Dropout(0.9999)(y)
model = keras.Model(x, z)
initial_weights = model.get_weights()
# TODO(b/130808953): Re-enable the V1 optimizer after iterations is
# mirrored.
optimizer_fn = (
gradient_descent.GradientDescentOptimizer
if cloning else gradient_descent_keras.SGD)
optimizer = optimizer_fn(0.005)
loss = 'mse'
metrics = ['acc']
model.compile(optimizer, loss, metrics=metrics, cloning=cloning)
batch_size = 8
if isinstance(distribution, mirrored_strategy.MirroredStrategy):
# MirroredStrategy uses global batch size.
batch_size = 8 * distribution.num_replicas_in_sync
inputs = np.ones((10, 1), dtype=np.float32)
targets = np.ones((10, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat().batch(batch_size)
hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
self.assertAlmostEqual(hist.history['acc'][0], 0, 0)
with distribution.scope():
model.set_weights(initial_weights)
# TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185.
# evaluate_output = model.evaluate(dataset, steps=20)
# self.assertAlmostEqual(evaluate_output[1], 1, 0)
inputs = np.ones((10, 1), dtype=np.float32)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
predict_dataset = predict_dataset.repeat().batch(batch_size)
output = model.predict(predict_dataset, steps=10)
# `predict` runs for 10 steps
ref_output = np.ones((160, 1), dtype=np.float32)
self.assertArrayNear(output, ref_output, 1e-1)
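# Note (added commentary, not in the original file): both strategies in this
# combination run 2 replicas in sync, so the global batch size is 8 * 2 = 16
# and 10 predict steps produce 10 * 16 = 160 rows, matching `ref_output`.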
@combinations.generate(all_strategy_combinations_plus_cloning())
def testOptimizerWithCallbacks(self, distribution, cloning):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = gradient_descent_keras.SGD(0.01)
loss = 'mse'
model.compile(optimizer, loss, cloning=cloning)
dataset = get_dataset(distribution)
def schedule(_):
return 0.001
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
self.assertAllClose(0.001, keras.backend.get_value(model.optimizer.lr))
@combinations.generate(
combinations.times(tpu_strategy_combinations_graph_only(),
combinations.combine(batch_size=[4, 6])))
def test_evaluate_with_dataset_with_partial_batch(self, distribution,
batch_size):
with self.cached_session():
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
with distribution.scope():
model_with_ds_strategy = get_model()
model_with_ds_strategy.compile(optimizer, loss, metrics=metrics)
cpu_model = get_model()
cpu_model.compile(optimizer, loss, metrics=metrics)
x = np.random.random((10, 3)).astype('float32')
y = np.random.random((10, 4)).astype('float32')
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
# As sample size is 10, we make the last batch a partial batch.
cpu_model.set_weights(model_with_ds_strategy.get_weights())
dataset_with_partial_batch = dataset.batch(batch_size)
# We don't compare the loss because loss is currently not computed as a
# metric in Keras; the loss value for the last partial batch is inaccurate
# because its samples are weighted more heavily in the aggregate.
steps = np.ceil(10.0 / batch_size)
self.assertAllClose(
model_with_ds_strategy.evaluate(
dataset_with_partial_batch, steps=steps)[1:],
cpu_model.evaluate(dataset_with_partial_batch, steps=steps)[1:],
atol=1e-5,
rtol=1e-5)
self.assertAllClose(
model_with_ds_strategy.evaluate(dataset_with_partial_batch)[1:],
cpu_model.evaluate(dataset_with_partial_batch)[1:],
atol=1e-5,
rtol=1e-5)
@combinations.generate(
combinations.times(tpu_strategy_combinations_graph_only(),
combinations.combine(cloning=[True, False])))
def test_predict_with_dataset_with_partial_batch(self, distribution, cloning):
with self.cached_session():
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
with distribution.scope():
model_with_ds_strategy = get_model()
model_with_ds_strategy.compile(optimizer, loss, cloning=cloning)
cpu_model = get_model()
cpu_model.compile(optimizer, loss)
inputs = np.random.random((10, 3)).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs))
# As sample size is 10, we batch by 4 so that the last batch is
# a partial batch.
dataset_with_partial_batch = dataset.batch(4)
cpu_model.set_weights(model_with_ds_strategy.get_weights())
self.assertAllClose(
model_with_ds_strategy.predict(dataset_with_partial_batch, steps=3),
cpu_model.predict(dataset_with_partial_batch, steps=3),
atol=1e-5,
rtol=1e-5)
@combinations.generate(
combinations.times(tpu_strategy_combinations_graph_only(),
combinations.combine(cloning=[True, False])))
def test_predict_multi_output_model_with_dataset_with_partial_batch(
self, distribution, cloning):
with self.cached_session():
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
with distribution.scope():
model_with_ds_strategy = simple_multi_inputs_multi_outputs_model()
model_with_ds_strategy.compile(optimizer, loss, cloning=cloning)
cpu_model = simple_multi_inputs_multi_outputs_model()
cpu_model.compile(optimizer, loss)
input_data, _ = get_multi_inputs_multi_outputs_data()
input_dict = {
'input_a': input_data['input_a'],
'input_b': input_data['input_b'],
}
dataset = dataset_ops.Dataset.from_tensor_slices(input_dict)
# As sample size is 200, we batch by 18 using 12 steps per epoch so
# that the last batch is a partial batch.
dataset_with_partial_batch = dataset.batch(18)
cpu_model.set_weights(model_with_ds_strategy.get_weights())
self.assertAllClose(
model_with_ds_strategy.predict(dataset_with_partial_batch, steps=12),
cpu_model.predict(dataset_with_partial_batch, steps=12),
atol=1e-4, rtol=1e-4)
@combinations.generate(all_strategy_combinations_minus_default())
def test_match_model_input_matches_with_dataset_tensors(self, distribution):
def _create_model_input_output_tensors():
input_a = keras.layers.Input(shape=(16,), name='z_input_sorted_last')
input_b = keras.layers.Input(shape=(32,), name='a_input_sorted_first')
intermediate_a = keras.layers.Dense(10)(input_a)
intermediate_b = keras.layers.Dense(10)(input_b)
merged = keras.layers.Add()([intermediate_a, intermediate_b])
output = keras.layers.Dense(2)(merged)
return input_a, input_b, output
input_dict = {
'z_input_sorted_last': np.random.rand(32, 16).astype(np.float32),
'a_input_sorted_first': np.random.rand(32, 32).astype(np.float32)
}
target = np.ones((32, 2), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((input_dict, target))
dataset = dataset.batch(4, drop_remainder=True)
with self.cached_session():
with distribution.scope():
input_a, input_b, output = _create_model_input_output_tensors()
# `input_a`, whose input name comes last in alphanumeric order, is the
# first of the model's input layers. If the tensors from `input_dict`
# were blindly flattened and passed to the model inputs in that order,
# the `input_a` layer would be matched with the tensor
# `a_input_sorted_first`, resulting in a shape mismatch.
model_with_array_input = keras.models.Model(
inputs=[input_a, input_b], outputs=output)
model_with_array_input.compile('sgd', 'mse')
model_weights = model_with_array_input.get_weights()
model_with_array_input_fit = model_with_array_input.fit(
dataset, steps_per_epoch=1, epochs=1).history
input_a, input_b, output = _create_model_input_output_tensors()
model_with_dict_input = keras.models.Model(
inputs={
'z_input_sorted_last': input_a,
'a_input_sorted_first': input_b,
},
outputs=output)
model_with_dict_input.compile('sgd', 'mse')
model_with_dict_input.set_weights(model_weights)
model_with_dict_input_fit = model_with_dict_input.fit(
dataset, steps_per_epoch=1, epochs=1).history
self.assertAllClose(
model_with_dict_input_fit,
model_with_array_input_fit,
atol=1e-4,
rtol=1e-4)
@combinations.generate(
combinations.combine(distribution=strategies_minus_tpu,
mode=['graph', 'eager']))
def test_dataset_with_sample_weights(self, distribution):
with self.cached_session(), distribution.scope():
model = get_sample_weights_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss)
inputs = np.array([[0], [1], [2], [3]], np.float32)
targets = np.array([[2], [4], [6], [8]], np.float32)
sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
ds = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights)).batch(2)
result = model.evaluate(ds, verbose=1)
# The per-sample loss is multiplied by the corresponding sample weight. The
# average of these weighted losses is the return value of the `evaluate`
# call. For example, in the test above the average weighted loss is
# calculated in the following manner:
# batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 = 2.75
# batch_2 = (((6-2)^2 * 0.75) + ((8-3)^2 * 1)) / 2 = 37 / 2 = 18.5
# final result = (batch_1 + batch_2) / 2 = 10.625.
# The first time we divide by number of input samples and the second time
# we divide by number of steps/batches that the loss is aggregated over.
self.assertAllClose(result, 10.625)
# We now test without passing sample_weights:
# batch_1 = (((2-0)^2) + ((4-1)^2)) / 2 = 13 / 2 = 6.5
# batch_2 = (((6-2)^2) + ((8-3)^2)) / 2 = 41 / 2 = 20.5
# final result = (batch_1 + batch_2) / 2 = 27 / 2 = 13.5
ds = dataset_ops.Dataset.from_tensor_slices((inputs, targets)).batch(2)
result = model.evaluate(ds, verbose=1)
self.assertAllClose(result, 13.5)
@combinations.generate(
combinations.combine(distribution=strategies_minus_default_minus_tpu,
mode=['eager']))
def test_dataset_with_sample_weights_eager_with_cloning(self, distribution):
with self.cached_session(), distribution.scope():
model = get_sample_weights_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, cloning=True)
inputs = np.array([[0], [1], [2], [3]], np.float32)
targets = np.array([[2], [4], [6], [8]], np.float32)
sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
ds = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights)).batch(2)
with self.assertRaisesRegexp(NotImplementedError,
'`sample_weight` is not supported when '
'using tf.distribute.Strategy in '):
model.evaluate(ds, verbose=1)
class TestRegularizerLoss(test.TestCase, parameterized.TestCase):
class IdentityRegularizer(keras.regularizers.Regularizer):
def __call__(self, x):
return array_ops.identity(x)
class AddLayer(keras.layers.Layer):
def build(self, _):
self.v = self.add_weight(
'v', (), initializer='ones',
regularizer=TestRegularizerLoss.IdentityRegularizer())
def call(self, inputs):
return inputs + self.v
@staticmethod
def loss_fn(_, y_pred):
return math_ops.reduce_mean(y_pred)
@combinations.generate(
combinations.times(
strategy_combinations.all_strategy_combinations_minus_default(),
combinations.combine(cloning=[True, False])))
def test_regularizer_loss(self, distribution, cloning):
batch_size = 2
if not distributed_training_utils.global_batch_size_supported(distribution):
batch_size //= distribution.num_replicas_in_sync
# Given an input x, which is always 1, and variable v, this model computes
# Loss=x+v+regularizer_loss, where regularizer_loss=v and the variable is
# initialized to 1. Therefore, this model computes Loss=1+2v, and so the
# gradient dLoss/dv = 2. This gradient of 2 is averaged over all examples
# in a batch and then multiplied by the learning rate of 1. As a result,
# the model update for one batch should subtract 2 from v, resulting in v
# being -1. If the regularizer loss is not scaled correctly by number of
# replicas, the variable value will be incorrect when the number of
# replicas is greater than 1; for example, it will be -2 with 2 replicas.
with distribution.scope():
x = keras.layers.Input(shape=(1,), batch_size=batch_size)
y = TestRegularizerLoss.AddLayer()(x)
model = keras.models.Model(inputs=x, outputs=y)
opt = gradient_descent_keras.SGD(1.)
model.compile(opt, loss=TestRegularizerLoss.loss_fn, cloning=cloning)
model.fit(
x=np.array([[1.], [1.]], dtype=np.float32),
y=np.array([[1.], [1.]], dtype=np.float32),
batch_size=batch_size)
v = model.get_weights()[0]
self.assertEqual(-1.0, v)
class TestDistributionStrategyWithKerasModels(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations_plus_cloning())
def test_distribution_strategy_on_sequential_model(self, distribution,
cloning):
with distribution.scope():
# TODO(b/130808953): Re-enable the V1 optimizer after iterations is
# mirrored.
optimizer_fn = (
rmsprop.RMSPropOptimizer if cloning else gradient_descent_keras.SGD)
optimizer = optimizer_fn(learning_rate=0.001)
model = simple_sequential_model()
loss = 'mse'
model.compile(optimizer, loss, cloning=cloning)
inputs = np.zeros((20, 10), np.float32)
targets = np.zeros((20, 2), np.float32)
model.fit(inputs, targets, epochs=1, steps_per_epoch=2)
model.predict(inputs, steps=1)
model.evaluate(inputs, targets, steps=1)
@combinations.generate(all_strategy_combinations_plus_cloning())
def test_distribution_strategy_on_functional_model(self, distribution,
cloning):
with distribution.scope():
# TODO(b/130808953): Re-enable the V1 optimizer after iterations is
# mirrored.
optimizer_fn = (
rmsprop.RMSPropOptimizer if cloning else gradient_descent_keras.SGD)
optimizer = optimizer_fn(learning_rate=0.001)
model = get_model()
loss = 'mse'
model.compile(optimizer, loss, cloning=cloning)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
model.fit(inputs, targets, epochs=1, steps_per_epoch=2)
model.predict(inputs, steps=1)
model.evaluate(inputs, targets, steps=1)
@combinations.generate(
combinations.times(
all_strategy_minus_default_and_tpu_combinations() +
tpu_strategy_combinations(),
combinations.combine(cloning=[True, False])))
def test_distribution_strategy_one_dimensional(self, distribution, cloning):
with distribution.scope():
inp = keras.layers.Input(shape=(10,))
out = keras.layers.Dense(3, activation='softmax')(inp)
model = keras.Model(inputs=[inp], outputs=[out])
model.compile(
optimizer='rmsprop',
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'],
cloning=cloning)
x = np.random.random((64, 10)).astype('float32')
y = np.random.randint(3, size=64)
model.fit(x, y, epochs=1, steps_per_epoch=2)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus
],
mode=['graph', 'eager'],
cloning=[True, False],
reduction=[
loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,
loss_reduction.ReductionV2.SUM
]))
def test_distribution_strategy_with_loss_reduction_types(
self, distribution, cloning, reduction):
np.random.seed(_RANDOM_SEED)
def _get_model():
inputs = keras.Input((10,))
x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1)
outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
model = keras.Model(inputs, outputs)
return model
x = np.random.random((64, 10))
y = np.random.random((64, 1))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.batch(32)
model = _get_model()
model.compile(
'sgd', loss=keras.losses.MeanSquaredError(reduction=reduction))
history = model.fit(dataset, steps_per_epoch=2, epochs=1, shuffle=False)
with distribution.scope():
ds_model = _get_model()
ds_model.compile(
'sgd',
loss=keras.losses.MeanSquaredError(reduction=reduction),
cloning=cloning)
ds_history = ds_model.fit(
dataset, steps_per_epoch=2, epochs=1, shuffle=False)
self.assertArrayNear(history.history['loss'], ds_history.history['loss'],
1e-5)
@combinations.generate(
combinations.times(all_strategy_minus_default_and_tpu_combinations(),
combinations.combine(cloning=[True, False])))
def test_distribution_strategy_with_symbolic_add_loss(self, distribution,
cloning):
def _make_model_with_add_loss():
inputs = keras.Input((10,))
x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1)
outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
model = keras.Model(inputs, outputs)
model.add_loss(math_ops.reduce_mean(x1))
model.add_loss(math_ops.reduce_mean(outputs))
return model
x = np.ones((64, 10)).astype('float32')
model = _make_model_with_add_loss()
model.compile('sgd')
history = model.fit(x, steps_per_epoch=2, epochs=1)
with distribution.scope():
ds_model = _make_model_with_add_loss()
ds_model.compile('sgd', cloning=cloning)
ds_history = ds_model.fit(x, steps_per_epoch=2, epochs=1)
self.assertAllClose(history.history, ds_history.history)
# TODO(omalleyt): Investigate flakiness and re-enable.
@combinations.generate(all_strategy_minus_default_and_tpu_combinations())
def DISABLED_test_distribution_strategy_with_callable_add_loss(
self, distribution):
def _make_model():
inputs = keras.Input((10,))
x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1)
d = keras.layers.Dense(1, kernel_initializer='zeros')
outputs = d(x2)
model = keras.Model(inputs, outputs)
model.add_loss(lambda: 100. * math_ops.reduce_mean(d.kernel))
return model
x = np.ones((64, 10)).astype('float32')
y = np.ones((64, 1)).astype('float32')
model = _make_model()
self.assertLen(model.losses, 1)
model.compile('sgd', 'mse')
history = model.fit(x, y, steps_per_epoch=2, epochs=1)
with distribution.scope():
ds_model = _make_model()
self.assertLen(ds_model.losses, 1)
ds_model.compile('sgd', 'mse')
ds_history = ds_model.fit(x, y, steps_per_epoch=2, epochs=1)
self.assertAllClose(history.history, ds_history.history)
@combinations.generate(
combinations.times(all_strategy_minus_default_and_tpu_combinations(),
combinations.combine(cloning=[True, False])))
def test_distribution_strategy_with_add_metric_in_call(
self, distribution, cloning):
class Bias(keras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_weight(name='bias', initializer='zeros', shape=())
def call(self, inputs):
self.add_metric(
math_ops.reduce_mean(inputs), name='bias', aggregation='mean')
return inputs + self.bias
def _make_model_with_add_metric():
inputs = keras.Input((10,))
x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
x2 = Bias()(x1)
outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
model = keras.Model(inputs, outputs)
return model
x = np.ones((64, 10)).astype('float32')
y = np.ones((64, 1)).astype('float32')
model = _make_model_with_add_metric()
self.assertLen(model.metrics, 1)
model.compile('sgd', 'mse')
history = model.fit(
x,
y,
steps_per_epoch=2,
validation_data=(x, y),
validation_steps=2,
epochs=2)
with distribution.scope():
ds_model = _make_model_with_add_metric()
self.assertLen(ds_model.metrics, 1)
ds_model.compile('sgd', 'mse', cloning=cloning)
ds_history = ds_model.fit(
x,
y,
steps_per_epoch=2,
validation_data=(x, y),
validation_steps=2,
epochs=2)
self.assertLen(ds_model.metrics, 1)
self.assertAllClose(history.history, ds_history.history)
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode=['eager'],
cloning=[False]))
def test_distribution_strategy_with_add_metric_object(self, distribution,
cloning):
class Bias(keras.layers.Layer):
def build(self, input_shape):
self.bias = self.add_weight(name='bias', initializer='zeros', shape=())
self.mean = keras.metrics.Mean(name='mean')
def call(self, inputs):
self.add_metric(self.mean(inputs))
return inputs + self.bias
def _make_model_with_add_metric_object():
inputs = keras.Input((10,))
x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
x2 = Bias()(x1)
outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
model = keras.Model(inputs, outputs)
return model
x = np.ones((64, 10)).astype('float32')
y = np.ones((64, 1)).astype('float32')
model = _make_model_with_add_metric_object()
self.assertLen(model.metrics, 1)
model.compile('sgd', 'mse')
history = model.fit(
x,
y,
steps_per_epoch=2,
validation_data=(x, y),
validation_steps=2,
epochs=2)
with distribution.scope():
ds_model = _make_model_with_add_metric_object()
self.assertLen(ds_model.metrics, 1)
ds_model.compile('sgd', 'mse', cloning=cloning)
ds_history = ds_model.fit(
x,
y,
steps_per_epoch=2,
validation_data=(x, y),
validation_steps=2,
epochs=2)
self.assertLen(ds_model.metrics, 1)
self.assertAllClose(history.history, ds_history.history)
@combinations.generate(
combinations.times(all_strategy_minus_default_and_tpu_combinations(),
combinations.combine(cloning=[True, False])))
def test_distribution_strategy_with_add_metric_outside_call(
self, distribution, cloning):
def _make_model_with_add_metric():
inputs = keras.Input((10,))
x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x1)
model = keras.Model(inputs, outputs)
model.add_metric(
math_ops.reduce_mean(x1), name='mid_mean', aggregation='mean')
return model
x = np.ones((64, 10)).astype('float32')
y = np.ones((64, 1)).astype('float32')
model = _make_model_with_add_metric()
self.assertLen(model.metrics, 1)
model.compile('sgd', 'mse')
history = model.fit(
x,
y,
steps_per_epoch=2,
validation_data=(x, y),
validation_steps=2,
epochs=2)
with distribution.scope():
ds_model = _make_model_with_add_metric()
self.assertLen(ds_model.metrics, 1)
ds_model.compile('sgd', 'mse', cloning=cloning)
ds_history = ds_model.fit(
x,
y,
steps_per_epoch=2,
validation_data=(x, y),
validation_steps=2,
epochs=2)
self.assertLen(ds_model.metrics, 1)
self.assertAllClose(history.history, ds_history.history)
if __name__ == '__main__':
test.main()
| {
"content_hash": "7c5645521f44c835e80a530db43f148c",
"timestamp": "",
"source": "github",
"line_count": 1749,
"max_line_length": 91,
"avg_line_length": 40.81132075471698,
"alnum_prop": 0.6425139046498269,
"repo_name": "alsrgv/tensorflow",
"id": "bd594a8862f6b0f3e2ac9126d931d4c25ebe5b1e",
"size": "72068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/distribute/distribute_strategy_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "755360"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "68001148"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1627121"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "842866"
},
{
"name": "Jupyter Notebook",
"bytes": "1665584"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101157"
},
{
"name": "Objective-C",
"bytes": "104061"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17570"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48843099"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "488241"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
} |
""" Module for producing the Project Summary Report
Note: Much of this code was written by Pontus and lifted from
the SciLifeLab repo
"""
from collections import defaultdict, OrderedDict
import os
from string import ascii_uppercase as alphabets
import ngi_reports.reports
class Report(ngi_reports.reports.BaseReport):
## initialize class and assign basic variables
def __init__(self, LOG, working_dir, **kwargs):
# Initialise the parent class
super(Report, self).__init__(LOG, working_dir, **kwargs)
# general initialization
self.tables_info = defaultdict(dict)
self.report_info = {}
# report name and directory to be created
self.report_dir = os.path.join(working_dir, 'reports')
self.report_basename = ''
self.signature = kwargs.get('signature')
def generate_report_template(self, proj, template, support_email):
## Check and exit if signature not provided
if not self.signature:
            self.LOG.error('It is required to provide a signature/name when generating the \'project_summary\' report, see the -s option in help')
raise SystemExit
else:
self.report_info['signature'] = self.signature
## Helper vars
seq_methods = OrderedDict()
## Get information for the report
self.report_basename = proj.ngi_name
self.report_info['support_email'] = support_email
self.report_info['dates'] = self.get_order_dates(proj.dates)
self.report_info['report_date'] = self.creation_date
self.report_info['accredit'] = self.get_accredit_info(proj.accredited, proj.library_construction, proj.ngi_name)
        ## Get sequencing method for the flowcell
seq_template = '{}) Samples were sequenced on {} ({}) with a {} setup '\
'using {}. The Bcl to FastQ conversion was performed using {} from the CASAVA software '\
'suite. The quality scale used is Sanger / phred33 / Illumina 1.8+.'
## Collect required information for all flowcell run for the project
for fc in proj.flowcells.values():
            ## Sort by the order of reads
run_setup = sorted(fc.run_setup, key=lambda k: k['Number'])
run_setup_text = ''
read_count = 0
index_count = 0
for read in run_setup:
run_setup_text += read['NumCycles']
run_setup_text += 'nt'
if read['IsIndexedRead'] == 'N':
read_count += 1
run_setup_text += '(Read'
run_setup_text += str(read_count)
elif read['IsIndexedRead'] == 'Y':
index_count += 1
run_setup_text += '(Index'
run_setup_text += str(index_count)
if run_setup.index(read) == len(run_setup)-1:
run_setup_text += ')'
else:
run_setup_text += ')-'
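            # Illustrative note (not part of the original code): for a paired-end run with two
            # 8 nt index reads and 151 nt data reads, run_setup_text ends up as
            # '151nt(Read1)-8nt(Index1)-8nt(Index2)-151nt(Read2)'.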
if fc.type == 'NovaSeq6000':
fc_chem = '\'{}\' workflow in \'{}\' mode flowcell'.format(fc.chemistry.get('WorkflowType'), fc.chemistry.get('FlowCellMode'))
elif fc.type == 'NextSeq500':
fc_chem = '\'{}-Output\' chemistry'.format(fc.chemistry.get('Chemistry'))
elif fc.type == 'NextSeq2000':
fc_chem = '\'{}\' flowcell'.format(fc.chemistry.get('Chemistry'))
else:
fc_chem = '\'{}\' chemistry'.format(fc.chemistry.get('Chemistry'))
applicationName = 'MSC' if fc.type == "MiSeq" else fc.seq_software.get('ApplicationName')
seq_software = "{} {}/RTA {}".format(applicationName, fc.seq_software.get('ApplicationVersion'), fc.seq_software.get('RTAVersion'))
tmp_method = seq_template.format('SECTION', fc.type, seq_software, run_setup_text, fc_chem, fc.casava)
## to make sure the sequencing methods are unique
if tmp_method not in list(seq_methods.keys()):
seq_methods[tmp_method] = alphabets[len(list(seq_methods.keys()))]
fc.seq_meth = seq_methods[tmp_method]
## give proper section name for the methods
self.report_info['sequencing_methods'] = "\n\n".join([m.replace('SECTION',seq_methods[m]) for m in seq_methods])
## Check if sequencing info is complete
if not self.report_info['sequencing_methods']:
self.LOG.warn('Sequencing methods may have some missing information, kindly check your inputs.')
###############################################################################
##### Create table text and header explanation from collected information #####
###############################################################################
## sample_info table
unit_magnitude = {'#reads' : '', 'Kreads': ' Thousand','Mreads': ' Million'}
sample_header = ['NGI ID', 'User ID', proj.samples_unit, '≥Q30']
sample_filter = ['ngi_id', 'customer_name', 'total_reads', 'qscore']
self.tables_info['tables']['sample_info'] = self.create_table_text(proj.samples.values(), filter_keys=sample_filter, header=sample_header)
self.tables_info['header_explanation']['sample_info'] = '* _NGI ID:_ Internal NGI sample identifier\n'\
'* _User ID:_ Sample name submitted by user\n'\
'* _{}:_ Total{} reads (or pairs) for a sample\n'\
'* _≥Q30:_ Aggregated percentage of bases that have a quality score ≥ Q30'\
.format(proj.samples_unit, unit_magnitude[proj.samples_unit])
## library_info table
library_header = ['NGI ID', 'Index', 'Lib Prep', 'Avg. FS', 'Lib QC']
library_filter = ['ngi_id', 'barcode', 'label', 'avg_size', 'qc_status']
library_list = []
for s, v in list(proj.samples.items()):
for p in list(v.preps.values()):
p = vars(p)
p['ngi_id'] = s
library_list.append(p)
self.tables_info['tables']['library_info'] = self.create_table_text(sorted(library_list, key=lambda d: d['ngi_id']), filter_keys=library_filter, header=library_header)
self.tables_info['header_explanation']['library_info'] = '* _NGI ID:_ Internal NGI sample identifier\n'\
'* _Index:_ Barcode sequence used for the sample\n'\
'* _Lib. Prep:_ NGI library identifier. The first library prep will be marked "A", the second "B" and so on.\n'\
'* _Avg. FS:_ Average fragment size of the library\n'\
'* _Lib. QC:_ Library quality control status\n'
## lanes_info table
lanes_header = ['Date', 'FC id', 'Lane', 'Cluster(M)', 'Phix', '≥Q30(%)', 'Method']
lanes_filter = ['date', 'name', 'id', 'cluster', 'phix', 'avg_qval', 'seq_meth']
lanes_list = []
for f, v in list(proj.flowcells.items()):
for l in list(v.lanes.values()):
l = vars(l)
l['date'] = v.date
l['name'] = v.name
l['seq_meth'] = v.seq_meth
lanes_list.append(l)
self.tables_info['tables']['lanes_info'] = self.create_table_text(sorted(lanes_list, key=lambda d: '{}_{}'.format(d['date'],d['id'])), filter_keys=lanes_filter, header=lanes_header)
self.tables_info['header_explanation']['lanes_info'] = '* _Date:_ Date of sequencing\n'\
'* _Flowcell:_ Flowcell identifier\n'\
'* _Lane:_ Flowcell lane number\n'\
'* _Clusters:_ Number of clusters that passed the read filters (millions)\n'\
'* _≥Q30:_ Aggregated percentage of bases that have a quality score ≥ Q30\n'\
'* _PhiX:_ Average PhiX error rate for the lane\n'\
'* _Method:_ Sequencing method used. See description under Sequencing heading above.\n'
# Make the file basename
output_bn = os.path.realpath(os.path.join(self.working_dir, self.report_dir, '{}_project_summary'.format(self.report_basename)))
# Parse the template
try:
md = template.render(project=proj, tables=self.tables_info['header_explanation'], report_info=self.report_info)
return {output_bn: md}
except:
self.LOG.error('Could not parse the project_summary template')
raise
#####################################################
##### Helper methods to get certain information #####
#####################################################
def create_table_text(self, ip, filter_keys=None, header=None, sep='\t'):
""" Create a single text string that will be saved in a file in TABLE format
from given dict and filtered based upon mentioned header.
:param dict/list ip: Input dictionary/list to be convertead as table string
:param list filter: A list of keys that will be used to filter the ip_dict
:param list header: A list that will be used as header
:param str sep: A string that will be used as separator
"""
op_string = []
if isinstance(ip, dict):
ip = list(ip.values())
if header:
op_string.append(sep.join(header))
if not filter_keys:
filter_keys = []
for i in ip:
filter_keys.extend(list(i.keys()))
filter_keys = sorted(list(set(filter_keys)))
for i in ip:
row = []
for k in filter_keys:
if type(i) is dict:
row.append(i.get(k,'NA'))
else:
row.append(getattr(i, k, 'NA'))
row = list(map(str, row))
op_string.append(sep.join(row))
return '\n'.join(op_string)
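    # Illustrative example (not in the original code): create_table_text([{'a': 1, 'b': 2}],
    # filter_keys=['a', 'b'], header=['A', 'B']) returns the tab-separated string 'A\tB\n1\t2'.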
def get_order_dates(self, project_dates):
""" Get order dates as a markdown string. Ignore if unavailable
"""
dates = []
for item in project_dates:
if project_dates.get(item):
dates.append('_{}:_ {}'.format(item.replace('_', ' ').capitalize(), project_dates[item]))
return ', '.join(dates)
def get_accredit_info(self, accredit_dict, library_construction, proj_name):
"""Get swedac accreditation info for given step 'k'
:param Project proj: Project object containing details of the relevant project
"""
accredit_info = {}
for key in accredit_dict:
accredit = accredit_dict[key]
            ## For "finished library" projects, set certain accreditation steps as "NA" even if not set by default
if key in ['library_preparation','data_analysis'] and library_construction == 'Library was prepared by user.':
accredit_info[key] = 'Not Applicable'
elif accredit in ['Yes','No']:
accredit_info[key] = '{} under ISO/IEC 17025'.format(['[cross] Not accredited','[tick] Accredited'][accredit == 'Yes'])
elif accredit == 'N/A':
accredit_info[key] = 'Not Applicable'
else:
                self.LOG.error('Accreditation step {} for project {} was found, but no valid value is set'.format(key, proj_name))
return accredit_info
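    # Illustrative example (not in the original code): for a hypothetical
    # accredit_dict = {'library_preparation': 'Yes', 'data_analysis': 'N/A'} and a library
    # construction other than 'Library was prepared by user.', this returns
    # {'library_preparation': '[tick] Accredited under ISO/IEC 17025', 'data_analysis': 'Not Applicable'}.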
    # Generate TXT files for the tables
def create_txt_files(self, op_dir=None):
""" Generate the CSV files for mentioned tables i.e. a dictionary with table name as key,
which will be used as file name and the content of file in single string as value to
put in the TXT file
:param str op_dir: Path where the TXT files should be created, current dir is default
"""
for tb_nm, tb_cont in list(self.tables_info['tables'].items()):
op_fl = '{}_{}.txt'.format(self.report_basename, tb_nm)
if op_dir:
op_fl = os.path.join(op_dir, op_fl)
with open(op_fl, 'w') as TXT:
TXT.write(tb_cont)
| {
"content_hash": "c1315132fe488177e4063d4008c42009",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 189,
"avg_line_length": 52.74688796680498,
"alnum_prop": 0.5313089993706733,
"repo_name": "NationalGenomicsInfrastructure/ngi_reports",
"id": "18eb547a209bba7a5a57b8771f6119b573c94c3f",
"size": "12747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ngi_reports/reports/project_summary.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "87274"
},
{
"name": "Python",
"bytes": "58486"
},
{
"name": "TeX",
"bytes": "12112"
}
],
"symlink_target": ""
} |
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
def build_dict(file_name):
dict = {}
f = open(file_name, 'rU')
for line in f:
list = line.split()
for word in list:
if word.lower() in dict:
dict[word.lower()] = dict[word.lower()] + 1
else:
dict[word.lower()] = 1
f.close()
return dict
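# Example (illustrative, not part of the original exercise): for a file containing
# "The the cat", build_dict returns {'the': 2, 'cat': 1}.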
def print_words(file_name):
dict = build_dict(file_name)
for k, v in sorted(dict.items()): print k, ' ', v
def get_count(word_count_tuple):
return word_count_tuple[1]
def print_top(file_name):
dict = build_dict(file_name)
sortedlist = sorted(dict.items(), key=get_count, reverse=True)
for k, v in sortedlist[:20]: print k, ' ', v
###
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
if len(sys.argv) != 3:
print 'usage: ./wordcount.py {--count | --topcount} file'
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
else:
print 'unknown option: ' + option
sys.exit(1)
if __name__ == '__main__':
main()
| {
"content_hash": "17777c47c14c58b3812be7b0bdf3843e",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 29.658823529411766,
"alnum_prop": 0.6925823086076953,
"repo_name": "custom22/google-python-exercises",
"id": "5aaac4724d191edd4cc35e3afc86c5d8e8c092c4",
"size": "2752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic/wordcount.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "653443"
},
{
"name": "Python",
"bytes": "60069"
}
],
"symlink_target": ""
} |
from preggy import expect
from tests.base import FilterTestCase
class QualityFilterTestCase(FilterTestCase):
def test_quality_filter(self):
image = self.get_filtered('source.jpg', 'thumbor.filters.quality', 'quality(10)')
expected = self.get_fixture('quality-10%.jpg')
expect(self.context.request.quality).to_equal(10)
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.99)
| {
"content_hash": "49c01966416f39c5f6ee0082de578943",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 34.07692307692308,
"alnum_prop": 0.6952595936794582,
"repo_name": "Bladrak/thumbor",
"id": "565b4d9b39909895a90b94f4ff61b507b78190eb",
"size": "695",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/filters/test_quality.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "52290"
},
{
"name": "Makefile",
"bytes": "1696"
},
{
"name": "Python",
"bytes": "490416"
}
],
"symlink_target": ""
} |
'''@file multi_target_dummy_processor.py
contains the MultiTargetDummyProcessor class'''
import os
import subprocess
import StringIO
import scipy.io.wavfile as wav
import numpy as np
import processor
from nabu.processing.feature_computers import feature_computer_factory
import pdb
class MultiTargetDummyProcessor(processor.Processor):
    '''A processor for audio files; it computes the multiple targets'''
def __init__(self, conf, segment_lengths):
'''MultiTargetDummyProcessor constructor
Args:
conf: MultiTargetDummyProcessor configuration as a dict of strings
segment_lengths: A list containing the desired lengths of segments.
Possibly multiple segment lengths'''
#create the feature computer
self.comp = feature_computer_factory.factory(conf['feature'])(conf)
#set the length of the segments. Possibly multiple segment lengths
self.segment_lengths = segment_lengths
#initialize the metadata
self.nrS = int(conf['nrs'])
self.target_dim = self.comp.get_dim()
self.nontime_dims=[self.target_dim,self.nrS]
super(MultiTargetDummyProcessor, self).__init__(conf)
def __call__(self, dataline):
'''process the data in dataline
Args:
dataline: either a path to a wav file or a command to read and pipe
an audio file
Returns:
segmented_data: The segmented targets as a list of numpy arrays per segment length
utt_info: some info on the utterance'''
utt_info= dict()
targets = None
for ind in range(self.nrS):
#read the wav file
rate, utt = _read_wav(dataline)
#compute the features
features = self.comp(utt, rate)
features = np.expand_dims(features, 2)
if targets is None:
targets = features
else:
targets = np.append(targets,features,2)
# split the data for all desired segment lengths
segmented_data = self.segment_data(targets)
return segmented_data, utt_info
def write_metadata(self, datadir):
'''write the processor metadata to disk
Args:
dir: the directory where the metadata should be written'''
for i,seg_length in enumerate(self.segment_lengths):
seg_dir = os.path.join(datadir,seg_length)
with open(os.path.join(seg_dir, 'nrS'), 'w') as fid:
fid.write(str(self.nrS))
with open(os.path.join(seg_dir, 'dim'), 'w') as fid:
fid.write(str(self.target_dim))
with open(os.path.join(seg_dir, 'nontime_dims'), 'w') as fid:
fid.write(str(self.nontime_dims)[1:-1])
def _read_wav(wavfile):
'''
read a wav file
Args:
wavfile: either a path to a wav file or a command to read and pipe
an audio file
Returns:
- the sampling rate
- the utterance as a numpy array
'''
if os.path.exists(wavfile):
#its a file
(rate, utterance) = wav.read(wavfile)
elif wavfile[-1] == '|':
#its a command
#read the audio file
pid = subprocess.Popen(wavfile + ' tee', shell=True,
stdout=subprocess.PIPE)
output, _ = pid.communicate()
output_buffer = StringIO.StringIO(output)
(rate, utterance) = wav.read(output_buffer)
else:
#its a segment of an utterance
split = wavfile.split(' ')
begin = float(split[-2])
end = float(split[-1])
unsegmented = ' '.join(split[:-2])
rate, full_utterance = _read_wav(unsegmented)
utterance = full_utterance[int(begin*rate):int(end*rate)]
return rate, utterance
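# Illustrative note (not part of the original module): a segment dataline such as
# 'utt1.wav 1.0 2.5' (a hypothetical file name) makes _read_wav return only the samples
# between 1.0 s and 2.5 s of that utterance.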
| {
"content_hash": "bf51043f67c4f7d895617809678687da",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 94,
"avg_line_length": 31.025,
"alnum_prop": 0.6226161697555734,
"repo_name": "JeroenZegers/Nabu-MSSS",
"id": "4b030cbdbdc318395ea67fe3015391f3981f0673",
"size": "3723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nabu/processing/processors/multi_target_dummy_processor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "981104"
},
{
"name": "Shell",
"bytes": "4125"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Observation.send'
db.add_column('notification_observation', 'send',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Observation.send'
db.delete_column('notification_observation', 'send')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'notification.notice': {
'Meta': {'ordering': "['-added']", 'object_name': 'Notice'},
'added': ('django.db.models.fields.DateTimeField', [], {}),
'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'data': ('picklefield.fields.PickledObjectField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notice_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notification.NoticeType']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'unseen': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'notification.noticesetting': {
'Meta': {'unique_together': "(('user', 'notice_type', 'medium'),)", 'object_name': 'NoticeSetting'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'medium': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'notice_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notification.NoticeType']"}),
'send': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'notification.noticetype': {
'Meta': {'object_name': 'NoticeType'},
'default': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'notification.observation': {
'Meta': {'ordering': "['-added']", 'object_name': 'Observation'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notice_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notification.NoticeType']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'send': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
    complete_apps = ['notification']
| {
"content_hash": "138c7c3721bed5e83b416b306c19626e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 182,
"avg_line_length": 67.79591836734694,
"alnum_prop": 0.5552378085490668,
"repo_name": "arctelix/django-notification-automated",
"id": "c6692ba46cdb9f13fa36d0e69095a9a9e6aafa84",
"size": "6668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notification/migrations/0003_auto__add_field_observation_send.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108807"
}
],
"symlink_target": ""
} |
import pickle
from django.conf import settings
from django_redis.serializers.pickle import PickleSerializer
from testil import eq
def test_highest_protocol():
assert pickle.HIGHEST_PROTOCOL <= 5, """
    The highest pickle protocol supported by Python at time of writing
this test is 5. Support for newer protocols must be added or the
default version used by libraries such as django_redis must be
limited to 5 or less so pickles written by a newer Python can be
read by an older Python after a downgrade.
"""
def test_pickle_5():
eq(pickle.loads(b'\x80\x05\x89.'), False)
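    # The bytes above are False pickled with protocol 5: b'\x80\x05' is the PROTO 5 header,
    # b'\x89' is the NEWFALSE opcode and b'.' is STOP.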
def test_dump_and_load_all_protocols():
def test(protocol):
eq(pickle.loads(pickle.dumps(False, protocol=protocol)), False)
for protocol in range(1, pickle.HIGHEST_PROTOCOL + 1):
yield test, protocol
def test_django_redis_protocol():
# Override default pickle protocol to allow smoother Python upgrades.
# Heroics like this will not be necessary once we have upgraded to a
# version of django_redis that uses pickle.DEFAULT_PROTOCOL. See:
# https://github.com/jazzband/django-redis/issues/547
# https://github.com/jazzband/django-redis/pull/555
#
# This test may be removed after upgrading django_redis.
# In the mean time, test for effective protocol override in settings.py
pkl = PickleSerializer(settings.CACHES['default'].get("OPTIONS", {}))
eq(pkl.dumps(False)[1], pickle.DEFAULT_PROTOCOL)
| {
"content_hash": "8120429fb06d8f7e0df611ca0a7eae5e",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 75,
"avg_line_length": 37.225,
"alnum_prop": 0.7118871725990598,
"repo_name": "dimagi/commcare-hq",
"id": "ff72f8866bbe887c5eae3e97ab5df7c953dd58ef",
"size": "1489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/tests/test_pickle.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
long_description = open(
os.path.join(
os.path.dirname(__file__),
'README.rst'
)
).read()
setup(
name='prompt_toolkit',
author='Jonathan Slenders',
version='0.52',
license='LICENSE.txt',
url='https://github.com/jonathanslenders/python-prompt-toolkit',
description='Library for building powerful interactive command lines in Python',
long_description=long_description,
packages=find_packages('.'),
install_requires = [
'pygments',
'six>=1.9.0',
'wcwidth',
],
)
| {
"content_hash": "9e6a875a4153c5f6e3bf60d0485f1302",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 84,
"avg_line_length": 22.444444444444443,
"alnum_prop": 0.6287128712871287,
"repo_name": "ddalex/python-prompt-toolkit",
"id": "c3640d7386867d870ff2bd64ab59cad535d2f91c",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "577643"
}
],
"symlink_target": ""
} |
import os,uuid,sys
import requests,subprocess,shutil
from pprint import pprint
from KBaseReport.KBaseReportClient import KBaseReport
from biokbase.workspace.client import Workspace as workspaceService
from ReadsUtils.ReadsUtilsClient import ReadsUtils
#END_HEADER
class kb_fastqc:
'''
Module Name:
kb_fastqc
Module Description:
A KBase module: kb_fastqc
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "1.0.1"
GIT_URL = "https://github.com/rsutormin/kb_fastqc"
GIT_COMMIT_HASH = "82f3adf025cccb35b247652439709a64a78ca74b"
#BEGIN_CLASS_HEADER
def _get_input_file_ref_from_params(self, params):
if 'input_file_ref' in params:
return params['input_file_ref']
else:
if 'input_ws' not in params and 'input_file' not in params:
                raise ValueError('Either the "input_file_ref" field or the "input_ws" with ' +
'"input_file" fields must be set.')
return str(params['input_ws']) + '/' + str(params['input_file'])
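        # Illustrative example (not in the original code): hypothetical params such as
        # {'input_ws': 'MyWorkspace', 'input_file': 'my_reads'} yield 'MyWorkspace/my_reads'.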
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
self.workspaceURL = config['workspace-url']
self.scratch = os.path.abspath(config['scratch'])
self.callback_url = os.environ['SDK_CALLBACK_URL']
#END_CONSTRUCTOR
pass
def runFastQC(self, ctx, input_params):
"""
:param input_params: instance of type "FastQCParams" -> structure:
parameter "input_ws" of String, parameter "input_file" of String,
parameter "input_file_ref" of String
:returns: instance of type "FastQCOutput" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: reported_output
#BEGIN runFastQC
token = ctx['token']
wsClient = workspaceService(self.workspaceURL, token=token)
headers = {'Authorization': 'OAuth '+token}
uuid_string = str(uuid.uuid4())
read_file_path=self.scratch+"/"+uuid_string
os.mkdir(read_file_path)
input_file_ref = self._get_input_file_ref_from_params(input_params)
library=None
try:
library = wsClient.get_objects2({'objects': [{'ref': input_file_ref}]})['data'][0]
except Exception as e:
raise ValueError('Unable to get read library object from workspace: (' + input_file_ref + ')' + str(e))
download_read_params = {'read_libraries': [], 'interleaved':"false"}
if("SingleEnd" in library['info'][2] or "PairedEnd" in library['info'][2]):
download_read_params['read_libraries'].append(library['info'][7]+"/"+library['info'][1])
elif("SampleSet" in library['info'][2]):
for sample_id in library['data']['sample_ids']:
if("/" in sample_id):
download_read_params['read_libraries'].append(sample_id)
else:
if(sample_id.isdigit()):
download_read_params['read_libraries'].append(library['info'][6]+"/"+sample_id)
else:
download_read_params['read_libraries'].append(library['info'][7]+"/"+sample_id)
ru = ReadsUtils(os.environ['SDK_CALLBACK_URL'])
ret = ru.download_reads(download_read_params)
read_file_list=list()
for file in ret['files']:
files = ret['files'][file]['files']
fwd_name=files['fwd'].split('/')[-1]
fwd_name=fwd_name.replace('.gz','')
shutil.move(files['fwd'],os.path.join(read_file_path, fwd_name))
read_file_list.append(os.path.join(read_file_path, fwd_name))
if(files['rev'] is not None):
rev_name=files['rev'].split('/')[-1]
rev_name=rev_name.replace('.gz','')
shutil.move(files['rev'],os.path.join(read_file_path, rev_name))
read_file_list.append(os.path.join(read_file_path, rev_name))
subprocess.check_output(["fastqc"]+read_file_list)
report = "Command run: "+" ".join(["fastqc"]+read_file_list)
output_html_files = list()
output_zip_files = list()
first_file=""
html_string = ""
html_count = 0
with open('/kb/data/index_start.txt', 'r') as start_file:
html_string=start_file.read()
#Make HTML folder
os.mkdir(os.path.join(read_file_path, 'html'))
for file in os.listdir(read_file_path):
label=".".join(file.split(".")[1:])
if(file.endswith(".zip")):
output_zip_files.append({'path' : os.path.join(read_file_path,file),
'name' : file,
'label' : label,
'description' : 'Zip file generated by fastqc that contains ' + \
'original images seen in the report'})
if(file.endswith(".html")):
#Move html into html folder
shutil.move(os.path.join(read_file_path,file),os.path.join(read_file_path,'html',file))
# file = os.path.join('html',file)
if(first_file==""):
first_file=file
html_string+=" <button data-button=\"page "+str(html_count) + \
"\" data-page=\""+file+"\">Page "+str(html_count+1)+"</button>\n"
html_count+=1
html_string+=" </div> </div> <div id=\"body\">\n <iframe id=\"content\" " + \
"style=\"width: 100%; border: none; \" src=\""+first_file+"\"></iframe>\n </div>"
output_html_files.append({'path' : os.path.join(read_file_path,'html'),
'name' : 'html files',
'label' : 'html files',
'description' : 'HTML files generated by fastqc that ' + \
'contains report on quality of reads'})
with open('/kb/data/index_end.txt', 'r') as end_file:
html_string+=end_file.read()
with open(os.path.join(read_file_path,'html',"index.html"),'w') as index_file:
index_file.write(html_string)
# output_html_files.append({'path' : read_file_path+"/index.html",
# 'name' : "index.html",
# 'label' : "index.html",
# 'description' : 'HTML file generated by fastqc that contains report on quality of reads'})
report_params = { 'objects_created' : [],
# 'message' : report,
'direct_html' : html_string,
# 'direct_html_link_index' : 1,
'file_links' : output_zip_files,
'html_links' : output_html_files,
'workspace_name' : input_params['input_ws'],
'report_object_name' : 'kb_fastqc_report_' + uuid_string }
kbase_report_client = KBaseReport(self.callback_url, token=token)
output = kbase_report_client.create_extended_report(report_params)
reported_output = { 'report_name': output['name'], 'report_ref': output['ref'] }
#Remove temp reads directory
shutil.rmtree(read_file_path, ignore_errors=True)
#END runFastQC
# At some point might do deeper type checking...
if not isinstance(reported_output, dict):
raise ValueError('Method runFastQC return value ' +
'reported_output is not type dict as required.')
# return the results
return [reported_output]
def status(self, ctx):
#BEGIN_STATUS
returnVal = {'state': "OK", 'message': "", 'version': self.VERSION,
'git_url': self.GIT_URL, 'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
| {
"content_hash": "75df2c0f6d888073ac700fbd06bf1bca",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 125,
"avg_line_length": 44.723958333333336,
"alnum_prop": 0.5384884127168976,
"repo_name": "samseaver/kb_fastqc",
"id": "52298809c282be58f5e5c9a38102648d53cd9532",
"size": "8625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/kb_fastqc/kb_fastqcImpl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2063"
},
{
"name": "Java",
"bytes": "11641"
},
{
"name": "JavaScript",
"bytes": "4619"
},
{
"name": "Makefile",
"bytes": "2870"
},
{
"name": "Perl",
"bytes": "10925"
},
{
"name": "Python",
"bytes": "117869"
},
{
"name": "Ruby",
"bytes": "405"
},
{
"name": "Shell",
"bytes": "1665"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_events', '0014_eventbase_human_times'),
('fluent_contents', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TodaysOccurrences',
fields=[
('contentitem_ptr', models.OneToOneField(to='fluent_contents.ContentItem', primary_key=True, auto_created=True, parent_link=True, serialize=False)),
('include_finished', models.BooleanField(default=False, help_text=b'include occurrences that have already finished today')),
('types_to_show', models.ManyToManyField(blank=True, db_table=b'ik_todays_occurrences_types', to='icekit_events.EventType', help_text=b'Leave empty to show all events.')),
],
options={
'verbose_name': "Today's events",
'db_table': 'contentitem_ik_events_todays_occurrences_todaysoccurrences',
},
bases=('fluent_contents.contentitem',),
),
]
| {
"content_hash": "9ed36898bea24f64859576926bf30938",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 187,
"avg_line_length": 41.44444444444444,
"alnum_prop": 0.6210902591599643,
"repo_name": "ic-labs/icekit-events",
"id": "602ba23ffc8436dfacfb10381710beaa0ddc6ea3",
"size": "1143",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "icekit_events/plugins/todays_occurrences/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1334"
},
{
"name": "HTML",
"bytes": "19090"
},
{
"name": "JavaScript",
"bytes": "1759"
},
{
"name": "Python",
"bytes": "208757"
}
],
"symlink_target": ""
} |
"""Implementation of an S3-like storage server based on local files.
Useful to test features that will eventually run on S3, or if you want to
run something locally that was once running on S3.
We don't support all the features of S3, but it does work with the
standard S3 client for the most basic semantics. To use the standard
S3 client with this module:
c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
is_secure=False)
c.create_bucket("mybucket")
c.put("mybucket", "mykey", "a value")
print c.get("mybucket", "mykey").body
"""
import bisect
import datetime
import hashlib
import os
import os.path
import urllib
from tornado import escape
from tornado import httpserver
from tornado import ioloop
from tornado import web
from tornado.util import unicode_type
from tornado.options import options, define
try:
long
except NameError:
long = int
define("port", default=9888, help="TCP port to listen on")
define("root_directory", default="/tmp/s3", help="Root storage directory")
define("bucket_depth", default=0, help="Bucket file system depth limit")
def start(port, root_directory, bucket_depth):
"""Starts the mock S3 server on the given port at the given path."""
application = S3Application(root_directory, bucket_depth)
http_server = httpserver.HTTPServer(application)
http_server.listen(port)
ioloop.IOLoop.current().start()
class S3Application(web.Application):
"""Implementation of an S3-like storage server based on local files.
If bucket depth is given, we break files up into multiple directories
to prevent hitting file system limits for number of files in each
directories. 1 means one level of directories, 2 means 2, etc.
"""
def __init__(self, root_directory, bucket_depth=0):
web.Application.__init__(self, [
(r"/", RootHandler),
(r"/([^/]+)/(.+)", ObjectHandler),
(r"/([^/]+)/", BucketHandler),
])
self.directory = os.path.abspath(root_directory)
if not os.path.exists(self.directory):
os.makedirs(self.directory)
self.bucket_depth = bucket_depth
class BaseRequestHandler(web.RequestHandler):
SUPPORTED_METHODS = ("PUT", "GET", "DELETE")
def render_xml(self, value):
assert isinstance(value, dict) and len(value) == 1
self.set_header("Content-Type", "application/xml; charset=UTF-8")
name = list(value.keys())[0]
parts = []
parts.append('<' + name +
' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
self._render_parts(value[name], parts)
parts.append('</' + name + '>')
self.finish('<?xml version="1.0" encoding="UTF-8"?>\n' +
''.join(parts))
def _render_parts(self, value, parts=[]):
if isinstance(value, (unicode_type, bytes)):
parts.append(escape.xhtml_escape(value))
elif isinstance(value, (int, long)):
parts.append(str(value))
elif isinstance(value, datetime.datetime):
parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
elif isinstance(value, dict):
for name, subvalue in value.items():
if not isinstance(subvalue, list):
subvalue = [subvalue]
for subsubvalue in subvalue:
parts.append('<' + name + '>')
self._render_parts(subsubvalue, parts)
parts.append('</' + name + '>')
else:
raise Exception("Unknown S3 value type %r", value)
def _object_path(self, bucket, object_name):
if self.application.bucket_depth < 1:
return os.path.abspath(os.path.join(
self.application.directory, bucket, object_name))
hash = hashlib.md5(object_name).hexdigest()
path = os.path.abspath(os.path.join(
self.application.directory, bucket))
for i in range(self.application.bucket_depth):
path = os.path.join(path, hash[:2 * (i + 1)])
return os.path.join(path, object_name)
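        # Illustrative example (not in the original demo): with bucket_depth=2 and an object
        # whose md5 hex digest starts with 'ab12', the object is stored under
        # <root>/<bucket>/ab/ab12/<object_name>.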
class RootHandler(BaseRequestHandler):
def get(self):
names = os.listdir(self.application.directory)
buckets = []
for name in names:
path = os.path.join(self.application.directory, name)
info = os.stat(path)
buckets.append({
"Name": name,
"CreationDate": datetime.datetime.utcfromtimestamp(
info.st_ctime),
})
self.render_xml({"ListAllMyBucketsResult": {
"Buckets": {"Bucket": buckets},
}})
class BucketHandler(BaseRequestHandler):
def get(self, bucket_name):
prefix = self.get_argument("prefix", u"")
marker = self.get_argument("marker", u"")
max_keys = int(self.get_argument("max-keys", 50000))
path = os.path.abspath(os.path.join(self.application.directory,
bucket_name))
terse = int(self.get_argument("terse", 0))
if not path.startswith(self.application.directory) or \
not os.path.isdir(path):
raise web.HTTPError(404)
object_names = []
for root, dirs, files in os.walk(path):
for file_name in files:
object_names.append(os.path.join(root, file_name))
skip = len(path) + 1
for i in range(self.application.bucket_depth):
skip += 2 * (i + 1) + 1
object_names = [n[skip:] for n in object_names]
object_names.sort()
contents = []
start_pos = 0
if marker:
start_pos = bisect.bisect_right(object_names, marker, start_pos)
if prefix:
start_pos = bisect.bisect_left(object_names, prefix, start_pos)
truncated = False
for object_name in object_names[start_pos:]:
if not object_name.startswith(prefix):
break
if len(contents) >= max_keys:
truncated = True
break
object_path = self._object_path(bucket_name, object_name)
c = {"Key": object_name}
if not terse:
info = os.stat(object_path)
c.update({
"LastModified": datetime.datetime.utcfromtimestamp(
info.st_mtime),
"Size": info.st_size,
})
contents.append(c)
marker = object_name
self.render_xml({"ListBucketResult": {
"Name": bucket_name,
"Prefix": prefix,
"Marker": marker,
"MaxKeys": max_keys,
"IsTruncated": truncated,
"Contents": contents,
}})
def put(self, bucket_name):
path = os.path.abspath(os.path.join(
self.application.directory, bucket_name))
if not path.startswith(self.application.directory) or \
os.path.exists(path):
raise web.HTTPError(403)
os.makedirs(path)
self.finish()
def delete(self, bucket_name):
path = os.path.abspath(os.path.join(
self.application.directory, bucket_name))
if not path.startswith(self.application.directory) or \
not os.path.isdir(path):
raise web.HTTPError(404)
if len(os.listdir(path)) > 0:
raise web.HTTPError(403)
os.rmdir(path)
self.set_status(204)
self.finish()
class ObjectHandler(BaseRequestHandler):
def get(self, bucket, object_name):
object_name = urllib.unquote(object_name)
path = self._object_path(bucket, object_name)
if not path.startswith(self.application.directory) or \
not os.path.isfile(path):
raise web.HTTPError(404)
info = os.stat(path)
self.set_header("Content-Type", "application/unknown")
self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp(
info.st_mtime))
object_file = open(path, "rb")
try:
self.finish(object_file.read())
finally:
object_file.close()
def put(self, bucket, object_name):
object_name = urllib.unquote(object_name)
bucket_dir = os.path.abspath(os.path.join(
self.application.directory, bucket))
if not bucket_dir.startswith(self.application.directory) or \
not os.path.isdir(bucket_dir):
raise web.HTTPError(404)
path = self._object_path(bucket, object_name)
if not path.startswith(bucket_dir) or os.path.isdir(path):
raise web.HTTPError(403)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
object_file = open(path, "w")
object_file.write(self.request.body)
object_file.close()
self.finish()
def delete(self, bucket, object_name):
object_name = urllib.unquote(object_name)
path = self._object_path(bucket, object_name)
if not path.startswith(self.application.directory) or \
not os.path.isfile(path):
raise web.HTTPError(404)
os.unlink(path)
self.set_status(204)
self.finish()
if __name__ == "__main__":
options.parse_command_line()
start(options.port, options.root_directory, options.bucket_depth)
| {
"content_hash": "160afc723cf5dfb27f26c84676213371",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 76,
"avg_line_length": 36.8828125,
"alnum_prop": 0.5883287439101885,
"repo_name": "hhru/tornado",
"id": "4e85794461ecf209d99eaa792ebb46414683bba3",
"size": "10017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/s3server/s3server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1664"
},
{
"name": "HTML",
"bytes": "25"
},
{
"name": "Python",
"bytes": "1625160"
},
{
"name": "Ruby",
"bytes": "1428"
},
{
"name": "Shell",
"bytes": "4070"
}
],
"symlink_target": ""
} |
import os
import StringIO
import cProfile
from datetime import datetime
import pytz
from django.shortcuts import *
from django.utils import timezone
class TimezoneMiddleware(object):
def process_request(self, request):
if request.user.is_authenticated():
tzname = request.user.profile.timezone
if tzname:
timezone.activate(pytz.timezone(tzname))
else:
if "/settings" not in request.path and "/admin" not in request.path and "/static" not in request.path:
return redirect('/settings')
from django.conf import settings
from django.http import HttpResponseRedirect
class SSLMiddleware(object):
def process_request(self, request):
if not any([settings.DEBUG, request.is_secure(), request.META.get("HTTP_X_FORWARDED_PROTO", "") == 'https']):
url = request.build_absolute_uri(request.get_full_path())
secure_url = url.replace("http://", "https://")
            return HttpResponseRedirect(secure_url)
| {
"content_hash": "3ae6b329f2a5c79a160d20ada51dd378",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 118,
"avg_line_length": 31.647058823529413,
"alnum_prop": 0.6384758364312267,
"repo_name": "twitterdev/twitter-leaderboard",
"id": "c8d7ffd8efec4874e9c975f35de6a34f83b1a4d9",
"size": "1076",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "services/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "86313"
},
{
"name": "HTML",
"bytes": "15519"
},
{
"name": "JavaScript",
"bytes": "316932"
},
{
"name": "Python",
"bytes": "22390"
}
],
"symlink_target": ""
} |
from qingcloud.cli.misc.utils import explode_array
from qingcloud.cli.iaas_client.actions.base import BaseAction
class LeaveVxnetAction(BaseAction):
action = 'LeaveVxnet'
command = 'leave-vxnet'
usage = '%(prog)s --instances "instance_id, ..." --vxnet <vxnet_id> [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument('-i', '--instances', dest='instances',
action='store', type=str, default='',
help='the IDs of instances that will leave a vxnet.')
parser.add_argument('-v', '--vxnet', dest='vxnet',
action='store', type=str, default='',
help='the ID of the vxnet the instances will leave. ')
@classmethod
def build_directive(cls, options):
instances = explode_array(options.instances)
if not options.vxnet or not instances:
print('error: [instances] and [vxnet] should be specified')
return None
return {
'vxnet': options.vxnet,
'instances': instances,
}
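    # Illustrative example (not in the original code, assuming explode_array splits on commas):
    # options with instances='i-aaaa,i-bbbb' and vxnet='vxnet-0' produce
    # {'vxnet': 'vxnet-0', 'instances': ['i-aaaa', 'i-bbbb']}.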
| {
"content_hash": "55d2ef470826a451ea4c537fc162d247",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 89,
"avg_line_length": 36.6,
"alnum_prop": 0.592896174863388,
"repo_name": "yunify/qingcloud-cli",
"id": "ff4311bf77c4e791a704db3d03ca81ae3e0e6548",
"size": "1931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qingcloud/cli/iaas_client/actions/vxnet/leave_vxnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "852"
},
{
"name": "Python",
"bytes": "607642"
}
],
"symlink_target": ""
} |
import datetime
import tzlocal
def now_tz():
"""Get the current local time as a time zone aware datetime."""
now = datetime.datetime.now(datetime.timezone.utc)
return now.astimezone()
def start_of_day_tz():
"""Get the start of the current day as a time zone aware datetime."""
now = now_tz()
return now.replace(hour=0, minute=0, second=0, microsecond=0)
def end_of_day_tz():
"""Get the end of the current day as a time zone aware datetime."""
now = now_tz()
return now.replace(hour=23, minute=59, second=59, microsecond=9999)
def parse_date_tz(date):
"""Parse an ISO8601 date string returning a time zone aware datetime.
If you want parsing of times and time zones, try the dateutil package.
"""
parsed = datetime.datetime.strptime(date, "%Y-%m-%d")
time_zone = tzlocal.get_localzone()
return time_zone.localize(parsed)
| {
"content_hash": "d03515cbb9d79fbaa588e5a8a8dd962c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 74,
"avg_line_length": 28.70967741935484,
"alnum_prop": 0.6786516853932584,
"repo_name": "genericmoniker/mirror",
"id": "3aae291621ab952beb7c8700d2d4ac1e436929a2",
"size": "890",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "backend/src/mirror/plugins/calendars/datetime_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31880"
},
{
"name": "Dockerfile",
"bytes": "3441"
},
{
"name": "HTML",
"bytes": "15567"
},
{
"name": "JavaScript",
"bytes": "8782"
},
{
"name": "Python",
"bytes": "66404"
},
{
"name": "Riot",
"bytes": "179"
},
{
"name": "Shell",
"bytes": "3851"
},
{
"name": "Svelte",
"bytes": "33588"
}
],
"symlink_target": ""
} |
"""Helper classes for Google Assistant integration."""
from __future__ import annotations
from abc import ABC, abstractmethod
from asyncio import gather
from collections.abc import Mapping
from http import HTTPStatus
import logging
import pprint
from aiohttp.web import json_response
from homeassistant.components import webhook
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_SUPPORTED_FEATURES,
CLOUD_NEVER_EXPOSED_ENTITIES,
CONF_NAME,
STATE_UNAVAILABLE,
)
from homeassistant.core import Context, HomeAssistant, State, callback
from homeassistant.helpers import start
from homeassistant.helpers.area_registry import AreaEntry
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.entity_registry import RegistryEntry
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.network import get_url
from homeassistant.helpers.storage import Store
from . import trait
from .const import (
CONF_ALIASES,
CONF_ROOM_HINT,
DEVICE_CLASS_TO_GOOGLE_TYPES,
DOMAIN,
DOMAIN_TO_GOOGLE_TYPES,
ERR_FUNCTION_NOT_SUPPORTED,
NOT_EXPOSE_LOCAL,
SOURCE_LOCAL,
STORE_AGENT_USER_IDS,
)
from .error import SmartHomeError
SYNC_DELAY = 15
_LOGGER = logging.getLogger(__name__)
async def _get_entity_and_device(
hass, entity_id
) -> tuple[RegistryEntry, DeviceEntry] | None:
"""Fetch the entity and device entries for a entity_id."""
dev_reg, ent_reg = await gather(
hass.helpers.device_registry.async_get_registry(),
hass.helpers.entity_registry.async_get_registry(),
)
if not (entity_entry := ent_reg.async_get(entity_id)):
return None, None
device_entry = dev_reg.devices.get(entity_entry.device_id)
return entity_entry, device_entry
async def _get_area(hass, entity_entry, device_entry) -> AreaEntry | None:
"""Calculate the area for an entity."""
if entity_entry and entity_entry.area_id:
area_id = entity_entry.area_id
elif device_entry and device_entry.area_id:
area_id = device_entry.area_id
else:
return None
area_reg = await hass.helpers.area_registry.async_get_registry()
return area_reg.areas.get(area_id)
async def _get_device_info(device_entry) -> dict[str, str] | None:
"""Retrieve the device info for a device."""
if not device_entry:
return None
device_info = {}
if device_entry.manufacturer:
device_info["manufacturer"] = device_entry.manufacturer
if device_entry.model:
device_info["model"] = device_entry.model
if device_entry.sw_version:
device_info["swVersion"] = device_entry.sw_version
return device_info
class AbstractConfig(ABC):
"""Hold the configuration for Google Assistant."""
_unsub_report_state = None
def __init__(self, hass):
"""Initialize abstract config."""
self.hass = hass
self._store = None
self._google_sync_unsub = {}
self._local_sdk_active = False
async def async_initialize(self):
"""Perform async initialization of config."""
self._store = GoogleConfigStore(self.hass)
await self._store.async_load()
if not self.enabled:
return
async def sync_google(_):
"""Sync entities to Google."""
await self.async_sync_entities_all()
start.async_at_start(self.hass, sync_google)
@property
def enabled(self):
"""Return if Google is enabled."""
return False
@property
def entity_config(self):
"""Return entity config."""
return {}
@property
def secure_devices_pin(self):
"""Return entity config."""
return None
@property
def is_reporting_state(self):
"""Return if we're actively reporting states."""
return self._unsub_report_state is not None
@property
def is_local_sdk_active(self):
"""Return if we're actively accepting local messages."""
return self._local_sdk_active
@property
def should_report_state(self):
"""Return if states should be proactively reported."""
return False
@property
def local_sdk_webhook_id(self):
"""Return the local SDK webhook ID.
Return None to disable the local SDK.
"""
return None
@property
def local_sdk_user_id(self):
"""Return the user ID to be used for actions received via the local SDK."""
raise NotImplementedError
@abstractmethod
def get_agent_user_id(self, context):
"""Get agent user ID from context."""
@abstractmethod
def should_expose(self, state) -> bool:
"""Return if entity should be exposed."""
def should_2fa(self, state):
"""If an entity should have 2FA checked."""
# pylint: disable=no-self-use
return True
async def async_report_state(self, message, agent_user_id: str):
"""Send a state report to Google."""
raise NotImplementedError
async def async_report_state_all(self, message):
"""Send a state report to Google for all previously synced users."""
jobs = [
self.async_report_state(message, agent_user_id)
for agent_user_id in self._store.agent_user_ids
]
await gather(*jobs)
@callback
def async_enable_report_state(self):
"""Enable proactive mode."""
# Circular dep
# pylint: disable=import-outside-toplevel
from .report_state import async_enable_report_state
if self._unsub_report_state is None:
self._unsub_report_state = async_enable_report_state(self.hass, self)
@callback
def async_disable_report_state(self):
"""Disable report state."""
if self._unsub_report_state is not None:
self._unsub_report_state()
self._unsub_report_state = None
async def async_sync_entities(self, agent_user_id: str):
"""Sync all entities to Google."""
# Remove any pending sync
self._google_sync_unsub.pop(agent_user_id, lambda: None)()
status = await self._async_request_sync_devices(agent_user_id)
if status == HTTPStatus.NOT_FOUND:
await self.async_disconnect_agent_user(agent_user_id)
return status
async def async_sync_entities_all(self):
"""Sync all entities to Google for all registered agents."""
res = await gather(
*(
self.async_sync_entities(agent_user_id)
for agent_user_id in self._store.agent_user_ids
)
)
return max(res, default=204)
@callback
def async_schedule_google_sync(self, agent_user_id: str):
"""Schedule a sync."""
async def _schedule_callback(_now):
"""Handle a scheduled sync callback."""
self._google_sync_unsub.pop(agent_user_id, None)
await self.async_sync_entities(agent_user_id)
self._google_sync_unsub.pop(agent_user_id, lambda: None)()
self._google_sync_unsub[agent_user_id] = async_call_later(
self.hass, SYNC_DELAY, _schedule_callback
)
@callback
def async_schedule_google_sync_all(self):
"""Schedule a sync for all registered agents."""
for agent_user_id in self._store.agent_user_ids:
self.async_schedule_google_sync(agent_user_id)
async def _async_request_sync_devices(self, agent_user_id: str) -> int:
"""Trigger a sync with Google.
Return value is the HTTP status code of the sync request.
"""
raise NotImplementedError
async def async_connect_agent_user(self, agent_user_id: str):
"""Add an synced and known agent_user_id.
Called when a completed sync response have been sent to Google.
"""
self._store.add_agent_user_id(agent_user_id)
async def async_disconnect_agent_user(self, agent_user_id: str):
"""Turn off report state and disable further state reporting.
Called when the user disconnects their account from Google.
"""
self._store.pop_agent_user_id(agent_user_id)
@callback
def async_enable_local_sdk(self):
"""Enable the local SDK."""
if (webhook_id := self.local_sdk_webhook_id) is None:
return
try:
webhook.async_register(
self.hass,
DOMAIN,
"Local Support",
webhook_id,
self._handle_local_webhook,
)
except ValueError:
_LOGGER.info("Webhook handler is already defined!")
return
self._local_sdk_active = True
@callback
def async_disable_local_sdk(self):
"""Disable the local SDK."""
if not self._local_sdk_active:
return
webhook.async_unregister(self.hass, self.local_sdk_webhook_id)
self._local_sdk_active = False
async def _handle_local_webhook(self, hass, webhook_id, request):
"""Handle an incoming local SDK message."""
# Circular dep
# pylint: disable=import-outside-toplevel
from . import smart_home
payload = await request.json()
if _LOGGER.isEnabledFor(logging.DEBUG):
_LOGGER.debug("Received local message:\n%s\n", pprint.pformat(payload))
if not self.enabled:
return json_response(smart_home.turned_off_response(payload))
result = await smart_home.async_handle_message(
self.hass, self, self.local_sdk_user_id, payload, SOURCE_LOCAL
)
if _LOGGER.isEnabledFor(logging.DEBUG):
_LOGGER.debug("Responding to local message:\n%s\n", pprint.pformat(result))
return json_response(result)
class GoogleConfigStore:
"""A configuration store for google assistant."""
_STORAGE_VERSION = 1
_STORAGE_KEY = DOMAIN
def __init__(self, hass):
"""Initialize a configuration store."""
self._hass = hass
self._store = Store(hass, self._STORAGE_VERSION, self._STORAGE_KEY)
self._data = {STORE_AGENT_USER_IDS: {}}
@property
def agent_user_ids(self):
"""Return a list of connected agent user_ids."""
return self._data[STORE_AGENT_USER_IDS]
@callback
def add_agent_user_id(self, agent_user_id):
"""Add an agent user id to store."""
if agent_user_id not in self._data[STORE_AGENT_USER_IDS]:
self._data[STORE_AGENT_USER_IDS][agent_user_id] = {}
self._store.async_delay_save(lambda: self._data, 1.0)
@callback
def pop_agent_user_id(self, agent_user_id):
"""Remove agent user id from store."""
if agent_user_id in self._data[STORE_AGENT_USER_IDS]:
self._data[STORE_AGENT_USER_IDS].pop(agent_user_id, None)
self._store.async_delay_save(lambda: self._data, 1.0)
async def async_load(self):
"""Store current configuration to disk."""
if data := await self._store.async_load():
self._data = data
class RequestData:
"""Hold data associated with a particular request."""
def __init__(
self,
config: AbstractConfig,
user_id: str,
source: str,
request_id: str,
devices: list[dict] | None,
) -> None:
"""Initialize the request data."""
self.config = config
self.source = source
self.request_id = request_id
self.context = Context(user_id=user_id)
self.devices = devices
@property
def is_local_request(self):
"""Return if this is a local request."""
return self.source == SOURCE_LOCAL
def get_google_type(domain, device_class):
"""Google type based on domain and device class."""
typ = DEVICE_CLASS_TO_GOOGLE_TYPES.get((domain, device_class))
return typ if typ is not None else DOMAIN_TO_GOOGLE_TYPES[domain]
class GoogleEntity:
"""Adaptation of Entity expressed in Google's terms."""
def __init__(
self, hass: HomeAssistant, config: AbstractConfig, state: State
) -> None:
"""Initialize a Google entity."""
self.hass = hass
self.config = config
self.state = state
self._traits = None
@property
def entity_id(self):
"""Return entity ID."""
return self.state.entity_id
@callback
def traits(self):
"""Return traits for entity."""
if self._traits is not None:
return self._traits
state = self.state
domain = state.domain
attributes = state.attributes
features = attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if not isinstance(features, int):
_LOGGER.warning(
"Entity %s contains invalid supported_features value %s",
self.entity_id,
features,
)
return []
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
self._traits = [
Trait(self.hass, state, self.config)
for Trait in trait.TRAITS
if Trait.supported(domain, features, device_class, attributes)
]
return self._traits
@callback
def should_expose(self):
"""If entity should be exposed."""
return self.config.should_expose(self.state)
@callback
def should_expose_local(self) -> bool:
"""Return if the entity should be exposed locally."""
return (
self.should_expose()
and get_google_type(
self.state.domain, self.state.attributes.get(ATTR_DEVICE_CLASS)
)
not in NOT_EXPOSE_LOCAL
and not self.might_2fa()
)
@callback
def is_supported(self) -> bool:
"""Return if the entity is supported by Google."""
return bool(self.traits())
@callback
def might_2fa(self) -> bool:
"""Return if the entity might encounter 2FA."""
if not self.config.should_2fa(self.state):
return False
return self.might_2fa_traits()
@callback
def might_2fa_traits(self) -> bool:
"""Return if the entity might encounter 2FA based on just traits."""
state = self.state
domain = state.domain
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
return any(
trait.might_2fa(domain, features, device_class) for trait in self.traits()
)
async def sync_serialize(self, agent_user_id):
"""Serialize entity for a SYNC response.
https://developers.google.com/actions/smarthome/create-app#actiondevicessync
"""
state = self.state
entity_config = self.config.entity_config.get(state.entity_id, {})
name = (entity_config.get(CONF_NAME) or state.name).strip()
domain = state.domain
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
entity_entry, device_entry = await _get_entity_and_device(
self.hass, state.entity_id
)
traits = self.traits()
device_type = get_google_type(domain, device_class)
device = {
"id": state.entity_id,
"name": {"name": name},
"attributes": {},
"traits": [trait.name for trait in traits],
"willReportState": self.config.should_report_state,
"type": device_type,
}
# use aliases
if aliases := entity_config.get(CONF_ALIASES):
device["name"]["nicknames"] = [name] + aliases
if self.config.is_local_sdk_active and self.should_expose_local():
device["otherDeviceIds"] = [{"deviceId": self.entity_id}]
device["customData"] = {
"webhookId": self.config.local_sdk_webhook_id,
"httpPort": self.hass.http.server_port,
"httpSSL": self.hass.config.api.use_ssl,
"uuid": await self.hass.helpers.instance_id.async_get(),
"baseUrl": get_url(self.hass, prefer_external=True),
"proxyDeviceId": agent_user_id,
}
for trt in traits:
device["attributes"].update(trt.sync_attributes())
if room := entity_config.get(CONF_ROOM_HINT):
device["roomHint"] = room
else:
area = await _get_area(self.hass, entity_entry, device_entry)
if area and area.name:
device["roomHint"] = area.name
if device_info := await _get_device_info(device_entry):
device["deviceInfo"] = device_info
return device
@callback
def query_serialize(self):
"""Serialize entity for a QUERY response.
https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
"""
state = self.state
if state.state == STATE_UNAVAILABLE:
return {"online": False}
attrs = {"online": True}
for trt in self.traits():
deep_update(attrs, trt.query_attributes())
return attrs
@callback
def reachable_device_serialize(self):
"""Serialize entity for a REACHABLE_DEVICE response."""
return {"verificationId": self.entity_id}
async def execute(self, data, command_payload):
"""Execute a command.
https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
"""
command = command_payload["command"]
params = command_payload.get("params", {})
challenge = command_payload.get("challenge", {})
executed = False
for trt in self.traits():
if trt.can_execute(command, params):
await trt.execute(command, data, params, challenge)
executed = True
break
if not executed:
raise SmartHomeError(
ERR_FUNCTION_NOT_SUPPORTED,
f"Unable to execute {command} for {self.state.entity_id}",
)
@callback
def async_update(self):
"""Update the entity with latest info from Home Assistant."""
self.state = self.hass.states.get(self.entity_id)
if self._traits is None:
return
for trt in self._traits:
trt.state = self.state
def deep_update(target, source):
"""Update a nested dictionary with another nested dictionary."""
for key, value in source.items():
if isinstance(value, Mapping):
target[key] = deep_update(target.get(key, {}), value)
else:
target[key] = value
return target
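# Illustrative note (hypothetical values, not executed on import): deep_update merges
# nested mappings, which is how query_serialize combines attributes from several traits:
#   deep_update({"color": {"hue": 1}}, {"color": {"sat": 2}})
#   returns {"color": {"hue": 1, "sat": 2}}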
@callback
def async_get_entities(hass, config) -> list[GoogleEntity]:
"""Return all entities that are supported by Google."""
entities = []
for state in hass.states.async_all():
if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
continue
entity = GoogleEntity(hass, config, state)
if entity.is_supported():
entities.append(entity)
return entities
| {
"content_hash": "57039adf7250ebfdbaf8d124c684c2ee",
"timestamp": "",
"source": "github",
"line_count": 608,
"max_line_length": 87,
"avg_line_length": 31.43421052631579,
"alnum_prop": 0.6095646714106321,
"repo_name": "home-assistant/home-assistant",
"id": "238ee8d957648b1b3fee274e83c4327ea2667522",
"size": "19112",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/google_assistant/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
from odoo import api, models
from odoo.tools.translate import _
PARAMS = [
("web_debranding.new_name", _("Software")),
("web_debranding.new_title", _("Software")),
("web_debranding.new_website", "example.com"),
("web_debranding.new_documentation_website", ""),
("web_debranding.favicon_url", ""),
("web_debranding.send_publisher_warranty_url", "0"),
("web_debranding.icon_url", ""),
("web_debranding.apple_touch_icon_url", ""),
]
def get_debranding_parameters_env(env):
res = {}
for param, default in PARAMS:
value = env["ir.config_parameter"].sudo().get_param(param, default)
res[param] = value.strip()
return res
class IrConfigParameter(models.Model):
_inherit = "ir.config_parameter"
@api.model
def get_debranding_parameters(self):
return get_debranding_parameters_env(self.env)
@api.model
def create_debranding_parameters(self):
for param, default in PARAMS:
if not self.env["ir.config_parameter"].sudo().get_param(param):
self.env["ir.config_parameter"].sudo().set_param(param, default or " ")
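# Illustrative usage sketch (assumes an existing Odoo environment `env`, which is not
# constructed here):
#   params = get_debranding_parameters_env(env)
#   params["web_debranding.new_name"]  # the configured value, or the "Software" default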
| {
"content_hash": "1d572db3a5b57ab6f5d8abcdba43dd20",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 87,
"avg_line_length": 32.285714285714285,
"alnum_prop": 0.6389380530973451,
"repo_name": "it-projects-llc/misc-addons",
"id": "50e1ba9f1e278b0224cdf58e7ab1207c822baea0",
"size": "1616",
"binary": false,
"copies": "1",
"ref": "refs/heads/13.0",
"path": "web_debranding/models/ir_config_parameter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14551"
},
{
"name": "HTML",
"bytes": "130934"
},
{
"name": "JavaScript",
"bytes": "407608"
},
{
"name": "Python",
"bytes": "414883"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from reviewers import review_group_name
from reviewers.models import Review, Reviewer
from .actions import export_as_csv_action
class ReviewAdminForm(forms.ModelForm):
class Meta:
model = Review
fields = "__all__"
def __init__(self, *args, **kwargs):
super(ReviewAdminForm, self).__init__(*args, **kwargs)
self.fields["user"].queryset = get_user_model().objects.filter(
Q(groups__name=review_group_name) | Q(is_superuser=True)
)
def clean(self):
cleaned_data = super(ReviewAdminForm, self).clean()
user = cleaned_data.get("user")
proposal = cleaned_data.get("proposal")
        if user and proposal and user == proposal.speaker.user:
            raise forms.ValidationError("You cannot assign a review to its author!")
        return cleaned_data
@admin.register(Review)
class ReviewAdmin(admin.ModelAdmin):
list_display = ["id", "proposal", "user", "relevance", "interest", "newness", "get_avg", "conflict", "finished",
"created"]
list_filter = ["proposal", "user", "conflict", "finished"]
actions = [
export_as_csv_action("CSV Export", fields=[
"id",
"proposal",
"user",
"relevance",
"interest",
"newness",
"avg_property",
"conflict",
"finished",
"created",
])
]
form = ReviewAdminForm
def get_avg(self, instance):
return instance.avg()
    get_avg.short_description = _("Average")
@admin.register(Reviewer)
class ReviewerAdmin(admin.ModelAdmin):
list_display = ["id", "user", "get_reviews", "created"]
def get_reviews(self, instance):
return instance.reviews_count()
    get_reviews.short_description = _("Reviews") | {
"content_hash": "6b8376410b797a5b94250e3adf571ab3",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 116,
"avg_line_length": 29.115942028985508,
"alnum_prop": 0.6122448979591837,
"repo_name": "olea/PyConES-2016",
"id": "97b079425220fd30d9ad23ffd4f5b0af07b3ef95",
"size": "2033",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pycones/reviewers/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "142786"
},
{
"name": "HTML",
"bytes": "88272"
},
{
"name": "JavaScript",
"bytes": "677"
},
{
"name": "Python",
"bytes": "304958"
},
{
"name": "Shell",
"bytes": "488"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import time
import re
import sys
from google.appengine.ext import db
def iterate(query, callback=lambda x=None: x, batch_size=1000, verbose=True):
"""Utility for iterating over a query, applying the callback to each row."""
start = time.time()
count = 0
results = query.fetch(batch_size)
while results:
rstart = time.time()
for row in results:
output = callback(row)
if output:
print(output)
count += 1
if verbose:
print('%s rows processed in %.1fs' % (count, time.time() - rstart))
print('total time: %.1fs' % (time.time() - start))
results = query.with_cursor(query.cursor()).fetch(batch_size)
callback()
print('total rows: %s, total time: %.1fs' % (count, time.time() - start))
def dangling_pic(pic):
"""Filter for photos with no referencing person."""
ppl = pic.person_set.fetch(100)
if not ppl:
return pic.key().id()
ids = []
def dangling_pic_list(pic=None):
"""Track photos with no referencing person."""
if pic and not pic.person_set.count():
ids.append(pic.key().id())
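# Illustrative usage sketch (assumes an App Engine `Photo` model is defined elsewhere):
#   iterate(Photo.all(), callback=dangling_pic_list, batch_size=500)
#   print('dangling photo ids:', ids)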
| {
"content_hash": "bc20b0d1787d6d065a8b60b3bc244218",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 80,
"avg_line_length": 31.864864864864863,
"alnum_prop": 0.6030534351145038,
"repo_name": "gimite/personfinder",
"id": "8e8655181f8d21549fe42fb78f00ae2504851933",
"size": "1777",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/iterate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21011"
},
{
"name": "Dockerfile",
"bytes": "1428"
},
{
"name": "HTML",
"bytes": "4527"
},
{
"name": "JavaScript",
"bytes": "90402"
},
{
"name": "Python",
"bytes": "1768355"
},
{
"name": "Shell",
"bytes": "30308"
}
],
"symlink_target": ""
} |
# Copyright 2009, Jean-Michel Sizun
# Copyright 2009 Frank Scholz <coherence@beebits.net>
import os.path
import time
from twisted.internet import threads
from twisted.web import server, static
from twisted.web.error import PageRedirect
from coherence.upnp.core.utils import ReverseProxyUriResource
from twisted.internet import task
from coherence.upnp.core import utils
from coherence.upnp.core import DIDLLite
from coherence.backend import BackendStore, BackendItem, Container, LazyContainer, \
AbstractBackendStore
from coherence import log
from urlparse import urlsplit
import gdata.photos.service
import gdata.media
import gdata.geo
DEFAULT_NAME = 'Picasa Web Albums'
FEED_URI = 'http://picasaweb.google.com/data/feed/api/featured'
class PicasaProxy(ReverseProxyUriResource):
def __init__(self, uri):
ReverseProxyUriResource.__init__(self, uri)
def render(self, request):
request.requestHeaders.removeHeader('referer')
return ReverseProxyUriResource.render(self, request)
class PicasaPhotoItem(BackendItem):
def __init__(self, photo):
BackendItem.__init__(self)
#print photo
self.photo = photo
self.name = photo.summary.text
if self.name is None:
self.name = photo.title.text
self.duration = None
self.size = None
self.mimetype = photo.content.type
self.description = photo.summary.text
self.date = None
self.item = None
self.photo_url = photo.content.src
self.thumbnail_url = photo.media.thumbnail[0].url
self.url = None
self.location = PicasaProxy(self.photo_url)
def replace_by(self, item):
#print photo
self.photo = item.photo
        self.name = self.photo.summary.text
        if self.name is None:
            self.name = self.photo.title.text
self.mimetype = self.photo.content.type
self.description = self.photo.summary.text
self.photo_url = self.photo.content.src
self.thumbnail_url = self.photo.media.thumbnail[0].url
self.location = PicasaProxy(self.photo_url)
return True
def get_item(self):
        if self.item is None:
upnp_id = self.get_id()
upnp_parent_id = self.parent.get_id()
self.item = DIDLLite.Photo(upnp_id, upnp_parent_id, self.name)
res = DIDLLite.Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
self.item.res.append(res)
self.item.childCount = 0
return self.item
def get_path(self):
return self.url
def get_id(self):
return self.storage_id
class PicasaStore(AbstractBackendStore):
logCategory = 'picasa_store'
implements = ['MediaServer']
description = ('Picasa Web Albums', 'connects to the Picasa Web Albums service and exposes the featured photos and albums for a given user.', None)
options = [{'option': 'name', 'text': 'Server Name:', 'type': 'string', 'default': 'my media', 'help': 'the name under this MediaServer shall show up with on other UPnP clients'},
{'option': 'version', 'text': 'UPnP Version:', 'type': 'int', 'default': 2, 'enum': (2, 1), 'help': 'the highest UPnP version this MediaServer shall support', 'level': 'advance'},
{'option': 'uuid', 'text': 'UUID Identifier:', 'type': 'string', 'help': 'the unique (UPnP) identifier for this MediaServer, usually automatically set', 'level': 'advance'},
{'option': 'refresh', 'text': 'Refresh period', 'type': 'string'},
{'option': 'login', 'text': 'User ID:', 'type': 'string', 'group': 'User Account'},
{'option': 'password', 'text': 'Password:', 'type': 'string', 'group': 'User Account'},
]
def __init__(self, server, **kwargs):
AbstractBackendStore.__init__(self, server, **kwargs)
self.name = kwargs.get('name', DEFAULT_NAME)
self.refresh = int(kwargs.get('refresh', 60)) * 60
self.login = kwargs.get('userid', kwargs.get('login', ''))
self.password = kwargs.get('password', '')
rootContainer = Container(None, self.name)
self.set_root_item(rootContainer)
self.AlbumsContainer = LazyContainer(rootContainer, 'My Albums', None, self.refresh, self.retrieveAlbums)
rootContainer.add_child(self.AlbumsContainer)
self.FeaturedContainer = LazyContainer(rootContainer, 'Featured photos', None, self.refresh, self.retrieveFeaturedPhotos)
rootContainer.add_child(self.FeaturedContainer)
self.init_completed()
def __repr__(self):
return self.__class__.__name__
def upnp_init(self):
self.current_connection_id = None
if self.server:
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_TN;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_SM;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_MED;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_LRG;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
'http-get:*:image/jpeg:*,'
'http-get:*:image/gif:*,'
'http-get:*:image/png:*',
default=True)
self.wmc_mapping = {'16': self.get_root_id()}
self.gd_client = gdata.photos.service.PhotosService()
self.gd_client.email = self.login
self.gd_client.password = self.password
self.gd_client.source = 'Coherence UPnP backend'
if len(self.login) > 0:
d = threads.deferToThread(self.gd_client.ProgrammaticLogin)
def retrieveAlbums(self, parent=None):
albums = threads.deferToThread(self.gd_client.GetUserFeed)
def gotAlbums(albums):
if albums is None:
print "Unable to retrieve albums"
return
for album in albums.entry:
title = album.title.text
album_id = album.gphoto_id.text
item = LazyContainer(parent, title, album_id, self.refresh, self.retrieveAlbumPhotos, album_id=album_id)
parent.add_child(item, external_id=album_id)
def gotError(error):
print "ERROR: %s" % error
albums.addCallbacks(gotAlbums, gotError)
return albums
def retrieveFeedPhotos (self, parent=None, feed_uri=''):
#print feed_uri
photos = threads.deferToThread(self.gd_client.GetFeed, feed_uri)
def gotPhotos(photos):
if photos is None:
print "Unable to retrieve photos for feed %s" % feed_uri
return
for photo in photos.entry:
photo_id = photo.gphoto_id.text
item = PicasaPhotoItem(photo)
item.parent = parent
parent.add_child(item, external_id=photo_id)
def gotError(error):
print "ERROR: %s" % error
photos.addCallbacks(gotPhotos, gotError)
return photos
def retrieveAlbumPhotos (self, parent=None, album_id=''):
album_feed_uri = '/data/feed/api/user/%s/albumid/%s?kind=photo' % (self.login, album_id)
return self.retrieveFeedPhotos(parent, album_feed_uri)
def retrieveFeaturedPhotos (self, parent=None):
return self.retrieveFeedPhotos(parent, FEED_URI)
| {
"content_hash": "e0cecfa306be90b5bcec2b1408ecb009",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 186,
"avg_line_length": 40.61,
"alnum_prop": 0.5886481162275302,
"repo_name": "coherence-project/Coherence",
"id": "ef2e903d16775a55ddacf749e27432d44eb04d62",
"size": "8206",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "coherence/backends/picasa_storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1305048"
},
{
"name": "Roff",
"bytes": "712"
},
{
"name": "Shell",
"bytes": "1569"
}
],
"symlink_target": ""
} |
"""
pyhik.constants
~~~~~~~~~~~~~~~~~~~~
Constants list
Copyright (c) 2016-2021 John Mihalic <https://github.com/mezz64>
Licensed under the MIT license.
"""
MAJOR_VERSION = 0
MINOR_VERSION = 3
SUB_MINOR_VERSION = 1
__version__ = '{}.{}.{}'.format(
MAJOR_VERSION, MINOR_VERSION, SUB_MINOR_VERSION)
CONNECT_TIMEOUT = 10
READ_TIMEOUT = 60
DEFAULT_PORT = 80
XML_ENCODING = 'UTF-8'
XML_NAMESPACE = 'http://www.hikvision.com/ver20/XMLSchema'
DEFAULT_HEADERS = {
'Content-Type': "application/xml; charset='UTF-8'",
'Accept': "*/*"
}
SENSOR_MAP = {
'vmd': 'Motion',
'linedetection': 'Line Crossing',
'fielddetection': 'Field Detection',
'tamperdetection': 'Tamper Detection',
'shelteralarm': 'Tamper Detection',
'defocus': 'Tamper Detection',
'diskfull': 'Disk Full',
'diskerror': 'Disk Error',
'nicbroken': 'Net Interface Broken',
'ipconflict': 'IP Conflict',
'illaccess': 'Illegal Access',
'videomismatch': 'Video Mismatch',
'badvideo': 'Bad Video',
'pir': 'PIR Alarm',
'facedetection': 'Face Detection',
'scenechangedetection': 'Scene Change Detection',
'io': 'I/O',
'unattendedbaggage': 'Unattended Baggage',
'attendedbaggage': 'Attended Baggage',
'recordingfailure': 'Recording Failure',
'regionexiting': "Exiting Region",
'regionentrance': "Entering Region",
'duration': "Ongoing Events"
}
# The name 'id' should always be last
CHANNEL_NAMES = ['dynVideoInputChannelID', 'videoInputChannelID',
'dynInputIOPortID', 'inputIOPortID',
'id']
ID_TYPES = ['channelID', 'dynChannelID', 'inputIOPortID',
'dynInputIOPortID']
CAM_DEVICE = 'CAM'
NVR_DEVICE = 'NVR'
CONTEXT_INFO = 'INFO'
CONTEXT_TRIG = 'TRIGGERS'
CONTEXT_ALERT = 'ALERTS'
CONTEXT_MOTION = 'MOTION'
| {
"content_hash": "b3c3f6a3891ebffb55c472c09aeef341",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 65,
"avg_line_length": 27,
"alnum_prop": 0.6434494195688225,
"repo_name": "mezz64/pyHik",
"id": "fbad3e15501d4087647e6ab40dab0f41c6174dc4",
"size": "1809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyhik/constants.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33500"
}
],
"symlink_target": ""
} |
from django.forms import CharField, Form, ModelChoiceField
from django.utils.translation import ugettext_lazy as _
from sendinel.backend.authhelper import format_and_validate_phonenumber
from sendinel.backend.models import get_enabled_wocs
from sendinel.infoservices.models import InfoMessage, InfoService
class RegisterPatientForMedicineForm(Form):
phone_number = CharField(validators = [format_and_validate_phonenumber],
error_messages={'required':_('Please enter a phone number')})
way_of_communication = ModelChoiceField(
queryset = get_enabled_wocs(),
error_messages={'required': \
_('Please choose a way of communication')})
medicine = ModelChoiceField(
queryset=InfoService.objects.filter(type='medicine'),
error_messages={'required': \
_('Please choose a medicine'),
'invalid_choice':
_('Please choose a medicine')})
class MedicineMessageValidationForm(Form):
medicine = ModelChoiceField(
queryset=InfoService.objects.filter(type='medicine'),
error_messages={'required': \
_('Please choose a medicine'), \
'invalid_choice': \
_('Please choose a medicine')})
text = CharField(error_messages={ \
'required': _('Please enter a text to send'), \
'invalid': _('The text contains invalid characters')})
| {
"content_hash": "fd9f3c9d8371a00981e9f4bf9f12ea3a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 91,
"avg_line_length": 51.53125,
"alnum_prop": 0.5651910248635537,
"repo_name": "Sendinel/Sendinel",
"id": "02d7b76bb7189e06826d7245fe189090b1d136e7",
"size": "1649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sendinel/medicines/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "15388"
},
{
"name": "JavaScript",
"bytes": "25980"
},
{
"name": "Python",
"bytes": "175933"
},
{
"name": "Shell",
"bytes": "22210"
}
],
"symlink_target": ""
} |
"""Helpers to build or extract data from feaLib AST objects."""
from __future__ import print_function, division, absolute_import, unicode_literals
from fontTools.feaLib import ast
from fontTools import unicodedata
import collections
import re
# we re-export here all the feaLib AST classes so they can be used from
# writer modules with a single `from ufo2ft.featureWriters import ast`
import sys
self = sys.modules[__name__]
for name in getattr(ast, "__all__", dir(ast)):
if isinstance(getattr(ast, name), type):
setattr(self, name, getattr(ast, name))
del sys, self, name
def getScriptLanguageSystems(feaFile):
"""Return dictionary keyed by Unicode script code containing lists of
(OT_SCRIPT_TAG, [OT_LANGUAGE_TAG, ...]) tuples (excluding "DFLT").
"""
languagesByScript = collections.OrderedDict()
for ls in [
st for st in feaFile.statements if isinstance(st, ast.LanguageSystemStatement)
]:
if ls.script == "DFLT":
continue
languagesByScript.setdefault(ls.script, []).append(ls.language)
langSysMap = collections.OrderedDict()
for script, languages in languagesByScript.items():
sc = unicodedata.ot_tag_to_script(script)
langSysMap.setdefault(sc, []).append((script, languages))
return langSysMap
def iterFeatureBlocks(feaFile, tag=None):
for statement in feaFile.statements:
if isinstance(statement, ast.FeatureBlock):
if tag is not None and statement.name != tag:
continue
yield statement
def findFeatureTags(feaFile):
return {f.name for f in iterFeatureBlocks(feaFile)}
def iterClassDefinitions(feaFile, featureTag=None):
if featureTag is None:
# start from top-level class definitions
for s in feaFile.statements:
if isinstance(s, ast.GlyphClassDefinition):
yield s
# then iterate over per-feature class definitions
for fea in iterFeatureBlocks(feaFile, tag=featureTag):
for s in fea.statements:
if isinstance(s, ast.GlyphClassDefinition):
yield s
LOOKUP_FLAGS = {
"RightToLeft": 1,
"IgnoreBaseGlyphs": 2,
"IgnoreLigatures": 4,
"IgnoreMarks": 8,
}
def makeLookupFlag(name=None, markAttachment=None, markFilteringSet=None):
value = 0 if name is None else LOOKUP_FLAGS[name]
if markAttachment is not None:
assert isinstance(markAttachment, ast.GlyphClassDefinition)
markAttachment = ast.GlyphClassName(markAttachment)
if markFilteringSet is not None:
assert isinstance(markFilteringSet, ast.GlyphClassDefinition)
markFilteringSet = ast.GlyphClassName(markFilteringSet)
return ast.LookupFlagStatement(
value, markAttachment=markAttachment, markFilteringSet=markFilteringSet
)
def makeGlyphClassDefinitions(groups, feaFile=None, stripPrefix=""):
""" Given a groups dictionary ({str: list[str]}), create feaLib
GlyphClassDefinition objects for each group.
Return a dict keyed by the original group name.
If `stripPrefix` (str) is provided and a group name starts with it,
the string will be stripped from the beginning of the class name.
"""
classDefs = {}
if feaFile is not None:
classNames = {cdef.name for cdef in iterClassDefinitions(feaFile)}
else:
classNames = set()
lengthPrefix = len(stripPrefix)
for groupName, members in sorted(groups.items()):
originalGroupName = groupName
if stripPrefix and groupName.startswith(stripPrefix):
groupName = groupName[lengthPrefix:]
className = makeFeaClassName(groupName, classNames)
classNames.add(className)
classDef = makeGlyphClassDefinition(className, members)
classDefs[originalGroupName] = classDef
return classDefs
def makeGlyphClassDefinition(className, members):
glyphNames = [ast.GlyphName(g) for g in members]
glyphClass = ast.GlyphClass(glyphNames)
classDef = ast.GlyphClassDefinition(className, glyphClass)
return classDef
def makeFeaClassName(name, existingClassNames=None):
"""Make a glyph class name which is legal to use in feature text.
Ensures the name only includes characters in "A-Za-z0-9._", and
isn't already defined.
"""
name = re.sub(r"[^A-Za-z0-9._]", r"", name)
if existingClassNames is None:
return name
i = 1
origName = name
while name in existingClassNames:
name = "%s_%d" % (origName, i)
i += 1
return name
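# Illustrative example (hypothetical inputs): makeFeaClassName keeps only characters in
# "A-Za-z0-9._" and appends a numeric suffix to avoid collisions, e.g.
#   makeFeaClassName("foo bar") == "foobar"
#   makeFeaClassName("foo bar", {"foobar"}) == "foobar_1"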
def addLookupReferences(
feature, lookups, script=None, languages=None, exclude_dflt=False
):
"""Add references to named lookups to the feature's statements.
If `script` (str) and `languages` (sequence of str) are provided,
only register the lookup for the given script and languages,
optionally with `exclude_dflt` directive.
Otherwise add a global reference which will be registered for all
the scripts and languages in the feature file's `languagesystems`
statements.
"""
assert lookups
if not script:
for lookup in lookups:
feature.statements.append(ast.LookupReferenceStatement(lookup))
return
feature.statements.append(ast.ScriptStatement(script))
if exclude_dflt:
for language in languages or ("dflt",):
feature.statements.append(
ast.LanguageStatement(language, include_default=False)
)
for lookup in lookups:
feature.statements.append(ast.LookupReferenceStatement(lookup))
else:
feature.statements.append(ast.LanguageStatement("dflt", include_default=True))
for lookup in lookups:
feature.statements.append(ast.LookupReferenceStatement(lookup))
for language in languages or ():
if language == "dflt":
continue
feature.statements.append(
ast.LanguageStatement(language, include_default=True)
)
_GDEFGlyphClasses = collections.namedtuple(
"_GDEFGlyphClasses", "base ligature mark component"
)
def getGDEFGlyphClasses(feaLib):
"""Return GDEF GlyphClassDef base/mark/ligature/component glyphs, or
None if no GDEF table is defined in the feature file.
"""
for st in feaLib.statements:
if isinstance(st, ast.TableBlock) and st.name == "GDEF":
for st in st.statements:
if isinstance(st, ast.GlyphClassDefStatement):
return _GDEFGlyphClasses(
frozenset(st.baseGlyphs.glyphSet())
if st.baseGlyphs is not None
else frozenset(),
frozenset(st.ligatureGlyphs.glyphSet())
if st.ligatureGlyphs is not None
else frozenset(),
frozenset(st.markGlyphs.glyphSet())
if st.markGlyphs is not None
else frozenset(),
frozenset(st.componentGlyphs.glyphSet())
if st.componentGlyphs is not None
else frozenset(),
)
return _GDEFGlyphClasses(None, None, None, None)
| {
"content_hash": "568db5a792d371e9286d2ee0202d139f",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 86,
"avg_line_length": 35.87128712871287,
"alnum_prop": 0.6577422025945349,
"repo_name": "jamesgk/ufo2ft",
"id": "f51c6247fd773e6f00c18988e892f08119bc7ebf",
"size": "7246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/ufo2ft/featureWriters/ast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77326"
}
],
"symlink_target": ""
} |
"""
The classes in this module provide a property-like interface
to widget instance variables in a class. These properties translate
essential pieces of widget state into more convenient python objects
(for example, the check state of a button to a bool).
Example Use::
class Foo(object):
bar = ButtonProperty('_button')
def __init__(self):
self._button = QtGui.QCheckBox()
f = Foo()
f.bar = True # equivalent to f._button.setChecked(True)
assert f.bar == True
"""
from .qtutil import pretty_number
class WidgetProperty(object):
""" Base class for widget properties
Subclasses implement, at a minimum, the "get" and "set" methods,
which translate between widget states and python variables
"""
def __init__(self, att):
"""
:param att: The location, within a class instance, of the widget
to wrap around. If the widget is nested inside another variable,
normal '.' syntax can be used (e.g. 'sub_window.button')
:type att: str"""
self._att = att.split('.')
def __get__(self, instance, type=None):
widget = reduce(getattr, [instance] + self._att)
return self.getter(widget)
def __set__(self, instance, value):
widget = reduce(getattr, [instance] + self._att)
self.setter(widget, value)
def getter(self, widget):
""" Return the state of a widget. Depends on type of widget,
and must be overridden"""
raise NotImplementedError()
def setter(self, widget, value):
""" Set the state of a widget to a certain value"""
raise NotImplementedError()
class ButtonProperty(WidgetProperty):
"""Wrapper around the check state for QAbstractButton widgets"""
def getter(self, widget):
return widget.isChecked()
def setter(self, widget, value):
widget.setChecked(value)
class FloatLineProperty(WidgetProperty):
"""Wrapper around the text state for QLineEdit widgets.
Assumes that the text is a floating point number
"""
def getter(self, widget):
try:
return float(widget.text())
except ValueError:
return 0
def setter(self, widget, value):
widget.setText(pretty_number(value))
widget.editingFinished.emit()
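# Illustrative sketch mirroring the module docstring (assumes a Qt application context
# so that widgets can be created; names here are hypothetical):
#   class Bar(object):
#       threshold = FloatLineProperty('_line')
#       def __init__(self):
#           self._line = QtGui.QLineEdit()
#   b = Bar()
#   b.threshold = 3.5   # equivalent to b._line.setText('3.5')
#   assert b.threshold == 3.5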
| {
"content_hash": "621f0d31c222fcf63b9965941649040b",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 72,
"avg_line_length": 29.27848101265823,
"alnum_prop": 0.6437527021184609,
"repo_name": "glue-viz/glue-qt",
"id": "df4c4e21e6653ecf6e5578be7048cfb1cc7b41d5",
"size": "2313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/qt/widget_properties.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4040"
},
{
"name": "Python",
"bytes": "2472826"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
} |
import matplotlib
from getdist import plots, MCSamples
import getdist
import numpy as np
import matplotlib.pyplot as plt
import astropy
from loadMontePython import load as loadMCMC
import glob
basedir = '../chains/nonzero_model/'
#"/home/zequnl/Projects/isocurvature_2017/analysis/plot_triangle/nonzero/"
burnin = 1000
data1 = loadMCMC('../chains/planckdata/r1.txt', '../chains/planckdata/param')
data2 = loadMCMC('../chains/planckdata/r2.txt', '../chains/planckdata/param')
data = astropy.table.vstack( [data1[burnin:], data2[burnin:]])
data_planck = data[:]
weights_planck = data['acceptance'][:]
for col in ['likelihood', 'acceptance','omega_b','omega_cdm','100theta_s','tau_reio']:
data.remove_column(col)
nparr_planck = np.array(data.as_array().tolist()[:])
planck = MCSamples(samples=nparr_planck,names = data.colnames, labels = data.colnames, name_tag='Planck')
## C
folder = basedir + 'fC/'
files = glob.glob(folder + "*__*.txt")
params = glob.glob(folder + "*_.paramnames")
datalist = []
for f in files:
datalist.append( loadMCMC(f, params[0]) )
data = astropy.table.vstack( datalist )
data_sim = data[:]
weights_act = data['acceptance'][:]
for col in ['likelihood', 'acceptance','omega_b','omega_cdm','100theta_s','tau_reio']:
data.remove_column(col)
nparr_act = np.array(data.as_array().tolist()[:])
planck_s4 = MCSamples(samples=nparr_act,names = data.colnames, labels = data.colnames, name_tag='Planck low_l + S4')
## E
folder = basedir + 'fE/'
files = glob.glob(folder + "*__*.txt")
params = glob.glob(folder + "*_.paramnames")
datalist = []
for f in files:
datalist.append( loadMCMC(f, params[0]) )
data = astropy.table.vstack( datalist )
data_sim = data[:]
weights_act = data['acceptance'][:]
for col in ['likelihood', 'acceptance','omega_b','omega_cdm','100theta_s','tau_reio']:
data.remove_column(col)
nparr_act = np.array(data.as_array().tolist()[:])
pixie_planck = MCSamples(samples=nparr_act,names = data.colnames, labels = data.colnames, name_tag='PIXIE low_l + Planck high_l')
## F
folder = basedir + 'fF/'
files = glob.glob(folder + "*__*.txt")
params = glob.glob(folder + "*_.paramnames")
datalist = []
for f in files:
datalist.append( loadMCMC(f, params[0]) )
data = astropy.table.vstack( datalist )
data_sim = data[:]
weights_act = data['acceptance'][:]
for col in ['likelihood', 'acceptance','omega_b','omega_cdm','100theta_s','tau_reio']:
data.remove_column(col)
nparr_act = np.array(data.as_array().tolist()[:])
pixie_s4 = MCSamples(samples=nparr_act,names = data.colnames, labels = data.colnames, name_tag='PIXIE low_l + S4')
#Triangle plot
g = plots.getSubplotPlotter()
g.triangle_plot([ planck_s4, pixie_planck, pixie_s4], filled=True)
# now we add some boundaries
# P_II^1
for ax in g.subplots[:,2]:
if ax != None:
ax.set_xlim(0,ax.get_xlim()[1])
for ax in g.subplots[2,:]:
if ax != None:
ax.set_ylim(0,ax.get_ylim()[1])
# P_II^2
for ax in g.subplots[:,3]:
if ax != None:
ax.set_xlim(0,ax.get_xlim()[1])
for ax in g.subplots[3,:]:
if ax != None:
ax.set_ylim(0,ax.get_ylim()[1])
plt.savefig('../../figures/nonzero_forecast_all_overplotted.pdf')
plt.show()
| {
"content_hash": "c70b71895cdfb7789698d127fa82dca1",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 129,
"avg_line_length": 29.30841121495327,
"alnum_prop": 0.6817602040816326,
"repo_name": "xzackli/isocurvature_2017",
"id": "089788876591bee16482005ee77f8c5744794229",
"size": "3138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis/plot_triangle/make_only_nonzero_forecasts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "300659"
},
{
"name": "Python",
"bytes": "69029"
},
{
"name": "Shell",
"bytes": "213"
},
{
"name": "TeX",
"bytes": "119428"
}
],
"symlink_target": ""
} |
import sys
from typing import cast, Iterable, List, Tuple, TYPE_CHECKING, Union
from pyspark import since, SparkContext
from pyspark.sql.column import _to_seq, _to_java_column
from py4j.java_gateway import JavaObject
if TYPE_CHECKING:
from pyspark.sql._typing import ColumnOrName, ColumnOrName_
__all__ = ["Window", "WindowSpec"]
def _to_java_cols(cols: Tuple[Union["ColumnOrName", List["ColumnOrName_"]], ...]) -> JavaObject:
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0] # type: ignore[assignment]
assert sc is not None
return _to_seq(sc, cast(Iterable["ColumnOrName"], cols), _to_java_column)
class Window:
"""
Utility functions for defining window in DataFrames.
.. versionadded:: 1.4
Notes
-----
When ordering is not defined, an unbounded window frame (rowFrame,
unboundedPreceding, unboundedFollowing) is used by default. When ordering is defined,
a growing window frame (rangeFrame, unboundedPreceding, currentRow) is used by default.
Examples
--------
>>> # ORDER BY date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
>>> window = Window.orderBy("date").rowsBetween(Window.unboundedPreceding, Window.currentRow)
>>> # PARTITION BY country ORDER BY date RANGE BETWEEN 3 PRECEDING AND 3 FOLLOWING
>>> window = Window.orderBy("date").partitionBy("country").rangeBetween(-3, 3)
"""
_JAVA_MIN_LONG = -(1 << 63) # -9223372036854775808
_JAVA_MAX_LONG = (1 << 63) - 1 # 9223372036854775807
_PRECEDING_THRESHOLD = max(-sys.maxsize, _JAVA_MIN_LONG)
_FOLLOWING_THRESHOLD = min(sys.maxsize, _JAVA_MAX_LONG)
unboundedPreceding: int = _JAVA_MIN_LONG
unboundedFollowing: int = _JAVA_MAX_LONG
currentRow: int = 0
@staticmethod
@since(1.4)
def partitionBy(*cols: Union["ColumnOrName", List["ColumnOrName_"]]) -> "WindowSpec":
"""
Creates a :class:`WindowSpec` with the partitioning defined.
Parameters
----------
cols : str, :class:`Column` or list
names of columns or expressions
"""
sc = SparkContext._active_spark_context
assert sc is not None and sc._jvm is not None
jspec = sc._jvm.org.apache.spark.sql.expressions.Window.partitionBy(_to_java_cols(cols))
return WindowSpec(jspec)
@staticmethod
@since(1.4)
def orderBy(*cols: Union["ColumnOrName", List["ColumnOrName_"]]) -> "WindowSpec":
"""
Creates a :class:`WindowSpec` with the ordering defined.
Parameters
----------
cols : str, :class:`Column` or list
names of columns or expressions
"""
sc = SparkContext._active_spark_context
assert sc is not None and sc._jvm is not None
jspec = sc._jvm.org.apache.spark.sql.expressions.Window.orderBy(_to_java_cols(cols))
return WindowSpec(jspec)
@staticmethod
def rowsBetween(start: int, end: int) -> "WindowSpec":
"""
Creates a :class:`WindowSpec` with the frame boundaries defined,
from `start` (inclusive) to `end` (inclusive).
Both `start` and `end` are relative positions from the current row.
For example, "0" means "current row", while "-1" means the row before
the current row, and "5" means the fifth row after the current row.
We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
and ``Window.currentRow`` to specify special boundary values, rather than using integral
values directly.
A row based boundary is based on the position of the row within the partition.
An offset indicates the number of rows above or below the current row, the frame for the
current row starts or ends. For instance, given a row based sliding frame with a lower bound
offset of -1 and a upper bound offset of +2. The frame for row with index 5 would range from
index 4 to index 7.
.. versionadded:: 2.1.0
Parameters
----------
start : int
boundary start, inclusive.
The frame is unbounded if this is ``Window.unboundedPreceding``, or
any value less than or equal to -9223372036854775808.
end : int
boundary end, inclusive.
The frame is unbounded if this is ``Window.unboundedFollowing``, or
any value greater than or equal to 9223372036854775807.
Examples
--------
>>> from pyspark.sql import Window
>>> from pyspark.sql import functions as func
>>> df = spark.createDataFrame(
... [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")], ["id", "category"])
>>> df.show()
+---+--------+
| id|category|
+---+--------+
| 1| a|
| 1| a|
| 2| a|
| 1| b|
| 2| b|
| 3| b|
+---+--------+
Calculate sum of ``id`` in the range from currentRow to currentRow + 1
in partition ``category``
>>> window = Window.partitionBy("category").orderBy("id").rowsBetween(Window.currentRow, 1)
>>> df.withColumn("sum", func.sum("id").over(window)).sort("id", "category", "sum").show()
+---+--------+---+
| id|category|sum|
+---+--------+---+
| 1| a| 2|
| 1| a| 3|
| 1| b| 3|
| 2| a| 2|
| 2| b| 5|
| 3| b| 3|
+---+--------+---+
"""
if start <= Window._PRECEDING_THRESHOLD:
start = Window.unboundedPreceding
if end >= Window._FOLLOWING_THRESHOLD:
end = Window.unboundedFollowing
sc = SparkContext._active_spark_context
assert sc is not None and sc._jvm is not None
jspec = sc._jvm.org.apache.spark.sql.expressions.Window.rowsBetween(start, end)
return WindowSpec(jspec)
@staticmethod
def rangeBetween(start: int, end: int) -> "WindowSpec":
"""
Creates a :class:`WindowSpec` with the frame boundaries defined,
from `start` (inclusive) to `end` (inclusive).
Both `start` and `end` are relative from the current row. For example,
"0" means "current row", while "-1" means one off before the current row,
and "5" means the five off after the current row.
We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
and ``Window.currentRow`` to specify special boundary values, rather than using integral
values directly.
A range-based boundary is based on the actual value of the ORDER BY
expression(s). An offset is used to alter the value of the ORDER BY expression, for
instance if the current ORDER BY expression has a value of 10 and the lower bound offset
is -3, the resulting lower bound for the current row will be 10 - 3 = 7. This however puts a
number of constraints on the ORDER BY expressions: there can be only one expression and this
expression must have a numerical data type. An exception can be made when the offset is
unbounded, because no value modification is needed, in this case multiple and non-numeric
ORDER BY expression are allowed.
.. versionadded:: 2.1.0
Parameters
----------
start : int
boundary start, inclusive.
The frame is unbounded if this is ``Window.unboundedPreceding``, or
any value less than or equal to max(-sys.maxsize, -9223372036854775808).
end : int
boundary end, inclusive.
The frame is unbounded if this is ``Window.unboundedFollowing``, or
any value greater than or equal to min(sys.maxsize, 9223372036854775807).
Examples
--------
>>> from pyspark.sql import Window
>>> from pyspark.sql import functions as func
>>> df = spark.createDataFrame(
... [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")], ["id", "category"])
>>> df.show()
+---+--------+
| id|category|
+---+--------+
| 1| a|
| 1| a|
| 2| a|
| 1| b|
| 2| b|
| 3| b|
+---+--------+
Calculate sum of ``id`` in the range from ``id`` of currentRow to ``id`` of currentRow + 1
in partition ``category``
>>> window = Window.partitionBy("category").orderBy("id").rangeBetween(Window.currentRow, 1)
>>> df.withColumn("sum", func.sum("id").over(window)).sort("id", "category").show()
+---+--------+---+
| id|category|sum|
+---+--------+---+
| 1| a| 4|
| 1| a| 4|
| 1| b| 3|
| 2| a| 2|
| 2| b| 5|
| 3| b| 3|
+---+--------+---+
"""
if start <= Window._PRECEDING_THRESHOLD:
start = Window.unboundedPreceding
if end >= Window._FOLLOWING_THRESHOLD:
end = Window.unboundedFollowing
sc = SparkContext._active_spark_context
assert sc is not None and sc._jvm is not None
jspec = sc._jvm.org.apache.spark.sql.expressions.Window.rangeBetween(start, end)
return WindowSpec(jspec)
class WindowSpec:
"""
A window specification that defines the partitioning, ordering,
and frame boundaries.
Use the static methods in :class:`Window` to create a :class:`WindowSpec`.
.. versionadded:: 1.4.0
"""
def __init__(self, jspec: JavaObject) -> None:
self._jspec = jspec
def partitionBy(self, *cols: Union["ColumnOrName", List["ColumnOrName_"]]) -> "WindowSpec":
"""
Defines the partitioning columns in a :class:`WindowSpec`.
.. versionadded:: 1.4.0
Parameters
----------
cols : str, :class:`Column` or list
names of columns or expressions
"""
return WindowSpec(self._jspec.partitionBy(_to_java_cols(cols)))
def orderBy(self, *cols: Union["ColumnOrName", List["ColumnOrName_"]]) -> "WindowSpec":
"""
Defines the ordering columns in a :class:`WindowSpec`.
.. versionadded:: 1.4.0
Parameters
----------
cols : str, :class:`Column` or list
names of columns or expressions
"""
return WindowSpec(self._jspec.orderBy(_to_java_cols(cols)))
def rowsBetween(self, start: int, end: int) -> "WindowSpec":
"""
Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive).
Both `start` and `end` are relative positions from the current row.
For example, "0" means "current row", while "-1" means the row before
the current row, and "5" means the fifth row after the current row.
We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
and ``Window.currentRow`` to specify special boundary values, rather than using integral
values directly.
.. versionadded:: 1.4.0
Parameters
----------
start : int
boundary start, inclusive.
The frame is unbounded if this is ``Window.unboundedPreceding``, or
any value less than or equal to max(-sys.maxsize, -9223372036854775808).
end : int
boundary end, inclusive.
The frame is unbounded if this is ``Window.unboundedFollowing``, or
any value greater than or equal to min(sys.maxsize, 9223372036854775807).
"""
if start <= Window._PRECEDING_THRESHOLD:
start = Window.unboundedPreceding
if end >= Window._FOLLOWING_THRESHOLD:
end = Window.unboundedFollowing
return WindowSpec(self._jspec.rowsBetween(start, end))
def rangeBetween(self, start: int, end: int) -> "WindowSpec":
"""
Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive).
Both `start` and `end` are relative from the current row. For example,
"0" means "current row", while "-1" means one off before the current row,
and "5" means the five off after the current row.
We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
and ``Window.currentRow`` to specify special boundary values, rather than using integral
values directly.
.. versionadded:: 1.4.0
Parameters
----------
start : int
boundary start, inclusive.
The frame is unbounded if this is ``Window.unboundedPreceding``, or
any value less than or equal to max(-sys.maxsize, -9223372036854775808).
end : int
boundary end, inclusive.
The frame is unbounded if this is ``Window.unboundedFollowing``, or
any value greater than or equal to min(sys.maxsize, 9223372036854775807).
"""
if start <= Window._PRECEDING_THRESHOLD:
start = Window.unboundedPreceding
if end >= Window._FOLLOWING_THRESHOLD:
end = Window.unboundedFollowing
return WindowSpec(self._jspec.rangeBetween(start, end))
def _test() -> None:
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.window
globs = pyspark.sql.window.__dict__.copy()
spark = SparkSession.builder.master("local[4]").appName("sql.window tests").getOrCreate()
globs["spark"] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.window, globs=globs, optionflags=doctest.NORMALIZE_WHITESPACE
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| {
"content_hash": "f25773aac43e6d8774d2f95ab258e606",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 100,
"avg_line_length": 38.03013698630137,
"alnum_prop": 0.5881420646927454,
"repo_name": "WeichenXu123/spark",
"id": "7bb59f362898c19929f158fa629933e486b5f985",
"size": "14666",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyspark/sql/window.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "58123"
},
{
"name": "Batchfile",
"bytes": "27405"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26338"
},
{
"name": "Dockerfile",
"bytes": "16097"
},
{
"name": "HTML",
"bytes": "42080"
},
{
"name": "HiveQL",
"bytes": "1859465"
},
{
"name": "Java",
"bytes": "4699504"
},
{
"name": "JavaScript",
"bytes": "222842"
},
{
"name": "Jupyter Notebook",
"bytes": "4310512"
},
{
"name": "Makefile",
"bytes": "2379"
},
{
"name": "PLpgSQL",
"bytes": "352609"
},
{
"name": "PowerShell",
"bytes": "4221"
},
{
"name": "Python",
"bytes": "7728379"
},
{
"name": "R",
"bytes": "1286372"
},
{
"name": "ReScript",
"bytes": "240"
},
{
"name": "Roff",
"bytes": "31582"
},
{
"name": "Scala",
"bytes": "43493445"
},
{
"name": "Shell",
"bytes": "241106"
},
{
"name": "Thrift",
"bytes": "2016"
},
{
"name": "q",
"bytes": "111129"
}
],
"symlink_target": ""
} |
import re
from subprocess import Popen, PIPE
import sys
# Super hacky but easier to do than calling Keystone and Heat's APIs
process = Popen(('heat', 'stack-show', 'overcloud'), stdout=PIPE, stderr=PIPE)
output = process.stdout.read()
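# The stack-show output is JSON-ish text; the checks below look for parameter
# keys of the following shape (values here are hypothetical):
#
#   "Controller-1::count": "3",
#   "Controller-1::NtpServer": "pool.ntp.org",
#   "Compute-1::NtpServer": "pool.ntp.org",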
# If we're in HA mode (i.e. more than one controller), make sure the
# NtpServer parameters are set.
controller_count = int(re.search(r'"Controller-[^":]*::count"\s*:\s*"([^"]*)"', output).group(1))
if controller_count > 1:
print "This is a HA setup, checking whether NTP is configured."
ntp_servers = re.findall(r'"(Controller|Compute)-[^":]*::NtpServer"\s*:\s*"([^"]*)"', output)
if all(t[1] for t in ntp_servers):
print "SUCCESS: Controller and Compute nodes are configured with NTP."
else:
print "ERROR: NTP server is not configured for Controller or Compute nodes!"
sys.exit(1)
else:
print "SUCESS: This is not a HA setup, we don't need NTP configured."
| {
"content_hash": "475d6729045c97a122f22b7595948181",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 97,
"avg_line_length": 42.86363636363637,
"alnum_prop": 0.6744432661717922,
"repo_name": "coolsvap/clapper",
"id": "e092d7108d0382202b9146167e8c82d975540eb6",
"size": "943",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ntp-ha-test/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1526"
},
{
"name": "Python",
"bytes": "60690"
},
{
"name": "Shell",
"bytes": "3142"
}
],
"symlink_target": ""
} |
"""
Provides BaseConfig - a class that makes a config file act like a dictionary
myconf = BaseConfig("myfile.ini")
val = myconf["mysection"]["mykey"]
for each value you read, it tries to return int, failing this float,
failing this returns bool for special string values 'true' / 'false'
(case insensitive test), failing this returns string
added feature is that if the value does not exist in the config,
then you get None instead of an exception
Another added feature is that in any string value, you can have a substring
which is substituted using another variable, in the format
{{var:[section]key}}.
Substitutions also include other substitutions. Documenting
by example:
[mysection]
topdir=/some/path
[othersection]
dir1={{var:[mysection]topdir}}/some/subdirectory
dir2={{var:[othersection]dir1}}/bar
It is entirely the user's responsibility not to set up loops when doing this.
You can also have expression substitutions, in the format {{eval:python_code}}
e.g. {{eval:1+1}}. These are evaluated with eval. They do not have to return
a string type, although in that case they should constitute the whole of the
value as otherwise there will be an error from concatenating a string with
another type. Expression substitutions are evaluated after variable substitutions.
Another added feature is compulsory variables. The presence of these variables
will be checked for at the time that the config is read, and an exception raised
if not. The compulsory variable section is empty in this class, but may be
overridden to useful effect in a subclass.
Variables can be overridden via environment vars called
CONFIG__<section>__<key> e.g. in the example above setting
CONFIG__mysection__topdir=/some/other/path will override
any value in the file and will also find its way into any substitutions
"""
import ConfigParser
import sys
import os
import time
import re
import settings
class ConfigSection(dict):
"""
A class to make a section act as a dictionary.
This class is unlikely to be instantiated directly by calling code.
"""
_varSubsRe = re.compile("\$\(([a-zA-Z_]+)\:(.*?)\)")
_evalSubsRe = re.compile("\{\{eval:(.*?)\}\}")
def __init__(self, config=None, section=None, parent=None):
d = super(ConfigSection, self)
d.__init__()
self.parent = parent # used for substitutions
self.section = section
if config:
for opt in config.options(section):
value = config.get(section, opt)
d.__setitem__(opt, value)
def __getitem__(self, key):
return self.lookup(key)
def lookup(self, key, default=None):
"""
Look up a single key, doing any substitutions as described in the
module-level docstring.
"""
value = None
if self.section:
try:
value = os.environ["CONFIG__%s__%s" % (self.section, key)]
except KeyError:
pass
if value == None:
d = super(ConfigSection, self)
try:
value = d.__getitem__(key)
except KeyError:
pass
if value != None:
return self.mapValue(value)
else:
return default
def mapValue(self, value):
"""
map a string value from the config file to something that has
        a potentially different type and also has the special tokens
substituted
"""
if not isinstance(value, str):
return value
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
upper = value.upper()
if upper == "TRUE": return True
if upper == "FALSE": return False
if value == "None": return None
value = self.doSubs(value, self.parent)
return value
def getVarSub(self, config, section, key):
"""
get substitution text that will be used to replace a
{{var:[section]key}} token - i.e. the variable referred to
or else the empty string
"""
if config:
try:
return config[section][key]
except KeyError:
pass
return ""
def doSub(self, str_, matchobj, sub):
"""
Given a string, a re.match object and a substitution value,
return the result of substituting it.
The substitution value should normally be a string, but in the
case where the match constitutes the whole string, then just the
substitution value itself, so it can then be another data type.
"""
string1 = str_[ : matchobj.start()]
string2 = str_[matchobj.end() : ]
if (not string1) and (not string2):
return sub
return string1 + sub + string2
def doSubs(self, str_, config):
"""
Given a string and a config object, return a revised value after
expanding all special tokens (i.e. if none are found then just get
the original string back)
"""
while True:
m = self._varSubsRe.search(str_)
if not m:
break
section = m.group(1)
key = m.group(2)
sub = self.getVarSub(config, section, key)
str_ = self.doSub(str_, m, sub)
while isinstance(str_, str):
m = self._evalSubsRe.search(str_)
if not m:
break
code = m.group(1)
try:
sub = eval(code)
except:
sub = ""
str_ = self.doSub(str_, m, sub)
return str_
class BaseConfig(dict):
"""
See module-level doc for details.
Note not "AbstractConfig" - this can meaningfully be instantiated,
although in fact GlobalConfig and DatasetConfig do subclass it to add
extra functionality.
"""
compulsoryVars = []
def __init__(self, config_file_path, missing_ok = False):
"""
Instantiate based on given path to config file
"""
self.d = super(BaseConfig, self)
self.d.__init__()
self.config_file_path = config_file_path
self.missing_ok = missing_ok
self.settings = []
self.reread()
def __getitem__(self, key):
"""
Return a ConfigSection object based on the specified section of
the file (or an empty ConfigSection if none)
"""
if not self.d.has_key(key):
self.d.__setitem__(key, ConfigSection()) # create an empty section
return self.d.__getitem__(key)
def _readConfig(self, config):
retval = config.read(self.config_file_path)
if not retval and not self.missing_ok:
raise RuntimeError("Could not read config file %s" % self.config_file_path)
def reread(self):
"""
Unconditionally reread the config file. Returns nothing if file disappeared.
"""
if not os.path.exists(self.config_file_path):
return
self.clear()
config = ConfigParser.ConfigParser()
self._readConfig(config)
for section in config.sections():
self.d.__setitem__(section, ConfigSection(config, section, parent=self))
# go through the default settings and update the config with the
# defaults if none have been set so far
for s in self.settings:
if not self.checkSet(s[0] + "." + s[1]):
if not config.has_section(s[0]):
config.add_section(s[0])
self.d.__setitem__(s[0], ConfigSection(config, s[0], parent=self))
self.set(s[0] + "." + s[1], s[2])
self.time_last_read = time.time()
self.checkCompulsoryVars()
def readDefaults(self):
'''
Read global default values from python settings.py file
'''
self.settings = []
defaults = dir(settings)
for default in defaults:
if not default.startswith("__"):
c = getattr(settings, default)
d = default.split("_")
for k in c:
e = d[0]
self.settings.append((e, k, c[k]))
def checkCompulsoryVars(self):
"""
Raise an exception if any compulsory variable does not exist or
        has the wrong type. Note that there are no compulsory variables except
where a subclass (e.g. GlobalConfig / DatasetConfig) defines some.
"""
for sect, varnames in self.compulsoryVars:
s = self[sect]
for v in varnames:
if isinstance(v, str):
varname, vartype = v, None
else:
varname, vartype = v
if varname not in s:
raise Exception("Compulsory variable %s::%s not in %s" %
(sect, varname, self.config_file_path))
value = s[varname]
type_ = type(value)
if vartype and not isinstance(value, vartype):
raise Exception("Compulsory variable %s::%s in %s has type %s (value %s), should be %s" %
(sect, varname, self.config_file_path, type_, value, vartype))
def rereadIfUpdated(self):
"""
Re-reads the config file, if necessary.
Return value is whether it actually reread or not
"""
# note: duplicates a test in FileUtils but prefer not to depend on that module here
if not os.path.exists(self.config_file_path):
# config has been deleted, maybe intentionally, so don't reread
return False
mtime = os.path.getmtime(self.config_file_path)
if mtime + 1 > self.time_last_read:
self.reread()
return True
else:
return False
# set a value for a key
def set(self, key, value):
try:
sk = key.split(".")
a = self.d.__getitem__(sk[0])
#b = a[sk[1]]
a[sk[1]] = value
self.d.__setitem__(sk[0], a);
except Exception, ex:
print str(ex)
# get the value for a key
def get(self, key):
b = None
try:
sk = key.split(".")
a = self.d.__getitem__(sk[0])
b = a[sk[1]]
except Exception, ex:
print str(ex)
return b
    # check if a key is set and has some sort of value
def checkSet(self, key):
try:
a = self.get(key)
if a:
return True
return False
except:
return False
    # if a key is set (with the appropriate value),
    # then ensure the other relevant keys are set
def checkSetIf(self, key, val, keys):
rv = []
if self.checkSet(key) == True and self.get(key) != val:
for k in keys:
if self.checkSet(k) == False:
rv.append(k)
if len(rv) == 0:
return None
return rv
return None
def dump(self, stream = sys.stdout):
"""
For debugging.
"""
stream.write("\n===Config dump===\n")
stream.write("Filename = %s\n" % self.config_file_path)
sections = self.keys()
sections.sort()
for section in sections:
s = self[section]
stream.write("[%s]\n" % section)
keys = s.keys()
keys.sort()
for k in keys:
stream.write(" %s = %s (%s)\n" % (k, s[k], type(s[k]).__name__))
stream.write("===End of config dump===\n\n")
if __name__ == '__main__':
for file in ["../conf/dataset_hadgem2_2xco2.ini", "../conf/global.ini"]:
a = BaseConfig(file)
# main test - can we dump the config
a.dump()
print "these should be None:"
print a["asdadsasklejklj"]["adsfasdf"]
for sect in a:
print a[sect]["asdfasdfasdfad"]
# test we can put stuff to existing and non-existing section
print "Put test:"
sects = [a.keys()[0], "mysect"]
for s in sects:
for k in ["username", "mykey"]:
print s, k, a[s][k],
a[s][k] = "foo"
print a[s][k]
print "Reread test:"
# rereading the config file will wipe the value that we wrote, but we are calling
# rereadIfUpdated(), so it will only happen if the file modification time is updated
a["rsync"]["cmd"] = "FOO"
print a["rsync"]["cmd"]
a.rereadIfUpdated()
print a["rsync"]["cmd"]
os.system("touch %s" % file)
a.rereadIfUpdated()
print a["rsync"]["cmd"]
| {
"content_hash": "cf14ec84a8a8bcae762e23f64aa9b2e0",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 109,
"avg_line_length": 32.40841584158416,
"alnum_prop": 0.551210570533873,
"repo_name": "cedadev/mistamover",
"id": "5c5be2e06126c7ede167ec26ca7eadce8cf08b22",
"size": "13298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/Config/BaseConfig.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "Python",
"bytes": "268174"
},
{
"name": "Shell",
"bytes": "424"
}
],
"symlink_target": ""
} |
import pytest
from selenium.webdriver.common.by import By
@pytest.mark.xfail_safari
def test_should_fire_click_event_when_clicking(driver, pages):
pages.load("javascriptPage.html")
_click_on_element_which_records_events(driver)
_assert_event_fired(driver, "click")
@pytest.mark.xfail_safari
def test_should_fire_mouse_down_event_when_clicking(driver, pages):
pages.load("javascriptPage.html")
_click_on_element_which_records_events(driver)
_assert_event_fired(driver, "mousedown")
@pytest.mark.xfail_safari
def test_should_fire_mouse_up_event_when_clicking(driver, pages):
pages.load("javascriptPage.html")
_click_on_element_which_records_events(driver)
_assert_event_fired(driver, "mouseup")
@pytest.mark.xfail_safari
def test_should_issue_mouse_down_events(driver, pages):
pages.load("javascriptPage.html")
driver.find_element(By.ID, "mousedown").click()
result = driver.find_element(By.ID, "result").text
assert result == "mouse down"
@pytest.mark.xfail_safari
def test_should_issue_click_events(driver, pages):
pages.load("javascriptPage.html")
driver.find_element(By.ID, "mouseclick").click()
result = driver.find_element(By.ID, "result").text
assert result == "mouse click"
@pytest.mark.xfail_safari
def test_should_issue_mouse_up_events(driver, pages):
pages.load("javascriptPage.html")
driver.find_element(By.ID, "mouseup").click()
result = driver.find_element(By.ID, "result").text
assert result == "mouse up"
@pytest.mark.xfail_safari
def test_mouse_events_should_bubble_up_to_containing_elements(driver, pages):
pages.load("javascriptPage.html")
driver.find_element(By.ID, "child").click()
result = driver.find_element(By.ID, "result").text
assert result == "mouse down"
@pytest.mark.xfail_safari
def test_should_emit_on_change_events_when_selecting_elements(driver, pages):
pages.load("javascriptPage.html")
select = driver.find_element(By.ID, 'selector')
options = select.find_elements(By.TAG_NAME, 'option')
initialTextValue = driver.find_element(By.ID, "result").text
select.click()
assert driver.find_element(By.ID, "result").text == initialTextValue
options[1].click()
assert driver.find_element(By.ID, "result").text == "bar"
@pytest.mark.xfail_safari
def test_should_emit_on_change_events_when_changing_the_state_of_acheckbox(driver, pages):
pages.load("javascriptPage.html")
checkbox = driver.find_element(By.ID, "checkbox")
checkbox.click()
assert driver.find_element(By.ID, "result").text == "checkbox thing"
def test_should_emit_click_event_when_clicking_on_atext_input_element(driver, pages):
pages.load("javascriptPage.html")
clicker = driver.find_element(By.ID, "clickField")
clicker.click()
assert clicker.get_attribute("value") == "Clicked"
@pytest.mark.xfail_safari
def test_clearing_an_element_should_cause_the_on_change_handler_to_fire(driver, pages):
pages.load("javascriptPage.html")
element = driver.find_element(By.ID, "clearMe")
element.clear()
result = driver.find_element(By.ID, "result")
assert result.text == "Cleared"
# TODO Currently Failing and needs fixing
# def test_sending_keys_to_another_element_should_cause_the_blur_event_to_fire(driver, pages):
# pages.load("javascriptPage.html")
# element = driver.find_element(By.ID, "theworks")
# element.send_keys("foo")
# element2 = driver.find_element(By.ID, "changeable")
# element2.send_keys("bar")
# _assertEventFired(driver, "blur")
# TODO Currently Failing and needs fixing
# def test_sending_keys_to_an_element_should_cause_the_focus_event_to_fire(driver, pages):
# pages.load("javascriptPage.html")
# element = driver.find_element(By.ID, "theworks")
# element.send_keys("foo")
# _assertEventFired(driver, "focus")
def _click_on_element_which_records_events(driver):
driver.find_element(By.ID, "plainButton").click()
def _assert_event_fired(driver, eventName):
result = driver.find_element(By.ID, "result")
text = result.text
assert eventName in text, "No " + eventName + " fired: " + text
| {
"content_hash": "3e8b9db45325751aa3e5548be5dfc5cc",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 94,
"avg_line_length": 34.56666666666667,
"alnum_prop": 0.7128736740597879,
"repo_name": "HtmlUnit/selenium",
"id": "ae63df258a1d26caa046154bb4323cf1d8ce51e1",
"size": "4936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/test/selenium/webdriver/common/correct_event_firing_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP.NET",
"bytes": "825"
},
{
"name": "Batchfile",
"bytes": "4443"
},
{
"name": "C",
"bytes": "82917"
},
{
"name": "C#",
"bytes": "2977660"
},
{
"name": "C++",
"bytes": "2282643"
},
{
"name": "CSS",
"bytes": "1049"
},
{
"name": "Dockerfile",
"bytes": "1737"
},
{
"name": "HTML",
"bytes": "1379423"
},
{
"name": "Java",
"bytes": "6302205"
},
{
"name": "JavaScript",
"bytes": "2533049"
},
{
"name": "Makefile",
"bytes": "4655"
},
{
"name": "Python",
"bytes": "974345"
},
{
"name": "Ragel",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "1004097"
},
{
"name": "Shell",
"bytes": "30004"
},
{
"name": "Starlark",
"bytes": "395776"
},
{
"name": "TypeScript",
"bytes": "110634"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
} |
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
extra = {}
try:
from nowin_core.scripts import setup_cmd
extra['cmdclass'] = {
'initdb': setup_cmd.InitdbCommand,
'shell': setup_cmd.ShellCommand
}
except ImportError:
pass
tests_require = [
'mock',
'pytest',
'pytest-cov',
'pytest-xdist',
'pytest-capturelog',
'pytest-mock',
]
setup(
name='nowin_core',
packages=find_packages(),
install_requires=[
'psutil',
'SQLAlchemy',
'zope.sqlalchemy',
'transaction',
'nose',
'PyYaml',
'Twisted',
'zope.sqlalchemy',
],
tests_require=[
'nose-cov'
],
extras_require=dict(
tests=tests_require,
),
**extra
)
| {
"content_hash": "1c6c8faea8f46d30026ed181f0203309",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 44,
"avg_line_length": 18.11111111111111,
"alnum_prop": 0.5631901840490797,
"repo_name": "g0v/nowin_core",
"id": "a2891035cca247696c9de7de9b7ea79078c8d9b2",
"size": "815",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "299101"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pastebin', '0007_auto_20170129_1536'),
]
operations = [
migrations.CreateModel(
name='UserPasteStats',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('paste_count', models.IntegerField(verbose_name='paste count')),
('bio', models.TextField()),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| {
"content_hash": "a5b5b727fcb5b5cc22c64b42fbf72d91",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 121,
"avg_line_length": 33.48,
"alnum_prop": 0.6236559139784946,
"repo_name": "johannessarpola/django-pastebin",
"id": "779e663f839f9efe51e7c4e47ff8e9cdd2a4251f",
"size": "910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pastebin/migrations/0008_userpastestats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "139"
},
{
"name": "HTML",
"bytes": "9950"
},
{
"name": "JavaScript",
"bytes": "3363"
},
{
"name": "Python",
"bytes": "56138"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('topics', '0083_auto_20171019_0024'),
]
operations = [
migrations.CreateModel(
name='ClassroomResource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(max_length=80, unique=True)),
('description', models.CharField(max_length=100)),
],
),
migrations.RemoveField(
model_name='lesson',
name='classroom_resources',
),
migrations.AddField(
model_name='lesson',
name='classroom_resources',
field=models.ManyToManyField(to='topics.ClassroomResource'),
),
]
| {
"content_hash": "62d413ece6dde16c0996f05c9455b032",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 114,
"avg_line_length": 29.9,
"alnum_prop": 0.5674470457079153,
"repo_name": "uccser/cs-unplugged",
"id": "543b9a2ace933ccb4c0e633f3bb4ebddfbb81102",
"size": "970",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "csunplugged/topics/migrations/0084_auto_20171019_2324.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7927"
},
{
"name": "HTML",
"bytes": "432891"
},
{
"name": "JavaScript",
"bytes": "104806"
},
{
"name": "Python",
"bytes": "1257568"
},
{
"name": "SCSS",
"bytes": "67560"
},
{
"name": "Shell",
"bytes": "12461"
}
],
"symlink_target": ""
} |
"""Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .palettes import color_palette
__all__ = ["tsplot"]
def tsplot(data, time=None, unit=None, condition=None, value=None,
err_style="ci_band", ci=68, interpolate=True, color=None,
estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
legend=True, ax=None, **kwargs):
"""Plot one or more timeseries with flexible representation of uncertainty.
This function is intended to be used with data where observations are
nested within sampling units that were measured at multiple timepoints.
It can take data specified either as a long-form (tidy) DataFrame or as an
    ndarray with dimensions (unit, time). The interpretation of some of the
other parameters changes depending on the type of object passed as data.
Parameters
----------
data : DataFrame or ndarray
Data for the plot. Should either be a "long form" dataframe or an
array with dimensions (unit, time, condition). In both cases, the
condition field/dimension is optional. The type of this argument
determines the interpretation of the next few parameters. When
using a DataFrame, the index has to be sequential.
time : string or series-like
Either the name of the field corresponding to time in the data
DataFrame or x values for a plot when data is an array. If a Series,
the name will be used to label the x axis.
unit : string
Field in the data DataFrame identifying the sampling unit (e.g.
subject, neuron, etc.). The error representation will collapse over
units at each time/condition observation. This has no role when data
is an array.
value : string
Either the name of the field corresponding to the data values in
the data DataFrame (i.e. the y coordinate) or a string that forms
the y axis label when data is an array.
condition : string or Series-like
Either the name of the field identifying the condition an observation
falls under in the data DataFrame, or a sequence of names with a length
equal to the size of the third dimension of data. There will be a
separate trace plotted for each condition. If condition is a Series
with a name attribute, the name will form the title for the plot
legend (unless legend is set to False).
err_style : string or list of strings or None
Names of ways to plot uncertainty across units from set of
{ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
Can use one or more than one method.
ci : float or list of floats in [0, 100] or "sd" or None
Confidence interval size(s). If a list, it will stack the error plots
for each confidence interval. If ``"sd"``, show standard deviation of
        the observations instead of bootstrapped confidence intervals. Only
relevant for error styles with "ci" in the name.
interpolate : boolean
Whether to do a linear interpolation between each timepoint when
plotting. The value of this parameter also determines the marker
used for the main plot traces, unless marker is specified as a keyword
argument.
color : seaborn palette or matplotlib color name or dictionary
Palette or color for the main plots and error representation (unless
plotting by unit, which can be separately controlled with err_palette).
If a dictionary, should map condition name to color spec.
estimator : callable
Function to determine central tendency and to pass to bootstrap
must take an ``axis`` argument.
n_boot : int
Number of bootstrap iterations.
err_palette : seaborn palette
Palette name or list of colors used when plotting data for each unit.
err_kws : dict, optional
Keyword argument dictionary passed through to matplotlib function
        generating the error plot.
legend : bool, optional
If ``True`` and there is a ``condition`` variable, add a legend to
the plot.
ax : axis object, optional
Plot in given axis; if None creates a new figure
kwargs :
Other keyword arguments are passed to main plot() call
Returns
-------
ax : matplotlib axis
axis with plot data
Examples
--------
Plot a trace with translucent confidence bands:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(22)
>>> import seaborn as sns; sns.set(color_codes=True)
>>> x = np.linspace(0, 15, 31)
>>> data = np.sin(x) + np.random.rand(10, 31) + np.random.randn(10, 1)
>>> ax = sns.tsplot(data=data)
Plot a long-form dataframe with several conditions:
.. plot::
:context: close-figs
>>> gammas = sns.load_dataset("gammas")
>>> ax = sns.tsplot(time="timepoint", value="BOLD signal",
... unit="subject", condition="ROI",
... data=gammas)
Use error bars at the positions of the observations:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="ci_bars", color="g")
Don't interpolate between the observations:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> ax = sns.tsplot(data=data, err_style="ci_bars", interpolate=False)
Show multiple confidence bands:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, ci=[68, 95], color="m")
Show the standard deviation of the observations:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, ci="sd")
Use a different estimator:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, estimator=np.median)
Show each bootstrap resample:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="boot_traces", n_boot=500)
Show the trace from each sampling unit:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="unit_traces")
"""
msg = (
"The `tsplot` function is deprecated and will be removed in a future "
"release. Please update your code to use the new `lineplot` function."
)
warnings.warn(msg, UserWarning)
# Sort out default values for the parameters
if ax is None:
ax = plt.gca()
if err_kws is None:
err_kws = {}
# Handle different types of input data
if isinstance(data, pd.DataFrame):
xlabel = time
ylabel = value
# Condition is optional
if condition is None:
condition = pd.Series(1, index=data.index)
legend = False
legend_name = None
n_cond = 1
else:
legend = True and legend
legend_name = condition
n_cond = len(data[condition].unique())
else:
data = np.asarray(data)
# Data can be a timecourse from a single unit or
# several observations in one condition
if data.ndim == 1:
data = data[np.newaxis, :, np.newaxis]
elif data.ndim == 2:
data = data[:, :, np.newaxis]
n_unit, n_time, n_cond = data.shape
# Units are experimental observations. Maybe subjects, or neurons
if unit is None:
units = np.arange(n_unit)
unit = "unit"
units = np.repeat(units, n_time * n_cond)
ylabel = None
# Time forms the xaxis of the plot
if time is None:
times = np.arange(n_time)
else:
times = np.asarray(time)
xlabel = None
if hasattr(time, "name"):
xlabel = time.name
time = "time"
times = np.tile(np.repeat(times, n_cond), n_unit)
# Conditions split the timeseries plots
if condition is None:
conds = range(n_cond)
legend = False
if isinstance(color, dict):
err = "Must have condition names if using color dict."
raise ValueError(err)
else:
conds = np.asarray(condition)
legend = True and legend
if hasattr(condition, "name"):
legend_name = condition.name
else:
legend_name = None
condition = "cond"
conds = np.tile(conds, n_unit * n_time)
# Value forms the y value in the plot
if value is None:
ylabel = None
else:
ylabel = value
value = "value"
# Convert to long-form DataFrame
data = pd.DataFrame(dict(value=data.ravel(),
time=times,
unit=units,
cond=conds))
# Set up the err_style and ci arguments for the loop below
if isinstance(err_style, string_types):
err_style = [err_style]
elif err_style is None:
err_style = []
if not hasattr(ci, "__iter__"):
ci = [ci]
# Set up the color palette
if color is None:
current_palette = utils.get_color_cycle()
if len(current_palette) < n_cond:
colors = color_palette("husl", n_cond)
else:
colors = color_palette(n_colors=n_cond)
elif isinstance(color, dict):
colors = [color[c] for c in data[condition].unique()]
else:
try:
colors = color_palette(color, n_cond)
except ValueError:
color = mpl.colors.colorConverter.to_rgb(color)
colors = [color] * n_cond
# Do a groupby with condition and plot each trace
c = None
for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):
df_c = df_c.pivot(unit, time, value)
x = df_c.columns.values.astype(np.float)
# Bootstrap the data for confidence intervals
if "sd" in ci:
est = estimator(df_c.values, axis=0)
sd = np.std(df_c.values, axis=0)
cis = [(est - sd, est + sd)]
boot_data = df_c.values
else:
boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,
axis=0, func=estimator)
cis = [utils.ci(boot_data, v, axis=0) for v in ci]
central_data = estimator(df_c.values, axis=0)
# Get the color for this condition
color = colors[c]
# Use subroutines to plot the uncertainty
for style in err_style:
# Allow for null style (only plot central tendency)
if style is None:
continue
# Grab the function from the global environment
try:
plot_func = globals()["_plot_%s" % style]
except KeyError:
raise ValueError("%s is not a valid err_style" % style)
# Possibly set up to plot each observation in a different color
if err_palette is not None and "unit" in style:
orig_color = color
color = color_palette(err_palette, len(df_c.values))
# Pass all parameters to the error plotter as keyword args
plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
boot_data=boot_data,
central_data=central_data,
color=color, err_kws=err_kws)
# Plot the error representation, possibly for multiple cis
for ci_i in cis:
plot_kwargs["ci"] = ci_i
plot_func(**plot_kwargs)
if err_palette is not None and "unit" in style:
color = orig_color
# Plot the central trace
kwargs.setdefault("marker", "" if interpolate else "o")
ls = kwargs.pop("ls", "-" if interpolate else "")
kwargs.setdefault("linestyle", ls)
label = cond if legend else "_nolegend_"
ax.plot(x, central_data, color=color, label=label, **kwargs)
if c is None:
raise RuntimeError("Invalid input data for tsplot.")
# Pad the sides of the plot only when not interpolating
ax.set_xlim(x.min(), x.max())
x_diff = x[1] - x[0]
if not interpolate:
ax.set_xlim(x.min() - x_diff, x.max() + x_diff)
# Add the plot labels
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if legend:
ax.legend(loc=0, title=legend_name)
return ax
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
"""Plot translucent error bands around the central tendancy."""
low, high = ci
if "alpha" not in err_kws:
err_kws["alpha"] = 0.2
ax.fill_between(x, low, high, facecolor=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
"""Plot error bars at each data point."""
for x_i, y_i, (low, high) in zip(x, central_data, ci.T):
ax.plot([x_i, x_i], [low, high], color=color,
solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
"""Plot 250 traces from bootstrap."""
err_kws.setdefault("alpha", 0.25)
err_kws.setdefault("linewidth", 0.25)
if "lw" in err_kws:
err_kws["linewidth"] = err_kws.pop("lw")
ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
"""Plot a trace for each observation in the original data."""
if isinstance(color, list):
if "alpha" not in err_kws:
err_kws["alpha"] = .5
for i, obs in enumerate(data):
ax.plot(x, obs, color=color[i], label="_nolegend_", **err_kws)
else:
if "alpha" not in err_kws:
err_kws["alpha"] = .2
ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
"""Plot each original data point discretely."""
if isinstance(color, list):
for i, obs in enumerate(data):
ax.plot(x, obs, "o", color=color[i], alpha=0.8, markersize=4,
label="_nolegend_", **err_kws)
else:
ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
"""Plot the kernal density estimate of the bootstrap distribution."""
kwargs.pop("data")
_ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
"""Plot the kernal density estimate over the sample."""
_ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
"""Upsample over time and plot a KDE of the bootstrap distribution."""
kde_data = []
y_min, y_max = data.min(), data.max()
y_vals = np.linspace(y_min, y_max, 100)
upsampler = interpolate.interp1d(x, data)
data_upsample = upsampler(np.linspace(x.min(), x.max(), 100))
for pt_data in data_upsample.T:
pt_kde = stats.kde.gaussian_kde(pt_data)
kde_data.append(pt_kde(y_vals))
kde_data = np.transpose(kde_data)
rgb = mpl.colors.ColorConverter().to_rgb(color)
img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
img[:, :, :3] = rgb
kde_data /= kde_data.max(axis=0)
kde_data[kde_data > 1] = 1
img[:, :, 3] = kde_data
ax.imshow(img, interpolation="spline16", zorder=2,
extent=(x.min(), x.max(), y_min, y_max),
aspect="auto", origin="lower")
| {
"content_hash": "e4dfad5ab1f4a4edd70f22404a1830d0",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 79,
"avg_line_length": 35.35462555066079,
"alnum_prop": 0.5923618466139181,
"repo_name": "petebachant/seaborn",
"id": "a3b25bf457929357c97fc5af9b699978da9bf981",
"size": "16051",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "seaborn/timeseries.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "342"
},
{
"name": "Python",
"bytes": "652614"
}
],
"symlink_target": ""
} |
default_app_config = 't4proj.apps.stats.apps.StatsConfig' | {
"content_hash": "8d55021b624ea70a700eacb431b7910b",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 57,
"avg_line_length": 57,
"alnum_prop": 0.8070175438596491,
"repo_name": "mivanov-utwente/t4proj",
"id": "f1aeca083731ed0bdd451842a014bee412fca0cc",
"size": "81",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "t4proj/apps/stats/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "451384"
},
{
"name": "HTML",
"bytes": "44876"
},
{
"name": "JavaScript",
"bytes": "545288"
},
{
"name": "Python",
"bytes": "25492"
}
],
"symlink_target": ""
} |
PROGRESS_BAR_LENGTH = 40
def generate_loading_string(completed_tasks, total_tasks):
""" <percentage completed>% [< -- based on percentage completion>] Completed/Total
"""
try:
fraction_completed = (completed_tasks / total_tasks)
    except ZeroDivisionError:
        fraction_completed = 1  # To avoid division by zero when total_tasks == 0
percentage_complete = fraction_completed * 100
dashes = int(PROGRESS_BAR_LENGTH * fraction_completed)
blanks = PROGRESS_BAR_LENGTH - dashes
bar = "[" + "-" * dashes + ">" + " " * blanks + "]"
fraction_display = "%s/%s" % (completed_tasks, total_tasks)
loading_string = "%s%% %s %s" % (percentage_complete, bar, fraction_display)
return loading_string
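# Illustrative usage sketch (not part of the original module); the task counts
# below are made up.
if __name__ == "__main__":
    for completed in (0, 10, 17):
        print(generate_loading_string(completed, 17))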
| {
"content_hash": "815ce2736052f12fbbe5df49327229db",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 87,
"avg_line_length": 41.35294117647059,
"alnum_prop": 0.6443812233285917,
"repo_name": "CodingVanGogh/parallelization3",
"id": "9e54a75e7c99889154abc00673671bb2e2ce7adf",
"size": "703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plmap3/progress_bar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6574"
}
],
"symlink_target": ""
} |
import sys
import os
from xml.etree import ElementTree
import json
# Import vendor modules from a subdirectory
here = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(here, "./vendored"))
import requests
from bs4 import BeautifulSoup
import boto3
if os.environ.get("IN_AWS", "false").lower() == "true":
from aws_xray_sdk.core import xray_recorder # noqa
from aws_xray_sdk.core import patch_all # noqa
patch_all()
xray_recorder.configure(sampling=False)
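# Note: the @xray_recorder.capture decorators below reference xray_recorder
# unconditionally, so this module assumes IN_AWS=true (as when running in
# Lambda); importing it without that flag set raises a NameError.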
# Download RSS feed and parse news entries
@xray_recorder.capture("get_feed_xml")
def get_feed_xml():
url = "http://feeds.feedburner.com/ampparit-uutiset"
response = requests.get(url, timeout=5)
ns = {"atom": "http://www.w3.org/2005/Atom"}
feed = ElementTree.fromstring(response.content)
print(f"Feed last updated {feed.find('atom:updated', ns).text}")
id_prefix = "id="
entries = [
{
"id": entry.find("atom:id", ns).text.split(id_prefix, 1)[1],
"title": entry.find("atom:title", ns).text,
"updated": entry.find("atom:updated", ns).text,
"feed_url": entry.find("atom:link", ns).attrib.get("href"),
"author": entry.find("atom:author", ns).find("atom:name", ns).text
}
for entry in feed.findall("atom:entry", ns)
]
print(f"Parsed {len(entries)} items")
return entries
# Scrape given html to plaintext
@xray_recorder.capture("parse_text")
def parse_text(html, source):
soup = BeautifulSoup(html, "html.parser")
print("Parsing content from source " + source)
xray_recorder.current_subsegment().put_annotation('source_parser', source)
class_parsers = {
"Aamulehti": {"css": "content--main"},
"Demokraatti.fi": {"css": "post-content", "parent": "section"},
"Iltalehti": {"css": "article-body"},
"Kainuun Sanomat": {"css": "Teksti"},
"Kaleva": {"css": "article__text"},
"Karjalainen": {"css": "itemBody"},
"Lapin Kansa": {"css": "content--main"},
"Mikrobitti.fi": {"css": "post-content"},
"Mobiili.fi": {"css": "blogcontent"},
"MTV.fi": {"css": "article"},
"Pohjalainen": {"css": "article__full", "parent": "article"},
"Savon Sanomat": {"css": "article__body"},
"Seura": {"css": "content__body"},
"Suomenmaa": {"css": "ArticleText"},
"Talouselämä": {"css": "article-body"},
"Tivi": {"css": "article-body"},
"Verkkouutiset": {"css": "entry-content"}
}
# Returns all child tags of the parent tag which has a specific css class
def children(css, parent="div", child="p"):
parent_node = soup.find(parent, class_=css)
if parent_node:
return parent_node.find_all(child)
else:
return []
text = ""
if source in class_parsers:
for e in children(**class_parsers[source]):
text += e.get_text() + " "
elif source == "Yle":
for e in children("yle__article__content") or children("ydd-article__body"):
text += e.get_text() + " "
elif source == "Uusi Suomi":
mess = soup.find("div", class_="field-name-body").find("div", class_="field-item")
for script in mess.find_all("script"):
script.decompose()
for e in mess.find_all("div"):
text += e.get_text() + " "
elif source in ["Ilta-Sanomat", "Taloussanomat"]:
mess = soup.find("div", class_="body")
for script in mess.find_all("script"):
script.decompose()
for script in mess.select(".hidden"):
script.decompose()
text = mess.get_text()
else:
print("Fallback to crude parser")
for e in soup.find_all("p"):
text += e.get_text() + " "
print(f"Parsed {len(text)} bytes of plaintext")
return text
# Fetch page content and return object with parsed plaintext
def get_content(item):
if item["author"] in ["Kauppalehti"]:
print("Dropping unsupported source " + item["author"])
return None
try:
url = item["feed_url"]
xray = xray_recorder.begin_subsegment("get url")
response = requests.get(url, timeout=5)
xray.put_annotation('html_size', len(response.content))
xray_recorder.end_subsegment()
except Exception as e:
print(e)
xray_recorder.end_subsegment()
return None
if response.status_code == 404:
print("Feed link is stale")
return None
else:
print(f"Fetched {len(response.content)} bytes of HTML from {response.url}")
item["content_url"] = response.url
item["content"] = parse_text(response.text, item["author"])
item["content_length"] = len(item["content"])
if item["content"] == "":
item["content"] = "FAILED_TO_PARSE_CONTENT"
return item
# Save entries to DynamoDB
@xray_recorder.capture("save_to_dynamo")
def save_to_dynamo(items):
table_name = os.environ.get("CORPUS_TABLE_NAME")
table = boto3.resource("dynamodb").Table(table_name)
with table.batch_writer() as batch:
for item in items:
batch.put_item(Item=item)
# Lambda entry point
@xray_recorder.capture("handler")
def handler(event, context):
headlines = get_feed_xml()
max_items = int(os.environ.get("MAX_HARVESTED_HEADLINES"))
corpus_items = list(
filter(
None.__ne__,
[get_content(headline) for headline in headlines[:max_items]]
)
)
if not event.get("is_local_dev"):
save_to_dynamo(corpus_items)
else:
print(json.dumps(corpus_items, indent=1))
raise NotImplementedError("Local DynamoDB usage not implemented")
# Main function for local testing
if __name__ == "__main__":
print("Local execution is not completely supported. Please run this in AWS Lambda.")
handler({"is_local_dev": True}, {})
| {
"content_hash": "05ea679e49c94642b8612695d99a363e",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 90,
"avg_line_length": 33.97142857142857,
"alnum_prop": 0.5971404541631623,
"repo_name": "Vilsepi/infinimonkey",
"id": "b36a6db0e47d27bc552ed2ddf9a84006db9768d1",
"size": "5997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/harvester.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "362"
},
{
"name": "HTML",
"bytes": "579"
},
{
"name": "JavaScript",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9732"
},
{
"name": "Shell",
"bytes": "1378"
}
],
"symlink_target": ""
} |
import sys
sys.path.append("..")
import iac.app.libreoffice.calc as localc
import iac.app.libreoffice.writer as lowriter
import iac.app.gnumeric as gnumeric
| {
"content_hash": "2a6b9c16bbc789fce403906b05167ede",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 45,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.7974683544303798,
"repo_name": "Risto-Stevcev/iac-protocol",
"id": "7bbb3b5d85af34030708742a7e660c283c7f0d0f",
"size": "158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iac/interfaces.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "27273"
},
{
"name": "Shell",
"bytes": "5047"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/space/engine/shared_engine_overdriver_mk1.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "efddb4c564c38bb18850a8b085272f7c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 24.46153846153846,
"alnum_prop": 0.7012578616352201,
"repo_name": "obi-two/Rebelion",
"id": "dcd27a30341b9b32eb80d80dec8d30eb94c33b8c",
"size": "463",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/space/engine/shared_engine_overdriver_mk1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import collections
import heapq
class Solution:
def maxProbability(self, n: int, edges: List[List[int]], succProb: List[float], start: int, end: int) -> float:
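        # Modified Dijkstra: edge weights are success probabilities, and the node
        # with the largest probability product so far is expanded first (heapq is
        # a min-heap, so probabilities are pushed negated).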
graph = collections.defaultdict(list)
for i, (a, b) in enumerate(edges):
graph[a].append((b, i))
graph[b].append((a, i))
prob = [0] * n
prob[start] = 1
pq = [(-1, start)]
while pq:
p, index = heapq.heappop(pq)
if index == end:
return -p
for node, i in graph[index]:
if -p * succProb[i] > prob[node]:
prob[node] = -p * succProb[i]
heapq.heappush(pq, (-prob[node], node))
return 0
| {
"content_hash": "16da4bbcf9423f8393848b2d301d6bfe",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 115,
"avg_line_length": 36.55,
"alnum_prop": 0.4911080711354309,
"repo_name": "jiadaizhao/LeetCode",
"id": "963f02bb1903c3900727752b049ce824ce287eb2",
"size": "731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "1501-1600/1514-Path with Maximum Probability/1514-Path with Maximum Probability.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1140864"
},
{
"name": "Java",
"bytes": "34062"
},
{
"name": "Python",
"bytes": "758800"
},
{
"name": "Shell",
"bytes": "698"
},
{
"name": "TSQL",
"bytes": "774"
}
],
"symlink_target": ""
} |
import mock
import yaml
from nailgun import objects
from nailgun.openstack.common import jsonutils
from nailgun.plugins import attr_plugin
from nailgun.test import base
def get_config(config):
def _get_config(*args):
return mock.mock_open(read_data=yaml.dump(config))()
return _get_config
class BasePluginTest(base.BaseIntegrationTest):
TASKS_CONFIG = [
{'priority': 10,
'role': ['controller'],
'type': 'shell',
'parameters': {'cmd': './lbaas_enable.sh', 'timeout': 42},
'stage': 'post_deployment'},
{'priority': 10,
'role': '*',
'type': 'shell',
'parameters': {'cmd': 'echo all > /tmp/plugin.all', 'timeout': 42},
'stage': 'pre_deployment'}]
def setUp(self):
super(BasePluginTest, self).setUp()
self.sample_plugin = self.env.get_default_plugin_metadata()
self.plugin_env_config = self.env.get_default_plugin_env_config()
def create_plugin(self, sample=None, expect_errors=False):
sample = sample or self.sample_plugin
resp = self.app.post(
base.reverse('PluginCollectionHandler'),
jsonutils.dumps(sample),
headers=self.default_headers,
expect_errors=expect_errors
)
return resp
def delete_plugin(self, plugin_id):
resp = self.app.delete(
base.reverse('PluginHandler', {'obj_id': plugin_id}),
headers=self.default_headers
)
return resp
def create_cluster(self, nodes=None):
nodes = nodes if nodes else []
with mock.patch('nailgun.plugins.attr_plugin.os') as os:
with mock.patch('nailgun.plugins.attr_plugin.open',
create=True) as f_m:
os.access.return_value = True
os.path.exists.return_value = True
f_m.side_effect = get_config(self.plugin_env_config)
self.env.create(
release_kwargs={'version': '2014.2-6.0',
'operating_system': 'Ubuntu'},
nodes_kwargs=nodes)
return self.env.clusters[0]
def default_attributes(self, cluster):
resp = self.app.get(
base.reverse('ClusterAttributesDefaultsHandler',
{'cluster_id': cluster.id}),
headers=self.default_headers)
return resp
def modify_plugin(self, cluster, plugin_name, enabled):
editable_attrs = cluster.attributes.editable
editable_attrs[plugin_name]['metadata']['enabled'] = enabled
resp = self.app.put(
base.reverse('ClusterAttributesHandler',
{'cluster_id': cluster.id}),
jsonutils.dumps({'editable': editable_attrs}),
headers=self.default_headers)
return resp
def enable_plugin(self, cluster, plugin_name):
return self.modify_plugin(cluster, plugin_name, True)
def disable_plugin(self, cluster, plugin_name):
return self.modify_plugin(cluster, plugin_name, False)
def get_pre_hooks(self, cluster):
with mock.patch('nailgun.plugins.attr_plugin.glob') as glob:
glob.glob.return_value = ['/some/path']
with mock.patch('nailgun.plugins.attr_plugin.os') as os:
with mock.patch('nailgun.plugins.attr_plugin.open',
create=True) as f_m:
os.access.return_value = True
os.path.exists.return_value = True
f_m.side_effect = get_config(self.TASKS_CONFIG)
resp = self.app.get(
base.reverse('DefaultPrePluginsHooksInfo',
{'cluster_id': cluster.id}),
headers=self.default_headers)
return resp
def get_post_hooks(self, cluster):
with mock.patch('nailgun.plugins.attr_plugin.os') as os:
with mock.patch('nailgun.plugins.attr_plugin.open',
create=True) as f_m:
os.access.return_value = True
os.path.exists.return_value = True
f_m.side_effect = get_config(self.TASKS_CONFIG)
resp = self.app.get(
base.reverse('DefaultPostPluginsHooksInfo',
{'cluster_id': cluster.id}),
headers=self.default_headers)
return resp
class TestPluginsApi(BasePluginTest):
def test_plugin_created_on_post(self):
resp = self.create_plugin()
self.assertEqual(resp.status_code, 201)
def test_env_create_and_load_env_config(self):
self.create_plugin()
cluster = self.create_cluster()
self.assertIn(self.sample_plugin['name'], cluster.attributes.editable)
def test_enable_disable_plugin(self):
resp = self.create_plugin()
plugin = objects.Plugin.get_by_uid(resp.json['id'])
cluster = self.create_cluster()
self.assertEqual(plugin.clusters, [])
resp = self.enable_plugin(cluster, plugin.name)
self.assertEqual(resp.status_code, 200)
self.assertIn(cluster, plugin.clusters)
resp = self.disable_plugin(cluster, plugin.name)
self.assertEqual(resp.status_code, 200)
self.assertEqual(plugin.clusters, [])
def test_delete_plugin(self):
resp = self.create_plugin()
del_resp = self.delete_plugin(resp.json['id'])
self.assertEqual(del_resp.status_code, 204)
def test_update_plugin(self):
resp = self.create_plugin()
data = resp.json
data['package_version'] = '2.0.0'
plugin_id = data.pop('id')
resp = self.app.put(
base.reverse('PluginHandler', {'obj_id': plugin_id}),
jsonutils.dumps(data),
headers=self.default_headers
)
self.assertEqual(resp.status_code, 200)
updated_data = resp.json
updated_data.pop('id')
self.assertEqual(updated_data, data)
def test_default_attributes_after_plugin_is_created(self):
self.create_plugin()
cluster = self.create_cluster()
default_attributes = self.default_attributes(cluster)
self.assertIn(self.sample_plugin['name'], default_attributes)
def test_plugins_multiversioning(self):
def create_with_version(version):
self.create_plugin(sample=self.env.get_default_plugin_metadata(
name='multiversion_plugin', version=version))
for version in ['1.0.0', '2.0.0', '0.0.1']:
create_with_version(version)
cluster = self.create_cluster()
# Create new plugin after environment is created
create_with_version('5.0.0')
self.enable_plugin(cluster, 'multiversion_plugin')
self.assertEqual(len(cluster.plugins), 1)
enabled_plugin = cluster.plugins[0]
        # The newest plugin available at the moment of environment creation
        # should be the one that gets enabled
self.assertEqual(enabled_plugin.version, '2.0.0')
self.disable_plugin(cluster, 'multiversion_plugin')
self.assertEqual(len(cluster.plugins), 0)
class TestPrePostHooks(BasePluginTest):
def setUp(self):
super(TestPrePostHooks, self).setUp()
resp = self.create_plugin()
self.plugin = attr_plugin.ClusterAttributesPlugin(
objects.Plugin.get_by_uid(resp.json['id']))
self.cluster = self.create_cluster([
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['compute'], 'pending_addition': True}])
self.enable_plugin(self.cluster, self.sample_plugin['name'])
def test_generate_pre_hooks(self):
tasks = self.get_pre_hooks(self.cluster).json
upload_file = [t for t in tasks if t['type'] == 'upload_file']
rsync = [t for t in tasks if t['type'] == 'sync']
cmd_tasks = [t for t in tasks if t['type'] == 'shell']
self.assertEqual(len(upload_file), 1)
self.assertEqual(len(rsync), 1)
self.assertEqual(len(cmd_tasks), 2)
for t in tasks:
            # should uid be a string?
self.assertEqual(
sorted(t['uids']), sorted([n.uid for n in self.cluster.nodes]))
self.assertTrue(t['fail_on_error'])
self.assertEqual(t['diagnostic_name'], self.plugin.full_name)
apt_update = [t for t in cmd_tasks
if u'apt-get update' in t['parameters']['cmd']]
self.assertEqual(len(apt_update), 1)
def test_generate_post_hooks(self):
tasks = self.get_post_hooks(self.cluster).json
self.assertEqual(len(tasks), 1)
task = tasks[0]
controller_id = [n.uid for n in self.cluster.nodes
if 'controller' in n.roles]
self.assertEqual(controller_id, task['uids'])
self.assertTrue(task['fail_on_error'])
self.assertEqual(task['diagnostic_name'], self.plugin.full_name)
class TestPluginValidation(BasePluginTest):
def test_releases_not_provided(self):
sample = {
'name': 'test_name',
'version': '0.1.1',
'fuel_version': ['6.0'],
'title': 'Test plugin',
'package_version': '1.0.0'
}
resp = self.create_plugin(sample=sample, expect_errors=True)
self.assertEqual(resp.status_code, 400)
def test_version_is_not_present_in_release_data(self):
sample = {
'name': 'test_name',
'version': '0.1.1',
'fuel_version': ['6.0'],
'title': 'Test plugin',
'package_version': '1.0.0',
'releases': [
{'os': 'Ubuntu', 'mode': ['ha', 'multinode']}
]
}
resp = self.create_plugin(sample=sample, expect_errors=True)
self.assertEqual(resp.status_code, 400)
def test_plugin_version_is_floating(self):
sample = {
'name': 'test_name',
'title': 'Test plugin',
'version': 1.1,
'fuel_version': ['6.0'],
'package_version': '1.0.0',
'releases': [
{'os': 'Ubuntu',
'mode': ['ha', 'multinode'],
'version': '2014.2.1-5.1'}
]
}
resp = self.create_plugin(sample=sample, expect_errors=True)
self.assertEqual(resp.status_code, 400)
def test_title_is_not_present(self):
sample = {
'name': 'test_name',
'version': '1.1',
'fuel_version': ['6.0'],
'package_version': '1.0.0',
'releases': [
{'os': 'Ubuntu',
'mode': ['multinode'],
'version': '2014.2.1-5.1'}
]
}
resp = self.create_plugin(sample=sample, expect_errors=True)
self.assertEqual(resp.status_code, 400)
| {
"content_hash": "5f274e3fae55bb12a1ec4e8a42b8aa8f",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 79,
"avg_line_length": 38.149825783972126,
"alnum_prop": 0.5656224312722623,
"repo_name": "andrei4ka/fuel-web-redhat",
"id": "70f30564084011a5e343320c1a8940eb348c0fdb",
"size": "11585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/test/integration/test_plugins_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "100524"
},
{
"name": "JavaScript",
"bytes": "639783"
},
{
"name": "Makefile",
"bytes": "5891"
},
{
"name": "Puppet",
"bytes": "282"
},
{
"name": "Python",
"bytes": "3206343"
},
{
"name": "Ruby",
"bytes": "33423"
},
{
"name": "Shell",
"bytes": "31460"
}
],
"symlink_target": ""
} |
import json
import os
import cloudpickle
import sys
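# Worker protocol: the parent process pickles the target callable and its
# arguments into temp_dir; this script runs the target and writes the returned
# history back as history_<task index from TF_CONFIG> for the parent to collect.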
if __name__ == '__main__':
temp_dir = sys.argv[1]
with open(os.path.join(temp_dir, "args.pkl"), 'rb') as f:
args = cloudpickle.load(f)
with open(os.path.join(temp_dir, "target.pkl"), 'rb') as f:
target = cloudpickle.load(f)
history = target(*args)
tf_config = json.loads(os.environ["TF_CONFIG"])
with open(os.path.join(temp_dir,
f"history_{tf_config['task']['index']}"), "wb") as f:
cloudpickle.dump(history, f)
| {
"content_hash": "9d44c4d7218a4f8e8853425ccff9b6d3",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 80,
"avg_line_length": 27.2,
"alnum_prop": 0.5808823529411765,
"repo_name": "yangw1234/BigDL",
"id": "82db9efaa10b216591e0961a4101745ab75870c3",
"size": "1131",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/nano/src/bigdl/nano/common/multiprocessing/subprocess_worker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5342"
},
{
"name": "Dockerfile",
"bytes": "138760"
},
{
"name": "Java",
"bytes": "1321348"
},
{
"name": "Jupyter Notebook",
"bytes": "54063856"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Makefile",
"bytes": "19253"
},
{
"name": "PowerShell",
"bytes": "1137"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "8762180"
},
{
"name": "RobotFramework",
"bytes": "16117"
},
{
"name": "Scala",
"bytes": "13216038"
},
{
"name": "Shell",
"bytes": "844916"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from testdoubles.utils import bind_function_to_object
from tests.common.compat import mock
class BindFunctionToObjectTestCase(TestCase):
def test_when_binding_a_function_to_an_object_it_is_available_for_the_object_instance(self):
def f(self):
pass
class Obj(object):
pass
bind_function_to_object(f, Obj)
sut = Obj()
self.assertTrue(hasattr(sut, 'f'), 'Obj has no attribute f')
def test_when_binding_a_function_to_an_object_it_is_callable_on_the_object_instance(self):
def f(self):
pass
class Obj(object):
pass
bind_function_to_object(f, Obj)
sut = Obj()
sut.f()
def test_when_binding_a_function_to_an_object_then_the_object_is_returned(self):
def f(self):
pass
class Obj(object):
pass
actual = bind_function_to_object(f, Obj)
self.assertEqual(actual, Obj)
def test_when_providing_a_non_callable_a_type_error_is_raised(self):
class Obj(object):
pass
with self.assertRaises(TypeError):
bind_function_to_object(mock.sentinel, Obj)
def test_when_providing_a_non_boundable_function_a_value_error_is_raised(self):
def f():
pass
class Obj(object):
pass
with self.assertRaises(ValueError):
bind_function_to_object(f, Obj)
def test_when_providing_a_non_boundable_function_then_the_value_error_message_is_correct(self):
def f():
pass
class Obj(object):
pass
with self.assertRaisesRegexp(ValueError, '%s does not have a self argument' % f):
bind_function_to_object(f, Obj) | {
"content_hash": "223e17061a8e084494e4cb75a87bd2f9",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 99,
"avg_line_length": 25.47142857142857,
"alnum_prop": 0.6029164329781268,
"repo_name": "testsuite/testdoubles",
"id": "11af6157f3a1ca8b933be7a6b8edd5727ccde831",
"size": "1821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_bind_function_to_object.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "17483"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
"""
# UI/UX Authoring Tool
# @license http://www.apache.org/licenses/LICENSE-2.0
# Author @ Jamil Hussain
"""
from django.shortcuts import render
from .forms import AppCreationForm
from .models import (App,ActionLog,Log)
from accounts.models import MyUser
from django.http import HttpResponseRedirect, Http404
from django.db.models import Count, Min, Sum, Avg
from django.utils import timezone
def createApp(request, *args, **kwargs):
form = AppCreationForm(request.POST or None)
if form.is_valid():
form.save()
return HttpResponseRedirect("analytics/overview.html")
return render(request, "analytics/app.html", {"form": form})
def homeView(request):
application_list=App.objects.all()
context = {
"app_list": application_list
}
if not request.user.is_authenticated:
return HttpResponseRedirect("/accounts/login")
else:
return render(request, 'analytics/home.html',context)
def overviewView(request):
actions_logs=ActionLog.objects.all()
logs=Log.objects.filter(visit_time__gt=timezone.now()).exclude(event_category__isnull=True).exclude(event_category__exact='').values('visit_time','event_category','event_name','event_action')
#gd_total= Log.objects.annotate(total_country=Sum('Log__country'))
gb_list= Log.objects.values('country').annotate(Count('country'))
# visits=Log.objects.extra({'visit_time' : "date(visit_time)"}).values('visit_time').annotate(Count('visit_time'))
device_model=Log.objects.exclude(user_agent__isnull=True).exclude(user_agent__exact='').values('user_agent').distinct()
andorid=Log.objects.exclude(user_agent__isnull=True).exclude(user_agent__exact='').filter(user_agent__contains='Android').values('user_agent').count()
other = Log.objects.exclude(user_agent__isnull=True).exclude(user_agent__exact='').exclude(user_agent__contains='Android').values('user_agent').count()
total_visit= Log.objects.all().count()
visits= Log.objects.extra({'vists_date': "date(visit_time)"}).values('vists_date').annotate(count=Count('id'))
user=MyUser.objects.all()
male = MyUser.objects.filter(gender='male')
female = MyUser.objects.filter(gender='female')
context = {
'actionlog':actions_logs,
'log': logs,
'gb_total': gb_list,
'user': user,
'male': male,
'female': female,
'visits' : visits,
'total_visit': total_visit,
'device_model': device_model,
'andorid' : andorid,
'other' : other
}
return render(request, 'analytics/overview.html', context)
def screensView(request):
context = {}
return render(request, 'analytics/screens.html', context)
def eventsView(request):
context = {}
return render(request, 'analytics/events.html', context)
def locationsView(request):
context = {}
return render(request, 'analytics/locations.html', context)
def exceptionsView(request):
context = {}
return render(request, 'analytics/exceptions.html', context)
| {
"content_hash": "acae090d78b266f0b85b0fc9d93f33a9",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 195,
"avg_line_length": 37.49382716049383,
"alnum_prop": 0.6825814948962792,
"repo_name": "ubiquitous-computing-lab/Mining-Minds",
"id": "21dbc250e8ca67e431e51810e410da7251d886f9",
"size": "3037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supporting-layer/uiux-authoring-tool/analytics/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2388167"
},
{
"name": "CoffeeScript",
"bytes": "87725"
},
{
"name": "HTML",
"bytes": "6002417"
},
{
"name": "Java",
"bytes": "2523276"
},
{
"name": "JavaScript",
"bytes": "35544943"
},
{
"name": "Makefile",
"bytes": "1558"
},
{
"name": "PHP",
"bytes": "874945"
},
{
"name": "PowerShell",
"bytes": "468"
},
{
"name": "Python",
"bytes": "63930"
},
{
"name": "Shell",
"bytes": "3879"
}
],
"symlink_target": ""
} |
class BaseFactoryGenerator():
def __init__(self):
self.data = None
self.namespace = None
def init(self, data, namespace):
self.data = data
self.namespace = namespace
def generate_import(self):
raise NotImplementedError()
def generate(self, data, namespace):
raise NotImplementedError()
def _generate_to_json(self):
raise NotImplementedError()
def _generate_from_json(self):
raise NotImplementedError() | {
"content_hash": "ce6434c17f32f3bb1aff503f6ad668fa",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 40,
"avg_line_length": 24.7,
"alnum_prop": 0.631578947368421,
"repo_name": "HenrikPoulsen/Json2Class",
"id": "810bb378062053c1cee6391b6de813f30d3070cb",
"size": "495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "convert/base/factorygenerator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "84311"
},
{
"name": "Java",
"bytes": "37872"
},
{
"name": "Python",
"bytes": "104260"
}
],
"symlink_target": ""
} |
"""Actions for the help menu.
"""
# Authors: Gael Varoquaux <gael.varoquaux[at]normalesup.org>
# Prabhu Ramachandran
# Copyright (c) 2007-2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os import path
import os
import sys
from os.path import join, dirname
# Enthought library imports.
from pyface.action.api import Action
from traitsui.api import auto_close_message
# Local imports
import mayavi.api
from mayavi.core.common import error
from mayavi.preferences.api import preference_manager
# To find the html documentation directory, first look under the
# standard place. If that directory doesn't exist, assume you
# are running from the source.
local_dir = dirname(mayavi.api.__file__)
HTML_DIR = join(local_dir, 'html')
if not path.exists(HTML_DIR):
HTML_DIR = join(dirname(dirname(local_dir)),
'build', 'docs', 'html', 'mayavi')
if not path.exists(HTML_DIR):
HTML_DIR = None
def browser_open(url):
if sys.platform == 'darwin':
os.system('open %s &' % url)
else:
import webbrowser
if webbrowser._iscommand('firefox') and \
preference_manager.root.open_help_in_light_browser:
# Firefox is installed, let's use it, we know how to make it
# chromeless.
firefox = webbrowser.get('firefox')
firefox._invoke(['-chrome', url], remote=False, autoraise=True)
else:
webbrowser.open(url, autoraise=1)
def open_help_index():
""" Open the mayavi user manual index in a browser.
"""
# If the HTML_DIR was found, bring up the documentation in a
# web browser. Otherwise, bring up an error message.
if HTML_DIR:
auto_close_message("Opening help in web browser...")
browser_open(join(HTML_DIR, 'index.html'))
else:
error("Could not find the user guide in your installation " \
"or the source tree.")
def open_tvtk_docs():
""" Open the TVTK class browser.
"""
from tvtk.tools.tvtk_doc import TVTKClassChooser
TVTKClassChooser().edit_traits()
######################################################################
# `HelpIndex` class.
######################################################################
class HelpIndex(Action):
""" An action that pop up the help in a browser. """
tooltip = "The Mayavi2 user guide"
description = "The Mayavi2 user guide"
###########################################################################
# 'Action' interface.
###########################################################################
def perform(self, event):
""" Performs the action. """
open_help_index()
######################################################################
# `TVTKClassBrowser` class.
######################################################################
class TVTKClassBrowser(Action):
""" An action that opens the tvtk interactive class browser. """
tooltip = "The TVTK interactive class browser"
description = "The TVTK interactive class browser"
###########################################################################
# 'Action' interface.
###########################################################################
def perform(self, event):
""" Performs the action. """
open_tvtk_docs()
| {
"content_hash": "45d1ac12c3e4cb78d974691f592be784",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 79,
"avg_line_length": 32.60952380952381,
"alnum_prop": 0.5286214953271028,
"repo_name": "dmsurti/mayavi",
"id": "526036ee2fb6fb1e9c4b7323e686fbc3064b1850",
"size": "3424",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mayavi/action/help.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1054"
},
{
"name": "GAP",
"bytes": "34817"
},
{
"name": "Python",
"bytes": "2494055"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
} |
"""
Script to scan Zephyr include directories and emit system call and subsystem metadata
System calls require a great deal of boilerplate code in order to implement
completely. This script is the first step in the build system's process of
auto-generating this code by doing a text scan of directories containing
C or header files, and building up a database of system calls and their
function call prototypes. This information is emitted to a generated
JSON file for further processing.
This script also scans for struct definitions such as __subsystem and
__net_socket, emitting a JSON dictionary mapping tags to all the struct
declarations found that were tagged with them.
If the output JSON file already exists, its contents are checked against
what information this script would have outputted; if the result is that the
file would be unchanged, it is not modified to prevent unnecessary
incremental builds.
"""
import sys
import re
import argparse
import os
import json
regex_flags = re.MULTILINE | re.VERBOSE
syscall_regex = re.compile(r'''
__syscall\s+ # __syscall attribute, must be first
([^(]+) # type and name of system call (split later)
[(] # Function opening parenthesis
([^)]*) # Arg list (split later)
[)] # Closing parenthesis
''', regex_flags)
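# Illustrative example (the declaration below is invented for documentation only):
# a line such as
#   __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
# yields the match groups ('int k_sem_take', 'struct k_sem *sem, k_timeout_t timeout'),
# i.e. the return type plus function name in group 1 and the raw argument list in
# group 2; both are split further by later stages of the syscall generation.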
struct_tags = ["__subsystem", "__net_socket"]
tagged_struct_decl_template = r'''
%s\s+ # tag, must be first
struct\s+ # struct keyword is next
([^{]+) # name of subsystem
[{] # Open curly bracket
'''
def tagged_struct_update(target_list, tag, contents):
regex = re.compile(tagged_struct_decl_template % tag, regex_flags)
items = [mo.groups()[0].strip() for mo in regex.finditer(contents)]
target_list.extend(items)
def analyze_headers(multiple_directories):
syscall_ret = []
tagged_ret = {}
for tag in struct_tags:
tagged_ret[tag] = []
for base_path in multiple_directories:
for root, dirs, files in os.walk(base_path, topdown=True):
dirs.sort()
files.sort()
for fn in files:
# toolchain/common.h has the definitions of these tags which we
# don't want to trip over
path = os.path.join(root, fn)
if (not (path.endswith(".h") or path.endswith(".c")) or
path.endswith(os.path.join(os.sep, 'toolchain',
'common.h'))):
continue
with open(path, "r", encoding="utf-8") as fp:
contents = fp.read()
try:
syscall_result = [(mo.groups(), fn)
for mo in syscall_regex.finditer(contents)]
for tag in struct_tags:
tagged_struct_update(tagged_ret[tag], tag, contents)
except Exception:
sys.stderr.write("While parsing %s\n" % fn)
raise
syscall_ret.extend(syscall_result)
return syscall_ret, tagged_ret
def update_file_if_changed(path, new):
if os.path.exists(path):
with open(path, 'r') as fp:
old = fp.read()
if new != old:
with open(path, 'w') as fp:
fp.write(new)
else:
with open(path, 'w') as fp:
fp.write(new)
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-i", "--include", required=True, action='append',
help='''include directories recursively scanned
for .h files. Can be specified multiple times:
-i topdir1 -i topdir2 ...''')
parser.add_argument(
"-j", "--json-file", required=True,
help="Write system call prototype information as json to file")
parser.add_argument(
"-t", "--tag-struct-file", required=True,
help="Write tagged struct name information as json to file")
args = parser.parse_args()
def main():
parse_args()
syscalls, tagged = analyze_headers(args.include)
# Only write json files if they don't exist or have changes since
# they will force an incremental rebuild.
syscalls_in_json = json.dumps(
syscalls,
indent=4,
sort_keys=True
)
update_file_if_changed(args.json_file, syscalls_in_json)
tagged_struct_in_json = json.dumps(
tagged,
indent=4,
sort_keys=True
)
update_file_if_changed(args.tag_struct_file, tagged_struct_in_json)
if __name__ == "__main__":
main()
| {
"content_hash": "5082171cbea93b5dd9647898bbe9b268",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 85,
"avg_line_length": 33.31972789115646,
"alnum_prop": 0.5822784810126582,
"repo_name": "finikorg/zephyr",
"id": "9994efdd2742d700ac0e82855924c4c1c018aa72",
"size": "5003",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "scripts/build/parse_syscalls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "445128"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "44321001"
},
{
"name": "C++",
"bytes": "29292"
},
{
"name": "CMake",
"bytes": "1369918"
},
{
"name": "Cadence",
"bytes": "1501"
},
{
"name": "EmberScript",
"bytes": "997"
},
{
"name": "Forth",
"bytes": "1648"
},
{
"name": "GDB",
"bytes": "1285"
},
{
"name": "Haskell",
"bytes": "722"
},
{
"name": "JetBrains MPS",
"bytes": "3152"
},
{
"name": "PLSQL",
"bytes": "281"
},
{
"name": "Perl",
"bytes": "215338"
},
{
"name": "Python",
"bytes": "2251570"
},
{
"name": "Shell",
"bytes": "171294"
},
{
"name": "SmPL",
"bytes": "36840"
},
{
"name": "Smalltalk",
"bytes": "1885"
},
{
"name": "SourcePawn",
"bytes": "14890"
},
{
"name": "Tcl",
"bytes": "5838"
},
{
"name": "VBA",
"bytes": "294"
},
{
"name": "Verilog",
"bytes": "6394"
}
],
"symlink_target": ""
} |
"""
To run this script, type
python buyLotsOfFruit.py
Once you have correctly implemented the buyLotsOfFruit function,
the script should produce the output:
Cost of [('apples', 2.0), ('pears', 3.0), ('limes', 4.0)] is 12.25
"""
fruitPrices = {'apples':2.00, 'oranges': 1.50, 'pears': 1.75,
'limes':0.75, 'strawberries':1.00}
def buyLotsOfFruit(orderList):
"""
orderList: List of (fruit, numPounds) tuples
Returns cost of order
"""
totalCost = 0.0
for fruit, qty in orderList:
        if fruitPrices.get(fruit) is not None:
totalCost += fruitPrices[fruit] * qty
return totalCost
# Main Method
if __name__ == '__main__':
"This code runs when you invoke the script from the command line"
orderList = [ ('apples', 2.0), ('pears', 3.0), ('limes', 4.0) ]
print 'Cost of', orderList, 'is', buyLotsOfFruit(orderList)
| {
"content_hash": "7894a47469940c4bf8c0e2ca52357b75",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 69,
"avg_line_length": 28.387096774193548,
"alnum_prop": 0.6227272727272727,
"repo_name": "lucasosouza/berkeleyAI",
"id": "8daab6b98772f3fb525699ebce1b95bf854da569",
"size": "1546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorial/buyLotsOfFruit.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "923675"
}
],
"symlink_target": ""
} |
class InfinityType:
def __repr__(self) -> str:
return "Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return False
def __le__(self, other: object) -> bool:
return False
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
def __ne__(self, other: object) -> bool:
return not isinstance(other, self.__class__)
def __gt__(self, other: object) -> bool:
return True
def __ge__(self, other: object) -> bool:
return True
def __neg__(self: object) -> "NegativeInfinityType":
return NegativeInfinity
Infinity = InfinityType()
class NegativeInfinityType:
def __repr__(self) -> str:
return "-Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return True
def __le__(self, other: object) -> bool:
return True
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
def __ne__(self, other: object) -> bool:
return not isinstance(other, self.__class__)
def __gt__(self, other: object) -> bool:
return False
def __ge__(self, other: object) -> bool:
return False
def __neg__(self: object) -> InfinityType:
return Infinity
NegativeInfinity = NegativeInfinityType()
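# Behaviour sketch (examples added for illustration only): the two sentinels compare
# as extremes against any other object, e.g.
#   Infinity > "anything"            # True
#   NegativeInfinity < (1, 2, 3)     # True
#   -Infinity is NegativeInfinity    # True, via InfinityType.__neg__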
| {
"content_hash": "7c3b0a1b879005ca6efd1b8a133fb071",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 56,
"avg_line_length": 23.338709677419356,
"alnum_prop": 0.5639253628196268,
"repo_name": "martbhell/wasthereannhlgamelastnight",
"id": "951549753afa255148c7c60d868303963f8c1813",
"size": "1629",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "src/lib/setuptools/_vendor/packaging/_structures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "730"
},
{
"name": "HTML",
"bytes": "8959"
},
{
"name": "JavaScript",
"bytes": "3318"
},
{
"name": "Python",
"bytes": "5989638"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
import smtplib
import json
from pprint import pprint
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Send a text message notifying them of a new song by artist (based on their choices)
# This opens json file containing needed values
# TODO: investigate IMAP
# TODO: Create subscription to avoid spam filter
# TODO: Put html in separate file (make it look nice)
# TODO: Rename this file
# TODO: concat multiple new artists in 1 text
def main():
    '''In the future, this could read the correct user from a file and, depending on
    that user, select the correct message to send as well'''
message = "Got.... Heem"
# message = "Testing email"
subject = "plain"
person = "Eric Krause"
# full_message = make_email_message(person, subject, message)
#send_message(person, full_message, 'email')
send_message(person, message, 'text')
def make_new_song_text(artist, song_id):
'''Creates the message based off the artist and song_id, which are pulled from youtube-dl.'''
return "New song '%s' by '%s' was uploaded today" % (song_id, artist)
def make_email_message(person, subject, message):
''' Constructs email from given information (generic method). Pass string of person's name.'''
json_data=open('privates.json')
data = json.load(json_data)
json_data.close()
full_msg = MIMEMultipart('alternative')
# plaintext version of message
full_msg['Subject'] = '%s' %subject
full_msg['From'] = '%s' % data['credentials']['username']
#data['credentials']['username']
full_msg['To'] = '%s' % data['phonebook'][person][1]
text = "%s" % message
# html version of message
html = """
<html>
<head></head>
<body>
<p> Totally different now<br>
Here's more info. Yep.
</p>
</body>
</html>
"""
# This reads in html file
# f = open("subscribe_msg.html")
# html = f.read()
# f.close()
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
full_msg.attach(part1)
full_msg.attach(part2)
return full_msg.as_string()
def send_message(person, message, service):
'''Sends message to any person in our phonebook. Service selects which technology is used
(text or email). '''
# open phonebook info
json_data=open('privates.json')
data = json.load(json_data)
json_data.close()
server = smtplib.SMTP('smtp.gmail.com',587)
#select correct list index to get correct email or text address
if (service == 'text' or service == 'Text'):
s = 0
elif (service == 'email' or service == 'Email'):
s = 1
else:
print ("Incorrect service option selected. Please enter 'text' or 'email'")
try:
server.starttls()
server.login(data['credentials']['username'],data['credentials']['password'])
server.sendmail(data['credentials']['username'], data['phonebook'][person][s],message)
except:
print "Could not send message"
finally:
server.quit()
| {
"content_hash": "0741d369548a053616315d13c77972a7",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 100,
"avg_line_length": 34.391304347826086,
"alnum_prop": 0.6390644753476612,
"repo_name": "ekrause/splatfilch",
"id": "78433444bb463bd81f56a126a32b1c0474dec693",
"size": "3164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/text_notify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27071"
},
{
"name": "Shell",
"bytes": "96"
}
],
"symlink_target": ""
} |
"""Defines the base component class from which Landlab components inherit.
Base component class methods
++++++++++++++++++++++++++++
.. autosummary::
~landlab.core.model_component.Component.name
~landlab.core.model_component.Component.from_path
~landlab.core.model_component.Component.unit_agnostic
~landlab.core.model_component.Component.units
~landlab.core.model_component.Component.definitions
~landlab.core.model_component.Component.input_var_names
~landlab.core.model_component.Component.output_var_names
~landlab.core.model_component.Component.optional_var_names
~landlab.core.model_component.Component.var_type
~landlab.core.model_component.Component.var_units
~landlab.core.model_component.Component.var_definition
~landlab.core.model_component.Component.var_mapping
~landlab.core.model_component.Component.var_loc
~landlab.core.model_component.Component.var_help
~landlab.core.model_component.Component.initialize_output_fields
~landlab.core.model_component.Component.initialize_optional_output_fields
~landlab.core.model_component.Component.shape
~landlab.core.model_component.Component.grid
~landlab.core.model_component.Component.coords
"""
import os
import textwrap
import numpy as np
from .. import registry
from ..field import FieldError
from .model_parameter_loader import load_params
_VAR_HELP_MESSAGE = """
name: {name}
description:
{desc}
units: {units}
unit agnostic: {unit_agnostic}
at: {loc}
intent: {intent}
"""
class classproperty(property):
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
class Component:
"""Base component class from which Landlab components inherit."""
_info = {}
_name = None
_cite_as = ""
_unit_agnostic = None
def __new__(cls, *args, **kwds):
registry.add(cls)
return object.__new__(cls)
def __init__(self, grid):
self._grid = grid
self._current_time = None
# ensure that required input fields exist
for name in self._info.keys():
at = self._info[name]["mapping"]
optional = self._info[name]["optional"]
in_true = "in" in self._info[name]["intent"]
if (in_true) and (not optional):
# if required input, verify that it exists.
if name not in self._grid[at]:
raise FieldError(
"{component} is missing required input field: {name} at {at}".format(
component=self._name, name=name, at=at
)
)
# if required input exists, check dtype.
field = self._grid[at][name]
dtype = self._info[name]["dtype"]
if field.dtype != dtype:
raise FieldError(
"{component} required input variable: {name} at {at} has incorrect dtype. dtype must be {dtype} and is {actual}".format(
component=self._name,
name=name,
at=at,
dtype=dtype,
actual=field.dtype,
)
)
# if optional input exists, check dtype
if in_true and optional:
if name in self._grid[at]:
field = self._grid[at][name]
dtype = self._info[name]["dtype"]
if field.dtype != dtype:
raise FieldError(
"{component} optional input variable: {name} at {at} has incorrect dtype. dtype must be {dtype} and is {actual}".format(
component=self._name,
name=name,
at=at,
dtype=dtype,
actual=field.dtype,
)
)
@classmethod
def from_path(cls, grid, path):
"""Create a component from an input file.
Parameters
----------
grid : ModelGrid
A landlab grid.
path : str or file_like
Path to a parameter file, contents of a parameter file, or
a file-like object.
Returns
-------
Component
A newly-created component.
"""
if os.path.isfile(path):
with open(path, "r") as fp:
params = load_params(fp)
else:
params = load_params(path)
return cls(grid, **params)
@classproperty
@classmethod
def cite_as(cls):
"""Citation information for component.
Return required software citation, if any. An empty string indicates
that no citations other than the standard Landlab package citations are
needed for the component.
Citations are provided in BibTeX format.
Returns
-------
cite_as
"""
return cls._cite_as
@property
def current_time(self):
"""Current time.
Some components may keep track of the current time. In this case, the
``current_time`` attribute is incremented. Otherwise it is set to None.
Returns
-------
current_time
"""
return self._current_time
@current_time.setter
def current_time(self, new_time):
if self._current_time is not None:
assert new_time > self._current_time
self._current_time = new_time
@classproperty
@classmethod
def input_var_names(cls):
"""Names of fields that are used by the component.
Returns
-------
tuple of str
Tuple of field names.
"""
input_var_names = [
name
for name in cls._info.keys()
if (not cls._info[name]["optional"]) and ("in" in cls._info[name]["intent"])
]
return tuple(sorted(input_var_names))
@classproperty
@classmethod
def output_var_names(cls):
"""Names of fields that are provided by the component.
Returns
-------
tuple of str
Tuple of field names.
"""
output_var_names = [
name
for name in cls._info.keys()
if (not cls._info[name]["optional"])
and ("out" in cls._info[name]["intent"])
]
return tuple(sorted(output_var_names))
@classproperty
@classmethod
def optional_var_names(cls):
"""Names of fields that are optionally provided by the component, if
any.
Returns
-------
tuple of str
Tuple of field names.
"""
optional_var_names = [
name for name in cls._info.keys() if cls._info[name]["optional"]
]
return tuple(sorted(optional_var_names))
@classmethod
def var_type(cls, name):
"""Returns the dtype of a field (float, int, bool, str...).
Parameters
----------
name : str
A field name.
Returns
-------
dtype
The dtype of the field.
"""
return cls._info[name]["dtype"]
@classproperty
@classmethod
def name(cls):
"""Name of the component.
Returns
-------
str
Component name.
"""
return cls._name
@classproperty
@classmethod
def unit_agnostic(cls):
"""Whether the component is unit agnostic.
If True, then the component is unit agnostic. Under this condition a
user must still provide consistent units across all input arguments,
keyword arguments, and fields. However, when ``unit_agnostic`` is True
the units specified can be interpreted as dimensions.
When False, then the component requires inputs in the specified units.
Returns
-------
bool
"""
return cls._unit_agnostic
@classproperty
@classmethod
def units(cls):
"""Get the units for all field values.
Returns
-------
tuple or str
Units for each field.
"""
return tuple(
sorted([(name, cls._info[name]["units"]) for name in cls._info.keys()])
)
@classmethod
def var_units(cls, name):
"""Get the units of a particular field.
Parameters
----------
name : str
A field name.
Returns
-------
str
Units for the given field.
"""
return cls._info[name]["units"]
@classproperty
@classmethod
def definitions(cls):
"""Get a description of each field.
Returns
-------
tuple of (*name*, *description*)
A description of each field.
"""
return tuple(
sorted([(name, cls._info[name]["doc"]) for name in cls._info.keys()])
)
@classmethod
def var_definition(cls, name):
"""Get a description of a particular field.
Parameters
----------
name : str
A field name.
Returns
-------
tuple of (*name*, *description*)
A description of each field.
"""
return cls._info[name]["doc"]
@classmethod
def var_help(cls, name):
"""Print a help message for a particular field.
Parameters
----------
name : str
A field name.
"""
desc = os.linesep.join(
textwrap.wrap(
cls._info[name]["doc"], initial_indent=" ", subsequent_indent=" "
)
)
units = cls._info[name]["units"]
loc = cls._info[name]["mapping"]
intent = cls._info[name]["intent"]
help = _VAR_HELP_MESSAGE.format(
name=name,
desc=desc,
units=units,
loc=loc,
intent=intent,
unit_agnostic=cls._unit_agnostic,
)
print(help.strip())
@classproperty
@classmethod
def var_mapping(cls):
"""Location where variables are defined.
Returns
-------
tuple of (name, location)
Tuple of variable name and location ('node', 'link', etc.) pairs.
"""
return tuple(
sorted([(name, cls._info[name]["mapping"]) for name in cls._info.keys()])
)
@classmethod
def var_loc(cls, name):
"""Location where a particular variable is defined.
Parameters
----------
name : str
A field name.
Returns
-------
str
The location ('node', 'link', etc.) where a variable is defined.
"""
return cls._info[name]["mapping"]
def initialize_output_fields(self, values_per_element=None):
"""Create fields for a component based on its input and output var
names.
This method will create new fields (without overwrite) for any fields
output by, but not supplied to, the component. New fields are
initialized to zero. Ignores optional fields. New fields are created as
arrays of floats, unless the component specifies the variable type.
Parameters
----------
values_per_element: int (optional)
On occasion, it is necessary to create a field that is of size
(n_grid_elements, values_per_element) instead of the default size
            (n_grid_elements,). Use this keyword argument to accomplish this
task.
"""
for name in self._info.keys():
at = self._info[name]["mapping"]
optional = self._info[name]["optional"]
out_true = "out" in self._info[name]["intent"]
if (out_true) and (not optional) and (name not in self._grid[at]):
type_in = self.var_type(name)
num_elements = self._grid.size(at)
if values_per_element is None:
size = num_elements
else:
size = (num_elements, values_per_element)
init_vals = np.zeros(size, dtype=type_in)
units_in = self.var_units(name)
self.grid.add_field(name, init_vals, at=at, units=units_in, copy=False)
def initialize_optional_output_fields(self):
"""Create fields for a component based on its optional field outputs,
if declared in _optional_var_names.
This method will create new fields (without overwrite) for any
fields output by the component as optional. New fields are
initialized to zero. New fields are created as arrays of floats,
unless the component also contains the specifying property
_var_type.
"""
for name in self._info.keys():
at = self._info[name]["mapping"]
optional = self._info[name]["optional"]
out_true = "out" in self._info[name]["intent"]
if (out_true) and (optional) and (name not in self._grid[at]):
type_in = self.var_type(name)
init_vals = self.grid.zeros(at, dtype=type_in)
units_in = self.var_units(name)
self.grid.add_field(name, init_vals, at=at, units=units_in, copy=False)
@property
def shape(self):
"""Return the grid shape attached to the component, if defined."""
return self.grid._shape
@property
def grid(self):
"""Return the grid attached to the component."""
return self._grid
@property
def coords(self):
"""Return the coordinates of nodes on grid attached to the
component."""
return (self.grid.node_x, self.grid.node_y)
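# Minimal usage sketch (the component below is hypothetical and only illustrates the
# metadata contract enforced above; it is not part of Landlab):
#
# class SimpleDiffuser(Component):
#     _name = "SimpleDiffuser"
#     _unit_agnostic = True
#     _info = {
#         "topographic__elevation": {
#             "dtype": float,
#             "intent": "inout",
#             "optional": False,
#             "units": "m",
#             "mapping": "node",
#             "doc": "Land surface topographic elevation",
#         },
#     }
#
#     def __init__(self, grid, diffusivity=0.01):
#         super().__init__(grid)  # raises FieldError if the required input field is missing
#         self._kd = diffusivity
#
# SimpleDiffuser.input_var_names                      # -> ('topographic__elevation',)
# SimpleDiffuser.var_units("topographic__elevation")  # -> 'm'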
| {
"content_hash": "f59191ea71ba623fb81cf964de6e8842",
"timestamp": "",
"source": "github",
"line_count": 466,
"max_line_length": 148,
"avg_line_length": 29.86266094420601,
"alnum_prop": 0.541966082207531,
"repo_name": "cmshobe/landlab",
"id": "d1801b4d8b5fa75b621b43d02a763f92369111f9",
"size": "13939",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "landlab/core/model_component.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1359"
},
{
"name": "HTML",
"bytes": "99948"
},
{
"name": "Jupyter Notebook",
"bytes": "701992"
},
{
"name": "Makefile",
"bytes": "1924"
},
{
"name": "PowerShell",
"bytes": "7128"
},
{
"name": "Python",
"bytes": "4132304"
},
{
"name": "Shell",
"bytes": "2691"
},
{
"name": "TeX",
"bytes": "19453"
}
],
"symlink_target": ""
} |
from toontown.parties import PartyGlobals
from toontown.parties.DistributedPartyDanceActivityBase import DistributedPartyDanceActivityBase
from toontown.toonbase import TTLocalizer
class DistributedPartyValentineDanceActivity(DistributedPartyDanceActivityBase):
notify = directNotify.newCategory('DistributedPartyValentineDanceActivity')
def __init__(self, cr):
DistributedPartyDanceActivityBase.__init__(self, cr, PartyGlobals.ActivityIds.PartyDance, PartyGlobals.DancePatternToAnims, model='phase_13/models/parties/tt_m_ara_pty_danceFloorValentine')
def getInstructions(self):
return TTLocalizer.PartyDanceActivityInstructions
def getTitle(self):
return TTLocalizer.PartyDanceActivityTitle
def load(self):
DistributedPartyDanceActivityBase.load(self)
parentGroup = self.danceFloor.find('**/discoBall_mesh')
correctBall = self.danceFloor.find('**/discoBall_10')
origBall = self.danceFloor.find('**/discoBall_mesh_orig')
if not correctBall.isEmpty():
numChildren = parentGroup.getNumChildren()
for i in xrange(numChildren):
child = parentGroup.getChild(i)
if child != correctBall:
child.hide()
| {
"content_hash": "5b9e607be8c7b8ed2fd96a9f1c898b58",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 197,
"avg_line_length": 46.629629629629626,
"alnum_prop": 0.727561556791104,
"repo_name": "ToontownUprising/src",
"id": "d61d5e40f0b8a9954ba1d8295be46bca633ef8da",
"size": "1259",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "toontown/parties/DistributedPartyValentineDanceActivity.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "36"
},
{
"name": "Python",
"bytes": "16244807"
},
{
"name": "Shell",
"bytes": "11615"
}
],
"symlink_target": ""
} |
"""
This module provides a hook which generates a cucumber json result file at the end of the run.
"""
from getpass import getuser
from socket import gethostname
from datetime import timedelta
import re
import json
import logging
from radish.terrain import world
from radish.hookregistry import after
from radish.exceptions import RadishError
from radish.scenariooutline import ScenarioOutline
from radish.scenarioloop import ScenarioLoop
from radish.stepmodel import Step
from radish.extensionregistry import extension
import radish.utils as utils
@extension
class CucumberJSONWriter(object):
"""
cucumber json Writer radish extension
"""
OPTIONS = [("--cucumber-json=<ccjson>", "write cucumber json result file after run")]
LOAD_IF = staticmethod(lambda config: config.cucumber_json)
LOAD_PRIORITY = 60
def __init__(self):
after.all(self.generate_ccjson)
def generate_ccjson(self, features, marker):
"""
Generates the cucumber json
"""
if not features:
raise RadishError("No features given to generate cucumber json file")
duration = timedelta()
for feature in features:
if feature.state in [Step.State.PASSED, Step.State.FAILED]:
duration += feature.duration
ccjson = []
for feature in features:
if not feature.has_to_run(world.config.scenarios):
continue
feature_description = "\n".join(feature.description)
feature_json = {
"uri": feature.path,
"type": "feature",
"keyword": feature.keyword,
"id": str(feature.id),
"name": feature.sentence,
"line": feature.line,
"description": feature_description,
"tags": [],
"elements": []
}
for i,j in enumerate(feature.tags):
feature_json["tags"].append({"name": "@" + j.name, "line": feature.line - len(feature.tags) + i})
for scenario in (s for s in feature.all_scenarios if not isinstance(s, (ScenarioOutline, ScenarioLoop))):
if not scenario.has_to_run(world.config.scenarios):
continue
scenario_json = {
"keyword": scenario.keyword,
"type": "scenario",
"id": str(scenario.id),
"name": scenario.sentence,
"line": scenario.line,
"description": "",
"steps": [],
"tags": []
}
start_line_no = scenario.line - len(scenario.tags)
for i, tag in enumerate(scenario.tags):
scenario_json["tags"].append({"name": "@" + tag.name, "line": start_line_no + i})
for step in scenario.all_steps:
duration = step.duration.total_seconds() * 1e9 if step.starttime and step.endtime else 0.0
step_json = {
"keyword": step.sentence.split()[0],
"name": step.sentence,
"line": step.line,
"result": {
"status": step.state,
"duration": duration
}
}
if step.state is Step.State.FAILED:
step_json["result"]["error_message"] = step.failure.reason
if step.state is Step.State.UNTESTED:
step_json["result"]["status"] = "skipped"
scenario_json["steps"].append(step_json)
feature_json["elements"].append(scenario_json)
ccjson.append(feature_json)
with open(world.config.cucumber_json, "w+") as f:
content = json.dumps(ccjson, indent=4, sort_keys=True)
f.write(content)
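# Usage sketch (the command line below is an assumption based on the OPTIONS entry
# above):
#   radish features/ --cucumber-json=result.json
# loads this extension (LOAD_IF checks world.config.cucumber_json) and, once every
# feature has finished, the after.all hook writes the cucumber-style JSON report to
# result.json.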
| {
"content_hash": "db0dfefc942972482d34fe87cf6ff410",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 117,
"avg_line_length": 39.205882352941174,
"alnum_prop": 0.5358839709927482,
"repo_name": "SamuelYvon/radish",
"id": "ccd2b452457831ac935fa9fb440df4dc43a4416a",
"size": "4024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radish/extensions/cucumber_json_writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "20376"
},
{
"name": "Python",
"bytes": "261585"
},
{
"name": "Shell",
"bytes": "1686"
}
],
"symlink_target": ""
} |
import os
import sys
_SCRIPT_DIR = os.path.realpath(os.path.dirname(__file__))
_CHROME_SOURCE = os.path.realpath(
os.path.join(_SCRIPT_DIR, *[os.path.pardir] * 6))
sys.path.append(os.path.join(_CHROME_SOURCE, 'build/android/gyp'))
import argparse
import json
from util import build_utils
def process_emoticon_data(metadata):
"""Produce the emoticon data to be consumed by the emoji picker.
Args:
metadata (list(dict)): list of emoticon group data.
Returns:
list(dict): list of readily used emoticon groups.
"""
return [{
"group":
group["group"],
"emoji": [{
"base": {
"string": emoticon["value"],
"name": emoticon["description"],
"keywords": []
},
"alternates": []
} for emoticon in group["emoticon"]]
} for group in metadata]
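# Minimal worked example (sample values are made up, not taken from real metadata):
#   process_emoticon_data([{"group": "Classic",
#                           "emoticon": [{"value": ":-)",
#                                         "description": "smiley face"}]}])
# returns
#   [{"group": "Classic",
#     "emoji": [{"base": {"string": ":-)", "name": "smiley face", "keywords": []},
#                "alternates": []}]}]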
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--metadata',
required=True,
help='emoji metadata ordering file as JSON')
parser.add_argument('--output',
required=True,
help='output JSON file path')
options = parser.parse_args(args)
metadata_file = options.metadata
output_file = options.output
# Parse emoticon ordering data.
metadata = []
with open(metadata_file, 'r') as file:
metadata = json.load(file)
emoticon_data = process_emoticon_data(metadata)
# Write output file atomically in utf-8 format.
with build_utils.AtomicOutput(output_file) as tmp_file:
tmp_file.write(
json.dumps(emoticon_data,
separators=(',', ':'),
ensure_ascii=False).encode('utf-8'))
if __name__ == '__main__':
main(sys.argv[1:])
| {
"content_hash": "1b1e891ceef575c4e9ca881fe67f0f8d",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 68,
"avg_line_length": 28.49230769230769,
"alnum_prop": 0.5680345572354212,
"repo_name": "scheib/chromium",
"id": "0848a76f02557185d5c72dabe64f26b8f9badfac",
"size": "2015",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "chrome/browser/resources/chromeos/emoji_picker/tools/emoticon_data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountNotEnabled
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import TICK_SIZE
class novadax(Exchange):
def describe(self):
return self.deep_extend(super(novadax, self).describe(), {
'id': 'novadax',
'name': 'NovaDAX',
'countries': ['BR'], # Brazil
# 60 requests per second = 1000ms / 60 = 16.6667ms between requests(public endpoints, limited by IP address)
# 20 requests per second => cost = 60 / 20 = 3(private endpoints, limited by API Key)
'rateLimit': 16.6667,
'version': 'v1',
# new metainfo interface
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'fetchAccounts': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchClosedOrders': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchTransactions': True,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': True,
'withdraw': True,
},
'timeframes': {
'1m': 'ONE_MIN',
'5m': 'FIVE_MIN',
'15m': 'FIFTEEN_MIN',
'30m': 'HALF_HOU',
'1h': 'ONE_HOU',
'1d': 'ONE_DAY',
'1w': 'ONE_WEE',
'1M': 'ONE_MON',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/92337550-2b085500-f0b3-11ea-98e7-5794fb07dd3b.jpg',
'api': {
'public': 'https://api.novadax.com',
'private': 'https://api.novadax.com',
},
'www': 'https://www.novadax.com.br',
'doc': [
'https://doc.novadax.com/pt-BR/',
],
'fees': 'https://www.novadax.com.br/fees-and-limits',
'referral': 'https://www.novadax.com.br/?s=ccxt',
},
'api': {
'public': {
'get': {
'common/symbol': 1.2,
'common/symbols': 1.2,
'common/timestamp': 1.2,
'market/tickers': 1.2,
'market/ticker': 1.2,
'market/depth': 1.2,
'market/trades': 1.2,
'market/kline/history': 1.2,
},
},
'private': {
'get': {
'orders/get': 3,
'orders/list': 3,
'orders/fill': 3,
'orders/fills': 3,
'account/getBalance': 3,
'account/subs': 3,
'account/subs/balance': 3,
'account/subs/transfer/record': 3,
'wallet/query/deposit-withdraw': 3,
},
'post': {
'orders/create': 3,
'orders/cancel': 3,
'account/withdraw/coin': 3,
'account/subs/transfer': 3,
},
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': self.parse_number('0.005'),
'maker': self.parse_number('0.0025'),
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'precisionMode': TICK_SIZE,
'exceptions': {
'exact': {
'A99999': ExchangeError, # 500 Failed Internal error
# 'A10000': ExchangeError, # 200 Success Successful request
'A10001': BadRequest, # 400 Params error Parameter is invalid
'A10002': ExchangeError, # 404 Api not found API used is irrelevant
'A10003': AuthenticationError, # 403 Authentication failed Authentication is failed
'A10004': RateLimitExceeded, # 429 Too many requests Too many requests are made
'A10005': PermissionDenied, # 403 Kyc required Need to complete KYC firstly
'A10006': AccountSuspended, # 403 Customer canceled Account is canceled
'A10007': AccountNotEnabled, # 400 Account not exist Sub account does not exist
'A10011': BadSymbol, # 400 Symbol not exist Trading symbol does not exist
'A10012': BadSymbol, # 400 Symbol not trading Trading symbol is temporarily not available
'A10013': OnMaintenance, # 503 Symbol maintain Trading symbol is in maintain
'A30001': OrderNotFound, # 400 Order not found Queried order is not found
'A30002': InvalidOrder, # 400 Order amount is too small Order amount is too small
'A30003': InvalidOrder, # 400 Order amount is invalid Order amount is invalid
'A30004': InvalidOrder, # 400 Order value is too small Order value is too small
'A30005': InvalidOrder, # 400 Order value is invalid Order value is invalid
'A30006': InvalidOrder, # 400 Order price is invalid Order price is invalid
'A30007': InsufficientFunds, # 400 Insufficient balance The balance is insufficient
'A30008': InvalidOrder, # 400 Order was closed The order has been executed
'A30009': InvalidOrder, # 400 Order canceled The order has been cancelled
'A30010': CancelPending, # 400 Order cancelling The order is being cancelled
'A30011': InvalidOrder, # 400 Order price too high The order price is too high
'A30012': InvalidOrder, # 400 Order price too low The order price is too low
'A40004': InsufficientFunds, # {"code":"A40004","data":[],"message":"sub account balance Insufficient"}
},
'broad': {
},
},
'options': {
'fetchOHLCV': {
'volume': 'amount', # 'amount' for base volume or 'vol' for quote volume
},
'transfer': {
'fillResponseFromRequest': True,
},
},
})
def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the novadax api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = self.publicGetCommonTimestamp(params)
#
# {
# "code":"A10000",
# "data":1599090512080,
# "message":"Success"
# }
#
return self.safe_integer(response, 'data')
def fetch_markets(self, params={}):
"""
retrieves data on all markets for novadax
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
response = self.publicGetCommonSymbols(params)
#
# {
# "code":"A10000",
# "data":[
# {
# "amountPrecision":8,
# "baseCurrency":"BTC",
# "minOrderAmount":"0.001",
# "minOrderValue":"25",
# "pricePrecision":2,
# "quoteCurrency":"BRL",
# "status":"ONLINE",
# "symbol":"BTC_BRL",
# "valuePrecision":2
# },
# ],
# "message":"Success"
# }
#
result = []
data = self.safe_value(response, 'data', [])
for i in range(0, len(data)):
market = data[i]
baseId = self.safe_string(market, 'baseCurrency')
quoteId = self.safe_string(market, 'quoteCurrency')
id = self.safe_string(market, 'symbol')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'ONLINE'),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.parse_number(self.parse_precision(self.safe_string(market, 'amountPrecision'))),
'price': self.parse_number(self.parse_precision(self.safe_string(market, 'pricePrecision'))),
'cost': self.parse_number(self.parse_precision(self.safe_string(market, 'valuePrecision'))),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'minOrderAmount'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderValue'),
'max': None,
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker, fetchTickers
#
# {
# "ask":"61946.1",
# "baseVolume24h":"164.41930186",
# "bid":"61815",
# "high24h":"64930.72",
# "lastPrice":"61928.41",
# "low24h":"61156.32",
# "open24h":"64512.46",
# "quoteVolume24h":"10308157.95",
# "symbol":"BTC_BRL",
# "timestamp":1599091115090
# }
#
timestamp = self.safe_integer(ticker, 'timestamp')
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market, '_')
open = self.safe_string(ticker, 'open24h')
last = self.safe_string(ticker, 'lastPrice')
baseVolume = self.safe_string(ticker, 'baseVolume24h')
quoteVolume = self.safe_string(ticker, 'quoteVolume24h')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high24h'),
'low': self.safe_string(ticker, 'low24h'),
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the novadax api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetMarketTicker(self.extend(request, params))
#
# {
# "code":"A10000",
# "data":{
# "ask":"61946.1",
# "baseVolume24h":"164.41930186",
# "bid":"61815",
# "high24h":"64930.72",
# "lastPrice":"61928.41",
# "low24h":"61156.32",
# "open24h":"64512.46",
# "quoteVolume24h":"10308157.95",
# "symbol":"BTC_BRL",
# "timestamp":1599091115090
# },
# "message":"Success"
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_ticker(data, market)
def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the novadax api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
symbols = self.market_symbols(symbols)
response = self.publicGetMarketTickers(params)
#
# {
# "code":"A10000",
# "data":[
# {
# "ask":"61879.36",
# "baseVolume24h":"164.40955092",
# "bid":"61815",
# "high24h":"64930.72",
# "lastPrice":"61820.04",
# "low24h":"61156.32",
# "open24h":"64624.19",
# "quoteVolume24h":"10307493.92",
# "symbol":"BTC_BRL",
# "timestamp":1599091291083
# },
# ],
# "message":"Success"
# }
#
data = self.safe_value(response, 'data', [])
result = {}
for i in range(0, len(data)):
ticker = self.parse_ticker(data[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the novadax api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # default 10, max 20
response = self.publicGetMarketDepth(self.extend(request, params))
#
# {
# "code":"A10000",
# "data":{
# "asks":[
# ["0.037159","0.3741"],
# ["0.037215","0.2706"],
# ["0.037222","1.8459"],
# ],
# "bids":[
# ["0.037053","0.3857"],
# ["0.036969","0.8101"],
# ["0.036953","1.5226"],
# ],
# "timestamp":1599280414448
# },
# "message":"Success"
# }
#
data = self.safe_value(response, 'data', {})
timestamp = self.safe_integer(data, 'timestamp')
return self.parse_order_book(data, market['symbol'], timestamp, 'bids', 'asks')
def parse_trade(self, trade, market=None):
#
# public fetchTrades
#
# {
# "amount":"0.0632",
# "price":"0.037288",
# "side":"BUY",
# "timestamp":1599279694576
# }
#
# private fetchOrderTrades
#
# {
# "id": "608717046691139584",
# "orderId": "608716957545402368",
# "symbol": "BTC_BRL",
# "side": "BUY",
# "amount": "0.0988",
# "price": "45514.76",
# "fee": "0.0000988 BTC",
# "feeAmount": "0.0000988",
# "feeCurrency": "BTC",
# "role": "MAKER",
# "timestamp": 1565171053345
# }
#
# private fetchMyTrades(same endpoint as fetchOrderTrades)
#
# {
# "id": "608717046691139584",
# "orderId": "608716957545402368",
# "symbol": "BTC_BRL",
# "side": "BUY",
# "amount": "0.0988",
# "price": "45514.76",
# "fee": "0.0000988 BTC",
# "feeAmount": "0.0000988",
# "feeCurrency": "BTC",
# "role": "MAKER",
# "timestamp": 1565171053345
# }
#
id = self.safe_string(trade, 'id')
orderId = self.safe_string(trade, 'orderId')
timestamp = self.safe_integer(trade, 'timestamp')
side = self.safe_string_lower(trade, 'side')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'amount')
marketId = self.safe_string(trade, 'symbol')
symbol = self.safe_symbol(marketId, market, '_')
takerOrMaker = self.safe_string_lower(trade, 'role')
feeString = self.safe_string(trade, 'fee')
fee = None
if feeString is not None:
feeCurrencyId = self.safe_string(trade, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': self.safe_string(trade, 'feeAmount'),
'currency': feeCurrencyCode,
}
return self.safe_trade({
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'price': priceString,
'amount': amountString,
'cost': None,
'takerOrMaker': takerOrMaker,
'fee': fee,
'info': trade,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the novadax api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # default 100
response = self.publicGetMarketTrades(self.extend(request, params))
#
# {
# "code":"A10000",
# "data":[
# {"amount":"0.0632","price":"0.037288","side":"BUY","timestamp":1599279694576},
# {"amount":"0.0052","price":"0.03715","side":"SELL","timestamp":1599276606852},
# {"amount":"0.0058","price":"0.037188","side":"SELL","timestamp":1599275187812},
# ],
# "message":"Success"
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the novadax api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'unit': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe)
now = self.seconds()
if limit is None:
limit = 3000 # max
if since is None:
request['from'] = now - limit * duration
request['to'] = now
else:
startFrom = int(since / 1000)
request['from'] = startFrom
request['to'] = self.sum(startFrom, limit * duration)
response = self.publicGetMarketKlineHistory(self.extend(request, params))
#
# {
# "code": "A10000",
# "data": [
# {
# "amount": 8.25709100,
# "closePrice": 62553.20,
# "count": 29,
# "highPrice": 62592.87,
# "lowPrice": 62553.20,
# "openPrice": 62554.23,
# "score": 1602501480,
# "symbol": "BTC_BRL",
# "vol": 516784.2504067500
# }
# ],
# "message": "Success"
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "amount": 8.25709100,
# "closePrice": 62553.20,
# "count": 29,
# "highPrice": 62592.87,
# "lowPrice": 62553.20,
# "openPrice": 62554.23,
# "score": 1602501480,
# "symbol": "BTC_BRL",
# "vol": 516784.2504067500
# }
#
options = self.safe_value(self.options, 'fetchOHLCV', {})
volumeField = self.safe_string(options, 'volume', 'amount') # or vol
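        # Configuration note (added, illustrative): the volume column can be switched from the
        # base-currency 'amount' field to the quote-currency 'vol' field via the client options,
        # e.g. exchange.options['fetchOHLCV'] = {'volume': 'vol'} on an assumed instance `exchange`.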
return [
self.safe_timestamp(ohlcv, 'score'),
self.safe_number(ohlcv, 'openPrice'),
self.safe_number(ohlcv, 'highPrice'),
self.safe_number(ohlcv, 'lowPrice'),
self.safe_number(ohlcv, 'closePrice'),
self.safe_number(ohlcv, volumeField),
]
def parse_balance(self, response):
data = self.safe_value(response, 'data', [])
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(data)):
balance = data[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'hold')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the novadax api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
self.load_markets()
response = self.privateGetAccountGetBalance(params)
#
# {
# "code": "A10000",
# "data": [
# {
# "available": "1.23",
# "balance": "0.23",
# "currency": "BTC",
# "hold": "1"
# }
# ],
# "message": "Success"
# }
#
return self.parse_balance(response)
def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float|None price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the novadax api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
market = self.market(symbol)
uppercaseType = type.upper()
uppercaseSide = side.upper()
request = {
'symbol': market['id'],
            'side': uppercaseSide,  # BUY or SELL
# 'amount': self.amount_to_precision(symbol, amount),
# "price": "1234.5678", # required for LIMIT and STOP orders
# 'operator': '' # for stop orders, can be found in order introduction
# 'stopPrice': self.price_to_precision(symbol, stopPrice),
# 'accountId': '...', # subaccount id, optional
}
stopPrice = self.safe_value_2(params, 'triggerPrice', 'stopPrice')
if stopPrice is None:
if (uppercaseType == 'STOP_LIMIT') or (uppercaseType == 'STOP_MARKET'):
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for ' + uppercaseType + ' orders')
else:
if uppercaseType == 'LIMIT':
uppercaseType = 'STOP_LIMIT'
elif uppercaseType == 'MARKET':
uppercaseType = 'STOP_MARKET'
defaultOperator = 'LTE' if (uppercaseSide == 'BUY') else 'GTE'
request['operator'] = self.safe_string(params, 'operator', defaultOperator)
request['stopPrice'] = self.price_to_precision(symbol, stopPrice)
params = self.omit(params, ['triggerPrice', 'stopPrice'])
if (uppercaseType == 'LIMIT') or (uppercaseType == 'STOP_LIMIT'):
request['price'] = self.price_to_precision(symbol, price)
request['amount'] = self.amount_to_precision(symbol, amount)
elif (uppercaseType == 'MARKET') or (uppercaseType == 'STOP_MARKET'):
if uppercaseSide == 'SELL':
request['amount'] = self.amount_to_precision(symbol, amount)
elif uppercaseSide == 'BUY':
value = self.safe_number(params, 'value')
createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
if createMarketBuyOrderRequiresPrice:
if price is not None:
if value is None:
value = amount * price
elif value is None:
                        raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost (amount to spend), where cost = amount * price. Supply a price argument to the createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument or in the 'value' extra parameter (the exchange-specific behaviour)")
else:
value = amount if (value is None) else value
request['value'] = self.cost_to_precision(symbol, value)
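                # Worked example (added, illustrative): a market BUY with amount=0.001 and
                # price=210000 sends value = 0.001 * 210000 = 210 in the quote currency;
                # with options['createMarketBuyOrderRequiresPrice'] = False the caller passes
                # the quote-currency total directly via `amount` or the `value` parameter.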
request['type'] = uppercaseType
response = self.privatePostOrdersCreate(self.extend(request, params))
#
# {
# "code": "A10000",
# "data": {
# "amount": "0.001",
# "averagePrice": null,
# "filledAmount": "0",
# "filledFee": "0",
# "filledValue": "0",
# "id": "870613508008464384",
# "operator": "GTE",
# "price": "210000",
# "side": "BUY",
# "status": "SUBMITTED",
# "stopPrice": "211000",
# "symbol": "BTC_BRL",
# "timestamp": 1627612035528,
# "type": "STOP_LIMIT",
# "value": "210"
# },
# "message": "Success"
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_order(data, market)
def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str|None symbol: not used by novadax cancelOrder()
:param dict params: extra parameters specific to the novadax api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
request = {
'id': id,
}
response = self.privatePostOrdersCancel(self.extend(request, params))
#
# {
# "code": "A10000",
# "data": {
# "result": True
# },
# "message": "Success"
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_order(data)
def fetch_order(self, id, symbol=None, params={}):
"""
        fetches information on an order made by the user
        :param str id: order id
        :param str|None symbol: not used by novadax fetchOrder
:param dict params: extra parameters specific to the novadax api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
request = {
'id': id,
}
response = self.privateGetOrdersGet(self.extend(request, params))
#
# {
# "code": "A10000",
# "data": {
# "id": "608695623247466496",
# "symbol": "BTC_BRL",
# "type": "MARKET",
# "side": "SELL",
# "price": null,
# "averagePrice": "0",
# "amount": "0.123",
# "filledAmount": "0",
# "value": null,
# "filledValue": "0",
# "filledFee": "0",
# "status": "REJECTED",
# "timestamp": 1565165945588
# },
# "message": "Success"
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_order(data)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple orders made by the user
:param str|None symbol: unified market symbol of the market orders were made in
:param int|None since: the earliest time in ms to fetch orders for
        :param int|None limit: the maximum number of order structures to retrieve
:param dict params: extra parameters specific to the novadax api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
request = {
# 'symbol': market['id'],
# 'status': 'SUBMITTED,PROCESSING', # SUBMITTED, PROCESSING, PARTIAL_FILLED, CANCELING, FILLED, CANCELED, REJECTED
# 'fromId': '...', # order id to begin with
# 'toId': '...', # order id to end up with
# 'fromTimestamp': since,
# 'toTimestamp': self.milliseconds(),
# 'limit': limit, # default 100, max 100
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit # default 100, max 100
if since is not None:
request['fromTimestamp'] = since
response = self.privateGetOrdersList(self.extend(request, params))
#
# {
# "code": "A10000",
# "data": [
# {
# "id": "608695678650028032",
# "symbol": "BTC_BRL",
# "type": "MARKET",
# "side": "SELL",
# "price": null,
# "averagePrice": "0",
# "amount": "0.123",
# "filledAmount": "0",
# "value": null,
# "filledValue": "0",
# "filledFee": "0",
# "status": "REJECTED",
# "timestamp": 1565165958796
# },
# ],
# "message": "Success"
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
        :param int|None limit: the maximum number of open order structures to retrieve
:param dict params: extra parameters specific to the novadax api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
request = {
'status': 'SUBMITTED,PROCESSING,PARTIAL_FILLED,CANCELING',
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple closed orders made by the user
:param str|None symbol: unified market symbol of the market orders were made in
:param int|None since: the earliest time in ms to fetch orders for
        :param int|None limit: the maximum number of order structures to retrieve
:param dict params: extra parameters specific to the novadax api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
request = {
'status': 'FILLED,CANCELED,REJECTED',
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
"""
fetch all the trades made from a single order
:param str id: order id
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades to retrieve
:param dict params: extra parameters specific to the novadax api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
self.load_markets()
request = {
'id': id,
}
response = self.privateGetOrdersFill(self.extend(request, params))
market = None
if symbol is not None:
market = self.market(symbol)
data = self.safe_value(response, 'data', [])
#
# {
# "code": "A10000",
# "data": [
# {
# "id": "608717046691139584",
# "orderId": "608716957545402368",
# "symbol": "BTC_BRL",
# "side": "BUY",
# "amount": "0.0988",
# "price": "45514.76",
# "fee": "0.0000988 BTC",
# "feeAmount": "0.0000988",
# "feeCurrency": "BTC",
# "role": "MAKER",
# "timestamp": 1565171053345
# },
# ],
# "message": "Success"
# }
#
return self.parse_trades(data, market, since, limit)
def parse_order_status(self, status):
statuses = {
'SUBMITTED': 'open',
'PROCESSING': 'open',
'PARTIAL_FILLED': 'open',
'CANCELING': 'open',
'FILLED': 'closed',
'CANCELED': 'canceled',
'REJECTED': 'rejected',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder, fetchOrders, fetchOrder
#
# {
# "amount": "0.001",
# "averagePrice": null,
# "filledAmount": "0",
# "filledFee": "0",
# "filledValue": "0",
# "id": "870613508008464384",
# "operator": "GTE",
# "price": "210000",
# "side": "BUY",
# "status": "SUBMITTED",
# "stopPrice": "211000",
# "symbol": "BTC_BRL",
# "timestamp": 1627612035528,
# "type": "STOP_LIMIT",
# "value": "210"
# }
#
# cancelOrder
#
# {
# "result": True
# }
#
id = self.safe_string(order, 'id')
amount = self.safe_string(order, 'amount')
price = self.safe_string(order, 'price')
cost = self.safe_string_2(order, 'filledValue', 'value')
type = self.safe_string_lower(order, 'type')
side = self.safe_string_lower(order, 'side')
status = self.parse_order_status(self.safe_string(order, 'status'))
timestamp = self.safe_integer(order, 'timestamp')
average = self.safe_string(order, 'averagePrice')
filled = self.safe_string(order, 'filledAmount')
fee = None
feeCost = self.safe_number(order, 'filledFee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': None,
}
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market, '_')
stopPrice = self.safe_number(order, 'stopPrice')
return self.safe_order({
'id': id,
'clientOrderId': None,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': None,
'status': status,
'fee': fee,
'trades': None,
}, market)
def transfer(self, code, amount, fromAccount, toAccount, params={}):
"""
transfer currency internally between wallets on the same account
:param str code: unified currency code
:param float amount: amount to transfer
:param str fromAccount: account to transfer from
:param str toAccount: account to transfer to
:param dict params: extra parameters specific to the novadax api endpoint
:returns dict: a `transfer structure <https://docs.ccxt.com/en/latest/manual.html#transfer-structure>`
"""
self.load_markets()
currency = self.currency(code)
if fromAccount != 'main' and toAccount != 'main':
raise ExchangeError(self.id + ' transfer() supports transfers between main account and subaccounts only')
# master-transfer-in = from master account to subaccount
# master-transfer-out = from subaccount to master account
type = 'master-transfer-in' if (fromAccount == 'main') else 'master-transfer-out'
request = {
'transferAmount': self.currency_to_precision(code, amount),
'currency': currency['id'],
'subId': toAccount if (type == 'master-transfer-in') else fromAccount,
'transferType': type,
}
response = self.privatePostAccountSubsTransfer(self.extend(request, params))
#
# {
# "code":"A10000",
# "message":"Success",
# "data":40
# }
#
transfer = self.parse_transfer(response, currency)
transferOptions = self.safe_value(self.options, 'transfer', {})
fillResponseFromRequest = self.safe_value(transferOptions, 'fillResponseFromRequest', True)
if fillResponseFromRequest:
transfer['fromAccount'] = fromAccount
transfer['toAccount'] = toAccount
transfer['amount'] = amount
return transfer
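    # Illustrative usage sketch for transfer() above (not part of the original file); it
    # assumes an authenticated client named `exchange` and an existing sub-account id:
    #
    #     exchange.transfer('BTC', 0.01, 'main', 'CA648856083527372800')  # main -> sub-account
    #     exchange.transfer('BTC', 0.01, 'CA648856083527372800', 'main')  # sub-account -> main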
def parse_transfer(self, transfer, currency=None):
#
# {
# "code":"A10000",
# "message":"Success",
# "data":40
# }
#
id = self.safe_string(transfer, 'data')
status = self.safe_string(transfer, 'message')
currencyCode = self.safe_currency_code(None, currency)
return {
'info': transfer,
'id': id,
'amount': None,
'code': currencyCode, # kept here for backward-compatibility, but will be removed soon
'currency': currencyCode,
'fromAccount': None,
'toAccount': None,
'timestamp': None,
'datetime': None,
'status': status,
}
def parse_transfer_status(self, status):
statuses = {
'SUCCESS': 'pending',
}
return self.safe_string(statuses, status, 'failed')
def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag:
:param dict params: extra parameters specific to the novadax api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.load_markets()
currency = self.currency(code)
request = {
'code': currency['id'],
'amount': self.currency_to_precision(code, amount),
'wallet': address,
}
if tag is not None:
request['tag'] = tag
response = self.privatePostAccountWithdrawCoin(self.extend(request, params))
#
# {
# "code":"A10000",
# "data": "DR123",
# "message":"Success"
# }
#
return self.parse_transaction(response, currency)
def fetch_accounts(self, params={}):
"""
fetch all the accounts associated with a profile
:param dict params: extra parameters specific to the novadax api endpoint
:returns dict: a dictionary of `account structures <https://docs.ccxt.com/en/latest/manual.html#account-structure>` indexed by the account type
"""
response = self.privateGetAccountSubs(params)
#
# {
# "code": "A10000",
# "data": [
# {
# "subId": "CA648856083527372800",
# "state": "Normal",
# "subAccount": "003",
# "subIdentify": "003"
# }
# ],
# "message": "Success"
# }
#
data = self.safe_value(response, 'data', [])
result = []
for i in range(0, len(data)):
account = data[i]
accountId = self.safe_string(account, 'subId')
type = self.safe_string(account, 'subAccount')
result.append({
'id': accountId,
'type': type,
'currency': None,
'info': account,
})
return result
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
        :param int|None limit: the maximum number of deposit structures to retrieve
:param dict params: extra parameters specific to the novadax api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
request = {
'type': 'coin_in',
}
return self.fetch_transactions(code, since, limit, self.extend(request, params))
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
        :param int|None limit: the maximum number of withdrawal structures to retrieve
:param dict params: extra parameters specific to the novadax api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
request = {
'type': 'coin_out',
}
return self.fetch_transactions(code, since, limit, self.extend(request, params))
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
"""
fetch history of deposits and withdrawals
:param str|None code: unified currency code for the currency of the transactions, default is None
:param int|None since: timestamp in ms of the earliest transaction, default is None
:param int|None limit: max number of transactions to return, default is None
:param dict params: extra parameters specific to the novadax api endpoint
        :returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
self.load_markets()
request = {
# 'currency': currency['id'],
# 'type': 'coin_in', # 'coin_out'
# 'direct': 'asc', # 'desc'
# 'size': limit, # default 100
# 'start': id, # offset id
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit
response = self.privateGetWalletQueryDepositWithdraw(self.extend(request, params))
#
# {
# "code": "A10000",
# "data": [
# {
# "id": "DR562339304588709888",
# "type": "COIN_IN",
# "currency": "XLM",
# "chain": "XLM",
# "address": "GCUTK7KHPJC3ZQJ3OMWWFHAK2OXIBRD4LNZQRCCOVE7A2XOPP2K5PU5Q",
# "addressTag": "1000009",
# "amount": 1.0,
# "state": "SUCCESS",
# "txHash": "39210645748822f8d4ce673c7559aa6622e6e9cdd7073bc0fcae14b1edfda5f4",
# "createdAt": 1554113737000,
# "updatedAt": 1601371273000
# }
# ],
# "message": "Success"
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_transactions(data, currency, since, limit)
def parse_transaction_status(self, status):
        # Pending          the record is waiting to be broadcast to the chain
        # x/M confirming   the confirming state of the tx, where M is the total number of confirmations needed
        # SUCCESS          the record completed successfully
        # FAIL             the record failed
parts = status.split(' ')
status = self.safe_string(parts, 1, status)
statuses = {
'Pending': 'pending',
'confirming': 'pending',
'SUCCESS': 'ok',
'FAIL': 'failed',
}
return self.safe_string(statuses, status, status)
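    # Worked example (added, illustrative): a state string such as "3/6 confirming" is split
    # on the space, the second token 'confirming' maps to 'pending' above; plain 'SUCCESS' or
    # 'FAIL' have no second token, fall back to the original value, and map to 'ok' / 'failed'.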
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "code":"A10000",
# "data": "DR123",
# "message":"Success"
# }
#
# fetchTransactions
#
# {
# "id": "DR562339304588709888",
# "type": "COIN_IN",
# "currency": "XLM",
# "chain": "XLM",
# "address": "GCUTK7KHPJC3ZQJ3OMWWFHAK2OXIBRD4LNZQRCCOVE7A2XOPP2K5PU5Q",
# "addressTag": "1000009",
# "amount": 1.0,
# "state": "SUCCESS",
# "txHash": "39210645748822f8d4ce673c7559aa6622e6e9cdd7073bc0fcae14b1edfda5f4",
# "createdAt": 1554113737000,
# "updatedAt": 1601371273000
# }
#
id = self.safe_string_2(transaction, 'id', 'data')
type = self.safe_string(transaction, 'type')
if type == 'COIN_IN':
type = 'deposit'
elif type == 'COIN_OUT':
type = 'withdraw'
amount = self.safe_number(transaction, 'amount')
address = self.safe_string(transaction, 'address')
tag = self.safe_string(transaction, 'addressTag')
txid = self.safe_string(transaction, 'txHash')
timestamp = self.safe_integer(transaction, 'createdAt')
updated = self.safe_integer(transaction, 'updatedAt')
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
network = self.safe_string(transaction, 'chain')
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'network': network,
'address': address,
'addressTo': address,
'addressFrom': None,
'tag': tag,
'tagTo': tag,
'tagFrom': None,
'status': status,
'type': type,
'updated': updated,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': None,
}
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
        :param int|None limit: the maximum number of trade structures to retrieve
:param dict params: extra parameters specific to the novadax api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
self.load_markets()
request = {
# 'orderId': id, # Order ID, string
# 'symbol': market['id'], # The trading symbol, like BTC_BRL, string
# 'fromId': fromId, # Search fill id to begin with, string
# 'toId': toId, # Search fill id to end up with, string
# 'fromTimestamp': since, # Search order fill time to begin with, in milliseconds, string
# 'toTimestamp': self.milliseconds(), # Search order fill time to end up with, in milliseconds, string
# 'limit': limit, # The number of fills to return, default 100, max 100, string
            # 'accountId': subaccountId, # Sub account ID, if not informed, the fills will be returned under the master account, string
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit
if since is not None:
request['fromTimestamp'] = since
response = self.privateGetOrdersFills(self.extend(request, params))
#
# {
# "code": "A10000",
# "data": [
# {
# "id": "608717046691139584",
# "orderId": "608716957545402368",
# "symbol": "BTC_BRL",
# "side": "BUY",
# "amount": "0.0988",
# "price": "45514.76",
# "fee": "0.0000988 BTC",
# "feeAmount": "0.0000988",
# "feeCurrency": "BTC",
# "role": "MAKER",
# "timestamp": 1565171053345
# },
# ],
# "message": "Success"
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + self.version + '/' + self.implode_params(path, params)
url = self.urls['api'][api] + request
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
elif api == 'private':
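            # Added note: private calls are signed by concatenating METHOD + "\n" + request
            # path + "\n" + (the md5 hex of the JSON body for POST, or the url-encoded,
            # key-sorted query for GET) + "\n" + timestamp, and HMAC-ing that string with the
            # API secret (ccxt's default SHA-256 hex digest); the result is sent in the
            # X-Nova-Signature header.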
self.check_required_credentials()
timestamp = str(self.milliseconds())
headers = {
'X-Nova-Access-Key': self.apiKey,
'X-Nova-Timestamp': timestamp,
}
queryString = None
if method == 'POST':
body = self.json(query)
queryString = self.hash(self.encode(body), 'md5')
headers['Content-Type'] = 'application/json'
else:
if query:
url += '?' + self.urlencode(query)
queryString = self.urlencode(self.keysort(query))
auth = method + "\n" + request + "\n" + queryString + "\n" + timestamp # eslint-disable-line quotes
headers['X-Nova-Signature'] = self.hmac(self.encode(auth), self.encode(self.secret))
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
#
# {"code":"A10003","data":[],"message":"Authentication failed, Invalid accessKey."}
#
errorCode = self.safe_string(response, 'code')
if errorCode != 'A10000':
message = self.safe_string(response, 'message')
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback) # unknown message
| {
"content_hash": "27f52842665adbc1440d6cb7798ea3e8",
"timestamp": "",
"source": "github",
"line_count": 1419,
"max_line_length": 514,
"avg_line_length": 42.68851303735025,
"alnum_prop": 0.4993974411886092,
"repo_name": "ccxt/ccxt",
"id": "9a4a071c079f657007adc480a908e122c0aa48bd",
"size": "60756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ccxt/novadax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1724"
},
{
"name": "HTML",
"bytes": "246"
},
{
"name": "JavaScript",
"bytes": "11619228"
},
{
"name": "PHP",
"bytes": "10272973"
},
{
"name": "Python",
"bytes": "9037496"
},
{
"name": "Shell",
"bytes": "6887"
}
],
"symlink_target": ""
} |
"""Unit tests for the write transform."""
import logging
import unittest
import apache_beam as beam
from apache_beam.io import iobase
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that, is_empty
from apache_beam.transforms.ptransform import PTransform
class _TestSink(iobase.Sink):
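  # Added note: the Write transform drives a Sink in three phases -- initialize_write()
  # once, then open_writer()/write()/close() per bundle of elements, and finally
  # finalize_write() once with the collected per-writer results.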
TEST_INIT_RESULT = 'test_init_result'
def __init__(self, return_init_result=True, return_write_results=True):
self.return_init_result = return_init_result
self.return_write_results = return_write_results
def initialize_write(self):
if self.return_init_result:
return _TestSink.TEST_INIT_RESULT
def finalize_write(self, init_result, writer_results):
self.init_result_at_finalize = init_result
self.write_results_at_finalize = writer_results
def open_writer(self, init_result, uid):
writer = _TestWriter(init_result, uid, self.return_write_results)
return writer
class _TestWriter(iobase.Writer):
STATE_UNSTARTED, STATE_WRITTEN, STATE_CLOSED = 0, 1, 2
TEST_WRITE_RESULT = 'test_write_result'
def __init__(self, init_result, uid, return_write_results=True):
self.state = _TestWriter.STATE_UNSTARTED
self.init_result = init_result
self.uid = uid
self.write_output = []
self.return_write_results = return_write_results
def close(self):
assert self.state in (
_TestWriter.STATE_WRITTEN, _TestWriter.STATE_UNSTARTED)
self.state = _TestWriter.STATE_CLOSED
if self.return_write_results:
return _TestWriter.TEST_WRITE_RESULT
def write(self, value):
if self.write_output:
assert self.state == _TestWriter.STATE_WRITTEN
else:
assert self.state == _TestWriter.STATE_UNSTARTED
self.state = _TestWriter.STATE_WRITTEN
self.write_output.append(value)
class WriteToTestSink(PTransform):
def __init__(self, return_init_result=True, return_write_results=True):
self.return_init_result = return_init_result
self.return_write_results = return_write_results
self.last_sink = None
self.label = 'write_to_test_sink'
def expand(self, pcoll):
self.last_sink = _TestSink(return_init_result=self.return_init_result,
return_write_results=self.return_write_results)
return pcoll | beam.io.Write(self.last_sink)
class WriteTest(unittest.TestCase):
DATA = ['some data', 'more data', 'another data', 'yet another data']
def _run_write_test(self,
data,
return_init_result=True,
return_write_results=True):
write_to_test_sink = WriteToTestSink(return_init_result,
return_write_results)
with TestPipeline() as p:
result = p | beam.Create(data) | write_to_test_sink | beam.Map(list)
assert_that(result, is_empty())
sink = write_to_test_sink.last_sink
self.assertIsNotNone(sink)
def test_write(self):
self._run_write_test(WriteTest.DATA)
def test_write_with_empty_pcollection(self):
data = []
self._run_write_test(data)
def test_write_no_init_result(self):
self._run_write_test(WriteTest.DATA, return_init_result=False)
def test_write_no_write_results(self):
self._run_write_test(WriteTest.DATA, return_write_results=False)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| {
"content_hash": "a113a11e9176841b8a80afd0cc76762b",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 31.174311926605505,
"alnum_prop": 0.6839317245438493,
"repo_name": "yk5/beam",
"id": "50f0debb0a704d267fc0c390e189ec8934bd6701",
"size": "4182",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/transforms/write_ptransform_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "50057"
},
{
"name": "Java",
"bytes": "11703716"
},
{
"name": "Protocol Buffer",
"bytes": "55082"
},
{
"name": "Python",
"bytes": "2856021"
},
{
"name": "Shell",
"bytes": "44966"
}
],
"symlink_target": ""
} |
"""Classes related to Rivendell Artists.
Artist uses sqlalchemy to represent a single artist and various
aspects about it as used in the Scheduler.
"""
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import Unicode
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class Artist(Base):
"""An artist.
Contains a single artist and its related scheduling information,
such as the "age" to calculate "artist separation" for
the generated merge file. Maintains a persistent list of artists
and the number of units ago each was last scheduled.
"""
__tablename__ = 'artists'
name = Column(Unicode(34), primary_key=True)
age = Column(Integer, default=1)
def __repr__(self):
"""Represent ourself to the world."""
return f"'{self.name}':{self.age}"
    def reset(self):
        """Reset age to 1. This happens when the artist gets scheduled."""
        self.age = 1
def __add__(self, value):
"""Increment the age counter for the named artist."""
self.age = self.age + value
return self
def __iadd__(self, value):
"""In-place increment (+=) of age counter for the named artist."""
self.age += value
return self
def __sub__(self, value):
"""Decrement the artist's age counter."""
self.age = self.age - value
return self
def __isub__(self, value):
"""In-place decrement (-=) of artist's age counter."""
self.age -= value
return self
class Artists():
"""The collection of all artists."""
def __init__(self, method, location, separation):
"""Make a group of artists.
:param method: The database backend method (sqlite, mysql,
etc.).
:param location: The location of the backend database.
:param separation: The integer value representing the number
of "units" that must transpire before an artist may be
scheduled.
"""
self.method = method
self.location = location
self.separation = separation
self.engine = create_engine(method + '://' + location, echo=False)
session = sessionmaker(bind=self.engine)
self.session = session()
Base.metadata.create_all(self.engine)
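    # Illustrative usage sketch (not part of the original file); the sqlite file name and
    # separation value are assumptions:
    #
    #     artists = Artists('sqlite', '/artists.db', 10)   # engine URL becomes 'sqlite:///artists.db'
    #     if artists.ok_to_schedule('Some Artist'):
    #         artists.bump('Some Artist')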
@property
    def all(self):
        """Read the data source and return all the rows as a dictionary."""
        return {a.name: a.age for a in self.session.query(Artist).all()}
def add(self, artist):
"""Add this artist to the list."""
try:
new_artist = Artist(name=artist.lower(), age=1)
self.session.add(new_artist)
#self.session.commit()
        except Exception:
print("Artist: ERROR: unable to add new artist '{a}'."
.format(a=artist))
return False
return True
def bump(self, artist):
"""Increment the age counter for all artists in the list.
Then reset the counter for 'artist' to 1 since this is the artist
we are scheduling.
"""
for one_of_all_artists in self.session.query(Artist).all():
one_of_all_artists += 1
if artist is None:
artist = 'xx-missing-artist-xx'
this_artist = self.session.query(Artist).filter_by(name=artist.lower()).first()
if this_artist is None:
self.add(artist)
else:
this_artist.reset()
def ok_to_schedule(self, artist):
"""Whether it's OK to schedule this artist.
Has this artist been scheduled more recently than "separation"
units?
"""
if artist is None:
artist = 'xx-missing-artist-xx'
a = self.session.query(Artist).filter_by(name=artist.lower()).first()
if a is None:
# Apparently we have not yet seen this artist.
if not self.add(artist):
return False
return True
if a.age < self.separation:
return False
return True
| {
"content_hash": "1d7cc295b6d216554851ad27b6c3ae9b",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 87,
"avg_line_length": 30.028985507246375,
"alnum_prop": 0.6037644787644788,
"repo_name": "opensourceradio/ram",
"id": "eda170784051902cc3630adb43e4b6f9e861d7a1",
"size": "4144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usr/local/bin/btd_sched/artist.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Perl",
"bytes": "62318"
},
{
"name": "Python",
"bytes": "55373"
},
{
"name": "Shell",
"bytes": "443379"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^instavent/', include('weekend.urls', namespace='instavent')),
url(r'^admin/', include(admin.site.urls)),
]
| {
"content_hash": "3d291ef8c1ab3ae175291054d04d48dc",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 72,
"avg_line_length": 30.571428571428573,
"alnum_prop": 0.705607476635514,
"repo_name": "ecatkins/week6weekend",
"id": "4f38bc441e45841cc56228e5e655c66d4e242d7b",
"size": "214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/project/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2374"
},
{
"name": "HTML",
"bytes": "2539"
},
{
"name": "JavaScript",
"bytes": "3399"
},
{
"name": "Python",
"bytes": "14118"
}
],
"symlink_target": ""
} |
from appengine.memcache.guestbook import main
from tests import DatastoreTestbedCase
import webapp2
class TestHandlers(DatastoreTestbedCase):
def test_hello(self):
# Build a request object passing the URI path to be tested.
# You can also pass headers, query arguments etc.
request = webapp2.Request.blank('/')
# Get a response for that request.
response = request.get_response(main.app)
# Let's check if the response is correct.
self.assertEqual(response.status_int, 200)
| {
"content_hash": "fba96662218962f768877fcc7d6fb38b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 67,
"avg_line_length": 33.5625,
"alnum_prop": 0.7001862197392924,
"repo_name": "EricYangzhiHong/python-docs-samples",
"id": "b9747929d070e49769b230bd4df3ec938bd1cc9b",
"size": "1157",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "appengine/memcache/guestbook/tests/test_guestbook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "141289"
}
],
"symlink_target": ""
} |
import json
import libvirt
import sys
from lxml import etree
from . import virt_ifaces
def _handle_event(conn, domain, event, detail, opaque):
msg = dict(
type='libvirt',
vm=dict(
name=domain.name(),
uuid=domain.UUIDString(),
uri=opaque['uri'],
),
)
if event == libvirt.VIR_DOMAIN_EVENT_DEFINED:
xml_s = domain.XMLDesc(flags=0)
tree = etree.fromstring(xml_s)
ifaces = virt_ifaces.get_interfaces(tree)
ifaces = list(ifaces)
msg['vm']['interfaces'] = ifaces
elif event == libvirt.VIR_DOMAIN_EVENT_UNDEFINED:
pass
else:
print >>sys.stderr, \
('unknown event:'
+ ' Domain {name} event={event} detail={detail}'.format(
name=domain.name(),
event=event,
detail=detail,
)
)
return
opaque['callback'](msg)
def getAllDomains(conn):
"""
List and get all domains, active or not.
The python bindings don't seem to have this at version
0.9.8-2ubuntu17.1 and a combination of listDefinedDomains and
listDomainsID is just miserable.
http://libvirt.org/html/libvirt-libvirt.html#virConnectListAllDomains
Also fetch the actual domain object, as we'll need the xml.
"""
for name in conn.listDefinedDomains():
try:
domain = conn.lookupByName(name)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
# lost a race, someone undefined the domain
# between listing names and fetching details
pass
else:
raise
else:
yield domain
for id_ in conn.listDomainsID():
try:
domain = conn.lookupByID(id_)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
# lost a race, someone undefined the domain
# between listing names and fetching details
pass
else:
raise
else:
yield domain
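# Illustrative usage sketch for getAllDomains() (not part of the original file); the
# connection URI is an assumption:
#
#     conn = libvirt.openReadOnly('qemu:///system')
#     for dom in getAllDomains(conn):
#         print dom.name(), dom.UUIDString()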
def monitor(uris, callback):
libvirt.virEventRegisterDefaultImpl()
conns = {}
for uri in uris:
conn = libvirt.openReadOnly(uri)
conns[uri] = conn
conn.setKeepAlive(5, 3)
conn.domainEventRegisterAny(
dom=None,
eventID=libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
cb=_handle_event,
opaque=dict(uri=uri, callback=callback),
)
for uri, conn in conns.iteritems():
for domain in getAllDomains(conn):
# inject fake defined event for each domain that
# exists at startup
_handle_event(
conn=conn,
domain=domain,
event=libvirt.VIR_DOMAIN_EVENT_DEFINED,
detail=None,
opaque=dict(uri=uri, callback=callback),
)
# signal that all current vms have been observed; that is, pruning
# old entries is safe
callback(dict(type='libvirt_complete'))
while True:
libvirt.virEventRunDefaultImpl()
for uri, conn in conns.iteritems():
if not conn.isAlive() == 1:
# conn.close() tends to fail at this point, so don't
# even try
raise RuntimeError(
'Lost connection to {uri}'.format(uri=uri),
)
def main():
def cb(msg):
sys.stdout.write(json.dumps(msg) + '\n')
sys.stdout.flush()
uris = sys.argv[1:]
monitor(uris, callback=cb)
if __name__ == "__main__":
main()
| {
"content_hash": "14e804d2188b63d7dbd88fdb05ac956a",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 73,
"avg_line_length": 28.119402985074625,
"alnum_prop": 0.5448513800424628,
"repo_name": "ceph/propernoun",
"id": "d54b642cb4ef14023be7fc26ca8f9a842e6fd206",
"size": "3768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "propernoun/virt_mon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36804"
},
{
"name": "Shell",
"bytes": "272"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
import argparse
import maskgen.scenario_model
from maskgen.graph_rules import processProjectProperties
from maskgen.batch import pick_projects, BatchProcessor
from maskgen.userinfo import get_username, setPwdX,CustomPwdX
from maskgen.validation.core import hasErrorMessages
from maskgen.preferences_initializer import initialize
from maskgen.external.exporter import ExportManager
import logging
export_manager = ExportManager()
def upload_projects(args, project):
"""
    Export a single project and upload it to an S3 bucket.
    :param args: parsed command-line options, including:
        s3: bucket/dir S3 location
        qa: whether the journal needs to be QA'd before upload
        username: export and QA username
        updatename: change the project username to match the username value
        organization: change the project organization
    :param project: path of the project to export
"""
s3dir = args.s3
qa = args.qa
username = args.username
organization = args.organization
updatename = args.updatename
ignore_errors = args.ignore
log = logging.getLogger('maskgen')
redactions= [redaction.strip() for redaction in args.redacted.split(',')]
scModel = maskgen.scenario_model.loadProject(project)
if username is None:
setPwdX(CustomPwdX(scModel.getGraph().getDataItem("username")))
else:
if updatename:
oldValue = scModel.getProjectData('username')
scModel.setProjectData('creator', username)
scModel.setProjectData('username', username)
scModel.getGraph().replace_attribute_value('username', oldValue, username)
if organization is not None:
scModel.setProjectData('organization', organization)
scModel.save()
processProjectProperties(scModel)
if qa:
username = username if username is not None else get_username()
scModel.set_validation_properties("yes", username, "QA redone via Batch Updater")
errors = [] if args.skipValidation else scModel.validate(external=True)
if ignore_errors or not hasErrorMessages(errors, contentCheck=lambda x: len([m for m in redactions if m not in x]) == 0 ):
path, error_list = scModel.export('.', redacted=redactions)
if path is not None and (ignore_errors or len(error_list) == 0):
export_manager.sync_upload(path, s3dir)
if len(error_list) > 0:
for err in error_list:
log.error(str(err))
raise ValueError('Export Failed')
return errors
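# Illustrative invocation sketch (not part of the original file); the directory, bucket
# and username values are assumptions, the flags come from the parser in main() below:
#
#     main(['-d', '/data/projects', '-s', 'mybucket/exports', '--qa', '-u', 'analyst1'])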
def main(argv=sys.argv[1:]):
from functools import partial
parser = argparse.ArgumentParser()
    parser.add_argument('--threads', default=1, required=False, help='number of projects to process concurrently')
parser.add_argument('-d', '--projects', help='directory of projects')
parser.add_argument('-s', '--s3', help='bucket/path of s3 storage', required=False)
parser.add_argument('--qa', help="option argument to QA the journal prior to uploading", required=False,
action="store_true")
parser.add_argument('-u', '--username', help="optional username", required=False)
parser.add_argument('-o', '--organization', help="update organization in project", required=False)
parser.add_argument('-n', '--updatename', help="should update username in project", required=False,
action="store_true")
    parser.add_argument('-r', '--redacted', help='comma-separated list of file arguments to exclude from export',
default='', required=False)
parser.add_argument('-v', '--skipValidation', help='skip validation',action="store_true")
parser.add_argument('-i', '--ignore', help='ignore errors', default='', required=False)
parser.add_argument('--completeFile', default=None, help='A file recording completed projects')
args = parser.parse_args(argv)
iterator = pick_projects(args.projects)
processor = BatchProcessor(args.completeFile, iterator, threads=args.threads)
func = partial(upload_projects, args)
return processor.process(func)
if __name__ == '__main__':
main(sys.argv[1:])
| {
"content_hash": "b9662aa380a31bf4d4585bfe6cb139cc",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 127,
"avg_line_length": 46.30337078651685,
"alnum_prop": 0.6918223732103859,
"repo_name": "rwgdrummer/maskgen",
"id": "c0ae73732af2457a6d7a8a8c62c62ec8e9d4f3bb",
"size": "4391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maskgen/batch/bulk_export.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "544"
},
{
"name": "Dockerfile",
"bytes": "4825"
},
{
"name": "NSIS",
"bytes": "4907"
},
{
"name": "Python",
"bytes": "2768871"
},
{
"name": "Shell",
"bytes": "8086"
}
],
"symlink_target": ""
} |
import unittest
from pypika import (
Case,
Field,
Table,
functions as fn,
)
from pypika.terms import (
Negative,
ValueWrapper,
)
class IsAggregateTests(unittest.TestCase):
def test__field_is_not_aggregate(self):
v = Field("foo")
self.assertFalse(v.is_aggregate)
def test__constant_is_aggregate_none(self):
v = ValueWrapper(100)
self.assertIsNone(v.is_aggregate)
def test__constant_arithmetic_is_aggregate_none(self):
v = ValueWrapper(100) + ValueWrapper(100)
self.assertIsNone(v.is_aggregate)
def test__field_arithmetic_is_not_aggregate(self):
v = Field("foo") + Field("bar")
self.assertFalse(v.is_aggregate)
def test__field_arithmetic_constant_is_not_aggregate(self):
v = Field("foo") + 1
self.assertFalse(v.is_aggregate)
def test__agg_func_is_aggregate(self):
v = fn.Sum(Field("foo"))
self.assertTrue(v.is_aggregate)
def test__negative_agg_func_is_aggregate(self):
v = Negative(fn.Sum(Field("foo")))
self.assertTrue(v.is_aggregate)
def test__agg_func_arithmetic_is_aggregate(self):
v = fn.Sum(Field("foo")) / fn.Sum(Field("foo"))
self.assertTrue(v.is_aggregate)
def test__mixed_func_arithmetic_is_not_aggregate(self):
v = Field("foo") / fn.Sum(Field("foo"))
self.assertFalse(v.is_aggregate)
    def test__func_arithmetic_constant_is_aggregate(self):
v = 1 / fn.Sum(Field("foo"))
self.assertTrue(v.is_aggregate)
def test__agg_case_criterion_is_aggregate(self):
v = Case().when(fn.Sum(Field("foo")) > 666, 'More than 666').else_('Less than 666')
self.assertTrue(v.is_aggregate)
def test__agg_case_is_aggregate(self):
v = (
Case()
.when(Field("foo") == 1, fn.Sum(Field("bar")))
.when(Field("foo") == 2, fn.Sum(Field("fiz")))
.else_(fn.Sum(Field("fiz")))
)
self.assertTrue(v.is_aggregate)
def test__mixed_case_is_not_aggregate(self):
v = Case().when(Field("foo") == 1, fn.Sum(Field("bar"))).when(Field("foo") == 2, Field("fiz"))
self.assertFalse(v.is_aggregate)
def test__case_mixed_else_is_not_aggregate(self):
v = (
Case()
.when(Field("foo") == 1, fn.Sum(Field("bar")))
.when(Field("foo") == 2, fn.Sum(Field("fiz")))
.else_(Field("fiz"))
)
self.assertFalse(v.is_aggregate)
    def test__case_mixed_constant_is_aggregate(self):
v = Case().when(Field("foo") == 1, fn.Sum(Field("bar"))).when(Field("foo") == 2, fn.Sum(Field("fiz"))).else_(1)
self.assertTrue(v.is_aggregate)
def test__case_with_field_is_not_aggregate(self):
v = Case().when(Field("foo") == 1, 1).when(Field("foo") == 2, 2).else_(3)
self.assertFalse(v.is_aggregate)
def test__case_with_single_aggregate_field_in_one_criterion_is_aggregate(self):
v = Case().when(Field("foo") == 1, 1).when(fn.Sum(Field("foo")) == 2, 2).else_(3)
self.assertTrue(v.is_aggregate)
def test__non_aggregate_function_with_aggregated_arg(self):
t = Table("abc")
expr = fn.Sqrt(fn.Sum(t.a))
self.assertTrue(expr.is_aggregate)
def test_complicated(self):
t = Table("abc")
is_placebo = t.campaign_extra_info == "placebo"
pixel_mobile_search = Case().when(is_placebo, t.action_fb_pixel_search + t.action_fb_mobile_search)
unique_impressions = Case().when(is_placebo, t.unique_impressions)
v = fn.Sum(pixel_mobile_search) / fn.Sum(unique_impressions) - 1.96 * fn.Sqrt(
1
/ fn.Sum(unique_impressions)
* fn.Sum(pixel_mobile_search)
/ fn.Sum(unique_impressions)
* (1 - fn.Sum(pixel_mobile_search) / fn.Sum(unique_impressions))
)
self.assertTrue(v.is_aggregate)
| {
"content_hash": "51c219f13fed0502b16eaa79151ee166",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 119,
"avg_line_length": 32.34426229508197,
"alnum_prop": 0.5884439939178915,
"repo_name": "kayak/pypika",
"id": "1c627d8e9c2d65410c67cf2c39974949b0720a0e",
"size": "3946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypika/tests/test_aggregate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "394110"
}
],
"symlink_target": ""
} |
import sys
import matplotlib
from matplotlib import style
import cloudside
matplotlib.use("agg")
style.use("classic")
if "--strict" in sys.argv:
sys.argv.remove("--strict")
status = cloudside.teststrict(*sys.argv[1:])
else:
status = cloudside.test(*sys.argv[1:])
sys.exit(status)
| {
"content_hash": "22e4b5616f23eb8cb591f832d5787136",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 48,
"avg_line_length": 18.5,
"alnum_prop": 0.706081081081081,
"repo_name": "phobson/cloudside",
"id": "e0232b22e356c82bd6b1e186277f01690dc7c00c",
"size": "296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "check_cloudside.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "94088"
}
],
"symlink_target": ""
} |
"""
Check for unique lists within an array
"""
def unique_list(array, elements_to_check=0):
"""
:param array: Array of lists to be checked
:param elements_to_check: range of numbers corresponding to indices of list
:return: new unique array
"""
n_rows = len(array)
unique_array = []
if elements_to_check == 0:
elements = range(len(array[0]))
else:
elements = elements_to_check
# for each row
for i in range(n_rows):
row_a = array[i]
equal_row = False
# for all subsequent rows
for j in range(i + 1, n_rows):
row_b = array[j]
            # check whether every selected element of the two rows is equal,
            # breaking as soon as one unequal element is found
            equal_element = True
            for k in elements:
                if row_a[k] != row_b[k]:
                    equal_element = False
                    break
            if equal_element:
                equal_row = True
                break
        # append only the unique rows to the new array
        if not equal_row:
            unique_array.append(row_a)
return unique_array
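# Illustrative example (not part of the original file); note that for duplicate rows the
# last occurrence is the one kept:
#
#     rows = [[1, 'a', 10], [1, 'a', 99], [2, 'b', 10]]
#     unique_list(rows, elements_to_check=[0, 1])   # -> [[1, 'a', 99], [2, 'b', 10]]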
| {
"content_hash": "1062bbba3de15aa9c737e84d5ba6f0cf",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 102,
"avg_line_length": 18.46153846153846,
"alnum_prop": 0.525,
"repo_name": "nitikayad96/chandra_suli",
"id": "b1eea5882475eb66db26bc598f49ab9ae42adc75",
"size": "1200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chandra_suli/unique_list.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "139543"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import time
import queue as Queue # https://pymotw.com/2/Queue/
from functools import partial
from threading import Thread
#------------------------------------------------------------------------
# use the raspi board pin number
#GPIO.setmode(GPIO.BOARD)
# use the gpio number
GPIO.setmode(GPIO.BCM)
Taster = 25
#------------------------------------------------------------------------
def interrupt_event(qF, qR, pin):
if GPIO.input(pin) == GPIO.HIGH:
qR.put(pin)
else:
qF.put(pin)
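# Added note: the GPIO callback above only enqueues the pin number; the two worker
# threads below drain the queues, which keeps the interrupt callback short and moves the
# slower printing and sleeping out of the event-detection context.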
def rising_edge(queue):
while running:
if not queue.empty():
pin = queue.get()
zeit = time.strftime("%d.%m.%Y %H:%M:%S")
print("{} Rising edge detected on {}".format(zeit, pin))
time.sleep(0.5)
def falling_edge(queue):
while running:
if not queue.empty():
pin = queue.get()
zeit = time.strftime("%d.%m.%Y %H:%M:%S")
print("{} Falling edge detected on {}".format(zeit, pin))
time.sleep(0.5)
def main():
queueFalling = Queue.Queue()
queueRising = Queue.Queue()
rising_thread = Thread(target=rising_edge, args=(queueRising,))
falling_thread = Thread(target=falling_edge, args=(queueFalling,))
rising_thread.start()
falling_thread.start()
GPIO.setup(Taster, GPIO.IN)
GPIO.add_event_detect(Taster, GPIO.BOTH, callback=partial(interrupt_event, queueFalling, queueRising), bouncetime=200)
#keep script running
while True:
time.sleep(5)
if __name__ == '__main__':
try:
running = True
main()
except (KeyboardInterrupt, SystemExit):
running = False
print("\nQuit\n")
GPIO.cleanup() | {
"content_hash": "eb600f472f4e36f816e16e426c7e91e9",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 122,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.5593023255813954,
"repo_name": "meigrafd/Sample-Code",
"id": "513a0cf19adf5e14363fca60a8e4431a2574583d",
"size": "1739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interrupt_rising.falling_queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "496"
},
{
"name": "CSS",
"bytes": "638"
},
{
"name": "HTML",
"bytes": "1141"
},
{
"name": "JavaScript",
"bytes": "1624"
},
{
"name": "PHP",
"bytes": "77857"
},
{
"name": "Perl",
"bytes": "478"
},
{
"name": "Python",
"bytes": "382809"
},
{
"name": "Shell",
"bytes": "56023"
}
],
"symlink_target": ""
} |
# [START monitoring_v3_generated_AlertPolicyService_GetAlertPolicy_sync]
from google.cloud import monitoring_v3
def sample_get_alert_policy():
# Create a client
client = monitoring_v3.AlertPolicyServiceClient()
# Initialize request argument(s)
request = monitoring_v3.GetAlertPolicyRequest(
name="name_value",
)
# Make the request
response = client.get_alert_policy(request=request)
# Handle the response
print(response)
# [END monitoring_v3_generated_AlertPolicyService_GetAlertPolicy_sync]
| {
"content_hash": "2c53e5779f55dcc0ff580b64527c7b64",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 70,
"avg_line_length": 24.68421052631579,
"alnum_prop": 0.7164179104477612,
"repo_name": "googleapis/python-monitoring",
"id": "a9ee89a26f46f36e41f47f8f7400b36a20cbef79",
"size": "1867",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/monitoring_v3_generated_alert_policy_service_get_alert_policy_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2375818"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
} |
"""@package src.wi.views.user.user
@author Piotr Wójcik
@date 31.01.2014
"""
from django.contrib import messages
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_protect
from wi.commontags.templatetags.templatetags import filesizeformatmb
from wi.forms.user import CMAuthenticationForm, HelpForm, PasswordChangeForm, \
AccountDataEdit
from wi.utils import get_dict_from_list, messages_ajax
from wi.utils.decorators import django_view, user_permission
from wi.utils.exceptions import RestErrorException
from wi.utils.messages_ajax import ajax_request
from wi.utils.messages_codes import get_message
from wi.utils.states import message_levels_reversed
from wi.utils.views import prep_data
@django_view
@user_permission
def change_cm(request, cm_id, success_url='mai_main'):
"""
    View for changing the currently used CM.
"""
request.session['user'].cm_id = int(cm_id)
request.session.modified = True
messages.success(request, _('Cluster Manager changed.'))
    return redirect(request.META.get('HTTP_REFERER') or success_url)
@django_view
@ajax_request
@user_permission
def get_messages(request):
"""
Ajax view fetching user messages.
"""
if request.method == 'POST':
response = prep_data('user/message/get_list/', request.session)
for item in response:
item['text'] = get_message(item['code'], item['params'])
item['level'] = message_levels_reversed[item['level']]
return messages_ajax.success(response)
@django_view
@ajax_request
@user_permission
def acc_ajax_get_user_data(request):
"""
Ajax view. Returns user account data.
"""
if request.method == 'GET':
rest_data = prep_data({'user': 'user/user/get_my_data/',
'cms': 'guest/cluster/list_names/'
}, request.session)
user_data = rest_data['user']
users_cm = get_dict_from_list(rest_data['cms'], user_data['default_cluster_id'], key='cluster_id')
if users_cm is None:
raise Exception('User\'s default_cluster_id=%d is not a valid CM id.' % user_data['default_cluster_id'])
user_data['default_cluster_id'] = users_cm['name']
return messages_ajax.success(user_data)
@django_view
@ajax_request
@user_permission
@csrf_protect
def acc_ajax_account_data_edit(request, template_name='generic/form.html', form_class=AccountDataEdit):
"""
Ajax view for user account data editing.
"""
rest_data = prep_data({'cms': 'guest/cluster/list_names/'}, request.session)
if request.method == 'POST':
form = form_class(data=request.POST, rest_data=rest_data)
if form.is_valid():
prep_data({'user': ('user/user/edit/', form.cleaned_data)}, request.session)
request.session['user'].email = form.cleaned_data['email']
request.session['user'].default_cluster_id = form.cleaned_data['default_cluster_id']
request.session.modified = True
return messages_ajax.success(_('Account data edited.'))
else:
form = form_class(data={'email': request.session['user'].email,
'default_cluster_id': request.session['user'].default_cluster_id}, rest_data=rest_data)
return messages_ajax.success(render_to_string(template_name, {'form': form,
'text': '',
'confirmation': _('Save')},
context_instance=RequestContext(request)),
status=1)
@django_view
@ajax_request
@user_permission
def acc_ajax_get_user_quotas(request):
"""
Ajax view for fetching users' quotas.
"""
if request.method == 'GET':
quota = prep_data('user/user/check_quota/', request.session)
quota['memory'] = filesizeformatmb(quota['memory'])
quota['used_memory'] = filesizeformatmb(quota['used_memory'])
quota['storage'] = filesizeformatmb(quota['storage'])
quota['used_storage'] = filesizeformatmb(quota['used_storage'])
return messages_ajax.success(quota)
@django_view
@csrf_protect
@user_permission
def acc_password_change(request, template_name='account/password_change_form.html', password_change_form=PasswordChangeForm):
"""
    View for changing the password of a logged-in user.
"""
if request.method == "POST":
form = password_change_form(user=request.session['user'], data=request.POST)
if form.is_valid():
new_password = form.cleaned_data['new_password1']
try:
prep_data(('user/user/set_password/', {'new_password': new_password}), request.session)
except RestErrorException as ex:
messages.error(request, ex.value)
request.session['user'].set_password(new_password)
request.session.modified = True
return redirect('acc_password_change_done')
else:
form = password_change_form(user=request.session['user'])
return render_to_response(template_name, {'form': form}, context_instance=RequestContext(request))
@django_view
@user_permission
def hlp_form(request, form_class=HelpForm, template_name='help/form.html'):
"""
    View handling the help form.
"""
if request.method == 'POST':
form = form_class(data=request.POST)
if form.is_valid():
topic, issue, email = form.cleaned_data['topic'], form.cleaned_data['issue'], form.cleaned_data['email']
name = str(request.session.get('user', form.cleaned_data['firstlast']))
topic += _(' from user:') + name + ', email: ' + email
dictionary = {'issue': issue,
'topic': topic}
try:
prep_data(('user/user/send_issue/', dictionary), request.session)
except Exception:
return redirect('hlp_issue_error')
return redirect('hlp_issue_sent')
else:
form = form_class()
rest_data = prep_data('guest/user/is_mailer_active/', request.session)
return render_to_response(template_name, dict({'form': form}.items() + rest_data.items()),
context_instance=RequestContext(request))
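# --- Illustrative sketch, not part of the original module ---
# The views above share one pattern: stack @django_view / @ajax_request /
# @user_permission, call prep_data() with a REST path, and wrap the result in
# messages_ajax.success(). A minimal hypothetical view following that pattern
# (the REST path 'user/user/get_quota_history/' is an assumption, not a known endpoint):
@django_view
@ajax_request
@user_permission
def acc_ajax_get_quota_history_example(request):
    """
    Ajax view returning the user's quota history (illustrative only).
    """
    if request.method == 'GET':
        response = prep_data('user/user/get_quota_history/', request.session)
        return messages_ajax.success(response)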
| {
"content_hash": "e6df216b37579c8ef7411ba725715d62",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 125,
"avg_line_length": 37.07954545454545,
"alnum_prop": 0.6259577076310144,
"repo_name": "cc1-cloud/cc1",
"id": "779ca94f3295938fb0f803a6dd3397114bd47169",
"size": "7225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/wi/views/user/user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "63829"
},
{
"name": "HTML",
"bytes": "323260"
},
{
"name": "JavaScript",
"bytes": "458924"
},
{
"name": "Python",
"bytes": "1466456"
},
{
"name": "Shell",
"bytes": "14317"
}
],
"symlink_target": ""
} |
"""
Django settings for elf project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pdj&pd=xh^e3o#g6#=8o1bk=6jbj^6683q^-xl)c5)0v610p+z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# master
'master',
# apps
'todo',
'event',
'hours10k',
'mood',
'dream',
'diary',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'elf.urls'
WSGI_APPLICATION = 'elf.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static', 'static')
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static', 'media')
# Template DIR
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(BASE_DIR), 'static', 'templates'),
)
if DEBUG:
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static', 'media')
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static', 'static')
TEMPLATE_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static', 'templates')
STATICFILES_DIRS = (
os.path.join(os.path.dirname(BASE_DIR), 'static'),
)
| {
"content_hash": "e30896971a8b213804fefc4120628064",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 82,
"avg_line_length": 24.605263157894736,
"alnum_prop": 0.6898395721925134,
"repo_name": "KellyChan/python-examples",
"id": "5cf98149275ef38328bbf5342a0a6a5acadd1d0f",
"size": "2805",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/django/elf/elf/src/elf/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "86277"
},
{
"name": "HTML",
"bytes": "320182"
},
{
"name": "JavaScript",
"bytes": "154998"
},
{
"name": "Jupyter Notebook",
"bytes": "30660"
},
{
"name": "Python",
"bytes": "238130"
}
],
"symlink_target": ""
} |
from tapiriik.services.ratelimiting import RateLimit, RateLimitExceededException
from tapiriik.services.api import ServiceException, UserExceptionType, UserException
class ServiceAuthenticationType:
OAuth = "oauth"
UsernamePassword = "direct"
class InvalidServiceOperationException(Exception):
pass
class ServiceBase:
# Short ID used everywhere in logging and DB storage
ID = None
# Alias ID in case somebody (not naming names) typoed a service name and needs to keep the old ID functional
IDAliases = None
# Full display name given to users
DisplayName = None
# 2-3 letter abbreviated name
DisplayAbbreviation = None
# One of ServiceAuthenticationType
AuthenticationType = None
# Enables extended auth ("Save these details") functionality
RequiresExtendedAuthorizationDetails = False
# URL to direct user to when starting authentication
UserAuthorizationURL = None
# Don't attempt to IFrame the OAuth login
AuthenticationNoFrame = False
# List of ActivityTypes
SupportedActivities = None
# Used only in tests
SupportsHR = SupportsCalories = SupportsCadence = SupportsTemp = SupportsPower = False
# Does it?
ReceivesActivities = True # Any at all?
ReceivesStationaryActivities = True # Manually-entered?
ReceivesNonGPSActivitiesWithOtherSensorData = True # Trainer-ish?
SuppliesActivities = True
# Services with this flag unset will receive an explicit date range for activity listing,
# rather than the exhaustive flag alone. They are also processed after all other services.
# An account must have at least one service that supports exhaustive listing.
SupportsExhaustiveListing = True
SupportsActivityDeletion = False
# Causes synchronizations to be skipped until...
# - One is triggered (via IDs returned by ExternalIDsForPartialSyncTrigger or PollPartialSyncTrigger)
# - One is necessitated (non-partial sync, possibility of uploading new activities, etc)
PartialSyncRequiresTrigger = False
PartialSyncTriggerRequiresSubscription = False
PartialSyncTriggerStatusCode = 204
# Timedelta for polling to happen at (or None for no polling)
PartialSyncTriggerPollInterval = None
# How many times to call the polling method per interval (this is for the multiple_index kwarg)
PartialSyncTriggerPollMultiple = 1
# How many times should we try each operation on an activity before giving up?
# (only ever tries once per sync run - so ~1 hour interval on average)
UploadRetryCount = 5
DownloadRetryCount = 5
# Global rate limiting options
# For when there's a limit on the API key itself
GlobalRateLimits = []
@property
def PartialSyncTriggerRequiresPolling(self):
return self.PartialSyncRequiresTrigger and self.PartialSyncTriggerPollInterval
# Adds the Setup button to the service configuration pane, and not much else
Configurable = False
# Defaults for per-service configuration
ConfigurationDefaults = {}
# For the diagnostics dashboard
UserProfileURL = UserActivityURL = None
def RequiresConfiguration(self, serviceRecord): # Should convert this into a real property
return False # True means no sync until user configures
def WebInit(self):
pass
# Return an URL pointing directly to the specified activity on the remote site
def UserUploadedActivityURL(self, uploadId):
raise NotImplementedError
def GenerateUserAuthorizationURL(self, session, level=None):
raise NotImplementedError
def Authorize(self, email, password, store=False):
raise NotImplementedError
def RevokeAuthorization(self, serviceRecord):
raise NotImplementedError
def DownloadActivityList(self, serviceRecord, exhaustive_start_date=None):
raise NotImplementedError
def DownloadActivity(self, serviceRecord, activity):
raise NotImplementedError
# Should return an uploadId for storage and potential use in DeleteActivity
def UploadActivity(self, serviceRecord, activity):
raise NotImplementedError
def DeleteActivity(self, serviceRecord, uploadId):
raise NotImplementedError
def DeleteCachedData(self, serviceRecord):
raise NotImplementedError
def SubscribeToPartialSyncTrigger(self, serviceRecord):
if self.PartialSyncRequiresTrigger:
raise NotImplementedError
else:
raise InvalidServiceOperationException
def UnsubscribeFromPartialSyncTrigger(self, serviceRecord):
if self.PartialSyncRequiresTrigger:
raise NotImplementedError
else:
raise InvalidServiceOperationException
def ShouldForcePartialSyncTrigger(self, serviceRecord):
if self.PartialSyncRequiresTrigger:
return False
else:
raise InvalidServiceOperationException
def PollPartialSyncTrigger(self, multiple_index):
if self.PartialSyncRequiresTrigger and self.PartialSyncTriggerPollInterval:
raise NotImplementedError
else:
raise InvalidServiceOperationException
def ExternalIDsForPartialSyncTrigger(self, req):
raise NotImplementedError
def PartialSyncTriggerGET(self, req):
from django.http import HttpResponse
return HttpResponse(status=204)
def ConfigurationUpdating(self, serviceRecord, newConfig, oldConfig):
pass
def _globalRateLimit(self):
try:
RateLimit.Limit(self.ID)
except RateLimitExceededException:
raise ServiceException("Global rate limit reached", user_exception=UserException(UserExceptionType.RateLimited))
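# --- Illustrative sketch, not part of the original module ---
# The flags and hooks documented above are meant to be overridden by concrete
# services. A minimal hypothetical subclass (the ID, URL and return shapes are
# placeholders/assumptions, not a real tapiriik integration) might look like:
class ExampleService(ServiceBase):
    ID = "exampleservice"            # short ID used in logging and DB storage
    DisplayName = "Example Service"
    DisplayAbbreviation = "EXS"
    AuthenticationType = ServiceAuthenticationType.OAuth
    UserAuthorizationURL = "https://example.com/oauth/authorize"  # placeholder
    SupportedActivities = []         # would normally list supported ActivityTypes

    def DownloadActivityList(self, serviceRecord, exhaustive_start_date=None):
        self._globalRateLimit()      # honour GlobalRateLimits before touching the API
        return [], []                # (activities, exclusions) -- shape assumed, not verified here

    def DownloadActivity(self, serviceRecord, activity):
        self._globalRateLimit()
        return activity              # sketch only: no extra data is fetched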
| {
"content_hash": "eb5fbc22575873c22c0f8760dfef6f81",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 124,
"avg_line_length": 36.100628930817614,
"alnum_prop": 0.7369337979094077,
"repo_name": "cgourlay/tapiriik",
"id": "513a6f53e38a97b3f7e488528b3abdebea9dbc97",
"size": "5740",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tapiriik/services/service_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23790"
},
{
"name": "HTML",
"bytes": "66893"
},
{
"name": "JavaScript",
"bytes": "48529"
},
{
"name": "Python",
"bytes": "605994"
},
{
"name": "Shell",
"bytes": "1120"
}
],
"symlink_target": ""
} |
import warnings
from pathlib import Path
import matplotlib.pyplot as plt
import pytest
import pandas as pd
from plotnine import (ggplot, aes, geom_text, geom_point, facet_wrap,
ggsave, theme_xkcd)
from plotnine.data import mtcars
from plotnine.exceptions import PlotnineError, PlotnineWarning
p = (ggplot(mtcars, aes(x='wt', y='mpg', label='name'))
+ geom_text())
def sequential_filenames():
"""
Generate filenames for the tests
"""
for i in range(100):
yield Path(f'filename-{i}.png')
filename_gen = sequential_filenames()
def assert_exist_and_clean(filename, msg=None):
if not msg:
msg = f"File {filename} does not exist"
assert filename.exists(), msg
filename.unlink()
class TestArguments:
def test_default_filename(self):
p.save(verbose=False)
fn = p._save_filename('pdf')
assert_exist_and_clean(fn, "default filename")
def test_save_method(self):
fn = next(filename_gen)
with pytest.warns(PlotnineWarning) as record:
p.save(fn)
assert_exist_and_clean(fn, "save method")
res = ('saving' in str(item.message).lower()
for item in record)
assert any(res)
res = ('filename' in str(item.message).lower()
for item in record)
assert any(res)
# verbose
fn = next(filename_gen)
with warnings.catch_warnings(record=True) as record:
p.save(fn, verbose=False)
assert_exist_and_clean(fn, "save method")
assert not record, "Issued an unexpected warning"
def test_filename_plot_path(self):
fn = next(filename_gen)
p.save(fn, path='.', verbose=False)
assert_exist_and_clean(fn, "fn, plot and path")
def test_format_png(self):
p.save(format='png', verbose=False)
fn = p._save_filename('png')
assert_exist_and_clean(fn, "format png")
def test_dpi(self):
fn = next(filename_gen)
p.save(fn, dpi=100, verbose=False)
assert_exist_and_clean(fn, "dpi = 100")
def test_ggsave(self):
ggsave(p, verbose=False)
fn = p._save_filename('pdf')
assert_exist_and_clean(fn, "default filename")
def test_save_big(self):
fn = next(filename_gen)
# supplying the ggplot object will work without
# printing it first! 26 is the current limit, just go
# over it to not use too much memory
p.save(fn, width=26, height=26, limitsize=False, verbose=False)
assert_exist_and_clean(fn, "big height and width")
def test_dpi_theme_xkcd(self):
fn1 = next(filename_gen)
fn2 = next(filename_gen)
df = pd.DataFrame({
'x': range(4),
'y': range(4),
'b': list('aabb')
})
p = (
ggplot(df)
+ geom_point(aes('x', 'y'))
+ facet_wrap('b')
+ theme_xkcd()
)
p.save(fn1, verbose=False)
assert_exist_and_clean(fn1, "Saving with theme_xkcd and dpi (1)")
p.save(fn2, dpi=72, verbose=False)
assert_exist_and_clean(fn2, "Saving with theme_xkcd and dpi (2)")
class TestExceptions:
def test_unknown_format(self):
with pytest.raises(Exception):
p.save(format='unknown', verbose=False)
def test_width_only(self):
with pytest.raises(PlotnineError):
p.save(width=11)
def test_height_only(self):
with pytest.raises(PlotnineError):
p.save(height=8)
def test_large_width(self):
with pytest.raises(PlotnineError):
p.save(width=300, height=8)
def test_large_height(self):
with pytest.raises(PlotnineError):
            p.save(width=11, height=300)
def test_bad_units(self):
with pytest.raises(Exception):
            p.save(width=1, height=1, units='xxx')
# This should be the last function in the file since it can catch
# "leakages" due to the tests in this test module.
def test_ggsave_closes_plot():
assert plt.get_fignums() == [], "There are unsaved test plots"
fn = next(filename_gen)
p.save(fn, verbose=False)
assert_exist_and_clean(fn, "exist")
assert plt.get_fignums() == [], "ggplot.save did not close the plot"
| {
"content_hash": "cdb40221692fa83bd756a91ce494a3d7",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 73,
"avg_line_length": 29.34013605442177,
"alnum_prop": 0.5981915140273592,
"repo_name": "has2k1/plotnine",
"id": "e733ecea3442bb7fee0c699175556aa7d3bbba6e",
"size": "4313",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_ggsave.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1729"
},
{
"name": "Python",
"bytes": "991031"
},
{
"name": "Shell",
"bytes": "2997"
}
],
"symlink_target": ""
} |
import json
from base64 import b64encode
from beanstalk_dispatch import ARGS
from beanstalk_dispatch import FUNCTION
from beanstalk_dispatch import KWARGS
from beanstalk_dispatch.common import create_request_body
from django.core.urlresolvers import reverse
from django.test import Client
from django.test import TestCase
from django.test import override_settings
# Don't log logger errors.
import logging
logging.disable(logging.CRITICAL)
CALL_COUNTER = 0
def counter_incrementer(first_arg, second_arg=None):
global CALL_COUNTER
CALL_COUNTER += first_arg
if second_arg:
CALL_COUNTER += second_arg
DISPATCH_SETTINGS = {
'BEANSTALK_DISPATCH_TABLE': {
'the_counter': (
'beanstalk_dispatch.tests.test_dispatcher',
'counter_incrementer')}}
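# --- Illustrative note, not part of the original module ---
# create_request_body() from beanstalk_dispatch.common is expected to emit a JSON
# document keyed by the FUNCTION, ARGS and KWARGS constants imported above; the
# dispatcher rejects bodies missing any of them (see test_malformed_request below).
# A hand-rolled equivalent of what test_both_args_kwargs sends might look like
# (the exact wire format, e.g. args serialized as a JSON list, is an assumption):
#
#   body = b64encode(json.dumps(
#       {FUNCTION: 'the_counter', ARGS: [1], KWARGS: {'second_arg': 5}}).encode('ascii'))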
class DispatcherTestCase(TestCase):
""" Test the server-side function dispatcher.
In these tests, we base64-encode every message we send to the server
because this is what boto does.
"""
def setUp(self):
global CALL_COUNTER
CALL_COUNTER = 0
self.client = Client()
self.url = reverse('beanstalk_dispatcher')
@override_settings(**DISPATCH_SETTINGS)
def test_no_get(self):
response = self.client.get(self.url)
self.assertEquals(response.status_code, 405)
@override_settings(BEANSTALK_DISPATCH_TABLE=None)
def test_no_dispatch(self):
response = self.client.post(
self.url, b64encode(
create_request_body('some_func').encode('ascii')),
content_type='application/json')
self.assertEquals(response.status_code, 400)
self.assertEquals(json.loads(response.content.decode()),
{'message': 'No beanstalk dispatch table configured',
'error': 400})
@override_settings(**DISPATCH_SETTINGS)
def test_missing_function(self):
response = self.client.post(
self.url,
b64encode(create_request_body('nonexistent_func').encode('ascii')),
content_type='application/json')
self.assertEquals(response.status_code, 400)
self.assertEquals(
json.loads(response.content.decode()),
{'message': 'Requested function not found: nonexistent_func',
'error': 400})
@override_settings(**DISPATCH_SETTINGS)
def test_malformed_request(self):
keys = {FUNCTION, ARGS, KWARGS}
for missing_key in keys:
request_body = {key: 'test' for key in
keys - {missing_key}}
response = self.client.post(
self.url,
b64encode(json.dumps(request_body).encode('ascii')),
content_type='application/json')
self.assertEquals(response.status_code, 400)
self.assertEquals(json.loads(response.content.decode()), {
'message': 'Please provide a {} argument'.format(missing_key),
'error': 400})
@override_settings(**DISPATCH_SETTINGS)
def test_both_args_kwargs(self):
body = b64encode(
create_request_body('the_counter', 1, second_arg=5)
.encode('ascii'))
response = self.client.post(self.url,
body,
content_type='application/json')
self.assertEquals(response.status_code, 200)
self.assertEquals(json.loads(response.content.decode()),
{})
self.assertEquals(CALL_COUNTER, 6)
@override_settings(**DISPATCH_SETTINGS)
def test_just_args(self):
body = b64encode(create_request_body('the_counter', 2).encode('ascii'))
response = self.client.post(self.url,
body,
content_type='application/json')
self.assertEquals(response.status_code, 200)
self.assertEquals(json.loads(response.content.decode()),
{})
self.assertEquals(CALL_COUNTER, 2)
| {
"content_hash": "2ebeaa97dd3ed2f5a9bf284b51be2d56",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 35.771929824561404,
"alnum_prop": 0.6074055909759686,
"repo_name": "Sonblind/orchestra",
"id": "500189f9f428e8025f869619dc7830904facbb57",
"size": "4078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beanstalk_dispatch/tests/test_dispatcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "76414"
},
{
"name": "HTML",
"bytes": "57292"
},
{
"name": "JavaScript",
"bytes": "234470"
},
{
"name": "Makefile",
"bytes": "826"
},
{
"name": "Python",
"bytes": "310246"
}
],
"symlink_target": ""
} |
import unittest
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import gradient_check
from chainer import initializers
from chainer import links
from chainer import memory_layouts
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import type_check
import chainerx
_parameterize = testing.parameterize(*(testing.product_dict(
testing.product({
'test': [True, False],
'size': ['skip', 'explicit'],
'dtype': [numpy.float16, numpy.float32, numpy.float64,
chainer.mixed16],
}),
testing.product({
'ndim': [0, 1, 2, 3],
}) + [
{'input_shape': (5, 4, 3, 2), 'axis': (0, 2, 3)},
{'input_shape': (5, 4), 'axis': 0},
{'input_shape': (5, 4, 3), 'axis': (0, 1)},
]
)))
_inject_backend_tests = testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
def _batch_normalization(expander, gamma, beta, x, mean, var, eps, test):
mean = mean[expander]
if test:
std = numpy.sqrt(var[expander])
else:
std = numpy.sqrt(var[expander] + eps)
y_expect = gamma * (x - mean) / std + beta
return y_expect
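# --- Illustrative note, not part of the original module ---
# The helper above computes the reference batch-normalization output
#     y = gamma * (x - mean) / sqrt(var [+ eps]) + beta
# with mean and var broadcast through `expander`; eps is only added in the
# non-test branch, mirroring how forward_expected builds its expectation below.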
class BatchNormalizationTestBase(object):
param_names = ('gamma', 'beta')
def setUp(self):
if self.dtype == chainer.mixed16:
self.highprec_dtype = numpy.float32
else:
self.highprec_dtype = self.dtype
if hasattr(self, 'axis') and hasattr(self, 'input_shape'):
aggr_axes = self.axis
if isinstance(aggr_axes, int):
aggr_axes = aggr_axes,
shape = self.input_shape
param_shape = tuple(
s for i, s in enumerate(shape) if i not in aggr_axes)
expander = tuple(
None if i in aggr_axes else slice(None)
for i in range(len(shape)))
elif hasattr(self, 'ndim'):
aggr_axes = (0,) + tuple(six.moves.range(2, self.ndim + 2))
shape = (5, 3) + (2,) * self.ndim
param_shape = shape[1]
expander = (None, Ellipsis) + (None,) * self.ndim
else:
assert False
self.aggr_axes = aggr_axes
self.shape = shape
self.param_shape = param_shape
self.expander = expander
self.finetune = False
self.eps = 2e-5
if self.test:
self.mean = numpy.random.uniform(
-1, 1, param_shape).astype(self.highprec_dtype)
self.var = numpy.random.uniform(
0.5, 1, param_shape).astype(self.highprec_dtype)
else:
self.mean = None
self.var = None
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
if self.dtype in (numpy.float16, chainer.mixed16):
self.check_forward_options = {'atol': 1e-2, 'rtol': 1e-1}
self.check_backward_options = {'atol': 5e-1, 'rtol': 1e-1}
def before_test(self, test_name):
if (self.dtype == chainer.mixed16
and self.backend_config.xp is chainerx):
raise unittest.SkipTest(
'ChainerX does not yet support mixed-FP16 mode.')
def generate_params(self):
initial_gamma = numpy.random.uniform(
-1, 1, self.param_shape).astype(self.highprec_dtype)
initial_beta = numpy.random.uniform(
-1, 1, self.param_shape).astype(self.highprec_dtype)
return initial_gamma, initial_beta
def create_link(self, initializers):
initial_gamma, initial_beta = initializers
size = self.param_shape if self.size == 'explicit' else None
initial_avg_mean = None if self.mean is None else self.mean.copy()
initial_avg_var = None if self.var is None else self.var.copy()
link = links.BatchNormalization(
size=size,
axis=self.aggr_axes,
eps=self.eps,
dtype=self.dtype,
initial_gamma=initial_gamma,
initial_beta=initial_beta,
initial_avg_mean=initial_avg_mean,
initial_avg_var=initial_avg_var)
return link
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, link, inputs, device):
x, = inputs
# The inputs might be of different dtype than what the link was
# initialized with. In that case, persistent values must be manually
# cast. This is needed when forward is called in order to compute
# numerical gradients.
if ((self.dtype == chainer.mixed16 and x.dtype != numpy.float16)
or (self.dtype != chainer.mixed16 and link.avg_mean is not None
and x.dtype != link.avg_mean.dtype)):
link.avg_mean = link.avg_mean.astype(x.dtype)
link.avg_var = link.avg_var.astype(x.dtype)
with chainer.using_config('train', not self.test):
y = link(x, finetune=self.finetune)
return y,
def forward_expected(self, link, inputs):
gamma = link.gamma.array
beta = link.beta.array
x, = inputs
if self.test:
mean = self.mean[self.expander]
var = self.var[self.expander]
std = numpy.sqrt(var)
else:
mean = x.mean(
axis=self.aggr_axes, dtype=self.highprec_dtype, keepdims=True)
var = x.var(
axis=self.aggr_axes, dtype=self.highprec_dtype, keepdims=True)
std = numpy.sqrt(var + self.eps)
y = gamma[self.expander] * (x - mean) / std + beta[self.expander]
return y.astype(self.dtype),
def check_forward_outputs(self, outputs, expected_outputs):
super(BatchNormalizationTestBase, self).check_forward_outputs(
outputs, expected_outputs)
y, = outputs
assert y.dtype == chainer.get_dtype(self.dtype)
@_inject_backend_tests
@_parameterize
class BatchNormalizationTest(BatchNormalizationTestBase, testing.LinkTestCase):
pass
# TODO(hvy): Safely remove this test class when BackendConfig no longer
# modifies the current device since those cases should be covered by the tests
# above.
@testing.inject_backend_tests(
None,
testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0],
}))
@_parameterize
@attr.multi_gpu(2)
class BatchNormalizationMultiGpuTest(
BatchNormalizationTestBase, testing.LinkTestCase):
skip_backward_test = True
# TODO(hvy): Remove this relaxation. It is currently needed as the
# inter-device copy in CuPy with non-contiguous arrays are broken.
contiguous = 'C'
def forward(self, link, inputs, device):
x, = inputs
device_1 = backend.GpuDevice.from_device_id(1)
link.to_device(device_1)
x.to_device(device_1)
device_0 = backend.GpuDevice.from_device_id(0)
with chainer.using_device(device_0):
with chainer.using_config('train', not self.test):
y = link(x, finetune=self.finetune)
return y,
@testing.parameterize(*(testing.product_dict(
testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))))
@_inject_backend_tests
class BatchNormalizationInitializersTest(
BatchNormalizationTestBase, testing.LinkInitializersTestCase):
test = False
size = 'skip'
ndim = 1
input_shape = (5, 4)
axis = 0
def get_initializers(self):
initial_gamma = [
initializers.Constant(2), 2, testing.InitializerArgument(None, 1)]
initial_beta = [
initializers.Constant(2), 2, testing.InitializerArgument(None, 0)]
return initial_gamma, initial_beta
@testing.parameterize(
{'nx': 10, 'ny': 10, 'eps': 2e-5},
{'nx': 10, 'ny': 10, 'eps': 1e-1},
# TODO(Kenta Oono)
# Pass the case below (this test does not pass when nx != ny).
# {'nx': 10, 'ny': 15}
)
class TestPopulationStatistics(unittest.TestCase):
def setUp(self):
self.decay = 0.9
self.size = 3
self.link = links.BatchNormalization(self.size, self.decay, self.eps)
self.x = numpy.random.uniform(
-1, 1, (self.nx, self.size)).astype(numpy.float32)
self.y = numpy.random.uniform(
-1, 1, (self.ny, self.size)).astype(numpy.float32)
def check_statistics(self, x, y):
x = chainer.Variable(x)
self.link(x, finetune=True)
mean = self.x.mean(axis=0)
testing.assert_allclose(mean, self.link.avg_mean)
unbiased_var = self.x.var(axis=0) * self.nx / (self.nx - 1)
testing.assert_allclose(unbiased_var, self.link.avg_var)
y = chainer.Variable(y)
with chainer.using_config('train', False):
self.link(y, finetune=True)
testing.assert_allclose(mean, self.link.avg_mean)
testing.assert_allclose(unbiased_var, self.link.avg_var)
def test_statistics_cpu(self):
self.check_statistics(self.x, self.y)
@attr.gpu
def test_statistics_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_statistics(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
@attr.cudnn
def test_statistics_gpu_without_cudnn(self):
with chainer.using_config('use_cudnn', 'never'):
self.test_statistics_gpu()
def check_statistics2(self, x, y):
x = chainer.Variable(x)
y = chainer.Variable(y)
self.link(x, finetune=True)
self.link(y, finetune=True)
mean = (self.x.sum(axis=0) + self.y.sum(axis=0)) / (self.nx + self.ny)
var = (self.x.var(axis=0) * self.nx +
self.y.var(axis=0) * self.ny) / (self.nx + self.ny)
# TODO(Kenta Oono)
# Fix the estimate of the unbiased variance.
# Unbiased variance should be (nx + ny) / (nx + ny - 1) times of
# the variance.
# But the multiplier is ny / (ny - 1) in current implementation
# these two values are different when nx is not equal to ny.
unbiased_var = var * self.ny / (self.ny - 1)
testing.assert_allclose(mean, self.link.avg_mean)
testing.assert_allclose(unbiased_var, self.link.avg_var)
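        # --- Illustrative arithmetic, not part of the original test ---
        # With the parameterized sizes nx = ny = 10, the factor applied above,
        # ny / (ny - 1) = 10 / 9 ~= 1.111, matches what the link's running average
        # yields for this test, whereas a truly unbiased pooled estimate would use
        # (nx + ny) / (nx + ny - 1) = 20 / 19 ~= 1.053. With nx = 10, ny = 15 the
        # factor 15 / 14 no longer matches the running average either, which is why
        # that case stays commented out in the parameterization above.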
def test_statistics2_cpu(self):
self.check_statistics2(self.x, self.y)
@attr.gpu
def test_statistics2_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.check_statistics2(
cuda.to_gpu(self.x),
cuda.to_gpu(self.y))
@attr.cudnn
def test_statistics2_gpu_without_cudnn(self):
with chainer.using_config('use_cudnn', 'never'):
self.test_statistics2_gpu()
# TODO(hvy): Rewrite this test using testing.LinkTestCase.
@testing.parameterize(*testing.product({
'test': [True, False],
'ndim': [0, 1, 2, 3],
}))
class BatchNormalizationTestWithoutGammaAndBeta(unittest.TestCase):
def setUp(self):
self.link = links.BatchNormalization(
3, use_gamma=False, use_beta=False)
if self.test:
mean = numpy.random.uniform(-1, 1, (3,)).astype(numpy.float32)
self.link.avg_mean[...] = mean
var = numpy.random.uniform(0.5, 1, (3,)).astype(numpy.float32)
self.link.avg_var[...] = var
self.link.cleargrads()
shape = (7, 3) + (2,) * self.ndim
self.x = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
expander = (None, Ellipsis) + (None,) * self.ndim
gamma = numpy.ones((3,), dtype=numpy.float32)[expander]
beta = numpy.zeros((3,), dtype=numpy.float32)[expander]
if self.test:
mean = self.link.avg_mean
var = self.link.avg_var
else:
aggr_axes = (0,) + tuple(six.moves.range(2, self.ndim + 2))
mean = self.x.mean(axis=aggr_axes)
var = self.x.var(axis=aggr_axes)
self.y_expected = _batch_normalization(
expander, gamma, beta, self.x, mean, var, self.link.eps, self.test)
def test_no_gamma_and_beta(self):
assert self.link.gamma is None
assert self.link.beta is None
def check_forward(self, x_data):
x = chainer.Variable(x_data)
with chainer.using_config('train', not self.test):
y = self.link(x)
testing.assert_allclose(self.y_expected, y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
self.check_forward(x)
@attr.multi_gpu(2)
def test_forward_gpu_multi(self):
with cuda.get_device_from_id(0):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
with cuda.get_device_from_id(1):
self.check_forward(x)
@attr.cudnn
def test_forward_gpu_without_cudnn(self):
with chainer.using_config('use_cudnn', 'never'):
self.test_forward_gpu()
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(self.link, x_data, y_grad,
eps=1e-2, rtol=1e-3, atol=1e-4)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
gy = cuda.to_gpu(self.gy)
self.check_backward(x, gy)
@attr.cudnn
def test_backward_gpu_without_cudnn(self):
with chainer.using_config('use_cudnn', 'never'):
self.test_backward_gpu()
def _generate_uniform(low, high, shape, dtype=numpy.float32):
return numpy.random.uniform(low, high, shape).astype(dtype)
@testing.parameterize(*testing.product({
'size': [3, (2, 3)],
}))
class TestInitialize(unittest.TestCase):
def setUp(self):
self.decay = 0.9
self.initial_gamma = _generate_uniform(-1, 1, self.size)
self.initial_beta = _generate_uniform(-1, 1, self.size)
self.initial_avg_mean = _generate_uniform(-1, 1, self.size)
self.initial_avg_var = _generate_uniform(-1, 1, self.size)
self.link = links.BatchNormalization(
self.size, self.decay,
initial_gamma=self.initial_gamma,
initial_beta=self.initial_beta,
initial_avg_mean=self.initial_avg_mean,
initial_avg_var=self.initial_avg_var,
)
@condition.retry(3)
def test_initialize_cpu(self):
testing.assert_allclose(self.initial_gamma, self.link.gamma.data)
testing.assert_allclose(self.initial_beta, self.link.beta.data)
testing.assert_allclose(self.initial_avg_mean, self.link.avg_mean)
testing.assert_allclose(self.initial_avg_var, self.link.avg_var)
@attr.gpu
@condition.retry(3)
def test_initialize_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
testing.assert_allclose(self.initial_gamma, self.link.gamma.data)
testing.assert_allclose(self.initial_beta, self.link.beta.data)
testing.assert_allclose(self.initial_avg_mean, self.link.avg_mean)
testing.assert_allclose(self.initial_avg_var, self.link.avg_var)
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float16, chainer.mixed16],
}))
class TestDefaultInitializer(unittest.TestCase):
def setUp(self):
self.decay = 0.9
self.size = 3
with chainer.using_config('dtype', self.dtype):
self.link = links.BatchNormalization(self.size, self.decay)
dtype = numpy.float32 if self.dtype == chainer.mixed16 else self.dtype
assert self.link.beta.dtype == dtype
assert self.link.gamma.dtype == dtype
assert self.link.avg_mean.dtype == dtype
assert self.link.avg_var.dtype == dtype
self.x = numpy.arange(6, dtype=self.dtype).reshape(2, 3)
def check_initialize(self):
testing.assert_allclose(numpy.ones(self.size), self.link.gamma.array)
testing.assert_allclose(numpy.zeros(self.size), self.link.beta.array)
testing.assert_allclose(0, self.link.avg_mean)
testing.assert_allclose(1, self.link.avg_var)
y = self.link(self.x)
assert y.dtype == self.x.dtype
def test_initialize_cpu(self):
self.check_initialize()
@attr.gpu
def test_initialize_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.x = cuda.to_gpu(self.x)
self.check_initialize()
@testing.parameterize(*testing.product({
'shape': [(2, 4), (2, 5, 3, 4)],
}))
class TestInvalidInput(unittest.TestCase):
def setUp(self):
self.link = links.BatchNormalization(3)
def test_invalid_shape_cpu(self):
with self.assertRaises(type_check.InvalidType):
self.link(chainer.Variable(numpy.zeros(self.shape, dtype='f')))
@attr.gpu
def test_invalid_shape_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
with self.assertRaises(type_check.InvalidType):
self.link(chainer.Variable(cuda.cupy.zeros(self.shape, dtype='f')))
class TestInvalidInitialize(unittest.TestCase):
def test_invalid_type(self):
with self.assertRaises(TypeError):
self.link = links.BatchNormalization({})
class TestInvalidArgument(unittest.TestCase):
def setUp(self):
self.link = links.BatchNormalization(1)
self.x = numpy.random.uniform(-1, 1, (3,)).astype('f')
def test_test_argument(self):
with self.assertRaises(ValueError):
self.link(self.x, test=True)
def test_positional_argument(self):
# positional argument is prohibited from v2
with self.assertRaises(TypeError):
self.link(self.x, True)
def test_redundant_argument(self):
with self.assertRaises(TypeError):
self.link(self.x, unknown_argument=1)
@testing.parameterize(
{'shape': (5, 4, 3, 2), 'axis': (0, 2, 3)},
{'shape': (5, 4), 'axis': 0},
{'shape': (5, 4, 3), 'axis': (0, 1)},
)
class TestChannalSizeInference(unittest.TestCase):
def setUp(self):
self.x = numpy.random.randn(*self.shape).astype('f')
axis = self.axis
if isinstance(axis, int):
axis = (axis,)
self.expected_size = tuple(
n
for i, n in enumerate(self.shape)
if i not in axis
)
def test_no_inference(self):
bn = links.BatchNormalization(self.expected_size)
assert bn.avg_mean is not None
assert bn.avg_var is not None
def test_inference_cpu(self):
bn = links.BatchNormalization(axis=self.axis)
bn(self.x)
assert bn.beta.shape == self.expected_size
assert bn.gamma.shape == self.expected_size
assert bn.avg_mean.shape == self.expected_size
assert bn.avg_var.shape == self.expected_size
@attr.gpu
def test_inference_gpu(self):
bn = links.BatchNormalization(axis=self.axis)
with testing.assert_warns(DeprecationWarning):
bn.to_gpu()
bn(cuda.to_gpu(self.x))
assert isinstance(bn.beta.data, cuda.cupy.ndarray)
assert isinstance(bn.gamma.data, cuda.cupy.ndarray)
assert isinstance(bn.avg_mean, cuda.cupy.ndarray)
assert isinstance(bn.avg_var, cuda.cupy.ndarray)
assert bn.beta.shape == self.expected_size
assert bn.gamma.shape == self.expected_size
assert bn.avg_mean.shape == self.expected_size
assert bn.avg_var.shape == self.expected_size
def test_no_gamma(self):
bn = links.BatchNormalization(axis=self.axis, use_gamma=False)
assert bn.gamma is None
bn(self.x)
assert bn.gamma is None
def test_no_beta(self):
bn = links.BatchNormalization(axis=self.axis, use_beta=False)
assert bn.beta is None
bn(self.x)
assert bn.beta is None
class TestFailChannalSizeInference(unittest.TestCase):
def test_fail_inference(self):
with self.assertRaises(RuntimeError):
links.BatchNormalization()
@attr.multi_gpu(2)
class TestLazyInitializationWithNonZeroCurrentCudaDevice(unittest.TestCase):
def test_lazy_initialization_with_non_zero_current_cuda_device(self):
# Create a lazily initialized BatchNormalization link.
bn = links.BatchNormalization(axis=(0, 2, 3))
assert bn.xp is numpy
device = backend.GpuDevice.from_device_id(1)
bn.to_device(device)
assert bn.xp is cuda.cupy
assert bn.device == device
assert bn.beta.device == device
assert bn.gamma.device == device
assert bn.avg_mean is None
assert bn.avg_var is None
x = numpy.random.randn(5, 4, 3, 2).astype(numpy.float32)
x = device.send(x)
# All parameters and persistent values should correctly be initialized
# on device 1, and not device 0, meaning forward pass should not raise
# any errors.
bn(x)
assert bn.xp is cuda.cupy
assert bn.device == device
assert bn.beta.device == device
assert bn.gamma.device == device
assert bn.avg_mean is not None
assert bn.avg_var is not None
assert backend.GpuDevice.from_array(bn.avg_mean) == device
assert backend.GpuDevice.from_array(bn.avg_var) == device
@testing.parameterize(*testing.product({
'x_shape,bn_kwargs': [
((4, 3), {'axis': (0,)}),
((4, 3), {'size': (3,)}),
],
}))
class TestSerialize(unittest.TestCase):
def create_link(self):
return links.BatchNormalization(**self.bn_kwargs)
def train_link(self, bn):
x = numpy.random.rand(*self.x_shape).astype(numpy.float32)
bn(x)
x = numpy.random.rand(*self.x_shape).astype(numpy.float32)
bn(x, finetune=True)
# has non-trivial values to be stored
assert bn.avg_mean is not None
assert bn.N == 1
def create_serializer_pair(self):
target = {}
return (
chainer.serializers.DictionarySerializer(target),
chainer.serializers.NpzDeserializer(target),
)
def test_serialize(self):
ser, de = self.create_serializer_pair()
link1 = self.create_link()
self.train_link(link1)
link1.serialize(ser)
link2 = self.create_link()
link2.serialize(de)
testing.assert_allclose(link2.avg_mean, link1.avg_mean)
testing.assert_allclose(link2.avg_var, link1.avg_var)
testing.assert_allclose(link2.beta.array, link1.beta.array)
testing.assert_allclose(link2.gamma.array, link1.gamma.array)
assert link2.N == link1.N
@testing.inject_backend_tests(
[
'test_param_layout_to_device',
'test_forward',
],
# CPU tests
[{}]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
}))
class TestBatchNormalizationMemoryLayouts(unittest.TestCase):
batch = 2
channels = 3
height = 13
width = 11
axis = None
dtype = numpy.float32
def create_link(self):
channel_dims = (self.channels,)
link = links.BatchNormalization(
channel_dims,
axis=self.axis)
return link
def create_input_array(self, xp):
x_shape = (self.batch, self.height, self.width, self.channels)
x = xp.ones(x_shape, self.dtype)
return x
def test_param_layout(self):
with chainer.using_config('compute_mode', 'cudnn_fast'):
link = self.create_link()
assert link.gamma.layout is None
assert link.beta.layout is None
def test_param_layout_to_device(self, backend_config):
with chainer.using_config('compute_mode', 'cudnn_fast'):
link = self.create_link()
assert link.gamma.device == chainer.get_device('@numpy')
assert link.beta.device == chainer.get_device('@numpy')
link.to_device(backend_config.device)
assert link.gamma.device == backend_config.device
assert link.beta.device == backend_config.device
assert link.gamma.layout is None
assert link.beta.layout is None
def test_forward(self, backend_config):
with chainer.using_config('compute_mode', 'cudnn_fast'):
link = self.create_link()
link.to_device(backend_config.device)
x = self.create_input_array(backend_config.xp)
x = chainer.Variable(x, layout=memory_layouts.CUDNN_CHANNEL_LAST_X)
x.to_device(backend_config.device)
with backend_config:
y = link(x)
assert link.gamma.device == backend_config.device
assert link.beta.device == backend_config.device
assert y.layout == memory_layouts.CUDNN_CHANNEL_LAST_X
assert y.shape == (
self.batch,
self.channels,
self.height,
self.width)
testing.run_module(__name__, __file__)
| {
"content_hash": "6a1c017feef8e81ec5ff175360235fcd",
"timestamp": "",
"source": "github",
"line_count": 774,
"max_line_length": 79,
"avg_line_length": 33.622739018087856,
"alnum_prop": 0.6077851214263756,
"repo_name": "pfnet/chainer",
"id": "b4bf70ae5ec5320ba58409c9a01267828e3102db",
"size": "26024",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/links_tests/normalization_tests/test_batch_normalization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2564338"
}
],
"symlink_target": ""
} |
import mock
import pytest
from future.moves.urllib.parse import quote
from django.utils import timezone
from django.core.exceptions import MultipleObjectsReturned
from osf.models import Guid, NodeLicenseRecord, OSFUser
from osf_tests.factories import AuthUserFactory, UserFactory, NodeFactory, NodeLicenseRecordFactory, \
RegistrationFactory, PreprintFactory, PreprintProviderFactory
from osf.utils.permissions import ADMIN
from tests.base import OsfTestCase
from tests.test_websitefiles import TestFile
from website.settings import MFR_SERVER_URL, WATERBUTLER_URL
@pytest.mark.django_db
class TestGuid:
def test_long_id_gets_generated_on_creation(self):
obj = NodeLicenseRecordFactory()
assert obj._id
assert len(obj._id) > 5
def test_loading_by_object_id(self):
obj = NodeLicenseRecordFactory()
assert NodeLicenseRecord.load(obj._id) == obj
def test_loading_by_short_guid(self):
obj = UserFactory()
assert OSFUser.load(obj._id) == obj
@pytest.mark.parametrize('Factory',
[
UserFactory,
NodeFactory,
RegistrationFactory,
])
def test_short_guid_gets_generated_on_creation(self, Factory):
obj = Factory()
assert obj._id
assert len(obj._id) == 5
@pytest.mark.django_db
class TestReferent:
@pytest.mark.parametrize('Factory',
[
UserFactory,
NodeFactory
])
def test_referent(self, Factory):
obj = Factory()
guid = Guid.objects.get(_id=obj._id)
assert guid.referent == obj
@pytest.mark.parametrize('Factory',
[
UserFactory,
NodeFactory
])
def test_referent_can_be_set(self, Factory):
obj = Factory()
obj1 = Factory()
guid = Guid.load(obj._id)
assert guid.referent == obj # sanity check
guid.referent = obj1
assert guid.referent == obj1
@pytest.mark.skip('I don\'t actually think we do this anywhere')
def test_swapping_guids(self):
user = UserFactory()
node = NodeFactory()
user_guid = user.guids[0]
node_guid = node.guids[0]
user._id = node_guid._id
node._id = user_guid._id
assert node_guid._id == user._id
assert user_guid._id == node._id
def test_id_matches(self):
user = UserFactory()
guid = Guid.objects.get(_id=user._id)
assert user._id == guid._id
@pytest.mark.skip('I don\'t actually think we do this anywhere')
@pytest.mark.parametrize('Factory',
[
UserFactory,
NodeFactory
])
def test_nulling_out_guid(self, Factory):
obj = Factory()
guid = Guid.load(obj._id)
obj.guid = None
obj.save()
obj.refresh_from_db()
# queryset cache returns the old version
guid.refresh_from_db()
assert obj.guid != guid
assert guid.guid != obj.guid.guid
@pytest.mark.parametrize('Factory',
[
UserFactory,
NodeFactory,
])
def test_querying_with_multiple_guids(self, Factory):
obj = Factory()
guids = [obj.guids.first()]
for i in range(0, 16):
guids.append(Guid.objects.create(referent=obj))
try:
Factory._meta.model.objects.get(id=obj.id)
except MultipleObjectsReturned as ex:
pytest.fail('Multiple objects returned for {} with multiple guids. {}'.format(Factory._meta.model, ex))
@pytest.mark.enable_bookmark_creation
class TestResolveGuid(OsfTestCase):
def setUp(self):
super(TestResolveGuid, self).setUp()
self.node = NodeFactory()
def test_resolve_guid(self):
res_guid = self.app.get(self.node.web_url_for('node_setting', _guid=True), auth=self.node.creator.auth)
res_full = self.app.get(self.node.web_url_for('node_setting'), auth=self.node.creator.auth)
assert res_guid.text == res_full.text
def test_resolve_guid_no_referent(self):
guid = Guid.load(self.node._id)
guid.referent = None
guid.save()
res = self.app.get(
self.node.web_url_for('node_setting', _guid=True),
auth=self.node.creator.auth,
expect_errors=True,
)
assert res.status_code == 404
@mock.patch('osf.models.node.Node.deep_url', None)
def test_resolve_guid_no_url(self):
res = self.app.get(
self.node.web_url_for('node_setting', _guid=True),
auth=self.node.creator.auth,
expect_errors=True,
)
assert res.status_code == 404
def test_resolve_guid_no_auth_redirect_to_cas_includes_public(self):
"""
Unauthenticated users are sent to login when visiting private projects, but not if the projects are public.
"""
res = self.app.get(
self.node.web_url_for('resolve_guid', guid=self.node._id),
expect_errors=True,
)
assert res.status_code == 302
assert '/login?service=' in res.location
self.node.is_public = True
self.node.save()
res = self.app.get(
self.node.web_url_for('resolve_guid', guid=self.node._id),
expect_errors=True,
)
assert res.status_code == 200
def test_resolve_guid_no_auth_redirect_to_cas_includes_public_with_url_segments(self):
"""
        Unauthenticated users are sent to login when visiting URLs related to private projects, but not if the
        projects are public.
"""
for segment in ('comments', 'links', 'components', 'files', 'files/osfstorage', 'files/addon'):
self.node.is_public = False
self.node.save()
res = self.app.get(
f'{self.node.web_url_for("resolve_guid", guid=self.node._id)}/{segment}/',
expect_errors=True,
)
assert res.status_code == 302
assert '/login?service=' in res.location
self.node.is_public = True
self.node.save()
res = self.app.get(
f'{self.node.web_url_for("resolve_guid", guid=self.node._id)}/{segment}/',
expect_errors=True,
)
assert res.status_code == 200
def test_resolve_guid_private_request_access_or_redirect_to_cas(self):
"""
Authenticated users are sent to the request access page when it is set to true on the node, otherwise they get a
legacy Forbidden page.
"""
non_contrib = AuthUserFactory()
self.node.access_requests_enabled = False
self.node.save()
res = self.app.get(
self.node.web_url_for('resolve_guid', guid=self.node._id),
auth=non_contrib.auth,
expect_errors=True,
)
assert '<title>OSF | Forbidden</title>' in res.body.decode()
assert res.status_code == 403
self.node.access_requests_enabled = True
self.node.save()
res = self.app.get(
self.node.web_url_for('resolve_guid', guid=self.node._id),
auth=non_contrib.auth,
expect_errors=True,
)
assert res.status_code == 403
assert '<title>OSF | Request Access</title>' in res.body.decode()
def test_resolve_guid_download_file(self):
pp = PreprintFactory(finish=True)
res = self.app.get(pp.url + 'download')
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&direct&version=1'.format(WATERBUTLER_URL, pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get(pp.url + 'download/')
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&direct&version=1'.format(WATERBUTLER_URL, pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get('/{}/download'.format(pp.primary_file.get_guid(create=True)._id))
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&direct&version=1'.format(WATERBUTLER_URL, pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
pp.primary_file.create_version(
creator=pp.creator,
location={u'folder': u'osf', u'object': u'deadbe', u'service': u'cloud'},
metadata={u'contentType': u'img/png', u'size': 9001}
)
pp.primary_file.save()
res = self.app.get(pp.url + 'download/')
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&direct&version=2'.format(WATERBUTLER_URL, pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get(pp.url + 'download/?version=1')
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?version=1&action=download&direct'.format(WATERBUTLER_URL, pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
unpub_pp = PreprintFactory(project=self.node, is_published=False)
res = self.app.get(unpub_pp.url + 'download/?version=1', auth=unpub_pp.creator.auth)
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?version=1&action=download&direct'.format(WATERBUTLER_URL, unpub_pp._id, unpub_pp.primary_file.provider, unpub_pp.primary_file.path) in res.location
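        # --- Illustrative note, not part of the original test ---
        # Every redirect asserted in this test targets the same WaterButler shape:
        #   {WATERBUTLER_URL}/v1/resources/<guid>/providers/<provider><path>
        #       ?action=download&direct&version=<n>
        # where the version query parameter reflects the requested file version
        # (or the latest one when no version is given).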
@mock.patch('website.settings.USE_EXTERNAL_EMBER', True)
@mock.patch('website.settings.EXTERNAL_EMBER_APPS', {
'preprints': {
'server': 'http://localhost:4200',
'path': '/preprints/'
},
})
def test_resolve_guid_download_file_from_emberapp_preprints(self):
provider = PreprintProviderFactory(_id='sockarxiv', name='Sockarxiv')
pp = PreprintFactory(finish=True, provider=provider)
assert pp.url.startswith('/preprints/sockarxiv')
res = self.app.get(pp.url + 'download')
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&direct&version=1'.format(WATERBUTLER_URL, pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get(pp.url + 'download/')
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&direct&version=1'.format(WATERBUTLER_URL, pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
@mock.patch('website.settings.USE_EXTERNAL_EMBER', True)
@mock.patch('website.settings.EXTERNAL_EMBER_APPS', {
'preprints': {
'server': 'http://localhost:4200',
'path': '/preprints/'
},
})
def test_resolve_guid_download_file_from_emberapp_preprints_unpublished(self):
# non-branded domains
provider = PreprintProviderFactory(_id='sockarxiv', name='Sockarxiv', reviews_workflow='pre-moderation')
# branded domains
branded_provider = PreprintProviderFactory(_id='spot', name='Spotarxiv', reviews_workflow='pre-moderation')
branded_provider.allow_submissions = False
branded_provider.domain = 'https://www.spotarxiv.com'
branded_provider.description = 'spots not dots'
branded_provider.domain_redirect_enabled = True
branded_provider.share_publish_type = 'Thesis'
branded_provider.save()
# test_provider_submitter_can_download_unpublished
submitter = AuthUserFactory()
pp = PreprintFactory(finish=True, provider=provider, is_published=False, creator=submitter)
pp.run_submit(submitter)
pp_branded = PreprintFactory(finish=True, provider=branded_provider, is_published=False, filename='preprint_file_two.txt', creator=submitter)
pp_branded.run_submit(submitter)
res = self.app.get('{}download'.format(pp.url), auth=submitter.auth)
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&direct&version=1'.format(WATERBUTLER_URL, pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get('{}download'.format(pp_branded.url), auth=submitter.auth)
assert res.status_code == 302
# test_provider_super_user_can_download_unpublished
super_user = AuthUserFactory()
super_user.is_superuser = True
super_user.save()
res = self.app.get('{}download'.format(pp.url), auth=super_user.auth)
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&direct&version=1'.format(WATERBUTLER_URL, pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get('{}download'.format(pp_branded.url), auth=super_user.auth)
assert res.status_code == 302
# test_provider_moderator_can_download_unpublished
moderator = AuthUserFactory()
provider.add_to_group(moderator, 'moderator')
provider.save()
res = self.app.get('{}download'.format(pp.url), auth=moderator.auth)
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&direct&version=1'.format(WATERBUTLER_URL, pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
branded_provider.add_to_group(moderator, 'moderator')
branded_provider.save()
res = self.app.get('{}download'.format(pp_branded.url), auth=moderator.auth)
assert res.status_code == 302
# test_provider_admin_can_download_unpublished
admin = AuthUserFactory()
provider.add_to_group(admin, ADMIN)
provider.save()
res = self.app.get('{}download'.format(pp.url), auth=admin.auth)
assert res.status_code == 302
assert '{}/v1/resources/{}/providers/{}{}?action=download&direct&version=1'.format(WATERBUTLER_URL, pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
branded_provider.add_to_group(admin, ADMIN)
branded_provider.save()
res = self.app.get('{}download'.format(pp_branded.url), auth=admin.auth)
assert res.status_code == 302
def test_resolve_guid_download_file_export(self):
pp = PreprintFactory(finish=True)
res = self.app.get(pp.url + 'download?format=asdf')
assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3Fformat%3Dasdf%26action%3Ddownload%26direct%26version%3D1'.format(quote(WATERBUTLER_URL), pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get(pp.url + 'download/?format=asdf')
assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3Fformat%3Dasdf%26action%3Ddownload%26direct%26version%3D1'.format(quote(WATERBUTLER_URL), pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get('/{}/download?format=asdf'.format(pp.primary_file.get_guid(create=True)._id))
assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3Fformat%3Dasdf%26action%3Ddownload%26direct%26version%3D1'.format(quote(WATERBUTLER_URL), pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
res = self.app.get('/{}/download/?format=asdf'.format(pp.primary_file.get_guid(create=True)._id))
assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3Fformat%3Dasdf%26action%3Ddownload%26direct%26version%3D1'.format(quote(WATERBUTLER_URL), pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
pp.primary_file.create_version(
creator=pp.creator,
location={u'folder': u'osf', u'object': u'deadbe', u'service': u'cloud'},
metadata={u'contentType': u'img/png', u'size': 9001}
)
pp.primary_file.save()
res = self.app.get(pp.url + 'download/?format=asdf')
assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3F'.format(quote(WATERBUTLER_URL), pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
quarams = res.location.split('%3F')[1].split('%26')
assert 'action%3Ddownload' in quarams
assert 'version%3D2' in quarams
assert 'direct' in quarams
res = self.app.get(pp.url + 'download/?format=asdf&version=1')
assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3F'.format(quote(WATERBUTLER_URL), pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
quarams = res.location.split('%3F')[1].split('%26')
assert 'action%3Ddownload' in quarams
assert 'version%3D1' in quarams
assert 'direct' in quarams
unpub_pp = PreprintFactory(project=self.node, is_published=False)
res = self.app.get(unpub_pp.url + 'download?format=asdf', auth=unpub_pp.creator.auth)
        assert res.status_code == 302
assert '{}/export?format=asdf&url='.format(MFR_SERVER_URL) in res.location
assert '{}/v1/resources/{}/providers/{}{}%3F'.format(quote(WATERBUTLER_URL), unpub_pp._id, unpub_pp.primary_file.provider, unpub_pp.primary_file.path) in res.location
quarams = res.location.split('%3F')[1].split('%26')
assert 'action%3Ddownload' in quarams
assert 'version%3D1' in quarams
assert 'direct' in quarams
def test_resolve_guid_download_file_export_same_format_optimization(self):
pp = PreprintFactory(filename='test.pdf', finish=True)
res = self.app.get(pp.url + 'download/?format=pdf')
assert res.status_code == 302
assert '{}/export?'.format(MFR_SERVER_URL) not in res.location
assert '{}/v1/resources/{}/providers/{}{}?format=pdf&action=download&direct&version=1'.format(WATERBUTLER_URL, pp._id, pp.primary_file.provider, pp.primary_file.path) in res.location
def test_resolve_guid_download_errors(self):
testfile = TestFile.get_or_create(self.node, 'folder/path')
testfile.name = 'asdf'
testfile.materialized_path = '/folder/path'
guid = testfile.get_guid(create=True)
testfile.save()
testfile.delete()
res = self.app.get('/{}/download'.format(guid), expect_errors=True)
assert res.status_code == 404
pp = PreprintFactory(is_published=False)
res = self.app.get(pp.url + 'download', expect_errors=True)
assert res.status_code == 404
pp.is_published = True
pp.save()
pp.is_public = False
pp.save()
non_contrib = AuthUserFactory()
res = self.app.get(pp.url + 'download', auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
pp.deleted = timezone.now()
pp.save()
res = self.app.get(pp.url + 'download', auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 410
| {
"content_hash": "cb5ecb089e7d80ed9906f4f305dddc81",
"timestamp": "",
"source": "github",
"line_count": 461,
"max_line_length": 212,
"avg_line_length": 42.23644251626898,
"alnum_prop": 0.6343279749370859,
"repo_name": "aaxelb/osf.io",
"id": "d3b5d9bd87a8636745bd21225f0db7296d09ce1e",
"size": "19471",
"binary": false,
"copies": "5",
"ref": "refs/heads/feature/keen-replacement",
"path": "osf_tests/test_guid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93635"
},
{
"name": "Dockerfile",
"bytes": "5876"
},
{
"name": "HTML",
"bytes": "373758"
},
{
"name": "JavaScript",
"bytes": "1596130"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "679193"
},
{
"name": "Python",
"bytes": "12036193"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
import os
PROJECT_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
print PROJECT_ROOT
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'dev.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '*g104nbk9h8t3%5pjj(jz8b0pxm_7_2y**m++3=uow5zmwbcbh'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'website.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'website.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'crispy_forms',
'south',
'rest_framework',
'posts',
'profiles',
'relationships',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Extra Settings.
AUTH_LOGIN_URL = '/login/'
AUTH_USER_MODEL = 'profiles.SiteUser'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
| {
"content_hash": "76cf0014e92590940dbaa604ab669769",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 127,
"avg_line_length": 32.733333333333334,
"alnum_prop": 0.6860149355057705,
"repo_name": "bajubullet/twitter-clone",
"id": "53d1fd669f0476b98c67cb5e715079dc0e4354c3",
"size": "5931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/website/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "305"
},
{
"name": "JavaScript",
"bytes": "3030"
},
{
"name": "Python",
"bytes": "43028"
}
],
"symlink_target": ""
} |
"""Test class for Fake driver."""
import mock
from ironic.common import boot_devices
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers import base as driver_base
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
class FakeDriverTestCase(db_base.DbTestCase):
def setUp(self):
super(FakeDriverTestCase, self).setUp()
mgr_utils.mock_the_extension_manager()
self.driver = driver_factory.get_driver("fake")
self.node = obj_utils.get_test_node(self.context)
self.task = mock.MagicMock(spec=task_manager.TaskManager)
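        # task is a MagicMock specced to TaskManager, so the driver interfaces
        # exercised below receive a stand-in task rather than a real one.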
self.task.shared = False
self.task.node = self.node
self.task.driver = self.driver
def test_driver_interfaces(self):
self.assertIsInstance(self.driver.power, driver_base.PowerInterface)
self.assertIsInstance(self.driver.deploy, driver_base.DeployInterface)
self.assertIsInstance(self.driver.boot, driver_base.BootInterface)
self.assertIsInstance(self.driver.vendor, driver_base.VendorInterface)
self.assertIsInstance(self.driver.console,
driver_base.ConsoleInterface)
self.assertIsNone(self.driver.rescue)
def test_get_properties(self):
expected = ['A1', 'A2', 'B1', 'B2']
properties = self.driver.get_properties()
self.assertEqual(sorted(expected), sorted(properties.keys()))
def test_power_interface(self):
self.assertEqual({}, self.driver.power.get_properties())
self.driver.power.validate(self.task)
self.driver.power.get_power_state(self.task)
self.assertRaises(exception.InvalidParameterValue,
self.driver.power.set_power_state,
self.task, states.NOSTATE)
self.driver.power.set_power_state(self.task, states.POWER_ON)
self.driver.power.reboot(self.task)
def test_deploy_interface(self):
self.assertEqual({}, self.driver.deploy.get_properties())
self.driver.deploy.validate(None)
self.driver.deploy.prepare(None)
self.driver.deploy.deploy(None)
self.driver.deploy.take_over(None)
self.driver.deploy.clean_up(None)
self.driver.deploy.tear_down(None)
def test_boot_interface(self):
self.assertEqual({}, self.driver.boot.get_properties())
self.driver.boot.validate(self.task)
self.driver.boot.prepare_ramdisk(self.task, {})
self.driver.boot.clean_up_ramdisk(self.task)
self.driver.boot.prepare_instance(self.task)
self.driver.boot.clean_up_instance(self.task)
def test_console_interface(self):
self.assertEqual({}, self.driver.console.get_properties())
self.driver.console.validate(self.task)
self.driver.console.start_console(self.task)
self.driver.console.stop_console(self.task)
self.driver.console.get_console(self.task)
def test_management_interface_get_properties(self):
self.assertEqual({}, self.driver.management.get_properties())
def test_management_interface_validate(self):
self.driver.management.validate(self.task)
def test_management_interface_set_boot_device_good(self):
self.driver.management.set_boot_device(self.task, boot_devices.PXE)
def test_management_interface_set_boot_device_fail(self):
self.assertRaises(exception.InvalidParameterValue,
self.driver.management.set_boot_device, self.task,
'not-supported')
def test_management_interface_get_supported_boot_devices(self):
expected = [boot_devices.PXE]
self.assertEqual(
expected,
self.driver.management.get_supported_boot_devices(self.task))
def test_management_interface_get_boot_device(self):
expected = {'boot_device': boot_devices.PXE, 'persistent': False}
self.assertEqual(expected,
self.driver.management.get_boot_device(self.task))
def test_inspect_interface(self):
self.assertEqual({}, self.driver.inspect.get_properties())
self.driver.inspect.validate(self.task)
self.driver.inspect.inspect_hardware(self.task)
| {
"content_hash": "67daca29980a070b1fa94827670a3c87",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 79,
"avg_line_length": 41.242990654205606,
"alnum_prop": 0.6798096532970768,
"repo_name": "ruyang/ironic",
"id": "c0a849f5a522cb49a458ed62d569d0e1538ee057",
"size": "5086",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "ironic/tests/unit/drivers/test_fake.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "5133461"
},
{
"name": "Shell",
"bytes": "107097"
}
],
"symlink_target": ""
} |
import os
import sys
from utdirect.utils import setup_extra
# If you use an 'extra' folder with svn externals, uncomment the following lines:
#CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
#setup_extra(os.path.join(CURRENT_DIR, 'extra'))
import settings
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from django.core.management import execute_from_command_line
    try:  # This should never be on the python path.
        sys.path.remove(os.path.dirname(os.path.abspath(__file__)))
    except ValueError:
        pass
execute_from_command_line(sys.argv)
| {
"content_hash": "6b862ccd433a08086e10063394ddb7db",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 81,
"avg_line_length": 32.10526315789474,
"alnum_prop": 0.7016393442622951,
"repo_name": "jeffles/LI",
"id": "276060fd6e6f04dc1e3f2a462089afca05621c54",
"size": "632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "76593"
}
],
"symlink_target": ""
} |
'''
File: statistics.py
Author: Oliver Zscheyge
Description:
Collection of statistics functions.
'''
import math
def mean(values, ndigits=None):
"""
Return:
Mean of values, rounded to ndigits.
"""
n = len(values)
if n == 0:
raise ValueError(u"Can't compute mean over empty list!")
su = math.fsum(values)
if ndigits is not None:
return round(su / float(n), ndigits)
return su / float(n)
def variance(values, ndigits=None):
"""
Args:
values: List of at least 2 values.
Return:
Variance of a list of values, rounded to ndigits.
"""
n = len(values)
if n < 2:
raise ValueError(u"Can't compute variance over less than 2 values.")
mean = math.fsum(values) / float(n)
var = math.fsum([(v - mean) * (v - mean) for v in values])
if ndigits is not None:
return round(var / float(n), ndigits)
return var / float(n)
def stdev(values, ndigits=None):
"""
Args:
values: List of at least 2 values.
Return:
Standard deviation of a list of values, rounded to ndigits.
"""
n = len(values)
if n < 2:
raise ValueError(u"Can't compute standard deviation over less than 2 values.")
mean = math.fsum(values) / float(n)
var = math.fsum([(v - mean) * (v - mean) for v in values]) / float(n)
if ndigits is not None:
return round(math.sqrt(var), ndigits)
return math.sqrt(var)
def mean_stdev(values, ndigits=None):
"""
Args:
values: List of at least 2 values.
Return:
(mean, standard deviation) tuple of a list of values, rounded to ndigits.
"""
n = len(values)
if n < 2:
raise ValueError(u"Can't compute variance/standard deviation over less than 2 values.")
mean = math.fsum(values) / float(n)
sd = math.sqrt(math.fsum([(v - mean) * (v - mean) for v in values]) / float(n))
if ndigits is not None:
return (round(mean, ndigits), round(sd, ndigits))
return (mean, sd)
if __name__ == '__main__':
print u"Test for %s" % __file__
values = range(10)
def assert_raises(fun, arg, msg):
try:
res = fun(arg)
assert False, msg
except ValueError:
pass
print u" Testing mean and variance..."
m = mean(values)
var = variance(values)
assert m == 4.5
assert var == 8.25
assert_raises(mean, [], u"Mean of empty list did not fail properly!")
assert_raises(variance, [], u"Variance of empty list did not fail properly!")
assert_raises(variance, [42], u"Variance of 1 element list did not fail properly!")
print u" Testing stdev and mean_stdev..."
sv = stdev(values)
(mean2, stdev2) = mean_stdev(values)
assert sv == math.sqrt(var)
assert m == mean2
assert sv == stdev2
assert_raises(stdev, [], u"Stdev of empty list did not fail!")
assert_raises(stdev, [42], u"Stdev of 1 element list did not fail!")
assert_raises(mean_stdev, [], u"Mean_stdev of empty list did not fail!")
assert_raises(mean_stdev, [42], u"Mean_stdev of 1 element list did not fail!")
print u" Testing mean_stdev rounding..."
stats_rounded = mean_stdev(values, 2)
assert stats_rounded == (4.5, 2.87)
print u"Passed all tests!"
| {
"content_hash": "03266651e910dd3fc4d519ec74214fb6",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 95,
"avg_line_length": 29.4375,
"alnum_prop": 0.6060054595086443,
"repo_name": "ooz/Confopy",
"id": "1bcf618b8baf446984bfce0d820c06c28cde07e5",
"size": "3313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "confopy/analysis/statistics.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182874"
},
{
"name": "Shell",
"bytes": "426"
}
],
"symlink_target": ""
} |
r"""Train Onsets and Frames piano transcription model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from magenta.common import tf_utils
from magenta.models.onsets_frames_transcription import constants
from magenta.models.onsets_frames_transcription import model
from magenta.models.onsets_frames_transcription import train_util
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('master', '',
'Name of the TensorFlow runtime to use.')
tf.app.flags.DEFINE_string(
'examples_path', None,
'Path to a TFRecord file of train/eval examples.')
tf.app.flags.DEFINE_string(
'model_dir', '~/tmp/onsets_frames',
'Path where checkpoints and summary events will be located during '
'training and evaluation. Separate subdirectories `train` and `eval` '
'will be created within this directory.')
tf.app.flags.DEFINE_integer('num_steps', 1000000,
'Number of training steps or `None` for infinite.')
tf.app.flags.DEFINE_integer(
'keep_checkpoint_max', 100,
'Maximum number of checkpoints to keep in `train` mode or 0 for infinite.')
tf.app.flags.DEFINE_string(
'hparams', '',
'A comma-separated list of `name=value` hyperparameter values.')
tf.app.flags.DEFINE_string(
'log', 'INFO',
'The threshold for what messages will be logged: '
'DEBUG, INFO, WARN, ERROR, or FATAL.')
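# Illustrative invocation (paths and hyperparameter values are placeholders):
#   python onsets_frames_transcription_train.py \
#     --examples_path=/path/to/train_examples.tfrecord \
#     --model_dir=/tmp/onsets_frames \
#     --hparams="name=value,name2=value2"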
def run(hparams, model_dir):
"""Run train/eval/test."""
train_util.train(
master=FLAGS.master,
model_dir=model_dir,
examples_path=FLAGS.examples_path,
hparams=hparams,
keep_checkpoint_max=FLAGS.keep_checkpoint_max,
num_steps=FLAGS.num_steps)
def main(unused_argv):
tf.logging.set_verbosity(FLAGS.log)
tf.app.flags.mark_flags_as_required(['examples_path'])
model_dir = os.path.expanduser(FLAGS.model_dir)
hparams = tf_utils.merge_hparams(constants.DEFAULT_HPARAMS,
model.get_default_hparams())
# Command line flags override any of the preceding hyperparameter values.
hparams.parse(FLAGS.hparams)
run(hparams, model_dir)
def console_entry_point():
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
| {
"content_hash": "fe2aa84fed487aa46c4ad517bb81711f",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 31.410958904109588,
"alnum_prop": 0.6916703009158308,
"repo_name": "adarob/magenta",
"id": "9422fb329157f94caacef498e23e2cd70cc983bd",
"size": "2878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magenta/models/onsets_frames_transcription/onsets_frames_transcription_train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1933"
},
{
"name": "Python",
"bytes": "2941402"
},
{
"name": "Shell",
"bytes": "24986"
}
],
"symlink_target": ""
} |
import json
import os
import random
import unittest
import warnings
from copy import deepcopy
import numpy as np
from scipy.misc import central_diff_weights
from pymatgen.analysis.elasticity.elastic import (
ComplianceTensor,
ElasticTensor,
ElasticTensorExpansion,
NthOrderElasticTensor,
diff_fit,
find_eq_stress,
generate_pseudo,
get_diff_coeff,
get_strain_state_dict,
)
from pymatgen.analysis.elasticity.strain import Deformation, Strain
from pymatgen.analysis.elasticity.stress import Stress
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.tensors import Tensor
from pymatgen.core.units import FloatWithUnit
from pymatgen.util.testing import PymatgenTest
class ElasticTensorTest(PymatgenTest):
def setUp(self):
self.voigt_1 = [
[59.33, 28.08, 28.08, 0, 0, 0],
[28.08, 59.31, 28.07, 0, 0, 0],
[28.08, 28.07, 59.32, 0, 0, 0],
[0, 0, 0, 26.35, 0, 0],
[0, 0, 0, 0, 26.35, 0],
[0, 0, 0, 0, 0, 26.35],
]
mat = np.random.randn(6, 6)
mat = mat + np.transpose(mat)
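        # Adding the transpose symmetrizes the random 6x6 matrix so that it can
        # be interpreted as a (symmetric) Voigt elastic matrix.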
self.rand_elastic_tensor = ElasticTensor.from_voigt(mat)
self.ft = np.array(
[
[
[[59.33, 0, 0], [0, 28.08, 0], [0, 0, 28.08]],
[[0, 26.35, 0], [26.35, 0, 0], [0, 0, 0]],
[[0, 0, 26.35], [0, 0, 0], [26.35, 0, 0]],
],
[
[[0, 26.35, 0], [26.35, 0, 0], [0, 0, 0]],
[[28.08, 0, 0], [0, 59.31, 0], [0, 0, 28.07]],
[[0, 0, 0], [0, 0, 26.35], [0, 26.35, 0]],
],
[
[[0, 0, 26.35], [0, 0, 0], [26.35, 0, 0]],
[[0, 0, 0], [0, 0, 26.35], [0, 26.35, 0]],
[[28.08, 0, 0], [0, 28.07, 0], [0, 0, 59.32]],
],
]
)
self.elastic_tensor_1 = ElasticTensor(self.ft)
filepath = os.path.join(PymatgenTest.TEST_FILES_DIR, "Sn_def_stress.json")
with open(filepath) as f:
self.def_stress_dict = json.load(f)
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "test_toec_data.json")) as f:
self.toec_dict = json.load(f)
self.structure = self.get_structure("Sn")
warnings.simplefilter("always")
def test_properties(self):
# compliance tensor
ct = ComplianceTensor.from_voigt(np.linalg.inv(self.elastic_tensor_1.voigt))
self.assertArrayAlmostEqual(ct, self.elastic_tensor_1.compliance_tensor)
# KG average properties
self.assertAlmostEqual(38.49111111111, self.elastic_tensor_1.k_voigt)
self.assertAlmostEqual(22.05866666666, self.elastic_tensor_1.g_voigt)
self.assertAlmostEqual(38.49110945133, self.elastic_tensor_1.k_reuss)
self.assertAlmostEqual(20.67146635306, self.elastic_tensor_1.g_reuss)
self.assertAlmostEqual(38.49111028122, self.elastic_tensor_1.k_vrh)
self.assertAlmostEqual(21.36506650986, self.elastic_tensor_1.g_vrh)
# universal anisotropy
self.assertAlmostEqual(0.33553509658699, self.elastic_tensor_1.universal_anisotropy)
# homogeneous poisson
self.assertAlmostEqual(0.26579965576472, self.elastic_tensor_1.homogeneous_poisson)
# voigt notation tensor
self.assertArrayAlmostEqual(self.elastic_tensor_1.voigt, self.voigt_1)
# young's modulus
self.assertAlmostEqual(54087787667.160583, self.elastic_tensor_1.y_mod)
# prop dict
prop_dict = self.elastic_tensor_1.property_dict
self.assertAlmostEqual(prop_dict["homogeneous_poisson"], 0.26579965576)
for k, v in prop_dict.items():
self.assertAlmostEqual(getattr(self.elastic_tensor_1, k), v)
def test_directional_elastic_mod(self):
self.assertAlmostEqual(
self.elastic_tensor_1.directional_elastic_mod([1, 0, 0]),
self.elastic_tensor_1.voigt[0, 0],
)
self.assertAlmostEqual(self.elastic_tensor_1.directional_elastic_mod([1, 1, 1]), 73.624444444)
def test_compliance_tensor(self):
stress = self.elastic_tensor_1.calculate_stress([0.01] + [0] * 5)
comp = self.elastic_tensor_1.compliance_tensor
strain = Strain(comp.einsum_sequence([stress]))
self.assertArrayAlmostEqual(strain.voigt, [0.01] + [0] * 5)
def test_directional_poisson_ratio(self):
v_12 = self.elastic_tensor_1.directional_poisson_ratio([1, 0, 0], [0, 1, 0])
self.assertAlmostEqual(v_12, 0.321, places=3)
def test_structure_based_methods(self):
# trans_velocity
self.assertAlmostEqual(1996.35019877, self.elastic_tensor_1.trans_v(self.structure))
# long_velocity
self.assertAlmostEqual(3534.68123832, self.elastic_tensor_1.long_v(self.structure))
# Snyder properties
self.assertAlmostEqual(18.06127074, self.elastic_tensor_1.snyder_ac(self.structure))
self.assertAlmostEqual(0.18937465, self.elastic_tensor_1.snyder_opt(self.structure))
self.assertAlmostEqual(18.25064540, self.elastic_tensor_1.snyder_total(self.structure))
# Clarke
self.assertAlmostEqual(0.3450307, self.elastic_tensor_1.clarke_thermalcond(self.structure))
# Cahill
self.assertAlmostEqual(0.37896275, self.elastic_tensor_1.cahill_thermalcond(self.structure))
# Debye
self.assertAlmostEqual(198.8037985019, self.elastic_tensor_1.debye_temperature(self.structure))
# structure-property dict
sprop_dict = self.elastic_tensor_1.get_structure_property_dict(self.structure)
self.assertAlmostEqual(sprop_dict["long_v"], 3534.68123832)
for val in sprop_dict.values():
self.assertFalse(isinstance(val, FloatWithUnit))
for k, v in sprop_dict.items():
if k == "structure":
self.assertEqual(v, self.structure)
else:
f = getattr(self.elastic_tensor_1, k)
if callable(f):
self.assertAlmostEqual(getattr(self.elastic_tensor_1, k)(self.structure), v)
else:
self.assertAlmostEqual(getattr(self.elastic_tensor_1, k), v)
# Test other sprop dict modes
sprop_dict = self.elastic_tensor_1.get_structure_property_dict(self.structure, include_base_props=False)
self.assertFalse("k_vrh" in sprop_dict)
# Test ValueError being raised for structure properties
test_et = deepcopy(self.elastic_tensor_1)
test_et[0][0][0][0] = -100000
prop_dict = test_et.property_dict
for attr_name in sprop_dict:
if attr_name not in (list(prop_dict.keys()) + ["structure"]):
self.assertRaises(ValueError, getattr(test_et, attr_name), self.structure)
self.assertRaises(ValueError, test_et.get_structure_property_dict, self.structure)
noval_sprop_dict = test_et.get_structure_property_dict(self.structure, ignore_errors=True)
self.assertIsNone(noval_sprop_dict["snyder_ac"])
def test_new(self):
self.assertArrayAlmostEqual(self.elastic_tensor_1, ElasticTensor(self.ft))
nonsymm = self.ft
nonsymm[0, 1, 2, 2] += 1.0
with warnings.catch_warnings(record=True) as w:
ElasticTensor(nonsymm)
self.assertEqual(len(w), 1)
badtensor1 = np.zeros((3, 3, 3))
badtensor2 = np.zeros((3, 3, 3, 2))
self.assertRaises(ValueError, ElasticTensor, badtensor1)
self.assertRaises(ValueError, ElasticTensor, badtensor2)
def test_from_pseudoinverse(self):
strain_list = [Strain.from_deformation(def_matrix) for def_matrix in self.def_stress_dict["deformations"]]
stress_list = [stress for stress in self.def_stress_dict["stresses"]]
with warnings.catch_warnings(record=True):
et_fl = -0.1 * ElasticTensor.from_pseudoinverse(strain_list, stress_list).voigt
self.assertArrayAlmostEqual(
et_fl.round(2),
[
[59.29, 24.36, 22.46, 0, 0, 0],
[28.06, 56.91, 22.46, 0, 0, 0],
[28.06, 25.98, 54.67, 0, 0, 0],
[0, 0, 0, 26.35, 0, 0],
[0, 0, 0, 0, 26.35, 0],
[0, 0, 0, 0, 0, 26.35],
],
)
def test_from_independent_strains(self):
strains = self.toec_dict["strains"]
stresses = self.toec_dict["stresses"]
with warnings.catch_warnings(record=True):
et = ElasticTensor.from_independent_strains(strains, stresses)
self.assertArrayAlmostEqual(et.voigt, self.toec_dict["C2_raw"], decimal=-1)
def test_energy_density(self):
film_elac = ElasticTensor.from_voigt(
[
[324.32, 187.3, 170.92, 0.0, 0.0, 0.0],
[187.3, 324.32, 170.92, 0.0, 0.0, 0.0],
[170.92, 170.92, 408.41, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 150.73, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 150.73, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 238.74],
]
)
dfm = Deformation(
[
[-9.86004855e-01, 2.27539582e-01, -4.64426035e-17],
[-2.47802121e-01, -9.91208483e-01, -7.58675185e-17],
[-6.12323400e-17, -6.12323400e-17, 1.00000000e00],
]
)
self.assertAlmostEqual(film_elac.energy_density(dfm.green_lagrange_strain), 0.00125664672793)
film_elac.energy_density(
Strain.from_deformation(
[
[0.99774738, 0.11520994, -0.0],
[-0.11520994, 0.99774738, 0.0],
                    [-0.0, -0.0, 1.0],
]
)
)
class ElasticTensorExpansionTest(PymatgenTest):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "test_toec_data.json")) as f:
self.data_dict = json.load(f)
self.strains = [Strain(sm) for sm in self.data_dict["strains"]]
self.pk_stresses = [Stress(d) for d in self.data_dict["pk_stresses"]]
self.c2 = self.data_dict["C2_raw"]
self.c3 = self.data_dict["C3_raw"]
self.exp = ElasticTensorExpansion.from_voigt([self.c2, self.c3])
self.cu = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.623), ["Cu"], [[0] * 3])
indices = [(0, 0), (0, 1), (3, 3)]
values = [167.8, 113.5, 74.5]
cu_c2 = ElasticTensor.from_values_indices(values, indices, structure=self.cu, populate=True)
indices = [(0, 0, 0), (0, 0, 1), (0, 1, 2), (0, 3, 3), (0, 5, 5), (3, 4, 5)]
values = [-1507.0, -965.0, -71.0, -7.0, -901.0, 45.0]
cu_c3 = Tensor.from_values_indices(values, indices, structure=self.cu, populate=True)
self.exp_cu = ElasticTensorExpansion([cu_c2, cu_c3])
cu_c4 = Tensor.from_voigt(self.data_dict["Cu_fourth_order"])
self.exp_cu_4 = ElasticTensorExpansion([cu_c2, cu_c3, cu_c4])
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_init(self):
cijkl = Tensor.from_voigt(self.c2)
cijklmn = Tensor.from_voigt(self.c3)
exp = ElasticTensorExpansion([cijkl, cijklmn])
ElasticTensorExpansion.from_voigt([self.c2, self.c3])
self.assertEqual(exp.order, 3)
def test_from_diff_fit(self):
ElasticTensorExpansion.from_diff_fit(self.strains, self.pk_stresses)
def test_calculate_stress(self):
calc_stress = self.exp.calculate_stress(self.strains[0])
self.assertArrayAlmostEqual(self.pk_stresses[0], calc_stress, decimal=2)
def test_energy_density(self):
edensity = self.exp.energy_density(self.strains[0])
self.assertAlmostEqual(edensity, 1.36363099e-4)
def test_gruneisen(self):
# Get GGT
ggt = self.exp_cu.get_ggt([1, 0, 0], [0, 1, 0])
self.assertArrayAlmostEqual(np.eye(3) * np.array([4.92080537, 4.2852349, -0.7147651]), ggt)
# Get TGT
tgt = self.exp_cu.get_tgt()
self.assertArrayAlmostEqual(tgt, np.eye(3) * 2.59631832)
# Get heat capacity
c0 = self.exp_cu.get_heat_capacity(0, self.cu, [1, 0, 0], [0, 1, 0])
self.assertEqual(c0, 0.0)
c = self.exp_cu.get_heat_capacity(300, self.cu, [1, 0, 0], [0, 1, 0])
self.assertAlmostEqual(c, 8.285611958)
# Get Gruneisen parameter
gp = self.exp_cu.get_gruneisen_parameter()
self.assertAlmostEqual(gp, 2.59631832)
_ = self.exp_cu.get_gruneisen_parameter(temperature=200, structure=self.cu)
def test_thermal_expansion_coeff(self):
# TODO get rid of duplicates
alpha_dp = self.exp_cu.thermal_expansion_coeff(self.cu, 300, mode="dulong-petit")
alpha_dp_ground_truth = 6.3471959e-07 * np.ones((3, 3))
alpha_dp_ground_truth[np.diag_indices(3)] = 2.2875769e-7
self.assertArrayAlmostEqual(alpha_dp_ground_truth, alpha_dp, decimal=4)
alpha_debye = self.exp_cu.thermal_expansion_coeff(self.cu, 300, mode="debye")
alpha_comp = 5.9435148e-7 * np.ones((3, 3))
alpha_comp[np.diag_indices(3)] = 21.4533472e-06
self.assertArrayAlmostEqual(alpha_comp, alpha_debye)
def test_get_compliance_expansion(self):
ce_exp = self.exp_cu.get_compliance_expansion()
et_comp = ElasticTensorExpansion(ce_exp)
strain_orig = Strain.from_voigt([0.01, 0, 0, 0, 0, 0])
stress = self.exp_cu.calculate_stress(strain_orig)
strain_revert = et_comp.calculate_stress(stress)
self.assertArrayAlmostEqual(strain_orig, strain_revert, decimal=4)
def test_get_effective_ecs(self):
# Ensure zero strain is same as SOEC
test_zero = self.exp_cu.get_effective_ecs(np.zeros((3, 3)))
self.assertArrayAlmostEqual(test_zero, self.exp_cu[0])
s = np.zeros((3, 3))
s[0, 0] = 0.02
test_2percent = self.exp_cu.get_effective_ecs(s)
diff = test_2percent - test_zero
self.assertArrayAlmostEqual(self.exp_cu[1].einsum_sequence([s]), diff)
def test_get_strain_from_stress(self):
strain = Strain.from_voigt([0.05, 0, 0, 0, 0, 0])
stress3 = self.exp_cu.calculate_stress(strain)
strain_revert3 = self.exp_cu.get_strain_from_stress(stress3)
self.assertArrayAlmostEqual(strain, strain_revert3, decimal=2)
# fourth order
stress4 = self.exp_cu_4.calculate_stress(strain)
strain_revert4 = self.exp_cu_4.get_strain_from_stress(stress4)
self.assertArrayAlmostEqual(strain, strain_revert4, decimal=2)
def test_get_yield_stress(self):
self.exp_cu_4.get_yield_stress([1, 0, 0])
class NthOrderElasticTensorTest(PymatgenTest):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "test_toec_data.json")) as f:
self.data_dict = json.load(f)
self.strains = [Strain(sm) for sm in self.data_dict["strains"]]
self.pk_stresses = [Stress(d) for d in self.data_dict["pk_stresses"]]
self.c2 = NthOrderElasticTensor.from_voigt(self.data_dict["C2_raw"])
self.c3 = NthOrderElasticTensor.from_voigt(self.data_dict["C3_raw"])
def test_init(self):
c2 = NthOrderElasticTensor(self.c2.tolist())
c3 = NthOrderElasticTensor(self.c3.tolist())
c4 = NthOrderElasticTensor(np.zeros([3] * 8))
for n, c in enumerate([c2, c3, c4]):
self.assertEqual(c.order, n + 2)
self.assertRaises(ValueError, NthOrderElasticTensor, np.zeros([3] * 5))
def test_from_diff_fit(self):
c3 = NthOrderElasticTensor.from_diff_fit(
self.strains,
self.pk_stresses,
eq_stress=self.data_dict["eq_stress"],
order=3,
)
self.assertArrayAlmostEqual(c3.voigt, self.data_dict["C3_raw"], decimal=2)
def test_calculate_stress(self):
calc_stress = self.c2.calculate_stress(self.strains[0])
self.assertArrayAlmostEqual(self.pk_stresses[0], calc_stress, decimal=0)
# Test calculation from voigt strain
self.c2.calculate_stress(self.strains[0].voigt)
def test_energy_density(self):
self.c3.energy_density(self.strains[0])
class DiffFitTest(PymatgenTest):
"""
Tests various functions related to diff fitting
"""
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "test_toec_data.json")) as f:
self.data_dict = json.load(f)
self.strains = [Strain(sm) for sm in self.data_dict["strains"]]
self.pk_stresses = [Stress(d) for d in self.data_dict["pk_stresses"]]
def test_get_strain_state_dict(self):
strain_inds = [(0,), (1,), (2,), (1, 3), (1, 2, 3)]
vecs = {}
strain_states = []
for strain_ind in strain_inds:
ss = np.zeros(6)
np.put(ss, strain_ind, 1)
strain_states.append(tuple(ss))
vec = np.zeros((4, 6))
rand_values = np.random.uniform(0.1, 1, 4)
for i in strain_ind:
vec[:, i] = rand_values
vecs[strain_ind] = vec
all_strains = [Strain.from_voigt(v).zeroed() for vec in vecs.values() for v in vec]
random.shuffle(all_strains)
all_stresses = [Stress.from_voigt(np.random.random(6)).zeroed() for s in all_strains]
strain_dict = {k.tostring(): v for k, v in zip(all_strains, all_stresses)}
ss_dict = get_strain_state_dict(all_strains, all_stresses, add_eq=False)
# Check length of ss_dict
self.assertEqual(len(strain_inds), len(ss_dict))
# Check sets of strain states are correct
self.assertEqual(set(strain_states), set(ss_dict.keys()))
for strain_state, data in ss_dict.items():
# Check correspondence of strains/stresses
for strain, stress in zip(data["strains"], data["stresses"]):
self.assertArrayAlmostEqual(
Stress.from_voigt(stress),
strain_dict[Strain.from_voigt(strain).tostring()],
)
# Add test to ensure zero strain state doesn't cause issue
strains, stresses = [Strain.from_voigt([-0.01] + [0] * 5)], [Stress(np.eye(3))]
ss_dict = get_strain_state_dict(strains, stresses)
self.assertArrayAlmostEqual(list(ss_dict.keys()), [[1, 0, 0, 0, 0, 0]])
def test_find_eq_stress(self):
test_strains = deepcopy(self.strains)
test_stresses = deepcopy(self.pk_stresses)
with warnings.catch_warnings(record=True):
no_eq = find_eq_stress(test_strains, test_stresses)
self.assertArrayAlmostEqual(no_eq, np.zeros((3, 3)))
test_strains[3] = Strain.from_voigt(np.zeros(6))
eq_stress = find_eq_stress(test_strains, test_stresses)
self.assertArrayAlmostEqual(test_stresses[3], eq_stress)
def test_get_diff_coeff(self):
forward_11 = get_diff_coeff([0, 1], 1)
forward_13 = get_diff_coeff([0, 1, 2, 3], 1)
backward_26 = get_diff_coeff(np.arange(-6, 1), 2)
central_29 = get_diff_coeff(np.arange(-4, 5), 2)
self.assertArrayAlmostEqual(forward_11, [-1, 1])
self.assertArrayAlmostEqual(forward_13, [-11.0 / 6, 3, -3.0 / 2, 1.0 / 3])
self.assertArrayAlmostEqual(
backward_26,
[
137.0 / 180,
-27.0 / 5,
33.0 / 2,
-254.0 / 9,
117.0 / 4,
-87.0 / 5,
203.0 / 45,
],
)
self.assertArrayAlmostEqual(central_29, central_diff_weights(9, 2))
def test_generate_pseudo(self):
strain_states = np.eye(6).tolist()
m2, abs = generate_pseudo(strain_states, order=2)
m3, abs = generate_pseudo(strain_states, order=3)
m4, abs = generate_pseudo(strain_states, order=4)
def test_fit(self):
diff_fit(self.strains, self.pk_stresses, self.data_dict["eq_stress"])
reduced = [(e, pk) for e, pk in zip(self.strains, self.pk_stresses) if not (abs(abs(e) - 0.05) < 1e-10).any()]
# Get reduced dataset
r_strains, r_pk_stresses = zip(*reduced)
with warnings.catch_warnings(record=True):
c2 = diff_fit(r_strains, r_pk_stresses, self.data_dict["eq_stress"], order=2)
c2, c3, c4 = diff_fit(r_strains, r_pk_stresses, self.data_dict["eq_stress"], order=4)
c2, c3 = diff_fit(self.strains, self.pk_stresses, self.data_dict["eq_stress"], order=3)
c2_red, c3_red = diff_fit(r_strains, r_pk_stresses, self.data_dict["eq_stress"], order=3)
self.assertArrayAlmostEqual(c2.voigt, self.data_dict["C2_raw"])
self.assertArrayAlmostEqual(c3.voigt, self.data_dict["C3_raw"], decimal=5)
self.assertArrayAlmostEqual(c2, c2_red, decimal=0)
self.assertArrayAlmostEqual(c3, c3_red, decimal=-1)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "bed3b84c9bfdfab6ae0e3ef6915496f9",
"timestamp": "",
"source": "github",
"line_count": 478,
"max_line_length": 118,
"avg_line_length": 44.44979079497908,
"alnum_prop": 0.5946251235468537,
"repo_name": "davidwaroquiers/pymatgen",
"id": "db305bdaf5d12e6a82e74e43c49cc61d6e8494f2",
"size": "21247",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/elasticity/tests/test_elastic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "87"
},
{
"name": "CSS",
"bytes": "7572"
},
{
"name": "Cython",
"bytes": "38793"
},
{
"name": "HTML",
"bytes": "12642493"
},
{
"name": "OpenEdge ABL",
"bytes": "312"
},
{
"name": "Python",
"bytes": "9213466"
},
{
"name": "Roff",
"bytes": "1407429"
},
{
"name": "Shell",
"bytes": "12027"
}
],
"symlink_target": ""
} |
import re
import six
from cassandra.util import OrderedDict
from cassandra.cqlengine import CQLEngineException
from cassandra.cqlengine import columns
from cassandra.cqlengine import connection
from cassandra.cqlengine import models
class UserTypeException(CQLEngineException):
pass
class UserTypeDefinitionException(UserTypeException):
pass
class BaseUserType(object):
"""
The base type class; don't inherit from this, inherit from UserType, defined below
"""
__type_name__ = None
_fields = None
_db_map = None
def __init__(self, **values):
self._values = {}
for name, field in self._fields.items():
value = values.get(name, None)
if value is not None or isinstance(field, columns.BaseContainerColumn):
value = field.to_python(value)
value_mngr = field.value_manager(self, field, value)
if name in values:
value_mngr.explicit = True
self._values[name] = value_mngr
def __eq__(self, other):
if self.__class__ != other.__class__:
return False
keys = set(self._fields.keys())
other_keys = set(other._fields.keys())
if keys != other_keys:
return False
for key in other_keys:
if getattr(self, key, None) != getattr(other, key, None):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "{{{0}}}".format(', '.join("'{0}': {1}".format(k, getattr(self, k)) for k, v in six.iteritems(self._values)))
def has_changed_fields(self):
return any(v.changed for v in self._values.values())
def reset_changed_fields(self):
for v in self._values.values():
v.reset_previous_value()
def __iter__(self):
for field in self._fields.keys():
yield field
def __getitem__(self, key):
if not isinstance(key, six.string_types):
raise TypeError
if key not in self._fields.keys():
raise KeyError
return getattr(self, key)
def __setitem__(self, key, val):
if not isinstance(key, six.string_types):
raise TypeError
if key not in self._fields.keys():
raise KeyError
return setattr(self, key, val)
def __len__(self):
try:
return self._len
        except AttributeError:
            self._len = len(self._fields)
return self._len
def keys(self):
""" Returns a list of column IDs. """
return [k for k in self]
def values(self):
""" Returns list of column values. """
return [self[k] for k in self]
def items(self):
""" Returns a list of column ID/value tuples. """
return [(k, self[k]) for k in self]
@classmethod
def register_for_keyspace(cls, keyspace):
connection.register_udt(keyspace, cls.type_name(), cls)
@classmethod
def type_name(cls):
"""
Returns the type name if it's been defined
otherwise, it creates it from the class name
"""
if cls.__type_name__:
type_name = cls.__type_name__.lower()
else:
camelcase = re.compile(r'([a-z])([A-Z])')
ccase = lambda s: camelcase.sub(lambda v: '{0}_{1}'.format(v.group(1), v.group(2)), s)
type_name = ccase(cls.__name__)
# trim to less than 48 characters or cassandra will complain
type_name = type_name[-48:]
type_name = type_name.lower()
type_name = re.sub(r'^_+', '', type_name)
cls.__type_name__ = type_name
return type_name
def validate(self):
"""
Cleans and validates the field values
"""
for name, field in self._fields.items():
v = getattr(self, name)
if v is None and not self._values[name].explicit and field.has_default:
v = field.get_default()
val = field.validate(v)
setattr(self, name, val)
class UserTypeMetaClass(type):
def __new__(cls, name, bases, attrs):
field_dict = OrderedDict()
field_defs = [(k, v) for k, v in attrs.items() if isinstance(v, columns.Column)]
field_defs = sorted(field_defs, key=lambda x: x[1].position)
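        # Fields are sorted by their declaration position so that the resulting
        # UDT field order matches the order of the class-body definitions.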
def _transform_column(field_name, field_obj):
field_dict[field_name] = field_obj
field_obj.set_column_name(field_name)
attrs[field_name] = models.ColumnDescriptor(field_obj)
# transform field definitions
for k, v in field_defs:
# don't allow a field with the same name as a built-in attribute or method
if k in BaseUserType.__dict__:
raise UserTypeDefinitionException("field '{0}' conflicts with built-in attribute/method".format(k))
_transform_column(k, v)
# create db_name -> model name map for loading
db_map = {}
for field_name, field in field_dict.items():
db_map[field.db_field_name] = field_name
attrs['_fields'] = field_dict
attrs['_db_map'] = db_map
klass = super(UserTypeMetaClass, cls).__new__(cls, name, bases, attrs)
return klass
@six.add_metaclass(UserTypeMetaClass)
class UserType(BaseUserType):
"""
This class is used to model User Defined Types. To define a type, declare a class inheriting from this,
and assign field types as class attributes:
.. code-block:: python
# connect with default keyspace ...
from cassandra.cqlengine.columns import Text, Integer
from cassandra.cqlengine.usertype import UserType
class address(UserType):
street = Text()
zipcode = Integer()
from cassandra.cqlengine import management
management.sync_type(address)
Please see :ref:`user_types` for a complete example and discussion.
"""
__type_name__ = None
"""
*Optional.* Sets the name of the CQL type for this type.
If not specified, the type name will be the name of the class, with it's module name as it's prefix.
"""
| {
"content_hash": "8a17b9a0683b679c2a51bc979cc6e056",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 124,
"avg_line_length": 30.524509803921568,
"alnum_prop": 0.5829452384775976,
"repo_name": "jregovic/python-driver",
"id": "88ec033ba8e26600349f385cebd97b04e3e96eec",
"size": "6227",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "cassandra/cqlengine/usertype.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28918"
},
{
"name": "Python",
"bytes": "1710751"
}
],
"symlink_target": ""
} |
from rest_framework import viewsets
from rest_framework import pagination
from django_filters.rest_framework import DjangoFilterBackend
from .models import SkosConcept, SkosConceptScheme, SkosLabel, SkosNamespace
from .serializers import (
SkosLabelSerializer, SkosNamespaceSerializer, SkosConceptSchemeSerializer, SkosConceptSerializer
)
from .filters import SkosConceptFilter
from .api_renderers import RDFRenderer
from rest_framework.settings import api_settings
class LargeResultsSetPagination(pagination.PageNumberPagination):
page_size = 25
page_size_query_param = 'page_size'
max_page_size = 10000
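    # With DRF's PageNumberPagination, clients may request up to max_page_size
    # items per page via the 'page_size' query parameter.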
class SkosLabelViewSet(viewsets.ModelViewSet):
queryset = SkosLabel.objects.all()
serializer_class = SkosLabelSerializer
class SkosNamespaceViewSet(viewsets.ModelViewSet):
queryset = SkosNamespace.objects.all()
serializer_class = SkosNamespaceSerializer
class SkosConceptSchemeViewSet(viewsets.ModelViewSet):
queryset = SkosConceptScheme.objects.all()
serializer_class = SkosConceptSchemeSerializer
class SkosConceptViewSet(viewsets.ModelViewSet):
queryset = SkosConcept.objects.all()
serializer_class = SkosConceptSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = SkosConceptFilter
pagination_class = LargeResultsSetPagination
renderer_classes = tuple(api_settings.DEFAULT_RENDERER_CLASSES) + (RDFRenderer,)
| {
"content_hash": "94a4eb0c27ad0674318b6c9202e94632",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 100,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.8028368794326242,
"repo_name": "acdh-oeaw/vhioe",
"id": "6264881fd6db04c6b802fd4a6f639f65fadac7f1",
"size": "1410",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vocabs/api_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25850"
},
{
"name": "HTML",
"bytes": "105907"
},
{
"name": "JavaScript",
"bytes": "220270"
},
{
"name": "Python",
"bytes": "91715"
}
],
"symlink_target": ""
} |
from ..helpers.search_helpers import get_filters_from_request
def sections_for_lot(lot, builder):
if lot is None or lot == 'all':
sections = builder.filter(
{'lot': 'iaas'}).filter(
{'lot': 'paas'}).filter(
{'lot': 'saas'}).filter(
{'lot': 'scs'}).sections
else:
sections = builder.filter({'lot': lot}).sections
return sections
def filters_for_lot(lot, builder):
sections = sections_for_lot(lot, builder)
lot_filters = []
for section in sections:
section_filter = {
"label": section["name"],
"filters": [],
}
for question in section["questions"]:
section_filter["filters"].extend(
filters_for_question(question)
)
lot_filters.append(section_filter)
return lot_filters
def filters_for_question(question):
question_filters = []
if question['type'] == 'boolean':
question_filters.append({
'label': question['question'],
'name': question['id'],
'id': question['id'],
'value': 'true',
})
elif question['type'] in ['checkboxes', 'radios']:
for option in question['options']:
question_filters.append({
'label': option['label'],
'name': question['id'],
'id': '{}-{}'.format(
question['id'],
option['label'].lower().replace(' ', '-')),
'value': option['label'].lower(),
})
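    # Illustrative output for a radios question with id 'lot' and an option
    # labelled 'Cloud Hosting' (hypothetical values):
    #   {'label': 'Cloud Hosting', 'name': 'lot',
    #    'id': 'lot-cloud-hosting', 'value': 'cloud hosting'}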
return question_filters
def set_filter_states(filter_groups, request):
"""Sets a flag on each filter to mark it as set or not"""
request_filters = get_filters_from_request(request)
for filter_group in filter_groups:
for filter in filter_group['filters']:
filter['checked'] = False
param_values = request_filters.getlist(
filter['name'],
type=str
)
if len(param_values) > 0:
filter['checked'] = (
filter['value'] in param_values
)
| {
"content_hash": "f758565c69eac4184a48278cb434e56e",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 63,
"avg_line_length": 29.256756756756758,
"alnum_prop": 0.5108545034642032,
"repo_name": "AusDTO/dto-digitalmarketplace-buyer-frontend",
"id": "56c82725fb620d1c30758d485f1966271ee30eac",
"size": "2165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/presenters/search_presenters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "904"
},
{
"name": "Gherkin",
"bytes": "6490"
},
{
"name": "HTML",
"bytes": "314252"
},
{
"name": "JavaScript",
"bytes": "38480"
},
{
"name": "Makefile",
"bytes": "1461"
},
{
"name": "Python",
"bytes": "414306"
},
{
"name": "SCSS",
"bytes": "140490"
},
{
"name": "Shell",
"bytes": "5964"
}
],
"symlink_target": ""
} |
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from mse.constants import *
from mse.charset import fix_charset_collation
class EqualityMixin:
def __eq__(self, other):
return (type(other) is type(self)) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
class Table(EqualityMixin):
def __init__(self, name, engine=None, charset=None, collation=None):
self.name = name
self.columns = OrderedDict()
self.indexes = OrderedDict()
self.engine = engine
self.charset, self.collation = fix_charset_collation(charset, collation)
def add_or_update_column(self, column):
assert isinstance(column, Column)
self.columns[column.name] = column
def add_or_update_index(self, index):
assert isinstance(index, Index)
for index_column in index.columns:
if not self.columns.get(index_column.name):
raise ValueError(
"unable to create index [{0}], column [{1}] does not exists in table"
.format(index, index_column.name))
self.indexes[index.name] = index
class IndexColumn(EqualityMixin):
def __init__(self, name, length=0, direction=DIRECTION_ASC):
self.name = name
self.length = length
self.direction = direction
def __repr__(self):
return self.__str__()
def __str__(self):
base = self.name
if self.length > 0:
base += "({0})".format(self.length)
if self.direction == DIRECTION_DESC:
base += " {0}".format(DIRECTION_DESC)
return base
class Index(EqualityMixin):
def __init__(self, name, columns, is_primary=False, is_unique=False):
name = name.strip()
assert name
assert isinstance(columns, list)
assert len(columns) >= 1
self.name = name
self.columns = []
for col in columns:
if isinstance(col, IndexColumn):
self.columns.append(col)
elif isinstance(col, str):
self.columns.append(IndexColumn(col))
else:
raise ValueError("unknown index column {0}".format(col))
self.is_primary = is_primary
# Overwrite is_unique when it's primary
self.is_unique = is_primary or is_unique
def __repr__(self):
return self.__str__()
def __str__(self):
col_str = [str(x) for x in self.columns]
cols = ", ".join(col_str)
if self.is_primary:
return "PRIMARY KEY ({0})".format(cols)
elif self.is_unique:
return "UNIQUE KEY {0} ({1})".format(self.name, cols)
else:
return "KEY {0} ({1})".format(self.name, cols)
class Column(EqualityMixin):
def __init__(self, name, data_type, length=0, decimal=None, nullable=True, charset=None, collation=None):
name = name.strip()
data_type = data_type.strip().upper()
assert name
assert data_type in STRING_TYPES or data_type in NUMERIC_TYPES or data_type in DATE_TYPES
self.name = name
self.data_type = data_type
self.length = length
self.decimal = decimal
self.nullable = nullable
self.charset, self.collation = fix_charset_collation(charset, collation)
def __repr__(self):
return self.__str__()
def __str__(self):
nn = "" if self.nullable else "NOT NULL"
if self.decimal is not None:
return "{0} {1}({2},{3}) {4}".format(self.name, self.data_type, self.length, self.decimal, nn).strip()
elif self.data_type in DATE_TYPES or self.data_type in NUMERIC_TYPES:
return "{0} {1} {2}".format(self.name, self.data_type, nn).strip()
elif self.data_type in STRING_TYPES:
cs = "CHARACTER SET {0} COLLATION {1}".format(self.charset, self.collation) if self.charset else ""
return "{0} {1}({2}) {3} {4}".format(self.name, self.data_type, self.length, cs, nn).strip()
| {
"content_hash": "f46c42d4316cf2e01d0094e931ac9498",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 114,
"avg_line_length": 34.42857142857143,
"alnum_prop": 0.5887234561874543,
"repo_name": "frail/mysql-size-estimator",
"id": "11d51ce66d2d64bca35238a2556a5648331cc6b1",
"size": "4097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mse/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55159"
}
],
"symlink_target": ""
} |
import json
import httpretty
import pytest
import pypuppetdb
def stub_request(url, data=None, method=httpretty.GET, status=200, **kwargs):
if data is None:
body = '[]'
else:
with open(data, 'r') as d:
            body = json.load(d)
return httpretty.register_uri(method, url, body=body, status=status,
**kwargs)
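# httpretty patches the socket layer, so URIs registered via stub_request or
# register_uri are served in-process and no live PuppetDB is required.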
@pytest.fixture(params=['string', 'QueryBuilder'])
def query(request):
key = 'certname'
value = 'node1'
if request.param == 'string':
return '["{0}", "=", "{1}"]'.format(key, value)
elif request.param == 'QueryBuilder':
return pypuppetdb.QueryBuilder.EqualsOperator(key, value)
class TestQueryAPI(object):
def test_facts(self, api):
facts_body = [{
'certname': 'test_certname',
'name': 'test_name',
'value': 'test_value',
'environment': 'test_environment',
}]
facts_url = 'http://localhost:8080/pdb/query/v4/facts'
httpretty.enable()
httpretty.register_uri(httpretty.GET, facts_url,
body=json.dumps(facts_body))
for fact in api.facts():
pass
assert httpretty.last_request().path == '/pdb/query/v4/facts'
httpretty.disable()
httpretty.reset()
def test_fact_names(self, api):
httpretty.enable()
stub_request('http://localhost:8080/pdb/query/v4/fact-names')
api.fact_names()
assert httpretty.last_request().path == '/pdb/query/v4/fact-names'
httpretty.disable()
httpretty.reset()
def test_normalize_resource_type(self, api):
assert api._normalize_resource_type('sysctl::value') == \
'Sysctl::Value'
assert api._normalize_resource_type('user') == 'User'
def test_environments(self, api):
httpretty.enable()
stub_request('http://localhost:8080/pdb/query/v4/environments')
api.environments()
assert httpretty.last_request().path == '/pdb/query/v4/environments'
httpretty.disable()
httpretty.reset()
def test_inventory(self, api):
inventory_body = [{
'certname': 'test_certname',
'timestamp': '2017-06-05T20:18:23.374Z',
'environment': 'test_environment',
'facts': 'test_facts',
'trusted': 'test_trusted'
}]
inventory_url = 'http://localhost:8080/pdb/query/v4/inventory'
httpretty.enable()
httpretty.register_uri(httpretty.GET, inventory_url,
body=json.dumps(inventory_body))
for inv in api.inventory():
pass
assert httpretty.last_request().path == '/pdb/query/v4/inventory'
httpretty.disable()
httpretty.reset()
def test_nodes_single(self, api):
body = {
"cached_catalog_status": "not_used",
"catalog_environment": "production",
"catalog_timestamp": "2016-08-15T11:06:26.275Z",
"certname": "greenserver.vm",
"deactivated": None,
"expired": None,
"facts_environment": "production",
"facts_timestamp": "2016-08-15T11:06:26.140Z",
"latest_report_hash": "4a956674b016d95a7b77c99513ba26e4a744f8d1",
"latest_report_noop": False,
"latest_report_noop_pending": None,
"latest_report_status": "changed",
"report_environment": "production",
"report_timestamp": "2016-08-15T11:06:18.393Z"
}
url = 'http://localhost:8080/pdb/query/v4/nodes'
httpretty.enable()
httpretty.register_uri(httpretty.GET, url,
body=json.dumps(body))
nodes = list(api.nodes(query='["=","certname","greenserver.vm"'))
assert len(nodes) == 1
assert nodes[0].name == "greenserver.vm"
assert httpretty.last_request().path.startswith('/pdb/query/v4/nodes')
httpretty.disable()
httpretty.reset()
| {
"content_hash": "f5ffe2efaa564ea8200f2923c23d7c2a",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 78,
"avg_line_length": 32.488,
"alnum_prop": 0.5685791676926866,
"repo_name": "puppet-community/pypuppetdb",
"id": "7c38c2fccbad5a8596bb1c392730c1528503e293",
"size": "4061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_api_query.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "200266"
}
],
"symlink_target": ""
} |
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import fft
from gnuradio import gr
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from optparse import OptionParser
import time
class SALSA_eceiver(gr.top_block):
def __init__(self):
gr.top_block.__init__(self, "Salsa Eceiver")
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 5000000.0
self.outfile = outfile = "/tmp/vale.dat"
self.int_time = int_time = 10
self.gain = gain = 60
self.fftsize = fftsize = 4096
self.c_freq = c_freq = 1420.4e6
##################################################
# Blocks
##################################################
self.uhd_usrp_source_0 = uhd.usrp_source(
device_addr="addr=192.168.10.2",
stream_args=uhd.stream_args(
cpu_format="fc32",
channels=range(1),
),
)
self.uhd_usrp_source_0.set_samp_rate(samp_rate)
self.uhd_usrp_source_0.set_center_freq(c_freq, 0)
self.uhd_usrp_source_0.set_gain(gain, 0)
self.fft_vxx_0 = fft.fft_vcc(fftsize, True, (window.blackmanharris(fftsize)), True, 1)
self.blocks_vector_to_stream_0 = blocks.vector_to_stream(gr.sizeof_gr_complex*1, fftsize)
self.blocks_stream_to_vector_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, fftsize)
self.blocks_head_0 = blocks.head(gr.sizeof_float*1, int(int_time*samp_rate))
self.blocks_file_sink_0 = blocks.file_sink(gr.sizeof_float*1, outfile, False)
self.blocks_file_sink_0.set_unbuffered(False)
self.blocks_complex_to_mag_squared_0 = blocks.complex_to_mag_squared(1)
##################################################
# Connections
##################################################
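        # Signal path: USRP source -> stream_to_vector(fftsize) -> FFT ->
        # vector_to_stream -> |.|^2 -> head(int_time * samp_rate samples) -> file sink.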
self.connect((self.fft_vxx_0, 0), (self.blocks_vector_to_stream_0, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.blocks_stream_to_vector_0, 0))
self.connect((self.blocks_vector_to_stream_0, 0), (self.blocks_complex_to_mag_squared_0, 0))
self.connect((self.blocks_complex_to_mag_squared_0, 0), (self.blocks_head_0, 0))
self.connect((self.blocks_head_0, 0), (self.blocks_file_sink_0, 0))
self.connect((self.blocks_stream_to_vector_0, 0), (self.fft_vxx_0, 0))
# QT sink close method reimplementation
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.uhd_usrp_source_0.set_samp_rate(self.samp_rate)
def get_outfile(self):
return self.outfile
def set_outfile(self, outfile):
self.outfile = outfile
self.blocks_file_sink_0.open(self.outfile)
def get_int_time(self):
return self.int_time
def set_int_time(self, int_time):
self.int_time = int_time
def get_gain(self):
return self.gain
def set_gain(self, gain):
self.gain = gain
self.uhd_usrp_source_0.set_gain(self.gain, 0)
def get_fftsize(self):
return self.fftsize
def set_fftsize(self, fftsize):
self.fftsize = fftsize
def get_c_freq(self):
return self.c_freq
def set_c_freq(self, c_freq):
self.c_freq = c_freq
self.uhd_usrp_source_0.set_center_freq(self.c_freq, 0)
if __name__ == '__main__':
parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
(options, args) = parser.parse_args()
tb = SALSA_eceiver()
tb.start()
tb.wait()
| {
"content_hash": "300a5fcf2c588a64c2b94bd2b537911d",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 100,
"avg_line_length": 35.09345794392523,
"alnum_prop": 0.5736351531291611,
"repo_name": "varenius/salsa",
"id": "4ebe7333ca04f08fbc730656e2f2def3514b7a7a",
"size": "3970",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "USRP/usrp_gnuradio_dev/SALSA_eceiver.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "587"
},
{
"name": "MATLAB",
"bytes": "137727"
},
{
"name": "PHP",
"bytes": "6663"
},
{
"name": "Python",
"bytes": "311661"
},
{
"name": "Shell",
"bytes": "5475"
},
{
"name": "TeX",
"bytes": "324522"
},
{
"name": "Vim Script",
"bytes": "940"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
import pprint
import ujson
from typing import Dict, Any
PAGER_DUTY_EVENT_NAMES = {
'incident.trigger': 'triggered',
'incident.acknowledge': 'acknowledged',
'incident.unacknowledge': 'unacknowledged',
'incident.resolve': 'resolved',
'incident.assign': 'assigned',
'incident.escalate': 'escalated',
    'incident.delegate': 'delegated',
}
def build_pagerduty_formatdict(message):
    # Normalize the message dict; after this, all keys will exist. I would
    # rather have some strange-looking messages than drop pages.
format_dict = {} # type: Dict[str, Any]
format_dict['action'] = PAGER_DUTY_EVENT_NAMES[message['type']]
format_dict['incident_id'] = message['data']['incident']['id']
format_dict['incident_num'] = message['data']['incident']['incident_number']
format_dict['incident_url'] = message['data']['incident']['html_url']
format_dict['service_name'] = message['data']['incident']['service']['name']
format_dict['service_url'] = message['data']['incident']['service']['html_url']
    # This key can be missing or null
if message['data']['incident'].get('assigned_to_user', None):
format_dict['assigned_to_email'] = message['data']['incident']['assigned_to_user']['email']
format_dict['assigned_to_username'] = message['data']['incident']['assigned_to_user']['email'].split('@')[0]
format_dict['assigned_to_url'] = message['data']['incident']['assigned_to_user']['html_url']
else:
format_dict['assigned_to_email'] = 'nobody'
format_dict['assigned_to_username'] = 'nobody'
format_dict['assigned_to_url'] = ''
    # This key can be missing or null
if message['data']['incident'].get('resolved_by_user', None):
format_dict['resolved_by_email'] = message['data']['incident']['resolved_by_user']['email']
format_dict['resolved_by_username'] = message['data']['incident']['resolved_by_user']['email'].split('@')[0]
format_dict['resolved_by_url'] = message['data']['incident']['resolved_by_user']['html_url']
else:
format_dict['resolved_by_email'] = 'nobody'
format_dict['resolved_by_username'] = 'nobody'
format_dict['resolved_by_url'] = ''
trigger_message = []
trigger_subject = message['data']['incident']['trigger_summary_data'].get('subject', '')
if trigger_subject:
trigger_message.append(trigger_subject)
trigger_description = message['data']['incident']['trigger_summary_data'].get('description', '')
if trigger_description:
trigger_message.append(trigger_description)
format_dict['trigger_message'] = u'\n'.join(trigger_message)
return format_dict
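# Illustrative sketch of the message shape consumed above; only the keys are
# taken from the lookups in build_pagerduty_formatdict(), the values are
# invented for documentation purposes:
#
# {
#   "type": "incident.trigger",
#   "data": {"incident": {
#       "id": "PABC123",
#       "incident_number": 42,
#       "html_url": "https://example.pagerduty.com/incidents/PABC123",
#       "service": {"name": "production",
#                   "html_url": "https://example.pagerduty.com/services/PSVC1"},
#       "assigned_to_user": {"email": "oncall@example.com",
#                            "html_url": "https://example.pagerduty.com/users/PUSR1"},
#       "trigger_summary_data": {"subject": "500 errors on www",
#                                "description": "Error rate above threshold"}}}
# }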
def send_raw_pagerduty_json(user_profile, client, stream, message, topic):
subject = topic or 'pagerduty'
body = (
u'Unknown pagerduty message\n'
u'``` py\n'
u'%s\n'
u'```') % (pprint.pformat(message),)
check_send_message(user_profile, client, 'stream',
[stream], subject, body)
def send_formated_pagerduty(user_profile, client, stream, message_type, format_dict, topic):
if message_type in ('incident.trigger', 'incident.unacknowledge'):
template = (u':imp: Incident '
u'[{incident_num}]({incident_url}) {action} by '
u'[{service_name}]({service_url}) and assigned to '
u'[{assigned_to_username}@]({assigned_to_url})\n\n>{trigger_message}')
elif message_type == 'incident.resolve' and format_dict['resolved_by_url']:
template = (u':grinning: Incident '
u'[{incident_num}]({incident_url}) resolved by '
u'[{resolved_by_username}@]({resolved_by_url})\n\n>{trigger_message}')
elif message_type == 'incident.resolve' and not format_dict['resolved_by_url']:
template = (u':grinning: Incident '
u'[{incident_num}]({incident_url}) resolved\n\n>{trigger_message}')
else:
template = (u':no_good: Incident [{incident_num}]({incident_url}) '
u'{action} by [{assigned_to_username}@]({assigned_to_url})\n\n>{trigger_message}')
subject = topic or u'incident {incident_num}'.format(**format_dict)
body = template.format(**format_dict)
check_send_message(user_profile, client, 'stream',
[stream], subject, body)
@api_key_only_webhook_view('PagerDuty')
@has_request_variables
def api_pagerduty_webhook(request, user_profile, client, payload=REQ(argument_type='body'),
stream=REQ(default='pagerduty'), topic=REQ(default=None)):
for message in payload['messages']:
message_type = message['type']
        if message_type not in PAGER_DUTY_EVENT_NAMES:
            send_raw_pagerduty_json(user_profile, client, stream, message, topic)
            continue
try:
format_dict = build_pagerduty_formatdict(message)
except:
send_raw_pagerduty_json(user_profile, client, stream, message, topic)
else:
send_formated_pagerduty(user_profile, client, stream, message_type, format_dict, topic)
return json_success()
| {
"content_hash": "7703ff5477dc5f95588136dee2239dd5",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 116,
"avg_line_length": 44.33613445378151,
"alnum_prop": 0.6432903714935557,
"repo_name": "peiwei/zulip",
"id": "ad33a8c1b9b8b8beecbfb668fd01e324795e4e6f",
"size": "5314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/views/webhooks/pagerduty.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "183830"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5516"
},
{
"name": "HTML",
"bytes": "397966"
},
{
"name": "JavaScript",
"bytes": "1588795"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "96085"
},
{
"name": "Python",
"bytes": "2010761"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "33341"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
# Examples:
# url(r'^$', 'aboyunapp.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
]
| {
"content_hash": "258624370627307113bd7ef5e36da35d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 54,
"avg_line_length": 25.7,
"alnum_prop": 0.6342412451361867,
"repo_name": "markessien/aboyun",
"id": "f924c5d64fed4730fdbef0dc5b9b91b43d9a4867",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aboyunapp/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6796"
}
],
"symlink_target": ""
} |
from __future__ import (unicode_literals, division, absolute_import, print_function)
from powerline.lib.watcher import create_file_watcher
def list_segment_key_values(segment, theme_configs, segment_data, key, function_name=None, name=None, module=None, default=None):
try:
yield segment[key]
except KeyError:
pass
found_module_key = False
for theme_config in theme_configs:
try:
segment_data = theme_config['segment_data']
except KeyError:
pass
else:
if function_name and not name:
if module:
try:
yield segment_data[module + '.' + function_name][key]
found_module_key = True
except KeyError:
pass
if not found_module_key:
try:
yield segment_data[function_name][key]
except KeyError:
pass
if name:
try:
yield segment_data[name][key]
except KeyError:
pass
if segment_data is not None:
try:
yield segment_data[key]
except KeyError:
pass
yield default
def get_segment_key(merge, *args, **kwargs):
if merge:
ret = None
for value in list_segment_key_values(*args, **kwargs):
if ret is None:
ret = value
elif isinstance(ret, dict) and isinstance(value, dict):
old_ret = ret
ret = value.copy()
ret.update(old_ret)
else:
return ret
return ret
else:
return next(list_segment_key_values(*args, **kwargs))
def get_function(data, segment):
function_name = segment['function']
if '.' in function_name:
module, function_name = function_name.rpartition('.')[::2]
else:
module = data['default_module']
function = data['get_module_attr'](module, function_name, prefix='segment_generator')
if not function:
raise ImportError('Failed to obtain segment function')
return None, function, module, function_name, segment.get('name')
def get_string(data, segment):
name = segment.get('name')
return data['get_key'](False, segment, None, None, name, 'contents'), None, None, None, name
segment_getters = {
'function': get_function,
'string': get_string,
'segment_list': get_function,
}
def get_attr_func(contents_func, key, args, is_space_func=False):
try:
func = getattr(contents_func, key)
except AttributeError:
return None
else:
if is_space_func:
def expand_func(pl, amount, segment):
try:
return func(pl=pl, amount=amount, segment=segment, **args)
except Exception as e:
pl.exception('Exception while computing {0} function: {1}', key, str(e))
return segment['contents'] + (' ' * amount)
return expand_func
else:
return lambda pl, shutdown_event: func(pl=pl, shutdown_event=shutdown_event, **args)
def process_segment_lister(pl, segment_info, parsed_segments, side, mode, colorscheme,
lister, subsegments, patcher_args):
subsegments = [
subsegment
for subsegment in subsegments
if subsegment['display_condition'](pl, segment_info, mode)
]
for subsegment_info, subsegment_update in lister(pl=pl, segment_info=segment_info, **patcher_args):
draw_inner_divider = subsegment_update.pop('draw_inner_divider', False)
old_pslen = len(parsed_segments)
for subsegment in subsegments:
if subsegment_update:
subsegment = subsegment.copy()
subsegment.update(subsegment_update)
if 'priority_multiplier' in subsegment_update and subsegment['priority']:
subsegment['priority'] *= subsegment_update['priority_multiplier']
process_segment(
pl,
side,
subsegment_info,
parsed_segments,
subsegment,
mode,
colorscheme,
)
new_pslen = len(parsed_segments)
while parsed_segments[new_pslen - 1]['literal_contents'][1]:
new_pslen -= 1
if new_pslen > old_pslen + 1 and draw_inner_divider is not None:
for i in range(old_pslen, new_pslen - 1) if side == 'left' else range(old_pslen + 1, new_pslen):
parsed_segments[i]['draw_soft_divider'] = draw_inner_divider
return None
def set_segment_highlighting(pl, colorscheme, segment, mode):
if segment['literal_contents'][1]:
return True
try:
highlight_group_prefix = segment['highlight_group_prefix']
except KeyError:
hl_groups = lambda hlgs: hlgs
else:
hl_groups = lambda hlgs: [highlight_group_prefix + ':' + hlg for hlg in hlgs] + hlgs
try:
segment['highlight'] = colorscheme.get_highlighting(
hl_groups(segment['highlight_groups']),
mode,
segment.get('gradient_level')
)
if segment['divider_highlight_group']:
segment['divider_highlight'] = colorscheme.get_highlighting(
hl_groups([segment['divider_highlight_group']]),
mode
)
else:
segment['divider_highlight'] = None
except Exception as e:
pl.exception('Failed to set highlight group: {0}', str(e))
return False
else:
return True
def process_segment(pl, side, segment_info, parsed_segments, segment, mode, colorscheme):
segment = segment.copy()
pl.prefix = segment['name']
if segment['type'] in ('function', 'segment_list'):
try:
if segment['type'] == 'function':
contents = segment['contents_func'](pl, segment_info)
else:
contents = segment['contents_func'](pl, segment_info, parsed_segments, side, mode, colorscheme)
except Exception as e:
pl.exception('Exception while computing segment: {0}', str(e))
return
if contents is None:
return
if isinstance(contents, list):
# Needs copying here, but it was performed at the very start of the
# function
segment_base = segment
if contents:
draw_divider_position = -1 if side == 'left' else 0
for key, i, newval in (
('before', 0, ''),
('after', -1, ''),
('draw_soft_divider', draw_divider_position, True),
('draw_hard_divider', draw_divider_position, True),
):
try:
contents[i][key] = segment_base.pop(key)
segment_base[key] = newval
except KeyError:
pass
draw_inner_divider = None
if side == 'right':
append = parsed_segments.append
else:
pslen = len(parsed_segments)
append = lambda item: parsed_segments.insert(pslen, item)
for subsegment in (contents if side == 'right' else reversed(contents)):
segment_copy = segment_base.copy()
segment_copy.update(subsegment)
if draw_inner_divider is not None:
segment_copy['draw_soft_divider'] = draw_inner_divider
draw_inner_divider = segment_copy.pop('draw_inner_divider', None)
if set_segment_highlighting(pl, colorscheme, segment_copy, mode):
append(segment_copy)
else:
segment['contents'] = contents
if set_segment_highlighting(pl, colorscheme, segment, mode):
parsed_segments.append(segment)
elif segment['width'] == 'auto' or (segment['type'] == 'string' and segment['contents'] is not None):
if set_segment_highlighting(pl, colorscheme, segment, mode):
parsed_segments.append(segment)
always_true = lambda pl, segment_info, mode: True
get_fallback_segment = {
'name': 'fallback',
'type': 'string',
'highlight_groups': ['background'],
'divider_highlight_group': None,
'before': None,
'after': None,
'contents': '',
'literal_contents': (0, ''),
'priority': None,
'draw_soft_divider': True,
'draw_hard_divider': True,
'draw_inner_divider': True,
'display_condition': always_true,
'width': None,
'align': None,
'expand': None,
'truncate': None,
'startup': None,
'shutdown': None,
'_rendered_raw': '',
'_rendered_hl': '',
'_len': None,
'_contents_len': None,
}.copy
def gen_segment_getter(pl, ext, common_config, theme_configs, default_module, get_module_attr, top_theme):
data = {
'default_module': default_module or 'powerline.segments.' + ext,
'get_module_attr': get_module_attr,
'segment_data': None,
}
def get_key(merge, segment, module, function_name, name, key, default=None):
return get_segment_key(merge, segment, theme_configs, data['segment_data'], key, function_name, name, module, default)
data['get_key'] = get_key
def get_selector(function_name):
if '.' in function_name:
module, function_name = function_name.rpartition('.')[::2]
else:
module = 'powerline.selectors.' + ext
function = get_module_attr(module, function_name, prefix='segment_generator/selector_function')
if not function:
pl.error('Failed to get segment selector, ignoring it')
return function
def get_segment_selector(segment, selector_type):
try:
function_name = segment[selector_type + '_function']
except KeyError:
function = None
else:
function = get_selector(function_name)
try:
modes = segment[selector_type + '_modes']
except KeyError:
modes = None
if modes:
if function:
return lambda pl, segment_info, mode: (
mode in modes
or function(pl=pl, segment_info=segment_info, mode=mode)
)
else:
return lambda pl, segment_info, mode: mode in modes
else:
if function:
return lambda pl, segment_info, mode: (
function(pl=pl, segment_info=segment_info, mode=mode)
)
else:
return None
def gen_display_condition(segment):
include_function = get_segment_selector(segment, 'include')
exclude_function = get_segment_selector(segment, 'exclude')
if include_function:
if exclude_function:
return lambda *args: (
include_function(*args)
and not exclude_function(*args))
else:
return include_function
else:
if exclude_function:
return lambda *args: not exclude_function(*args)
else:
return always_true
def get(segment, side):
segment_type = segment.get('type', 'function')
try:
get_segment_info = segment_getters[segment_type]
except KeyError:
pl.error('Unknown segment type: {0}', segment_type)
return None
try:
contents, _contents_func, module, function_name, name = get_segment_info(data, segment)
except Exception as e:
pl.exception('Failed to generate segment from {0!r}: {1}', segment, str(e), prefix='segment_generator')
return None
if not get_key(False, segment, module, function_name, name, 'display', True):
return None
segment_datas = getattr(_contents_func, 'powerline_segment_datas', None)
if segment_datas:
try:
data['segment_data'] = segment_datas[top_theme]
except KeyError:
pass
if segment_type == 'function':
highlight_groups = [function_name]
else:
highlight_groups = segment.get('highlight_groups') or [name]
if segment_type in ('function', 'segment_list'):
args = dict((
(str(k), v)
for k, v in
get_key(True, segment, module, function_name, name, 'args', {}).items()
))
display_condition = gen_display_condition(segment)
if segment_type == 'segment_list':
# Handle startup and shutdown of _contents_func?
subsegments = [
subsegment
for subsegment in (
get(subsegment, side)
for subsegment in segment['segments']
) if subsegment
]
return {
'name': name or function_name,
'type': segment_type,
'highlight_groups': None,
'divider_highlight_group': None,
'before': None,
'after': None,
'contents_func': lambda pl, segment_info, parsed_segments, side, mode, colorscheme: (
process_segment_lister(
pl, segment_info, parsed_segments, side, mode, colorscheme,
patcher_args=args,
subsegments=subsegments,
lister=_contents_func,
)
),
'contents': None,
'literal_contents': None,
'priority': None,
'draw_soft_divider': None,
'draw_hard_divider': None,
'draw_inner_divider': None,
'side': side,
'display_condition': display_condition,
'width': None,
'align': None,
'expand': None,
'truncate': None,
'startup': None,
'shutdown': None,
'_rendered_raw': '',
'_rendered_hl': '',
'_len': None,
'_contents_len': None,
}
if segment_type == 'function':
startup_func = get_attr_func(_contents_func, 'startup', args)
shutdown_func = getattr(_contents_func, 'shutdown', None)
expand_func = get_attr_func(_contents_func, 'expand', args, True)
truncate_func = get_attr_func(_contents_func, 'truncate', args, True)
if hasattr(_contents_func, 'powerline_requires_filesystem_watcher'):
create_watcher = lambda: create_file_watcher(pl, common_config['watcher'])
args[str('create_watcher')] = create_watcher
if hasattr(_contents_func, 'powerline_requires_segment_info'):
contents_func = lambda pl, segment_info: _contents_func(pl=pl, segment_info=segment_info, **args)
else:
contents_func = lambda pl, segment_info: _contents_func(pl=pl, **args)
else:
startup_func = None
shutdown_func = None
contents_func = None
expand_func = None
truncate_func = None
return {
'name': name or function_name,
'type': segment_type,
'highlight_groups': highlight_groups,
'divider_highlight_group': None,
'before': get_key(False, segment, module, function_name, name, 'before', ''),
'after': get_key(False, segment, module, function_name, name, 'after', ''),
'contents_func': contents_func,
'contents': contents,
'literal_contents': (0, ''),
'priority': segment.get('priority', None),
'draw_hard_divider': segment.get('draw_hard_divider', True),
'draw_soft_divider': segment.get('draw_soft_divider', True),
'draw_inner_divider': segment.get('draw_inner_divider', False),
'side': side,
'display_condition': display_condition,
'width': segment.get('width'),
'align': segment.get('align', 'l'),
'expand': expand_func,
'truncate': truncate_func,
'startup': startup_func,
'shutdown': shutdown_func,
'_rendered_raw': '',
'_rendered_hl': '',
'_len': None,
'_contents_len': None,
}
return get
| {
"content_hash": "eb57ff897bfe8617ebb04e23b3633172",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 129,
"avg_line_length": 29.971046770601337,
"alnum_prop": 0.669242773277848,
"repo_name": "prvnkumar/powerline",
"id": "e48689b58de69f05690863d8ae87055cb11579d2",
"size": "13487",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "powerline/segment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3781"
},
{
"name": "Lua",
"bytes": "400"
},
{
"name": "Python",
"bytes": "731291"
},
{
"name": "Shell",
"bytes": "49776"
},
{
"name": "VimL",
"bytes": "16969"
}
],
"symlink_target": ""
} |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/contrib/tpu/proto/topology.proto',
package='tensorflow.tpu',
syntax='proto3',
serialized_pb=_b('\n+tensorflow/contrib/tpu/proto/topology.proto\x12\x0etensorflow.tpu\"t\n\rTopologyProto\x12\x12\n\nmesh_shape\x18\x01 \x03(\x05\x12\x11\n\tnum_tasks\x18\x02 \x01(\x05\x12 \n\x18num_tpu_devices_per_task\x18\x03 \x01(\x05\x12\x1a\n\x12\x64\x65vice_coordinates\x18\x04 \x03(\x05\x42\x03\xf8\x01\x01\x62\x06proto3')
)
_TOPOLOGYPROTO = _descriptor.Descriptor(
name='TopologyProto',
full_name='tensorflow.tpu.TopologyProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mesh_shape', full_name='tensorflow.tpu.TopologyProto.mesh_shape', index=0,
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_tasks', full_name='tensorflow.tpu.TopologyProto.num_tasks', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_tpu_devices_per_task', full_name='tensorflow.tpu.TopologyProto.num_tpu_devices_per_task', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_coordinates', full_name='tensorflow.tpu.TopologyProto.device_coordinates', index=3,
number=4, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=179,
)
DESCRIPTOR.message_types_by_name['TopologyProto'] = _TOPOLOGYPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TopologyProto = _reflection.GeneratedProtocolMessageType('TopologyProto', (_message.Message,), dict(
DESCRIPTOR = _TOPOLOGYPROTO,
__module__ = 'tensorflow.contrib.tpu.proto.topology_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.tpu.TopologyProto)
))
_sym_db.RegisterMessage(TopologyProto)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\370\001\001'))
# @@protoc_insertion_point(module_scope)
| {
"content_hash": "f3dd64512963443be1c595df4d3324b0",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 332,
"avg_line_length": 37.943820224719104,
"alnum_prop": 0.7234231566479123,
"repo_name": "ryfeus/lambda-packs",
"id": "81c992016e087438c9e481dea6ee33d5c5963f7c",
"size": "3491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Keras_tensorflow_nightly/source2.7/tensorflow/contrib/tpu/proto/topology_pb2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-WMIRegCachedRDPConnection',
'Author': ['@harmj0y'],
'Description': ('Uses remote registry functionality to query all entries for the '
'Windows Remote Desktop Connection Client" on a machine. Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'ComputerName' : {
'Description' : 'The hostname or IP to query for local group users.',
'Required' : False,
'Value' : 'localhost'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.strip_powershell_comments(moduleCode)
script += "\n" + moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
| {
"content_hash": "55717764accad42b199d585de83a6a87",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 121,
"avg_line_length": 34.43478260869565,
"alnum_prop": 0.51010101010101,
"repo_name": "EmpireProject/Empire",
"id": "7342618da523efb15c195c7917417ce0414511b9",
"size": "3168",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/modules/powershell/situational_awareness/network/powerview/get_cached_rdpconnection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "C#",
"bytes": "1893"
},
{
"name": "Dockerfile",
"bytes": "1870"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2198"
},
{
"name": "PowerShell",
"bytes": "17009836"
},
{
"name": "Python",
"bytes": "2965491"
},
{
"name": "Shell",
"bytes": "10525"
}
],
"symlink_target": ""
} |
from __future__ import division
import json
import os
import os.path
# third party imports
import wx
# pyspend imports
import db
import gui
CONFIG = 'config.json'
OPEN = 0
SAVE = 1
def run():
spend = PySpend(redirect=False)
spend.MainLoop()
class PySpend(wx.App):
def OnInit(self):
self.config = self._read_config()
self.frame = PySpendController(config=self.config, parent=None)
self.frame.Show()
self.SetTopWindow(self.frame)
return True
def OnExit(self):
self._write_config()
def _read_config(self):
'''Read the config file.'''
config = json.load(open(self._config_path(), 'r'))
return config
def _write_config(self):
'''Write changes to the config file on exit.'''
with open(self._config_path(), 'w') as f:
json.dump(self.config, f, indent=4)
def _config_path(self):
path = os.path.dirname(__file__)
return os.path.join(path, CONFIG)
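# The configuration file is a small JSON document; a minimal example of the
# shape read and written above (the path value is illustrative):
#
# {
#     "DB": "/home/user/pyspend.sqlite"
# }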
class PySpendController(gui.PySpendGUI):
def __init__(self, config, *args, **kwargs):
super(PySpendController, self).__init__(*args, **kwargs)
# check database
self.config = config
self.db_path = config['DB']
self.check_db()
self.db = db.connect(self.db_path)
self._bind_events()
# initialise some widgets
self.config_cat_list()
self.refresh_cat_list()
self.refresh_category()
self.config_item_list()
self.refresh_item_list()
def _bind_events(self):
self.date_picker.Bind(wx.EVT_DATE_CHANGED, self.pick_date)
self.date_spin.Bind(wx.EVT_SPIN_UP, self.date_next)
self.date_spin.Bind(wx.EVT_SPIN_DOWN, self.date_prev)
self.add_item.Bind(wx.EVT_BUTTON, self.new_item)
self.add_cat.Bind(wx.EVT_BUTTON, self.new_cat)
self.cost.Bind(wx.EVT_KILL_FOCUS, self.validate_cost)
self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.delete_popup)
self.menu_delete.Bind(wx.EVT_MENU, self.delete_item)
############################################################################
# open and save database files
def check_db(self):
        '''Check that a database path is configured and exists; otherwise
        prompt the user to open an existing file or create a new one.'''
if not self.db_path or not os.path.exists(self.db_path):
msg = 'No database file. Please open an existing file, or create a new one.'
caption = 'No Database'
choices = ['Open', 'Create']
dlg = wx.SingleChoiceDialog(self, msg, caption, choices)
if dlg.ShowModal() == wx.ID_OK:
action = dlg.GetStringSelection()
if action == 'Open':
self.db_path = self.open_db()
else:
self.db_path = self.save_db()
self.config['DB'] = self.db_path
dlg.Destroy()
if not self.db_path:
self.quit_no_database()
self.Destroy()
self.config['DB'] = self.db_path
def open_db(self):
'''Open a database file.'''
return self._file_dialog(OPEN)
def save_db(self):
'''Save a database file to a new location or create a new file when
the file doesn't exist.'''
return self._file_dialog(SAVE)
def quit_no_database(self):
'''Inform the user that no database was selected, then quit app.'''
msg = 'Quitting application because no database was selected.'
caption = 'Exit, no database'
style = wx.OK | wx.ICON_EXCLAMATION
dlg = wx.MessageDialog(self, msg, caption, style)
dlg.ShowModal()
dlg.Destroy()
def _file_dialog(self, type_):
if type_ == OPEN:
style = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
msg = "Open a database"
else:
style = wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT
msg = "Save or create a database"
wildcard = "Database Files (*.sqlite)|*.sqlite"
dlg = wx.FileDialog(self, message=msg, wildcard=wildcard, style=style)
dlg.ShowModal()
new_path = dlg.GetPath()
dlg.Destroy()
return new_path
############################################################################
# event handlers
def pick_date(self, event):
self.refresh_item_list()
def date_next(self, event):
self.day_inc(1)
def date_prev(self, event):
self.day_inc(-1)
def day_inc(self, inc):
day = self.date_picker.GetValue()
new_day = day + wx.DateSpan(0, 0, 0, inc)
self.date_picker.SetValue(new_day)
self.refresh_item_list()
def new_item(self, event):
cat = self.category.GetValue()
item = self.name.GetValue().strip()
if not item or not cat:
return
amount = int(self.cost.GetValue())
if amount == 0:
return
cat_id = self.db.get_catid(cat)
date = self.date_picker.GetValue().FormatISODate()
#print(cat_id, cat, item, amount, date)
self.db.new_item(cat_id, item, amount, date)
self.refresh_item_list()
# set the focus back to the item text box
self.name.SetFocus()
self.name.SetSelection(-1, -1)
def delete_popup(self, event):
'''Show the delete pop-up menu on right click on a list control.'''
self.delete_list = event.GetEventObject()
selected_item = self.delete_list.GetFirstSelected()
item_id = self.delete_list.GetItem(selected_item, 0).GetText()
self.delete_item_id = int(item_id)
self.PopupMenu(self.menu_delete)
def delete_item(self, event):
if self.delete_list is self.cat_list:
# try to delete category
if not self.db.category_used(self.delete_item_id):
self.db.delete_category(self.delete_item_id)
self.refresh_cat_list()
self.refresh_category()
else:
# delete outgoings item
self.db.delete_item(self.delete_item_id)
self.refresh_item_list()
def validate_cost(self, event):
'''Cost should be an integer.'''
try:
int(self.cost.GetValue())
except ValueError:
self.cost.SetBackgroundColour('pink')
self.cost.SetValue('')
self.cost.SetFocus()
self.cost.Refresh()
else:
bg_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW)
self.cost.SetBackgroundColour(bg_colour)
self.cost.Refresh()
def new_cat(self, event):
category = self.cat_text.GetValue().strip()
if not category:
# blank cat name
return
if self.db.category_exists(category):
print('category already exists')
else:
self.db.new_category(category)
self.refresh_cat_list()
self.refresh_category()
############################################################################
# category tab
def config_cat_list(self):
'''Initial configuration'''
self.cat_list.InsertColumn(0, 'cat_id', width=0)
self.cat_list.InsertColumn(1, 'Category', width=450)
def refresh_cat_list(self):
'''Refresh the cat list whenever change is made to categories.'''
self.cat_list.DeleteAllItems()
for row in self.db.categories():
self.cat_list.Append(row)
############################################################################
# item tab
def config_item_list(self):
'''Initial configuration of item list.'''
l = wx.LIST_FORMAT_LEFT
r = wx.LIST_FORMAT_RIGHT
columns = [('itemid', 0, l), ('Category', 130, l), ('Item', 340, l),
(u'Cost (£)', 70, r)]
for i, (col, width, format) in enumerate(columns):
self.item_list.InsertColumn(i, col, format=format, width=width)
def refresh_item_list(self):
date_iso = self.date_picker.GetValue().FormatISODate()
self.item_list.DeleteAllItems()
total_cost = 0
fmt = u'£{:0.2f}'.format
for row in self.db.day_items(date_iso):
row = list(row)
cost = row[-1]
total_cost += cost
cost /= 100
row[-1] = fmt(cost)
self.item_list.Append(row)
# update total in footer
self.total_cost.SetLabel(fmt(total_cost/100))
def refresh_category(self):
'''The category combo box needs refreshing.'''
cats = [c for _, c in self.db.categories()]
self.category.Clear()
        self.category.AppendItems(cats)
| {
"content_hash": "e6dcfba12d27bb772f651ec4925bbe8e",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 88,
"avg_line_length": 32.89393939393939,
"alnum_prop": 0.5513588208198986,
"repo_name": "PreludeAndFugue/PySpend",
"id": "08e098ec28a497a6dd79e4f6cd027d4a5eea21bb",
"size": "8758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyspend/controller.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24386"
}
],
"symlink_target": ""
} |
"""Device and network emulation utilities via devtools."""
import json
# Copied from
# WebKit/Source/devtools/front_end/network/NetworkConditionsSelector.js
# Units:
# download/upload: byte/s
# latency: ms
NETWORK_CONDITIONS = {
'GPRS': {
'download': 50 * 1024 / 8, 'upload': 20 * 1024 / 8, 'latency': 500},
'Regular2G': {
'download': 250 * 1024 / 8, 'upload': 50 * 1024 / 8, 'latency': 300},
'Good2G': {
'download': 450 * 1024 / 8, 'upload': 150 * 1024 / 8, 'latency': 150},
'Regular3G': {
'download': 750 * 1024 / 8, 'upload': 250 * 1024 / 8, 'latency': 100},
'Good3G': {
'download': 1.5 * 1024 * 1024 / 8, 'upload': 750 * 1024 / 8,
'latency': 40},
'Regular4G': {
'download': 4 * 1024 * 1024 / 8, 'upload': 3 * 1024 * 1024 / 8,
'latency': 20},
'DSL': {
'download': 2 * 1024 * 1024 / 8, 'upload': 1 * 1024 * 1024 / 8,
'latency': 5},
'WiFi': {
'download': 30 * 1024 * 1024 / 8, 'upload': 15 * 1024 * 1024 / 8,
'latency': 2}
}
def LoadEmulatedDevices(registry):
"""Loads a list of emulated devices from the DevTools JSON registry.
See, for example, third_party/WebKit/Source/devtools/front_end
/emulated_devices/module.json.
Args:
registry: A file-like object for the device registry (should be JSON).
Returns:
{'device_name': device}
"""
json_dict = json.load(registry)
devices = {}
for device in json_dict['extensions']:
device = device['device']
devices[device['title']] = device
return devices
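# A minimal sketch of the registry format accepted above; the keys mirror the
# lookups in this module and the device entry itself is invented:
#
# {"extensions": [{"device": {
#     "title": "Example Phone",
#     "screen": {"vertical": {"width": 360, "height": 640},
#                "device-pixel-ratio": 3},
#     "capabilities": ["touch", "mobile"],
#     "user-agent": "Mozilla/5.0 (Linux; Android 6.0) ..."}}]}
#
# devices = LoadEmulatedDevices(open('module.json'))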
def SetUpDeviceEmulationAndReturnMetadata(connection, device):
"""Configures an instance of Chrome for device emulation.
Args:
connection: (DevToolsConnection)
device: (dict) An entry from LoadEmulatedDevices().
Returns:
A dict containing the device emulation metadata.
"""
res = connection.SyncRequest('Emulation.canEmulate')
assert res['result'], 'Cannot set device emulation.'
data = _GetDeviceEmulationMetadata(device)
connection.SyncRequestNoResponse(
'Emulation.setDeviceMetricsOverride',
{'width': data['width'],
'height': data['height'],
'deviceScaleFactor': data['deviceScaleFactor'],
'mobile': data['mobile'],
'fitWindow': True})
connection.SyncRequestNoResponse('Network.setUserAgentOverride',
{'userAgent': data['userAgent']})
return data
def SetUpNetworkEmulation(connection, latency, download, upload):
"""Configures an instance of Chrome for network emulation.
See NETWORK_CONDITIONS for example (or valid?) emulation options.
Args:
connection: (DevToolsConnection)
latency: (float) Latency in ms.
download: (float) Download speed (Bytes / s).
upload: (float) Upload speed (Bytes / s).
"""
res = connection.SyncRequest('Network.canEmulateNetworkConditions')
assert res['result'], 'Cannot set network emulation.'
connection.SyncRequestNoResponse(
'Network.emulateNetworkConditions',
{'offline': False, 'latency': latency, 'downloadThroughput': download,
'uploadThroughput': upload})
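# Example: entries of NETWORK_CONDITIONS map directly onto the keyword
# arguments above, e.g. (assuming `connection` is an established
# DevToolsConnection):
#
#   SetUpNetworkEmulation(connection, **NETWORK_CONDITIONS['Regular3G'])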
def BandwidthToString(bandwidth):
"""Converts a bandwidth to string.
Args:
bandwidth: The bandwidth to convert in byte/s. Must be a multiple of 1024/8.
Returns:
A string compatible with wpr --{up,down} command line flags.
"""
assert bandwidth % (1024/8) == 0
bandwidth_kbps = (int(bandwidth) * 8) / 1024
if bandwidth_kbps % 1024:
return '{}Kbit/s'.format(bandwidth_kbps)
return '{}Mbit/s'.format(bandwidth_kbps / 1024)
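# Worked example: the Regular3G download rate is 750 * 1024 / 8 byte/s, so
# BandwidthToString(NETWORK_CONDITIONS['Regular3G']['download']) returns
# '750Kbit/s', while BandwidthToString(4 * 1024 * 1024 / 8) returns '4Mbit/s'.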
def _GetDeviceEmulationMetadata(device):
"""Returns the metadata associated with a given device."""
return {'width': device['screen']['vertical']['width'],
'height': device['screen']['vertical']['height'],
'deviceScaleFactor': device['screen']['device-pixel-ratio'],
'mobile': 'mobile' in device['capabilities'],
'userAgent': device['user-agent']}
| {
"content_hash": "745e5bd362ad4fd909a7d7194fae1f19",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 80,
"avg_line_length": 33.15,
"alnum_prop": 0.6435394670688789,
"repo_name": "junhuac/MQUIC",
"id": "d7f05c6284b8234f4230449a62851695786b5264",
"size": "4141",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "src/tools/android/loading/emulation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "25707"
},
{
"name": "Assembly",
"bytes": "5386"
},
{
"name": "Batchfile",
"bytes": "42909"
},
{
"name": "C",
"bytes": "1168925"
},
{
"name": "C#",
"bytes": "81308"
},
{
"name": "C++",
"bytes": "43919800"
},
{
"name": "CMake",
"bytes": "46379"
},
{
"name": "CSS",
"bytes": "19668"
},
{
"name": "Emacs Lisp",
"bytes": "32613"
},
{
"name": "Go",
"bytes": "7247"
},
{
"name": "Groff",
"bytes": "127224"
},
{
"name": "HTML",
"bytes": "2548385"
},
{
"name": "Java",
"bytes": "1332462"
},
{
"name": "JavaScript",
"bytes": "851006"
},
{
"name": "M4",
"bytes": "29823"
},
{
"name": "Makefile",
"bytes": "459525"
},
{
"name": "Objective-C",
"bytes": "120158"
},
{
"name": "Objective-C++",
"bytes": "330017"
},
{
"name": "PHP",
"bytes": "11283"
},
{
"name": "Protocol Buffer",
"bytes": "2991"
},
{
"name": "Python",
"bytes": "16872234"
},
{
"name": "R",
"bytes": "1842"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Shell",
"bytes": "764509"
},
{
"name": "Swift",
"bytes": "116"
},
{
"name": "VimL",
"bytes": "12288"
},
{
"name": "nesC",
"bytes": "14779"
}
],
"symlink_target": ""
} |
import socket, threading, random, multiprocessing, time
def processClient(client, address):
print('In', threading.currentThread().name, 'client descriptor', client, 'client address', address)
client.send(b' Welcome to this server')
while True:
data = client.recv(1024)
print('-> From', address, 'get data: ', data)
if not data or data.decode('utf-8') == 'exit':
client.send(b' Bye')
break
client.send(b' Hello %s' % (data))
client.close()
def server(address, port):
fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if isinstance(address, str) and isinstance(port, int):
fd.bind((address, port))
else:
raise ValueError('params invalid')
fd.listen(3)
print('>>> socket listen in %s:%s' % (address, port))
for index in range(3):
client, addr = fd.accept()
print('>>> client connected', '%s/3' % (index))
t = threading.Thread(target = processClient, args = (client, addr), name = 'Client Worker')
t.start()
print('>>> service end. close socket')
fd.close()
def client(server):
if len(server) != 2:
raise ValueError('params invalid')
print('Client start', threading.currentThread().name)
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.connect(server)
print(' > recv data: ', serv.recv(1024))
for index in range(random.randint(1, 3)):
msg = ''
for chi in range(5):
msg += chr(random.randint(65, 94))
serv.send(msg.encode('utf-8'))
print(' > recv data: ', serv.recv(1024))
serv.send(b'exit')
print(' > recv data: ', serv.recv(1024))
serv.close()
if __name__ == '__main__':
serv = multiprocessing.Process(target = server, args = ('127.0.0.1', 9527))
serv.start()
for index in range(3):
t = threading.Thread(target = client, args = (('127.0.0.1', 9527),), name = 'User')
t.start()
t.join()
serv.terminate()
| {
"content_hash": "068021cb731d91a3d152bf600558a4bc",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 103,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.5645933014354066,
"repo_name": "JShadowMan/package",
"id": "b040536be1c9a4676a6f4fcc9ec16f200297572b",
"size": "2114",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/socket_/cs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "55729"
},
{
"name": "Makefile",
"bytes": "721"
},
{
"name": "Python",
"bytes": "42"
},
{
"name": "Shell",
"bytes": "3398"
}
],
"symlink_target": ""
} |
"""Stand-alone fitting utilities"""
import numpy as np
from scipy.special import gamma, psi
from ..misc import easylsq
from ..misc.errorvalue import ErrorValue
__all__ = ['fit_shullroess']
def fit_shullroess(q, Intensity, Error, R0=None, r=None):
"""Do a Shull-Roess fitting on the scattering data.
Inputs:
q: np.ndarray[ndim=1]
vector of the q values (4*pi*sin(theta)/lambda)
Intensity: np.ndarray[ndim=1]
Intensity vector
Error: np.ndarray[ndim=1]
Error of the intensity (absolute uncertainty, 1sigma)
R0: scalar
first guess for the mean radius (None to autodetermine, default)
r: np.ndarray[ndim=1]
vector of the abscissa of the resulting size distribution (None to
autodetermine, default)
Output:
A: ErrorValue
the fitted value of the intensity scaling factor
r0: the r0 parameter of the maxwellian size distribution
n: the n parameter of the maxwellian size distribution
r: the abscissa of the fitted size distribution
maxw: the size distribution
stat: the statistics dictionary, returned by nlsq_fit()
Note: This first searches for r0, which best linearizes the
log(Intensity) vs. log(q**2+3/r0**2) relation.
After this is found, the parameters of the fitted line give the
parameters of a Maxwellian-like particle size distribution function.
After it a proper least squares fitting is carried out, using the
obtained values as initial parameters.
"""
q = np.array(q)
Intensity = np.array(Intensity)
Error = np.array(Error)
if R0 is None:
r0s = np.linspace(1, 2 * np.pi / q.min(), 1000)
def naive_fit_chi2(q, Intensity, r0):
p = np.polyfit(np.log(q ** 2 + 3 / r0 ** 2), np.log(Intensity), 1)
return ((np.polyval(p, q) - Intensity) ** 2).sum() / (len(q) - 3)
chi2 = np.array([naive_fit_chi2(q, Intensity, r0) for r0 in r0s.tolist()])
R0 = r0s[chi2 == chi2.min()][0]
def naive_fit(q, Intensity, r0):
p = np.polyfit(np.log(q ** 2 + 3 / r0 ** 2), np.log(Intensity), 1)
return np.exp(p[1]), -2 * p[0] - 4
K, n = naive_fit(q, Intensity, R0)
def SR_function(q, A, r0, n):
return A * (q ** 2 + 3 / r0 ** 2) ** (-(n + 4.) * 0.5)
p, dp, statdict = easylsq.nlsq_fit(q, Intensity, Error, SR_function, (K, R0, n))
n = ErrorValue(p[2], dp[2])
r0 = ErrorValue(p[1], dp[1])
A = ErrorValue(p[0], dp[0])
if r is None:
r = np.linspace(np.pi / q.max(), np.pi / q.min(), 1000)
return A, r0, n, r, maxwellian(r, r0, n), statdict
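# A minimal usage sketch (the synthetic parameters below are arbitrary):
# generate ideal Shull-Roess intensities and recover the size distribution
# parameters with fit_shullroess().
def _example_fit_shullroess():
    q = np.linspace(0.01, 0.5, 200)
    A_true, r0_true, n_true = 1e3, 20.0, 6.0
    intensity = A_true * (q ** 2 + 3 / r0_true ** 2) ** (-(n_true + 4) * 0.5)
    error = 0.01 * intensity
    A, r0, n, r, maxw, stat = fit_shullroess(q, intensity, error)
    return A, r0, n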
def maxwellian(r, r0, n):
"""Maxwellian-like distribution of spherical particles
Inputs:
-------
r: np.ndarray or scalar
radii
r0: positive scalar or ErrorValue
mean radius
n: positive scalar or ErrorValue
"n" parameter
Output:
-------
the distribution function and its uncertainty as an ErrorValue containing arrays.
The uncertainty of 'r0' and 'n' is taken into account.
Notes:
------
M(r)=2*r^n/r0^(n+1)*exp(-r^2/r0^2) / gamma((n+1)/2)
"""
r0 = ErrorValue(r0)
n = ErrorValue(n)
expterm = np.exp(-r ** 2 / r0.val ** 2)
dmaxdr0 = -2 * r ** n.val * r0.val ** (-n.val - 4) * ((n.val + 1) * r0.val ** 2 - 2 * r ** 2) * expterm / gamma((n.val + 1) * 0.5)
dmaxdn = -r ** n.val * r0.val ** (-n.val - 1) * expterm * (2 * np.log(r0.val) - 2 * np.log(r) + psi((n.val + 1) * 0.5)) / gamma((n.val + 1) * 0.5)
maxwellian = 2 * r ** n.val * r0.val ** (-n.val - 1) * expterm / gamma((n.val + 1) * 0.5)
dmaxwellian = (dmaxdn ** 2 * n.err ** 2 + dmaxdr0 ** 2 * r0.err ** 2) ** 0.5
return ErrorValue(maxwellian, dmaxwellian)
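# Quick sanity check of the formula above: for r = r0 and n = 2 the density is
# 2 * r0**2 / r0**3 * exp(-1) / gamma(1.5), i.e. about 0.83 / r0, so
# maxwellian(10.0, 10.0, 2).val is roughly 0.083.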
| {
"content_hash": "4766270198123825011eada946eb8169",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 150,
"avg_line_length": 40.103092783505154,
"alnum_prop": 0.5727506426735218,
"repo_name": "awacha/sastool",
"id": "1f5e84a87c760529817ee49c1d1c9839f285a0b2",
"size": "3890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sastool/fitting/standalone.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "468672"
},
{
"name": "Shell",
"bytes": "47"
}
],
"symlink_target": ""
} |
'''
This script parses uni-math symbols from
http://milde.users.sourceforge.net/LUCR/Math/data/unimathsymbols.txt
and saves the result as a JSON file.
The result is used to generate command intellisense for LaTeX Workshop.
'''
import json
def remove_relation_character(description):
"""
From unimathsymbols.txt
8. descriptive _`comments`
The descriptive comments provide more information about the
character, or its specific appearance or use.
Some descriptions contain references to related commands,
marked by a character describing the relation
:=: equals (alias commands),
:#: approx (compat mapping, different character with same glyph),
:x: → cross reference/see also (related, false friends, and name clashes),
:t: text (text mode command),
followed by requirements in parantheses, and
delimited by commas.
"""
tokens = description.split(',')
sanitized_description_items = []
for tok in tokens:
t = tok.strip()
if t[0] in ('x', '#', '=', 't') and t[1] == ' ':
t = t[2:]
sanitized_description_items.append(t)
return ', '.join(sanitized_description_items)
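# Example with a hypothetical description string in the format documented
# above: remove_relation_character('= \equiv (amssymb), x \cong') returns
# '\equiv (amssymb), \cong' -- the relation markers are dropped, the rest of
# each comma-separated item is kept.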
def generate_unimathsymbols_intel(infile, json_out):
"""
Generate intellisense data in json format for the unicode math symbols
:param infile: unimathsymbols.txt
:param json_out: the path to the unimathsymbols.json file
"""
data = {}
with open(infile, encoding='utf-8') as f:
for line in f:
if line[0] == '#':
continue
segments = line.split('^')
if segments[3] == '':
continue
if segments[3][0] == '\\':
segments[3] = segments[3][1:]
data[segments[3]] = {
'command': segments[3],
'detail': segments[1],
# 'documentation': segments[7].strip().capitalize()
'documentation': remove_relation_character(segments[7]).capitalize()
}
if segments[6] != '' and segments[6][0] != '-':
data[segments[3]]['detail'] += f' ("{segments[6]}" command)'
json.dump(data, open(json_out, 'w', encoding='utf-8'),
indent=2, separators=(',', ': '), sort_keys=True, ensure_ascii=False)
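# Typical invocation (the file names are assumptions based on the docstring):
#
#   generate_unimathsymbols_intel('unimathsymbols.txt', 'unimathsymbols.json')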
| {
"content_hash": "81183f7401fddca552db7eff82d60abc",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 84,
"avg_line_length": 35.1044776119403,
"alnum_prop": 0.5846088435374149,
"repo_name": "James-Yu/LaTeX-Workshop",
"id": "665d69c353a7847bd949ce50aa11d14eb4e6efb3",
"size": "2354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/pyintel/uni.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "345"
},
{
"name": "CSS",
"bytes": "77447"
},
{
"name": "Common Workflow Language",
"bytes": "61821"
},
{
"name": "Dockerfile",
"bytes": "3278"
},
{
"name": "HTML",
"bytes": "49281"
},
{
"name": "JavaScript",
"bytes": "456224"
},
{
"name": "Python",
"bytes": "45291"
},
{
"name": "Shell",
"bytes": "453"
},
{
"name": "TypeScript",
"bytes": "813442"
}
],
"symlink_target": ""
} |
__author__ = 'frank'
import zstacklib.utils.daemon as daemon
import zstacklib.utils.http as http
import zstacklib.utils.log as log
import zstacklib.utils.shell as shell
import zstacklib.utils.iptables as iptables
import zstacklib.utils.jsonobject as jsonobject
import zstacklib.utils.lock as lock
import zstacklib.utils.linux as linux
from zstacklib.utils import plugin
import os
import functools
import traceback
import pprint
import threading
logger = log.get_logger(__name__)
class AgentResponse(object):
def __init__(self, success=True, error=None):
self.success = success
self.error = error if error else ''
def replyerror(func):
@functools.wraps(func)
def wrap(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
content = traceback.format_exc()
err = '%s\n%s\nargs:%s' % (str(e), content, pprint.pformat([args, kwargs]))
rsp = AgentResponse()
rsp.success = False
rsp.error = str(e)
logger.warn(err)
return jsonobject.dumps(rsp)
return wrap
class IscsiAgent(object):
http_server = http.HttpServer(port=7760)
http_server.logfile_path = log.get_logfile_path()
def __init__(self):
self.plugin_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plugins')
self.plugin_rgty = plugin.PluginRegistry(self.plugin_path)
def start(self, in_thread=True):
self.plugin_rgty.configure_plugins(self)
self.plugin_rgty.start_plugins()
if in_thread:
self.http_server.start_in_thread()
else:
self.http_server.start()
def stop(self):
self.plugin_rgty.stop_plugins()
self.http_server.stop()
class IscsiDaemon(daemon.Daemon):
def __init__(self, pidfile):
super(IscsiDaemon, self).__init__(pidfile)
def run(self):
self.agent = IscsiAgent()
self.agent.start(in_thread=False)
| {
"content_hash": "9c66addb10ee0977463663e4876a9435",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 95,
"avg_line_length": 28.457142857142856,
"alnum_prop": 0.6470883534136547,
"repo_name": "live4thee/zstack-utility",
"id": "531074f1e2809cb45dac4f18383a45cc594ef0b0",
"size": "1992",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "iscsifilesystemagent/iscsifilesystemagent/iscsiagent.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "1147"
},
{
"name": "HTML",
"bytes": "4445"
},
{
"name": "Pascal",
"bytes": "187"
},
{
"name": "Puppet",
"bytes": "10417"
},
{
"name": "Python",
"bytes": "2346166"
},
{
"name": "Shell",
"bytes": "241290"
}
],
"symlink_target": ""
} |
import pygame
class Message(pygame.sprite.Sprite):
"""A class som simplify message writing."""
align_left = 'left';
align_right = 'right';
align_center = 'center';
def __init__(self, msglist, title=None, rect=None, vector=(0,0), align=align_left, linepadding=0.1,
padding=0.1, fontsize=20, fontname=None, center=None,
fgcolor=pygame.Color(255,255,255,255), bgcolor=pygame.Color(0,0,0,255)):
"""rect: pygame.Rect
msglist: tuple or list of strings
vector: tuple, coordinates added for each msg
        align: alignment of the text on the surface
        padding: space around the text
fontsize:
fontname:
center:
fgcolor: pygame.Color
bgcolor: pygame.Color"""
pygame.sprite.Sprite.__init__(self);
if(rect != None and not isinstance(rect, pygame.Rect) and not isinstance(rect, tuple)):
raise TypeError('Rect is neither a pygame.Rect nor a tuple');
if(not isinstance(fgcolor, pygame.Color) and not isinstance(bgcolor, pygame.Color)):
raise TypeError('fg/bgcolor is not a pygame.Color');
if(not isinstance(fontsize, int)):
raise TypeError('font is not an int');
if(not isinstance(msglist, tuple) and not isinstance(msglist, list)):
raise TypeError('msglist is neither a list nor a tuple');
if(not isinstance(vector, tuple)):
raise TypeError('vector is not a tuple');
self.rect = pygame.Rect(rect) if(rect != None) else pygame.Rect(((1,1),(1,1)));
if(center == None and rect == None):
scR = pygame.display.get_surface().get_rect();
self.center = (scR.centerx, scR.centery -scR.centery*0.3);
else:
self.center = center;
self.msglist = msglist;
self.vector = vector;
self.image = pygame.Surface(self.rect.topleft);
self.align = align;
self.font = pygame.font.Font(fontname, fontsize);
self.fgcolor = fgcolor;
self.bgcolor = bgcolor;
self.padding = padding;
self.title = title;
self.linepadding = linepadding;
self.update();
def update(self):
"""Create a surface with the actual text."""
self.image.fill(self.bgcolor);
textList = [];
if(self.title != None):
self.font.set_underline(True);
self.font.set_bold(True);
textList.append(self.font.render(self.title, True, self.fgcolor));
self.font.set_underline(False);
self.font.set_bold(False);
for msg in self.msglist:
# Create text
textList.append(self.font.render(msg, True, self.fgcolor));
## Find the widest one
width = 0;
height = 0;
for txt in textList:
if(txt.get_rect().width > width): width = txt.get_rect().width;
if(self.vector[1] == 0): height += txt.get_rect().height;
width += self.vector[0]*len(textList);
height += self.vector[1]*len(textList);
## Rescale the surface to fit the whole text
self.image = pygame.transform.scale(self.image, (width+int(width*self.padding*2), height+int(height*self.padding*2)));
self.rect.size = self.image.get_rect().size;
## Set rect as left aligned. We might change it later.
tmpRect = pygame.Rect(((int(width*self.padding),int(height*self.padding)),(0,0)));
imgRect = self.image.get_rect();
for txt in textList:
## Make the text aligned right
if(self.align == self.align_right):
print(str.format("--- tmpRect {0} {1} {2}", tmpRect, tmpRect.left, tmpRect.right));
tmpRect.left = int(imgRect.width - txt.get_rect().width - imgRect.width*self.padding);
print(str.format("=== tmpRect {0} {1} {2}", tmpRect, tmpRect.left, tmpRect.right));
## Make the text aligned center
if(self.align == self.align_center):
tmpRect.left = int(self.image.get_rect().width/2 - txt.get_rect().width/2);
self.image.blit(txt, tmpRect.topleft);
if(self.vector[1] == 0):
self.vector = (self.vector[0],int(txt.get_rect().height*(1+self.linepadding)));
tmpRect = tmpRect.move(self.vector);
## Set the whole sprite to requested center
if(self.center != None):
self.rect.center = self.center;
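# A minimal usage sketch (window size and texts are arbitrary): render a
# Message sprite onto the display surface.
def _demo_message():
    pygame.init();
    screen = pygame.display.set_mode((640, 480));
    msg = Message(['First line', 'Second line'], title='Demo', align=Message.align_center);
    screen.fill(pygame.Color(0, 0, 0, 255));
    screen.blit(msg.image, msg.rect);
    pygame.display.flip();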
| {
"content_hash": "3d7810b2bb3ae72ac48f1e467b99f3c8",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 126,
"avg_line_length": 44.15533980582524,
"alnum_prop": 0.5756376429199648,
"repo_name": "khan-git/pialarmclock",
"id": "3d217d6ce3ee6b2e8b1705c42de820b057cbf361",
"size": "4548",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "utils/message.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20566"
}
],
"symlink_target": ""
} |
import yaml
from argparse import ArgumentParser
from constants import GOOGLE_CLOUD_TEST
from dict_util import deep_get
from yaml_util import load_resources_yaml
''' Remove all resources considered to be Kubernetes Helm tests from
a given manifest file. '''
_HELM_HOOK_KEY = 'helm.sh/hook'
_HOOK_SUCCESS = 'test-success'
_HOOK_FAILURE = 'test-failure'
def main():
parser = ArgumentParser()
parser.add_argument("--manifest", help="the manifest file location to be cleared of tests")
parser.add_argument("--deploy_tests", action="store_true", help="indicates whether tests should be deployed")
args = parser.parse_args()
manifest = args.manifest
resources = load_resources_yaml(manifest)
filtered_resources = []
for resource in resources:
helm_hook = deep_get(resource, "metadata", "annotations", _HELM_HOOK_KEY)
if helm_hook is None:
filtered_resources.append(resource)
elif helm_hook == _HOOK_SUCCESS:
if args.deploy_tests:
annotations = deep_get(resource, "metadata", "annotations")
del annotations[_HELM_HOOK_KEY]
annotations[GOOGLE_CLOUD_TEST] = "test"
filtered_resources.append(resource)
elif helm_hook == _HOOK_FAILURE:
if args.deploy_tests:
raise Exception("Helm hook {} is not supported".format(helm_hook))
else:
raise Exception("Helm hook {} is not supported".format(helm_hook))
with open(manifest, "w") as out:
yaml.dump_all(filtered_resources, out,
default_flow_style=False, explicit_start=True)
if __name__ == "__main__":
main()
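# ---------------------------------------------------------------------------
# Illustrative sketch (never called) of the hook-detection decision made in
# main() above, applied to a single in-memory resource. "_get_nested" is a
# local stand-in for dict_util.deep_get and is assumed to return None when a
# key is missing; the real helper may behave differently.
def _hook_example():
    resource = {
        "kind": "Pod",
        "metadata": {"annotations": {_HELM_HOOK_KEY: _HOOK_SUCCESS}},
    }
    def _get_nested(data, *keys):
        for key in keys:
            if not isinstance(data, dict) or key not in data:
                return None
            data = data[key]
        return data
    hook = _get_nested(resource, "metadata", "annotations", _HELM_HOOK_KEY)
    # With --deploy_tests the hook annotation is swapped for the GOOGLE_CLOUD_TEST
    # marker and the resource is kept; without the flag the resource is dropped.
    return hook == _HOOK_SUCCESS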
| {
"content_hash": "1a1471ebafa558326ea80ee6d6734d40",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 111,
"avg_line_length": 31.64,
"alnum_prop": 0.6877370417193426,
"repo_name": "cliveseldon/marketplace-k8s-app-tools",
"id": "9a6388fa6c5a069e62d5e2fa1fd80b29a7fb6ae2",
"size": "2183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marketplace/deployer_util/process_helm_hooks.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "45111"
},
{
"name": "HTML",
"bytes": "13052"
},
{
"name": "Makefile",
"bytes": "1879"
},
{
"name": "Python",
"bytes": "106847"
},
{
"name": "Shell",
"bytes": "23662"
}
],
"symlink_target": ""
} |
__author__ = 'scarroll'
from pygov.usda.enums import *
from pygov.usda.domain import Nutrient, Food, FoodReport
from pygov.base.client import DataGovClientBase, get_response_data
class UsdaClient(DataGovClientBase):
def __init__(self, api_gov_key):
super(UsdaClient, self).__init__('usda/', api_gov_key)
def list_nutrients(self, max, offset=0, sort='n'):
uri = super(UsdaClient, self).build_uri(UsdaApis.ndb.value, UsdaUriActions.list.value,
lt=UsdaNdbListType.all_nutrients.value, max=max, offset=offset, sort=sort)
response_data = get_response_data(uri)
nutrients = self.__build_nutrients_list(response_data)
return nutrients
def list_foods(self, max, offset=0, sort='n'):
uri = super(UsdaClient, self).build_uri(UsdaApis.ndb.value, UsdaUriActions.list.value,
lt=UsdaNdbListType.food.value, max=max, offset=offset, sort=sort)
response_data = get_response_data(uri)
foods = self.__build_foods_list(response_data)
return foods
def get_food_report(self, ndb_food_id, report_type=UsdaNdbReportType.basic):
uri = super(UsdaClient, self).build_uri(UsdaApis.ndb.value, UsdaUriActions.report.value, type=report_type.value, ndbno=ndb_food_id)
response_data = get_response_data(uri)
return FoodReport.from_response_data(response_data)
def get_nutrient_report(self, ndb_nutrient_id, report_type=UsdaNdbReportType.basic):
uri = super(UsdaClient, self).build_uri(UsdaApis.ndb.value, UsdaUriActions.report.value, type=report_type.value, ndbno=ndb_nutrient_id)
response_data = get_response_data(uri)
# No nutrient-report domain class is imported here, so return the raw response data.
return response_data
def __build_item_list(self, data, usda_class):
result = list()
data_list = data['list']['item']
for raw_data in data_list:
result.append(usda_class.from_response_data(raw_data))
return result
def __build_nutrients_list(self, response_data):
return self.__build_item_list(response_data, Nutrient)
def __build_foods_list(self, response_data):
return self.__build_item_list(response_data, Food)
def __build_food_report(self, response_data):
return FoodReport(response_data)
| {
"content_hash": "4d789f0ef18c4cf7e46313341fe6cd17",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 143,
"avg_line_length": 44.734693877551024,
"alnum_prop": 0.6747262773722628,
"repo_name": "skeryl/pygov",
"id": "4fe20b2f87185031a7a73ca455ee954a8f6e9774",
"size": "2192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygov/usda/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9662"
}
],
"symlink_target": ""
} |
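A minimal usage sketch for the UsdaClient above (illustrative only: the import path is inferred from the file location, the API key is a placeholder, the NDB food number is just an example, and live access to the data.gov API is required):

from pygov.usda.client import UsdaClient
from pygov.usda.enums import UsdaNdbReportType

client = UsdaClient("MY_DATA_GOV_API_KEY")      # placeholder key
for food in client.list_foods(max=5):           # first five foods, name-sorted
    print(food)
report = client.get_food_report("01009", report_type=UsdaNdbReportType.basic)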
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cbh_chembl_model_extension', '0019_auto_20150721_0515'),
]
operations = [
migrations.RemoveField(
model_name='skinningconfig',
name='created_by',
),
]
| {
"content_hash": "b81bb07e89dd0142bb8b7bdabbbc20e0",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 21.11764705882353,
"alnum_prop": 0.6128133704735376,
"repo_name": "thesgc/cbh_chembl_model_extension",
"id": "cba303ffb7c08c7a7404e7e077d732d46c1f9256",
"size": "383",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cbh_chembl_model_extension/migrations/0020_remove_skinningconfig_created_by.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "661"
},
{
"name": "Makefile",
"bytes": "1298"
},
{
"name": "Python",
"bytes": "138965"
}
],
"symlink_target": ""
} |
"""
Python API for KB SRU
"""
import sys
try:
    from urllib.parse import quote_plus  # Python 3
except ImportError:  # Python 2
    from urllib import quote_plus
import requests
from lxml import etree
SRU_BASEURL = 'http://jsru.kb.nl/sru/sru'
SRU_BASEURL += '?version=1.2&maximumRecords=%i'
SRU_BASEURL += '&operation=searchRetrieve'
SRU_BASEURL += '&startRecord=%i'
SRU_BASEURL += '&recordSchema=%s'
SRU_BASEURL += '&x-collection=%s&query=%s'
SETS = {'ANP': {'collection': 'ANP',
'description_en': 'Radio Bulletins ANP Press Agency',
'description_nl': 'ANP Radiobulletins Digitaal',
'metadataPrefix': 'didl',
'recordschema': 'dcx',
'setname': 'anp',
'time_period': [1937, 1989]},
'DPO': {'collection': 'DPO_boekdeel',
'description_en': 'Early Dutch Books Online',
'description_nl': 'Early Dutch Books Online',
'metadataPrefix': 'didl',
'recordschema': 'ddd',
'setname': 'DPO',
'time_period': [1781, 1800]},
'BYVANCK': {'description_en': 'Medieval Illuminated Manuscripts',
'description_nl': 'Middeleeuwse Verluchte Handschriften',
'metadataPrefix': 'dcx',
'setname': 'BYVANCK',
'time_period': [500, 1500]},
'SGD': {'description_en': 'States General Digital',
'description_nl': 'Staten-Generaal Digitaal',
'metadataPrefix': 'dcx',
'setname': 'sgd:register',
'time_period': [1962, 1994]},
'GGC': {'collection': 'GGC',
'description_en': 'General Catalogue KB',
'description_nl': 'Algemene Catalogus KB',
'metadataPrefix': 'dcx',
'recordschema': 'dcx',
'setname': 'ggc',
'time_period': [1937, 2016]}} # No idea what to use here?
# Name spaces in GGC records
srw_ns = 'http://www.loc.gov/zing/srw/'
tel_ns = 'http://krait.kb.nl/coop/tel/handbook/telterms.html'
xsi_ns = 'http://www.w3.org/2001/XMLSchema-instance'
dc_ns = 'http://purl.org/dc/elements/1.1/'
dcterms_ns = 'http://purl.org/dc/terms/'
dcx_ns = 'http://krait.kb.nl/coop/tel/handbook/telterms.html'
NSMAPGGC = {"srw": srw_ns,
"tel": tel_ns,
"xsi": xsi_ns,
"dc": dc_ns,
"dcterms": dcterms_ns,
"dcx": dcx_ns}
class response():
def __init__(self, record_data, sru):
self.record_data = record_data
self.sru = sru
def getElementText(self, tagName, attributeName, attributeValue):
# Returns text content of all elements for which tag matches tagName,
# and attribute value equals attributeValue. Set attributeName to empty
# string to get all tagName matches.
textFields = []
for r in self.record_data.iter():
if r.tag == tagName:
if attributeName != '':
try:
if r.attrib[attributeName] == attributeValue:
textFields.append(r.text)
except KeyError:
pass
else:
textFields.append(r.text)
return textFields
@property
def records(self):
if self.sru.nr_of_records == 0:
record_data = "<xml></xml>"
else:
ns = {'zs': 'http://www.loc.gov/zing/srw/'}
record_data = self.record_data.xpath("zs:records/zs:record",
namespaces=ns)[0]
return record(record_data, self.sru)
# The property functions below each return a list of all instances that
# satisfy the criteria.
@property
def typesDutch(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}type',
'{http://www.w3.org/XML/1998/namespace}lang',
'nl'))
@property
def typesDCMI(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}type',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'DCMIType'))
@property
def identifiersISBN(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:ISBN'))
@property
def identifiersBrinkman(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:Brinkman'))
@property
def identifiersURI(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:URI'))
@property
def identifiersOCLC(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'OCLC'))
@property
def languagesDutch(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/XML/1998/namespace}lang',
'nl'))
@property
def languagesEnglish(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/XML/1998/namespace}lang',
'en'))
@property
def languagesFrench(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/XML/1998/namespace}lang',
'fr'))
@property
def languagesISO639(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:ISO639-2'))
@property
def dates(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}date',
'',
''))
@property
def extents(self):
return(self.getElementText('{http://purl.org/dc/terms/}extent',
'',
''))
@property
def creators(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}creator',
'',
''))
@property
def contributors(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}contributor',
'',
''))
@property
def titles(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}title',
'',
''))
@property
def titlesMain(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}title',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:maintitle'))
@property
def titlesIntermediate(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}title',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:intermediatetitle'))
@property
def publishers(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}publisher',
'',
''))
@property
def countries(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}country',
'',
''))
@property
def subjectsBrinkman(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}subject',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:Brinkman'))
@property
def subjectsISO9707(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}subject',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'ISO_9707_[Brinkman]'))
@property
def subjectsUNESCO(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}subject',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'UNESCO'))
@property
def collectionIdentifiers(self):
return(self.getElementText('{http://purl.org/dc/terms/}isPartOf',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:collectionIdentifier'))
@property
def recordIdentifiersURI(self):
return(self.getElementText('{http://krait.kb.nl/coop/tel/handbook/telterms.html}recordIdentifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:URI'))
@property
def annotations(self):
# Note that annotations sometimes contain language or itemID attributes;
# ignored for now (collect everything).
return(self.getElementText('{http://krait.kb.nl/coop/tel/handbook/telterms.html}annotation',
'',
''))
class record():
def __init__(self, record_data, sru):
self.record_data = record_data
self.sru = sru
def __iter__(self):
return self
# This works under Python 2.7
def next(self):
if self.sru.nr_of_records == 0:
raise StopIteration
if self.sru.startrecord < self.sru.nr_of_records + 1:
record_data = self.sru.run_query()
self.sru.startrecord += 1
return response(record_data, self.sru)
else:
raise StopIteration
# This works under Python 3
def __next__(self):
if self.sru.nr_of_records == 0:
raise StopIteration
if self.sru.startrecord < self.sru.nr_of_records + 1:
record_data = self.sru.run_query()
self.sru.startrecord += 1
return response(record_data, self.sru)
else:
raise StopIteration
class sru():
DEBUG = False
collection = False
maximumrecords = 50
nr_of_records = 0
query = ""
recordschema = False
sru_collections = SETS
startrecord = 0
def search(self, query, collection=False,
startrecord=1, maximumrecords=1, recordschema=False):
self.maximumrecords = maximumrecords
self.query = quote_plus(query)  # works on both Python 2 and 3 via the import above
self.startrecord = startrecord
if collection not in self.sru_collections:
raise Exception('Unknown collection')
self.collection = self.sru_collections[collection].get('collection', False)  # not every set defines a collection
if not self.collection:
raise Exception('Error, no collection specified')
if not recordschema:
self.recordschema = self.sru_collections[collection]['recordschema']
else:
self.recordschema = recordschema
record_data = self.run_query()
nr_of_records = [i.text for i in record_data.iter() if
i.tag.endswith('numberOfRecords')][0]
self.nr_of_records = int(nr_of_records)
if self.nr_of_records > 0:
return response(record_data, self)
return False
def run_query(self):
url = SRU_BASEURL % (self.maximumrecords, self.startrecord,
self.recordschema, self.collection, self.query)
if self.DEBUG:
sys.stdout.write(url)
r = requests.get(url)
if not r.status_code == 200:
raise Exception('Error while getting data from %s' % url)
record_data = etree.fromstring(r.content)
return record_data
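# ---------------------------------------------------------------------------
# Minimal usage sketch for the classes above (illustrative only: the query term
# and collection are examples, and live access to jsru.kb.nl is required).
def _example_search():
    client = sru()
    result = client.search('beatrix', collection='GGC', maximumrecords=10)
    if result:
        for page in result.records:       # each iteration re-runs the query
            print(page.titles)            # dc:title values found in this page
            print(page.identifiersURI)    # dcterms:URI record identifiers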
| {
"content_hash": "ac41b440b1d784cc69f13a1c0f788d1b",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 106,
"avg_line_length": 36.633720930232556,
"alnum_prop": 0.5153943818441518,
"repo_name": "KBNLresearch/omSipCreator",
"id": "4d986bc4e8b34e71f5af799c5a0cb6fff78aa276",
"size": "12625",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "omSipCreator/kbapi/sru.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "106535"
},
{
"name": "Shell",
"bytes": "849"
}
],
"symlink_target": ""
} |