code | docs |
---|---|
def _configure_env_source_rc(config):
config.set('global', 'env_source_rc', False)
if system.is_osx():
logger.info("On OSX, login shells are default, which only source sprinter's 'env' configuration.")
logger.info("I.E. environment variables would be sourced, but not shell functions "
+ "or terminal status lines.")
logger.info("The typical solution to get around this is to source your rc file (.bashrc, .zshrc) "
+ "from your login shell.")
env_source_rc = lib.prompt("would you like sprinter to source the rc file too?", default="yes",
boolean=True)
config.set('global', 'env_source_rc', env_source_rc) | Configures whether to have .env source .rc |
def get_members(self):
res = self.__con__.search_s(
self.__ldap_base_dn__,
ldap.SCOPE_SUBTREE,
"(memberof=%s)" % self.__dn__,
['uid'])
ret = []
for val in res:
val = val[1]['uid'][0]
try:
ret.append(val.decode('utf-8'))
except UnicodeDecodeError:
ret.append(val)
except KeyError:
continue
return [CSHMember(self.__lib__,
result,
uid=True)
for result in ret] | Return all members in the group as CSHMember objects |
def check_member(self, member, dn=False):
if dn:
res = self.__con__.search_s(
self.__dn__,
ldap.SCOPE_BASE,
"(member=%s)" % dn,
['ipaUniqueID'])
else:
res = self.__con__.search_s(
self.__dn__,
ldap.SCOPE_BASE,
"(member=%s)" % member.get_dn(),
['ipaUniqueID'])
return len(res) > 0 | Check if a Member is in the bound group.
Arguments:
member -- the CSHMember object (or distinguished name) of the member to
check against
Keyword arguments:
dn -- whether or not member is a distinguished name |
def add_member(self, member, dn=False):
if dn:
if self.check_member(member, dn=True):
return
mod = (ldap.MOD_ADD, 'member', member.encode('ascii'))
else:
if self.check_member(member):
return
mod = (ldap.MOD_ADD, 'member', member.get_dn().encode('ascii'))
if self.__lib__.__batch_mods__:
self.__lib__.enqueue_mod(self.__dn__, mod)
elif not self.__lib__.__ro__:
mod_attrs = [mod]
self.__con__.modify_s(self.__dn__, mod_attrs)
else:
print("ADD VALUE member = {} FOR {}".format(mod[2], self.__dn__)) | Add a member to the bound group
Arguments:
member -- the CSHMember object (or distinguished name) of the member
Keyword arguments:
dn -- whether or not member is a distinguished name |
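A minimal usage sketch of the two methods above; the bound group object and the CSHMember instance are hypothetical names, not part of the source:
# hypothetical objects: `group` is a bound group, `member` is a CSHMember
if not group.check_member(member):
    group.add_member(member)
# when only a distinguished name string is available, pass dn=True
group.add_member("uid=jdoe,ou=Users,dc=example,dc=com", dn=True)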
def read_object_from_yaml(desired_type: Type[Any], file_object: TextIOBase, logger: Logger,
fix_imports: bool = True, errors: str = 'strict', *args, **kwargs) -> Any:
return yaml.load(file_object) | Parses a yaml file.
:param desired_type:
:param file_object:
:param logger:
:param fix_imports:
:param errors:
:param args:
:param kwargs:
:return: |
def read_collection_from_yaml(desired_type: Type[Any], file_object: TextIOBase, logger: Logger,
conversion_finder: ConversionFinder, fix_imports: bool = True, errors: str = 'strict',
**kwargs) -> Any:
res = yaml.load(file_object)
# convert if required
return ConversionFinder.convert_collection_values_according_to_pep(res, desired_type, conversion_finder, logger,
**kwargs) | Parses a collection from a yaml file.
:param desired_type:
:param file_object:
:param logger:
:param conversion_finder:
:param fix_imports:
:param errors:
:param kwargs:
:return: |
def get_default_yaml_parsers(parser_finder: ParserFinder, conversion_finder: ConversionFinder) -> List[AnyParser]:
return [# yaml for any object
SingleFileParserFunction(parser_function=read_object_from_yaml,
streaming_mode=True,
supported_exts={'.yaml','.yml'},
supported_types={AnyObject},
),
# yaml for collection objects
SingleFileParserFunction(parser_function=read_collection_from_yaml,
custom_name='read_collection_from_yaml',
streaming_mode=True,
supported_exts={'.yaml','.yml'},
supported_types={Tuple, Dict, List, Set},
function_args={'conversion_finder': conversion_finder}
)
] | Utility method to return the default parsers able to parse an object from a YAML file.
Note that MultifileObjectParser is not provided in this list, as it is already added in a hardcoded way in
RootParser
:return: |
def pass_feature(*feature_names):
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
for name in feature_names:
kwargs[name] = feature_proxy(name)
return f(*args, **kwargs)
return wrapper
return decorator | Injects a feature instance into the kwargs |
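A short usage sketch of the decorator above; the feature name 'database' and the handler are illustrative assumptions:
@pass_feature('database')
def handler(request, database=None):
    # `database` is injected as a feature proxy by the decorator
    return database.lookup(request)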
def extract_tar(url, target_dir, additional_compression="", remove_common_prefix=False, overwrite=False):
try:
if not os.path.exists(target_dir):
os.makedirs(target_dir)
tf = tarfile.TarFile.open(fileobj=download_to_bytesio(url))
if not os.path.exists(target_dir):
os.makedirs(target_dir)
common_prefix = os.path.commonprefix(tf.getnames())
if not common_prefix.endswith('/'):
common_prefix += "/"
for tfile in tf.getmembers():
if remove_common_prefix:
tfile.name = tfile.name.replace(common_prefix, "", 1)
if tfile.name != "":
target_path = os.path.join(target_dir, tfile.name)
if target_path != target_dir and os.path.exists(target_path):
if overwrite:
remove_path(target_path)
else:
continue
tf.extract(tfile, target_dir)
except OSError:
e = sys.exc_info()[1]
raise ExtractException(str(e))
except IOError:
e = sys.exc_info()[1]
raise ExtractException(str(e)) | Extract a tar archive and install it to the target directory |
def remove_path(target_path):
if os.path.isdir(target_path):
shutil.rmtree(target_path)
else:
os.unlink(target_path) | Delete the target path |
def ids(cls, values, itype=None):
    '''
    http://www.elasticsearch.org/guide/reference/query-dsl/ids-filter.html
    Filters documents that only have the provided ids. Note, this filter does not require the _id field to be indexed since it works using the _uid field.
    '''
    instance = cls(ids={'values': values})
    if itype is not None:
        instance['ids']['type'] = itype
    return instance | http://www.elasticsearch.org/guide/reference/query-dsl/ids-filter.html
Filters documents that only have the provided ids. Note, this filter does not require the _id field to be indexed since it works using the _uid field. |
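A usage sketch of the ids filter; the call style mirrors the geo_distance example further below and the values are illustrative:
id_filter = ElasticFilter().ids([1, 4, 100], itype='my_type')
# builds {'ids': {'values': [1, 4, 100], 'type': 'my_type'}}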
def geo_distance(cls, field, center, distance, distance_type=None):
    '''
    http://www.elasticsearch.org/guide/reference/query-dsl/geo-distance-filter.html
    Filters documents that include only hits that exists within a specific distance from a geo point.
    field - Field name
    center - Center point (Geo point)
    distance - String for the distance
    distance_type - (arc | plane) How to compute the distance. Can either be arc (better precision) or plane (faster). Defaults to arc
    > bounds = ElasticFilter().geo_distance('pin.location', [40.73, -74.1], '300km')
    '''
    instance = cls(geo_distance={'distance': distance, field: center})
    if distance_type is not None:
        instance['geo_distance']['distance_type'] = distance_type
    return instance | http://www.elasticsearch.org/guide/reference/query-dsl/geo-distance-filter.html
Filters documents that include only hits that exists within a specific distance from a geo point.
field - Field name
center - Center point (Geo point)
distance - String for the distance
distance_type - (arc | plane) How to compute the distance. Can either be arc (better precision) or plane (faster). Defaults to arc
> bounds = ElasticFilter().geo_distance('pin.location', [40.73, -74.1], '300km') |
def geo_distance_range(cls, field, center, from_distance, to_distance, distance_type=None):
    '''
    http://www.elasticsearch.org/guide/reference/query-dsl/geo-distance-range-filter.html
    Filters documents that exists within a range from a specific point
    '''
    instance = cls(geo_distance_range={'from': from_distance, 'to': to_distance, field: center})
    if distance_type is not None:
        instance['geo_distance_range']['distance_type'] = distance_type
    return instance | http://www.elasticsearch.org/guide/reference/query-dsl/geo-distance-range-filter.html
Filters documents that exists within a range from a specific point |
def numeric_range(cls, field, from_value, to_value, include_lower=None, include_upper=None):
    '''
    http://www.elasticsearch.org/guide/reference/query-dsl/numeric-range-filter.html
    Filters documents with fields that have values within a certain numeric range. Similar to range filter, except that it works only with numeric values, and the filter execution works differently.
    '''
    instance = cls(numeric_range={field: {'from': from_value, 'to': to_value}})
    if include_lower is not None:
        instance['numeric_range'][field]['include_lower'] = include_lower
    if include_upper is not None:
        instance['numeric_range'][field]['include_upper'] = include_upper
    return instance | http://www.elasticsearch.org/guide/reference/query-dsl/numeric-range-filter.html
Filters documents with fields that have values within a certain numeric range. Similar to range filter, except that it works only with numeric values, and the filter execution works differently. |
def range(cls, field, from_value=None, to_value=None, include_lower=None, include_upper=None):
    '''
    http://www.elasticsearch.org/guide/reference/query-dsl/range-filter.html
    Filters documents with fields that have terms within a certain range. Similar to range query, except that it acts as a filter. Can be placed within queries that accept a filter.
    '''
    instance = cls({'range': {field: {}}})
    if from_value is not None:
        instance['range'][field]['from'] = from_value
    if to_value is not None:
        instance['range'][field]['to'] = to_value
    if include_lower is not None:
        instance['range'][field]['include_lower'] = include_lower
    if include_upper is not None:
        instance['range'][field]['include_upper'] = include_upper
    return instance | http://www.elasticsearch.org/guide/reference/query-dsl/range-filter.html
Filters documents with fields that have terms within a certain range. Similar to range query, except that it acts as a filter. Can be placed within queries that accept a filter. |
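A usage sketch of the range filter above, with an illustrative field and bounds:
age_filter = ElasticFilter().range('age', from_value=18, to_value=65, include_upper=False)
# builds {'range': {'age': {'from': 18, 'to': 65, 'include_upper': False}}}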
def save(self, obj, id_code):
filestream = open('{0}/{1}'.format(self.data_path, id_code), 'wb')  # binary mode is required for pickle
pickle.dump(obj, filestream)
filestream.close() | Save an object, and use id_code in the filename
obj - any object
id_code - unique identifier |
def load(self, id_code):
filestream = open('{0}/{1}'.format(self.data_path, id_code), 'rb')
workflow = pickle.load(filestream)
return workflow | Loads a workflow identified by id_code
id_code - unique identifier, previously must have called save with same id_code |
def init(self):
if os.path.isdir(self.path):
raise InvalidTodoFile
if os.path.exists(self.path):
with open(self.path, 'r') as f:
tls = [tl.strip() for tl in f if tl]
todos = list(map(_todo_from_file, tls))  # materialize so the iterator is not exhausted by the loop below
self.todos = todos
for todo in todos:
if self.current_max_idx < todo['idx']:
self.current_max_idx = todo['idx']
else:
logger.warning('No todo file found, initializing an empty todo file')
with open(self.path, 'w') as f:
f.flush() | Init the `todo` file.
If the file exists, initialize self.todos from it
and record the current max index of todos:
when a new todo is added, its `idx` is `self.current_max_idx + 1` |
def _show(self, status=None, idx=None):
_show('', 50)
if not self.todos:
self._show_no_todos()
elif idx is not None:
for todo in self.todos:
if todo['idx'] == idx:
self._show_todos(todo)
elif status is not None:
if status not in STATUS_CODE:
raise InvalidTodoStatus
_todos = []
for todo in self.todos:
if todo['status'] == status:
_todos.append(todo)
if not _todos:
self._show_no_todos(text_fix='No {} todos...'.format(
STATUS_CODE.get(status, None)))
else:
for todo in _todos:
self._show_todos(todo)
else:
for todo in self.todos:
self._show_todos(todo)
_show('', 50) | Show formatted todos.
:param status: which status's todos to show.
Default is None, meaning show all |
def write(self, delete_if_empty=False):
with open(self.path, 'w') as f:
if not self.todos:
f.flush()
else:
for todo in _todo_to_file(self.todos):
f.write(todo) | flush todos to file
:param delete_if_empty: delete if todo is empty |
def read_object_from_pickle(desired_type: Type[T], file_path: str, encoding: str,
fix_imports: bool = True, errors: str = 'strict', *args, **kwargs) -> Any:
import pickle
file_object = open(file_path, mode='rb')
try:
return pickle.load(file_object, fix_imports=fix_imports, encoding=encoding, errors=errors)
finally:
file_object.close() | Parses a pickle file.
:param desired_type:
:param file_path:
:param encoding:
:param fix_imports:
:param errors:
:param args:
:param kwargs:
:return: |
def should_display_warnings_for(to_type):
if not hasattr(to_type, '__module__'):
return True
elif to_type.__module__ in {'builtins'} or to_type.__module__.startswith('parsyfiles') \
or to_type.__name__ in {'DataFrame'}:
return False
elif issubclass(to_type, int) or issubclass(to_type, str) \
or issubclass(to_type, float) or issubclass(to_type, bool):
return False
else:
return True | Central method where we control whether warnings should be displayed |
def _is_valid_for_dict_to_object_conversion(strict_mode: bool, from_type: Type, to_type: Type) -> bool:
# cache previous results
try:
res, subclasses_hash = _cache_valid_for_dict_to_object[to_type][strict_mode]
# Check if any new subclasses are available
if not strict_mode and to_type is not None and not is_any_type(to_type):
if hash(tuple(get_all_subclasses(to_type))) != subclasses_hash:
raise KeyError('fake error to recompute the cache entry')
except KeyError:
res = __is_valid_for_dict_to_object_conversion(strict_mode=strict_mode, from_type=from_type, to_type=to_type)
# Store an entry in the cache containing the result and the hash of the subclasses list
subclasses_hash = None
if not strict_mode and to_type is not None and not is_any_type(to_type):
subclasses_hash = hash(tuple(get_all_subclasses(to_type)))
entry = (res, subclasses_hash)
try:
_cache_valid_for_dict_to_object[to_type][strict_mode] = entry
except KeyError:
_cache_valid_for_dict_to_object[to_type] = {strict_mode: entry}
return res | Returns true if the provided types are valid for dict_to_object conversion
Explicitly declare that we are not able to parse collections, nor able to create an object from a dictionary, if the
object's constructor is not correctly PEP484-specified.
None should be treated as a Joker here (but we know that from_type and to_type will never both be None at the same time)
:param strict_mode:
:param from_type:
:param to_type:
:return: |
def dict_to_object(desired_type: Type[T], contents_dict: Dict[str, Any], logger: Logger,
options: Dict[str, Dict[str, Any]], conversion_finder: ConversionFinder = None,
is_dict_of_dicts: bool = False) -> T:
check_var(desired_type, var_types=type, var_name='obj_type')
check_var(contents_dict, var_types=dict, var_name='contents_dict')
if is_collection(desired_type, strict=True):
# if the destination type is 'strictly a collection' (not a subclass of a collection) we know that we can't
# handle it here, the constructor is not pep484-typed
raise TypeError('Desired object type \'' + get_pretty_type_str(desired_type) + '\' is a collection, '
'so it cannot be created using this generic object creator')
else:
# Try the type itself
# try:
return _dict_to_object(desired_type, contents_dict, logger=logger, options=options,
conversion_finder=conversion_finder, is_dict_of_dicts=is_dict_of_dicts) | Utility method to create an object from a dictionary of constructor arguments. Constructor arguments that don't have
the correct type are intelligently converted if possible
:param desired_type:
:param contents_dict:
:param logger:
:param options:
:param conversion_finder:
:param is_dict_of_dicts:
:return: |
def print_dict(dict_name, dict_value, logger: Logger = None):
if logger is None:
print(dict_name + ' = ')
try:
from pprint import pprint
pprint(dict_value)
except:
print(dict_value)
else:
logger.info(dict_name + ' = ')
try:
from pprint import pformat
logger.info(pformat(dict_value))
except:
logger.info(dict_value) | Utility method to print a named dictionary
:param dict_name:
:param dict_value:
:return: |
def get_default_object_parsers(parser_finder: ParserFinder, conversion_finder: ConversionFinder) -> List[AnyParser]:
return [SingleFileParserFunction(parser_function=read_object_from_pickle,
streaming_mode=False,
supported_exts={'.pyc'},
supported_types={AnyObject}),
MultifileObjectParser(parser_finder, conversion_finder)
] | Utility method to return the default parsers able to parse an object from a file.
Note that MultifileObjectParser is not provided in this list, as it is already added in a hardcoded way in
RootParser
:return: |
def get_default_object_converters(conversion_finder: ConversionFinder) \
-> List[Union[Converter[Any, Type[None]], Converter[Type[None], Any]]]:
return [
ConverterFunction(from_type=b64str, to_type=AnyObject, conversion_method=base64_ascii_str_pickle_to_object),
ConverterFunction(from_type=DictOfDict, to_type=Any, conversion_method=dict_to_object,
custom_name='dict_of_dict_to_object',
is_able_to_convert_func=_is_valid_for_dict_to_object_conversion, unpack_options=False,
function_args={'conversion_finder': conversion_finder, 'is_dict_of_dicts': True}),
ConverterFunction(from_type=dict, to_type=AnyObject, conversion_method=dict_to_object,
custom_name='dict_to_object', unpack_options=False,
is_able_to_convert_func=_is_valid_for_dict_to_object_conversion,
function_args={'conversion_finder': conversion_finder, 'is_dict_of_dicts': False})
] | Utility method to return the default converters associated to dict (from dict to other type,
and from other type to dict)
:return: |
def create(obj: PersistedObject, obj_type: Type[Any], arg_name: str):
return MissingMandatoryAttributeFiles('Multifile object ' + str(obj) + ' cannot be built from constructor of '
'type ' + get_pretty_type_str(obj_type) +
', mandatory constructor argument \'' + arg_name + '\' was not found on '
'filesystem') | Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param obj:
:param obj_type:
:param arg_name:
:return: |
def create(item_type: Type[Any], constructor_atts: List[str], invalid_property_name: str):
return InvalidAttributeNameForConstructorError('Cannot parse object of type <' + get_pretty_type_str(item_type)
+ '> using the provided configuration file: configuration '
+ 'contains a property name (\'' + invalid_property_name + '\')'\
+ ' that is not an attribute of the object constructor. <'
+ get_pretty_type_str(item_type) + '> constructor attributes '
+ 'are : ' + str(constructor_atts)) | Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param item_type:
:return: |
def create(item_type: Type[Any], constructor_args: Dict[str, Any], cause: Exception):
return ObjectInstantiationException('Error while building object of type <' + get_pretty_type_str(item_type)
+ '> using its constructor and parsed contents : ' + str(constructor_args)
+ ' : \n' + str(cause.__class__) + ' ' + str(cause)
).with_traceback(cause.__traceback__) | Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param item_type:
:return: |
def create(desired_type: Type[Any], contents_dict: Dict, caught: Exception):
msg = 'Error while trying to instantiate object of type ' + str(desired_type) + ' using dictionary input_dict:'\
+ ' Caught error message is : ' + caught.__class__.__name__ + ' : ' + str(caught) + '\n'
try:
from pprint import pformat
msg += 'Dict provided was ' + pformat(contents_dict)
except:
msg += 'Dict provided was ' + str(contents_dict)
return CaughtTypeErrorDuringInstantiation(msg).with_traceback(caught.__traceback__) | Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param desired_type:
:param contents_dict:
:param caught:
:return: |
def is_able_to_parse_detailed(self, desired_type: Type[Any], desired_ext: str, strict: bool):
if not _is_valid_for_dict_to_object_conversion(strict, None, None if desired_type is JOKER else desired_type):
return False, None
else:
return super(MultifileObjectParser, self).is_able_to_parse_detailed(desired_type, desired_ext, strict) | Explicitly declare that we are not able to parse collections
:param desired_type:
:param desired_ext:
:param strict:
:return: |
def _get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject, desired_type: Type[Any],
logger: Logger) -> Dict[str, Any]:
if is_collection(desired_type, strict=True):
# if the destination type is 'strictly a collection' (not a subclass of a collection) we know that we can't
# handle it here, the constructor is not pep484-typed
raise TypeError('Desired object type \'' + get_pretty_type_str(desired_type) + '\' is a collection, '
'so it cannot be parsed with this default object parser')
else:
# First get the file children
children_on_fs = obj_on_fs.get_multifile_children()
# Try the type itself
# try:
return self.__get_parsing_plan_for_multifile_children(obj_on_fs, desired_type, children_on_fs,
logger=logger) | Simply inspects the required type to find the names and types of its constructor arguments.
Then relies on the inner ParserFinder to parse each of them.
:param obj_on_fs:
:param desired_type:
:param logger:
:return: |
def parsyfiles_global_config(multiple_errors_tb_limit: int = None, full_paths_in_logs: bool = None,
dict_to_object_subclass_limit: int = None):
if multiple_errors_tb_limit is not None:
GLOBAL_CONFIG.multiple_errors_tb_limit = multiple_errors_tb_limit
if full_paths_in_logs is not None:
GLOBAL_CONFIG.full_paths_in_logs = full_paths_in_logs
if dict_to_object_subclass_limit is not None:
GLOBAL_CONFIG.dict_to_object_subclass_limit = dict_to_object_subclass_limit | This is the method you should use to configure the parsyfiles library
:param multiple_errors_tb_limit: the traceback size (default is 3) of individual parsers exceptions displayed when
parsyfiles tries several parsing chains and all of them fail.
:param full_paths_in_logs: if True, full file paths will be displayed in logs. Otherwise only the parent path will
be displayed and children paths will be indented (default is False)
:param dict_to_object_subclass_limit: the number of subclasses that the <dict_to_object> converter will try, when
instantiating an object from a dictionary. Default is 50
:return: |
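A quick usage sketch of the configuration call above; the parameter values are illustrative and the top-level import path is an assumption:
from parsyfiles import parsyfiles_global_config
parsyfiles_global_config(multiple_errors_tb_limit=5, full_paths_in_logs=True, dict_to_object_subclass_limit=100)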
def is_valid(self, context):
if self.requires:
for r in self.requires:
if not r in context.executed_actions:
raise RequirementMissingError("Action '%s' requires '%s'" % (self.name, r))
return True | Checks through the context's executed_actions whether the required actions have
been executed |
def get_file_contents(file_path):
full_path = os.path.join(package_dir, file_path)
return open(full_path, 'r').read() | Get the contents of the file using the full path name |
def refresh(self):
# new_device = {}
if self.type in CONST.BINARY_SENSOR_TYPES:
response = self._lupusec.get_sensors()
for device in response:
if device['device_id'] == self._device_id:
self.update(device)
return device
elif self.type == CONST.ALARM_TYPE:
response = self._lupusec.get_panel()
self.update(response)
return response
elif self.type == CONST.TYPE_POWER_SWITCH:
response = self._lupusec.get_power_switches()
for pss in response:
if pss['device_id'] == self._device_id:
self.update(pss)
return pss | Refresh a device |
def update(self, json_state):
if self._type in CONST.BINARY_SENSOR_TYPES:
self._json_state['status'] = json_state['status']
else:
self._json_state.update(
{k: json_state[k] for k in json_state if self._json_state.get(k)}) | Update the json data from a dictionary.
Only updates if it already exists in the device. |
def desc(self):
return '{0} (ID: {1}) - {2} - {3}'.format(
self.name, self.device_id, self.type, self.status) | Get a short description of the device. |
def list(declared, undeclared):
queues = current_queues.queues.values()
if declared:
queues = filter(lambda queue: queue.exists, queues)
elif undeclared:
queues = filter(lambda queue: not queue.exists, queues)
queue_names = [queue.routing_key for queue in queues]
queue_names.sort()
for queue in queue_names:
click.secho(queue) | List configured queues. |
def declare(queues):
current_queues.declare(queues=queues)
click.secho(
'Queues {} have been declared.'.format(
queues or current_queues.queues.keys()),
fg='green'
) | Initialize the given queues. |
def purge_queues(queues=None):
current_queues.purge(queues=queues)
click.secho(
'Queues {} have been purged.'.format(
queues or current_queues.queues.keys()),
fg='green'
) | Purge the given queues. |
def delete_queue(queues):
current_queues.delete(queues=queues)
click.secho(
'Queues {} have been deleted.'.format(
queues or current_queues.queues.keys()),
fg='green'
) | Delete the given queues. |
def find_needed_formatter(input_format, output_format):
#Only take the formatters in the registry
selected_registry = [re.cls for re in registry if re.category==RegistryCategories.formatters]
needed_formatters = []
for formatter in selected_registry:
#Initialize the formatter (needed so it can discover its formats)
formatter_inst = formatter()
if input_format in formatter_inst.input_formats and output_format in formatter_inst.output_formats:
needed_formatters.append(formatter)
if len(needed_formatters)>0:
return needed_formatters[0]
return None | Find a data formatter given an input and output format
input_format - needed input format. see utils.input.dataformats
output_format - needed output format. see utils.input.dataformats |
def find_needed_input(input_format):
needed_inputs = [re.cls for re in registry if re.category==RegistryCategories.inputs and re.cls.input_format == input_format]
if len(needed_inputs)>0:
return needed_inputs[0]
return None | Find a needed input class
input_format - needed input format, see utils.input.dataformats |
def exists_in_registry(category, namespace, name):
selected_registry = [re for re in registry if re.category==category and re.namespace==namespace and re.name == name]
if len(selected_registry)>0:
return True
return False | See if a given category, namespace, name combination exists in the registry
category - See registrycategories. Type of module
namespace - Namespace of the module, defined in settings
name - the lowercase name of the module |
def register(cls):
registry_entry = RegistryEntry(category = cls.category, namespace = cls.namespace, name = cls.name, cls=cls)
if registry_entry not in registry and not exists_in_registry(cls.category, cls.namespace, cls.name):
registry.append(registry_entry)
else:
log.warn("Class {0} already in registry".format(cls)) | Register a given model in the registry |
def _set_fields(self):
self.fields = []
self.required_input = []
for member_name, member_object in inspect.getmembers(self.__class__):
if inspect.isdatadescriptor(member_object) and not member_name.startswith("__"):
self.fields.append(member_name)
if member_object.required_input:
self.required_input.append(member_name) | Initialize the fields for data caching. |
def subscriber(address,topics,callback,message_type):
return Subscriber(address,topics,callback,message_type) | Creates a subscriber binding to the given address and
subscribing to the given topics.
The callback is invoked for every message received.
Args:
- address: the address to bind the PUB socket to.
- topics: the topics to subscribe
- callback: the callback to invoke for every message. Must accept 2 variables - topic and message
- message_type: the type of message to receive |
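A usage sketch of the helper above; the address, topic and the JSON message-type constant (defined elsewhere in this module) are assumptions for illustration:
def on_message(topic, message):
    print(topic, message)

sub = subscriber('tcp://127.0.0.1:5555', ['weather'], on_message, JSON)
sub.start()  # start() spawns the consuming thread, as shown in the next entry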
def start(self):
t=threading.Thread(target=self._consume)
t.start() | Start a thread that consumes the messages and invokes the callback |
def _get_forecast(api_result: dict) -> List[SmhiForecast]:
forecasts = []
# Need the ordered dict to get
# the days in order in next stage
forecasts_ordered = OrderedDict()
forecasts_ordered = _get_all_forecast_from_api(api_result)
# Used to calc the daycount
day_nr = 1
for day in forecasts_ordered:
forecasts_day = forecasts_ordered[day]
if day_nr == 1:
# Add the most recent forecast
forecasts.append(copy.deepcopy(forecasts_day[0]))
total_precipitation = float(0.0)
forecast_temp_max = -100.0
forecast_temp_min = 100.0
forecast = None
for forcast_day in forecasts_day:
temperature = forcast_day.temperature
if forecast_temp_min > temperature:
forecast_temp_min = temperature
if forecast_temp_max < temperature:
forecast_temp_max = temperature
if forcast_day.valid_time.hour == 12:
forecast = copy.deepcopy(forcast_day)
total_precipitation = total_precipitation + \
forcast_day._total_precipitation
if forecast is None:
# We passed 12 noon, set to current
forecast = forecasts_day[0]
forecast._temperature_max = forecast_temp_max
forecast._temperature_min = forecast_temp_min
forecast._total_precipitation = total_precipitation
forecast._mean_precipitation = total_precipitation/24
forecasts.append(forecast)
day_nr = day_nr + 1
return forecasts | Converts results from the API to a list of SmhiForecast objects |
def get_forecast_api(self, longitude: str, latitude: str) -> {}:
api_url = APIURL_TEMPLATE.format(longitude, latitude)
response = urlopen(api_url)
data = response.read().decode('utf-8')
json_data = json.loads(data)
return json_data | Gets data from the API |
async def async_get_forecast_api(self, longitude: str,
latitude: str) -> {}:
api_url = APIURL_TEMPLATE.format(longitude, latitude)
if self.session is None:
self.session = aiohttp.ClientSession()
async with self.session.get(api_url) as response:
if response.status != 200:
raise SmhiForecastException(
"Failed to access weather API with status code {}".format(
response.status)
)
data = await response.text()
return json.loads(data) | Gets data from the API asynchronously |
def get_forecast(self) -> List[SmhiForecast]:
json_data = self._api.get_forecast_api(self._longitude, self._latitude)
return _get_forecast(json_data) | Returns a list of forecasts. The first in the list is the current one |
async def async_get_forecast(self) -> List[SmhiForecast]:
json_data = await self._api.async_get_forecast_api(self._longitude,
self._latitude)
return _get_forecast(json_data) | Returns a list of forecasts. The first in the list is the current one |
def _make_decorator(measuring_func):
def _decorator(name = None, metric = call_default):
def wrapper(func):
name_ = name if name is not None else func.__module__ + '.' +func.__name__
class instrument_decorator(object): # must be a class for descriptor magic to work
@wraps(func)
def __call__(self, *args, **kwargs):
return measuring_func(func(*args, **kwargs), name_, metric)
def __get__(self, instance, class_):
name_ = name if name is not None else\
".".join((class_.__module__, class_.__name__, func.__name__))
@wraps(func)
def wrapped_method(*args, **kwargs):
return measuring_func(func(instance, *args, **kwargs), name_, metric)
return wrapped_method
return instrument_decorator()
return wrapper
return _decorator | morass of closures for making decorators/descriptors |
def all(iterable = None, *, name = None, metric = call_default):
if iterable is None:
return _iter_decorator(name, metric)
else:
return _do_all(iterable, name, metric) | Measure total time and item count for consuming an iterable
:arg iterable: any iterable
:arg function metric: f(name, count, total_time)
:arg str name: name for the metric |
def each(iterable = None, *, name = None, metric = call_default):
if iterable is None:
return _each_decorator(name, metric)
else:
return _do_each(iterable, name, metric) | Measure time elapsed to produce each item of an iterable
:arg iterable: any iterable
:arg function metric: f(name, 1, time)
:arg str name: name for the metric |
def first(iterable = None, *, name = None, metric = call_default):
if iterable is None:
return _first_decorator(name, metric)
else:
return _do_first(iterable, name, metric) | Measure time elapsed to produce first item of an iterable
:arg iterable: any iterable
:arg function metric: f(name, 1, time)
:arg str name: name for the metric |
def _iterable_to_varargs_method(func):
def wrapped(self, *args, **kwargs):
return func(self, args, **kwargs)
return wrapped | decorator to convert a method taking an iterable to a *args one |
def _varargs_to_iterable_method(func):
def wrapped(self, iterable, **kwargs):
return func(self, *iterable, **kwargs)
return wrapped | decorator to convert a *args method to one taking an iterable |
def producer(*, name = None, metric = call_default):
def wrapper(func):
def instrumenter(name_, *args, **kwargs):
t = time.time()
try:
ret = func(*args, **kwargs)
except Exception:
# record a metric for other exceptions, than raise
metric(name_, 0, time.time() - t)
raise
else:
# normal path, record metric & return
metric(name_, len(ret), time.time() - t)
return ret
name_ = name if name is not None else func.__module__ + '.' +func.__name__
class instrument_decorator(object): # must be a class for descriptor magic to work
@wraps(func)
def __call__(self, *args, **kwargs):
return instrumenter(name_, *args, **kwargs)
def __get__(self, instance, class_):
name_ = name if name is not None else\
".".join((class_.__module__, class_.__name__, func.__name__))
@wraps(func)
def wrapped_method(*args, **kwargs):
return instrumenter(name_, instance, *args, **kwargs)
return wrapped_method
return instrument_decorator()
return wrapper | Decorator to measure a function that produces many items.
The function should return an object that supports ``__len__`` (ie, a
list). If the function returns an iterator, use :func:`all` instead.
:arg function metric: f(name, count, total_time)
:arg str name: name for the metric |
def block(*, name = None, metric = call_default, count = 1):
t = time.time()
try:
yield
finally:
metric(name, count, time.time() - t) | Context manager to measure execution time of a block
:arg function metric: f(name, 1, time)
:arg str name: name for the metric
:arg int count: user-supplied number of items, defaults to 1 |
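A usage sketch of the block context manager above, assuming it is wrapped with contextlib.contextmanager (the yield-based body suggests so); my_metric and flush_database are placeholder names following the f(name, count, time) signature:
def my_metric(name, count, elapsed):
    print(name, count, elapsed)

with block(name='db.flush', metric=my_metric):
    flush_database()  # hypothetical function being measured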
def import_from_string(import_string):
import_split = import_string.split(".")
import_class = import_split[-1]
module_path = ".".join(import_split[:-1])
mod = __import__(module_path, fromlist=[import_class])
klass = getattr(mod, import_class)
return klass | Import a class from a string
import_string - string path to module to import using dot notation (foo.bar) |
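A usage sketch of import_from_string:
OrderedDict = import_from_string("collections.OrderedDict")
d = OrderedDict(a=1)  # the returned object is the class itself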
def send(self,message,message_type,topic=''):
if message_type == RAW:
self._sock.send(message)
elif message_type == PYOBJ:
self._sock.send_pyobj(message)
elif message_type == JSON:
self._sock.send_json(message)
elif message_type == MULTIPART:
self._sock.send_multipart([topic, message])
elif message_type == STRING:
self._sock.send_string(message)
elif message_type == UNICODE:
self._sock.send_unicode(message)
else:
raise Exception("Unknown message type %s"%(message_type,)) | Send the message on the socket.
Args:
- message: the message to publish
- message_type: the type of message being sent
- topic: the topic on which to send the message. Defaults to ''. |
def receive(self,message_type):
topic = None
message = None
if message_type == RAW:
message = self._sock.recv(flags=zmq.NOBLOCK)
elif message_type == PYOBJ:
message = self._sock.recv_pyobj(flags=zmq.NOBLOCK)
elif message_type == JSON:
message = self._sock.recv_json(flags=zmq.NOBLOCK)
elif message_type == MULTIPART:
data = self._sock.recv_multipart(flags=zmq.NOBLOCK)
message = data[1]
topic = data[0]
elif message_type == STRING:
message = self._sock.recv_string(flags=zmq.NOBLOCK)
elif message_type == UNICODE:
message = self._sock.recv_unicode(flags=zmq.NOBLOCK)
else:
raise Exception("Unknown message type %s"%(self._message_type,))
return (topic, message) | Receive the message of the specified type and return it
Args:
- message_type: the type of the message to receive
Returns:
- the topic of the message
- the message received from the socket |
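A usage sketch pairing send() and receive(); `sock` stands for an instance of the wrapper class shown above and JSON is the module's message-type constant (both assumptions here):
sock.send({'temp': 21.5}, JSON)
topic, msg = sock.receive(JSON)  # non-blocking; zmq raises zmq.Again when nothing is pending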
def __get_subscript(self, name, ctx=None):
assert isinstance(name, string_types), name
return ast.Subscript(
value=ast.Name(id=self.data_var, ctx=ast.Load()),
slice=ast.Index(value=ast.Str(s=name)),
ctx=ctx) | Returns `<data_var>["<name>"]` |
def __get_subscript_assign(self, name):
return ast.Assign(
targets=[self.__get_subscript(name, ast.Store())],
value=ast.Name(id=name, ctx=ast.Load())) | Returns `<data_var>["<name>"] = <name>`. |
def __get_subscript_delete(self, name):
return ast.Delete(targets=[self.__get_subscript(name, ast.Del())]) | Returns `del <data_var>["<name>"]`. |
def __visit_target(self, node):
if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store):
self.__add_variable(node.id)
elif isinstance(node, (ast.Tuple, ast.List)):
[self.__visit_target(x) for x in node.elts] | Call this method to visit assignment targets and to add local variables
to the current stack frame. Used in #visit_Assign() and
#__visit_comprehension(). |
def __get_package_manager(self):
package_manager = ""
args = ""
sudo_required = True
if system.is_osx():
package_manager = "brew"
sudo_required = False
args = " install"
elif system.is_debian():
package_manager = "apt-get"
args = " -y install"
elif system.is_fedora():
package_manager = "yum"
args = " install"
elif system.is_arch():
package_manager = "pacman"
args = " --noconfirm -S"
if lib.which(package_manager) is None:
self.logger.warn("Package manager %s not installed! Packages will not be installed."
% package_manager)
self.package_manager = None
return  # bail out: the detected package manager is not installed
self.package_manager = package_manager
self.sudo_required = sudo_required
self.args = args | Detects and verifies the system package manager |
def eval_expr(expr, context):
if isinstance(expr, list):
rv = []
for item in expr:
rv.append(eval_expr(item, context))
return rv
if isinstance(expr, dict):
rv = {}
for k, v in expr.iteritems():
rv[k] = eval_expr(v, context)
kwargs = rv.pop("__kwargs", None)
if kwargs:
rv.update(kwargs)
return rv
if isinstance(expr, Expression):
return expr.eval(context)
return expr | Recursively evaluates a compiled expression using the specified context.
Dict instances can contain a "__kwargs" key which will be used to update the
dict with its content |
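A usage sketch of eval_expr; the Expression instances and the context object are placeholder assumptions based on the surrounding code:
compiled = {'name': Expression('user.name'), '__kwargs': Expression('user.extra')}
result = eval_expr(compiled, context)
# 'user.extra' must evaluate to a dict; its items are merged into `result`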
def can_convert(strict: bool, from_type: Type[S], to_type: Type[T]):
if (to_type is not None) and (to_type not in (all_primitive_types + all_np_primitive_types)):
return False
else:
return True | None should be treated as a Joker here (but we know that from_type and to_type will never both be None at the same time)
:param strict:
:param from_type:
:param to_type:
:return: |
def parse(self, data, doctype):
    '''
    Parse an input string, and return an AST
    doctype must have WCADocument as a baseclass
    '''
    self.doctype = doctype
    self.lexer.lineno = 0
    del self.errors[:]
    del self.warnings[:]
    self.lexer.lexerror = False
    ast = self.parser.parse(data, lexer=self.lexer)
    if self.lexer.lexerror:
        ast = None
    if ast is None:
        self.errors.append("Couldn't build AST.")
    else:
        for check in self.sema[self.doctype]:
            visitor = check()
            if not visitor.visit(ast):
                self.errors.append("Couldn't visit AST.")
            self.errors.extend(visitor.errors)
            self.warnings.extend(visitor.warnings)
    return (ast, list(self.errors), list(self.warnings)) | Parse an input string, and return an AST
doctype must have WCADocument as a baseclass |
def _act_on_list(self, lhs):
    '''
    Act on the following rule :
    items : items item
        | item
    '''
    lhs[0] = []
    if len(lhs) == 3:
        lhs[0] = lhs[1]
    # lhs[len(lhs)-1] may be different from lhs[-1]
    # Yacc use some internal method to get the element, see yacc.py:240
    item = lhs[len(lhs) - 1]
    if item:
        lhs[0].append(item) | Act on the following rule :
items : items item
| item |
def p_content(self, content):
    '''content : TITLE opttexts VERSION opttexts sections
    | TITLE STATESTAG VERSION opttexts states_sections'''
    content[0] = self.doctype(content[1], content[3], content[4], content[5])
    if self.toc:
        self.toc.set_articles([a for a in content[0].sections if isinstance(a, Article)]) | content : TITLE opttexts VERSION opttexts sections
| TITLE STATESTAG VERSION opttexts states_sections |
def p_text(self, text):
    '''text : TEXT PARBREAK
    | TEXT
    | PARBREAK'''
    item = text[1]
    text[0] = item if item[0] != "\n" else u""
    if len(text) > 2:
        text[0] += "\n" | text : TEXT PARBREAK
| TEXT
| PARBREAK |
def p_toc(self, toc):
    '''toc : HEADERSEC opttexts TOC opttexts'''
    toc[0] = TableOfContent(toc[1], toc[2], [])
    self.toc = toc[0] | toc : HEADERSEC opttexts TOC opttexts |
def p_article(self, article):
    '''article : ARTICLEHEADER opttexts rules opttexts'''
    article[0] = Article(article[1][4], article[2], article[3], article[1][0],
                         article[1][1], article[1][2], article[1][3], article[1][5]) | article : ARTICLEHEADER opttexts rules opttexts |
def p_regularsec(self, regularsec):
    '''regularsec : HEADERSEC opttexts optsubsections'''
    texts = []
    sections = regularsec[2]
    if len(regularsec) > 3:
        texts = regularsec[2]
        sections = regularsec[3]
    regularsec[0] = Section(regularsec[1], texts, sections) | regularsec : HEADERSEC opttexts optsubsections |
def p_subsection(self, subsection):
    '''subsection : HEADERSUBSEC texts
    | HEADERSUBSEC texts labeldecls opttexts'''
    content = subsection[3] if len(subsection) > 3 else []
    subsection[0] = Subsection(subsection[1], subsection[2], content) | subsection : HEADERSUBSEC texts
| HEADERSUBSEC texts labeldecls opttexts |
def p_state(self, state):
    '''state : STATE opttexts'''
    state[0] = State(state[1][0], state[1][1], state[1][2], state[1][3], state[2]) | state : STATE opttexts |
def p_error(self, elem):
    '''Handle syntax error'''
    self.errors.append("Syntax error on line " + str(self.lexer.lineno)
                       + ". Got unexpected token " + elem.type) | Handle syntax error |
def set_progress_brackets(self, start, end):
self.sep_start = start
self.sep_end = end | Set brackets to set around a progress bar. |
def add_progress(self, count, symbol='#',
color=None, on_color=None, attrs=None):
chunk = _ProgressChunk(count, symbol, color, on_color, attrs)
self._progress_chunks.append(chunk) | Add a section of progress to the progressbar.
The progress is captured by "count" and displayed as a fraction
of the statusbar width proportional to this count over the total
progress displayed. The progress will be displayed using the "symbol"
character and the foreground and background colours and display style
determined by the "color", "on_color" and "attrs" parameters.
These parameters work as the termcolor.colored function. |
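A usage sketch of add_progress with termcolor-style arguments; the `bar` instance is a hypothetical progress bar object:
bar.add_progress(30, symbol='#', color='green')
bar.add_progress(5, symbol='-', color='red', attrs=['bold'])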
def format_progress(self, width):
chunk_widths = self._get_chunk_sizes(width)
progress_chunks = [chunk.format_chunk(chunk_width)
for (chunk, chunk_width)
in zip(self._progress_chunks, chunk_widths)]
return "{sep_start}{progress}{sep_end}".format(
sep_start=self.sep_start,
progress="".join(progress_chunks),
sep_end=self.sep_end
) | Create the formatted string that displays the progress. |
def summary_width(self):
chunk_counts = [chunk.count for chunk in self._progress_chunks]
numbers_width = sum(max(1, ceil(log10(count + 1)))
for count in chunk_counts)
separators_with = len(chunk_counts) - 1
return numbers_width + separators_with | Calculate how long a string is needed to show a summary string.
This is not simply the length of the formatted summary string
since that string might contain ANSI codes. |
def format_summary(self):
chunks = [chunk.format_chunk_summary()
for chunk in self._progress_chunks]
return "/".join(chunks) | Generate a summary string for the progress bar. |
def add_progress(self, count, symbol='#',
color=None, on_color=None, attrs=None):
self._progress.add_progress(count, symbol, color, on_color, attrs) | Add a section of progress to the progressbar.
The progress is captured by "count" and displayed as a fraction
of the statusbar width proportional to this count over the total
progress displayed. The progress will be displayed using the "symbol"
character and the foreground and background colours and display style
determined by the "fg", "bg" and "style" parameters. For these,
use the colorama package to set up the formatting. |
def format_status(self, width=None,
label_width=None,
progress_width=None,
summary_width=None):
if width is None: # pragma: no cover
width = shutil.get_terminal_size()[0]
if label_width is None:
label_width = len(self.label)
if summary_width is None:
summary_width = self.summary_width()
if progress_width is None:
progress_width = width - label_width - summary_width - 2
if len(self.label) > label_width:
# FIXME: This actually *will* break if we ever have fewer than
# three characters assigned to format the label, but that would
# be an extreme situation so I won't fix it just yet.
label = self.label[:label_width - 3] + "..."
else:
label_format = "{{label:{fill_char}<{width}}}".format(
width=label_width,
fill_char=self.fill_char)
label = label_format.format(label=self.label)
summary_format = "{{:>{width}}}".format(width=summary_width)
summary = summary_format.format(self._progress.format_summary())
progress = self._progress.format_progress(width=progress_width)
return "{label} {progress} {summary}".format(
label=label,
progress=progress,
summary=summary
) | Generate the formatted status bar string. |
def add_status_line(self, label):
status_line = StatusBar(label,
self._sep_start, self._sep_end,
self._fill_char)
self._lines.append(status_line)
return status_line | Add a status bar line to the table.
This function returns the status bar and it can be modified
from this return value. |
def calculate_field_widths(self, width=None,
min_label_width=10,
min_progress_width=10):
if width is None: # pragma: no cover
width = shutil.get_terminal_size()[0]
summary_width = self.summary_width()
label_width = self.label_width()
remaining = width - summary_width - label_width - 2
if remaining >= min_progress_width:
progress_width = remaining
else:
progress_width = min_progress_width
remaining = width - summary_width - progress_width - 2
if remaining >= min_label_width:
label_width = remaining
else:
label_width = min_label_width
return (label_width, progress_width, summary_width) | Calculate how wide each field should be so we can align them.
We always find room for the summaries since these are short and
packed with information. If possible, we will also find room for
labels, but if this would make the progress bar width shorter than
the specified minimum then we will shorten the labels, though never
below the minimum there. If this means we have bars that are too wide
for the terminal, then your terminal needs to be wider. |
def format_table(self, width=None,
min_label_width=10, min_progress_width=10):
# handle the special case of an empty table.
if len(self._lines) == 0:
return []
if width is None: # pragma: no cover
width = shutil.get_terminal_size()[0]
labelw, progw, summaryw = self.calculate_field_widths(
width=width,
min_label_width=min_label_width,
min_progress_width=min_progress_width
)
output = [
sb.format_status(
label_width=labelw,
progress_width=progw,
summary_width=summaryw
)
for sb in self._lines
]
return output | Format the entire table of progress bars.
The function first computes the widths of the fields so they can be
aligned across lines and then returns formatted lines as a list of
strings. |
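A usage sketch of the table API above; the container class name is a placeholder assumption, and only the methods shown in this section are used:
table = StatusTable()  # hypothetical name for the class that owns add_status_line()/format_table()
line = table.add_status_line("Download")
line.add_progress(80, symbol='#', color='green')
line.add_progress(20, symbol='.', color='red')
for row in table.format_table(width=79):
    print(row)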
def create_log_dict(request, response):
remote_addr = request.META.get('REMOTE_ADDR')
if remote_addr in getattr(settings, 'INTERNAL_IPS', []):
remote_addr = request.META.get(
'HTTP_X_FORWARDED_FOR') or remote_addr
user_email = "-"
if hasattr(request, 'user'):
user_email = getattr(request.user, 'email', '-')
if response.streaming:
content_length = 'streaming'
else:
content_length = len(response.content)
return {
# 'event' makes event-based filtering possible in logging backends
# like logstash
'event': settings.LOGUTILS_LOGGING_MIDDLEWARE_EVENT,
'remote_address': remote_addr,
'user_email': user_email,
'method': request.method,
'url': request.get_full_path(),
'status': response.status_code,
'content_length': content_length,
'request_time': -1, # NA value: real value added by LoggingMiddleware
} | Create a dictionary with logging data. |
def create_log_message(log_dict, use_sql_info=False, fmt=True):
log_msg = (
"%(remote_address)s %(user_email)s %(method)s %(url)s %(status)d "
"%(content_length)d (%(request_time).2f seconds)"
)
if use_sql_info:
sql_time = sum(
float(q['time']) for q in connection.queries) * 1000
extra_log = {
'nr_queries': len(connection.queries),
'sql_time': sql_time}
log_msg += " (%(nr_queries)d SQL queries, %(sql_time)f ms)"
log_dict.update(extra_log)
return log_msg % log_dict if fmt else log_msg | Create the logging message string. |
def process_response(self, request, response):
try:
log_dict = create_log_dict(request, response)
# add the request time to the log_dict; if no start time is
# available, use -1 as NA value
request_time = (
time.time() - self.start_time if hasattr(self, 'start_time')
and self.start_time else -1)
log_dict.update({'request_time': request_time})
is_request_time_too_high = (
request_time > float(settings.LOGUTILS_REQUEST_TIME_THRESHOLD))
use_sql_info = settings.DEBUG or is_request_time_too_high
log_msg = create_log_message(log_dict, use_sql_info, fmt=False)
if is_request_time_too_high:
logger.warning(log_msg, log_dict, extra=log_dict)
else:
logger.info(log_msg, log_dict, extra=log_dict)
except Exception as e:
logger.exception(e)
return response | Create the logging message. |
def synchronized(obj):
if hasattr(obj, 'synchronizable_condition'):
return obj.synchronizable_condition
elif callable(obj):
@functools.wraps(obj)
def wrapper(self, *args, **kwargs):
with self.synchronizable_condition:
return obj(self, *args, **kwargs)
return wrapper
else:
raise TypeError('expected Synchronizable instance or callable to decorate') | This function has two purposes:
1. Decorate a function that automatically synchronizes access to the object
passed as the first argument (usually `self`, for member methods)
2. Synchronize access to the object, used in a `with`-statement.
Note that you can use #wait(), #notify() and #notify_all() only on
synchronized objects.
# Example
```python
class Box(Synchronizable):
def __init__(self):
self.value = None
@synchronized
def get(self):
return self.value
@synchronized
def set(self, value):
self.value = value
box = Box()
box.set('foobar')
with synchronized(box):
box.value = 'taz\'dingo'
print(box.get())
```
# Arguments
obj (Synchronizable, function): The object to synchronize access to, or a
function to decorate.
# Returns
1. The decorated function.
2. The value of `obj.synchronizable_condition`, which should implement the
context-manager interface (to be used in a `with`-statement). |
def wait(obj, timeout=None):
if timeout is None:
return obj.synchronizable_condition.wait()
else:
return obj.synchronizable_condition.wait(timeout) | Wait until *obj* gets notified with #notify() or #notify_all(). If a timeout
is specified, the function can return without the object being notified if
the time runs out.
Note that you can only use this function on #synchronized() objects.
# Arguments
obj (Synchronizable): An object that can be synchronized.
timeout (number, None): The number of seconds to wait for the object to get
notified before returning. If no value or the value #None is specified,
the function will wait indefinitely. |
def wait_for_condition(obj, cond, timeout=None):
with synchronized(obj):
if timeout is None:
while not cond(obj):
wait(obj)
else:
t_start = time.time()
while not cond(obj):
t_delta = time.time() - t_start
if t_delta >= timeout:
return False
wait(obj, timeout - t_delta)
return True | This is an extended version of #wait() that applies the function *cond* to
check for a condition to break free from waiting on *obj*. Note that *obj*
must be notified when its state changes in order to check the condition.
Note that access to *obj* is synchronized when *cond* is called.
# Arguments
obj (Synchronizable): The object to synchronize and wait for *cond*.
cond (function): A function that accepts *obj* as an argument. Must return
#True if the condition is met.
timeout (number, None): The maximum number of seconds to wait.
# Returns
bool: #True if the condition was met, #False if not and a timeout occurred. |
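A usage sketch of wait_for_condition, reusing the Box example from the synchronized() docstring above:
box = Box()
ok = wait_for_condition(box, lambda b: b.value is not None, timeout=5.0)
# returns True once box is notified with a non-None value, False on timeout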
def as_completed(jobs):
    ''' Generator function that yields the jobs in order of their
    completion. Attaches a new listener to each job. '''
    jobs = tuple(jobs)
    event = threading.Event()
    callback = lambda f, ev: event.set()
    [job.add_listener(Job.SUCCESS, callback, once=True) for job in jobs]
    [job.add_listener(Job.ERROR, callback, once=True) for job in jobs]
    while jobs:
        event.wait()
        event.clear()
        jobs, finished = split_list_by(jobs, lambda x: x.finished)
        for job in finished:
            yield job | Generator function that yields the jobs in order of their
completion. Attaches a new listener to each job. |
completion. Attaches a new listener to each job. |