Dataset columns: signature (8 – 3.44k chars), body (0 – 1.41M chars), docstring (1 – 122k chars), id (5 – 17 chars).
@property
def head_values(self):
    values = set()
    for head in self._heads:
        values.add(head.value)
    return values
Return set of the head values
f299:c1:m2
@property
def head_count(self):
    return len(self._heads)
Get head count
f299:c1:m3
def add_head(self, head):
    if not isinstance(head, DependencyNode):
        raise TypeError('<STR_LIT>')
    self._heads.append(head)
Add head Node
f299:c1:m5
def fail(self):
    raise ValidationError(self.message)
Raise a validation error with the message.
f300:c1:m1
def validate(self, unused_value, unused_handler):
    raise NotImplementedError
Override this to implement validation logic. Return the validated value or call self.fail() to raise a ValidationError.
f300:c1:m2
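Taken together, fail() and validate() define the validator contract used by the parsing helpers below. A minimal sketch of a concrete validator, assuming a base class shaped like the two methods above (NonEmpty and its message text are hypothetical):

class ValidationError(Exception):
    pass

class Validator(object):
    # Hypothetical base class mirroring fail()/validate() above.
    message = 'Invalid value'

    def fail(self):
        raise ValidationError(self.message)

    def validate(self, value, handler):
        raise NotImplementedError

class NonEmpty(Validator):
    message = 'Value must not be empty'

    def validate(self, value, handler):
        # Return the validated value, or raise ValidationError via fail().
        if not value:
            self.fail()
        return value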
def _fetch_arguments(handler, method):
    if method.__name__ == '<STR_LIT>':
        arguments = {}
        for key, value in six.iteritems(handler.request.arguments):
            if isinstance(value, list):
                arguments[key] = ','.join(value)
            else:
                arguments[key] = value
    else:
        arguments = handler.get_post_arguments()
    return arguments
Get the arguments depending on the type of HTTP method.
f301:m0
def _apply_validator_chain(chain, value, handler):
    if hasattr(chain, '<STR_LIT>'):
        chain = [chain]
    for validator in chain:
        if hasattr(validator, '<STR_LIT>'):
            value = validator.validate(value, handler)
        else:
            raise web.HTTPError(<NUM_LIT>)
    return value
Apply validators in sequence to a value.
f301:m1
def _parse_arguments(self, method, parameters):
    arguments = _fetch_arguments(self, method)
    arg_dict = {}
    errors = []
    for key, properties in parameters:
        if key in arguments:
            value = arguments[key]
            try:
                arg_dict[key] = _apply_validator_chain(
                    properties.get('<STR_LIT>', []), value, self)
            except validators.ValidationError as err:
                errors.append(err)
        else:
            if properties.get('<STR_LIT>', False):
                raise web.HTTPError(
                    <NUM_LIT>,
                    ('<STR_LIT>'
                     % (key, ))
                )
            else:
                if properties.get('default', None) is not None:
                    arg_dict[key] = properties['default']
                else:
                    arg_dict[key] = None
    if errors:
        raise web.HTTPError(<NUM_LIT>, '<STR_LIT>' % len(errors))
    return arg_dict
Parse arguments to method, returning a dictionary.
f301:m2
def parse(parameters):
    @decorators.include_original
    def decorate(method):
        """<STR_LIT>"""
        def call(self, *args):
            """<STR_LIT>"""
            kwargs = _parse_arguments(self, method, parameters)
            return method(self, *args, **kwargs)
        return call
    return decorate
Decorator to parse parameters according to a set of criteria. This outer method is called to set up the decorator.

Arguments:
    parameters: an array of parameter declaration tuples in the format:
        ('<param_name>', {'validate': [<ValidatorClass>, ...], <options...>})

Usage:
    @chassis.util.parameters.parse([
        ('email', {'validators': [validators.Email], 'required': True}),
        ('password', {'validators': [validators.Password], 'required': True})
    ])
    def post(self, email=None, password=None):
        # Render JSON for the provided parameters
        self.render_json({'email': email, 'password': password})
f301:m3
def parse_dict(parameters):
    @decorators.include_original
    def decorate(method):
        """<STR_LIT>"""
        def call(self, *args):
            """<STR_LIT>"""
            arg_dict = _parse_arguments(self, method, parameters)
            return method(self, *args, data=arg_dict)
        return call
    return decorate
Decorator to parse parameters as a dict according to a set of criteria. This outer method is called to set up the decorator.

Arguments:
    parameters: an array of parameter declaration tuples in the format:
        ('<param_name>', {'validate': [<ValidatorClass>, ...], <options...>})

Usage:
    @chassis.util.parameters.parse_dict([
        ('email', {'validators': [validators.Email], 'required': True}),
        ('password', {'validators': [validators.Password], 'required': True})
    ])
    def post(self, data):
        # Render JSON for the provided parameters
        self.render_json({'email': data['email'],
                          'password': data['password']})
f301:m4
def default(self, obj):
    if hasattr(obj, '<STR_LIT>'):
        return obj.strftime("<STR_LIT>")
    elif hasattr(obj, '<STR_LIT>'):
        return obj.get_public_dict()
    else:
        return json.JSONEncoder.default(self, obj)
Use the default behavior unless the object to be encoded has a `strftime` attribute or a `get_public_dict` method.
f302:c0:m0
def include_original(dec):
    def meta_decorator(method):
        """<STR_LIT>"""
        decorator = dec(method)
        decorator._original = method
        return decorator
    return meta_decorator
Decorate decorators so they include a copy of the original function.
f303:m0
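A short usage sketch of include_original as defined above; the logged decorator and add function are hypothetical:

@include_original
def logged(method):
    """Hypothetical decorator that logs each call."""
    def wrapper(*args, **kwargs):
        print('calling %s' % method.__name__)
        return method(*args, **kwargs)
    return wrapper

@logged
def add(a, b):
    return a + b

assert add(1, 2) == 3
assert add._original(1, 2) == 3   # the undecorated function is preserved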
def retrieve_object(cache, template, indexes):
    keys = []
    with cache as redis_connection:
        pipe = redis_connection.pipeline()
        for (result_key, redis_key_template) in template.items():
            keys.append(result_key)
            pipe.get(redis_key_template % indexes)
        results = pipe.execute()
    return None if None in results else dict(zip(keys, results))
Retrieve an object from Redis using a pipeline.

Arguments:
    template: a dictionary containing the keys for the object and template
        strings for the corresponding redis keys. The template string uses
        named string interpolation format. Example:
            {
                'username': 'user:%(id)s:username',
                'email': 'user:%(id)s:email',
                'phone': 'user:%(id)s:phone'
            }
    indexes: a dictionary containing the values used to construct the redis
        keys. Example:
            {'id': 342}

Returns:
    a dictionary with the same keys as template, but containing the values
    retrieved from redis, if all the values are retrieved. If any value is
    missing, returns None. Example:
        {
            'username': 'bob',
            'email': 'bob@example.com',
            'phone': '555-555-5555'
        }
f305:m0
def set_object(cache, template, indexes, data):
    with cache as redis_connection:
        pipe = redis_connection.pipeline()
        for key in set(template.keys()) & set(data.keys()):
            pipe.set(template[key] % indexes, str(data[key]))
        pipe.execute()
Set an object in Redis using a pipeline. Only sets the fields that are present in both the template and the data.

Arguments:
    template: a dictionary containing the keys for the object and template
        strings for the corresponding redis keys. The template string uses
        named string interpolation format. Example:
            {
                'username': 'user:%(id)s:username',
                'email': 'user:%(id)s:email',
                'phone': 'user:%(id)s:phone'
            }
    indexes: a dictionary containing the values used to construct the redis
        keys. Example:
            {'id': 342}
    data: a dictionary containing the data to store. Example:
            {
                'username': 'bob',
                'email': 'bob@example.com',
                'phone': '555-555-5555'
            }
f305:m1
def delete_object(cache, template, indexes):
    with cache as redis_connection:
        pipe = redis_connection.pipeline()
        for key in set(template.keys()):
            pipe.delete(template[key] % indexes)
        pipe.execute()
Delete an object in Redis using a pipeline. Deletes all fields defined by the template.

Arguments:
    template: a dictionary containing the keys for the object and template
        strings for the corresponding redis keys. The template string uses
        named string interpolation format. Example:
            {
                'username': 'user:%(id)s:username',
                'email': 'user:%(id)s:email',
                'phone': 'user:%(id)s:phone'
            }
    indexes: a dictionary containing the values used to construct the redis
        keys. Example:
            {'id': 342}
f305:m2
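The three helpers above round-trip naturally. A usage sketch, assuming cache is a Cache instance usable as a context manager yielding a redis connection (as the bodies above show), with the hypothetical template and indexes from the docstrings:

TEMPLATE = {
    'username': 'user:%(id)s:username',
    'email': 'user:%(id)s:email',
}
indexes = {'id': 342}

set_object(cache, TEMPLATE, indexes,
           {'username': 'bob', 'email': 'bob@example.com'})
user = retrieve_object(cache, TEMPLATE, indexes)
# user == {'username': 'bob', 'email': 'bob@example.com'}, modulo the
# bytes/str return type of the configured redis client
delete_object(cache, TEMPLATE, indexes)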
def multi_get(cache, local_list):
    with cache as redis_connection:
        return redis_connection.mget(local_list)
Get multiple records by a list of keys.

Arguments:
    cache: instance of Cache
    local_list: ['user:342:username', 'user:342:email', 'user:342:phone']
f305:m3
def set_value(cache, key, value):
    with cache as redis_connection:
        return redis_connection.set(key, value)
Set a value by key.

Arguments:
    cache: instance of Cache
    key: 'user:342:username'
f305:m4
def delete_value(cache, *key):
    with cache as redis_connection:
        return redis_connection.delete(*key)
Delete a value by key.

Arguments:
    cache: instance of Cache
    key: 'user:342:username'
f305:m5
def get_value(cache, key):
    with cache as redis_connection:
        return redis_connection.get(key)
Get a value by key.

Arguments:
    cache: instance of Cache
    key: 'user:342:username'
f305:m6
def _close_connection(self):
    pass
Finish up. No need to close the connection: http://stackoverflow.com/questions/12967107/managing-connection-to-redis-from-python
f305:c1:m2
def insert(self, query, params):
    return
Nothing to implement
f305:c1:m3
def detect_circle(nodes):
    if not isinstance(nodes, dict):
        raise TypeError('<STR_LIT>')
    dependencies = set(nodes.keys())
    traveled = []
    heads = _detect_circle(nodes, dependencies, traveled)
    return DependencyTree(heads)
Wrapper for recursive _detect_circle function
f306:m0
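A sketch of how detect_circle consumes a dependency mapping (names mapped to sets of dependency names, as _init_nodes below produces); this relies on the traveled + [name] bookkeeping in _detect_circle:

nodes = {
    'db': set(),
    'cache': set(),
    'api': {'db', 'cache'},
}
tree = detect_circle(nodes)    # acyclic: returns a DependencyTree

nodes['db'].add('api')         # introduce a cycle: db -> api -> db
try:
    detect_circle(nodes)
except CircularDependencyException as err:
    print(err)                 # carries the path that closed the cycle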
def _detect_circle(nodes=None, dependencies=None, traveled=None, path=None):
    if nodes is None:
        nodes = {}
    elif not isinstance(nodes, dict):
        raise TypeError('<STR_LIT>')
    if dependencies is None:
        return
    elif not isinstance(dependencies, set):
        raise TypeError('<STR_LIT>')
    if traveled is None:
        traveled = []
    elif not isinstance(traveled, list):
        raise TypeError('<STR_LIT>')
    if path is None:
        path = []
    if not dependencies:
        return []
    children = []
    for name in dependencies:
        new_path = list(path)
        new_path.append(name)
        if name in traveled:
            raise CircularDependencyException(new_path)
        # Append the name itself (`traveled + [name]`); `list(name)` would
        # split the name string into individual characters.
        node_children = _detect_circle(nodes=nodes,
                                       dependencies=nodes[name],
                                       traveled=traveled + [name],
                                       path=new_path)
        node = DependencyNode(name)
        for child in node_children:
            child.parent = node
            node.add_child(child)
        children.append(node)
    return children
Recursively iterate over nodes checking if we've traveled to that node before.
f306:m1
def solve(nodes):
    if not isinstance(nodes, dict):
        raise TypeError('<STR_LIT>')
    return detect_circle(nodes)
Solve graph into Solution
f306:m2
def is_dependency_name(name):
    if not isinstance(name, str):
        return False
    return name[0:1] == '@'
Return True if the name is of the form "@some_string".
f306:m3
def __init__(self, config, scalars=None):
    if scalars is None:
        scalars = {}
    self._nodes = {}
    self._config = config
    self._factory = ServiceFactory(scalars)
    self._init_nodes(config)
Initialize Resolver
f306:c1:m0
@property
def nodes(self):
    return self._nodes
Return nodes
f306:c1:m1
def do(self):
    if not self._nodes:
        return
    node_copy = dict(self._nodes)
    self._do(node_copy)
    return self._factory.get_instantiated_services()
Instantiate Services
f306:c1:m2
def _do(self, nodes):
    if not isinstance(nodes, dict):
        raise TypeError('<STR_LIT>')
    if not nodes:
        return
    starting_num_nodes = len(nodes)
    newly_instantiated = set()
    for (name, dependency_set) in six.iteritems(nodes):
        if dependency_set:
            continue
        config = self._config[name]
        service = self._factory.create_from_dict(config)
        self._factory.add_instantiated_service(name, service)
        newly_instantiated.add(name)
    if not newly_instantiated:
        raise Exception('<STR_LIT>')
    for name in newly_instantiated:
        del nodes[name]
    if starting_num_nodes == len(nodes):
        raise Exception('<STR_LIT>')
    for (name, dependency_set) in six.iteritems(nodes):
        nodes[name] = dependency_set.difference(newly_instantiated)
    self._do(nodes)
Recursive method to instantiate services
f306:c1:m3
def _init_nodes(self, config):
    if not isinstance(config, dict):
        raise TypeError('<STR_LIT>')
    for (name, conf) in six.iteritems(config):
        args = [] if 'args' not in conf else conf['args']
        kwargs = {} if '<STR_LIT>' not in conf else conf['<STR_LIT>']
        dependencies = set()
        arg_deps = self._get_dependencies_from_args(args)
        kwarg_deps = self._get_dependencies_from_kwargs(kwargs)
        dependencies.update(arg_deps)
        dependencies.update(kwarg_deps)
        self._nodes[name] = dependencies
Gathers dependency sets onto _nodes
f306:c1:m4
def _get_dependencies_from_args(self, args):
    if not isinstance(args, list):
        raise TypeError('<STR_LIT>')
    dependency_names = set()
    for arg in args:
        new_names = self._check_arg(arg)
        dependency_names.update(new_names)
    return dependency_names
Parse arguments
f306:c1:m5
def _get_dependencies_from_kwargs(self, args):
    if not isinstance(args, dict):
        raise TypeError('<STR_LIT>')
    dependency_names = set()
    for arg in args.values():
        new_names = self._check_arg(arg)
        dependency_names.update(new_names)
    return dependency_names
Parse keyed arguments
f306:c1:m6
def _check_arg(self, arg):
    if isinstance(arg, list):
        return self._get_dependencies_from_args(arg)
    elif isinstance(arg, dict):
        return self._get_dependencies_from_kwargs(arg)
    if not is_dependency_name(arg):
        return set()
    return set([arg[1:]])
Check individual argument (list/tuple/string/etc)
f306:c1:m7
def is_arg_scalar(arg):
    return arg[:1] == '$'
Returns true if arg starts with a dollar sign
f307:m0
def is_arg_service(arg):
    return arg[:1] == '@'
Returns true if arg starts with an at symbol
f307:m1
def _check_type(name, obj, expected_type):
    if not isinstance(obj, expected_type):
        raise TypeError(
            '<STR_LIT>' % (name, expected_type.__name__)
        )
Raise a TypeError if object is not of expected type
f307:m2
def _import_module(module_name):
    fromlist = []
    dot_position = module_name.rfind('.')
    if dot_position > -1:
        fromlist.append(
            module_name[dot_position + 1:len(module_name)]
        )
    module = __import__(module_name, globals(), locals(), fromlist, 0)
    return module
Imports the module dynamically.

_import_module('foo.bar') calls:
    __import__('foo.bar', globals(), locals(), ['bar'], 0)
f307:m3
def _verify_create_args(module_name, class_name, static):
    if module_name is None:
        raise InvalidServiceConfiguration(
            '<STR_LIT>'
        )
    if not static and class_name is None:
        tmpl0 = '<STR_LIT>'
        tmpl1 = '<STR_LIT>'
        raise InvalidServiceConfiguration((tmpl0 + tmpl1) % module_name)
Verifies a subset of the arguments to create()
f307:m4
def create(self, module_name, class_name,
           args=None, kwargs=None, factory_method=None,
           factory_args=None, factory_kwargs=None, static=False,
           calls=None):
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    if factory_args is None:
        factory_args = []
    if factory_kwargs is None:
        factory_kwargs = {}
    if static is None:
        static = False
    _verify_create_args(module_name, class_name, static)
    module = _import_module(module_name)
    service_obj = self._instantiate(module, class_name,
                                    args, kwargs, static)
    if factory_method is not None:
        service_obj = self._handle_factory_method(service_obj,
                                                  factory_method,
                                                  factory_args,
                                                  factory_kwargs)
    if calls is not None and isinstance(calls, list):
        self._handle_calls(service_obj, calls)
    return service_obj
Initializes an instance of the service
f307:c2:m1
def create_from_dict(self, dictionary):
    args = []
    kwargs = {}
    factory_method = None
    factory_args = []
    factory_kwargs = {}
    static = False
    calls = None
    if 'args' in dictionary:
        args = dictionary['args']
    if '<STR_LIT>' in dictionary:
        kwargs = dictionary['<STR_LIT>']
    if '<STR_LIT>' in dictionary:
        factory_method = dictionary['<STR_LIT>']
    if '<STR_LIT>' in dictionary:
        factory_args = dictionary['<STR_LIT>']
    if '<STR_LIT>' in dictionary:
        factory_kwargs = dictionary['<STR_LIT>']
    if '<STR_LIT>' in dictionary:
        static = dictionary['<STR_LIT>']
    if '<STR_LIT>' in dictionary:
        calls = dictionary['<STR_LIT>']
    return self.create(
        dictionary['<STR_LIT>'],
        dictionary['class'],
        args=args,
        kwargs=kwargs,
        factory_method=factory_method,
        factory_args=factory_args,
        factory_kwargs=factory_kwargs,
        static=static,
        calls=calls
    )
Initializes an instance from a dictionary blueprint
f307:c2:m2
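A hypothetical blueprint for create_from_dict. The 'class' and 'args' keys appear in the source; the module key name is elided there and assumed here, the '$level' scalar reference follows is_arg_scalar above, and ServiceFactory is assumed to accept the scalars mapping as Resolver.__init__ above suggests:

blueprint = {
    'module': 'logging',               # key name is an assumption
    'class': 'Logger',
    'args': ['my-service', '$level'],  # '$level' resolves to a scalar
}
factory = ServiceFactory({'level': 10})
service = factory.create_from_dict(blueprint)
# roughly equivalent to logging.Logger('my-service', 10)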
def add_instantiated_service(self, name, service):
    self.instantiated_services[name] = service
Add an instantiated service by name
f307:c2:m3
def get_instantiated_services(self):
    return self.instantiated_services
Get instantiated services
f307:c2:m4
def get_instantiated_service(self, name):
    if name not in self.instantiated_services:
        raise UninstantiatedServiceException
    return self.instantiated_services[name]
Get instantiated service by name
f307:c2:m5
def _replace_service_arg(self, name, index, args):
    args[index] = self.get_instantiated_service(name)
Replace index in list with service
f307:c2:m6
def _replace_service_kwarg(self, key, kwarg):
    kwarg[key] = self.get_instantiated_service(key)
Replace key in dictionary with service
f307:c2:m7
def _replace_scalars_in_args(self, args):
    _check_type('args', args, list)
    new_args = []
    for arg in args:
        if isinstance(arg, list):
            to_append = self._replace_scalars_in_args(arg)
        elif isinstance(arg, dict):
            to_append = self._replace_scalars_in_kwargs(arg)
        elif isinstance(arg, string_types):
            to_append = self._replace_scalar(arg)
        else:
            to_append = arg
        new_args.append(to_append)
    return new_args
Replace scalars in arguments list
f307:c2:m8
def _replace_scalars_in_kwargs(self, kwargs):
    _check_type('<STR_LIT>', kwargs, dict)
    new_kwargs = {}
    for (name, value) in iteritems(kwargs):
        if isinstance(value, list):
            new_kwargs[name] = self._replace_scalars_in_args(value)
        elif isinstance(value, dict):
            new_kwargs[name] = self._replace_scalars_in_kwargs(value)
        elif isinstance(value, string_types):
            new_kwargs[name] = self._replace_scalar(value)
        else:
            new_kwargs[name] = value
    return new_kwargs
Replace scalars in keyed arguments dictionary
f307:c2:m9
def _replace_services_in_args(self, args):
    _check_type('args', args, list)
    new_args = []
    for arg in args:
        if isinstance(arg, list):
            new_args.append(self._replace_services_in_args(arg))
        elif isinstance(arg, dict):
            new_args.append(self._replace_services_in_kwargs(arg))
        elif isinstance(arg, string_types):
            new_args.append(self._replace_service(arg))
        else:
            new_args.append(arg)
    return new_args
Replace service references in arguments list
f307:c2:m10
def _replace_services_in_kwargs(self, kwargs):
    _check_type('<STR_LIT>', kwargs, dict)
    new_kwargs = {}
    for (name, value) in iteritems(kwargs):
        if isinstance(value, list):
            new_kwargs[name] = self._replace_services_in_args(value)
        elif isinstance(value, dict):
            new_kwargs[name] = self._replace_services_in_kwargs(value)
        elif isinstance(value, string_types):
            new_kwargs[name] = self._replace_service(value)
        else:
            new_kwargs[name] = value
    return new_kwargs
Replace service references in keyed arguments dictionary
f307:c2:m11
def get_scalar_value(self, name):
    if name not in self.scalars:
        raise InvalidServiceConfiguration(
            '<STR_LIT>' % name
        )
    new_value = self.scalars.get(name)
    return new_value
Get scalar value by name
f307:c2:m12
def _replace_scalar(self, scalar):
    if not is_arg_scalar(scalar):
        return scalar
    name = scalar[1:]
    return self.get_scalar_value(name)
Replace scalar name with scalar value
f307:c2:m13
def _replace_service(self, service):
    if not is_arg_service(service):
        return service
    return self.get_instantiated_service(service[1:])
Replace service name with service instance
f307:c2:m14
def _instantiate(self, module, class_name,
                 args=None, kwargs=None, static=None):
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    if static is None:
        static = False
    _check_type('args', args, list)
    _check_type('<STR_LIT>', kwargs, dict)
    if static and class_name is None:
        return module
    if static and class_name is not None:
        return getattr(module, class_name)
    service_obj = getattr(module, class_name)
    args = self._replace_scalars_in_args(args)
    kwargs = self._replace_scalars_in_kwargs(kwargs)
    args = self._replace_services_in_args(args)
    kwargs = self._replace_services_in_kwargs(kwargs)
    return service_obj(*args, **kwargs)
Instantiates a class if provided
f307:c2:m15
def _handle_factory_method(self, service_obj, method_name,
                           args=None, kwargs=None):
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    _check_type('args', args, list)
    _check_type('<STR_LIT>', kwargs, dict)
    new_args = self._replace_scalars_in_args(args)
    new_kwargs = self._replace_scalars_in_kwargs(kwargs)
    return getattr(service_obj, method_name)(*new_args, **new_kwargs)
Returns an object returned from a factory method
f307:c2:m16
def _handle_calls(self, service_obj, calls):
    for call in calls:
        method = call.get('<STR_LIT>')
        args = call.get('args', [])
        kwargs = call.get('<STR_LIT>', {})
        _check_type('args', args, list)
        _check_type('<STR_LIT>', kwargs, dict)
        if method is None:
            raise InvalidServiceConfiguration(
                '<STR_LIT>'
            )
        new_args = self._replace_scalars_in_args(args)
        new_kwargs = self._replace_scalars_in_kwargs(kwargs)
        getattr(service_obj, method)(*new_args, **new_kwargs)
Performs method calls on service object
f307:c2:m17
def _get_connection(self):
    raise NotImplementedError
Override this method to set up the connection.
f308:c1:m1
def _close_connection(self):
    raise NotImplementedError
Override this method to close the connection.
f308:c1:m2
def insert(self, query, params):
    raise NotImplementedError
Override this method to implement the insert.
f308:c1:m3
def serialize_email_messages(messages: List[EmailMessage]):
    return [
        base64.b64encode(zlib.compress(pickle.dumps(m, protocol=4))).decode()
        for m in messages
    ]
Serialize EmailMessages to be passed as task argument. Pickle is used because serializing an EmailMessage to json can be a bit tricky and would probably break if Django modifies the structure of the object in the future.
f331:m0
def deserialize_email_messages(messages: List[str]):
    return [
        pickle.loads(zlib.decompress(base64.b64decode(m)))
        for m in messages
    ]
Deserialize EmailMessages passed as task argument.
f331:m1
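A round-trip sketch of the two helpers above (requires Django for EmailMessage):

from django.core.mail import EmailMessage

messages = [EmailMessage(subject='Hi', body='Hello',
                         to=['bob@example.com'])]
payload = serialize_email_messages(messages)   # list of base64 strings
restored = deserialize_email_messages(payload)
assert restored[0].subject == 'Hi'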
def register_sentry(raven_client, namespace: Optional[str]=None,
                    send_retries: bool=False):
    @signals.job_started.connect_via(namespace)
    def job_started(namespace, job, **kwargs):
        raven_client.context.activate()
        raven_client.transaction.push(job.task_name)

    @signals.job_finished.connect_via(namespace)
    def job_finished(namespace, job, **kwargs):
        raven_client.transaction.pop(job.task_name)
        raven_client.context.clear()

    @signals.job_failed.connect_via(namespace)
    def job_failed(namespace, job, **kwargs):
        raven_client.captureException(
            extra={attr: getattr(job, attr) for attr in job.__slots__}
        )

    if send_retries:
        @signals.job_schedule_retry.connect_via(namespace)
        def job_schedule_retry(namespace, job, **kwargs):
            raven_client.captureException(
                extra={attr: getattr(job, attr) for attr in job.__slots__}
            )
Register the Sentry integration. Exceptions making jobs fail are sent to Sentry.

:param raven_client: configured Raven client used to send errors to Sentry
:param namespace: optionally only register the Sentry integration for a
    particular Spinach :class:`Engine`
:param send_retries: whether to also send to Sentry exceptions resulting in
    a job being retried
f332:m0
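Wiring it up might look like this; the DSN and namespace are placeholders:

from raven import Client

raven_client = Client('https://<key>@sentry.example.com/1')
register_sentry(raven_client, namespace='my-app', send_retries=False)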
def register_datadog(tracer=None, namespace: Optional[str]=None,
                     service: str='<STR_LIT>'):
    if tracer is None:
        from ddtrace import tracer

    @signals.job_started.connect_via(namespace)
    def job_started(namespace, job, **kwargs):
        tracer.trace(
            '<STR_LIT>', service=service, span_type='<STR_LIT>',
            resource=job.task_name
        )

    @signals.job_finished.connect_via(namespace)
    def job_finished(namespace, job, **kwargs):
        root_span = tracer.current_root_span()
        for attr in job.__slots__:
            root_span.set_tag(attr, getattr(job, attr))
        root_span.finish()

    @signals.job_failed.connect_via(namespace)
    def job_failed(namespace, job, **kwargs):
        root_span = tracer.current_root_span()
        root_span.set_traceback()

    @signals.job_schedule_retry.connect_via(namespace)
    def job_schedule_retry(namespace, job, **kwargs):
        root_span = tracer.current_root_span()
        root_span.set_traceback()
Register the Datadog integration. Exceptions making jobs fail are recorded on the corresponding Datadog trace.

:param tracer: optionally use a custom ddtrace Tracer instead of the global
    one.
:param namespace: optionally only register the Datadog integration for a
    particular Spinach :class:`Engine`
:param service: Datadog service associated with the trace, defaults to
    `spinach`
f333:m0
def start(self):
Start the broker. Only needed by arbiter.
f334:c0:m2
def stop(self):
    self._something_happened.set()
Stop the broker. Only needed by arbiter.
f334:c0:m3
@abstractmethod
def register_periodic_tasks(self, tasks: Iterable[Task]):
Register tasks that need to be scheduled periodically.
f334:c0:m7
@abstractmethod
def inspect_periodic_tasks(self) -> List[Tuple[int, str]]:
Get the next periodic task schedule. Used only for debugging and during tests.
f334:c0:m8
@abstractmethod
def enqueue_jobs(self, jobs: Iterable[Job]):
Enqueue a batch of jobs.
f334:c0:m9
@abstractmethod
def remove_job_from_running(self, job: Job):
Remove a job from the list of running ones.
f334:c0:m10
@abstractmethod
def get_jobs_from_queue(self, queue: str, max_jobs: int) -> List[Job]:
Get jobs from a queue.
f334:c0:m11
@abstractmethod
def move_future_jobs(self) -> int:
Move ready jobs from the future queue to their normal queues.

:returns: the number of jobs moved
f334:c0:m12
@abstractmethod
def _get_next_future_job(self) -> Optional[Job]:
Get the next future job.
f334:c0:m13
@property
def next_future_job_delta(self) -> Optional[float]:
    job = self._get_next_future_job()
    if not job:
        return None
    return (job.at - datetime.now(timezone.utc)).total_seconds()
Give the number of seconds before the next future job is due.
f334:c0:m14
@property
@abstractmethod
def next_future_periodic_delta(self) -> Optional[float]:
Give the number of seconds before the next periodic task is due.
f334:c0:m15
@abstractmethod
def flush(self):
Delete everything in the namespace.
f334:c0:m16
def enqueue_jobs(self, jobs: Iterable[Job]):
    for job in jobs:
        if job.should_start:
            job.status = JobStatus.QUEUED
            queue = self._get_queue(job.queue)
            queue.put(job.serialize())
        else:
            with self._lock:
                job.status = JobStatus.WAITING
                self._future_jobs.append(job.serialize())
                self._future_jobs.sort(key=lambda j: Job.deserialize(j).at)
    self._something_happened.set()
Enqueue a batch of jobs.
f335:c0:m2
def register_periodic_tasks(self, tasks: Iterable[Task]):
    for task in tasks:
        self._scheduler.enter(
            int(task.periodicity.total_seconds()),
            0,
            self._schedule_periodic_task,
            argument=(task,)
        )
Register tasks that need to be scheduled periodically.
f335:c0:m4
@property
def next_future_periodic_delta(self) -> Optional[float]:
    try:
        next_event = self._scheduler.queue[0]
    except IndexError:
        return None
    now = time.monotonic()
    next_event_time = next_event[0]
    if next_event_time < now:
        return 0
    return next_event_time - now
Give the number of seconds before the next periodic task is due.
f335:c0:m6
def inspect_periodic_tasks(self) -> List[Tuple[int, str]]:
    return [(int(e[0]), e[3][0].name) for e in self._scheduler.queue]
Get the next periodic task schedule. Used only for debugging and during tests.
f335:c0:m7
def get_jobs_from_queue(self, queue: str, max_jobs: int) -> List[Job]:
    rv = list()
    while len(rv) < max_jobs:
        try:
            job_json_string = self._get_queue(queue).get(block=False)
        except Empty:
            break
        job = Job.deserialize(job_json_string)
        job.status = JobStatus.RUNNING
        rv.append(job)
    return rv
Get jobs from a queue.
f335:c0:m9
def remove_job_from_running(self, job: Job):
    self._something_happened.set()
Remove a job from the list of running ones. Easy, the memory broker doesn't track running jobs. If the broker dies there is nothing we can do.
f335:c0:m11
def _reset(self):
    self._subscriber_thread = None
    self._must_stop = threading.Event()
    self._number_periodic_tasks = 0
Initialization that must happen before the broker is (re)started.
f336:c0:m1
def _load_script(self, filename: str) -> Script:
    with open(path.join(here, '<STR_LIT>', filename), mode='rb') as f:
        script_data = f.read()
    rv = self._r.register_script(script_data)
    if script_data.startswith(b'<STR_LIT>'):
        self._idempotency_protected_scripts.append(rv)
    return rv
Load a Lua script. Read the Lua script file to generate its Script object. If the script starts with a magic string, add it to the list of scripts requiring an idempotency token to execute.
f336:c0:m2
def enqueue_jobs(self, jobs: Iterable[Job]):
    jobs_to_queue = list()
    for job in jobs:
        if job.should_start:
            job.status = JobStatus.QUEUED
        else:
            job.status = JobStatus.WAITING
        jobs_to_queue.append(job.serialize())
    if jobs_to_queue:
        self._run_script(
            self._enqueue_job,
            self._to_namespaced(NOTIFICATIONS_KEY),
            self._to_namespaced(RUNNING_JOBS_KEY.format(self._id)),
            self.namespace,
            self._to_namespaced(FUTURE_JOBS_KEY),
            *jobs_to_queue
        )
Enqueue a batch of jobs.
f336:c0:m4
def get_jobs_from_queue(self, queue: str, max_jobs: int) -> List[Job]:
    jobs_json_string = self._run_script(
        self._get_jobs_from_queue,
        self._to_namespaced(queue),
        self._to_namespaced(RUNNING_JOBS_KEY.format(self._id)),
        JobStatus.RUNNING.value,
        max_jobs
    )
    jobs = json.loads(jobs_json_string.decode())
    jobs = [Job.deserialize(job) for job in jobs]
    return jobs
Get jobs from a queue.
f336:c0:m7
def register_periodic_tasks(self, tasks: Iterable[Task]):
    tasks = [task.serialize() for task in tasks]
    self._number_periodic_tasks = len(tasks)
    self._run_script(
        self._register_periodic_tasks,
        math.ceil(datetime.now(timezone.utc).timestamp()),
        self._to_namespaced(PERIODIC_TASKS_HASH_KEY),
        self._to_namespaced(PERIODIC_TASKS_QUEUE_KEY),
        *tasks
    )
Register tasks that need to be scheduled periodically.
f336:c0:m13
def inspect_periodic_tasks(self) -> List[Tuple[int, str]]:
    rv = self._r.zrangebyscore(
        self._to_namespaced(PERIODIC_TASKS_QUEUE_KEY),
        '<STR_LIT>', '<STR_LIT>', withscores=True
    )
    return [(int(r[1]), r[0].decode()) for r in rv]
Get the next periodic task schedule. Used only for debugging and during tests.
f336:c0:m14
@property
def next_future_periodic_delta(self) -> Optional[float]:
    rv = self._r.zrangebyscore(
        self._to_namespaced(PERIODIC_TASKS_QUEUE_KEY),
        '<STR_LIT>', '<STR_LIT>', start=0, num=1, withscores=True,
        score_cast_func=int
    )
    if not rv:
        return None
    now = datetime.now(timezone.utc).timestamp()
    next_event_time = rv[0][1]
    if next_event_time < now:
        return 0
    return next_event_time - now
Give the number of seconds before the next periodic task is due.
f336:c0:m15
def _reset(self):
    self._arbiter = None
    self._workers = None
    self._working_queue = None
    self._must_stop = threading.Event()
Initialization that must happen before the arbiter is (re)started
f337:c0:m1
@property
def namespace(self) -> str:
    return self._namespace
Namespace the Engine uses.
f337:c0:m2
def attach_tasks(self, tasks: Tasks):
    if tasks._spin is not None and tasks._spin is not self:
        logger.warning('<STR_LIT>')
    self._tasks.update(tasks)
    tasks._spin = self
Attach a set of tasks. A task cannot be scheduled or executed before it is attached to an Engine.

>>> tasks = Tasks()
>>> spin.attach_tasks(tasks)
f337:c0:m3
def schedule(self, task: Schedulable, *args, **kwargs):
    at = datetime.now(timezone.utc)
    return self.schedule_at(task, at, *args, **kwargs)
Schedule a job to be executed as soon as possible.

:arg task: the task or its name to execute in the background
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
f337:c0:m5
def schedule_at(self, task: Schedulable, at: datetime, *args, **kwargs):
    task = self._tasks.get(task)
    job = Job(task.name, task.queue, at, task.max_retries, task_args=args,
              task_kwargs=kwargs)
    return self._broker.enqueue_jobs([job])
Schedule a job to be executed in the future.

:arg task: the task or its name to execute in the background
:arg at: date at which the job should start. It is advised to pass a
    timezone aware datetime to lift any ambiguity. However if a timezone
    naive datetime is given, it will be assumed to contain UTC time.
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function
f337:c0:m6
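A usage sketch, assuming spin is an Engine and 'send_report' is a registered task name:

from datetime import datetime, timedelta, timezone

at = datetime.now(timezone.utc) + timedelta(minutes=5)
spin.schedule_at('send_report', at, user_id=42)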
def schedule_batch(self, batch: Batch):
    jobs = list()
    for task, at, args, kwargs in batch.jobs_to_create:
        task = self._tasks.get(task)
        jobs.append(
            Job(task.name, task.queue, at, task.max_retries,
                task_args=args, task_kwargs=kwargs)
        )
    return self._broker.enqueue_jobs(jobs)
Schedule many jobs at once. Scheduling jobs in batches allows them to be enqueued quickly by avoiding round-trips to the broker.

:arg batch: :class:`Batch` instance containing jobs to schedule
f337:c0:m7
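A sketch, assuming the Batch helper exposes schedule()/schedule_at() methods that fill jobs_to_create (only jobs_to_create is visible in the source):

batch = Batch()
batch.schedule('compute', 1)          # as soon as possible
batch.schedule_at('compute', at, 2)   # at a given datetime
spin.schedule_batch(batch)            # one round-trip to the broker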
def start_workers(self, number: int=DEFAULT_WORKER_NUMBER,
                  queue=DEFAULT_QUEUE, block=True,
                  stop_when_queue_empty=False):
    if self._arbiter or self._workers:
        raise RuntimeError('<STR_LIT>')
    self._working_queue = queue
    tasks_names = '\n'.join(
        ['<STR_LIT>' + task.name for task in self._tasks.tasks.values()
         if task.queue == self._working_queue]
    )
    logger.info('<STR_LIT>',
                number, self._working_queue, tasks_names)
    self._broker.start()
    self._workers = Workers(
        num_workers=number,
        namespace=self.namespace,
    )
    self._result_notifier = threading.Thread(
        target=run_forever,
        args=(self._result_notifier_func, self._must_stop, logger),
        name='<STR_LIT>'.format(self.namespace)
    )
    self._result_notifier.start()
    self._arbiter = threading.Thread(
        target=run_forever,
        args=(self._arbiter_func, self._must_stop, logger,
              stop_when_queue_empty),
        name='<STR_LIT>'.format(self.namespace)
    )
    self._arbiter.start()
    if block:
        with handle_sigterm():
            try:
                self._arbiter.join()
            except KeyboardInterrupt:
                self.stop_workers()
            except AttributeError:
                pass
Start the worker threads.

:arg number: number of worker threads to launch
:arg queue: name of the queue to consume, see :doc:`queues`
:arg block: whether to block the calling thread until a signal arrives and
    workers get terminated
:arg stop_when_queue_empty: automatically stop the workers when the queue
    is empty. Useful mostly for one-off scripts and testing.
f337:c0:m9
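For a one-off script, a non-blocking start paired with stop_workers below might look like:

spin.start_workers(number=2, block=False)   # returns immediately
# ... schedule jobs, do other work ...
spin.stop_workers()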
def stop_workers(self, _join_arbiter=True):
    self._must_stop.set()
    self._workers.stop()
    self._result_notifier.join()
    self._broker.stop()
    if _join_arbiter:
        self._arbiter.join()
    self._reset()
Stop the workers and wait for them to terminate.
f337:c0:m10
def send(self, *sender, **kwargs):
    if len(sender) == 0:
        sender = None
    elif len(sender) > 1:
        raise TypeError('<STR_LIT>'
                        '<STR_LIT>' % len(sender))
    else:
        sender = sender[0]
    if not self.receivers:
        return []
    rv = list()
    for receiver in self.receivers_for(sender):
        try:
            rv.append((receiver, receiver(sender, **kwargs)))
        except Exception:
            logger.exception('<STR_LIT>'
                             '<STR_LIT>'.format(self.name))
    return rv
Emit this signal on behalf of `sender`, passing on kwargs. This is an extension of `Signal.send` that changes one thing: exceptions raised while calling a receiver are logged but do not propagate.
f338:c0:m0
def task(self, func: Optional[Callable]=None, name: Optional[str]=None,
         queue: Optional[str]=None, max_retries: Optional[Number]=None,
         periodicity: Optional[timedelta]=None):
    if func is None:
        return functools.partial(self.task, name=name, queue=queue,
                                 max_retries=max_retries,
                                 periodicity=periodicity)
    self.add(func, name=name, queue=queue, max_retries=max_retries,
             periodicity=periodicity)
    func.task_name = name
    return func
Decorator to register a task function.

:arg name: name of the task, used later to schedule jobs
:arg queue: queue of the task, the default is used if not provided
:arg max_retries: maximum number of retries, the default is used if not
    provided
:arg periodicity: for periodic tasks, delay between executions as a
    timedelta

>>> tasks = Tasks()
>>> @tasks.task(name='foo')
>>> def foo():
...     pass
f339:c1:m5
def add(self, func: Callable, name: Optional[str]=None,
        queue: Optional[str]=None, max_retries: Optional[Number]=None,
        periodicity: Optional[timedelta]=None):
    if not name:
        raise ValueError('<STR_LIT>')
    if name in self._tasks:
        raise ValueError('<STR_LIT>'.format(name))
    if queue is None:
        if self.queue:
            queue = self.queue
        else:
            queue = const.DEFAULT_QUEUE
    if max_retries is None:
        if self.max_retries:
            max_retries = self.max_retries
        else:
            max_retries = const.DEFAULT_MAX_RETRIES
    if periodicity is None:
        periodicity = self.periodicity
    if queue and queue.startswith('_'):
        raise ValueError('<STR_LIT>'
                         '<STR_LIT>')
    self._tasks[name] = Task(func, name, queue, max_retries, periodicity)
Register a task function.

:arg func: a callable to be executed
:arg name: name of the task, used later to schedule jobs
:arg queue: queue of the task, the default is used if not provided
:arg max_retries: maximum number of retries, the default is used if not
    provided
:arg periodicity: for periodic tasks, delay between executions as a
    timedelta

>>> tasks = Tasks()
>>> tasks.add(lambda x: x, name='do_nothing')
f339:c1:m6
def schedule(self, task: Schedulable, *args, **kwargs):
    self._require_attached_tasks()
    self._spin.schedule(task, *args, **kwargs)
Schedule a job to be executed as soon as possible.

:arg task: the task or its name to execute in the background
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function

This method can only be used once tasks have been attached to a Spinach :class:`Engine`.
f339:c1:m8
def schedule_at(self, task: Schedulable, at: datetime, *args, **kwargs):
    self._require_attached_tasks()
    self._spin.schedule_at(task, at, *args, **kwargs)
Schedule a job to be executed in the future.

:arg task: the task or its name to execute in the background
:arg at: date at which the job should start. It is advised to pass a
    timezone aware datetime to lift any ambiguity. However if a timezone
    naive datetime is given, it will be assumed to contain UTC time.
:arg args: args to be passed to the task function
:arg kwargs: kwargs to be passed to the task function

This method can only be used once tasks have been attached to a Spinach :class:`Engine`.
f339:c1:m9