signature | body | docstring | id
---|---|---|---|
def build_rrule_from_dateutil_rrule(rule): | lines = str(rule).splitlines()<EOL>for line in lines:<EOL><INDENT>if line.startswith('<STR_LIT>'):<EOL><INDENT>continue<EOL><DEDENT>if line.startswith('<STR_LIT>'):<EOL><INDENT>line = line[<NUM_LIT:6>:]<EOL><DEDENT>return build_rrule_from_text(line)<EOL><DEDENT> | Build rrule dictionary for vRecur class from a dateutil rrule.
Dateutil's rrule is a popular implementation of rrule in Python.
https://pypi.org/project/python-dateutil/
This is a shortcut to interface between dateutil and icalendar. | f196:m3 |
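The masked literals in this body appear consistent with skipping the DTSTART line and stripping the six-character `RRULE:` prefix from dateutil's string form. A small usage sketch (the DTSTART timestamp will vary):

```python
from dateutil.rrule import rrule, WEEKLY

rule = rrule(freq=WEEKLY, count=4)
for line in str(rule).splitlines():
    print(line)
# Prints something like:
#   DTSTART:20240101T000000
#   RRULE:FREQ=WEEKLY;COUNT=4
# The function above skips the DTSTART line and strips the 6-character
# 'RRULE:' prefix before handing the rest to build_rrule_from_text().
```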
def write(self, outfile, encoding): | cal = Calendar()<EOL>cal.add('<STR_LIT:version>', '<STR_LIT>')<EOL>cal.add('<STR_LIT>', '<STR_LIT>')<EOL>for ifield, efield in FEED_FIELD_MAP:<EOL><INDENT>val = self.feed.get(ifield)<EOL>if val is not None:<EOL><INDENT>cal.add(efield, val)<EOL><DEDENT><DEDENT>self.write_items(cal)<EOL>to_ical = getattr(cal, '<STR_LIT>', None)<EOL>if not to_ical:<EOL><INDENT>to_ical = cal.to_ical<EOL><DEDENT>outfile.write(to_ical())<EOL> | Writes the feed to the specified file in the
specified encoding. | f197:c0:m0 |
def write_items(self, calendar): | for item in self.items:<EOL><INDENT>event = Event()<EOL>for ifield, efield in ITEM_EVENT_FIELD_MAP:<EOL><INDENT>val = item.get(ifield)<EOL>if val is not None:<EOL><INDENT>event.add(efield, val)<EOL><DEDENT><DEDENT>calendar.add_component(event)<EOL><DEDENT> | Write all events to the calendar | f197:c0:m1 |
def main(): | os.environ["<STR_LIT>"] = "<STR_LIT>"<EOL>from django.conf import global_settings<EOL>global_settings.SECRET_KEY = '<STR_LIT>'<EOL>global_settings.TIME_ZONE = '<STR_LIT>'<EOL>global_settings.INSTALLED_APPS = (<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>)<EOL>global_settings.DATABASES = {<EOL>'<STR_LIT:default>': {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>}<EOL>}<EOL>global_settings.MIDDLEWARE_CLASSES = (<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>)<EOL>if django.VERSION > (<NUM_LIT:1>, <NUM_LIT:7>):<EOL><INDENT>django.setup()<EOL><DEDENT>from django.test.utils import get_runner<EOL>test_runner = get_runner(global_settings)<EOL>test_runner = test_runner()<EOL>failures = test_runner.run_tests(['<STR_LIT>'])<EOL>sys.exit(failures)<EOL> | Standalone django model test with a 'memory-only-django-installation'.
You can play with a django model without a complete django app installation.
http://www.djangosnippets.org/snippets/1044/ | f199:m0 |
def _sequence_query(self): | klass = self.__class__<EOL>query = klass.select().where(klass.sequence.is_null(False))<EOL>seq_scope_field_names =(self.__seq_scope_field_name__ or '<STR_LIT>').split('<STR_LIT:U+002C>')<EOL>for name in seq_scope_field_names:<EOL><INDENT>seq_scope_field = getattr(klass, name, None)<EOL>if seq_scope_field:<EOL><INDENT>seq_scope_field_value = getattr(self, name)<EOL>query = query.where(seq_scope_field == seq_scope_field_value)<EOL><DEDENT><DEDENT>return query<EOL> | query all sequence rows | f213:c0:m0 |
def change_sequence(self, new_sequence): | if new_sequence < <NUM_LIT:1>:<EOL><INDENT>raise ValueError("<STR_LIT>") <EOL><DEDENT>self._change_sequence(new_sequence)<EOL> | :param new_sequence: the position to move this object to
The basic reordering approach is to find the objects immediately before
and after the insertion point, and set the dragged object's sequence
value to a value between the two.
Note that current_sequence and new_sequence are both indexes into the
array; do not confuse them with the objects' sequence values. | f213:c0:m3 |
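A hypothetical numeric illustration of the fractional-reordering idea described in the docstring:

```python
# To move an item to array index new_index, pick a sequence value
# strictly between the new neighbours' sequence values.
rows = [10.0, 20.0, 30.0, 40.0]        # existing sequence values, sorted
new_index = 1                          # target slot between rows[0] and rows[1]
prev_seq, next_seq = rows[new_index - 1], rows[new_index]
new_seq = (prev_seq + next_seq) / 2    # 15.0 lies between 10.0 and 20.0
print(new_seq)
```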
def __init__(cls, name, bases, attrs): | super().__init__(name, bases, attrs)<EOL>setattr(cls, '<STR_LIT>', {}) <EOL>for k, v in attrs.items():<EOL><INDENT>if k.startswith('<STR_LIT>') and inspect.isfunction(v):<EOL><INDENT>fn = k[<NUM_LIT:9>:] <EOL>if fn in cls._meta.fields:<EOL><INDENT>cls._validators[fn] = v<EOL><DEDENT><DEDENT><DEDENT> | Store validator. | f214:c0:m0 |
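A minimal standalone sketch of the convention this metaclass implies: methods named `validate_<field>` are collected into a `_validators` mapping (the masked slice offset of 9 matches `len('validate_')`). The original additionally checks that the field exists in `cls._meta.fields`, which this simplified sketch omits:

```python
import inspect

class ValidatorMeta(type):
    # Collect methods named 'validate_<field>' into a _validators dict.
    def __init__(cls, name, bases, attrs):
        super().__init__(name, bases, attrs)
        cls._validators = {}
        for k, v in attrs.items():
            if k.startswith('validate_') and inspect.isfunction(v):
                cls._validators[k[9:]] = v   # 9 == len('validate_')

class Model(metaclass=ValidatorMeta):
    def validate_name(self, value):
        pass

print(Model._validators)  # {'name': <function Model.validate_name ...>}
```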
@classmethod<EOL><INDENT>def create(cls, **query):<DEDENT> | return super().create(**cls._filter_attrs(query))<EOL> | secure create, mass assignment protected | f214:c1:m1 |
def update_with(self, **query): | for k, v in self._filter_attrs(query).items():<EOL><INDENT>setattr(self, k, v)<EOL><DEDENT>return self.save()<EOL> | secure update, mass assignment protected | f214:c1:m2 |
@classmethod<EOL><INDENT>def _filter_attrs(cls, attrs):<DEDENT> | if cls.__attr_whitelist__:<EOL><INDENT>whitelist = cls.__attr_accessible__ - cls.__attr_protected__<EOL>return {k: v for k, v in attrs.items() if k in whitelist}<EOL><DEDENT>else:<EOL><INDENT>blacklist = cls.__attr_protected__ - cls.__attr_accessible__<EOL>return {k: v for k, v in attrs.items() if k not in blacklist}<EOL><DEDENT> | attrs: { attr_name: attr_value }
if __attr_whitelist__ is True:
only attr in __attr_accessible__ AND not in __attr_protected__
will pass
else:
only attr not in __attr_protected__ OR in __attr_accessible__
will pass | f214:c1:m3 |
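A standalone sketch of the whitelist/blacklist set arithmetic described above (the names here are illustrative, not the library's API):

```python
def filter_attrs(attrs, accessible, protected, use_whitelist):
    """Drop attributes that must not be mass-assigned."""
    if use_whitelist:
        allowed = accessible - protected
        return {k: v for k, v in attrs.items() if k in allowed}
    blocked = protected - accessible
    return {k: v for k, v in attrs.items() if k not in blocked}

# Whitelist mode: only 'name' passes.
print(filter_attrs({'name': 'a', 'role': 'admin'}, {'name'}, set(), True))
# Blacklist mode: everything except 'role' passes.
print(filter_attrs({'name': 'a', 'role': 'admin'}, set(), {'role'}, False))
```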
def _validate(self): | errors = {}<EOL>for name, validator in self._validators.items():<EOL><INDENT>value = getattr(self, name)<EOL>try:<EOL><INDENT>validator(self, value)<EOL><DEDENT>except ValidationError as e:<EOL><INDENT>errors[name] = str(e)<EOL><DEDENT><DEDENT>self._validate_errors = errors<EOL> | Validate model data and save errors | f214:c1:m7 |
def validate(self, value): | pass<EOL> | Validate value.
:param value: value to validate
:return None
:raise ValidationError | f215:c1:m0 |
def validate(self, value): | if not self._compiled_regex.match(value):<EOL><INDENT>raise ValidationError(<EOL>'<STR_LIT>'.format(value, self._regex))<EOL><DEDENT> | Validate string by regex
:param value: str
:return: | f215:c4:m1 |
def find_package_data(package): | walk = [(dirpath.replace(package + os.sep, '<STR_LIT>', <NUM_LIT:1>), filenames)<EOL>for dirpath, dirnames, filenames in os.walk(package)]<EOL>filepaths = []<EOL>for base, filenames in walk:<EOL><INDENT>filepaths.extend([os.path.join(base, filename)<EOL>for filename in filenames])<EOL><DEDENT>return filepaths<EOL> | Return all files under the root package that are not in a
package themselves. | f220:m0 |
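A de-tokenized reading of the body above, assuming the masked string literal is the empty string:

```python
import os

def find_package_data(package):
    # Strip the leading "<package>/" from each dirpath, then join it
    # back onto every filename found by os.walk().
    walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
            for dirpath, dirnames, filenames in os.walk(package)]
    filepaths = []
    for base, filenames in walk:
        filepaths.extend(os.path.join(base, filename)
                         for filename in filenames)
    return filepaths

# e.g. find_package_data('mypackage') -> ['__init__.py', 'data/foo.txt', ...]
```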
def names(self): | data = None<EOL>if not self.connected:<EOL><INDENT>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>data = self.rdb.keys("<STR_LIT>")<EOL><DEDENT>except redis.exceptions.ConnectionError as err:<EOL><INDENT>raise ConnectionError(str(err))<EOL><DEDENT>return [name[<NUM_LIT:12>:] for name in data]<EOL> | Returns a list of queues available, ``None`` if no such
queues found. Remember this will only show queues with
at least one item enqueued. | f228:c0:m1 |
@property<EOL><INDENT>def length(self):<DEDENT> | if not self.connected:<EOL><INDENT>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>length = self.rdb.llen(self._name)<EOL><DEDENT>except redis.exceptions.ConnectionError as err:<EOL><INDENT>raise ConnectionError(str(err))<EOL><DEDENT>return length<EOL> | Gives the length of the queue.
If the queue is not connected then it will raise
:class:`retask.ConnectionError`. | f228:c0:m2 |
def connect(self): | config = self.config<EOL>self.rdb = redis.Redis(config['<STR_LIT:host>'], config['<STR_LIT:port>'], config['<STR_LIT>'],config['<STR_LIT:password>'])<EOL>try:<EOL><INDENT>info = self.rdb.info()<EOL>self.connected = True<EOL><DEDENT>except redis.ConnectionError:<EOL><INDENT>return False<EOL><DEDENT>return True<EOL> | Creates the connection with the redis server.
Return ``True`` if the connection works, else returns
``False``. It does not take any arguments.
:return: ``Boolean`` value
.. note::
After creating the ``Queue`` object the user should call
the ``connect`` method to create the connection.
.. doctest::
>>> from retask import Queue
>>> q = Queue('test')
>>> q.connect()
True | f228:c0:m3 |
def wait(self, wait_time=<NUM_LIT:0>): | if not self.connected:<EOL><INDENT>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>data = self.rdb.brpop(self._name, wait_time)<EOL>if data:<EOL><INDENT>task = Task()<EOL>task.__dict__ = json.loads(data[<NUM_LIT:1>])<EOL>return task<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT> | Returns a :class:`~retask.task.Task` object from the queue. Returns ``False`` if it timeouts.
:arg wait_time: Time in seconds to wait, default is infinite.
:return: :class:`~retask.task.Task` object from the queue or False if it timeouts.
.. doctest::
>>> from retask import Queue
>>> q = Queue('test')
>>> q.connect()
True
>>> task = q.wait()
>>> print task.data
{u'name': u'kushal'}
.. note::
This is a blocking call; you can specify the wait_time argument for a timeout. | f228:c0:m4 |
def dequeue(self): | if not self.connected:<EOL><INDENT>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>if self.rdb.llen(self._name) == <NUM_LIT:0>:<EOL><INDENT>return None<EOL><DEDENT>data = self.rdb.rpop(self._name)<EOL>if not data:<EOL><INDENT>return None<EOL><DEDENT>if isinstance(data, six.binary_type):<EOL><INDENT>data = six.text_type(data, '<STR_LIT:utf-8>', errors = '<STR_LIT:replace>')<EOL><DEDENT>task = Task()<EOL>task.__dict__ = json.loads(data)<EOL>return task<EOL> | Returns a :class:`~retask.task.Task` object from the queue. Returns ``None`` if the
queue is empty.
:return: :class:`~retask.task.Task` object from the queue
If the queue is not connected then it will raise
:class:`retask.ConnectionError`
.. doctest::
>>> from retask import Queue
>>> q = Queue('test')
>>> q.connect()
True
>>> t = q.dequeue()
>>> print t.data
{u'name': u'kushal'} | f228:c0:m5 |
def enqueue(self, task): | if not self.connected:<EOL><INDENT>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>job = Job(self.rdb)<EOL>task.urn = job.urn<EOL>text = json.dumps(task.__dict__)<EOL>self.rdb.lpush(self._name, text)<EOL><DEDENT>except Exception as err:<EOL><INDENT>return False<EOL><DEDENT>return job<EOL> | Enqueues the given :class:`~retask.task.Task` object to the queue and returns
a :class:`~retask.queue.Job` object.
:arg task: :class:`~retask.task.Task` object
:return: :class:`~retask.queue.Job` object
If the queue is not connected then it will raise
:class:`retask.ConnectionError`.
.. doctest::
>>> from retask import Queue
>>> q = Queue('test')
>>> q.connect()
True
>>> from retask.task import Task
>>> task = Task({'name':'kushal'})
>>> job = q.enqueue(task) | f228:c0:m6 |
def send(self, task, result, expire=<NUM_LIT>): | self.rdb.lpush(task.urn, json.dumps(result))<EOL>self.rdb.expire(task.urn, expire)<EOL> | Sends the result back to the producer. This should be called only if you
want to return the result in an async manner.
:arg task: :class:`~retask.task.Task` object
:arg result: Result data to be sent back. Should be JSON serializable.
:arg expire: Time in seconds after the key expires. Default is 60 seconds. | f228:c0:m7 |
def find(self, obj): | if not self.connected:<EOL><INDENT>raise ConnectionError('<STR_LIT>')<EOL><DEDENT>data = self.rdb.lrange(self._name, <NUM_LIT:0>, -<NUM_LIT:1>)<EOL>for i, datum in enumerate(data):<EOL><INDENT>if datum.find(str(obj)) != -<NUM_LIT:1>:<EOL><INDENT>return i<EOL><DEDENT><DEDENT>return -<NUM_LIT:1><EOL> | Returns the index of the given object in the queue; it might be a string,
which will be searched for inside each task.
:arg obj: the object we are looking for
:return: -1 if the object is not found or else the location of the task | f228:c0:m9 |
@property<EOL><INDENT>def result(self):<DEDENT> | if self.__result:<EOL><INDENT>return self.__result<EOL><DEDENT>data = self.rdb.rpop(self.urn)<EOL>if data:<EOL><INDENT>self.rdb.delete(self.urn)<EOL>data = json.loads(data)<EOL>self.__result = data<EOL>return data<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT> | Returns the result from the worker for this job. This is used to pass
the result in an async way. | f228:c1:m1 |
def wait(self, wait_time=<NUM_LIT:0>): | if self.__result:<EOL><INDENT>return True<EOL><DEDENT>data = self.rdb.brpop(self.urn, wait_time)<EOL>if data:<EOL><INDENT>self.rdb.delete(self.urn)<EOL>data = json.loads(data[<NUM_LIT:1>])<EOL>self.__result = data<EOL>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT> | Blocking call to check if the worker returns the result. One can use
job.result after this call returns ``True``.
:arg wait_time: Time in seconds to wait, default is infinite.
:return: `True` or `False`.
.. note::
This is a blocking call; you can specify the wait_time argument for a timeout. | f228:c1:m2 |
@property<EOL><INDENT>def data(self):<DEDENT> | return json.loads(self._data)<EOL> | The python object containing information for the current task | f229:c0:m1 |
@property<EOL><INDENT>def rawdata(self):<DEDENT> | return self._data<EOL> | The string representation of the actual python objects for the task
.. note::
This should not be used directly by the users. This is for internal use
only. | f229:c0:m2 |
def read_file(path): | with open(path, '<STR_LIT:r>') as f:<EOL><INDENT>return f.read()<EOL><DEDENT> | Read a file and return its entire contents. | f234:m0 |
def setup_files(tmpdir, file_groups, flat): | name_groups = []<EOL>for group_num, file_group in enumerate(file_groups):<EOL><INDENT>names = []<EOL>for file_num, file_content in enumerate(file_group):<EOL><INDENT>if flat:<EOL><INDENT>basename = "<STR_LIT>" % (group_num, file_num)<EOL>f = tmpdir.join(basename)<EOL><DEDENT>else:<EOL><INDENT>subdir = tmpdir.join("<STR_LIT>" % file_num)<EOL>if not subdir.check(dir=True):<EOL><INDENT>subdir.mkdir()<EOL><DEDENT>f = subdir.join("<STR_LIT>" % group_num)<EOL><DEDENT>f.write(file_content)<EOL>names.append(str(f))<EOL><DEDENT>name_groups.append(names)<EOL><DEDENT>return name_groups<EOL> | Create a file structure for testing.
Receives a tmpdir fixture, and the file content to write in the
directory. If flat is True, all files are created in the same
directory; otherwise, they are created in subdirectories. See
test_find_dups_in_dirs doc for details.
Returns a list of groups of filenames, equal amongst themselves. | f237:m0 |
def setup_flat_dir_errors(tmpdir, count): | dir_errors = []<EOL>for i in range(count):<EOL><INDENT>basename = "<STR_LIT>" % i<EOL>d = tmpdir.mkdir(basename)<EOL>os.chmod(str(d), <NUM_LIT:0>)<EOL>dir_errors.append(str(d))<EOL><DEDENT>return dir_errors<EOL> | Create unreadable directories for testing.
Receives a tmpdir fixture, and the number of unreadable directories to
create inside it.
Doesn't work in Windows: can't portably remove read permissions there.
Returns a list of pathnames of the directories. | f237:m1 |
def setup_flat_read_errors(tmpdir, count): | read_errors = []<EOL>for i in range(count):<EOL><INDENT>basename = "<STR_LIT>" % i<EOL>f = tmpdir.join(basename)<EOL>f.write("<STR_LIT>")<EOL>os.chmod(str(f), <NUM_LIT:0>)<EOL>read_errors.append(str(f))<EOL><DEDENT>return read_errors<EOL> | Create unreadable files for testing.
Receives a tmpdir fixture, and the number of unreadable files to create
inside it. The number should not be 1, as capidup may not even try to
hash the file if there's no other file with the same size. That means
there would be no error.
Doesn't work in Windows: can't portably remove read permissions there.
Returns a list of filenames. | f237:m2 |
def unnest_sequence(seq): | class_ = type(seq)<EOL>return functools.reduce(class_.__add__, seq, class_())<EOL> | [[a, b], [c, d]] -> [a, b, c, d]
Receives a sequence of sequences (e.g. list of lists), and returns a
new sequence of the same type, with one level of nesting removed.
The sequence must be concatenable (it must support the 'add' operator).
It must also be homogeneous: all elements of the sequence must be a
sequence of the same type as their parent. A corollary of this is that
all elements of the sequence must be nested at least one level. | f239:m0 |
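The body above is complete enough to run once the masking is removed; a usage sketch:

```python
import functools

def unnest_sequence(seq):
    # Concatenate the elements with their own type's __add__, starting
    # from an empty instance of the same type.
    class_ = type(seq)
    return functools.reduce(class_.__add__, seq, class_())

print(unnest_sequence([[1, 2], [3, 4]]))   # [1, 2, 3, 4]
print(unnest_sequence(((1, 2), (3, 4))))   # (1, 2, 3, 4)
```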
def round_up_to_mult(n, mult): | return ((n + mult - <NUM_LIT:1>) // mult) * mult<EOL> | Round an integer up to the next multiple. | f243:m0 |
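The rounding trick here: adding `mult - 1` before floor division bumps any nonzero remainder up to the next whole multiple. A quick check:

```python
def round_up_to_mult(n, mult):
    # Adding mult - 1 before floor division rounds any partial chunk up.
    return ((n + mult - 1) // mult) * mult

assert round_up_to_mult(7, 4) == 8
assert round_up_to_mult(8, 4) == 8   # exact multiples are unchanged
assert round_up_to_mult(9, 4) == 12
```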
def should_be_excluded(name, exclude_patterns): | for pattern in exclude_patterns:<EOL><INDENT>if fnmatch.fnmatch(name, pattern):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL> | Check if a name should be excluded.
Returns True if name matches at least one of the exclude patterns in
the exclude_patterns list. | f243:m1 |
def prune_names(names, exclude_patterns): | return [x for x in names if not should_be_excluded(x, exclude_patterns)]<EOL> | Prune subdirs or files from an index crawl.
This is used to control the search performed by os.walk() in
index_files_by_size().
names is the list of file or subdir names, to be pruned as per the
exclude_patterns list.
Returns a new (possibly pruned) names list. | f243:m2 |
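A runnable reading of these two helpers using `fnmatch`, with an illustrative call:

```python
import fnmatch

def should_be_excluded(name, exclude_patterns):
    # any() over fnmatch gives the same result as the loop in the body above.
    return any(fnmatch.fnmatch(name, pattern) for pattern in exclude_patterns)

def prune_names(names, exclude_patterns):
    return [x for x in names if not should_be_excluded(x, exclude_patterns)]

print(prune_names(['a.py', 'a.bak', 'tmp', 'src'], ['*.bak', 'tmp']))
# ['a.py', 'src']
```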
def filter_visited(curr_dir, subdirs, already_visited, follow_dirlinks, on_error): | filtered = []<EOL>to_visit = set()<EOL>_already_visited = already_visited.copy()<EOL>try:<EOL><INDENT>file_info = os.stat(curr_dir) if follow_dirlinks else os.lstat(curr_dir)<EOL>_already_visited.add((file_info.st_dev, file_info.st_ino))<EOL><DEDENT>except OSError as e:<EOL><INDENT>on_error(e)<EOL><DEDENT>for subdir in subdirs:<EOL><INDENT>full_path = os.path.join(curr_dir, subdir)<EOL>try:<EOL><INDENT>file_info = os.stat(full_path) if follow_dirlinks else os.lstat(full_path)<EOL><DEDENT>except OSError as e:<EOL><INDENT>on_error(e)<EOL>continue<EOL><DEDENT>if not follow_dirlinks and stat.S_ISLNK(file_info.st_mode):<EOL><INDENT>continue<EOL><DEDENT>dev_inode = (file_info.st_dev, file_info.st_ino)<EOL>if dev_inode not in _already_visited:<EOL><INDENT>filtered.append(subdir)<EOL>to_visit.add(dev_inode)<EOL><DEDENT>else:<EOL><INDENT>on_error(OSError(errno.ELOOP, "<STR_LIT>", full_path))<EOL><DEDENT><DEDENT>return filtered, _already_visited.union(to_visit)<EOL> | Filter subdirs that have already been visited.
This is used to avoid loops in the search performed by os.walk() in
index_files_by_size.
curr_dir is the path of the current directory, as returned by os.walk().
subdirs is the list of subdirectories for the current directory, as
returned by os.walk().
already_visited is a set of tuples (st_dev, st_ino) of already
visited directories. This set will not be modified.
on_error is a function f(OSError) -> None, to be called in case of
error.
Returns a tuple: the new (possibly filtered) subdirs list, and a new
set of already visited directories, now including the subdirs. | f243:m3 |
def index_files_by_size(root, files_by_size, exclude_dirs, exclude_files,<EOL>follow_dirlinks): | <EOL>errors = []<EOL>already_visited = set()<EOL>def _print_error(error):<EOL><INDENT>"""<STR_LIT>"""<EOL>msg = "<STR_LIT>" % (error.filename, error.strerror)<EOL>sys.stderr.write("<STR_LIT>" % msg)<EOL>errors.append(msg)<EOL><DEDENT>for curr_dir, subdirs, filenames in os.walk(root, topdown=True,<EOL>onerror=_print_error, followlinks=follow_dirlinks):<EOL><INDENT>subdirs[:] = prune_names(subdirs, exclude_dirs)<EOL>filenames = prune_names(filenames, exclude_files)<EOL>subdirs[:], already_visited = filter_visited(curr_dir, subdirs,<EOL>already_visited, follow_dirlinks, _print_error)<EOL>for base_filename in filenames:<EOL><INDENT>full_path = os.path.join(curr_dir, base_filename)<EOL>try:<EOL><INDENT>file_info = os.lstat(full_path)<EOL><DEDENT>except OSError as e:<EOL><INDENT>_print_error(e)<EOL>continue<EOL><DEDENT>if stat.S_ISREG(file_info.st_mode):<EOL><INDENT>size = file_info.st_size<EOL>if size in files_by_size:<EOL><INDENT>files_by_size[size].append(full_path)<EOL><DEDENT>else:<EOL><INDENT>files_by_size[size] = [full_path]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return errors<EOL> | Recursively index files under a root directory.
Each regular file is added *in-place* to the files_by_size dictionary,
according to the file size. This is a (possibly empty) dictionary of
lists of filenames, indexed by file size.
exclude_dirs is a list of glob patterns to exclude directories.
exclude_files is a list of glob patterns to exclude files.
follow_dirlinks controls whether to follow symbolic links to
subdirectories while crawling.
Returns a list of error messages that occurred. If empty, there were no
errors. | f243:m4 |
def calculate_md5(filename, length): | assert length >= <NUM_LIT:0><EOL>if length == <NUM_LIT:0>:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>md5_summer = hashlib.md5()<EOL>f = open(filename, '<STR_LIT:rb>')<EOL>try:<EOL><INDENT>bytes_read = <NUM_LIT:0><EOL>while bytes_read < length:<EOL><INDENT>chunk_size = min(MD5_CHUNK_SIZE, length - bytes_read)<EOL>chunk = f.read(chunk_size)<EOL>if not chunk:<EOL><INDENT>break<EOL><DEDENT>md5_summer.update(chunk)<EOL>bytes_read += len(chunk)<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>f.close()<EOL><DEDENT>md5 = md5_summer.digest()<EOL>return md5<EOL> | Calculate the MD5 hash of a file, up to length bytes.
Returns the MD5 in its binary form, as a 16-byte string. Raises IOError
or OSError in case of error. | f243:m5 |
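For reference, a raw MD5 digest from hashlib is always 16 bytes:

```python
import hashlib

digest = hashlib.md5(b'hello').digest()
print(len(digest))  # 16 -- an MD5 digest is 128 bits, i.e. 16 bytes
```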
def find_duplicates(filenames, max_size): | errors = []<EOL>if len(filenames) < <NUM_LIT:2>:<EOL><INDENT>return [], errors<EOL><DEDENT>if max_size == <NUM_LIT:0>:<EOL><INDENT>return [filenames], errors<EOL><DEDENT>files_by_md5 = {}<EOL>for filename in filenames:<EOL><INDENT>try:<EOL><INDENT>md5 = calculate_md5(filename, max_size)<EOL><DEDENT>except EnvironmentError as e:<EOL><INDENT>msg = "<STR_LIT>" % (filename, e.strerror)<EOL>sys.stderr.write("<STR_LIT>" % msg)<EOL>errors.append(msg)<EOL>continue<EOL><DEDENT>if md5 not in files_by_md5:<EOL><INDENT>files_by_md5[md5] = [filename]<EOL><DEDENT>else:<EOL><INDENT>files_by_md5[md5].append(filename)<EOL><DEDENT><DEDENT>duplicates = [l for l in py3compat.itervalues(files_by_md5) if len(l) >= <NUM_LIT:2>]<EOL>return duplicates, errors<EOL> | Find duplicates in a list of files, comparing up to `max_size` bytes.
Returns a 2-tuple: ``(duplicate_groups, errors)``.
`duplicate_groups` is a (possibly empty) list of lists: the names of
files that have at least two copies, grouped together.
`errors` is a list of error messages that occurred. If empty, there were
no errors.
For example, assuming ``a1`` and ``a2`` are identical, ``c1`` and ``c2`` are
identical, and ``b`` is different from all others::
>>> dups, errs = find_duplicates(['a1', 'a2', 'b', 'c1', 'c2'], 1024)
>>> dups
[['a1', 'a2'], ['c1', 'c2']]
>>> errs
[]
Note that ``b`` is not included in the results, as it has no duplicates. | f243:m6 |
def find_duplicates_in_dirs(directories, exclude_dirs=None, exclude_files=None,<EOL>follow_dirlinks=False): | if exclude_dirs is None:<EOL><INDENT>exclude_dirs = []<EOL><DEDENT>if exclude_files is None:<EOL><INDENT>exclude_files = []<EOL><DEDENT>errors_in_total = []<EOL>files_by_size = {}<EOL>for directory in directories:<EOL><INDENT>sub_errors = index_files_by_size(directory, files_by_size, exclude_dirs,<EOL>exclude_files, follow_dirlinks)<EOL>errors_in_total += sub_errors<EOL><DEDENT>all_duplicates = []<EOL>for size in iter(files_by_size):<EOL><INDENT>if size >= PARTIAL_MD5_THRESHOLD:<EOL><INDENT>partial_size = min(round_up_to_mult(size // PARTIAL_MD5_READ_RATIO,<EOL>PARTIAL_MD5_READ_MULT),<EOL>PARTIAL_MD5_MAX_READ)<EOL>possible_duplicates_list, sub_errors = find_duplicates(files_by_size[size], partial_size)<EOL>errors_in_total += sub_errors<EOL><DEDENT>else:<EOL><INDENT>possible_duplicates_list = [files_by_size[size]]<EOL><DEDENT>for possible_duplicates in possible_duplicates_list:<EOL><INDENT>duplicates, sub_errors = find_duplicates(possible_duplicates, size)<EOL>all_duplicates += duplicates<EOL>errors_in_total += sub_errors<EOL><DEDENT><DEDENT>return all_duplicates, errors_in_total<EOL> | Recursively scan a list of directories, looking for duplicate files.
`exclude_dirs`, if provided, should be a list of glob patterns.
Subdirectories whose names match these patterns are excluded from the
scan.
`exclude_files`, if provided, should be a list of glob patterns. Files
whose names match these patterns are excluded from the scan.
``follow_dirlinks`` controls whether to follow symbolic links to
subdirectories while crawling.
Returns a 2-tuple: ``(duplicate_groups, errors)``.
`duplicate_groups` is a (possibly empty) list of lists: the names of files
that have at least two copies, grouped together.
`errors` is a list of error messages that occurred. If empty, there were no
errors.
For example, assuming ``./a1`` and ``/dir1/a2`` are identical,
``/dir1/c1`` and ``/dir2/c2`` are identical, ``/dir2/b`` is different
from all others, that any subdirectories called ``tmp`` should not
be scanned, and that files ending in ``.bak`` should be ignored:
>>> dups, errs = find_duplicates_in_dirs(['.', '/dir1', '/dir2'], ['tmp'], ['*.bak'])
>>> dups
[['./a1', '/dir1/a2'], ['/dir1/c1', '/dir2/c2']]
>>> errs
[] | f243:m7 |
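A worked example of the partial-read sizing computed in this body, using hypothetical constants since the real values are masked in this dump:

```python
PARTIAL_MD5_READ_RATIO = 4      # read ~1/4 of the file (assumed value)
PARTIAL_MD5_READ_MULT = 4096    # rounded up to a 4 KiB multiple (assumed)
PARTIAL_MD5_MAX_READ = 1 << 20  # but never more than 1 MiB (assumed)

def round_up_to_mult(n, mult):
    return ((n + mult - 1) // mult) * mult

size = 100_000
partial = min(round_up_to_mult(size // PARTIAL_MD5_READ_RATIO,
                               PARTIAL_MD5_READ_MULT),
              PARTIAL_MD5_MAX_READ)
print(partial)  # 28672: 25000 rounded up to the next 4096 multiple
```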
def random_string(length): | <EOL>return urlsafe_b64encode(os.urandom(length))[:length]<EOL> | Generates a random string for a given length. | f246:m1 |
def calculate_payload_hash(payload, algorithm, content_type): | p_hash = hashlib.new(algorithm)<EOL>parts = []<EOL>parts.append('<STR_LIT>' + str(HAWK_VER) + '<STR_LIT>')<EOL>parts.append(parse_content_type(content_type) + '<STR_LIT:\n>')<EOL>parts.append(payload or '<STR_LIT>')<EOL>parts.append('<STR_LIT:\n>')<EOL>for i, p in enumerate(parts):<EOL><INDENT>if not isinstance(p, six.binary_type):<EOL><INDENT>p = p.encode('<STR_LIT:utf8>')<EOL><DEDENT>p_hash.update(p)<EOL>parts[i] = p<EOL><DEDENT>log.debug('<STR_LIT>'<EOL>.format(parts=pprint.pformat(parts)))<EOL>return b64encode(p_hash.digest())<EOL> | Calculates a hash for a given payload. | f246:m2 |
def calculate_mac(mac_type, resource, content_hash): | normalized = normalize_string(mac_type, resource, content_hash)<EOL>log.debug(u'<STR_LIT>'<EOL>.format(norm=normalized))<EOL>digestmod = getattr(hashlib, resource.credentials['<STR_LIT>'])<EOL>if not isinstance(normalized, six.binary_type):<EOL><INDENT>normalized = normalized.encode('<STR_LIT:utf8>')<EOL><DEDENT>key = resource.credentials['<STR_LIT:key>']<EOL>if not isinstance(key, six.binary_type):<EOL><INDENT>key = key.encode('<STR_LIT:ascii>')<EOL><DEDENT>result = hmac.new(key, normalized, digestmod)<EOL>return b64encode(result.digest())<EOL> | Calculates a message authorization code (MAC). | f246:m3 |
def calculate_ts_mac(ts, credentials): | normalized = ('<STR_LIT>'<EOL>.format(hawk_ver=HAWK_VER, ts=ts))<EOL>log.debug(u'<STR_LIT>'<EOL>.format(norm=normalized))<EOL>digestmod = getattr(hashlib, credentials['<STR_LIT>'])<EOL>if not isinstance(normalized, six.binary_type):<EOL><INDENT>normalized = normalized.encode('<STR_LIT:utf8>')<EOL><DEDENT>key = credentials['<STR_LIT:key>']<EOL>if not isinstance(key, six.binary_type):<EOL><INDENT>key = key.encode('<STR_LIT:ascii>')<EOL><DEDENT>result = hmac.new(key, normalized, digestmod)<EOL>return b64encode(result.digest())<EOL> | Calculates a message authorization code (MAC) for a timestamp. | f246:m4 |
def normalize_string(mac_type, resource, content_hash): | normalized = [<EOL>'<STR_LIT>' + str(HAWK_VER) + '<STR_LIT:.>' + mac_type,<EOL>normalize_header_attr(resource.timestamp),<EOL>normalize_header_attr(resource.nonce),<EOL>normalize_header_attr(resource.method or '<STR_LIT>'),<EOL>normalize_header_attr(resource.name or '<STR_LIT>'),<EOL>normalize_header_attr(resource.host),<EOL>normalize_header_attr(resource.port),<EOL>normalize_header_attr(content_hash or '<STR_LIT>')<EOL>]<EOL>normalized.append(normalize_header_attr(resource.ext or '<STR_LIT>'))<EOL>if resource.app:<EOL><INDENT>normalized.append(normalize_header_attr(resource.app))<EOL>normalized.append(normalize_header_attr(resource.dlg or '<STR_LIT>'))<EOL><DEDENT>normalized.append('<STR_LIT>')<EOL>normalized = '<STR_LIT:\n>'.join(normalized)<EOL>return normalized<EOL> | Serializes mac_type and resource into a HAWK string. | f246:m5 |
def parse_content_type(content_type): | if content_type:<EOL><INDENT>return content_type.split('<STR_LIT:;>')[<NUM_LIT:0>].strip().lower()<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT> | Cleans up content_type. | f246:m6 |
def parse_authorization_header(auth_header): | if len(auth_header) > MAX_LENGTH:<EOL><INDENT>raise BadHeaderValue('<STR_LIT>'.format(<EOL>max_length=MAX_LENGTH))<EOL><DEDENT>if isinstance(auth_header, six.binary_type):<EOL><INDENT>auth_header = auth_header.decode('<STR_LIT:utf8>')<EOL><DEDENT>scheme, attributes_string = auth_header.split('<STR_LIT:U+0020>', <NUM_LIT:1>)<EOL>if scheme.lower() != '<STR_LIT>':<EOL><INDENT>raise HawkFail("<STR_LIT>"<EOL>.format(scheme=scheme))<EOL><DEDENT>attributes = {}<EOL>def replace_attribute(match):<EOL><INDENT>"""<STR_LIT>"""<EOL>key = match.group('<STR_LIT:key>')<EOL>value = match.group('<STR_LIT:value>')<EOL>if key not in allowable_header_keys:<EOL><INDENT>raise HawkFail("<STR_LIT>"<EOL>.format(key=key))<EOL><DEDENT>validate_header_attr(value, name=key)<EOL>if key in attributes:<EOL><INDENT>raise BadHeaderValue('<STR_LIT>'.format(key=key))<EOL><DEDENT>attributes[key] = value<EOL><DEDENT>unparsed_header = HAWK_HEADER_RE.sub(replace_attribute, attributes_string)<EOL>if unparsed_header != '<STR_LIT>':<EOL><INDENT>raise BadHeaderValue("<STR_LIT>", unparsed_header)<EOL><DEDENT>log.debug('<STR_LIT>'<EOL>.format(header=auth_header, parsed=pprint.pformat(attributes)))<EOL>return attributes<EOL> | Example Authorization header:
'Hawk id="dh37fgj492je", ts="1367076201", nonce="NPHgnG", ext="and
welcome!", mac="CeWHy4d9kbLGhDlkyw2Nh3PJ7SDOdZDa267KH4ZaNMY="' | f246:m7 |
def get_bewit(resource): | if resource.method != '<STR_LIT:GET>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if resource.nonce != '<STR_LIT>':<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>mac = calculate_mac(<EOL>'<STR_LIT>',<EOL>resource,<EOL>None,<EOL>)<EOL>if isinstance(mac, six.binary_type):<EOL><INDENT>mac = mac.decode('<STR_LIT:ascii>')<EOL><DEDENT>if resource.ext is None:<EOL><INDENT>ext = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>validate_header_attr(resource.ext, name='<STR_LIT>')<EOL>ext = resource.ext<EOL><DEDENT>inner_bewit = u"<STR_LIT>".format(<EOL>id=resource.credentials['<STR_LIT:id>'],<EOL>exp=resource.timestamp,<EOL>mac=mac,<EOL>ext=ext,<EOL>)<EOL>inner_bewit_bytes = inner_bewit.encode('<STR_LIT:ascii>')<EOL>bewit_bytes = urlsafe_b64encode(inner_bewit_bytes)<EOL>return bewit_bytes.decode('<STR_LIT:ascii>')<EOL> | Returns a bewit identifier for the resource as a string.
:param resource:
Resource to generate a bewit for
:type resource: `mohawk.base.Resource` | f249:m0 |
def parse_bewit(bewit): | decoded_bewit = b64decode(bewit).decode('<STR_LIT:ascii>')<EOL>bewit_parts = decoded_bewit.split("<STR_LIT:\\>")<EOL>if len(bewit_parts) != <NUM_LIT:4>:<EOL><INDENT>raise InvalidBewit('<STR_LIT>' % decoded_bewit)<EOL><DEDENT>return bewittuple(*bewit_parts)<EOL> | Returns a `bewittuple` representing the parts of an encoded bewit string.
This has the following named attributes:
(id, expiration, mac, ext)
:param bewit:
A base64 encoded bewit string
:type bewit: str | f249:m1 |
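A sketch of the bewit wire format this implies: four backslash-separated fields, base64-encoded. The id/mac/ext values below are made up for illustration:

```python
from base64 import urlsafe_b64encode, urlsafe_b64decode

inner = '\\'.join(['dh37fgj492je', '1367076201', 'fakemac==', 'ext-data'])
bewit = urlsafe_b64encode(inner.encode('ascii')).decode('ascii')

parts = urlsafe_b64decode(bewit).decode('ascii').split('\\')
assert len(parts) == 4
print(parts)  # ['dh37fgj492je', '1367076201', 'fakemac==', 'ext-data']
```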
def strip_bewit(url): | m = re.search('<STR_LIT>', url)<EOL>if not m:<EOL><INDENT>raise InvalidBewit('<STR_LIT>')<EOL><DEDENT>bewit = m.group(<NUM_LIT:1>)<EOL>stripped_url = url[:m.start()] + url[m.end():]<EOL>return bewit, stripped_url<EOL> | Strips the bewit parameter out of a url.
Returns (encoded_bewit, stripped_url)
Raises InvalidBewit if no bewit found.
:param url:
The url containing a bewit parameter
:type url: str | f249:m2 |
def check_bewit(url, credential_lookup, now=None): | raw_bewit, stripped_url = strip_bewit(url)<EOL>bewit = parse_bewit(raw_bewit)<EOL>try:<EOL><INDENT>credentials = credential_lookup(bewit.id)<EOL><DEDENT>except LookupError:<EOL><INDENT>raise CredentialsLookupError('<STR_LIT>'<EOL>.format(bewit.id))<EOL><DEDENT>res = Resource(url=stripped_url,<EOL>method='<STR_LIT:GET>',<EOL>credentials=credentials,<EOL>timestamp=bewit.expiration,<EOL>nonce='<STR_LIT>',<EOL>ext=bewit.ext,<EOL>)<EOL>mac = calculate_mac('<STR_LIT>', res, None)<EOL>mac = mac.decode('<STR_LIT:ascii>')<EOL>if not strings_match(mac, bewit.mac):<EOL><INDENT>raise MacMismatch('<STR_LIT>'<EOL>.format(bewit_mac=bewit.mac,<EOL>expected_mac=mac))<EOL><DEDENT>if now is None:<EOL><INDENT>now = utc_now()<EOL><DEDENT>if int(bewit.expiration) < now:<EOL><INDENT>raise TokenExpired('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>.format(ts=bewit.expiration, now=now),<EOL>localtime_in_seconds=now,<EOL>www_authenticate='<STR_LIT>'<EOL>)<EOL><DEDENT>return True<EOL> | Validates the given bewit.
Returns True if the resource has a valid bewit parameter attached,
or raises a subclass of HawkFail otherwise.
:param credential_lookup:
Callable to look up the credentials dict by sender ID.
The credentials dict must have the keys:
``id``, ``key``, and ``algorithm``.
See :ref:`receiving-request` for an example.
:type credential_lookup: callable
:param now=None:
Unix epoch time for the current time to determine if bewit has expired.
If None, then the current time as given by utc_now() is used.
:type now=None: integer | f249:m3 |
def accept_response(self,<EOL>response_header,<EOL>content=EmptyValue,<EOL>content_type=EmptyValue,<EOL>accept_untrusted_content=False,<EOL>localtime_offset_in_seconds=<NUM_LIT:0>,<EOL>timestamp_skew_in_seconds=default_ts_skew_in_seconds,<EOL>**auth_kw): | log.debug('<STR_LIT>'<EOL>.format(header=response_header))<EOL>parsed_header = parse_authorization_header(response_header)<EOL>resource = Resource(ext=parsed_header.get('<STR_LIT>', None),<EOL>content=content,<EOL>content_type=content_type,<EOL>timestamp=self.req_resource.timestamp,<EOL>nonce=self.req_resource.nonce,<EOL>url=self.req_resource.url,<EOL>method=self.req_resource.method,<EOL>app=self.req_resource.app,<EOL>dlg=self.req_resource.dlg,<EOL>credentials=self.credentials,<EOL>seen_nonce=self.seen_nonce)<EOL>self._authorize(<EOL>'<STR_LIT>', parsed_header, resource,<EOL>their_timestamp=resource.timestamp,<EOL>timestamp_skew_in_seconds=timestamp_skew_in_seconds,<EOL>localtime_offset_in_seconds=localtime_offset_in_seconds,<EOL>accept_untrusted_content=accept_untrusted_content,<EOL>**auth_kw)<EOL> | Accept a response to this request.
:param response_header:
A `Hawk`_ ``Server-Authorization`` header
such as one created by :class:`mohawk.Receiver`.
:type response_header: str
:param content=EmptyValue: Byte string of the response body received.
:type content=EmptyValue: str
:param content_type=EmptyValue:
Content-Type header value of the response received.
:type content_type=EmptyValue: str
:param accept_untrusted_content=False:
When True, allow responses that do not hash their content.
Read :ref:`skipping-content-checks` to learn more.
:type accept_untrusted_content=False: bool
:param localtime_offset_in_seconds=0:
Seconds to add to local time in case it's out of sync.
:type localtime_offset_in_seconds=0: float
:param timestamp_skew_in_seconds=60:
Max seconds until a message expires. Upon expiry,
:class:`mohawk.exc.TokenExpired` is raised.
:type timestamp_skew_in_seconds=60: float
.. _`Hawk`: https://github.com/hueniverse/hawk | f250:c0:m1 |
def respond(self,<EOL>content=EmptyValue,<EOL>content_type=EmptyValue,<EOL>always_hash_content=True,<EOL>ext=None): | log.debug('<STR_LIT>')<EOL>resource = Resource(url=self.resource.url,<EOL>credentials=self.resource.credentials,<EOL>ext=ext,<EOL>app=self.parsed_header.get('<STR_LIT>', None),<EOL>dlg=self.parsed_header.get('<STR_LIT>', None),<EOL>method=self.resource.method,<EOL>content=content,<EOL>content_type=content_type,<EOL>always_hash_content=always_hash_content,<EOL>nonce=self.parsed_header['<STR_LIT>'],<EOL>timestamp=self.parsed_header['<STR_LIT>'])<EOL>mac = calculate_mac('<STR_LIT>', resource, resource.gen_content_hash())<EOL>self.response_header = self._make_header(resource, mac,<EOL>additional_keys=['<STR_LIT>'])<EOL>return self.response_header<EOL> | Respond to the request.
This generates the :attr:`mohawk.Receiver.response_header`
attribute.
:param content=EmptyValue: Byte string of response body that will be sent.
:type content=EmptyValue: str
:param content_type=EmptyValue: content-type header value for response.
:type content_type=EmptyValue: str
:param always_hash_content=True:
When True, ``content`` and ``content_type`` must be provided.
Read :ref:`skipping-content-checks` to learn more.
:type always_hash_content=True: bool
:param ext=None:
An external `Hawk`_ string. If not None, this value will be
signed so that the sender can trust it.
:type ext=None: str
.. _`Hawk`: https://github.com/hueniverse/hawk | f252:c0:m1 |
@database_sync_to_async<EOL>def assert_subscribers(num, observer_id=None): | if observer_id:<EOL><INDENT>observer = observer_models.Observer.objects.get(id=observer_id)<EOL>assert observer.subscribers.all().count() == num<EOL><DEDENT>else:<EOL><INDENT>assert observer_models.Subscriber.objects.all().count() == num<EOL><DEDENT> | Test the number of subscribers. | f257:m1 |
def list(self, request, *args, **kwargs): | queryset = self.filter_queryset(self.get_queryset())<EOL>return Response({'<STR_LIT:count>': queryset.count()})<EOL> | Filtered query, which just returns a count.
Such a formulation is used to force the query compiler to generate a
subquery, which uses the M2M relation. | f260:c3:m0 |
def get_queryobserver_settings(): | defaults = {<EOL>'<STR_LIT>': {'<STR_LIT>': <NUM_LIT:1000>, '<STR_LIT>': <NUM_LIT:1.0>},<EOL>'<STR_LIT>': {'<STR_LIT>': <NUM_LIT>},<EOL>'<STR_LIT>': <NUM_LIT:2>,<EOL>}<EOL>defaults.update(getattr(settings, '<STR_LIT>', {}))<EOL>return defaults<EOL> | Query observer connection configuration. | f266:m0 |
@dispatch.receiver(model_signals.pre_migrate)<EOL>def model_pre_migrate(*args, **kwargs): | global IN_MIGRATIONS<EOL>IN_MIGRATIONS = True<EOL> | Set 'in migrations' flag. | f267:m0 |
@dispatch.receiver(model_signals.post_migrate)<EOL>def model_post_migrate(*args, **kwargs): | global IN_MIGRATIONS<EOL>IN_MIGRATIONS = False<EOL> | Clear 'in migrations' flag. | f267:m1 |
def notify_observers(table, kind, primary_key=None): | if IN_MIGRATIONS:<EOL><INDENT>return<EOL><DEDENT>if not Observer.objects.filter(dependencies__table=table).exists():<EOL><INDENT>return<EOL><DEDENT>def handler():<EOL><INDENT>"""<STR_LIT>"""<EOL>try:<EOL><INDENT>async_to_sync(get_channel_layer().send)(<EOL>CHANNEL_MAIN,<EOL>{<EOL>'<STR_LIT:type>': TYPE_ORM_NOTIFY,<EOL>'<STR_LIT>': table,<EOL>'<STR_LIT>': kind,<EOL>'<STR_LIT:primary_key>': str(primary_key),<EOL>},<EOL>)<EOL><DEDENT>except ChannelFull:<EOL><INDENT>logger.exception("<STR_LIT>")<EOL><DEDENT><DEDENT>batcher = PrioritizedBatcher.global_instance()<EOL>if batcher.is_started:<EOL><INDENT>batcher.add(<EOL>'<STR_LIT>', handler, group_by=(table, kind, primary_key)<EOL>)<EOL><DEDENT>else:<EOL><INDENT>handler()<EOL><DEDENT> | Transmit ORM table change notification.
:param table: Name of the table that has changed
:param kind: Change type
:param primary_key: Primary key of the affected instance | f267:m2 |
@dispatch.receiver(model_signals.post_save)<EOL>def model_post_save(sender, instance, created=False, **kwargs): | if sender._meta.app_label == '<STR_LIT>':<EOL><INDENT>return<EOL><DEDENT>def notify():<EOL><INDENT>table = sender._meta.db_table<EOL>if created:<EOL><INDENT>notify_observers(table, ORM_NOTIFY_KIND_CREATE, instance.pk)<EOL><DEDENT>else:<EOL><INDENT>notify_observers(table, ORM_NOTIFY_KIND_UPDATE, instance.pk)<EOL><DEDENT><DEDENT>transaction.on_commit(notify)<EOL> | Signal emitted after any model is saved via Django ORM.
:param sender: Model class that was saved
:param instance: The actual instance that was saved
:param created: True if a new row was created | f267:m3 |
@dispatch.receiver(model_signals.post_delete)<EOL>def model_post_delete(sender, instance, **kwargs): | if sender._meta.app_label == '<STR_LIT>':<EOL><INDENT>return<EOL><DEDENT>def notify():<EOL><INDENT>table = sender._meta.db_table<EOL>notify_observers(table, ORM_NOTIFY_KIND_DELETE, instance.pk)<EOL><DEDENT>transaction.on_commit(notify)<EOL> | Signal emitted after any model is deleted via Django ORM.
:param sender: Model class that was deleted
:param instance: The actual instance that was removed | f267:m4 |
@dispatch.receiver(model_signals.m2m_changed)<EOL>def model_m2m_changed(sender, instance, action, **kwargs): | if sender._meta.app_label == '<STR_LIT>':<EOL><INDENT>return<EOL><DEDENT>def notify():<EOL><INDENT>table = sender._meta.db_table<EOL>if action == '<STR_LIT>':<EOL><INDENT>notify_observers(table, ORM_NOTIFY_KIND_CREATE)<EOL><DEDENT>elif action in ('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>notify_observers(table, ORM_NOTIFY_KIND_DELETE)<EOL><DEDENT><DEDENT>transaction.on_commit(notify)<EOL> | Signal emitted after any M2M relation changes via Django ORM.
:param sender: M2M intermediate model
:param instance: The actual instance that was saved
:param action: M2M action | f267:m5 |
def post(self, request): | try:<EOL><INDENT>observer_id = request.query_params['<STR_LIT>']<EOL>session_id = request.query_params['<STR_LIT>']<EOL><DEDENT>except KeyError:<EOL><INDENT>return response.Response(status=<NUM_LIT>)<EOL><DEDENT>observer.remove_subscriber(session_id, observer_id)<EOL>return response.Response()<EOL> | Handle a query observer unsubscription request. | f272:c0:m0 |
def __init__(self, viewset_class, viewset_method, request, args=None, kwargs=None): | super().__init__()<EOL>self.viewset_class = viewset_class<EOL>self.viewset_method = viewset_method<EOL>self.args = args or []<EOL>self.kwargs = kwargs or {}<EOL>self.method = request.method<EOL>self.META = {}<EOL>for key, value in request._request.META.items():<EOL><INDENT>if isinstance(value, str):<EOL><INDENT>self.META[key] = value<EOL><DEDENT><DEDENT>self.GET = request._request.GET.copy()<EOL>if OBSERVABLE_QUERY_PARAMETER in self.GET:<EOL><INDENT>del self.GET[OBSERVABLE_QUERY_PARAMETER]<EOL><DEDENT>self.path = request._request.path<EOL>self.path_info = request._request.path_info<EOL>self._force_auth_user = request.user<EOL>self._observe_id = None<EOL> | :param request: The original API request | f274:c0:m0 |
@property<EOL><INDENT>def observe_id(self):<DEDENT> | if self._observe_id is None:<EOL><INDENT>hasher = hashlib.sha256()<EOL>hasher.update(self.viewset_class.__module__.encode('<STR_LIT:utf8>'))<EOL>hasher.update(self.viewset_class.__name__.encode('<STR_LIT:utf8>'))<EOL>hasher.update(self.viewset_method.encode('<STR_LIT:utf8>'))<EOL>for key in sorted(self.GET.keys()):<EOL><INDENT>hasher.update(key.encode('<STR_LIT:utf8>'))<EOL>hasher.update(self.GET[key].encode('<STR_LIT:utf8>'))<EOL><DEDENT>hasher.update(self.path.encode('<STR_LIT:utf8>'))<EOL>hasher.update(self.path_info.encode('<STR_LIT:utf8>'))<EOL>if self._force_auth_user is not None:<EOL><INDENT>hasher.update(<EOL>(str(self._force_auth_user.id) or '<STR_LIT>').encode('<STR_LIT:utf8>')<EOL>)<EOL><DEDENT>else:<EOL><INDENT>hasher.update(b'<STR_LIT>')<EOL><DEDENT>self._observe_id = hasher.hexdigest()<EOL><DEDENT>return self._observe_id<EOL> | Unique identifier that identifies the observer. | f274:c0:m1 |
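A simplified, standalone illustration of how such a stable identifier can be derived; the component values below are hypothetical:

```python
import hashlib

def observe_id(parts):
    # Hash the same request components in a fixed order; the hex digest
    # is then a stable identifier for "the same query by the same user".
    hasher = hashlib.sha256()
    for part in parts:
        hasher.update(part.encode('utf8'))
    return hasher.hexdigest()

a = observe_id(['app.views', 'ItemViewSet', 'list', '/api/items', '42'])
b = observe_id(['app.views', 'ItemViewSet', 'list', '/api/items', '42'])
assert a == b  # identical requests map to the same observer
```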
def ready(self): | <EOL>from . import signals<EOL> | Perform application initialization. | f275:c0:m0 |
def remove_subscriber(session_id, observer_id): | models.Observer.subscribers.through.objects.filter(<EOL>subscriber_id=session_id, observer_id=observer_id<EOL>).delete()<EOL> | Remove subscriber from the given observer.
:param session_id: Subscriber's session identifier
:param observer_id: Observer identifier | f276:m0 |
def __init__(self, request): | <EOL>viewset = request.viewset_class()<EOL>viewset.request = api_request.Request(request)<EOL>viewset.request.method = request.method<EOL>viewset.format_kwarg = None<EOL>viewset.args = request.args<EOL>viewset.kwargs = request.kwargs<EOL>self._request = request<EOL>self._viewset = viewset<EOL>self._viewset_method = getattr(viewset, request.viewset_method)<EOL>self._meta = Options(viewset, self._viewset_method)<EOL> | Create new query observer.
:param request: A `queryobserver.request.Request` instance | f276:c1:m0 |
@property<EOL><INDENT>def id(self):<DEDENT> | return self._request.observe_id<EOL> | Unique observer identifier. | f276:c1:m1 |
def _get_logging_extra(self, duration=None, results=None): | return {<EOL>'<STR_LIT>': duration,<EOL>'<STR_LIT>': results,<EOL>'<STR_LIT>': self.id,<EOL>'<STR_LIT>': '<STR_LIT>'.format(<EOL>self._request.viewset_class.__module__,<EOL>self._request.viewset_class.__name__,<EOL>),<EOL>'<STR_LIT>': self._request.viewset_method,<EOL>'<STR_LIT:path>': self._request.path,<EOL>'<STR_LIT>': self._request.GET,<EOL>}<EOL> | Extra information for logger. | f276:c1:m2 |
def _get_logging_id(self): | return "<STR_LIT>".format(<EOL>self._request.viewset_class.__module__,<EOL>self._request.viewset_class.__name__,<EOL>self._request.viewset_method,<EOL>)<EOL> | Get logging identifier. | f276:c1:m3 |
def _warning(self, msg, duration=None, results=None): | logger.warning(<EOL>"<STR_LIT>".format(msg, self._get_logging_id()),<EOL>extra=self._get_logging_extra(duration=duration, results=results),<EOL>)<EOL> | Log warnings. | f276:c1:m4 |
def subscribe(self, session_id, dependencies=None): | try:<EOL><INDENT>change_detection = self._meta.change_detection<EOL>if change_detection not in [<EOL>Options.CHANGE_DETECTION_PUSH,<EOL>Options.CHANGE_DETECTION_POLL,<EOL>]:<EOL><INDENT>raise NotImplementedError(<EOL>"<STR_LIT>".format(<EOL>change_detection<EOL>)<EOL>)<EOL><DEDENT>viewset_results = self._viewset_results()<EOL>poll_interval = (<EOL>self._meta.poll_interval<EOL>if change_detection == Options.CHANGE_DETECTION_POLL<EOL>else None<EOL>)<EOL>for retry in range(MAX_INTEGRITY_ERROR_RETRIES):<EOL><INDENT>is_subscribed = False<EOL>cursor = connection.cursor()<EOL>try:<EOL><INDENT>cursor.execute(<EOL>"""<STR_LIT>""".format(<EOL>observer_table=models.Observer._meta.db_table,<EOL>subscriber_table=models.Subscriber._meta.db_table,<EOL>observer_subscribers_table=models.Observer.subscribers.through._meta.db_table,<EOL>),<EOL>params={<EOL>'<STR_LIT>': self.id,<EOL>'<STR_LIT>': pickle.dumps(self._request),<EOL>'<STR_LIT>': poll_interval,<EOL>'<STR_LIT>': session_id,<EOL>},<EOL>)<EOL>is_subscribed = True<EOL><DEDENT>except IntegrityError as err:<EOL><INDENT>msg = str(err)<EOL>if (<EOL>'<STR_LIT>' in msg<EOL>and '<STR_LIT>' in msg<EOL>):<EOL><INDENT>is_subscribed = True<EOL><DEDENT>elif (<EOL>'<STR_LIT>' in msg or '<STR_LIT>' in msg<EOL>) and '<STR_LIT>' in msg:<EOL><INDENT>if retry == MAX_INTEGRITY_ERROR_RETRIES - <NUM_LIT:1>:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise<EOL><DEDENT><DEDENT>finally:<EOL><INDENT>cursor.close()<EOL><DEDENT>if is_subscribed:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if change_detection == Options.CHANGE_DETECTION_PUSH:<EOL><INDENT>if dependencies:<EOL><INDENT>tables = [model._meta.db_table for model in dependencies]<EOL><DEDENT>else:<EOL><INDENT>tables = [self._viewset.get_queryset().model._meta.db_table]<EOL><DEDENT>for table in tables:<EOL><INDENT>try:<EOL><INDENT>models.Dependency.objects.get_or_create(<EOL>observer_id=self.id, table=table<EOL>)<EOL><DEDENT>except models.Observer.DoesNotExist:<EOL><INDENT>return viewset_results<EOL><DEDENT><DEDENT><DEDENT>elif self._meta.change_detection == Options.CHANGE_DETECTION_POLL:<EOL><INDENT>async_to_sync(get_channel_layer().send)(<EOL>CHANNEL_MAIN,<EOL>{<EOL>'<STR_LIT:type>': TYPE_POLL,<EOL>'<STR_LIT>': self.id,<EOL>'<STR_LIT>': self._meta.poll_interval,<EOL>},<EOL>)<EOL><DEDENT>self._evaluate(viewset_results)<EOL><DEDENT>except Exception:<EOL><INDENT>logger.exception(<EOL>"<STR_LIT>".format(<EOL>self._get_logging_id()<EOL>),<EOL>extra=self._get_logging_extra(),<EOL>)<EOL><DEDENT>return viewset_results<EOL> | Initialize observer and register subscriber.
:param session_id: Subscriber's session identifier
:param dependencies: List of ORM models to register as dependencies for orm_notify | f276:c1:m5 |
async def evaluate(self): | @database_sync_to_async<EOL>def remove_subscribers():<EOL><INDENT>models.Observer.subscribers.through.objects.filter(<EOL>observer_id=self.id<EOL>).delete()<EOL><DEDENT>@database_sync_to_async<EOL>def get_subscriber_sessions():<EOL><INDENT>return list(<EOL>models.Observer.subscribers.through.objects.filter(observer_id=self.id)<EOL>.distinct('<STR_LIT>')<EOL>.values_list('<STR_LIT>', flat=True)<EOL>)<EOL><DEDENT>try:<EOL><INDENT>settings = get_queryobserver_settings()<EOL>start = time.time()<EOL>added, changed, removed = await database_sync_to_async(self._evaluate)()<EOL>duration = time.time() - start<EOL>if duration > settings['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>self._warning("<STR_LIT>", duration=duration)<EOL><DEDENT>if duration > settings['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>logger.error(<EOL>"<STR_LIT>".format(<EOL>self._get_logging_id()<EOL>),<EOL>extra=self._get_logging_extra(duration=duration),<EOL>)<EOL>await remove_subscribers()<EOL><DEDENT>if self._meta.change_detection == Options.CHANGE_DETECTION_POLL:<EOL><INDENT>await get_channel_layer().send(<EOL>CHANNEL_MAIN,<EOL>{<EOL>'<STR_LIT:type>': TYPE_POLL,<EOL>'<STR_LIT>': self.id,<EOL>'<STR_LIT>': self._meta.poll_interval,<EOL>},<EOL>)<EOL><DEDENT>message = {<EOL>'<STR_LIT:type>': TYPE_ITEM_UPDATE,<EOL>'<STR_LIT>': self.id,<EOL>'<STR_LIT:primary_key>': self._meta.primary_key,<EOL>'<STR_LIT>': added,<EOL>'<STR_LIT>': changed,<EOL>'<STR_LIT>': removed,<EOL>}<EOL>if added or changed or removed:<EOL><INDENT>for session_id in await get_subscriber_sessions():<EOL><INDENT>await get_channel_layer().group_send(<EOL>GROUP_SESSIONS.format(session_id=session_id), message<EOL>)<EOL><DEDENT><DEDENT><DEDENT>except Exception:<EOL><INDENT>logger.exception(<EOL>"<STR_LIT>".format(self._get_logging_id()),<EOL>extra=self._get_logging_extra(),<EOL>)<EOL><DEDENT> | Evaluate the query observer.
| f276:c1:m6 |
def _viewset_results(self): | results = []<EOL>try:<EOL><INDENT>response = self._viewset_method(<EOL>self._viewset.request, *self._request.args, **self._request.kwargs<EOL>)<EOL>if response.status_code == <NUM_LIT:200>:<EOL><INDENT>results = response.data<EOL>if not isinstance(results, list):<EOL><INDENT>if isinstance(results, dict):<EOL><INDENT>if '<STR_LIT>' in results and isinstance(<EOL>results['<STR_LIT>'], list<EOL>):<EOL><INDENT>results = results['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>results.setdefault(self._meta.primary_key, <NUM_LIT:1>)<EOL>results = [collections.OrderedDict(results)]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>except Http404:<EOL><INDENT>pass<EOL><DEDENT>except django_exceptions.ObjectDoesNotExist:<EOL><INDENT>pass<EOL><DEDENT>return results<EOL> | Parse results from the viewset response. | f276:c1:m7 |
def _evaluate(self, viewset_results=None): | if viewset_results is None:<EOL><INDENT>viewset_results = self._viewset_results()<EOL><DEDENT>try:<EOL><INDENT>observer = models.Observer.objects.get(id=self.id)<EOL>if observer.subscribers.count() == <NUM_LIT:0>:<EOL><INDENT>return (None, None, None)<EOL><DEDENT>models.Observer.objects.filter(id=self.id).update(<EOL>last_evaluation=timezone.now()<EOL>)<EOL>max_result = get_queryobserver_settings()['<STR_LIT>']['<STR_LIT>']<EOL>if len(viewset_results) > max_result:<EOL><INDENT>self._warning(<EOL>"<STR_LIT>",<EOL>results=len(viewset_results),<EOL>)<EOL><DEDENT>new_results = collections.OrderedDict()<EOL>for order, item in enumerate(viewset_results):<EOL><INDENT>if not isinstance(item, dict):<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT>item = {'<STR_LIT>': order, '<STR_LIT:data>': item}<EOL>try:<EOL><INDENT>new_results[str(item['<STR_LIT:data>'][self._meta.primary_key])] = item<EOL><DEDENT>except KeyError:<EOL><INDENT>raise KeyError(<EOL>"<STR_LIT>".format(<EOL>self._meta.primary_key<EOL>)<EOL>)<EOL><DEDENT><DEDENT>added, changed = [], []<EOL>new_ids = list(new_results.keys())<EOL>removed_qs = observer.items.exclude(primary_key__in=new_results.keys())<EOL>removed = list(removed_qs.values('<STR_LIT>', '<STR_LIT:data>'))<EOL>maybe_changed_qs = observer.items.filter(primary_key__in=new_results.keys())<EOL>with transaction.atomic():<EOL><INDENT>removed_qs.delete()<EOL>with connection.cursor() as cursor:<EOL><INDENT>cursor.execute(<EOL>"<STR_LIT>"<EOL>)<EOL><DEDENT>for item_id, old_order, old_data in maybe_changed_qs.values_list(<EOL>'<STR_LIT:primary_key>', '<STR_LIT>', '<STR_LIT:data>'<EOL>):<EOL><INDENT>new_item = new_results[item_id]<EOL>new_ids.remove(item_id)<EOL>if new_item['<STR_LIT:data>'] != old_data:<EOL><INDENT>changed.append(new_item)<EOL>observer.items.filter(primary_key=item_id).update(<EOL>data=new_item['<STR_LIT:data>'], order=new_item['<STR_LIT>']<EOL>)<EOL><DEDENT>elif new_item['<STR_LIT>'] != old_order:<EOL><INDENT>changed.append(new_item)<EOL>observer.items.filter(primary_key=item_id).update(<EOL>order=new_item['<STR_LIT>']<EOL>)<EOL><DEDENT><DEDENT>for item_id in new_ids:<EOL><INDENT>item = new_results[item_id]<EOL>added.append(item)<EOL>observer.items.create(<EOL>primary_key=item_id, order=item['<STR_LIT>'], data=item['<STR_LIT:data>']<EOL>)<EOL><DEDENT><DEDENT>return (added, changed, removed)<EOL><DEDENT>except models.Observer.DoesNotExist:<EOL><INDENT>return (None, None, None)<EOL><DEDENT> | Evaluate query observer.
:param viewset_results: Objects returned by the viewset query | f276:c1:m8 |
def handle(self, *args, **options): | models.Observer.objects.all().delete()<EOL>models.Subscriber.objects.all().delete()<EOL>for cache_key in cache.keys(search='<STR_LIT>'.format(THROTTLE_CACHE_PREFIX)):<EOL><INDENT>cache.delete(cache_key)<EOL><DEDENT> | Command handle. | f278:c0:m0 |
def observable(<EOL>_method_or_viewset=None, poll_interval=None, primary_key=None, dependencies=None<EOL>): | if poll_interval and dependencies:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>def decorator_observable(method_or_viewset):<EOL><INDENT>if inspect.isclass(method_or_viewset):<EOL><INDENT>list_method = getattr(method_or_viewset, '<STR_LIT:list>', None)<EOL>if list_method is not None:<EOL><INDENT>method_or_viewset.list = observable(list_method)<EOL><DEDENT>return method_or_viewset<EOL><DEDENT>if getattr(method_or_viewset, '<STR_LIT>', False):<EOL><INDENT>return method_or_viewset<EOL><DEDENT>@functools.wraps(method_or_viewset)<EOL>def wrapper(self, request, *args, **kwargs):<EOL><INDENT>if observer_request.OBSERVABLE_QUERY_PARAMETER in request.query_params:<EOL><INDENT>session_id = request.query_params[<EOL>observer_request.OBSERVABLE_QUERY_PARAMETER<EOL>]<EOL>request = observer_request.Request(<EOL>self.__class__, method_or_viewset.__name__, request, args, kwargs<EOL>)<EOL>instance = observer.QueryObserver(request)<EOL>data = instance.subscribe(session_id, dependencies)<EOL>return response.Response({'<STR_LIT>': instance.id, '<STR_LIT>': data})<EOL><DEDENT>else:<EOL><INDENT>return method_or_viewset(self, request, *args, **kwargs)<EOL><DEDENT><DEDENT>wrapper.is_observable = True<EOL>if poll_interval is not None:<EOL><INDENT>wrapper.observable_change_detection = observer.Options.CHANGE_DETECTION_POLL<EOL>wrapper.observable_poll_interval = poll_interval<EOL><DEDENT>if primary_key is not None:<EOL><INDENT>wrapper.observable_primary_key = primary_key<EOL><DEDENT>return wrapper<EOL><DEDENT>if _method_or_viewset is None:<EOL><INDENT>return decorator_observable<EOL><DEDENT>else:<EOL><INDENT>return decorator_observable(_method_or_viewset)<EOL><DEDENT> | Make ViewSet or ViewSet method observable.
Decorating a ViewSet class is the same as decorating its `list` method.
If decorated method returns a response containing a list of items, it must
use the provided `LimitOffsetPagination` for any pagination. In case a
non-list response is returned, the resulting item will be wrapped into a
list.
When multiple decorators are used, `observable` must be the first one to be
applied as it needs access to the method name.
:param poll_interval: Configure given observable as a polling observable
:param primary_key: Primary key for tracking observable items
:param dependencies: List of ORM models to register as dependencies for
orm_notify. If None, the observer will subscribe to notifications from
the queryset model. | f280:m0 |
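This decorator also demonstrates the optional-argument pattern (`_method_or_viewset=None`). A self-contained sketch of just that pattern, with simplified, hypothetical attributes:

```python
import functools

def observable_sketch(_func=None, *, poll_interval=None):
    # Works both bare (@observable_sketch) and called with keyword
    # arguments (@observable_sketch(poll_interval=10)).
    def decorator(func):
        if getattr(func, 'is_observable', False):
            return func  # idempotent, as in the original
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        wrapper.is_observable = True
        wrapper.observable_poll_interval = poll_interval
        return wrapper
    return decorator if _func is None else decorator(_func)

@observable_sketch
def plain():
    return 1

@observable_sketch(poll_interval=10)
def polled():
    return 2

assert plain.is_observable and polled.observable_poll_interval == 10
```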
async def observer_orm_notify(self, message): | @database_sync_to_async<EOL>def get_observers(table):<EOL><INDENT>return list(<EOL>Observer.objects.filter(<EOL>dependencies__table=table, subscribers__isnull=False<EOL>)<EOL>.distinct('<STR_LIT>')<EOL>.values_list('<STR_LIT>', flat=True)<EOL>)<EOL><DEDENT>observers_ids = await get_observers(message['<STR_LIT>'])<EOL>for observer_id in observers_ids:<EOL><INDENT>await self.channel_layer.send(<EOL>CHANNEL_WORKER, {'<STR_LIT:type>': TYPE_EVALUATE, '<STR_LIT>': observer_id}<EOL>)<EOL><DEDENT> | Process notification from ORM. | f281:c0:m0 |
async def observer_poll(self, message): | <EOL>await asyncio.sleep(message['<STR_LIT>'])<EOL>await self.channel_layer.send(<EOL>CHANNEL_WORKER, {'<STR_LIT:type>': TYPE_EVALUATE, '<STR_LIT>': message['<STR_LIT>']}<EOL>)<EOL> | Poll observer after a delay. | f281:c0:m1 |
def __init__(self, *args, **kwargs): | self._executor_cache = collections.OrderedDict()<EOL>super().__init__(*args, **kwargs)<EOL> | Construct observer worker consumer. | f281:c1:m0 |
async def observer_evaluate(self, message): | observer_id = message['<STR_LIT>']<EOL>throttle_rate = get_queryobserver_settings()['<STR_LIT>']<EOL>if throttle_rate <= <NUM_LIT:0>:<EOL><INDENT>await self._evaluate(observer_id)<EOL>return<EOL><DEDENT>cache_key = throttle_cache_key(observer_id)<EOL>try:<EOL><INDENT>count = cache.incr(cache_key)<EOL>if count == <NUM_LIT:2>:<EOL><INDENT>await self.channel_layer.send(<EOL>CHANNEL_MAIN,<EOL>{<EOL>'<STR_LIT:type>': TYPE_POLL,<EOL>'<STR_LIT>': observer_id,<EOL>'<STR_LIT>': throttle_rate,<EOL>},<EOL>)<EOL><DEDENT><DEDENT>except ValueError:<EOL><INDENT>count = cache.get_or_set(cache_key, default=<NUM_LIT:1>, timeout=throttle_rate)<EOL>if count == <NUM_LIT:1>:<EOL><INDENT>await self._evaluate(observer_id)<EOL><DEDENT><DEDENT> | Execute observer evaluation on the worker or throttle. | f281:c1:m2 |
def websocket_connect(self, message): | self.session_id = self.scope['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']<EOL>super().websocket_connect(message)<EOL>Subscriber.objects.get_or_create(session_id=self.session_id)<EOL> | Called when WebSocket connection is established. | f281:c2:m0 |
@property<EOL><INDENT>def groups(self):<DEDENT> | if not hasattr(self, '<STR_LIT>'):<EOL><INDENT>return []<EOL><DEDENT>return [GROUP_SESSIONS.format(session_id=self.session_id)]<EOL> | Groups this channel should add itself to. | f281:c2:m1 |
def disconnect(self, code): | Subscriber.objects.filter(session_id=self.session_id).delete()<EOL> | Called when WebSocket connection is closed. | f281:c2:m2 |
def observer_update(self, message): | <EOL>for action in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>for item in message[action]:<EOL><INDENT>self.send_json(<EOL>{<EOL>'<STR_LIT>': action,<EOL>'<STR_LIT>': message['<STR_LIT>'],<EOL>'<STR_LIT:primary_key>': message['<STR_LIT:primary_key>'],<EOL>'<STR_LIT>': item['<STR_LIT>'],<EOL>'<STR_LIT>': item['<STR_LIT:data>'],<EOL>}<EOL>)<EOL><DEDENT><DEDENT> | Called when update from observer is received. | f281:c2:m3 |
def assertRaisesWithMessage(self, exception_type, message, func,<EOL>*args, **kwargs): | try:<EOL><INDENT>func(*args, **kwargs)<EOL><DEDENT>except exception_type as err:<EOL><INDENT>self.assertEqual(err.args[<NUM_LIT:0>], message)<EOL><DEDENT>else:<EOL><INDENT>self.fail('<STR_LIT>'<EOL>.format(func.__name__, exception_type.__name__))<EOL><DEDENT> | Assert that executing func with the provided arguments raises the
given exception with the given message string. | f293:c0:m0 |
def get_config_yaml(): | return yaml.load(open(<EOL>'<STR_LIT>',<EOL>'<STR_LIT:r>')) or {}<EOL> | Load test config YAML file | f295:m0 |
def get_config_yaml(): | config_file = open('<STR_LIT>', '<STR_LIT:r>')<EOL>return yaml.load(config_file) or {}<EOL> | Load Test Config | f298:m0 |
def setUp(self): | if self._config is None:<EOL><INDENT>self._config = get_config_yaml()<EOL><DEDENT>self._cache = cache.Cache(self._config['<STR_LIT>'])<EOL> | Create Redis | f298:c0:m0 |
def tearDown(self): | with self._cache as redis_connection:<EOL><INDENT>redis_connection.flushdb()<EOL><DEDENT> | Flush Database | f298:c0:m1 |
def __init__(self, value): | self._parent = None<EOL>self._children = []<EOL>self._value = value<EOL> | Initialize Node | f299:c0:m0 |
@property<EOL><INDENT>def parent(self):<DEDENT> | return self._parent<EOL> | Return parent node | f299:c0:m1 |
@parent.setter<EOL><INDENT>def parent(self, parent):<DEDENT> | self._parent = parent<EOL> | Set parent node | f299:c0:m2 |
@property<EOL><INDENT>def value(self):<DEDENT> | return self._value<EOL> | Return value | f299:c0:m3 |
@property<EOL><INDENT>def children(self):<DEDENT> | return self._children<EOL> | Get Children | f299:c0:m4 |
@property<EOL><INDENT>def child_count(self):<DEDENT> | return len(self._children)<EOL> | Get child count | f299:c0:m5 |
def add_child(self, child): | if not isinstance(child, DependencyNode):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>self._children.append(child)<EOL> | Add a child node | f299:c0:m6 |
def add_children(self, children): | if not isinstance(children, list):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>for child in children:<EOL><INDENT>self.add_child(child)<EOL><DEDENT> | Add multiple children | f299:c0:m7 |
def __init__(self, heads): | self._heads = []<EOL>if heads is not None:<EOL><INDENT>if not isinstance(heads, list):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>for head in heads:<EOL><INDENT>self.add_head(head)<EOL><DEDENT><DEDENT> | Initialize Tree | f299:c1:m0 |
@property<EOL><INDENT>def heads(self):<DEDENT> | return self._heads<EOL> | Get heads | f299:c1:m1 |