<SYSTEM_TASK:>
Add an element to a RangeSet.
<END_TASK>
<USER_TASK:>
Description:
def add(self, element, pad=0):
"""Add an element to a RangeSet.
This has no effect if the element is already present.
""" |
    set.add(self, int(element))
    if pad > 0 and self.padding is None:
        self.padding = pad |
<SYSTEM_TASK:>
Remove element from the RangeSet if it is a member.
<END_TASK>
<USER_TASK:>
Description:
def discard(self, element):
"""Remove element from the RangeSet if it is a member.
If the element is not a member, do nothing.
""" |
    try:
        i = int(element)
        set.discard(self, i)
    except ValueError:
        pass |
<SYSTEM_TASK:>
Universal open file facility.
<END_TASK>
<USER_TASK:>
Description:
def _open(filename, mode="r"):
"""
Universal open file facility.
With normal files, this function behaves like the open builtin.
With gzip-ed files, it decompresses or compresses according to the specified mode.
In addition, when filename is '-', it opens the standard input or output according to
the specified mode.
Mode is expected to be either 'r' or 'w'.
""" |
    if filename.endswith(".gz"):
        return GzipFile(filename, mode, COMPRESSION_LEVEL)
    elif filename == "-":
        if mode == "r":
            return _stdin
        elif mode == "w":
            return _stdout
    else:
        # TODO: set encoding to UTF-8?
        return open(filename, mode=mode) |
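
A minimal usage sketch for _open (the filenames are hypothetical; GzipFile, _stdin, _stdout and COMPRESSION_LEVEL are the module-level names the body already relies on):

# Hypothetical call sites covering the three dispatch branches above.
plain = _open("corpus.txt")        # behaves like the open() builtin
gzipped = _open("corpus.txt.gz")   # transparent (de)compression
stdin = _open("-")                 # standard input in mode "r"
for fd in (plain, gzipped):
    fd.close()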
<SYSTEM_TASK:>
Stable sort of the sequence a according to the keys given in r.
<END_TASK>
<USER_TASK:>
Description:
def _radixPass(a, b, r, n, K):
"""
Stable sort of the sequence a according to the keys given in r.
>>> a=range(5)
>>> b=[0]*5
>>> r=[2,1,3,0,4]
>>> _radixPass(a, b, r, 5, 5)
>>> b
[3, 1, 0, 2, 4]
When n is less than the length of a, the end of b must be left unaltered.
>>> b=[5]*5
>>> _radixPass(a, b, r, 2, 2)
>>> b
[1, 0, 5, 5, 5]
>>> _a=a=[1, 0]
>>> b= [0]*2
>>> r=[0, 1]
>>> _radixPass(a, b, r, 2, 2)
>>> a=_a
>>> b
[0, 1]
>>> a=[1, 1]
>>> _radixPass(a, b, r, 2, 2)
>>> b
[1, 1]
>>> a=[0, 1, 1, 0]
>>> b= [0]*4
>>> r=[0, 1]
>>> _radixPass(a, b, r, 4, 2)
>>> a=_a
>>> b
[0, 0, 1, 1]
""" |
    c = _array("i", [0] * (K + 1))  # counter array
    for i in range(n):  # count occurrences
        c[r[a[i]]] += 1
    sum = 0
    for i in range(K + 1):  # exclusive prefix sums
        t = c[i]
        c[i] = sum
        sum += t
    for a_i in a[:n]:  # sort
        b[c[r[a_i]]] = a_i
        c[r[a_i]] += 1 |
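
For experimenting outside the module, here is a self-contained sketch of the same stable counting-sort pass; it assumes only the standard library (the module-level _array alias presumably points at array.array):

from array import array

def radix_pass_sketch(a, r, K):
    """Stable counting sort of `a` by the keys r[a[i]] (keys in 0..K)."""
    c = array("i", [0] * (K + 1))
    for x in a:                    # count occurrences of each key
        c[r[x]] += 1
    total = 0
    for i in range(K + 1):         # exclusive prefix sums
        c[i], total = total, total + c[i]
    b = [0] * len(a)
    for x in a:                    # scatter, preserving input order
        b[c[r[x]]] = x
        c[r[x]] += 1
    return b

# Matches the first doctest above:
assert radix_pass_sketch(list(range(5)), [2, 1, 3, 0, 4], 5) == [3, 1, 0, 2, 4]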
<SYSTEM_TASK:>
Exact number of atomic operations in _radixPass.
<END_TASK>
<USER_TASK:>
Description:
def _nbOperations(n):
"""
Exact number of atomic operations in _radixPass.
""" |
    if n < 2:
        return 0
    else:
        n0 = (n + 2) // 3
        n02 = n0 + n // 3
        return 3 * (n02) + n0 + _nbOperations(n02) |
<SYSTEM_TASK:>
Returns the length of the longest common prefix of seq1
<END_TASK>
<USER_TASK:>
Description:
def _longestCommonPrefix(seq1, seq2, start1=0, start2=0):
"""
Returns the length of the longest common prefix of seq1
starting at offset start1 and seq2 starting at offset start2.
>>> _longestCommonPrefix("abcdef", "abcghj")
3
>>> _longestCommonPrefix("abcghj", "abcdef")
3
>>> _longestCommonPrefix("miss", "")
0
>>> _longestCommonPrefix("", "mr")
0
>>> _longestCommonPrefix(range(128), range(128))
128
>>> _longestCommonPrefix("abcabcabc", "abcdefabcdef", 0, 6)
3
>>> _longestCommonPrefix("abcdefabcdef", "abcabcabc", 6, 0)
3
>>> _longestCommonPrefix("abc", "abcabc", 1, 4)
2
>>> _longestCommonPrefix("abcabc", "abc", 4, 1)
2
""" |
    len1 = len(seq1) - start1
    len2 = len(seq2) - start2
    # We set seq2 as the shortest sequence
    if len1 < len2:
        seq1, seq2 = seq2, seq1
        start1, start2 = start2, start1
        len1, len2 = len2, len1
    # if seq2 is empty returns 0
    if len2 == 0:
        return 0
    i = 0
    pos2 = start2
    for i in range(min(len1, len2)):
        # print seq1, seq2, start1, start2
        if seq1[start1 + i] != seq2[start2 + i]:
            return i
    # we have reached the end of seq2 (need to increment i)
    return i + 1 |
<SYSTEM_TASK:>
Compute the longest common prefix for every pair of adjacent suffixes.
<END_TASK>
<USER_TASK:>
Description:
def LCP(SA):
"""
Compute the longest common prefix for every pair of adjacent suffixes.
The result is a list of same size as SA.
Given two suffixes at positions i and i+1,
their LCP is stored at position i+1.
A zero is stored at position 0 of the output.
>>> SA=SuffixArray("abba", unit=UNIT_BYTE)
>>> SA._LCP_values
array('i', [0, 1, 0, 1])
>>> SA=SuffixArray("", unit=UNIT_BYTE)
>>> SA._LCP_values
array('i')
>>> SA=SuffixArray("", unit=UNIT_CHARACTER)
>>> SA._LCP_values
array('i')
>>> SA=SuffixArray("", unit=UNIT_WORD)
>>> SA._LCP_values
array('i')
>>> SA=SuffixArray("abab", unit=UNIT_BYTE)
>>> SA._LCP_values
array('i', [0, 2, 0, 1])
""" |
    string = SA.string
    length = SA.length
    lcps = _array("i", [0] * length)
    SA = SA.SA
    if _trace:
        delta = max(length // 100, 1)
        for i, pos in enumerate(SA):
            if i % delta == 0:
                percent = float((i + 1) * 100) / length
                print >> _stderr, "Compute_LCP %.2f%% (%i/%i)\r" % (percent, i + 1, length),
            lcps[i] = _longestCommonPrefix(string, string, SA[i - 1], pos)
    else:
        for i, pos in enumerate(SA):
            lcps[i] = _longestCommonPrefix(string, string, SA[i - 1], pos)
    if _trace:
        print >> _stderr, "Compute_LCP %.2f%% (%i/%i)\r" % (100.0, length, length)
    if lcps:  # Correct the case where string[0] == string[-1]
        lcps[0] = 0
    return lcps |
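
To illustrate the adjacency convention described in the docstring, here is a naive stand-alone computation reproducing the "abab" doctest (a hypothetical O(n^2) helper for illustration only, not part of the module):

def lcp_sketch(text):
    """LCP of each suffix with its predecessor in sorted suffix order."""
    order = sorted(range(len(text)), key=lambda i: text[i:])
    lcps = [0] * len(text)
    for k in range(1, len(order)):
        a, b = text[order[k - 1]:], text[order[k]:]
        n = 0
        while n < min(len(a), len(b)) and a[n] == b[n]:
            n += 1
        lcps[k] = n
    return lcps

assert lcp_sketch("abab") == [0, 2, 0, 1]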
<SYSTEM_TASK:>
Entry point for the standalone script.
<END_TASK>
<USER_TASK:>
Description:
def main():
"""
Entry point for the standalone script.
""" |
    (options, strings) = parseArgv()
    global _suffixArray, _trace
    #############
    # Verbosity #
    #############
    _trace = options.verbose
    ###################
    # Processing unit #
    ###################
    if options.unit == "byte":
        options.unit = UNIT_BYTE
    elif options.unit == "character":
        options.unit = UNIT_CHARACTER
    elif options.unit == "word":
        options.unit = UNIT_WORD
    else:
        print >> _stderr, "Please specify a valid unit type."
        exit(EXIT_BAD_OPTION)
    ######################
    # Build suffix array #
    ######################
    if not options.SAFile:  # Build the suffix array from INPUT
        if not options.input:  # default is standard input
            options.input = "-"
        try:
            string = _open(options.input, "r").read()
        except IOError:
            print >> _stderr, "File %s does not exist." % options.input
            exit(EXIT_ERROR_FILE)
        SA = SuffixArray(string, options.unit, options.encoding, options.noLCPs)
    ########################
    # Or load suffix array #
    ########################
    elif not options.input and options.SAFile:  # Load suffix array from SA_FILE
        try:
            SA = SuffixArray.fromFile(options.SAFile)
        except IOError:
            print >> _stderr, "SA_FILE %s does not exist." % options.SAFile
            exit(EXIT_ERROR_FILE)
    else:
        print >> _stderr, "Please set only one option amongst --input and --load.\n" + \
            "Type %s --help for more details." % _argv[0]
        exit(EXIT_BAD_OPTION)
    ######################
    # Print suffix array #
    ######################
    if options.printSA:
        # Buffered output
        deltaLength = 1000
        start = 0
        while start < SA.length:
            print >> _stderr, SA.__str__(start, start + deltaLength)
            start += deltaLength
    ####################################
    # Look for every string in strings #
    ####################################
    for string in strings:
        print >> _stderr, ""
        print >> _stderr, "Positions of %s:" % string
        print >> _stderr, " %s" % list(SA.find(string))
    #########################
    # Save SAFILE if needed #
    #########################
    if options.output:
        SA.toFile(options.output)
    if _trace: print >> _stderr, "Done\r\n" |
<SYSTEM_TASK:>
Tokenizer utility.
<END_TASK>
<USER_TASK:>
Description:
def tokenize(self, string):
"""
Tokenizer utility.
When processing bytes, the string is output unaltered.
The character unit type is used for unicode data: the string is
decoded according to the encoding provided.
In the case of the word unit, EOL characters are detached from the
preceding word, and the output is the list of words, i.e. the list of
non-space strings separated by space strings.
>>> SA=SuffixArray('abecedaire', UNIT_BYTE)
>>> SA.tokenize('abecedaire')=='abecedaire'
True
>>> len(SA.tokenize('abecedaire'))
10
>>> SA=SuffixArray('abecedaire', UNIT_BYTE, "utf-8")
>>> SA.tokenize('abecedaire')==u'abecedaire'
True
>>> len(SA.tokenize('abecedaire'))
10
>>> SA=SuffixArray('mississippi', UNIT_WORD)
>>> SA.tokenize('miss issi ppi')
['miss', 'issi', 'ppi']
>>> SA.tokenize('miss issi\\nppi')
['miss', 'issi', '\\n', 'ppi']
""" |
    if self.unit == UNIT_WORD:
        # the EOL character is treated as a word, hence a substitution
        # before split
        return [token for token in string.replace("\n", " \n ").split(self.tokSep) if token != ""]
    elif self.unit == UNIT_CHARACTER:
        return string.decode(self.encoding)
    else:
        return string |
<SYSTEM_TASK:>
Save the suffix array instance including all features attached in
<END_TASK>
<USER_TASK:>
Description:
def toFile(self, filename):
"""
Save the suffix array instance including all features attached in
filename. Accept any filename following the _open conventions,
for example if it ends with .gz the file created will be a compressed
GZip file.
""" |
    start = _time()
    fd = _open(filename, "w")
    savedData = [self.string, self.unit, self.voc, self.vocSize, self.SA, self.features]
    for featureName in self.features:
        featureValues = getattr(self, "_%s_values" % featureName)
        featureDefault = getattr(self, "%s_default" % featureName)
        savedData.append((featureValues, featureDefault))
    fd.write(_dumps(savedData, _HIGHEST_PROTOCOL))
    fd.flush()
    try:
        self.sizeOfSavedFile = getsize(fd.name)
    except OSError:  # if stdout is used
        self.sizeOfSavedFile = "-1"
    self.toFileTime = _time() - start
    if _trace: print >> _stderr, "toFileTime %.2fs" % self.toFileTime
    if _trace: print >> _stderr, "sizeOfSavedFile %sb" % self.sizeOfSavedFile
    fd.close() |
<SYSTEM_TASK:>
Load a suffix array instance from filename, a file created by
<END_TASK>
<USER_TASK:>
Description:
def fromFile(cls, filename):
"""
Load a suffix array instance from filename, a file created by
toFile.
Accept any filename following the _open conventions.
""" |
    self = cls.__new__(cls)  # new instance which does not call __init__
    start = _time()
    savedData = _loads(_open(filename, "r").read())
    # load common attributes
    self.string, self.unit, self.voc, self.vocSize, self.SA, features = savedData[:6]
    self.length = len(self.SA)
    # determine token delimiter
    if self.unit == UNIT_WORD:
        self.tokSep = " "
    elif self.unit in (UNIT_CHARACTER, UNIT_BYTE):
        self.tokSep = ""
    else:
        raise Exception("Unknown unit type identifier:", self.unit)
    # recompute tokId based on voc
    self.tokId = dict((char, iChar) for iChar, char in enumerate(self.voc))
    self.nbSentences = self.string.count(self.tokId.get("\n", 0))
    # Load features
    self.features = []
    for featureName, (featureValues, featureDefault) in zip(features, savedData[6:]):
        self.addFeatureSA((lambda _: featureValues), name=featureName, default=featureDefault)
    self.fromFileTime = _time() - start
    if _trace: print >> _stderr, "fromFileTime %.2fs" % self.fromFileTime
    return self |
<SYSTEM_TASK:>
Dichotomy search of subString in the suffix array.
<END_TASK>
<USER_TASK:>
Description:
def find(self, subString, features=[]):
"""
Dichotomy search of subString in the suffix array.
As soon as a suffix which starts with subString is found,
it uses the LCPs in order to find the other matching suffixes.
The output consists of a list of tuples (pos, feature0, feature1, ...)
where feature0, feature1, ... are the features attached to the suffix
at position pos.
Features are listed in the same order as requested in the input list of
features [featureName0, featureName1, ...]
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("ssi")
array('i', [5, 2])
>>> SA.find("mi")
array('i', [0])
>>> SA=SuffixArray('miss A and miss B', UNIT_WORD)
>>> SA.find("miss")
array('i', [0, 3])
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("iss", ['LCP'])
[(4, 1), (1, 4)]
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("A")
array('i')
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("pp")
array('i', [8])
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("ppp")
array('i')
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.find("im")
array('i')
""" |
    SA = self.SA
    LCPs = self._LCP_values
    string = self.string
    middle = self._findOne(subString)
    if middle is False:
        return _array('i')
    subString = _array("i", [self.tokId[c] for c in self.tokenize(subString)])
    lenSubString = len(subString)
    ###########################################
    # Use LCPs to retrieve the other suffixes #
    ###########################################
    lower = middle
    upper = middle + 1
    middleLCP = LCPs[middle]
    while lower > 0 and LCPs[lower] >= lenSubString:
        lower -= 1
    while upper < self.length and LCPs[upper] >= lenSubString:
        upper += 1
    ###############################################
    # When features is empty, outputs a flat list #
    ###############################################
    res = SA[lower:upper]
    if len(features) == 0:
        return res
    ##############################################
    # When features is non empty, outputs a list #
    # of tuples (pos, feature_1, feature_2, ...) #
    ##############################################
    else:
        features = [getattr(self, "_%s_values" % featureName) for featureName in features]
        features = [featureValues[lower:upper] for featureValues in features]
        return zip(res, *features) |
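
The two while-loops above implement the LCP widening step: once _findOne returns a single hit, neighbouring suffixes are absorbed for as long as their stored LCP is at least the query length. A stand-alone sketch of just that step, on plain lists:

def widen_sketch(lcps, middle, k, length):
    """Expand one hit at `middle` to the full run sharing a prefix >= k."""
    lower, upper = middle, middle + 1
    while lower > 0 and lcps[lower] >= k:
        lower -= 1
    while upper < length and lcps[upper] >= k:
        upper += 1
    return lower, upper

# With LCPs [0, 3, 3, 1], a hit at index 1 and a query of length 3
# widen to the half-open range [0, 3):
assert widen_sketch([0, 3, 3, 1], middle=1, k=3, length=4) == (0, 3)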
<SYSTEM_TASK:>
Return a generator that yields pieces of XML.
<END_TASK>
<USER_TASK:>
Description:
def generate(self):
"""Return a generator that yields pieces of XML.""" |
    # atom demands either an author element in every entry or a global one
    if not self.author:
        if False in map(lambda e: bool(e.author), self.entries):
            self.author = ({'name': u'unbekannter Autor'},)
    if not self.updated:
        dates = sorted([entry.updated for entry in self.entries])
        self.updated = dates and dates[-1] or datetime.utcnow()
    yield u'<?xml version="1.0" encoding="utf-8"?>\n'
    yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
    yield ' ' + _make_text_block('title', self.title, self.title_type)
    yield u' <id>%s</id>\n' % escape(self.id)
    yield u' <updated>%s</updated>\n' % format_iso8601(self.updated, self.timezone)
    if self.url:
        yield u' <link href="%s" />\n' % escape(self.url, True)
    if self.feed_url:
        yield u' <link href="%s" rel="self" />\n' % \
            escape(self.feed_url, True)
    for link in self.links:
        yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
            (k, escape(link[k], True)) for k in link)
    for author in self.author:
        yield u' <author>\n'
        yield u' <name>%s</name>\n' % escape(author['name'])
        if 'uri' in author:
            yield u' <uri>%s</uri>\n' % escape(author['uri'])
        if 'email' in author:
            yield ' <email>%s</email>\n' % escape(author['email'])
        yield ' </author>\n'
    if self.subtitle:
        yield ' ' + _make_text_block('subtitle', self.subtitle,
                                     self.subtitle_type)
    if self.icon:
        yield u' <icon>%s</icon>\n' % escape(self.icon)
    if self.logo:
        yield u' <logo>%s</logo>\n' % escape(self.logo)
    if self.rights:
        yield ' ' + _make_text_block('rights', self.rights,
                                     self.rights_type)
    generator_name, generator_url, generator_version = self.generator
    if generator_name or generator_url or generator_version:
        tmp = [u' <generator']
        if generator_url:
            tmp.append(u' uri="%s"' % escape(generator_url, True))
        if generator_version:
            tmp.append(u' version="%s"' % escape(generator_version, True))
        tmp.append(u'>%s</generator>\n' % escape(generator_name))
        yield u''.join(tmp)
    for entry in self.entries:
        for line in entry.generate():
            yield u' ' + line
    yield u'</feed>\n' |
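
A hedged usage sketch, assuming this generate() lives on a werkzeug-style AtomFeed class (the constructor and the add() signature shown here are assumptions, not confirmed by this snippet):

from datetime import datetime

# Hypothetical feed construction; field names follow the attributes used above.
feed = AtomFeed('Example blog', feed_url='http://example.com/feed.atom',
                url='http://example.com/')
feed.add('First post', 'Hello world', content_type='html',
         author='Jane Doe', url='http://example.com/posts/1',
         updated=datetime.utcnow())
xml = u''.join(feed.generate())  # drives the generator defined above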
<SYSTEM_TASK:>
Temporarily override config values.
<END_TASK>
<USER_TASK:>
Description:
def override_djconfig(**new_cache_values):
"""
Temporarily override config values.
This is similar to :py:func:`django.test.override_settings`,\
use it in testing.
:param new_cache_values: Keyword arguments,\
the key should match one in the config,\
a new one is created otherwise,\
the value is overridden within\
the decorated function
""" |
    def decorator(func):
        @wraps(func)
        def func_wrapper(*args, **kw):
            old_cache_values = {
                key: getattr(conf.config, key)
                for key in new_cache_values}
            conf.config._set_many(new_cache_values)
            try:
                # todo: make a note about this in the docs:
                # don't populate the config within migrations
                # This works because the config table is empty,
                # so even if the middleware gets called,
                # it won't update the config (_updated_at
                # will be None), this is assuming the table
                # is not populated by the user (ie: within
                # a migration), in which case it will load
                # all the default values
                return func(*args, **kw)
            finally:
                conf.config._set_many(old_cache_values)
        return func_wrapper
    return decorator |
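
A usage sketch in a test (the config key comment_max_length is purely illustrative):

from djconfig import config

@override_djconfig(comment_max_length=2048)
def test_long_comments_allowed():
    # inside the decorated function the override is active
    assert config.comment_max_length == 2048
# once it returns, the previous values are restored by the finally block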
<SYSTEM_TASK:>
Form values serialization
<END_TASK>
<USER_TASK:>
Description:
def serialize(value, field):
"""
Form values serialization
:param object value: A value to be serialized\
for saving it into the database and later\
loading it into the form as initial value
""" |
    assert isinstance(field, forms.Field)
    if isinstance(field, forms.ModelMultipleChoiceField):
        return json.dumps([v.pk for v in value])
    # todo: remove
    if isinstance(value, models.Model):
        return value.pk
    return value |
<SYSTEM_TASK:>
Get version without importing the lib
<END_TASK>
<USER_TASK:>
Description:
def get_version(package):
"""Get version without importing the lib""" |
    with io.open(os.path.join(BASE_DIR, package, '__init__.py'), encoding='utf-8') as fh:
        return [
            l.split('=', 1)[1].strip().strip("'").strip('"')
            for l in fh.readlines()
            if '__version__' in l][0] |
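
A typical call site in setup.py (the package name is illustrative; BASE_DIR is assumed to point at the repository root, as the body implies):

version = get_version('djconfig')  # reads __version__ from djconfig/__init__.py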
<SYSTEM_TASK:>
Register a config form into the registry
<END_TASK>
<USER_TASK:>
Description:
def _register(self, form_class, check_middleware=True):
"""
Register a config form into the registry
:param object form_class: The form class to register.\
Must be an instance of :py:class:`djconfig.forms.ConfigForm`
:param bool check_middleware: Check\
:py:class:`djconfig.middleware.DjConfigMiddleware`\
is registered into ``settings.MIDDLEWARE_CLASSES``. Default True
""" |
    if not issubclass(form_class, _ConfigFormBase):
        raise ValueError(
            "The form does not inherit from `forms.ConfigForm`")
    self._registry.add(form_class)
    if check_middleware:
        _check_backend() |
<SYSTEM_TASK:>
Gets every registered form's field value.\
<END_TASK>
<USER_TASK:>
Description:
def _reload(self):
"""
Gets every registered form's field value.\
If a field name is found in the db, it will load it from there.\
Otherwise, the initial value from the form field is used
""" |
    ConfigModel = apps.get_model('djconfig.Config')
    cache = {}
    data = dict(
        ConfigModel.objects
        .all()
        .values_list('key', 'value'))
    # populate cache with initial form values,
    # then with cleaned database values,
    # then with raw database file/image paths
    for form_class in self._registry:
        empty_form = form_class()
        cache.update({
            name: field.initial
            for name, field in empty_form.fields.items()})
        form = form_class(data={
            name: _deserialize(data[name], field)
            for name, field in empty_form.fields.items()
            if name in data and not isinstance(field, forms.FileField)})
        form.is_valid()
        cache.update({
            name: _unlazify(value)
            for name, value in form.cleaned_data.items()
            if name in data})
        # files are special because they don't have an initial value
        # and the POSTED data must contain the file. So, we keep
        # the stored path as is
        # TODO: see if serialize/deserialize/unlazify can be used for this instead
        cache.update({
            name: data[name]
            for name, field in empty_form.fields.items()
            if name in data and isinstance(field, forms.FileField)})
    cache['_updated_at'] = data.get('_updated_at')
    self._cache = cache |
<SYSTEM_TASK:>
Reload the config if the config\
<END_TASK>
<USER_TASK:>
Description:
def _reload_maybe(self):
"""
Reload the config if the config\
model has been updated. This is called\
once on every request by the middleware.\
Should not be called directly.
""" |
    ConfigModel = apps.get_model('djconfig.Config')
    data = dict(
        ConfigModel.objects
        .filter(key='_updated_at')
        .values_list('key', 'value'))
    if (not hasattr(self, '_updated_at') or
            self._updated_at != data.get('_updated_at')):
        self._reload() |
<SYSTEM_TASK:>
Register a new admin section.
<END_TASK>
<USER_TASK:>
Description:
def register(conf, conf_admin, **options):
"""
Register a new admin section.
:param conf: A subclass of ``djconfig.admin.Config``
:param conf_admin: A subclass of ``djconfig.admin.ConfigAdmin``
:param options: Extra options passed to ``django.contrib.admin.site.register``
""" |
    assert issubclass(conf_admin, ConfigAdmin), (
        'conf_admin is not a ConfigAdmin subclass')
    assert issubclass(
        getattr(conf_admin, 'change_list_form', None),
        ConfigForm), 'No change_list_form set'
    assert issubclass(conf, Config), (
        'conf is not a Config subclass')
    assert conf.app_label, 'No app_label set'
    assert conf.verbose_name_plural, 'No verbose_name_plural set'
    assert not conf.name or re.match(r"^[a-zA-Z_]+$", conf.name), (
        'Not a valid name. Valid chars are [a-zA-Z_]')
    config_class = type("Config", (), {})
    config_class._meta = type("Meta", (_ConfigMeta,), {
        'app_label': conf.app_label,
        'verbose_name_plural': conf.verbose_name_plural,
        'object_name': 'Config',
        'model_name': conf.name,
        'module_name': conf.name})
    admin.site.register([config_class], conf_admin, **options) |
<SYSTEM_TASK:>
Validates that a field value matches one of the values
<END_TASK>
<USER_TASK:>
Description:
def one_of(*args):
"""
Validates that a field value matches one of the values
given to this validator.
""" |
    if len(args) == 1 and isinstance(args[0], list):
        items = args[0]
    else:
        items = list(args)

    def validate(value):
        if not value in items:
            return e("{} is not in the list {}", value, items)
    return validate |
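
Each factory returns a closure that yields a formatted error on failure and None (implicitly) on success, assuming the module-level helper e formats and returns an error message:

validate_color = one_of("red", "green", "blue")
assert validate_color("red") is None        # valid: nothing returned
assert validate_color("mauve") is not None  # invalid: error message returned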
<SYSTEM_TASK:>
Validates that a field value is greater than or equal to the
<END_TASK>
<USER_TASK:>
Description:
def gte(min_value):
"""
Validates that a field value is greater than or equal to the
value given to this validator.
""" |
    def validate(value):
        if value < min_value:
            return e("{} is not greater than or equal to {}", value, min_value)
    return validate |
<SYSTEM_TASK:>
Validates that a field value is less than or equal to the
<END_TASK>
<USER_TASK:>
Description:
def lte(max_value):
"""
Validates that a field value is less than or equal to the
value given to this validator.
""" |
    def validate(value):
        if value > max_value:
            return e("{} is not less than or equal to {}", value, max_value)
    return validate |
<SYSTEM_TASK:>
Validates that a field value is greater than the
<END_TASK>
<USER_TASK:>
Description:
def gt(gt_value):
"""
Validates that a field value is greater than the
value given to this validator.
""" |
    def validate(value):
        if value <= gt_value:
            return e("{} is not greater than {}", value, gt_value)
    return validate |
<SYSTEM_TASK:>
Validates that a field value is less than the
<END_TASK>
<USER_TASK:>
Description:
def lt(lt_value):
"""
Validates that a field value is less than the
value given to this validator.
""" |
    def validate(value):
        if value >= lt_value:
            return e("{} is not less than {}", value, lt_value)
    return validate |
<SYSTEM_TASK:>
Validates that a field value is between the two values
<END_TASK>
<USER_TASK:>
Description:
def between(min_value, max_value):
"""
Validates that a field value is between the two values
given to this validator.
""" |
    def validate(value):
        if value < min_value:
            return e("{} is not greater than or equal to {}",
                     value, min_value)
        if value > max_value:
            return e("{} is not less than or equal to {}",
                     value, max_value)
    return validate |
<SYSTEM_TASK:>
Validates that a field value's length is between the bounds given to this
<END_TASK>
<USER_TASK:>
Description:
def length(min=None, max=None):
"""
Validates that a field value's length is between the bounds given to this
validator.
""" |
    def validate(value):
        if min and len(value) < min:
            return e("{} does not have a length of at least {}", value, min)
        if max and len(value) > max:
            return e("{} does not have a length of at most {}", value, max)
    return validate |
<SYSTEM_TASK:>
Validates that a field value matches the regex given to this validator.
<END_TASK>
<USER_TASK:>
Description:
def match(pattern):
"""
Validates that a field value matches the regex given to this validator.
""" |
    regex = re.compile(pattern)

    def validate(value):
        if not regex.match(value):
            return e("{} does not match the pattern {}", value, pattern)
    return validate |
<SYSTEM_TASK:>
Validates that a field's value is a valid email address.
<END_TASK>
<USER_TASK:>
Description:
def is_email():
"""
Validates that a field's value is a valid email address.
""" |
    email = (
        ur'(?!^\.)'  # No dot at start
        ur'(?!.*\.@)'  # No dot before at sign
        ur'(?!.*@\.)'  # No dot after at sign
        ur'(?!.*\.$)'  # No dot at the end
        ur'(?!.*\.\.)'  # No double dots anywhere
        ur'^\S+'  # Starts with one or more non-whitespace characters
        ur'@'  # Contains an at sign
        ur'\S+$'  # Ends with one or more non-whitespace characters
    )
    regex = re.compile(email, re.IGNORECASE | re.UNICODE)

    def validate(value):
        if not regex.match(value):
            return e("{} is not a valid email address", value)
    return validate |
<SYSTEM_TASK:>
Validates that a field's value is a valid URL.
<END_TASK>
<USER_TASK:>
Description:
def is_url():
"""
Validates that a field's value is a valid URL.
""" |
    # Stolen from Django
    regex = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)

    def validate(value):
        if not regex.match(value):
            return e("{} is not a valid URL", value)
    return validate |
<SYSTEM_TASK:>
A wrapper which applies the given validators to each item in a field
<END_TASK>
<USER_TASK:>
Description:
def each_item(*validators):
"""
A wrapper which applies the given validators to each item in a field
value of type `list`.
Example usage in a Schema:
"my_list_field": {"type": Array(int), "validates": each_item(lte(10))}
""" |
    def validate(value):
        for item in value:
            for validator in validators:
                error = validator(item)
                if error:
                    return error
        return None
    return validate |
<SYSTEM_TASK:>
Validates that all items in the given field list value are distinct,
<END_TASK>
<USER_TASK:>
Description:
def distinct():
"""
Validates that all items in the given field list value are distinct,
i.e. that the list contains no duplicates.
""" |
    def validate(value):
        for i, item in enumerate(value):
            if item in value[i+1:]:
                return e("{} is not a distinct set of values", value)
    return validate |
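
Taken together, these factories slot into the "validates" key of a field spec, singly or as a list. A hedged sketch in the style of each_item's docstring (the Array type and spec layout follow the schema sections below):

user_spec = {
    "age": {"type": int, "validates": between(0, 130)},
    "email": {"type": basestring, "validates": is_email()},
    "scores": {"type": Array(int), "validates": [each_item(gte(0)), distinct()]},
}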
<SYSTEM_TASK:>
Applies the defaults described by this schema to the given
<END_TASK>
<USER_TASK:>
Description:
def apply_defaults(self, instance):
"""Applies the defaults described by the this schema to the given
document instance as appropriate. Defaults are only applied to
fields which are currently unset.""" |
    for field, spec in self.doc_spec.iteritems():
        field_type = spec['type']
        if field not in instance:
            if 'default' in spec:
                default = spec['default']
                if callable(default):
                    instance[field] = default()
                else:
                    instance[field] = copy.deepcopy(default)
        # Determine if a value already exists for the field
        if field in instance:
            value = instance[field]
            # recurse into nested docs
            if isinstance(field_type, Schema) and isinstance(value, dict):
                field_type.apply_defaults(value)
            elif isinstance(field_type, Array) and isinstance(field_type.contained_type, Schema) and isinstance(value, list):
                for item in value:
                    field_type.contained_type.apply_defaults(item) |
<SYSTEM_TASK:>
Validates the given document against this schema. Raises a
<END_TASK>
<USER_TASK:>
Description:
def validate(self, instance):
"""Validates the given document against this schema. Raises a
ValidationException if there are any failures.""" |
    errors = {}
    self._validate_instance(instance, errors)
    if len(errors) > 0:
        raise ValidationException(errors) |
<SYSTEM_TASK:>
Verifies that this schema's doc spec is valid and makes sense.
<END_TASK>
<USER_TASK:>
Description:
def _verify(self, path_prefix=None):
"""Verifies that this schema's doc spec is valid and makes sense.""" |
    for field, spec in self.doc_spec.iteritems():
        path = self._append_path(path_prefix, field)
        # Standard dict-based spec
        if isinstance(spec, dict):
            self._verify_field_spec(spec, path)
        else:
            raise SchemaFormatException("Invalid field definition for {}", path) |
<SYSTEM_TASK:>
Verifies a given field specification is valid, recursing into nested schemas if required.
<END_TASK>
<USER_TASK:>
Description:
def _verify_field_spec(self, spec, path):
"""Verifies a given field specification is valid, recursing into nested schemas if required.""" |
    # Required should be a boolean
    if 'required' in spec and not isinstance(spec['required'], bool):
        raise SchemaFormatException("{} required declaration should be True or False", path)
    # Nullable should be a boolean
    if 'nullable' in spec and not isinstance(spec['nullable'], bool):
        raise SchemaFormatException("{} nullable declaration should be True or False", path)
    # Must have a type specified
    if 'type' not in spec:
        raise SchemaFormatException("{} has no type declared.", path)
    self._verify_type(spec, path)
    # Validations should be either a single function or array of functions
    if 'validates' in spec:
        self._verify_validates(spec, path)
    # Defaults must be of the correct type or a function
    if 'default' in spec:
        self._verify_default(spec, path)
    # Only expected spec keys are supported
    if not set(spec.keys()).issubset(set(['type', 'required', 'validates', 'default', 'nullable'])):
        raise SchemaFormatException("Unsupported field spec item at {}. Items: " + repr(spec.keys()), path) |
<SYSTEM_TASK:>
Verify that the 'type' in the spec is valid
<END_TASK>
<USER_TASK:>
Description:
def _verify_type(self, spec, path):
"""Verify that the 'type' in the spec is valid""" |
    field_type = spec['type']
    if isinstance(field_type, Schema):
        # Nested documents cannot have validation
        if not set(spec.keys()).issubset(set(['type', 'required', 'nullable', 'default'])):
            raise SchemaFormatException("Unsupported field spec item at {}. Items: " + repr(spec.keys()), path)
        return
    elif isinstance(field_type, Array):
        if not isinstance(field_type.contained_type, (type, Schema, Array, types.FunctionType)):
            raise SchemaFormatException("Unsupported field type contained by Array at {}.", path)
    elif not isinstance(field_type, type) and not isinstance(field_type, types.FunctionType):
        raise SchemaFormatException("Unsupported field type at {}. Type must be a type, a function, an Array or another Schema", path) |
<SYSTEM_TASK:>
Verifies that the default specified in the given spec is valid.
<END_TASK>
<USER_TASK:>
Description:
def _verify_default(self, spec, path):
"""Verifies that the default specified in the given spec is valid.""" |
    field_type = spec['type']
    default = spec['default']
    # If it's a function there's nothing we can really do except assume it's valid
    if callable(default):
        return
    if isinstance(field_type, Array):
        # Verify we've got a list as our default
        if not isinstance(default, list):
            raise SchemaFormatException("Default value for Array at {} is not a list of values.", path)
        # Ensure the contents are of the correct type
        for i, item in enumerate(default):
            if isinstance(field_type.contained_type, Schema):
                if not self._valid_schema_default(item):
                    raise SchemaFormatException("Default value for Schema is not valid.", path)
            elif not isinstance(item, field_type.contained_type):
                raise SchemaFormatException("Not all items in the default list for the Array field at {} are of the correct type.", path)
    elif isinstance(field_type, Schema):
        if not self._valid_schema_default(default):
            raise SchemaFormatException("Default value for Schema is not valid.", path)
    else:
        if not isinstance(default, field_type):
            raise SchemaFormatException("Default value for {} is not of the nominated type.", path) |
<SYSTEM_TASK:>
Verifies that the 'validates' argument is valid.
<END_TASK>
<USER_TASK:>
Description:
def _verify_validates(self, spec, path):
"""Verify thats the 'validates' argument is valid.""" |
    validates = spec['validates']
    if isinstance(validates, list):
        for validator in validates:
            self._verify_validator(validator, path)
    else:
        self._verify_validator(validates, path) |
<SYSTEM_TASK:>
Verifies that a given validator associated with the field at the given path is legitimate.
<END_TASK>
<USER_TASK:>
Description:
def _verify_validator(self, validator, path):
"""Verifies that a given validator associated with the field at the given path is legitimate.""" |
    # Validator should be a function
    if not callable(validator):
        raise SchemaFormatException("Invalid validations for {}", path)
    # Validator should accept a single argument
    (args, varargs, keywords, defaults) = getargspec(validator)
    if len(args) != 1:
        raise SchemaFormatException("Invalid validations for {}", path) |
<SYSTEM_TASK:>
Validates that the given instance of a document conforms to the given schema's
<END_TASK>
<USER_TASK:>
Description:
def _validate_instance(self, instance, errors, path_prefix=''):
"""Validates that the given instance of a document conforms to the given schema's
structure and validations. Any validation errors are added to the given errors
collection. The caller should assume the instance is considered valid if the
errors collection is empty when this method returns.""" |
    if not isinstance(instance, dict):
        errors[path_prefix] = "Expected instance of dict to validate against schema."
        return
    # validate against the schema level validators
    self._apply_validations(errors, path_prefix, self._validates, instance)
    # Loop over each field in the schema and check the instance value conforms
    # to its spec
    for field, spec in self.doc_spec.iteritems():
        path = self._append_path(path_prefix, field)
        # If the field is present, validate its value.
        if field in instance:
            self._validate_value(instance[field], spec, path, errors)
        else:
            # If not, add an error if it was a required key.
            if spec.get('required', False):
                errors[path] = "{} is required.".format(path)
    # Now loop over each field in the given instance and make sure we don't
    # have any fields not declared in the schema, unless strict mode has been
    # explicitly disabled.
    if self._strict:
        for field in instance:
            if field not in self.doc_spec:
                errors[self._append_path(path_prefix, field)] = "Unexpected document field not present in schema" |
<SYSTEM_TASK:>
Validates that the given field value is valid given the associated
<END_TASK>
<USER_TASK:>
Description:
def _validate_value(self, value, field_spec, path, errors):
"""Validates that the given field value is valid given the associated
field spec and path. Any validation failures are added to the given errors
collection.""" |
    # Check if the value is None and add an error if the field is not nullable.
    # Note that for backward compatibility reasons, the default value of 'nullable'
    # is the inverse of 'required' (which used to mean both that the key be present
    # and not set to None).
    if value is None:
        if not field_spec.get('nullable', not field_spec.get('required', False)):
            errors[path] = "{} is not nullable.".format(path)
        return
    # All fields should have a type
    field_type = field_spec['type']
    if isinstance(field_type, types.FunctionType):
        try:
            field_type = field_type(value)
        except Exception as e:
            raise SchemaFormatException("Dynamic schema function raised exception: {}".format(str(e)), path)
        if not isinstance(field_type, (type, Schema, Array)):
            raise SchemaFormatException("Dynamic schema function did not return a type at path {}", path)
    # If our field is an embedded document, recurse into it
    if isinstance(field_type, Schema):
        if isinstance(value, dict):
            field_type._validate_instance(value, errors, path)
        else:
            errors[path] = "{} should be an embedded document".format(path)
        return
    elif isinstance(field_type, Array):
        if isinstance(value, list):
            is_dynamic = isinstance(field_type.contained_type, types.FunctionType)
            for i, item in enumerate(value):
                contained_type = field_type.contained_type
                if is_dynamic:
                    contained_type = contained_type(item)
                instance_path = self._append_path(path, i)
                if isinstance(contained_type, Schema):
                    contained_type._validate_instance(item, errors, instance_path)
                elif not isinstance(item, contained_type):
                    errors[instance_path] = "Array item at {} is of incorrect type".format(instance_path)
                    continue
        else:
            errors[path] = "{} should be an embedded array".format(path)
        return
    elif not isinstance(value, field_type):
        errors[path] = "Field should be of type {}".format(field_type)
        return
    validations = field_spec.get('validates', None)
    if validations is None:
        return
    self._apply_validations(errors, path, validations, value) |
<SYSTEM_TASK:>
Converts a ConfigParser object into a dictionary.
<END_TASK>
<USER_TASK:>
Description:
def as_dict(config):
"""
Converts a ConfigParser object into a dictionary.
The resulting dictionary has sections as keys which point to a dict of the
sections options as key => value pairs.
""" |
    settings = defaultdict(lambda: {})
    for section in config.sections():
        for key, val in config.items(section):
            settings[section][key] = val
    return settings |
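
A usage sketch with the standard library parser (Python 2 spelling, to match the rest of this code; the filename is hypothetical):

from ConfigParser import ConfigParser

config = ConfigParser()
config.read('settings.ini')
settings = as_dict(config)
# e.g. settings['database']['host'], if such a section and option exist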
<SYSTEM_TASK:>
Bind or connect the nanomsg socket to some address
<END_TASK>
<USER_TASK:>
Description:
def initialize(self, timeouts):
""" Bind or connect the nanomsg socket to some address """ |
    # Bind or connect to address
    if self.bind is True:
        self.socket.bind(self.address)
    else:
        self.socket.connect(self.address)
    # Set send and recv timeouts
    self._set_timeouts(timeouts) |
<SYSTEM_TASK:>
Set socket timeouts for send and receive respectively
<END_TASK>
<USER_TASK:>
Description:
def _set_timeouts(self, timeouts):
""" Set socket timeouts for send and receive respectively """ |
    (send_timeout, recv_timeout) = (None, None)
    try:
        (send_timeout, recv_timeout) = timeouts
    except TypeError:
        raise EndpointError(
            '`timeouts` must be a pair of numbers (2, 3) which represent '
            'the timeout values for send and receive respectively')
    if send_timeout is not None:
        self.socket.set_int_option(
            nanomsg.SOL_SOCKET, nanomsg.SNDTIMEO, send_timeout)
    if recv_timeout is not None:
        self.socket.set_int_option(
            nanomsg.SOL_SOCKET, nanomsg.RCVTIMEO, recv_timeout) |
<SYSTEM_TASK:>
Receive from socket, authenticate and decode payload
<END_TASK>
<USER_TASK:>
Description:
def receive(self, decode=True):
""" Receive from socket, authenticate and decode payload """ |
    payload = self.socket.recv()
    payload = self.verify(payload)
    if decode:
        payload = self.decode(payload)
    return payload |
<SYSTEM_TASK:>
Sign payload using the supplied authenticator
<END_TASK>
<USER_TASK:>
Description:
def sign(self, payload):
""" Sign payload using the supplied authenticator """ |
    if self.authenticator:
        return self.authenticator.signed(payload)
    return payload |
<SYSTEM_TASK:>
Verify payload authenticity via the supplied authenticator
<END_TASK>
<USER_TASK:>
Description:
def verify(self, payload):
""" Verify payload authenticity via the supplied authenticator """ |
    if not self.authenticator:
        return payload
    try:
        self.authenticator.auth(payload)
        return self.authenticator.unsigned(payload)
    except AuthenticatorInvalidSignature:
        raise
    except Exception as exception:
        raise AuthenticateError(str(exception)) |
<SYSTEM_TASK:>
Start and listen for calls
<END_TASK>
<USER_TASK:>
Description:
def start(self):
""" Start and listen for calls """ |
    if threading.current_thread().name == 'MainThread':
        signal.signal(signal.SIGINT, self.stop)
    logging.info('Started on {}'.format(self.address))
    while True:
        self.process() |
<SYSTEM_TASK:>
Fetch the function registered for a certain subscription
<END_TASK>
<USER_TASK:>
Description:
def parse(self, subscription):
""" Fetch the function registered for a certain subscription """ |
    for name in self.methods:
        tag = bytes(name.encode('utf-8'))
        if subscription.startswith(tag):
            fun = self.methods.get(name)
            message = subscription[len(tag):]
            return tag, message, fun
    return None, None, None |
<SYSTEM_TASK:>
Subscribe to something and register a function
<END_TASK>
<USER_TASK:>
Description:
def subscribe(self, tag, fun, description=None):
""" Subscribe to something and register a function """ |
    self.methods[tag] = fun
    self.descriptions[tag] = description
    self.socket.set_string_option(nanomsg.SUB, nanomsg.SUB_SUBSCRIBE, tag) |
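
A hedged pub/sub sketch (the subscriber/publisher objects and their construction are assumptions; only subscribe() here and the publish() shown further below appear in this snippet):

def on_user_created(message):
    logging.info('new user: {}'.format(message))

subscriber.subscribe('user.created', on_user_created,
                     description='log newly created users')
# ... elsewhere, on the publishing side:
publisher.publish('user.created', {'name': 'alice'})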
<SYSTEM_TASK:>
Receive a subscription from the socket and process it
<END_TASK>
<USER_TASK:>
Description:
def process(self):
""" Receive a subscription from the socket and process it """ |
    subscription = None
    result = None
    try:
        subscription = self.socket.recv()
    except AuthenticateError as exception:
        logging.error(
            'Subscriber error while authenticating request: {}'
            .format(exception), exc_info=1)
    except AuthenticatorInvalidSignature as exception:
        logging.error(
            'Subscriber error while authenticating request: {}'
            .format(exception), exc_info=1)
    except DecodeError as exception:
        logging.error(
            'Subscriber error while decoding request: {}'
            .format(exception), exc_info=1)
    except RequestParseError as exception:
        logging.error(
            'Subscriber error while parsing request: {}'
            .format(exception), exc_info=1)
    else:
        logging.debug(
            'Subscriber received payload: {}'
            .format(subscription))
        _tag, message, fun = self.parse(subscription)
        message = self.verify(message)
        message = self.decode(message)
        try:
            result = fun(message)
        except Exception as exception:
            logging.error(exception, exc_info=1)
    # Return result to check successful execution of `fun` when testing
    return result |
<SYSTEM_TASK:>
Publish a message down the socket
<END_TASK>
<USER_TASK:>
Description:
def publish(self, tag, message):
""" Publish a message down the socket """ |
    payload = self.build_payload(tag, message)
    self.socket.send(payload) |
<SYSTEM_TASK:>
Get the Webpack object for a given webpack config.
<END_TASK>
<USER_TASK:>
Description:
def get_webpack(request, name='DEFAULT'):
"""
Get the Webpack object for a given webpack config.
Called at most once per request per config name.
""" |
    if not hasattr(request, '_webpack_map'):
        request._webpack_map = {}
    wp = request._webpack_map.get(name)
    if wp is None:
        wp = request._webpack_map[name] = Webpack(request, name)
    return wp |
<SYSTEM_TASK:>
Add pyramid_webpack methods and config to the app
<END_TASK>
<USER_TASK:>
Description:
def includeme(config):
""" Add pyramid_webpack methods and config to the app """ |
    settings = config.registry.settings
    root_package_name = config.root_package.__name__
    config.registry.webpack = {
        'DEFAULT': WebpackState(settings, root_package_name)
    }
    for extra_config in aslist(settings.get('webpack.configs', [])):
        state = WebpackState(settings, root_package_name, name=extra_config)
        config.registry.webpack[extra_config] = state
    # Set up any static views
    for state in six.itervalues(config.registry.webpack):
        if state.static_view:
            config.add_static_view(name=state.static_view_name,
                                   path=state.static_view_path,
                                   cache_max_age=state.cache_max_age)
    config.add_request_method(get_webpack, 'webpack') |
<SYSTEM_TASK:>
Helper function to fetch settings, inheriting from the base
<END_TASK>
<USER_TASK:>
Description:
def _get_setting(self, setting, default=None, name=None, inherit=True):
""" Helper function to fetch settings, inheriting from the base """ |
    if name is None:
        name = self.name
    if name == 'DEFAULT':
        return self._settings.get('webpack.{0}'.format(setting), default)
    else:
        val = self._settings.get('webpack.{0}.{1}'.format(name, setting),
                                 SENTINEL)
        if val is SENTINEL:
            if inherit:
                return self._get_setting(setting, default, 'DEFAULT')
            else:
                return default
        else:
            return val |
<SYSTEM_TASK:>
Create a filter from the extensions and ignore files
<END_TASK>
<USER_TASK:>
Description:
def _chunk_filter(self, extensions):
""" Create a filter from the extensions and ignore files """ |
    if isinstance(extensions, six.string_types):
        extensions = extensions.split()

    def _filter(chunk):
        """ Exclusion filter """
        name = chunk['name']
        if extensions is not None:
            if not any(name.endswith(e) for e in extensions):
                return False
        for pattern in self.state.ignore_re:
            if pattern.match(name):
                return False
        for pattern in self.state.ignore:
            if fnmatch.fnmatchcase(name, pattern):
                return False
        return True
    return _filter |
<SYSTEM_TASK:>
Add a 'url' property to a chunk and return it
<END_TASK>
<USER_TASK:>
Description:
def _add_url(self, chunk):
""" Add a 'url' property to a chunk and return it """ |
    if 'url' in chunk:
        return chunk
    public_path = chunk.get('publicPath')
    if public_path:
        chunk['url'] = public_path
    else:
        fullpath = posixpath.join(self.state.static_view_path,
                                  chunk['name'])
        chunk['url'] = self._request.static_url(fullpath)
    return chunk |
<SYSTEM_TASK:>
Get all the chunks contained in a bundle
<END_TASK>
<USER_TASK:>
Description:
def get_bundle(self, bundle_name, extensions=None):
""" Get all the chunks contained in a bundle """ |
    if self.stats.get('status') == 'done':
        bundle = self.stats.get('chunks', {}).get(bundle_name, None)
        if bundle is None:
            raise KeyError('No such bundle {0!r}.'.format(bundle_name))
        test = self._chunk_filter(extensions)
        return [self._add_url(c) for c in bundle if test(c)]
    elif self.stats.get('status') == 'error':
        raise RuntimeError("{error}: {message}".format(**self.stats))
    else:
        raise RuntimeError(
            "Bad webpack stats file {0} status: {1!r}"
            .format(self.state.stats_file, self.stats.get('status'))) |
<SYSTEM_TASK:>
Generates unique sequences of bytes.
<END_TASK>
<USER_TASK:>
Description:
def _unique_names():
"""Generates unique sequences of bytes.
""" |
    characters = ("abcdefghijklmnopqrstuvwxyz"
                  "0123456789")
    characters = [characters[i:i + 1] for i in irange(len(characters))]
    rng = random.Random()
    while True:
        letters = [rng.choice(characters) for i in irange(10)]
        yield ''.join(letters) |
<SYSTEM_TASK:>
Gets an SSH client to connect with.
<END_TASK>
<USER_TASK:>
Description:
def _ssh_client(self):
"""Gets an SSH client to connect with.
""" |
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()
    ssh.set_missing_host_key_policy(paramiko.RejectPolicy())
    return ssh |
<SYSTEM_TASK:>
Gets the SSH client.
<END_TASK>
<USER_TASK:>
Description:
def get_client(self):
"""Gets the SSH client.
This will check that the connection is still alive first, and reconnect
if necessary.
""" |
    if self._ssh is None:
        self._connect()
        return self._ssh
    else:
        try:
            chan = self._ssh.get_transport().open_session()
        except (socket.error, paramiko.SSHException):
            logger.warning("Lost connection, reconnecting...")
            self._ssh.close()
            self._connect()
        else:
            chan.close()
        return self._ssh |
<SYSTEM_TASK:>
Calls a command through the SSH connection.
<END_TASK>
<USER_TASK:>
Description:
def _call(self, cmd, get_output):
"""Calls a command through the SSH connection.
Remote stderr gets printed to this program's stderr. Output is captured
and may be returned.
""" |
    server_err = self.server_logger()
    chan = self.get_client().get_transport().open_session()
    try:
        logger.debug("Invoking %r%s",
                     cmd, " (stdout)" if get_output else "")
        chan.exec_command('/bin/sh -c %s' % shell_escape(cmd))
        output = b''
        while True:
            r, w, e = select.select([chan], [], [])
            if chan not in r:
                continue  # pragma: no cover
            recvd = False
            while chan.recv_stderr_ready():
                data = chan.recv_stderr(1024)
                server_err.append(data)
                recvd = True
            while chan.recv_ready():
                data = chan.recv(1024)
                if get_output:
                    output += data
                recvd = True
            if not recvd and chan.exit_status_ready():
                break
        output = output.rstrip(b'\r\n')
        return chan.recv_exit_status(), output
    finally:
        server_err.done()
        chan.close() |
<SYSTEM_TASK:>
Calls a command through SSH.
<END_TASK>
<USER_TASK:>
Description:
def check_call(self, cmd):
"""Calls a command through SSH.
""" |
    ret, _ = self._call(cmd, False)
    if ret != 0:  # pragma: no cover
        raise RemoteCommandFailure(command=cmd, ret=ret) |
<SYSTEM_TASK:>
Calls a command through SSH and returns its output.
<END_TASK>
<USER_TASK:>
Description:
def check_output(self, cmd):
"""Calls a command through SSH and returns its output.
""" |
    ret, output = self._call(cmd, True)
    if ret != 0:  # pragma: no cover
        raise RemoteCommandFailure(command=cmd, ret=ret)
    logger.debug("Output: %r", output)
    return output |
<SYSTEM_TASK:>
Finds the location of tej's queue directory on the server.
<END_TASK>
<USER_TASK:>
Description:
def _resolve_queue(self, queue, depth=0, links=None):
"""Finds the location of tej's queue directory on the server.
The `queue` set when constructing this `RemoteQueue` might be relative
to the home directory and might contain ``~user`` placeholders. Also,
each queue may in fact be a link to another path (a file containing
the string ``tejdir:``, a space, and a new pathname, relative to this
link's location).
""" |
    if depth == 0:
        logger.debug("resolve_queue(%s)", queue)
    answer = self.check_output(
        'if [ -d %(queue)s ]; then '
        ' cd %(queue)s; echo "dir"; cat version; pwd; '
        'elif [ -f %(queue)s ]; then '
        ' cat %(queue)s; '
        'else '
        ' echo no; '
        'fi' % {
            'queue': escape_queue(queue)})
    if answer == b'no':
        if depth > 0:
            logger.debug("Broken link at depth=%d", depth)
        else:
            logger.debug("Path doesn't exist")
        return None, depth
    elif answer.startswith(b'dir\n'):
        version, runtime, path = answer[4:].split(b'\n', 2)
        try:
            version = tuple(int(e)
                            for e in version.decode('ascii', 'ignore')
                            .split('.'))
        except ValueError:
            version = 0, 0
        if version[:2] != self.PROTOCOL_VERSION:
            raise QueueExists(
                msg="Queue exists and is using incompatible protocol "
                    "version %s" % '.'.join('%s' % e for e in version))
        path = PosixPath(path)
        runtime = runtime.decode('ascii', 'replace')
        if self.need_runtime is not None:
            if (self.need_runtime is not None and
                    runtime not in self.need_runtime):
                raise QueueExists(
                    msg="Queue exists and is using explicitly disallowed "
                        "runtime %s" % runtime)
        logger.debug("Found directory at %s, depth=%d, runtime=%s",
                     path, depth, runtime)
        return path, depth
    elif answer.startswith(b'tejdir: '):
        new = queue.parent / answer[8:]
        logger.debug("Found link to %s, recursing", new)
        if links is not None:
            links.append(queue)
        return self._resolve_queue(new, depth + 1)
    else:  # pragma: no cover
        logger.debug("Server returned %r", answer)
        raise RemoteCommandFailure(msg="Queue resolution command failed "
                                       "in unexpected way") |
<SYSTEM_TASK:>
Gets the actual location of the queue, or None.
<END_TASK>
<USER_TASK:>
Description:
def _get_queue(self):
"""Gets the actual location of the queue, or None.
""" |
    if self._queue is None:
        self._links = []
        queue, depth = self._resolve_queue(self.queue, links=self._links)
        if queue is None and depth > 0:
            raise QueueLinkBroken
        self._queue = queue
    return self._queue |
<SYSTEM_TASK:>
Installs the runtime at the target location.
<END_TASK>
<USER_TASK:>
Description:
def setup(self, links=None, force=False, only_links=False):
"""Installs the runtime at the target location.
This will not replace an existing installation, unless `force` is True.
After installation, creates links to this installation at the specified
locations.
""" |
    if not links:
        links = []
    if only_links:
        logger.info("Only creating links")
        for link in links:
            self.check_call('echo "tejdir:" %(queue)s > %(link)s' % {
                'queue': escape_queue(self.queue),
                'link': escape_queue(link)})
        return
    queue, depth = self._resolve_queue(self.queue)
    if queue is not None or depth > 0:
        if force:
            if queue is None:
                logger.info("Replacing broken link")
            elif depth > 0:
                logger.info("Replacing link to %s...", queue)
            else:
                logger.info("Replacing existing queue...")
            self.check_call('rm -Rf %s' % escape_queue(self.queue))
        else:
            if queue is not None and depth > 0:
                raise QueueExists("Queue already exists (links to %s)\n"
                                  "Use --force to replace" % queue)
            elif depth > 0:
                raise QueueExists("Broken link exists\n"
                                  "Use --force to replace")
            else:
                raise QueueExists("Queue already exists\n"
                                  "Use --force to replace")
    queue = self._setup()
    for link in links:
        self.check_call('echo "tejdir:" %(queue)s > %(link)s' % {
            'queue': escape_queue(queue),
            'link': escape_queue(link)}) |
<SYSTEM_TASK:>
Actually installs the runtime.
<END_TASK>
<USER_TASK:>
Description:
def _setup(self):
"""Actually installs the runtime.
""" |
    # Expands ~user in queue
    if self.queue.path[0:1] == b'/':
        queue = self.queue
    else:
        if self.queue.path[0:1] == b'~':
            output = self.check_output('echo %s' %
                                       escape_queue(self.queue))
            queue = PosixPath(output.rstrip(b'\r\n'))
        else:
            output = self.check_output('pwd')
            queue = PosixPath(output.rstrip(b'\r\n')) / self.queue
    logger.debug("Resolved to %s", queue)
    # Select runtime
    if not self.setup_runtime:
        # Autoselect
        if self._call('which qsub', False)[0] == 0:
            logger.debug("qsub is available, using runtime 'pbs'")
            runtime = 'pbs'
        else:
            logger.debug("qsub not found, using runtime 'default'")
            runtime = 'default'
    else:
        runtime = self.setup_runtime
    if self.need_runtime is not None and runtime not in self.need_runtime:
        raise ValueError("About to set up runtime %s but that wouldn't "
                         "match explicitly allowed runtimes" % runtime)
    logger.info("Installing runtime %s%s at %s",
                runtime,
                "" if self.setup_runtime else " (auto)",
                self.queue)
    # Uploads runtime
    scp_client = self.get_scp_client()
    filename = pkg_resources.resource_filename('tej',
                                               'remotes/%s' % runtime)
    scp_client.put(filename, str(queue), recursive=True)
    logger.debug("Files uploaded")
    # Runs post-setup script
    self.check_call('/bin/sh %s' % shell_escape(queue / 'commands/setup'))
    logger.debug("Post-setup script done")
    self._queue = queue
    return queue |
<SYSTEM_TASK:>
Submits a job to the queue.
<END_TASK>
<USER_TASK:>
Description:
def submit(self, job_id, directory, script=None):
"""Submits a job to the queue.
If the runtime is not there, it will be installed. If it is a broken
chain of links, error.
""" |
    if job_id is None:
        job_id = '%s_%s_%s' % (Path(directory).unicodename,
                               self.destination['username'],
                               make_unique_name())
    else:
        check_jobid(job_id)
    queue = self._get_queue()
    if queue is None:
        queue = self._setup()
    if script is None:
        script = 'start.sh'
    # Create directory
    ret, target = self._call('%s %s' % (
        shell_escape(queue / 'commands/new_job'),
        job_id),
        True)
    if ret == 4:
        raise JobAlreadyExists
    elif ret != 0:
        raise JobNotFound("Couldn't create job")
    target = PosixPath(target)
    logger.debug("Server created directory %s", target)
    # Upload to directory
    try:
        scp_client = self.get_scp_client()
        scp_client.put(str(Path(directory)),
                       str(target),
                       recursive=True)
    except BaseException as e:
        try:
            self.delete(job_id)
        except BaseException:
            raise e
        raise
    logger.debug("Files uploaded")
    # Submit job
    self.check_call('%s %s %s %s' % (
        shell_escape(queue / 'commands/submit'),
        job_id, shell_escape(target),
        shell_escape(script)))
    logger.info("Submitted job %s", job_id)
    return job_id |
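
A hedged end-to-end sketch of the job lifecycle (the RemoteQueue constructor arguments are assumptions; only submit/status/kill/list appear in this snippet):

queue = RemoteQueue('user@server.example.com', 'tej_queue')
job = queue.submit(None, 'experiment/')       # job id auto-generated
state, directory, result = queue.status(job)
if state == RemoteQueue.JOB_DONE:
    print(result)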
<SYSTEM_TASK:>
Gets the status of a previously-submitted job.
<END_TASK>
<USER_TASK:>
Description:
def status(self, job_id):
"""Gets the status of a previously-submitted job.
""" |
    check_jobid(job_id)
    queue = self._get_queue()
    if queue is None:
        raise QueueDoesntExist
    ret, output = self._call('%s %s' % (
        shell_escape(queue / 'commands/status'),
        job_id),
        True)
    if ret == 0:
        directory, result = output.splitlines()
        result = result.decode('utf-8')
        return RemoteQueue.JOB_DONE, PosixPath(directory), result
    elif ret == 2:
        directory = output.splitlines()[0]
        return RemoteQueue.JOB_RUNNING, PosixPath(directory), None
    elif ret == 3:
        raise JobNotFound
    else:
        raise RemoteCommandFailure(command="commands/status",
                                   ret=ret) |
<SYSTEM_TASK:>
Kills a job on the server.
<END_TASK>
<USER_TASK:>
Description:
def kill(self, job_id):
"""Kills a job on the server.
""" |
    check_jobid(job_id)
    queue = self._get_queue()
    if queue is None:
        raise QueueDoesntExist
    ret, output = self._call('%s %s' % (
        shell_escape(queue / 'commands/kill'),
        job_id),
        False)
    if ret == 3:
        raise JobNotFound
    elif ret != 0:
        raise RemoteCommandFailure(command='commands/kill',
                                   ret=ret) |
<SYSTEM_TASK:>
Lists the jobs on the server.
<END_TASK>
<USER_TASK:>
Description:
def list(self):
"""Lists the jobs on the server.
""" |
    queue = self._get_queue()
    if queue is None:
        raise QueueDoesntExist
    output = self.check_output('%s' %
                               shell_escape(queue / 'commands/list'))
    job_id, info = None, None
    for line in output.splitlines():
        line = line.decode('utf-8')
        if line.startswith('    '):
            key, value = line[4:].split(': ', 1)
            info[key] = value
        else:
            if job_id is not None:
                yield job_id, info
            job_id = line
            info = {}
    if job_id is not None:
        yield job_id, info |
<SYSTEM_TASK:>
Take a sequence of pairs specifying substitutions, and create
<END_TASK>
<USER_TASK:>
Description:
def multi_substitution(*substitutions):
"""
Take a sequence of pairs specifying substitutions, and create
a function that performs those substitutions.
>>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
'baz'
""" |
    substitutions = itertools.starmap(substitution, substitutions)
    # compose function applies last function first, so reverse the
    # substitutions to get the expected order.
    substitutions = reversed(tuple(substitutions))
    return compose(*substitutions) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def simple_html_strip(s):
r"""
Remove HTML from the string `s`.
>>> str(simple_html_strip(''))
''
>>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
A stormy day in paradise
>>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
Somebody tell the truth.
>>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
What about
multiple lines?
""" |
    html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
    texts = (
        match.group(3) or ''
        for match
        in html_stripper.finditer(s)
    )
    return ''.join(texts) |
<SYSTEM_TASK:>
Remove the prefix from the text if it exists.
<END_TASK>
<USER_TASK:>
Description:
def remove_prefix(text, prefix):
"""
Remove the prefix from the text if it exists.
>>> remove_prefix('underwhelming performance', 'underwhelming ')
'performance'
>>> remove_prefix('something special', 'sample')
'something special'
""" |
# Only strip a genuine leading prefix (safe when the prefix repeats).
if text.startswith(prefix):
    return text[len(prefix):]
return text |
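A quick behavior sketch of the startswith-based version on a repeated prefix:

remove_prefix('abcabc', 'abc')   # -> 'abc' (an rpartition-based version returns '')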
<SYSTEM_TASK:>
Remove the suffix from the text if it exists.
<END_TASK>
<USER_TASK:>
Description:
def remove_suffix(text, suffix):
"""
Remove the suffix from the text if it exists.
>>> remove_suffix('name.git', '.git')
'name'
>>> remove_suffix('something special', 'sample')
'something special'
""" |
# Only strip a genuine trailing suffix (partition would cut at the first hit).
if suffix and text.endswith(suffix):
    return text[:-len(suffix)]
return text |
<SYSTEM_TASK:>
Return the common prefix of two lines.
<END_TASK>
<USER_TASK:>
Description:
def common_prefix(s1, s2):
"""
Return the common prefix of two lines.
""" |
index = min(len(s1), len(s2))
while s1[:index] != s2[:index]:
index -= 1
return s1[:index] |
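A behavior sketch:

common_prefix('interspecies', 'interstellar')  # -> 'inters'
common_prefix('throne', 'dungeon')             # -> ''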
<SYSTEM_TASK:>
Run a graph and render the tag contents for each output
<END_TASK>
<USER_TASK:>
Description:
def _get_graph(self, ctx, bundle, extensions, caller=None):
""" Run a graph and render the tag contents for each output """ |
request = ctx.get('request')
if request is None:
request = get_current_request()
if ':' in bundle:
config_name, bundle = bundle.split(':')
else:
config_name = 'DEFAULT'
webpack = request.webpack(config_name)
assets = (caller(a) for a in webpack.get_bundle(bundle, extensions))
return ''.join(assets) |
<SYSTEM_TASK:>
Switch to this project.
<END_TASK>
<USER_TASK:>
Description:
def activate(lancet, method, project):
"""Switch to this project.""" |
with taskstatus("Looking up project") as ts:
if method == "key":
func = get_project_keys
    elif method == "dir":
        # NOTE: currently falls back to the same key-based lookup; no
        # directory-based lookup function is wired in here.
        func = get_project_keys
for key, project_path in func(lancet):
if key.lower() == project.lower():
break
else:
ts.abort(
'Project "{}" not found (using {}-based lookup)',
project,
method,
)
# Load the configuration
config = load_config(os.path.join(project_path, LOCAL_CONFIG))
# cd to the project directory
lancet.defer_to_shell("cd", project_path)
# Activate virtualenv
venv = config.get("lancet", "virtualenv", fallback=None)
if venv:
venv_path = os.path.join(project_path, os.path.expanduser(venv))
activate_script = os.path.join(venv_path, "bin", "activate")
lancet.defer_to_shell("source", activate_script)
else:
if "VIRTUAL_ENV" in os.environ:
lancet.defer_to_shell("deactivate") |
<SYSTEM_TASK:>
Start work on a given issue.
<END_TASK>
<USER_TASK:>
Description:
def workon(ctx, issue_id, new, base_branch):
"""
Start work on a given issue.
This command retrieves the issue from the issue tracker, creates and checks
    out a new aptly-named branch, puts the issue in the configured active
    status, assigns it to you and starts a correctly linked Harvest timer.
If a branch with the same name as the one to be created already exists, it
    is checked out instead. Variations in the branch name occurring after the
issue ID are accounted for and the branch renamed to match the new issue
summary.
If the `default_project` directive is correctly configured, it is enough to
give the issue ID (instead of the full project prefix + issue ID).
""" |
lancet = ctx.obj
if not issue_id and not new:
raise click.UsageError("Provide either an issue ID or the --new flag.")
elif issue_id and new:
raise click.UsageError(
"Provide either an issue ID or the --new flag, but not both."
)
if new:
# Create a new issue
summary = click.prompt("Issue summary")
issue = create_issue(
lancet, summary=summary, add_to_active_sprint=True
)
else:
issue = get_issue(lancet, issue_id)
username = lancet.tracker.whoami()
active_status = lancet.config.get("tracker", "active_status")
if not base_branch:
base_branch = lancet.config.get("repository", "base_branch")
# Get the working branch
branch = get_branch(lancet, issue, base_branch)
# Make sure the issue is in a correct status
transition = get_transition(ctx, lancet, issue, active_status)
# Make sure the issue is assigned to us
assign_issue(lancet, issue, username, active_status)
# Activate environment
set_issue_status(lancet, issue, active_status, transition)
with taskstatus("Checking out working branch") as ts:
lancet.repo.checkout(branch.name)
ts.ok('Checked out working branch based on "{}"'.format(base_branch))
with taskstatus("Starting harvest timer") as ts:
lancet.timer.start(issue)
ts.ok("Started harvest timer") |
<SYSTEM_TASK:>
Start a Harvest timer for the given issue.
<END_TASK>
<USER_TASK:>
Description:
def time(lancet, issue):
"""
    Start a Harvest timer for the given issue.
    This command takes care of linking the timer with the issue tracker page
    for the given issue. If the issue is not passed to the command, it is
    taken from the currently active branch.
""" |
issue = get_issue(lancet, issue)
with taskstatus("Starting harvest timer") as ts:
lancet.timer.start(issue)
ts.ok("Started harvest timer") |
<SYSTEM_TASK:>
Pause work on the current issue.
<END_TASK>
<USER_TASK:>
Description:
def pause(ctx):
"""
Pause work on the current issue.
This command puts the issue in the configured paused status and stops the
current Harvest timer.
""" |
lancet = ctx.obj
paused_status = lancet.config.get("tracker", "paused_status")
# Get the issue
issue = get_issue(lancet)
# Make sure the issue is in a correct status
transition = get_transition(ctx, lancet, issue, paused_status)
# Activate environment
set_issue_status(lancet, issue, paused_status, transition)
with taskstatus("Pausing harvest timer") as ts:
lancet.timer.pause()
ts.ok("Harvest timer paused") |
<SYSTEM_TASK:>
Resume work on the currently active issue.
<END_TASK>
<USER_TASK:>
Description:
def resume(ctx):
"""
Resume work on the currently active issue.
The issue is retrieved from the currently active branch name.
""" |
lancet = ctx.obj
username = lancet.tracker.whoami()
active_status = lancet.config.get("tracker", "active_status")
# Get the issue
issue = get_issue(lancet)
# Make sure the issue is in a correct status
transition = get_transition(ctx, lancet, issue, active_status)
# Make sure the issue is assigned to us
assign_issue(lancet, issue, username, active_status)
# Activate environment
set_issue_status(lancet, issue, active_status, transition)
with taskstatus("Resuming harvest timer") as ts:
lancet.timer.start(issue)
ts.ok("Resumed harvest timer") |
<SYSTEM_TASK:>
SSH into the given environment, based on the dploi configuration.
<END_TASK>
<USER_TASK:>
Description:
def ssh(lancet, print_cmd, environment):
"""
SSH into the given environment, based on the dploi configuration.
""" |
namespace = {}
with open(lancet.config.get('dploi', 'deployment_spec')) as fh:
code = compile(fh.read(), 'deployment.py', 'exec')
exec(code, {}, namespace)
config = namespace['settings'][environment]
host = '{}@{}'.format(config['user'], config['hosts'][0])
cmd = ['ssh', '-p', str(config.get('port', 22)), host]
if print_cmd:
click.echo(' '.join(quote(s) for s in cmd))
else:
lancet.defer_to_shell(*cmd) |
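The command exec()s a dploi-style deployment spec to obtain a `settings` dict; a minimal, hypothetical deployment.py satisfying the reads above might look like:

# Hypothetical values; the keys mirror what the ssh command reads.
settings = {
    'production': {
        'user': 'deploy',
        'hosts': ['app1.example.com'],
        'port': 2222,  # optional; the command defaults to 22
    },
}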
<SYSTEM_TASK:>
Print the shell integration code.
<END_TASK>
<USER_TASK:>
Description:
def _setup_helper():
"""Print the shell integration code.""" |
base = os.path.abspath(os.path.dirname(__file__))
helper = os.path.join(base, "helper.sh")
with open(helper) as fh:
click.echo(fh.read()) |
<SYSTEM_TASK:>
Prints a list of commands for shell completion hooks.
<END_TASK>
<USER_TASK:>
Description:
def _commands(ctx):
"""Prints a list of commands for shell completion hooks.""" |
ctx = ctx.parent
ctx.show_hidden_subcommands = False
main = ctx.command
for subcommand in main.list_commands(ctx):
cmd = main.get_command(ctx, subcommand)
if cmd is None:
continue
help = cmd.short_help or ""
click.echo("{}:{}".format(subcommand, help)) |
<SYSTEM_TASK:>
Prints a list of arguments for shell completion hooks.
<END_TASK>
<USER_TASK:>
Description:
def _arguments(ctx, command_name=None):
"""Prints a list of arguments for shell completion hooks.
If a command name is given, returns the arguments for that subcommand.
The command name has to refer to a command; aliases are not supported.
""" |
ctx = ctx.parent
main = ctx.command
if command_name:
command = main.get_command(ctx, command_name)
if not command:
return
else:
command = main
types = ["option", "argument"]
all_params = sorted(
command.get_params(ctx), key=lambda p: types.index(p.param_type_name)
)
def get_name(param):
return max(param.opts, key=len)
for param in all_params:
if param.param_type_name == "option":
option = get_name(param)
same_dest = [
get_name(p) for p in all_params if p.name == param.name
]
if same_dest:
option = "({})".format(" ".join(same_dest)) + option
if param.help:
option += "[{}]".format(param.help or "")
if not param.is_flag:
option += "=:( )"
click.echo(option)
elif param.param_type_name == "argument":
option = get_name(param)
click.echo(":{}".format(option)) |
<SYSTEM_TASK:>
Print the shell autocompletion code.
<END_TASK>
<USER_TASK:>
Description:
def _autocomplete(ctx, shell):
"""Print the shell autocompletion code.""" |
if not shell:
shell = os.environ.get("SHELL", "")
shell = os.path.basename(shell).lower()
if not shell:
click.secho(
"Your shell could not be detected, please pass its name "
"as the argument.",
fg="red",
)
ctx.exit(-1)
base = os.path.abspath(os.path.dirname(__file__))
autocomplete = os.path.join(base, "autocomplete", "{}.sh".format(shell))
if not os.path.exists(autocomplete):
click.secho(
"Autocompletion for your shell ({}) is currently not "
"supported.",
fg="red",
)
ctx.exit(-1)
with open(autocomplete) as fh:
click.echo(fh.read()) |
<SYSTEM_TASK:>
Call Python 3 raise from or emulate it for Python 2
<END_TASK>
<USER_TASK:>
Description:
def raisefrom(exc_type, message, exc):
# type: (Any, str, BaseException) -> None
"""Call Python 3 raise from or emulate it for Python 2
Args:
exc_type (Any): Type of Exception
message (str): Error message to display
exc (BaseException): original exception
Returns:
None
""" |
if sys.version_info[:2] >= (3, 2):
six.raise_from(exc_type(message), exc)
else:
six.reraise(exc_type, '%s - %s' % (message, exc), sys.exc_info()[2]) |
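A usage sketch that wraps a low-level error while preserving the original context on Python 3:

try:
    int('not a number')
except ValueError as exc:
    raisefrom(RuntimeError, 'expected a numeric value', exc)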
<SYSTEM_TASK:>
Calls the method until the return value is not False.
<END_TASK>
<USER_TASK:>
Description:
def until(method, timeout=30, message=''):
"""Calls the method until the return value is not False.""" |
end_time = time.time() + timeout
while True:
try:
value = method()
if value:
return value
    except Exception:
        # Swallow transient errors and keep polling until the deadline.
        pass
time.sleep(1)
if time.time() > end_time:
break
raise Exception(message) |
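A usage sketch (the marker file is hypothetical):

import os
# Poll for a marker file for up to 10 seconds, then give up loudly.
until(lambda: os.path.exists('/tmp/job.done'), timeout=10,
      message='job never finished')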
<SYSTEM_TASK:>
import a module from a Python file path and return the imported module
<END_TASK>
<USER_TASK:>
Description:
def get_imported_module_from_file(file_path):
""" import module from python file path and return imported module
""" |
if p_compat.is_py3:
imported_module = importlib.machinery.SourceFileLoader('module_name', file_path).load_module()
elif p_compat.is_py2:
imported_module = imp.load_source('module_name', file_path)
else:
raise RuntimeError("Neither Python 3 nor Python 2.")
return imported_module |
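A usage sketch (the path is hypothetical):

# Load the file as a throwaway module and inspect its namespace.
mod = get_imported_module_from_file('/tmp/preference.py')
print(sorted(vars(mod)))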
<SYSTEM_TASK:>
filter functions or variables from an imported module
<END_TASK>
<USER_TASK:>
Description:
def filter_module(module, filter_type):
""" filter functions or variables from import module
@params
module: imported module
filter_type: "function" or "variable"
""" |
filter_type = ModuleUtils.is_function if filter_type == "function" else ModuleUtils.is_variable
module_functions_dict = dict(filter(filter_type, vars(module).items()))
return module_functions_dict |
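filter_module relies on two predicates, is_function and is_variable (exposed on ModuleUtils), that are not shown here; plausible sketches, which are assumptions rather than the project's actual code:

import types

def is_function(item):
    # `item` is a (name, object) pair from vars(module).items().
    name, obj = item
    return isinstance(obj, types.FunctionType)

def is_variable(item):
    name, obj = item
    return (not name.startswith('_') and not callable(obj)
            and not isinstance(obj, types.ModuleType))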
<SYSTEM_TASK:>
search for the expected function or variable recursively upward
<END_TASK>
<USER_TASK:>
Description:
def search_conf_item(start_path, item_type, item_name):
""" search expected function or variable recursive upward
@param
start_path: search start path
item_type: "function" or "variable"
item_name: function name or variable name
e.g.
search_conf_item('C:/Users/RockFeng/Desktop/s/preference.py','function','test_func')
""" |
dir_path = os.path.dirname(os.path.abspath(start_path))
target_file = os.path.join(dir_path, "preference.py")
if os.path.isfile(target_file):
imported_module = ModuleUtils.get_imported_module_from_file(target_file)
items_dict = ModuleUtils.filter_module(imported_module, item_type)
if item_name in items_dict:
return items_dict[item_name]
else:
return ModuleUtils.search_conf_item(dir_path, item_type, item_name)
if dir_path == start_path:
# system root path
err_msg = "'{}' not found in recursive upward path!".format(item_name)
if item_type == "function":
raise p_exception.FunctionNotFound(err_msg)
else:
raise p_exception.VariableNotFound(err_msg)
return ModuleUtils.search_conf_item(dir_path, item_type, item_name) |
<SYSTEM_TASK:>
convert a list of mappings to an ordered dict
<END_TASK>
<USER_TASK:>
Description:
def convert_to_order_dict(map_list):
""" convert mapping in list to ordered dict
@param (list) map_list
[
{"a": 1},
{"b": 2}
]
    @return (OrderedDict)
        OrderedDict({
"a": 1,
"b": 2
})
""" |
ordered_dict = OrderedDict()
for map_dict in map_list:
ordered_dict.update(map_dict)
return ordered_dict |
<SYSTEM_TASK:>
Flattens a nested list (the lambdas are an implementation detail).
<END_TASK>
<USER_TASK:>
Description:
def flatten(nested_list: list) -> list:
"""Flattens a list, ignore all the lambdas.""" |
# Relies on list.extend() returning None: sublists are spliced onto the
# end of `nested_list` while it is iterated, the None placeholders are
# filtered out, and the flat items are sorted. Mutates `nested_list`.
return list(sorted(filter(lambda y: y is not None,
                          list(map(lambda x: (nested_list.extend(x)  # noqa: T484
                                              if isinstance(x, list) else x),
                                   nested_list))))) |
<SYSTEM_TASK:>
Raise SystemExit with correct status code and output logs.
<END_TASK>
<USER_TASK:>
Description:
def exit(self) -> None:
"""Raise SystemExit with correct status code and output logs.""" |
total = sum(len(logs) for logs in self.logs.values())
if self.json:
self.logs['total'] = total
print(json.dumps(self.logs, indent=self.indent))
else:
for name, log in self.logs.items():
if not log or self.parser[name].as_bool("quiet"):
continue
print("[[{0}]]".format(name))
getattr(snekchek.format, name + "_format")(log)
print("\n")
print("-" * 30)
print("Total:", total)
sys.exit(self.status_code) |
<SYSTEM_TASK:>
Try to read a rcfile from a list of paths
<END_TASK>
<USER_TASK:>
Description:
def read_rcfile():
"""
Try to read a rcfile from a list of paths
""" |
files = [
'{}/.millipederc'.format(os.environ.get('HOME')),
'/usr/local/etc/millipederc',
'/etc/millipederc',
]
for filepath in files:
if os.path.isfile(filepath):
with open(filepath) as rcfile:
return parse_rcfile(rcfile)
return {} |
<SYSTEM_TASK:>
Merge arguments and rc_settings.
<END_TASK>
<USER_TASK:>
Description:
def compute_settings(args, rc_settings):
"""
Merge arguments and rc_settings.
""" |
settings = {}
for key, value in args.items():
    if key in ['reverse', 'opposite']:
        # Boolean flags toggle against the rcfile default (XOR).
        settings[key] = value ^ rc_settings.get(key, False)
else:
settings[key] = value or rc_settings.get(key)
if not settings['size']:
settings['size'] = DEFAULT_SIZE
return settings |
<SYSTEM_TASK:>
Send `message` as `name` to `url`.
<END_TASK>
<USER_TASK:>
Description:
def api_post(message, url, name, http_data=None, auth=None):
""" Send `message` as `name` to `url`.
You can specify extra variables in `http_data`
""" |
try:
import requests
except ImportError:
print('requests is required to do api post.', file=sys.stderr)
sys.exit(1)
data = {name : message}
if http_data:
for var in http_data:
        # Split on the first '=' only so values may themselves contain '='.
        key, value = var.split('=', 1)
data[key] = value
response = requests.post(
url,
data=data,
auth=auth
)
if response.status_code != 200:
raise RuntimeError('Unable to post data') |
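A hedged usage sketch (endpoint, field name, and credentials are hypothetical):

api_post('a very long millipede', 'https://chat.example.com/hooks/abc',
         'text', http_data=['channel=random'], auth=('bot', 's3cret'))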