text_prompt: stringlengths 100 to 17.7k
code_prompt: stringlengths 7 to 9.86k
<SYSTEM_TASK:> Setup console logging. Info and below go to stdout, others go to stderr. <END_TASK> <USER_TASK:> Description: def setup_logging(verbose=False, logger=None): """Setup console logging. Info and below go to stdout, others go to stderr. :param bool verbose: Print debug statements. :param str logger: Which logger to set handlers to. Used for testing. """
if not verbose:
    logging.getLogger('requests').setLevel(logging.WARNING)
format_ = '%(asctime)s %(levelname)-8s %(name)-40s %(message)s' if verbose else '%(message)s'
level = logging.DEBUG if verbose else logging.INFO

handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setFormatter(logging.Formatter(format_))
handler_stdout.setLevel(logging.DEBUG)
handler_stdout.addFilter(InfoFilter())

handler_stderr = logging.StreamHandler(sys.stderr)
handler_stderr.setFormatter(logging.Formatter(format_))
handler_stderr.setLevel(logging.WARNING)

root_logger = logging.getLogger(logger)
root_logger.setLevel(level)
root_logger.addHandler(handler_stdout)
root_logger.addHandler(handler_stderr)
<SYSTEM_TASK:> Automatically adds a named logger to a function upon function call. <END_TASK> <USER_TASK:> Description: def with_log(func): """Automatically adds a named logger to a function upon function call. :param func: Function to decorate. :return: Decorated function. :rtype: function """
@functools.wraps(func)
def wrapper(*args, **kwargs):
    """Inject `log` argument into wrapped function.

    :param list args: Pass through all positional arguments.
    :param dict kwargs: Pass through all keyword arguments.
    """
    decorator_logger = logging.getLogger('@with_log')
    decorator_logger.debug('Entering %s() function call.', func.__name__)
    log = kwargs.get('log', logging.getLogger(func.__name__))
    try:
        ret = func(log=log, *args, **kwargs)
    finally:
        decorator_logger.debug('Leaving %s() function call.', func.__name__)
    return ret

return wrapper
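A minimal usage sketch of the decorator above; the `greet` function and its messages are hypothetical, and only the standard library is assumed:

import functools
import logging

# Assume with_log is defined as above.

@with_log
def greet(name, log=None):  # hypothetical example function
    log.info('Hello, %s!', name)  # `log` is injected by the decorator
    return name.upper()

logging.basicConfig(level=logging.DEBUG)
greet('world')  # logs via logging.getLogger('greet'), bracketed by '@with_log' debug entries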
<SYSTEM_TASK:> Get command line arguments or values from environment variables. <END_TASK> <USER_TASK:> Description: def get_arguments(argv=None, environ=None): """Get command line arguments or values from environment variables. :param list argv: Command line argument list to process. For testing. :param dict environ: Environment variables. For testing. :return: Parsed options. :rtype: dict """
name = 'appveyor-artifacts'
environ = environ or os.environ
require = getattr(pkg_resources, 'require')  # Stupid linting error.
commit, owner, pull_request, repo, tag = '', '', '', '', ''

# Run docopt.
project = [p for p in require(name) if p.project_name == name][0]
version = project.version
args = docopt(__doc__, argv=argv or sys.argv[1:], version=version)

# Handle Travis environment variables.
if environ.get('TRAVIS') == 'true':
    commit = environ.get('TRAVIS_COMMIT', '')
    owner = environ.get('TRAVIS_REPO_SLUG', '/').split('/')[0]
    pull_request = environ.get('TRAVIS_PULL_REQUEST', '')
    if pull_request == 'false':
        pull_request = ''
    repo = environ.get('TRAVIS_REPO_SLUG', '/').split('/')[1].replace('_', '-')
    tag = environ.get('TRAVIS_TAG', '')

# Command line arguments override.
commit = args['--commit'] or commit
owner = args['--owner-name'] or owner
pull_request = args['--pull-request'] or pull_request
repo = args['--repo-name'] or repo
tag = args['--tag-name'] or tag

# Merge env variables and have command line args override.
config = {
    'always_job_dirs': args['--always-job-dirs'],
    'commit': commit,
    'dir': args['--dir'] or '',
    'ignore_errors': args['--ignore-errors'],
    'job_name': args['--job-name'] or '',
    'mangle_coverage': args['--mangle-coverage'],
    'no_job_dirs': args['--no-job-dirs'] or '',
    'owner': owner,
    'pull_request': pull_request,
    'raise': args['--raise'],
    'repo': repo,
    'tag': tag,
    'verbose': args['--verbose'],
}

return config
<SYSTEM_TASK:> Find the build version we're looking for. <END_TASK> <USER_TASK:> Description: def query_build_version(config, log): """Find the build version we're looking for. AppVeyor calls build IDs "versions" which is confusing but whatever. Job IDs aren't available in the history query, only on latest, specific version, and deployment queries. Hence we need two queries to get a one-time status update. Returns None if the job isn't queued yet. :raise HandledError: On invalid JSON data. :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: Build version. :rtype: str """
url = '/projects/{0}/{1}/history?recordsNumber=10'.format(config['owner'], config['repo'])

# Query history.
log.debug('Querying AppVeyor history API for %s/%s...', config['owner'], config['repo'])
json_data = query_api(url)
if 'builds' not in json_data:
    log.error('Bad JSON reply: "builds" key missing.')
    raise HandledError

# Find AppVeyor build "version".
for build in json_data['builds']:
    if config['tag'] and config['tag'] == build.get('tag'):
        log.debug('This is a tag build.')
    elif config['pull_request'] and config['pull_request'] == build.get('pullRequestId'):
        log.debug('This is a pull request build.')
    elif config['commit'] == build['commitId']:
        log.debug('This is a branch build.')
    else:
        continue
    log.debug('Build JSON dict: %s', str(build))
    return build['version']
return None
<SYSTEM_TASK:> Get one or more job IDs and their status associated with a build version. <END_TASK> <USER_TASK:> Description: def query_job_ids(build_version, config, log): """Get one or more job IDs and their status associated with a build version. Filters jobs by name if --job-name is specified. :raise HandledError: On invalid JSON data or bad job name. :param str build_version: AppVeyor build version from query_build_version(). :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: List of two-item tuples. Job ID (first) and its status (second). :rtype: list """
url = '/projects/{0}/{1}/build/{2}'.format(config['owner'], config['repo'], build_version)

# Query version.
log.debug('Querying AppVeyor version API for %s/%s at %s...', config['owner'], config['repo'], build_version)
json_data = query_api(url)
if 'build' not in json_data:
    log.error('Bad JSON reply: "build" key missing.')
    raise HandledError
if 'jobs' not in json_data['build']:
    log.error('Bad JSON reply: "jobs" key missing.')
    raise HandledError

# Find AppVeyor job.
all_jobs = list()
for job in json_data['build']['jobs']:
    if config['job_name'] and config['job_name'] == job['name']:
        log.debug('Filtering by job name: found match!')
        return [(job['jobId'], job['status'])]
    all_jobs.append((job['jobId'], job['status']))
if config['job_name']:
    log.error('Job name "%s" not found.', config['job_name'])
    raise HandledError
return all_jobs
<SYSTEM_TASK:> Query API again for artifacts. <END_TASK> <USER_TASK:> Description: def query_artifacts(job_ids, log): """Query API again for artifacts. :param iter job_ids: List of AppVeyor jobIDs. :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: List of tuples: (job ID, artifact file name, artifact file size). :rtype: list """
jobs_artifacts = list()
for job in job_ids:
    url = '/buildjobs/{0}/artifacts'.format(job)
    log.debug('Querying AppVeyor artifact API for %s...', job)
    json_data = query_api(url)
    for artifact in json_data:
        jobs_artifacts.append((job, artifact['fileName'], artifact['size']))
return jobs_artifacts
<SYSTEM_TASK:> Determine destination file paths for job artifacts. <END_TASK> <USER_TASK:> Description: def artifacts_urls(config, jobs_artifacts, log): """Determine destination file paths for job artifacts. :param dict config: Dictionary from get_arguments(). :param iter jobs_artifacts: List of job artifacts from query_artifacts(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: Destination file paths (keys), download URLs (value[0]), and expected file size (value[1]). :rtype: dict """
artifacts = dict()

# Determine if we should create job ID directories.
if config['always_job_dirs']:
    job_dirs = True
elif config['no_job_dirs']:
    job_dirs = False
elif len(set(i[0] for i in jobs_artifacts)) == 1:
    log.debug('Only one job ID, automatically setting job_dirs = False.')
    job_dirs = False
elif len(set(i[1] for i in jobs_artifacts)) == len(jobs_artifacts):
    log.debug('No local file conflicts, automatically setting job_dirs = False')
    job_dirs = False
else:
    log.debug('Multiple job IDs with file conflicts, automatically setting job_dirs = True')
    job_dirs = True

# Get final URLs and destination file paths.
root_dir = config['dir'] or os.getcwd()
for job, file_name, size in jobs_artifacts:
    artifact_url = '{0}/buildjobs/{1}/artifacts/{2}'.format(API_PREFIX, job, file_name)
    artifact_local = os.path.join(root_dir, job if job_dirs else '', file_name)
    if artifact_local in artifacts:
        if config['no_job_dirs'] == 'skip':
            log.debug('Skipping %s from %s', artifact_local, artifact_url)
            continue
        if config['no_job_dirs'] == 'rename':
            new_name = artifact_local
            while new_name in artifacts:
                path, ext = os.path.splitext(new_name)
                new_name = (path + '_' + ext) if ext else (new_name + '_')
            log.debug('Renaming %s to %s from %s', artifact_local, new_name, artifact_url)
            artifact_local = new_name
        elif config['no_job_dirs'] == 'overwrite':
            log.debug('Overwriting %s from %s with %s', artifact_local, artifacts[artifact_local][0], artifact_url)
        else:
            log.error('Collision: %s from %s and %s', artifact_local, artifacts[artifact_local][0], artifact_url)
            raise HandledError
    artifacts[artifact_local] = (artifact_url, size)

return artifacts
<SYSTEM_TASK:> Wait for AppVeyor job to finish and get all artifacts' URLs. <END_TASK> <USER_TASK:> Description: def get_urls(config, log): """Wait for AppVeyor job to finish and get all artifacts' URLs. :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. :return: Paths and URLs from artifacts_urls. :rtype: dict """
# Wait for job to be queued. Once it is we'll have the "version".
build_version = None
for _ in range(3):
    build_version = query_build_version(config)
    if build_version:
        break
    log.info('Waiting for job to be queued...')
    time.sleep(SLEEP_FOR)
if not build_version:
    log.error('Timed out waiting for job to be queued or build not found.')
    raise HandledError

# Get job IDs. Wait for AppVeyor job to finish.
job_ids = list()
valid_statuses = ['success', 'failed', 'running', 'queued']
while True:
    job_ids = query_job_ids(build_version, config)
    statuses = set([i[1] for i in job_ids])
    if 'failed' in statuses:
        job = [i[0] for i in job_ids if i[1] == 'failed'][0]
        url = 'https://ci.appveyor.com/project/{0}/{1}/build/job/{2}'.format(config['owner'], config['repo'], job)
        log.error('AppVeyor job failed: %s', url)
        raise HandledError
    if statuses == set(valid_statuses[:1]):
        log.info('Build successful. Found %d job%s.', len(job_ids), '' if len(job_ids) == 1 else 's')
        break
    if 'running' in statuses:
        log.info('Waiting for job%s to finish...', '' if len(job_ids) == 1 else 's')
    elif 'queued' in statuses:
        log.info('Waiting for all jobs to start...')
    else:
        log.error('Got unknown status from AppVeyor API: %s', ' '.join(statuses - set(valid_statuses)))
        raise HandledError
    time.sleep(SLEEP_FOR)

# Get artifacts.
artifacts = query_artifacts([i[0] for i in job_ids])
log.info('Found %d artifact%s.', len(artifacts), '' if len(artifacts) == 1 else 's')
return artifacts_urls(config, artifacts) if artifacts else dict()
<SYSTEM_TASK:> Edit .coverage file substituting Windows file paths to Linux paths. <END_TASK> <USER_TASK:> Description: def mangle_coverage(local_path, log): """Edit .coverage file substituting Windows file paths to Linux paths. :param str local_path: Destination path to save file to. :param logging.Logger log: Logger for this function. Populated by with_log() decorator. """
# Read the file, or return if not a .coverage file.
with open(local_path, mode='rb') as handle:
    if handle.read(13) != b'!coverage.py:':
        log.debug('File %s not a coverage file.', local_path)
        return
    handle.seek(0)
    # I'm lazy, reading all of this into memory. What could possibly go wrong?
    file_contents = handle.read(52428800).decode('utf-8')  # 50 MiB limit, surely this is enough?

# Substitute paths.
for windows_path in set(REGEX_MANGLE.findall(file_contents)):
    unix_relative_path = windows_path.replace(r'\\', '/').split('/', 3)[-1]
    unix_absolute_path = os.path.abspath(unix_relative_path)
    if not os.path.isfile(unix_absolute_path):
        log.debug('Windows path: %s', windows_path)
        log.debug('Unix relative path: %s', unix_relative_path)
        log.error('No such file: %s', unix_absolute_path)
        raise HandledError
    file_contents = file_contents.replace(windows_path, unix_absolute_path)

# Write.
with open(local_path, 'w') as handle:
    handle.write(file_contents)
<SYSTEM_TASK:> Main function. Runs the program. <END_TASK> <USER_TASK:> Description: def main(config, log): """Main function. Runs the program. :param dict config: Dictionary from get_arguments(). :param logging.Logger log: Logger for this function. Populated by with_log() decorator. """
validate(config)
paths_and_urls = get_urls(config)
if not paths_and_urls:
    log.warning('No artifacts; nothing to download.')
    return

# Download files.
total_size = 0
chunk_size = max(min(max(v[1] for v in paths_and_urls.values()) // 50, 1048576), 1024)
log.info('Downloading file%s (1 dot ~ %d KiB):', '' if len(paths_and_urls) == 1 else 's', chunk_size // 1024)
for size, local_path, url in sorted((v[1], k, v[0]) for k, v in paths_and_urls.items()):
    download_file(config, local_path, url, size, chunk_size)
    total_size += size
    if config['mangle_coverage']:
        mangle_coverage(local_path)

log.info('Downloaded %d file(s), %d bytes total.', len(paths_and_urls), total_size)
<SYSTEM_TASK:> Consume the receive buffer and return the messages. <END_TASK> <USER_TASK:> Description: def incoming_messages(self) -> t.List[t.Tuple[float, bytes]]: """Consume the receive buffer and return the messages. If new messages are added to the queue while this function is running, they will not be returned. This ensures that it terminates in a timely manner. """
approximate_messages = self._receive_buffer.qsize()
messages = []
for _ in range(approximate_messages):
    try:
        messages.append(self._receive_buffer.get_nowait())
    except queue.Empty:
        break
return messages
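A rough standalone illustration of the same drain pattern, with a plain queue.Queue standing in for the receive buffer:

import queue

buf = queue.Queue()
for ts, payload in [(0.1, b'a'), (0.2, b'b')]:
    buf.put((ts, payload))

# Snapshot the size first, then drain at most that many items, so
# messages enqueued concurrently are left for the next call.
n = buf.qsize()
drained = []
for _ in range(n):
    try:
        drained.append(buf.get_nowait())
    except queue.Empty:
        break
print(drained)  # [(0.1, b'a'), (0.2, b'b')]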
<SYSTEM_TASK:> Helper for accessing style values. <END_TASK> <USER_TASK:> Description: def _safe_get(mapping, key, default=None): """Helper for accessing style values. It exists to avoid checking whether `mapping` is indeed a mapping before trying to get a key. In the context of style dicts, this eliminates "is this a mapping" checks in two common situations: 1) a style argument is None, and 2) a style key's value (e.g., width) can be either a mapping or a plain value. """
try:
    return mapping.get(key, default)
except AttributeError:
    return default
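For example (values chosen arbitrarily):

_safe_get({'width': 10}, 'width')   # -> 10
_safe_get(None, 'width', 5)         # -> 5 (no AttributeError raised)
_safe_get(10, 'width', 5)           # -> 5 (plain value, not a mapping)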
<SYSTEM_TASK:> Extract callable values from `row`. <END_TASK> <USER_TASK:> Description: def strip_callables(row): """Extract callable values from `row`. Replace the callable values with the initial value (if specified) or an empty string. Parameters ---------- row : mapping A data row. The keys are either a single column name or a tuple of column names. The values take one of three forms: 1) a non-callable value, 2) a tuple (initial_value, callable), 3) or a single callable (in which case the initial value is set to an empty string). Returns ------- list of (column, callable) """
callables = []
to_delete = []
to_add = []
for columns, value in row.items():
    if isinstance(value, tuple):
        initial, fn = value
    else:
        initial = NOTHING
        # Value could be a normal (non-callable) value or a
        # callable with no initial value.
        fn = value
    if callable(fn) or inspect.isgenerator(fn):
        lgr.debug("Using %r as the initial value "
                  "for columns %r in row %r",
                  initial, columns, row)
        if not isinstance(columns, tuple):
            columns = columns,
        else:
            to_delete.append(columns)
        for column in columns:
            to_add.append((column, initial))
        callables.append((columns, fn))

for column, value in to_add:
    row[column] = value
for multi_columns in to_delete:
    del row[multi_columns]

return callables
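A sketch of how a row is transformed (hypothetical row; assumes the module-level NOTHING sentinel and lgr logger are in scope):

row = {'name': 'job-1',
       'status': ('pending', lambda: 'done'),   # initial value plus callable
       ('size', 'unit'): lambda: (42, 'MB')}    # multi-column callable, no initial value
cbs = strip_callables(row)
# row is now {'name': 'job-1', 'status': 'pending', 'size': NOTHING, 'unit': NOTHING}
# cbs pairs each column tuple with its callable, e.g.
# [(('status',), <lambda>), (('size', 'unit'), <lambda>)]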
<SYSTEM_TASK:> Build the style and fields. <END_TASK> <USER_TASK:> Description: def build(self, columns): """Build the style and fields. Parameters ---------- columns : list of str Column names. """
self.columns = columns
default = dict(elements.default("default_"),
               **_safe_get(self.init_style, "default_", {}))
self.style = elements.adopt({c: default for c in columns},
                            self.init_style)

# Store special keys in _style so that they can be validated.
self.style["default_"] = default
self.style["header_"] = self._compose("header_", {"align", "width"})
self.style["aggregate_"] = self._compose("aggregate_", {"align", "width"})
self.style["separator_"] = _safe_get(self.init_style, "separator_",
                                     elements.default("separator_"))
lgr.debug("Validating style %r", self.style)
self.style["width_"] = _safe_get(self.init_style, "width_",
                                 elements.default("width_"))
elements.validate(self.style)
self._setup_fields()

ngaps = len(self.columns) - 1
self.width_separtor = len(self.style["separator_"]) * ngaps
lgr.debug("Calculated separator width as %d", self.width_separtor)
<SYSTEM_TASK:> Construct a style taking `attributes` from the column styles. <END_TASK> <USER_TASK:> Description: def _compose(self, name, attributes): """Construct a style taking `attributes` from the column styles. Parameters ---------- name : str Name of main style (e.g., "header_"). attributes : set of str Adopt these elements from the column styles. Returns ------- The composite style for `name`. """
name_style = _safe_get(self.init_style, name, elements.default(name))
if self.init_style is not None and name_style is not None:
    result = {}
    for col in self.columns:
        cstyle = {k: v for k, v in self.style[col].items()
                  if k in attributes}
        result[col] = dict(cstyle, **name_style)
    return result
<SYSTEM_TASK:> Update auto-width Fields based on `row`. <END_TASK> <USER_TASK:> Description: def _set_widths(self, row, proc_group): """Update auto-width Fields based on `row`. Parameters ---------- row : dict proc_group : {'default', 'override'} Whether to consider 'default' or 'override' key for pre- and post-format processors. Returns ------- True if any widths required adjustment. """
width_free = self.style["width_"] - sum( [sum(self.fields[c].width for c in self.columns), self.width_separtor]) if width_free < 0: width_fixed = sum( [sum(self.fields[c].width for c in self.columns if c not in self.autowidth_columns), self.width_separtor]) assert width_fixed > self.style["width_"], "bug in width logic" raise elements.StyleError( "Fixed widths specified in style exceed total width") elif width_free == 0: lgr.debug("Not checking widths; no free width left") return False lgr.debug("Checking width for row %r", row) adjusted = False for column in sorted(self.columns, key=lambda c: self.fields[c].width): # ^ Sorting the columns by increasing widths isn't necessary; we do # it so that columns that already take up more of the screen don't # continue to grow and use up free width before smaller columns # have a chance to claim some. if width_free < 1: lgr.debug("Giving up on checking widths; no free width left") break if column in self.autowidth_columns: field = self.fields[column] lgr.debug("Checking width of column %r " "(field width: %d, free width: %d)", column, field.width, width_free) # If we've added any style transform functions as # pre-format processors, we want to measure the width # of their result rather than the raw value. if field.pre[proc_group]: value = field(row[column], keys=[proc_group], exclude_post=True) else: value = row[column] value = six.text_type(value) value_width = len(value) wmax = self.autowidth_columns[column]["max"] if value_width > field.width: width_old = field.width width_available = width_free + field.width width_new = min(value_width, wmax or width_available, width_available) if width_new > width_old: adjusted = True field.width = width_new lgr.debug("Adjusting width of %r column from %d to %d " "to accommodate value %r", column, width_old, field.width, value) self._truncaters[column].length = field.width width_free -= field.width - width_old lgr.debug("Free width is %d after processing column %r", width_free, column) return adjusted
<SYSTEM_TASK:> Return whether group is "default" or "override". <END_TASK> <USER_TASK:> Description: def _proc_group(self, style, adopt=True): """Return whether group is "default" or "override". In the case of "override", the self.fields pre-format and post-format processors will be set under the "override" key. Parameters ---------- style : dict A style that follows the schema defined in pyout.elements. adopt : bool, optional Merge `self.style` and `style`, giving priority to the latter's keys when there are conflicts. If False, treat `style` as a standalone style. """
fields = self.fields
if style is not None:
    if adopt:
        style = elements.adopt(self.style, style)
    elements.validate(style)

    for column in self.columns:
        fields[column].add(
            "pre", "override",
            *(self.procgen.pre_from_style(style[column])))
        fields[column].add(
            "post", "override",
            *(self.procgen.post_from_style(style[column])))
    return "override"
else:
    return "default"
<SYSTEM_TASK:> Render fields with values from `row`. <END_TASK> <USER_TASK:> Description: def render(self, row, style=None, adopt=True): """Render fields with values from `row`. Parameters ---------- row : dict A normalized row. style : dict, optional A style that follows the schema defined in pyout.elements. If None, `self.style` is used. adopt : bool, optional Merge `self.style` and `style`, using the latter's keys when there are conflicts. If False, treat `style` as a standalone style. Returns ------- A tuple with the rendered value (str) and a flag that indicates whether the field widths required adjustment (bool). """
group = self._proc_group(style, adopt=adopt)
if group == "override":
    # Override the "default" processor key.
    proc_keys = ["width", "override"]
else:
    # Use the set of processors defined by _setup_fields.
    proc_keys = None

adjusted = self._set_widths(row, group)
proc_fields = [self.fields[c](row[c], keys=proc_keys)
               for c in self.columns]
return self.style["separator_"].join(proc_fields) + "\n", adjusted
<SYSTEM_TASK:> Sets up the basic config from the variables passed in <END_TASK> <USER_TASK:> Description: def get_config(self): """ Sets up the basic config from the variables passed in; all of these are from what Heroku gives you. """
self.create_ssl_certs()
config = {
    "bootstrap_servers": self.get_brokers(),
    "security_protocol": 'SSL',
    "ssl_cafile": self.ssl["ca"]["file"].name,
    "ssl_certfile": self.ssl["cert"]["file"].name,
    "ssl_keyfile": self.ssl["key"]["file"].name,
    "ssl_check_hostname": False,
    "ssl_password": None
}
self.config.update(config)
<SYSTEM_TASK:> Creates a temp file from escaped environment-variable content. <END_TASK> <USER_TASK:> Description: def create_temp_file(self, suffix, content): """ Creates a file; because environment variables are escaped by default, it encodes and then decodes the content before writing so \n etc. work correctly. """
temp = tempfile.NamedTemporaryFile(suffix=suffix)
temp.write(content.encode('latin1').decode('unicode_escape').encode('utf-8'))
temp.seek(0)  # Resets the temp file line to 0
return temp
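The encode/decode round-trip is what turns literal backslash escapes from the environment into real newlines; for instance:

raw = 'line one\\nline two'  # a backslash-n sequence, as an escaped env var typically arrives
fixed = raw.encode('latin1').decode('unicode_escape')
print(fixed)
# line one
# line two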
<SYSTEM_TASK:> Appends the prefix to the topic before sending <END_TASK> <USER_TASK:> Description: def send(self, topic, *args, **kwargs): """ Appends the prefix to the topic before sending """
prefix_topic = self.heroku_kafka.prefix_topic(topic)
return super(HerokuKafkaProducer, self).send(prefix_topic, *args, **kwargs)
<SYSTEM_TASK:> Inherited method should take all specified arguments. <END_TASK> <USER_TASK:> Description: def get(self, variable_path: str, default: t.Optional[t.Any] = None, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None, **kwargs): """ Inherited method should take all specified arguments. :param variable_path: a delimiter-separated path to a nested value :param default: default value if there's no object by specified path :param coerce_type: cast a type of a value to a specified one :param coercer: perform a type casting with specified callback :param kwargs: additional arguments inherited parser may need :return: value or default """
raise NotImplementedError
<SYSTEM_TASK:> Casts a type of ``val`` to ``coerce_type`` with ``coercer``. <END_TASK> <USER_TASK:> Description: def coerce(val: t.Any, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None) -> t.Any: """ Casts a type of ``val`` to ``coerce_type`` with ``coercer``. If ``coerce_type`` is bool and no ``coercer`` specified it uses :func:`~django_docker_helpers.utils.coerce_str_to_bool` by default. :param val: a value of any type :param coerce_type: any type :param coercer: provide a callback that takes ``val`` and returns a value with desired type :return: type casted value """
if not coerce_type and not coercer:
    return val

if coerce_type and type(val) is coerce_type:
    return val

if coerce_type and coerce_type is bool and not coercer:
    coercer = coerce_str_to_bool

if coercer is None:
    coercer = coerce_type

return coercer(val)
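A few illustrative calls; the bool case assumes a coerce_str_to_bool helper like the stand-in sketched here, since the real one lives elsewhere in the package:

# Hypothetical stand-in for the package's coerce_str_to_bool helper.
def coerce_str_to_bool(val):
    return str(val).strip().lower() in ('1', 'true', 'yes', 'on')

coerce('8000', coerce_type=int)    # -> 8000
coerce(8000, coerce_type=int)      # -> 8000, returned as-is (type already matches)
coerce('false', coerce_type=bool)  # -> False, via the bool coercer
coerce('42', coercer=float)        # -> 42.0, explicit coercer wins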
<SYSTEM_TASK:> Splice `value` at its center, retaining a total of `n` characters. <END_TASK> <USER_TASK:> Description: def _splice(value, n): """Splice `value` at its center, retaining a total of `n` characters. Parameters ---------- value : str n : int The total length of the returned ends will not be greater than this value. Characters will be dropped from the center to reach this limit. Returns ------- A tuple of str: (head, tail). """
if n <= 0:
    raise ValueError("n must be positive")

value_len = len(value)
center = value_len // 2
left, right = value[:center], value[center:]

if n >= value_len:
    return left, right

n_todrop = value_len - n
right_idx = n_todrop // 2
left_idx = right_idx + n_todrop % 2
return left[:-left_idx], right[right_idx:]
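Worked example:

_splice('abcdefgh', 4)   # -> ('ab', 'gh'): the four center characters are dropped
_splice('abc', 10)       # -> ('a', 'bc'): n >= len(value), so nothing is dropped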
<SYSTEM_TASK:> Add processor functions. <END_TASK> <USER_TASK:> Description: def add(self, kind, key, *values): """Add processor functions. Any previous list of processors for `kind` and `key` will be overwritten. Parameters ---------- kind : {"pre", "post"} key : str A registered key. Add the functions (in order) to this key's list of processors. *values : callables Processors to add. """
if kind == "pre": procs = self.pre elif kind == "post": procs = self.post else: raise ValueError("kind is not 'pre' or 'post'") self._check_if_registered(key) procs[key] = values
<SYSTEM_TASK:> Wrap format call as a two-argument processor function. <END_TASK> <USER_TASK:> Description: def _format(self, _, result): """Wrap format call as a two-argument processor function. """
return self._fmt.format(six.text_type(result))
<SYSTEM_TASK:> Return a processor for a style's "transform" function. <END_TASK> <USER_TASK:> Description: def transform(function): """Return a processor for a style's "transform" function. """
def transform_fn(_, result):
    if isinstance(result, Nothing):
        return result
    lgr.debug("Transforming %r with %r", result, function)
    try:
        return function(result)
    except:
        exctype, value, tb = sys.exc_info()
        try:
            new_exc = StyleFunctionError(function, exctype, value)
            # Remove the "During handling ..." since we're
            # reraising with the traceback.
            new_exc.__cause__ = None
            six.reraise(StyleFunctionError, new_exc, tb)
        finally:
            # Remove circular reference.
            # https://docs.python.org/2/library/sys.html#sys.exc_info
            del tb

return transform_fn
<SYSTEM_TASK:> Return a processor for a "simple" style value. <END_TASK> <USER_TASK:> Description: def by_key(self, style_key, style_value): """Return a processor for a "simple" style value. Parameters ---------- style_key : str A style key. style_value : bool or str A "simple" style value that is either a style attribute (str) and a boolean flag indicating to use the style attribute named by `style_key`. Returns ------- A function. """
if self.style_types[style_key] is bool:
    style_attr = style_key
else:
    style_attr = style_value

def proc(_, result):
    return self.render(style_attr, result)
return proc
<SYSTEM_TASK:> Return a processor that extracts the style from `mapping`. <END_TASK> <USER_TASK:> Description: def by_lookup(self, style_key, style_value): """Return a processor that extracts the style from `mapping`. Parameters ---------- style_key : str A style key. style_value : dict A dictionary with a "lookup" key whose value is a "mapping" style value that maps a field value to either a style attribute (str) or a boolean flag indicating whether to use the style attribute named by `style_key`. Returns ------- A function. """
style_attr = style_key if self.style_types[style_key] is bool else None
mapping = style_value["lookup"]

def proc(value, result):
    try:
        lookup_value = mapping[value]
    except (KeyError, TypeError):
        # ^ TypeError is included in case the user passes non-hashable
        # values.
        return result
    if not lookup_value:
        return result
    return self.render(style_attr or lookup_value, result)
return proc
<SYSTEM_TASK:> Return a processor for a "re_lookup" style value. <END_TASK> <USER_TASK:> Description: def by_re_lookup(self, style_key, style_value, re_flags=0): """Return a processor for a "re_lookup" style value. Parameters ---------- style_key : str A style key. style_value : dict A dictionary with a "re_lookup" style value that consists of a sequence of items where each item should have the form `(regexp, x)`, where regexp is a regular expression to match against the field value and x is either a style attribute (str) and a boolean flag indicating to use the style attribute named by `style_key`. re_flags : int Passed through as flags argument to re.compile. Returns ------- A function. """
style_attr = style_key if self.style_types[style_key] is bool else None
regexps = [(re.compile(r, flags=re_flags), v)
           for r, v in style_value["re_lookup"]]

def proc(value, result):
    if not isinstance(value, six.string_types):
        return result
    for r, lookup_value in regexps:
        if r.search(value):
            if not lookup_value:
                return result
            return self.render(style_attr or lookup_value, result)
    return result
return proc
<SYSTEM_TASK:> Return a processor for an "interval" style value. <END_TASK> <USER_TASK:> Description: def by_interval_lookup(self, style_key, style_value): """Return a processor for an "interval" style value. Parameters ---------- style_key : str A style key. style_value : dict A dictionary with an "interval" key whose value consists of a sequence of tuples where each tuple should have the form `(start, end, x)`, where start is the start of the interval (inclusive), end is the end of the interval, and x is either a style attribute (str) and a boolean flag indicating to use the style attribute named by `style_key`. Returns ------- A function. """
style_attr = style_key if self.style_types[style_key] is bool else None
intervals = style_value["interval"]

def proc(value, result):
    try:
        value = float(value)
    except TypeError:
        return result
    for start, end, lookup_value in intervals:
        if start is None:
            start = float("-inf")
        if end is None:
            end = float("inf")
        if start <= value < end:
            if not lookup_value:
                return result
            return self.render(style_attr or lookup_value, result)
    return result
return proc
<SYSTEM_TASK:> Yield post-format processors based on `column_style`. <END_TASK> <USER_TASK:> Description: def post_from_style(self, column_style): """Yield post-format processors based on `column_style`. Parameters ---------- column_style : dict A style where the top-level keys correspond to style attributes such as "bold" or "color". Returns ------- A generator object. """
flanks = Flanks()
yield flanks.split_flanks

fns = {"simple": self.by_key,
       "lookup": self.by_lookup,
       "re_lookup": self.by_re_lookup,
       "interval": self.by_interval_lookup}

for key in self.style_types:
    if key not in column_style:
        continue
    vtype = value_type(column_style[key])
    fn = fns[vtype]
    args = [key, column_style[key]]
    if vtype == "re_lookup":
        args.append(sum(getattr(re, f)
                        for f in column_style.get("re_flags", [])))
    yield fn(*args)

yield flanks.join_flanks
<SYSTEM_TASK:> Return `result` without flanking whitespace. <END_TASK> <USER_TASK:> Description: def split_flanks(self, _, result): """Return `result` without flanking whitespace. """
if not result.strip():
    self.left, self.right = "", ""
    return result

match = self.flank_re.match(result)
assert match, "This regexp should always match"
self.left, self.right = match.group(1), match.group(3)
return match.group(2)
<SYSTEM_TASK:> Prepend terminal code for `style_attr` to `value`. <END_TASK> <USER_TASK:> Description: def render(self, style_attr, value): """Prepend terminal code for `style_attr` to `value`. Parameters ---------- style_attr : str A style attribute (e.g., "bold" or "blue"). value : str The value to render. Returns ------- The code for `style_attr` (e.g., "\x1b[1m" for bold) plus the original value. """
if not value.strip():
    # We've got an empty string. Don't bother adding any
    # codes.
    return value
return six.text_type(getattr(self.term, style_attr)) + value
<SYSTEM_TASK:> Get per-instance subscribers from the signal. <END_TASK> <USER_TASK:> Description: def get_subscribers(self): """Get per-instance subscribers from the signal. """
data = self.signal.instance_subscribers
if self.instance not in data:
    data[self.instance] = MethodAwareWeakList()
return data[self.instance]
<SYSTEM_TASK:> Add a function or a method as a handler of this signal. <END_TASK> <USER_TASK:> Description: def connect(self, cback, subscribers=None, instance=None): """Add a function or a method as a handler of this signal. Any handler added can be a coroutine. :param cback: the callback (or *handler*) to be added to the set :returns: ``None`` or the value returned by the corresponding wrapper """
if subscribers is None:
    subscribers = self.subscribers
# wrapper
if self._fconnect is not None:
    def _connect(cback):
        self._connect(subscribers, cback)

    notify = partial(self._notify_one, instance)
    if instance is not None:
        result = self._fconnect(instance, cback, subscribers,
                                _connect, notify)
    else:
        result = self._fconnect(cback, subscribers, _connect, notify)
    if inspect.isawaitable(result):
        result = pull_result(result)
else:
    self._connect(subscribers, cback)
    result = None
return result
<SYSTEM_TASK:> Remove a previously added function or method from the set of the <END_TASK> <USER_TASK:> Description: def disconnect(self, cback, subscribers=None, instance=None): """Remove a previously added function or method from the set of the signal's handlers. :param cback: the callback (or *handler*) to be removed from the set :returns: ``None`` or the value returned by the corresponding wrapper """
if subscribers is None:
    subscribers = self.subscribers
# wrapper
if self._fdisconnect is not None:
    def _disconnect(cback):
        self._disconnect(subscribers, cback)

    notify = partial(self._notify_one, instance)
    if instance is not None:
        result = self._fdisconnect(instance, cback, subscribers,
                                   _disconnect, notify)
    else:
        result = self._fdisconnect(cback, subscribers, _disconnect,
                                   notify)
    if inspect.isawaitable(result):
        result = pull_result(result)
else:
    self._disconnect(subscribers, cback)
    result = None
return result
<SYSTEM_TASK:> If 'external_signaller' is defined, calls its publish method to <END_TASK> <USER_TASK:> Description: def ext_publish(self, instance, loop, *args, **kwargs): """If 'external_signaller' is defined, calls its publish method to notify external event systems. This is for internal usage only, but it's documented because it's part of the interface with external notification systems. """
if self.external_signaller is not None:
    # Assumes that the loop is managed by the external handler
    return self.external_signaller.publish_signal(self, instance, loop,
                                                  args, kwargs)
<SYSTEM_TASK:> Configure logging module. <END_TASK> <USER_TASK:> Description: def configure_logging( filename=None, filemode="a", datefmt=FMT_DATE, fmt=FMT, stdout_fmt=FMT_STDOUT, level=logging.DEBUG, stdout_level=logging.WARNING, initial_file_message="", max_size=1048576, rotations_number=5, remove_handlers=True, ): """Configure logging module. Args: filename (str): Specifies a filename to log to. filemode (str): Specifies the mode to open the log file. Values: ``'a'``, ``'w'``. *Default:* ``a``. datefmt (str): Use the specified date/time format. fmt (str): Format string for the file handler. stdout_fmt (str): Format string for the stdout handler. level (int): Log level for the file handler. Log levels are the same as the log levels from the standard :mod:`logging` module. *Default:* ``logging.DEBUG`` stdout_level (int): Log level for the stdout handler. Log levels are the same as the log levels from the standard :mod:`logging` module. *Default:* ``logging.WARNING`` initial_file_message (str): First log entry written in file. max_size (int): Maximal size of the logfile. If the size of the file exceed the maximal size it will be rotated. rotations_number (int): Number of rotations to save. remove_handlers (bool): Remove all existing handlers. """
logger = logging.getLogger()
logger.level = logging.NOTSET
# Remove all handlers
if remove_handlers:
    while len(logger.handlers) > 0:
        hdlr = logger.handlers[0]
        hdlr.close()
        logger.removeHandler(hdlr)

# Create stdout handler
if stdout_level is not None:
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(stdout_level)
    stdout_formatter = logging.Formatter(stdout_fmt, datefmt)
    # stdoutFormatter.converter = time.gmtime
    stdout_handler.setFormatter(stdout_formatter)
    logger.addHandler(stdout_handler)

# Create file handler if filename is provided
if filename is not None:
    # Check if filename directory exists and creates it if it doesn't
    directory = os.path.abspath(os.path.dirname(filename))
    if not os.path.isdir(directory):
        shell.mkdir(directory)
    # Create file handler
    file_handler = RotatingFileHandler(
        filename, filemode, max_size, rotations_number
    )
    file_handler.setLevel(level)
    file_formatter = logging.Formatter(fmt, datefmt)
    file_formatter.converter = time.gmtime
    file_handler.setFormatter(file_formatter)
    logger.addHandler(file_handler)
    if initial_file_message:
        message = " %s " % initial_file_message
        file_handler.stream.write("\n" + message.center(100, "=") + "\n\n")
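A typical call might look like this (the path, levels, and message are illustrative):

import logging

configure_logging(
    filename='/tmp/myapp.log',        # rotated at ~1 MiB, 5 backups kept
    level=logging.DEBUG,              # the file gets everything
    stdout_level=logging.INFO,        # the console stays quieter
    initial_file_message='myapp started',
)
logging.getLogger(__name__).info('hello')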
<SYSTEM_TASK:> Determine what changes are required. <END_TASK> <USER_TASK:> Description: def create_plan(existing_users=None, proposed_users=None, purge_undefined=None, protected_users=None, allow_non_unique_id=None, manage_home=True, manage_keys=True): """Determine what changes are required. args: existing_users (Users): List of discovered users proposed_users (Users): List of proposed users purge_undefined (bool): Remove discovered users that have not been defined in proposed users list protected_users (list): List of users' names that should not be evaluated as part of the plan creation process allow_non_unique_id (bool): Allow more than one user to have the same uid manage_home (bool): Create/remove users' home directories manage_keys (bool): Add/update/remove users' keys (manage_home must also be true) returns: list: Differences between discovered and proposed users with a list of operations that will achieve the desired state. """
plan = list()
proposed_usernames = list()

if not purge_undefined:
    purge_undefined = constants.PURGE_UNDEFINED
if not protected_users:
    protected_users = constants.PROTECTED_USERS
if not allow_non_unique_id:
    allow_non_unique_id = constants.ALLOW_NON_UNIQUE_ID

# Create list of modifications to make based on proposed users compared to existing users
for proposed_user in proposed_users:
    proposed_usernames.append(proposed_user.name)
    user_matching_name = existing_users.describe_users(users_filter=dict(name=proposed_user.name))
    user_matching_id = get_user_by_uid(uid=proposed_user.uid, users=existing_users)
    # If user does not exist
    if not allow_non_unique_id and user_matching_id and not user_matching_name:
        plan.append(
            dict(action='fail', error='uid_clash', proposed_user=proposed_user,
                 state='existing', result=None))
    elif not user_matching_name:
        plan.append(
            dict(action='add', proposed_user=proposed_user, state='missing', result=None,
                 manage_home=manage_home, manage_keys=manage_keys))
    # If they do, then compare
    else:
        user_comparison = compare_user(passed_user=proposed_user, user_list=existing_users)
        if user_comparison.get('result'):
            plan.append(
                dict(action='update', proposed_user=proposed_user, state='existing',
                     user_comparison=user_comparison,
                     manage_home=manage_home, manage_keys=manage_keys))

# Application of the proposed user list will not result in deletion of users that need to be removed
# If 'PURGE_UNDEFINED' then look for existing users that are not defined in proposed usernames and mark for removal
if purge_undefined:
    for existing_user in existing_users:
        if existing_user.name not in proposed_usernames:
            if existing_user.name not in protected_users:
                plan.append(
                    dict(action='delete', username=existing_user.name, state='existing',
                         manage_home=manage_home, manage_keys=manage_keys))
return plan
<SYSTEM_TASK:> Create, Modify or Delete, depending on plan item. <END_TASK> <USER_TASK:> Description: def execute_plan(plan=None): """Create, Modify or Delete, depending on plan item."""
execution_result = list()
for task in plan:
    action = task['action']
    if action == 'delete':
        command = generate_delete_user_command(username=task.get('username'),
                                               manage_home=task['manage_home'])
        command_output = execute_command(command)
        execution_result.append(dict(task=task, command_output=command_output))
        remove_sudoers_entry(username=task.get('username'))
    elif action == 'add':
        command = generate_add_user_command(proposed_user=task.get('proposed_user'),
                                            manage_home=task['manage_home'])
        command_output = execute_command(command)
        if task['proposed_user'].public_keys and task['manage_home'] and task['manage_keys']:
            write_authorized_keys(task['proposed_user'])
        if task['proposed_user'].sudoers_entry:
            write_sudoers_entry(username=task['proposed_user'].name,
                                sudoers_entry=task['proposed_user'].sudoers_entry)
        execution_result.append(dict(task=task, command_output=command_output))
    elif action == 'update':
        result = task['user_comparison'].get('result')
        # Don't modify user if only keys have changed
        action_count = 0
        for k, _ in iteritems(result):
            if '_action' in k:
                action_count += 1
        command_output = None
        if task['manage_home'] and task['manage_keys'] and action_count == 1 and 'public_keys_action' in result:
            write_authorized_keys(task['proposed_user'])
        elif action_count == 1 and 'sudoers_entry_action' in result:
            write_sudoers_entry(username=task['proposed_user'].name,
                                sudoers_entry=task['user_comparison']['result']['replacement_sudoers_entry'])
        else:
            command = generate_modify_user_command(task=task)
            command_output = execute_command(command)
            if task['manage_home'] and task['manage_keys'] and result.get('public_keys_action'):
                write_authorized_keys(task['proposed_user'])
            if result.get('sudoers_entry_action'):
                write_sudoers_entry(username=task['proposed_user'].name,
                                    sudoers_entry=task['user_comparison']['result']['replacement_sudoers_entry'])
        execution_result.append(dict(task=task, command_output=command_output))
# Return the collected results; the original snippet accumulated them without returning.
return execution_result
<SYSTEM_TASK:> Reads a value of ``variable_path`` from environment. <END_TASK> <USER_TASK:> Description: def get(self, variable_path: str, default: t.Optional[t.Any] = None, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None, **kwargs): """ Reads a value of ``variable_path`` from environment. If ``coerce_type`` is ``bool`` and no ``coercer`` specified, ``coerces`` forced to be :func:`~django_docker_helpers.utils.coerce_str_to_bool` :param variable_path: a delimiter-separated path to a nested value :param default: default value if there's no object by specified path :param coerce_type: cast a type of a value to a specified one :param coercer: perform a type casting with specified callback :param kwargs: additional arguments inherited parser may need :return: value or default """
var_name = self.get_env_var_name(variable_path)
val = self.env.get(var_name, self.sentinel)
if val is self.sentinel:
    return default
# coerce to bool with default env coercer if no coercer specified
if coerce_type and coerce_type is bool and not coercer:
    coercer = coerce_str_to_bool
return self.coerce(val, coerce_type=coerce_type, coercer=coercer)
<SYSTEM_TASK:> Recursively zip a directory. <END_TASK> <USER_TASK:> Description: def mkzip(archive, items, mode="w", save_full_paths=False): """Recursively zip a directory. Args: archive (zipfile.ZipFile or str): ZipFile object add to or path to the output zip archive. items (str or list of str): Single item or list of items (files and directories) to be added to zipfile. mode (str): w for create new and write a for append to. save_full_paths (bool): Preserve full paths. """
close = False
try:
    if not isinstance(archive, zipfile.ZipFile):
        archive = zipfile.ZipFile(archive, mode, allowZip64=True)
        close = True
    logger.info("mkdzip: Creating %s, from: %s", archive.filename, items)
    if isinstance(items, str):
        items = [items]
    for item in items:
        item = os.path.abspath(item)
        basename = os.path.basename(item)
        if os.path.isdir(item):
            for root, directoires, filenames in os.walk(item):
                for filename in filenames:
                    path = os.path.join(root, filename)
                    if save_full_paths:
                        archive_path = path.encode("utf-8")
                    else:
                        archive_path = os.path.join(
                            basename, path.replace(item, "").strip("\\/")
                        ).encode("utf-8")
                    archive.write(path, archive_path)
        elif os.path.isfile(item):
            if save_full_paths:
                archive_name = item.encode("utf-8")
            else:
                archive_name = basename.encode("utf-8")
            archive.write(item, archive_name)  # , zipfile.ZIP_DEFLATED)
    return True
except Exception as e:
    logger.error("Error occurred during mkzip: %s" % e)
    return False
finally:
    if close:
        archive.close()
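For instance (paths are hypothetical):

import zipfile

# Zip a directory and a file into a new archive.
mkzip('backup.zip', ['docs/', 'README.md'])

# Or append to an archive object you manage yourself; mkzip leaves it open.
with zipfile.ZipFile('backup.zip', 'a', allowZip64=True) as zf:
    mkzip(zf, 'CHANGELOG.md', mode='a')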
<SYSTEM_TASK:> Runs Django migrate command. <END_TASK> <USER_TASK:> Description: def migrate(*argv) -> bool: """ Runs Django migrate command. :return: always ``True`` """
wf('Applying migrations... ', False)
execute_from_command_line(['./manage.py', 'migrate'] + list(argv))
wf('[+]\n')
return True
<SYSTEM_TASK:> Formats a response from a WSGI app to handle any RDF graphs <END_TASK> <USER_TASK:> Description: def output(self, output, accepts, set_http_code, set_content_type): """ Formats a response from a WSGI app to handle any RDF graphs. If a view function returns a single RDF graph, serialize it based on the Accept header. If it's not an RDF graph, return it without any special handling. """
graph = Decorator._get_graph(output)
if graph is not None:
    # decide the format
    output_mimetype, output_format = self.format_selector.decide(accepts, graph.context_aware)

    # requested content couldn't find anything
    if output_mimetype is None:
        set_http_code("406 Not Acceptable")
        return ['406 Not Acceptable'.encode('utf-8')]

    # explicitly mark text mimetypes as utf-8
    if 'text' in output_mimetype:
        output_mimetype = output_mimetype + '; charset=utf-8'

    # format the new response
    serialized = graph.serialize(format=output_format)
    set_content_type(output_mimetype)
    return [serialized]
else:
    return output
<SYSTEM_TASK:> Wraps a WSGI application to return formatted RDF graphs <END_TASK> <USER_TASK:> Description: def decorate(self, app): """ Wraps a WSGI application to return formatted RDF graphs. Uses content negotiation to serialize the graph to the client-preferred format. Passes other content through unmodified. """
from functools import wraps

@wraps(app)
def decorated(environ, start_response):
    # capture any start_response from the app
    app_response = {}
    app_response['status'] = "200 OK"
    app_response['headers'] = []
    app_response['written'] = BytesIO()

    def custom_start_response(status, headers, *args, **kwargs):
        app_response['status'] = status
        app_response['headers'] = headers
        app_response['args'] = args
        app_response['kwargs'] = kwargs
        return app_response['written'].write

    returned = app(environ, custom_start_response)

    # callbacks from the serialization
    def set_http_code(status):
        app_response['status'] = str(status)

    def set_header(header, value):
        app_response['headers'] = [(h, v) for (h, v) in app_response['headers']
                                   if h.lower() != header.lower()]
        app_response['headers'].append((header, value))

    def set_content_type(content_type):
        set_header('Content-Type', content_type)

    # do the serialization
    accept = environ.get('HTTP_ACCEPT', '')
    new_return = self.output(returned, accept, set_http_code, set_content_type)

    # set the Vary header
    vary_headers = (v for (h, v) in app_response['headers'] if h.lower() == 'vary')
    vary_elements = list(itertools.chain(*[v.split(',') for v in vary_headers]))
    vary_elements = list(set([v.strip() for v in vary_elements]))
    if '*' not in vary_elements and 'accept' not in (v.lower() for v in vary_elements):
        vary_elements.append('Accept')
    set_header('Vary', ', '.join(vary_elements))

    # pass on the result to the parent WSGI server
    parent_writer = start_response(app_response['status'],
                                   app_response['headers'],
                                   *app_response.get('args', []),
                                   **app_response.get('kwargs', {}))
    written = app_response['written'].getvalue()
    if len(written) > 0:
        parent_writer(written)
    return new_return

return decorated
<SYSTEM_TASK:> Detect a handler and return its wanted signal name. <END_TASK> <USER_TASK:> Description: def is_handler(cls, name, value): """Detect a handler and return its wanted signal name."""
signal_name = False
config = None
if callable(value) and hasattr(value, SPEC_CONTAINER_MEMBER_NAME):
    spec = getattr(value, SPEC_CONTAINER_MEMBER_NAME)
    if spec['kind'] == 'handler':
        signal_name = spec['name']
        config = spec['config']
return signal_name, config
<SYSTEM_TASK:> For all of the names build a ChainMap containing a map for every <END_TASK> <USER_TASK:> Description: def _build_inheritance_chain(cls, bases, *names, merge=False): """For all of the names build a ChainMap containing a map for every base class."""
result = []
for name in names:
    maps = []
    for base in bases:
        bmap = getattr(base, name, None)
        if bmap is not None:
            assert isinstance(bmap, (dict, ChainMap))
            if len(bmap):
                if isinstance(bmap, ChainMap):
                    maps.extend(bmap.maps)
                else:
                    maps.append(bmap)
    result.append(ChainMap({}, *maps))
if merge:
    result = [dict(map) for map in result]
if len(names) == 1:
    return result[0]
return result
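The ChainMap layering means a lookup hits the fresh front dict first, then each base class map in order; a rough illustration with made-up classes:

from collections import ChainMap

class BaseA:  # hypothetical bases carrying per-class signal maps
    _signals = {'ready': 'sig_a'}

class BaseB:
    _signals = {'done': 'sig_b'}

merged = ChainMap({}, BaseA._signals, BaseB._signals)
merged['ready'], merged['done']  # -> ('sig_a', 'sig_b'), without copying either dict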
<SYSTEM_TASK:> For every marked handler, see if there is a suitable signal. If <END_TASK> <USER_TASK:> Description: def _check_local_handlers(cls, signals, handlers, namespace, configs): """For every marked handler, see if there is a suitable signal. If not, raise an error."""
for aname, sig_name in handlers.items():
    # WARN: this code doesn't take in account the case where a new
    # method with the same name of an handler in a base class is
    # present in this class but it isn't an handler (so the handler
    # with the same name should be removed from the handlers)
    if sig_name not in signals:
        disable_check = configs[aname].get('disable_check', False)
        if not disable_check:
            raise SignalError("Cannot find a signal named '%s'" % sig_name)
<SYSTEM_TASK:> Returns the handlers registered at class level. <END_TASK> <USER_TASK:> Description: def _get_class_handlers(cls, signal_name, instance): """Returns the handlers registered at class level. """
handlers = cls._signal_handlers_sorted[signal_name]
return [getattr(instance, hname) for hname in handlers]
<SYSTEM_TASK:> Sort class defined handlers to give precedence to those declared at <END_TASK> <USER_TASK:> Description: def _sort_handlers(cls, signals, handlers, configs): """Sort class defined handlers to give precedence to those declared at lower level. ``config`` can contain two keys ``begin`` or ``end`` that will further reposition the handler at the two extremes. """
def macro_precedence_sorter(flags, hname):
    """The default is to sort 'bottom_up', with lower level getting
    executed first, but sometimes you need them reversed."""
    data = configs[hname]
    topdown_sort = SignalOptions.SORT_TOPDOWN in flags
    if topdown_sort:
        level = levels_count - 1 - data['level']
    else:
        level = data['level']
    if 'begin' in data:
        return (-1, level, hname)
    elif 'end' in data:
        return (1, level, hname)
    else:
        return (0, level, hname)

levels_count = len(handlers.maps)
per_signal = defaultdict(list)
for level, m in enumerate(reversed(handlers.maps)):
    for hname, sig_name in m.items():
        sig_handlers = per_signal[sig_name]
        if hname not in sig_handlers:
            configs[hname]['level'] = level
            sig_handlers.append(hname)

for sig_name, sig_handlers in per_signal.items():
    if sig_name in signals:  # it may be on a mixin
        flags = signals[sig_name].flags
        sig_handlers.sort(key=partial(macro_precedence_sorter, flags))
return per_signal
<SYSTEM_TASK:> Calculate per-instance signals and handlers. <END_TASK> <USER_TASK:> Description: def instance_signals_and_handlers(cls, instance): """Calculate per-instance signals and handlers."""
isignals = cls._signals.copy()
ihandlers = cls._build_instance_handler_mapping(
    instance,
    cls._signal_handlers
)
return isignals, ihandlers
<SYSTEM_TASK:> Return summary rows for `rows`. <END_TASK> <USER_TASK:> Description: def summarize(self, rows): """Return summary rows for `rows`. Parameters ---------- rows : list of dicts Normalized rows to summarize. Returns ------- A list of summary rows. Each row is a tuple where the first item is the data and the second is a dict of keyword arguments that can be passed to StyleFields.render. """
columns = list(rows[0].keys())
agg_styles = {c: self.style[c]["aggregate"]
              for c in columns if "aggregate" in self.style[c]}

summaries = {}
for col, agg_fn in agg_styles.items():
    lgr.debug("Summarizing column %r with %r", col, agg_fn)
    colvals = filter(lambda x: not isinstance(x, Nothing),
                     (row[col] for row in rows))
    summaries[col] = agg_fn(list(colvals))

# The rest is just restructuring the summaries into rows that are
# compatible with pyout.Content. Most the complexity below comes from
# the fact that a summary function is allowed to return either a single
# item or a list of items.
maxlen = max(len(v) if isinstance(v, list) else 1
             for v in summaries.values())

summary_rows = []
for rowidx in range(maxlen):
    sumrow = {}
    for column, values in summaries.items():
        if isinstance(values, list):
            if rowidx >= len(values):
                continue
            sumrow[column] = values[rowidx]
        elif rowidx == 0:
            sumrow[column] = values
    for column in columns:
        if column not in sumrow:
            sumrow[column] = ""
    summary_rows.append((sumrow,
                         {"style": self.style.get("aggregate_"),
                          "adopt": False}))
return summary_rows
<SYSTEM_TASK:> Do writer-specific setup. <END_TASK> <USER_TASK:> Description: def _init(self, style, streamer, processors=None): """Do writer-specific setup. Parameters ---------- style : dict Style, as passed to __init__. streamer : interface.Stream A stream interface that takes __init__'s `stream` and `interactive` arguments into account. processors : field.StyleProcessors, optional A writer-specific processors instance. Defaults to field.PlainProcessors(). """
self._stream = streamer
if streamer.interactive:
    if streamer.supports_updates:
        self.mode = "update"
    else:
        self.mode = "incremental"
else:
    self.mode = "final"

if style and "width_" not in style and self._stream.width:
    style["width_"] = self._stream.width
self._content = ContentWithSummary(
    StyleFields(style, processors or PlainProcessors()))
<SYSTEM_TASK:> A list of unique IDs used to identify a row. <END_TASK> <USER_TASK:> Description: def ids(self): """A list of unique IDs used to identify a row. If not explicitly set, it defaults to the first column name. """
if self._ids is None:
    if self._columns:
        if isinstance(self._columns, OrderedDict):
            return [list(self._columns.keys())[0]]
        return [self._columns[0]]
else:
    return self._ids
<SYSTEM_TASK:> Acquire and release the lock around output calls. <END_TASK> <USER_TASK:> Description: def _write_lock(self): """Acquire and release the lock around output calls. This should allow multiple threads or processes to write output reliably. Code that modifies the `_content` attribute should also do so within this context. """
if self._lock:
    lgr.debug("Acquiring write lock")
    self._lock.acquire()
try:
    yield
finally:
    if self._lock:
        lgr.debug("Releasing write lock")
        self._lock.release()
<SYSTEM_TASK:> Start running `callables` asynchronously. <END_TASK> <USER_TASK:> Description: def _start_callables(self, row, callables): """Start running `callables` asynchronously. """
id_vals = {c: row[c] for c in self.ids}

def callback(tab, cols, result):
    if isinstance(result, Mapping):
        pass
    elif isinstance(result, tuple):
        result = dict(zip(cols, result))
    elif len(cols) == 1:
        # Don't bother raising an exception if cols != 1
        # because it would be lost in the thread.
        result = {cols[0]: result}
    result.update(id_vals)
    tab._write(result)

if self._pool is None:
    self._pool = Pool()
if self._lock is None:
    self._lock = multiprocessing.Lock()

for cols, fn in callables:
    cb_func = partial(callback, self, cols)
    gen = None
    if inspect.isgeneratorfunction(fn):
        gen = fn()
    elif inspect.isgenerator(fn):
        gen = fn
    if gen:
        def callback_for_each():
            for i in gen:
                cb_func(i)
        self._pool.apply_async(callback_for_each)
    else:
        self._pool.apply_async(fn, callback=cb_func)
<SYSTEM_TASK:>
Get a listing of all tables
<END_TASK>
<USER_TASK:>
Description:
def tables(self):
    """ Get a listing of all tables
        - if schema specified on connect, return unqualified table names in
          that schema
        - if no schema specified on connect, return all tables, with schema
          prefixes
    """
if self.schema: return self.tables_in_schema(self.schema) else: tables = [] for schema in self.schemas: tables = tables + [ schema + "." + t for t in self.tables_in_schema(schema) ] return tables
<SYSTEM_TASK:> Modify table and field name variables in a sql string with a dict. <END_TASK> <USER_TASK:> Description: def build_query(self, sql, lookup): """ Modify table and field name variables in a sql string with a dict. This seems to be discouraged by psycopg2 docs but it makes small adjustments to large sql strings much easier, making prepped queries much more versatile. USAGE sql = 'SELECT $myInputField FROM $myInputTable' lookup = {'myInputField':'customer_id', 'myInputTable':'customers'} sql = db.build_query(sql, lookup) """
for key, val in six.iteritems(lookup): sql = sql.replace("$" + key, val) return sql
<SYSTEM_TASK:> Parse schema qualified table name <END_TASK> <USER_TASK:> Description: def parse_table_name(self, table): """Parse schema qualified table name """
if "." in table: schema, table = table.split(".") else: schema = None return (schema, table)
<SYSTEM_TASK:> Loads a table. Returns None if the table does not already exist in db <END_TASK> <USER_TASK:> Description: def load_table(self, table): """Loads a table. Returns None if the table does not already exist in db """
table = self._valid_table_name(table) schema, table = self.parse_table_name(table) if not schema: schema = self.schema tables = self.tables else: tables = self.tables_in_schema(schema) if table in tables: return Table(self, schema, table) else: return None
<SYSTEM_TASK:> Return the query string with parameters added <END_TASK> <USER_TASK:> Description: def mogrify(self, sql, params): """Return the query string with parameters added """
conn = self.engine.raw_connection() cursor = conn.cursor() return cursor.mogrify(sql, params)
<SYSTEM_TASK:> Just a pointer to engine.execute <END_TASK> <USER_TASK:> Description: def execute(self, sql, params=None): """Just a pointer to engine.execute """
# wrap in a transaction to ensure things are committed # https://github.com/smnorris/pgdata/issues/3 with self.engine.begin() as conn: result = conn.execute(sql, params) return result
<SYSTEM_TASK:> Create specified schema if it does not already exist <END_TASK> <USER_TASK:> Description: def create_schema(self, schema): """Create specified schema if it does not already exist """
if schema not in self.schemas: sql = "CREATE SCHEMA " + schema self.execute(sql)
<SYSTEM_TASK:> Drop specified schema <END_TASK> <USER_TASK:> Description: def drop_schema(self, schema, cascade=False): """Drop specified schema """
if schema in self.schemas: sql = "DROP SCHEMA " + schema if cascade: sql = sql + " CASCADE" self.execute(sql)
<SYSTEM_TASK:> Load a layer to provided pgdata database connection using OGR2OGR <END_TASK> <USER_TASK:> Description: def ogr2pg( self, in_file, in_layer=None, out_layer=None, schema="public", s_srs=None, t_srs="EPSG:3005", sql=None, dim=2, cmd_only=False, index=True ): """ Load a layer to provided pgdata database connection using OGR2OGR -sql option is like an ESRI where_clause or the ogr2ogr -where option, but to increase flexibility, it is in SQLITE dialect: SELECT * FROM <in_layer> WHERE <sql> """
# if no layer name is provided, use the name of the input file
if not in_layer:
    in_layer = os.path.splitext(os.path.basename(in_file))[0]
if not out_layer:
    out_layer = in_layer.lower()
command = [
    "ogr2ogr",
    "-t_srs", t_srs,
    "-f", "PostgreSQL",
    "PG:host={h} user={u} dbname={db} password={pwd}".format(
        h=self.host, u=self.user, db=self.database, pwd=self.password
    ),
    "-lco", "OVERWRITE=YES",
    "-overwrite",
    "-lco", "SCHEMA={schema}".format(schema=schema),
    "-lco", "GEOMETRY_NAME=geom",
    "-dim", "{d}".format(d=dim),
    "-nlt", "PROMOTE_TO_MULTI",
    "-nln", out_layer,
    in_file,
]

if sql:
    command.append("-sql")
    command.append("SELECT * FROM {} WHERE {}".format(in_layer, sql))
    command.append("-dialect")
    command.append("SQLITE")

# only add the input layer name if sql is not included (it gets ignored)
if not sql:
    command.append(in_layer)

if s_srs:
    command.append("-s_srs")
    command.append(s_srs)

if not index:
    command.append("-lco")
    command.append("SPATIAL_INDEX=NO")

if cmd_only:
    return " ".join(command)
else:
    subprocess.run(command)
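A hypothetical invocation (the file name, layer name, and filter column are made up; it assumes ogr2ogr is on PATH and `db` holds valid connection parameters):

    db.ogr2pg(
        "parks.shp",
        out_layer="parks_municipal",
        schema="staging",
        sql="park_type = 'municipal'",  # wrapped as SELECT * FROM parks WHERE ...
    )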
<SYSTEM_TASK:>
Execute a command with arguments and wait for output.
<END_TASK>
<USER_TASK:>
Description:
def execute_and_report(command, *args, **kwargs):
    """Execute a command with arguments and wait for output.
    If execution was successful the function will return True;
    if not, it will log the output using standard logging and return False.
    """
logging.info("Execute: %s %s" % (command, " ".join(args))) try: status, out, err = execute(command, *args, **kwargs) if status == 0: logging.info( "%s Finished successfully. Exit Code: 0.", os.path.basename(command), ) return True else: try: logging.error( "%s failed! Exit Code: %s\nOut: %s\nError: %s", os.path.basename(command), status, out, err, ) except Exception as e: # This fails when some non ASCII characters are returned # from the application logging.error( "%s failed [%s]! Exit Code: %s\nOut: %s\nError: %s", e, os.path.basename(command), status, repr(out), repr(err), ) return False except Exception: logging.exception( "%s failed! Exception thrown!", os.path.basename(command) ) return False
<SYSTEM_TASK:> Read public keys from specified user's authorized_keys file. <END_TASK> <USER_TASK:> Description: def read_authorized_keys(username=None): """Read public keys from specified user's authorized_keys file. args: username (str): username. returns: list: Authorised keys for the specified user. """
authorized_keys_path = '{0}/.ssh/authorized_keys'.format(os.path.expanduser('~{0}'.format(username))) rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH) tmp_authorized_keys_path = '/tmp/authorized_keys_{0}_{1}'.format(username, rnd_chars) authorized_keys = list() copy_result = execute_command( shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), authorized_keys_path, tmp_authorized_keys_path)))) result_message = copy_result[0][1].decode('UTF-8') if 'you must have a tty to run sudo' in result_message: # pragma: no cover raise OSError("/etc/sudoers is blocked sudo. Remove entry: 'Defaults requiretty'.") elif 'No such file or directory' not in result_message: execute_command(shlex.split(str('{0} chmod 755 {1}'.format(sudo_check(), tmp_authorized_keys_path)))) with open(tmp_authorized_keys_path) as keys_file: for key in keys_file: authorized_keys.append(PublicKey(raw=key)) execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_authorized_keys_path)))) return authorized_keys
<SYSTEM_TASK:>
Write public keys back to authorized_keys file. Create keys directory if it doesn't already exist.
<END_TASK>
<USER_TASK:>
Description:
def write_authorized_keys(user=None):
    """Write public keys back to authorized_keys file. Create keys directory
    if it doesn't already exist.

    args:
        user (User): Instance of User containing keys to write.
    """
authorized_keys = list() authorized_keys_dir = '{0}/.ssh'.format(os.path.expanduser('~{0}'.format(user.name))) rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH) authorized_keys_path = '{0}/authorized_keys'.format(authorized_keys_dir) tmp_authorized_keys_path = '/tmp/authorized_keys_{0}_{1}'.format(user.name, rnd_chars) if not os.path.isdir(authorized_keys_dir): execute_command(shlex.split(str('{0} mkdir -p {1}'.format(sudo_check(), authorized_keys_dir)))) for key in user.public_keys: authorized_keys.append('{0}\n'.format(key.raw)) with open(tmp_authorized_keys_path, mode=text_type('w+')) as keys_file: keys_file.writelines(authorized_keys) execute_command( shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), tmp_authorized_keys_path, authorized_keys_path)))) execute_command(shlex.split(str('{0} chown -R {1} {2}'.format(sudo_check(), user.name, authorized_keys_dir)))) execute_command(shlex.split(str('{0} chmod 700 {1}'.format(sudo_check(), authorized_keys_dir)))) execute_command(shlex.split(str('{0} chmod 600 {1}'.format(sudo_check(), authorized_keys_path)))) execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_authorized_keys_path))))
<SYSTEM_TASK:> Return a base64 encoding of the key. <END_TASK> <USER_TASK:> Description: def b64encoded(self): """Return a base64 encoding of the key. returns: str: base64 encoding of the public key """
if self._b64encoded: return text_type(self._b64encoded).strip("\r\n") else: return base64encode(self.raw)
<SYSTEM_TASK:>
Reads a value of ``variable_path`` from consul kv storage.
<END_TASK>
<USER_TASK:>
Description:
def get(self,
        variable_path: str,
        default: t.Optional[t.Any] = None,
        coerce_type: t.Optional[t.Type] = None,
        coercer: t.Optional[t.Callable] = None,
        **kwargs):
    """
    Reads a value of ``variable_path`` from consul kv storage.

    :param variable_path: a delimiter-separated path to a nested value
    :param default: default value if there's no object by specified path
    :param coerce_type: cast a type of a value to a specified one
    :param coercer: perform a type casting with specified callback
    :param kwargs: additional arguments the inherited parser may need
    :return: value or default
    :raises config.exceptions.KVStorageKeyDoestNotExist: if specified
     ``endpoint`` does not exist
    :raises config.exceptions.KVStorageValueIsEmpty: if specified
     ``endpoint`` does not contain a config
    """
return self.inner_parser.get( variable_path, default=default, coerce_type=coerce_type, coercer=coercer, **kwargs, )
<SYSTEM_TASK:> Get the serialization format for the given mimetype <END_TASK> <USER_TASK:> Description: def get_serialize_format(self, mimetype): """ Get the serialization format for the given mimetype """
format = self.formats.get(mimetype, None) if format is None: format = formats.get(mimetype, None) return format
<SYSTEM_TASK:> Returns whether this client's Accept header indicates <END_TASK> <USER_TASK:> Description: def wants_rdf(self, accepts): """ Returns whether this client's Accept header indicates that the client wants to receive RDF """
mimetype = mimeparse.best_match(all_mimetypes + self.all_mimetypes + [WILDCARD], accepts) return mimetype and mimetype != WILDCARD
<SYSTEM_TASK:>
Sends an HTTP request and implements retry logic.
<END_TASK>
<USER_TASK:>
Description:
async def send_http(session, method, url, *,
                    retries=1,
                    interval=1,
                    backoff=2,
                    http_status_codes_to_retry=HTTP_STATUS_CODES_TO_RETRY,
                    fn=lambda x: x,
                    **kwargs):
    """
    Sends an HTTP request and implements retry logic.

    Arguments:
        session (obj): A client aiohttp session object
        method (str): Method to use
        url (str): URL for the request
        retries (int): Number of times to retry in case of failure
        interval (float): Time to wait before retries
        backoff (int): Multiply interval by this factor after each failure
        http_status_codes_to_retry (List[int]): List of status codes to retry
        fn (Callable[[x], x]): Function to call on successful connection
    """
backoff_interval = interval raised_exc = None attempt = 0 if method not in ['get', 'patch', 'post']: raise ValueError if retries == -1: # -1 means retry indefinitely attempt = -1 elif retries == 0: # Zero means don't retry attempt = 1 else: # any other value means retry N times attempt = retries + 1 while attempt != 0: if raised_exc: logger.error('Caught "%s" url:%s method:%s, remaining tries %s, ' 'sleeping %.2fsecs', raised_exc, method.upper(), url, attempt, backoff_interval) await asyncio.sleep(backoff_interval) # bump interval for the next possible attempt backoff_interval *= backoff # logger.info('sending %s %s with %s', method.upper(), url, kwargs) try: async with await getattr(session, method)(url, **kwargs) as response: if response.status == 200: return await fn(response) elif response.status in http_status_codes_to_retry: logger.error( 'Received invalid response code:%s error:%s' ' response:%s url:%s', response.status, '', response.reason, url) raise aiohttp.ClientResponseError( code=response.status, message=response.reason, request_info=response.request_info, history=response.history) else: raise FailedRequest( code=response.status, message='Non-retryable response code', raised='aiohttp.ClientResponseError', url=url) except aiohttp.ClientError as exc: try: code = exc.code except AttributeError: code = '' raised_exc = FailedRequest(code=code, message=exc, raised='%s.%s' % (exc.__class__.__module__, exc.__class__.__qualname__), url=url) except asyncio.TimeoutError as exc: raised_exc = FailedRequest(code='', message='asyncio.TimeoutError', raised='%s.%s' % (exc.__class__.__module__, exc.__class__.__qualname__), url=url) else: raised_exc = None break attempt -= 1 if raised_exc: raise raised_exc
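A usage sketch, assuming an aiohttp session and a JSON endpoint (the URL is hypothetical). Note that `fn` receives the response object and its result is awaited, so returning the `resp.json()` coroutine works:

    import aiohttp

    async def fetch_json(url):
        async with aiohttp.ClientSession() as session:
            return await send_http(session, 'get', url,
                                   retries=3, interval=0.5, backoff=2,
                                   fn=lambda resp: resp.json())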
<SYSTEM_TASK:> Check I can identify the necessary commands for managing users. <END_TASK> <USER_TASK:> Description: def get_missing_commands(_platform): """Check I can identify the necessary commands for managing users."""
missing = list() if _platform in ('Linux', 'OpenBSD'): if not LINUX_CMD_USERADD: missing.append('useradd') if not LINUX_CMD_USERMOD: missing.append('usermod') if not LINUX_CMD_USERDEL: missing.append('userdel') if not LINUX_CMD_GROUP_ADD: missing.append('groupadd') if not LINUX_CMD_GROUP_DEL: missing.append('groupdel') elif _platform == 'FreeBSD': # pragma: FreeBSD # FREEBSD COMMANDS if not FREEBSD_CMD_PW: missing.append('pw') if missing: print('\nMISSING = {0}'.format(missing)) return missing
<SYSTEM_TASK:> Execute a command and return the stdout and stderr. <END_TASK> <USER_TASK:> Description: def execute_command(command=None): """Execute a command and return the stdout and stderr."""
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
process.wait()
return (stdout, stderr), process.returncode
<SYSTEM_TASK:> Return base64 encoded representation of a string. <END_TASK> <USER_TASK:> Description: def base64encode(_input=None): """Return base64 encoded representation of a string."""
if PY2: # pragma: no cover return base64.b64encode(_input) elif PY3: # pragma: no cover if isinstance(_input, bytes): return base64.b64encode(_input).decode('UTF-8') elif isinstance(_input, str): return base64.b64encode(bytearray(_input, encoding='UTF-8')).decode('UTF-8')
<SYSTEM_TASK:> Take a base64 encoded string and return the decoded string. <END_TASK> <USER_TASK:> Description: def base64decode(_input=None): """Take a base64 encoded string and return the decoded string."""
missing_padding = len(_input) % 4
if missing_padding:
    # pad only when the length is not already a multiple of 4
    _input += '=' * (4 - missing_padding)
if PY2:  # pragma: no cover
    return base64.decodestring(_input)
elif PY3:  # pragma: no cover
    if isinstance(_input, bytes):
        return base64.b64decode(_input).decode('UTF-8')
    elif isinstance(_input, str):
        return base64.b64decode(bytearray(_input, encoding='UTF-8')).decode('UTF-8')
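A quick round trip on Python 3 to sanity-check the pair of helpers above:

    encoded = base64encode('pgdata')      # -> 'cGdkYXRh'
    assert base64decode(encoded) == 'pgdata'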
<SYSTEM_TASK:>
Read the entries from the sudoers file.
<END_TASK>
<USER_TASK:>
Description:
def read_sudoers():
    """ Read the entries from the sudoers file.

    returns:
        list: stripped, non-comment lines from /etc/sudoers.
    """
sudoers_path = '/etc/sudoers' rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH) tmp_sudoers_path = '/tmp/sudoers_{0}'.format(rnd_chars) sudoers_entries = list() copy_result = execute_command( shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), sudoers_path, tmp_sudoers_path)))) result_message = copy_result[0][1].decode('UTF-8') if 'No such file or directory' not in result_message: execute_command(shlex.split(str('{0} chmod 755 {1}'.format(sudo_check(), tmp_sudoers_path)))) with open(tmp_sudoers_path) as tmp_sudoers_file: for line in tmp_sudoers_file: stripped = line.strip().replace(os.linesep, '') if stripped and not stripped.startswith('#'): sudoers_entries.append(stripped) execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_sudoers_path)))) return sudoers_entries
<SYSTEM_TASK:>
Write sudoers entry.
<END_TASK>
<USER_TASK:>
Description:
def write_sudoers_entry(username=None, sudoers_entry=None):
    """Write a sudoers entry for the specified user.

    args:
        username (str): username.
        sudoers_entry (str): the sudoers entry to write for the user.
    """
sudoers_path = '/etc/sudoers' rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH) tmp_sudoers_path = '/tmp/sudoers_{0}'.format(rnd_chars) execute_command( shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), sudoers_path, tmp_sudoers_path)))) execute_command( shlex.split(str('{0} chmod 777 {1}'.format(sudo_check(), tmp_sudoers_path)))) with open(tmp_sudoers_path, mode=text_type('r')) as tmp_sudoers_file: sudoers_entries = tmp_sudoers_file.readlines() sudoers_output = list() for entry in sudoers_entries: if entry and not entry.startswith(username): sudoers_output.append(entry) if sudoers_entry: sudoers_output.append('{0} {1}'.format(username, sudoers_entry)) sudoers_output.append('\n') with open(tmp_sudoers_path, mode=text_type('w+')) as tmp_sudoers_file: tmp_sudoers_file.writelines(sudoers_output) sudoers_check_result = execute_command( shlex.split(str('{0} {1} -cf {2}'.format(sudo_check(), LINUX_CMD_VISUDO, tmp_sudoers_path)))) if sudoers_check_result[1] > 0: raise ValueError(sudoers_check_result[0][1]) execute_command( shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), tmp_sudoers_path, sudoers_path)))) execute_command(shlex.split(str('{0} chown root:root {1}'.format(sudo_check(), sudoers_path)))) execute_command(shlex.split(str('{0} chmod 440 {1}'.format(sudo_check(), sudoers_path)))) execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_sudoers_path))))
<SYSTEM_TASK:>
Find the sudoers entry in the sudoers file for the specified user.
<END_TASK>
<USER_TASK:>
Description:
def get_sudoers_entry(username=None, sudoers_entries=None):
    """ Find the sudoers entry in the sudoers file for the specified user.

    args:
        username (str): username.
        sudoers_entries (list): list of lines from the sudoers file.

    returns:
        str: sudoers entry for the specified user.
    """
for entry in sudoers_entries: if entry.startswith(username): return entry.replace(username, '').strip()
<SYSTEM_TASK:>
r"""Prepend or append a string to the current documentation of the function.
<END_TASK>
<USER_TASK:>
Description:
def docstring(documentation, prepend=False, join=""):
    r"""Prepend or append a string to the current documentation of the function.

    This decorator should be robust even if ``func.__doc__`` is None
    (for example, if -OO was passed to the interpreter).

    Usage::

        @docstring('Appended this line')
        def func():
            "This docstring will have a line below."
            pass

        >>> print(func.__doc__)
        This docstring will have a line below.

        Appended this line

    Args:
        documentation (str): Documentation string that should be added,
            appended or prepended to the current documentation string.
        prepend (bool): Prepend the documentation string to the current
            documentation if ``True`` else append. default=``False``
        join (str): String used to separate docstrings. default=``''``
    """
def decorator(func): current = (func.__doc__ if func.__doc__ else "").strip() doc = documentation.strip() new = "\n".join( [doc, join, current] if prepend else [current, join, doc] ) lines = len(new.strip().splitlines()) if lines == 1: # If it's a one liner keep it that way and strip whitespace func.__doc__ = new.strip() else: # Else strip whitespace from the beginning and add a newline # at the end func.__doc__ = new.strip() + "\n" return func return decorator
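Complementing the append example in the docstring, a prepend sketch (the function name and text are invented):

    @docstring('Deprecated: use new_func() instead.', prepend=True, join='\n')
    def old_func():
        "Original description."

    # old_func.__doc__ now starts with the deprecation notice,
    # followed by the original description.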
<SYSTEM_TASK:> Runs gunicorn with a specified config. <END_TASK> <USER_TASK:> Description: def run_gunicorn(application: WSGIHandler, gunicorn_module_name: str = 'gunicorn_prod'): """ Runs gunicorn with a specified config. :param application: Django uwsgi application :param gunicorn_module_name: gunicorn settings module name :return: ``Application().run()`` """
from gunicorn.app.base import Application class DjangoApplication(Application): def init(self, parser, opts, args): cfg = self.get_config_from_module_name(gunicorn_module_name) clean_cfg = {} for k, v in cfg.items(): # Ignore unknown names if k not in self.cfg.settings: continue clean_cfg[k.lower()] = v return clean_cfg def load(self) -> WSGIHandler: return application return DjangoApplication().run()
<SYSTEM_TASK:> Print single line to console with ability to colorize parts of it. <END_TASK> <USER_TASK:> Description: def _colorize_single_line(line, regexp, color_def): """Print single line to console with ability to colorize parts of it."""
match = regexp.match(line) groupdict = match.groupdict() groups = match.groups() if not groupdict: # no named groups, just colorize whole line color = color_def[0] dark = color_def[1] cprint("%s\n" % line, color, fg_dark=dark) else: rev_groups = {v: k for k, v in groupdict.items()} for part in groups: if part in rev_groups and rev_groups[part] in color_def: group_name = rev_groups[part] cprint( part, color_def[group_name][0], fg_dark=color_def[group_name][1], ) else: cprint(part) cprint("\n")
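A hypothetical call with named groups (assumes the `cprint` helper used above is in scope; the pattern and colors are made up):

    import re

    line = "ERROR disk almost full"
    regexp = re.compile(r"(?P<level>\w+) (?P<msg>.*)")
    color_def = {"level": ("red", False), "msg": ("white", True)}
    _colorize_single_line(line, regexp, color_def)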
<SYSTEM_TASK:> Clear last N lines of terminal output. <END_TASK> <USER_TASK:> Description: def clear_last_lines(self, n): """Clear last N lines of terminal output. """
self.term.stream.write( self.term.move_up * n + self.term.clear_eos) self.term.stream.flush()
<SYSTEM_TASK:> Move back N lines and overwrite line with `text`. <END_TASK> <USER_TASK:> Description: def overwrite_line(self, n, text): """Move back N lines and overwrite line with `text`. """
with self._moveback(n): self.term.stream.write(text)
<SYSTEM_TASK:> Move back N lines in terminal. <END_TASK> <USER_TASK:> Description: def move_to(self, n): """Move back N lines in terminal. """
self.term.stream.write(self.term.move_up * n)
<SYSTEM_TASK:>
Tries to read a ``variable_path`` from each of the passed parsers.
<END_TASK>
<USER_TASK:>
Description:
def get(self,
        variable_path: str,
        default: t.Optional[t.Any] = None,
        coerce_type: t.Optional[t.Type] = None,
        coercer: t.Optional[t.Callable] = None,
        required: bool = False,
        **kwargs):
    """
    Tries to read a ``variable_path`` from each of the passed parsers.
    It stops as soon as a read is successful and returns the retrieved value.
    If none of the parsers contain a value for the specified path it
    returns ``default``.

    :param variable_path: a path to variable in config
    :param default: a default value if ``variable_path`` is not present anywhere
    :param coerce_type: cast a result to a specified type
    :param coercer: perform the type casting with specified callback
    :param required: raise ``RequiredValueIsEmpty`` if no ``default`` and no result
    :param kwargs: additional options to all parsers
    :return: **the first successfully read** value from the list of parser
     instances or ``default``
    :raises config.exceptions.RequiredValueIsEmpty: if nothing is read,
     ``required`` flag is set, and there's no ``default`` specified
    """
for p in self.parsers: try: val = p.get( variable_path, default=self.sentinel, coerce_type=coerce_type, coercer=coercer, **kwargs ) if val != self.sentinel: self.enqueue(variable_path, p, val) return val except Exception as e: if not self.silent: raise if self.suppress_logs: continue self.logger.error('Parser {0} cannot get key `{1}`: {2}'.format( p.__class__.__name__, variable_path, str(e) )) self.enqueue(variable_path, value=default) if not default and required: raise exceptions.RequiredValueIsEmpty( 'No default provided and no value read for `{0}`'.format(variable_path)) return default
<SYSTEM_TASK:> Prepares a string with pretty printed config read queue. <END_TASK> <USER_TASK:> Description: def format_config_read_queue(self, use_color: bool = False, max_col_width: int = 50) -> str: """ Prepares a string with pretty printed config read queue. :param use_color: use terminal colors :param max_col_width: limit column width, ``50`` by default :return: """
try: from terminaltables import SingleTable except ImportError: import warnings warnings.warn('Cannot display config read queue. Install terminaltables first.') return '' col_names_order = ['path', 'value', 'type', 'parser'] pretty_bundles = [[self._colorize(name, name.capitalize(), use_color=use_color) for name in col_names_order]] for config_read_item in self.config_read_queue: pretty_attrs = [ config_read_item.variable_path, config_read_item.value, config_read_item.type, config_read_item.parser_name ] pretty_attrs = [self._pformat(pa, max_col_width) for pa in pretty_attrs] if config_read_item.is_default: pretty_attrs[0] = '*' + pretty_attrs[0] if use_color: pretty_attrs = [self._colorize(column_name, pretty_attr, use_color=use_color) for column_name, pretty_attr in zip(col_names_order, pretty_attrs)] pretty_bundles.append(pretty_attrs) table = SingleTable(pretty_bundles) table.title = self._colorize('title', 'CONFIG READ QUEUE', use_color=use_color) table.justify_columns[0] = 'right' # table.inner_row_border = True return str(table.table)
<SYSTEM_TASK:> Given a Flask response, find the rdflib Graph <END_TASK> <USER_TASK:> Description: def get_graph(cls, response): """ Given a Flask response, find the rdflib Graph """
if cls.is_graph(response): # single graph object return response if hasattr(response, '__getitem__'): # indexable tuple if len(response) > 0 and \ cls.is_graph(response[0]): # graph object return response[0]
<SYSTEM_TASK:> Replace the rdflib Graph in a Flask response <END_TASK> <USER_TASK:> Description: def replace_graph(cls, response, serialized): """ Replace the rdflib Graph in a Flask response """
if cls.is_graph(response): # single graph object return serialized if hasattr(response, '__getitem__'): # indexable tuple if len(response) > 0 and \ cls.is_graph(response[0]): # graph object return (serialized,) + response[1:] return response
<SYSTEM_TASK:> Perform encryption of provided data. <END_TASK> <USER_TASK:> Description: def encrypt(data, digest=True): """Perform encryption of provided data."""
alg = get_best_algorithm() enc = implementations["encryption"][alg]( data, implementations["get_key"]() ) return "%s$%s" % (alg, (_to_hex_digest(enc) if digest else enc))
<SYSTEM_TASK:> Decrypt provided data. <END_TASK> <USER_TASK:> Description: def decrypt(data, digest=True): """Decrypt provided data."""
alg, _, data = data.rpartition("$") if not alg: return data data = _from_hex_digest(data) if digest else data try: return implementations["decryption"][alg]( data, implementations["get_key"]() ) except KeyError: raise CryptError("Can not decrypt key for algorithm: %s" % alg)
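A round trip, assuming the `implementations` registry used above has been populated with at least one algorithm and a key provider:

    token = encrypt("secret-value")        # e.g. "<alg>$<hex digest>"
    assert decrypt(token) == "secret-value"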
<SYSTEM_TASK:> Return a dict mapping column name to type for all columns in table <END_TASK> <USER_TASK:> Description: def column_types(self): """Return a dict mapping column name to type for all columns in table """
column_types = {} for c in self.sqla_columns: column_types[c.name] = c.type return column_types
<SYSTEM_TASK:> Drop the table from the database <END_TASK> <USER_TASK:> Description: def drop(self): """Drop the table from the database """
if self._is_dropped is False: self.table.drop(self.engine) self._is_dropped = True
<SYSTEM_TASK:> Provides an interface to traverse nested dict values by dot-separated paths. Wrapper for ``dpath.util.get``. <END_TASK> <USER_TASK:> Description: def dotkey(obj: dict, path: str, default=None, separator='.'): """ Provides an interface to traverse nested dict values by dot-separated paths. Wrapper for ``dpath.util.get``. :param obj: dict like ``{'some': {'value': 3}}`` :param path: ``'some.value'`` :param separator: ``'.'`` or ``'/'`` or whatever :param default: default for KeyError :return: dict value or default value """
try: return get(obj, path, separator=separator) except KeyError: return default
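Illustrative calls:

    dotkey({'some': {'value': 3}}, 'some.value')        # -> 3
    dotkey({'some': {'value': 3}}, 'some.other', 42)    # -> 42 (default)
    dotkey({'a': {'b': 1}}, 'a/b', separator='/')       # -> 1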
<SYSTEM_TASK:>
Converts a given string ``val`` into a boolean.
<END_TASK>
<USER_TASK:>
Description:
def coerce_str_to_bool(val: t.Union[str, int, bool, None], strict: bool = False) -> bool:
    """
    Converts a given string ``val`` into a boolean.

    :param val: any string representation of boolean
    :param strict: raise ``ValueError`` if ``val`` does not look like a boolean-like object
    :return: ``True`` if ``val`` is truthy, ``False`` otherwise.
    :raises ValueError: if ``strict`` specified and ``val`` got anything except
     ``['', 0, 1, true, false, on, off, True, False]``
    """
if isinstance(val, str): val = val.lower() flag = ENV_STR_BOOL_COERCE_MAP.get(val, None) if flag is not None: return flag if strict: raise ValueError('Unsupported value for boolean flag: `%s`' % val) return bool(val)
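Behaviour for a few inputs, based on the supported values listed in the docstring:

    coerce_str_to_bool('on')                # -> True
    coerce_str_to_bool('false')             # -> False
    coerce_str_to_bool('yes')               # -> True (falls back to bool('yes'))
    coerce_str_to_bool('yes', strict=True)  # raises ValueError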
<SYSTEM_TASK:> Reads env ``DOCKERIZED`` variable as a boolean. <END_TASK> <USER_TASK:> Description: def is_dockerized(flag_name: str = 'DOCKERIZED', strict: bool = False): """ Reads env ``DOCKERIZED`` variable as a boolean. :param flag_name: environment variable name :param strict: raise a ``ValueError`` if variable does not look like a normal boolean :return: ``True`` if has truthy ``DOCKERIZED`` env, ``False`` otherwise """
return env_bool_flag(flag_name, strict=strict)
<SYSTEM_TASK:> Reads env ``PRODUCTION`` variable as a boolean. <END_TASK> <USER_TASK:> Description: def is_production(flag_name: str = 'PRODUCTION', strict: bool = False): """ Reads env ``PRODUCTION`` variable as a boolean. :param flag_name: environment variable name :param strict: raise a ``ValueError`` if variable does not look like a normal boolean :return: ``True`` if has truthy ``PRODUCTION`` env, ``False`` otherwise """
return env_bool_flag(flag_name, strict=strict)
<SYSTEM_TASK:>
Run the given code, supplied as a list of lines, line by line with printing, and return the variable 'ans'.
<END_TASK>
<USER_TASK:>
Description:
def runcode(code):
    """Run the given code, supplied as a list of lines, line by line with printing, and return the variable 'ans'."""
for line in code: print('# '+line) exec(line,globals()) print('# return ans') return ans
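For example:

    result = runcode(["x = 2", "ans = x * 21"])
    # prints each line prefixed with '# ', then '# return ans'; result == 42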