Columns: text (string, lengths 78 to 104k) · score (float64, range 0 to 0.18)
def obj_to_string(obj):
    '''Render an object into a unicode string if possible'''
    if not obj:
        return None
    elif isinstance(obj, bytes):
        return obj.decode('utf-8')
    elif isinstance(obj, basestring):
        return obj
    elif is_lazy_string(obj):
        return obj.value
    elif hasattr(obj, '__html__'):
        return obj.__html__()
    else:
        return str(obj)
0.002506
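The sample above is Python 2 code: `basestring` does not exist in Python 3, and `is_lazy_string` is a helper from the surrounding module. A hedged Python 3 rendering of the same cascade, keeping that helper as an assumption:

def obj_to_string_py3(obj):
    '''Render an object into a str if possible (Python 3 sketch).'''
    if not obj:
        return None
    if isinstance(obj, bytes):
        return obj.decode('utf-8')   # bytes -> str
    if isinstance(obj, str):
        return obj                   # replaces the Python 2 basestring check
    if is_lazy_string(obj):          # assumed helper from the original module
        return obj.value
    if hasattr(obj, '__html__'):
        return obj.__html__()
    return str(obj)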
def is_purine(nucleotide, allow_extended_nucleotides=False):
    """Is the nucleotide a purine"""
    if not allow_extended_nucleotides and nucleotide not in STANDARD_NUCLEOTIDES:
        raise ValueError(
            "{} is a non-standard nucleotide, neither purine or pyrimidine".format(nucleotide))
    return nucleotide in PURINE_NUCLEOTIDES
0.008696
def should_try_kafka_again(error):
    """Determine if the error means to retry or fail, True to retry."""
    msg = 'Unable to retrieve'
    return isinstance(error, KafkaException) and str(error).startswith(msg)
0.004695
def sign_data(self, name, hash_input, key_version=None, hash_algorithm="sha2-256", context="",
              prehashed=False, signature_algorithm="pss", mount_point=DEFAULT_MOUNT_POINT):
    """Return the cryptographic signature of the given data using the named key and the specified hash algorithm.

    The key must be of a type that supports signing.

    Supported methods:
        POST: /{mount_point}/sign/{name}(/{hash_algorithm}). Produces: 200 application/json

    :param name: Specifies the name of the encryption key to use for signing. This is specified as part of the URL.
    :type name: str | unicode
    :param hash_input: Specifies the base64 encoded input data.
    :type hash_input: str | unicode
    :param key_version: Specifies the version of the key to use for signing. If not set, uses the latest version.
        Must be greater than or equal to the key's min_encryption_version, if set.
    :type key_version: int
    :param hash_algorithm: Specifies the hash algorithm to use for supporting key types (notably, not including
        ed25519 which specifies its own hash algorithm). This can also be specified as part of the URL.
        Currently-supported algorithms are: sha2-224, sha2-256, sha2-384, sha2-512
    :type hash_algorithm: str | unicode
    :param context: Base64 encoded context for key derivation. Required if key derivation is enabled; currently only
        available with ed25519 keys.
    :type context: str | unicode
    :param prehashed: Set to true when the input is already hashed. If the key type is rsa-2048 or rsa-4096, then the
        algorithm used to hash the input should be indicated by the hash_algorithm parameter. Just as the value to
        sign should be the base64-encoded representation of the exact binary data you want signed, when set, input is
        expected to be base64-encoded binary hashed data, not hex-formatted. (As an example, on the command line, you
        could generate a suitable input via openssl dgst -sha256 -binary | base64.)
    :type prehashed: bool
    :param signature_algorithm: When using a RSA key, specifies the RSA signature algorithm to use for signing.
        Supported signature types are: pss, pkcs1v15
    :type signature_algorithm: str | unicode
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: requests.Response
    """
    if hash_algorithm not in transit_constants.ALLOWED_HASH_DATA_ALGORITHMS:
        error_msg = 'invalid hash_algorithm argument provided "{arg}", supported types: "{allowed_types}"'
        raise exceptions.ParamValidationError(error_msg.format(
            arg=hash_algorithm,
            allowed_types=', '.join(transit_constants.ALLOWED_HASH_DATA_ALGORITHMS),
        ))
    if signature_algorithm not in transit_constants.ALLOWED_SIGNATURE_ALGORITHMS:
        error_msg = 'invalid signature_algorithm argument provided "{arg}", supported types: "{allowed_types}"'
        raise exceptions.ParamValidationError(error_msg.format(
            arg=signature_algorithm,
            allowed_types=', '.join(transit_constants.ALLOWED_SIGNATURE_ALGORITHMS),
        ))
    params = {
        'input': hash_input,
        'key_version': key_version,
        'hash_algorithm': hash_algorithm,
        'context': context,
        'prehashed': prehashed,
        'signature_algorithm': signature_algorithm,
    }
    api_path = '/v1/{mount_point}/sign/{name}'.format(
        mount_point=mount_point,
        name=name,
    )
    response = self._adapter.post(
        url=api_path,
        json=params,
    )
    return response.json()
0.006366
def run(self):
    '''Run loop'''
    logger.info("result_worker starting...")
    while not self._quit:
        try:
            task, result = self.inqueue.get(timeout=1)
            self.on_result(task, result)
        except Queue.Empty as e:
            continue
        except KeyboardInterrupt:
            break
        except AssertionError as e:
            logger.error(e)
            continue
        except Exception as e:
            logger.exception(e)
            continue
    logger.info("result_worker exiting...")
0.003322
def map(self, callback):
    """
    Run a map over each of the item.

    :param callback: The map function
    :type callback: callable

    :rtype: Collection
    """
    return self.__class__(list(map(callback, self.items)))
0.007813
def _match_abbrev(s, wordmap):
    """_match_abbrev(s : string, wordmap : {string : Option}) -> string

    Return the string key in 'wordmap' for which 's' is an unambiguous
    abbreviation. If 's' is found to be ambiguous or doesn't match any of
    'words', raise BadOptionError.
    """
    # Is there an exact match?
    if s in wordmap:
        return s
    else:
        # Isolate all words with s as a prefix.
        possibilities = [word for word in wordmap.keys()
                         if word.startswith(s)]
        # No exact match, so there had better be just one possibility.
        if len(possibilities) == 1:
            return possibilities[0]
        elif not possibilities:
            raise BadOptionError(s)
        else:
            # More than one possible completion: ambiguous prefix.
            possibilities.sort()
            raise AmbiguousOptionError(s, possibilities)
0.001104
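A self-contained sketch of the same unambiguous-prefix idea, with a plain ValueError standing in for optparse's BadOptionError/AmbiguousOptionError (those exception classes are the only thing dropped here):

def match_abbrev(s, words):
    # Exact match wins outright.
    if s in words:
        return s
    # Otherwise the prefix must select exactly one candidate.
    hits = sorted(w for w in words if w.startswith(s))
    if len(hits) == 1:
        return hits[0]
    raise ValueError('ambiguous or unknown option %r: %s' % (s, hits))

print(match_abbrev('--verb', ['--verbose', '--version']))  # --verbose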
def bulk_write(self, requests, **kwargs):
    """
    See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.bulk_write

    Warning: this is wrapped in mongo_retry, and is therefore potentially
    unsafe if the write you want to execute isn't idempotent.
    """
    self._arctic_lib.check_quota()
    return self._collection.bulk_write(requests, **kwargs)
0.009281
def remove(self, name):
    """Remove workspace from config file."""
    if not (self.exists(name)):
        raise ValueError("Workspace `%s` doesn't exist." % name)
    self.config["workspaces"].pop(name, 0)
    self.config.write()
0.007874
def get_worksheet(self, index):
    """Returns a worksheet with specified `index`.

    :param index: An index of a worksheet. Indexes start from zero.
    :type index: int

    :returns: an instance of :class:`gspread.models.Worksheet`
              or `None` if the worksheet is not found.

    Example. To get first worksheet of a spreadsheet:

    >>> sht = client.open('My fancy spreadsheet')
    >>> worksheet = sht.get_worksheet(0)
    """
    sheet_data = self.fetch_sheet_metadata()
    try:
        properties = sheet_data['sheets'][index]['properties']
        return Worksheet(self, properties)
    except (KeyError, IndexError):
        return None
0.002766
def from_value(cls, value):
    """This is how an instance is created when we read a MatlabObject
    from a MAT file.
    """
    instance = OctaveUserClass.__new__(cls)
    instance._address = '%s_%s' % (instance._name, id(instance))
    instance._ref().push(instance._address, value)
    return instance
0.005935
def htmlFromThing(thing, title):
    """create pretty formatted HTML from a things dictionary."""
    try:
        thing2 = copy.copy(thing)
    except:
        print("crashed copying the thing! I can't document it.")
        return False
    stuff = analyzeThing(thing2)
    names2 = list(stuff.keys())
    for i, name in enumerate(names2):
        if name.startswith("_"):
            names2[i] = "zzzzzzzzzz" + name
    html = """<html><head><style>
    body {font-family: courier, monospace;}
    .name {font-weight: bold;}
    .type {font-style: italic; font-family: serif; color: #AAA;}
    .desc {}
    .itemEval {background-color: #DDFFDD;}
    .itemEvalFail {}
    table {font-size: .8em; margin-top: 20px; border-collapse: collapse;}
    tr {border: 1px solid #CCC; vertical-align: text-top;}
    td {padding: 2px 10px 2px 10px;}
    .credits {text-align: center; opacity: 0.5; margin-top: 50px; font-size: .8em; font-family: sans-serif;}
    </style></head><body>"""
    if title:
        html += '<span style="color: #CCC;">title: </span>%s<br>' % title
    textTitle = ""
    textType = ""
    try:
        textTitle = websafe(str(thing))
        textType = websafe(type(thing).__name__)
    except:
        pass
    html += '<span style="color: #CCC;">value: </span>%s<br>' % textTitle
    html += '<span style="color: #CCC;">&nbsp;type: </span>%s<br>' % textType
    html += '<table cellpadding=3 align="center">'
    html += '<tr style="background-color: #000; color: #FFF; font-weight: bold;">'
    html += '<td>property</td><td>type</td><td>value</td>'
    html += '<td>evaluated (without arguments)</td></tr>'
    for name in sorted(names2):
        if name.startswith("zzzzzzzzzz"):
            name = name[10:]
        itemName = str(name)
        itemType = websafe(stuff[name][0])
        itemStr = websafe(stuff[name][1])
        itemEval = websafe(stuff[name][2])
        color = "DDDDFF"
        color2 = ""
        if "method" in itemType:
            itemName += "()"
            color = "FFDDDD"
        if itemName.startswith("_"):
            color = "EEEEEE"
        if itemStr.startswith("&lt;") and not ", " in itemStr:
            itemStr = """<span style="color: #CCC; font-family: serif; font-style: italic;">%s</span>""" % itemStr
        else:
            color2 = "DDFFDD"
        if itemEval == "":
            itemEval = "FAILED TO EVALUATE"
        html += '<tr>'
        html += '<td class="name" style="background-color: #%s;">%s</td>' % (color, itemName)
        html += '<td class="type">%s</td>' % (itemType)
        html += '<td class="itemStr" style="background-color: #%s;">%s</td>' % (color2, itemStr)
        if itemEval == "FAILED TO EVALUATE":
            html += '<td class="itemEvalFail"></td>'
        else:
            html += '<td class="itemEval">%s</td>' % (itemEval)
        html += '</tr>'
    dt = datetime.datetime.now()
    html += """</table><p class="credits">
    page automatically generated by
    <a href="https://pypi.python.org/pypi/webinspect/">webinspect</a>
    (version %s) %s</p>
    </body></html>""" % (__version__, dt.strftime("at %I:%M %p on %B %d, %Y"))
    return html
0.018507
def _start_element(self, tag, attrs, end):
    """
    Print HTML element with end string.

    @param tag: tag name
    @type tag: string
    @param attrs: tag attributes
    @type attrs: dict
    @param end: either > or />
    @type end: string
    @return: None
    """
    tag = tag.encode(self.encoding, "ignore")
    self.fd.write("<%s" % tag.replace("/", ""))
    for key, val in attrs.items():
        key = key.encode(self.encoding, "ignore")
        if val is None:
            self.fd.write(" %s" % key)
        else:
            val = val.encode(self.encoding, "ignore")
            self.fd.write(' %s="%s"' % (key, quote_attrval(val)))
    self.fd.write(end)
0.003989
def ec2eq(self):
    """Convert ecliptic coordinates to equatorial coordinates"""
    import math
    #from numpy.matlib import sin, cos, arcsin, arctan2
    from math import sin, cos
    from math import asin as arcsin
    from math import atan2 as arctan2
    from math import acos as arccos
    eb = self.eb
    el = self.el
    ob = math.radians(23.439281)
    dec = arcsin(sin(eb)*cos(ob) + cos(eb)*sin(ob)*sin(el))
    sra = (sin(dec)*cos(ob) - sin(eb)) / (cos(dec)*sin(ob))
    cra = cos(el)*cos(eb)/cos(dec)
    if sra < 1 and sra > -1:
        sa = arcsin(sra)
    else:
        sa = 0
    ca = arccos(cra)
    tsa = sa
    tca = ca
    if tsa < 0:
        ca = 2.0*math.pi - ca
    if tca >= math.pi/2.0:
        sa = math.pi - sa
    if ca >= math.pi*2.0:
        ca = ca - math.pi*2.0
    self.tsa = sra
    self.tca = cra
    self.ra = ca
    self.dec = dec
0.023711
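In conventional notation, the routine above implements the standard ecliptic-to-equatorial transform, with $\beta$ (eb) the ecliptic latitude, $\lambda$ (el) the ecliptic longitude, $\varepsilon = 23.439281^\circ$ the obliquity, and $(\alpha, \delta)$ right ascension and declination:

$$\sin\delta = \sin\beta\cos\varepsilon + \cos\beta\sin\varepsilon\sin\lambda$$
$$\sin\alpha = \frac{\sin\delta\cos\varepsilon - \sin\beta}{\cos\delta\,\sin\varepsilon}, \qquad \cos\alpha = \frac{\cos\lambda\cos\beta}{\cos\delta}$$

The if/else branches then resolve the quadrant of $\alpha$ from the signs of $\sin\alpha$ and $\cos\alpha$.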
def unset(ctx, key):
    '''Removes the given key.'''
    file = ctx.obj['FILE']
    quote = ctx.obj['QUOTE']
    success, key = unset_key(file, key, quote)
    if success:
        click.echo("Successfully removed %s" % key)
    else:
        exit(1)
0.004
def _pool_put(pool_semaphore, tasks, put_to_pool_in, pool_size, id_self,
              is_stopping):
    """
    (internal) Intended to be run in a separate thread. Feeds tasks into
    to the pool whenever semaphore permits. Finishes if self._stopping is set.
    """
    log.debug('NuMap(%s) started pool_putter.' % id_self)
    last_tasks = {}
    for task in xrange(tasks.lenght):
        last_tasks[task] = -1
    stop_tasks = []
    while True:
        # are we stopping the Weaver?
        if is_stopping():
            log.debug('NuMap(%s) pool_putter has been told to stop.' %
                      id_self)
            tasks.stop()
        # try to get a task
        try:
            log.debug('NuMap(%s) pool_putter waits for next task.' %
                      id_self)
            task = tasks.next()
            log.debug('NuMap(%s) pool_putter received next task.' % id_self)
        except StopIteration:
            # Weaver raised a StopIteration
            stop_task = tasks.i  # current task
            log.debug('NuMap(%s) pool_putter caught StopIteration from task %s.' %
                      (id_self, stop_task))
            if stop_task not in stop_tasks:
                # task raised stop for the first time.
                log.debug('NuMap(%s) pool_putter task %s first-time finished.' %
                          (id_self, stop_task))
                stop_tasks.append(stop_task)
                pool_semaphore.acquire()
                log.debug('NuMap(%s) pool_putter sends a sentinel for task %s.' %
                          (id_self, stop_task))
                put_to_pool_in((stop_task, None, last_tasks[stop_task]))
            if len(stop_tasks) == tasks.lenght:
                log.debug('NuMap(%s) pool_putter sent sentinels for all tasks.' %
                          id_self)
                # all tasks have been stopped
                for _worker in xrange(pool_size):
                    put_to_pool_in(None)
                log.debug('NuMap(%s) pool_putter sent sentinel for %s workers' %
                          (id_self, pool_size))
                # this kills the pool_putter
                break
            # multiple StopIterations for a tasks are ignored.
            # This is for stride.
            continue
        # got task
        last_tasks[tasks.i] = task[-1][0]  # last valid result
        log.debug('NuMap(%s) pool_putter waits for semaphore for task %s' %
                  (id_self, task))
        pool_semaphore.acquire()
        log.debug('NuMap(%s) pool_putter gets semaphore for task %s' %
                  (id_self, task))
        #gc.disable()
        put_to_pool_in(task)
        #gc.enable()
        log.debug('NuMap(%s) pool_putter submits task %s to worker.' %
                  (id_self, task))
    log.debug('NuMap(%s) pool_putter returns' % id_self)
0.013354
def add_permissions(self, grp_name, resource, permissions):
    """
    Add additional permissions for the group associated with the resource.

    Args:
        grp_name (string): Name of group.
        resource (intern.resource.boss.Resource): Identifies which data model object to operate on.
        permissions (list): List of permissions to add to the given resource

    Raises:
        requests.HTTPError on failure.
    """
    self.project_service.set_auth(self._token_project)
    self.project_service.add_permissions(grp_name, resource, permissions)
0.004831
def dafgn(lenout=_default_len_out):
    """
    Return (get) the name for the current array in the current DAF.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafgn_c.html

    :param lenout: Length of array name string.
    :type lenout: int
    :return: Name of current array.
    :rtype: str
    """
    lenout = ctypes.c_int(lenout)
    name = stypes.stringToCharP(lenout)
    libspice.dafgn_c(lenout, name)
    return stypes.toPythonString(name)
0.002155
def zip(self, *args):
    """
    Zip together multiple lists into a single array -- elements that share
    an index go together.
    """
    args = list(args)
    args.insert(0, self.obj)
    maxLen = _(args).chain().collect(lambda x, *args: len(x)).max().value()
    for i, v in enumerate(args):
        l = len(args[i])
        if l < maxLen:
            args[i]
            for x in range(maxLen - l):
                args[i].append(None)
    return self._wrap(zip(*args))
0.005714
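For comparison, the standard library gives the same pad-with-None behavior directly; a quick standalone check (not part of the wrapped collection class above):

from itertools import zip_longest

# Shorter iterables are padded with fillvalue, matching the manual padding loop above.
print(list(zip_longest([1, 2, 3], ['a', 'b'], fillvalue=None)))
# [(1, 'a'), (2, 'b'), (3, None)]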
def is_valid_dir(path):
    '''
    Returns True if provided directory exists and is a directory, or
    False otherwise.
    '''
    return os.path.exists(path) and os.path.isdir(path)
0.011561
def dimension_values(self, dimension, expanded=True, flat=True):
    """Return the values along the requested dimension.

    Args:
        dimension: The dimension to return values for
        expanded (bool, optional): Whether to expand values
        flat (bool, optional): Whether to flatten array

    Returns:
        NumPy array of values along the requested dimension
    """
    index = self.get_dimension_index(dimension)
    if index == 0:
        return np.array([self.x])
    elif index == 1:
        return np.array([self.y])
    else:
        return super(Arrow, self).dimension_values(dimension)
0.002981
def pg_ctl(self, cmd, *args, **kwargs):
    """Builds and executes pg_ctl command

    :returns: `!True` when return_code == 0, otherwise `!False`"""
    pg_ctl = [self._pgcommand('pg_ctl'), cmd]
    return subprocess.call(pg_ctl + ['-D', self._data_dir] + list(args), **kwargs) == 0
0.01
def expand_dataset(X, y_proba, factor=10, random_state=None, extra_arrays=None):
    """
    Convert a dataset with float multiclass probabilities to a dataset
    with indicator probabilities by duplicating X rows and sampling
    true labels.
    """
    rng = check_random_state(random_state)
    extra_arrays = extra_arrays or []
    n_classes = y_proba.shape[1]
    classes = np.arange(n_classes, dtype=int)
    for el in zip(X, y_proba, *extra_arrays):
        x, probs = el[0:2]
        rest = el[2:]
        for label in rng.choice(classes, size=factor, p=probs):
            yield (x, label) + rest
0.003295
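A hedged usage sketch of the generator above; the toy arrays are invented for illustration, and check_random_state is the sklearn.utils helper the sample already relies on:

import numpy as np
from sklearn.utils import check_random_state

X = np.array([[0.0], [1.0]])
y_proba = np.array([[0.9, 0.1], [0.2, 0.8]])
# Each row of X is emitted `factor` times, with a label sampled from its probability row.
rows = list(expand_dataset(X, y_proba, factor=3, random_state=0))
print(len(rows))  # 6 rows: 2 inputs x factor 3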
def prep_folder(self, seq):
    """Take in a sequence string and prepares the folder for the I-TASSER run."""
    itasser_dir = op.join(self.root_dir, self.id)

    if not op.exists(itasser_dir):
        os.makedirs(itasser_dir)

    tmp = {self.id: seq}
    fasta.write_fasta_file_from_dict(indict=tmp,
                                     outname='seq',
                                     outext='.fasta',
                                     outdir=itasser_dir)
    return itasser_dir
0.00566
def dup_token(th):
    '''
    duplicate the access token
    '''
    # TODO: is `duplicate_token` the same?
    sec_attr = win32security.SECURITY_ATTRIBUTES()
    sec_attr.bInheritHandle = True
    return win32security.DuplicateTokenEx(
        th,
        win32security.SecurityImpersonation,
        win32con.MAXIMUM_ALLOWED,
        win32security.TokenPrimary,
        sec_attr,
    )
0.002611
def get_serializer_class(self, action=None):
    """
    Return the serializer class depending on request method.
    Attribute of proper serializer should be defined.
    """
    if action is not None:
        return getattr(self, '%s_serializer_class' % action)
    else:
        return super(GenericViewSet, self).get_serializer_class()
0.005391
def _generic_model(self, z3_model):
    """
    Converts a Z3 model to a name->primitive dict.
    """
    model = {}
    for m_f in z3_model:
        n = _z3_decl_name_str(m_f.ctx.ctx, m_f.ast).decode()
        m = m_f()
        me = z3_model.eval(m)
        model[n] = self._abstract_to_primitive(me.ctx.ctx, me.ast)
    return model
0.007958
def show(self, clustered=False, ax_carpet=None,
         label_x_axis='time point', label_y_axis='voxels/ROI'):
    """Displays the carpet in the given axis.

    Parameters
    ----------
    clustered : bool, optional
        Flag to indicate whether to show the clustered/reduced carpet or the original.
        You must run .cluster_rows_in_roi() before trying to show clustered carpet.
    ax_carpet : Axis, optional
        handle to a valid matplotlib Axis
    label_x_axis : str
        String label for the x-axis of the carpet
    label_y_axis : str
        String label for the y-axis of the carpet

    Returns
    -------
    ax_carpet : Axis
        handle to axis where carpet is shown
    """
    if clustered is True and self._carpet_clustered is False:
        print('You must run .cluster_rows_in_roi() '
              'before being able to show clustered carpet!')
        return

    if ax_carpet is None:
        self.ax_carpet = plt.gca()
    else:
        if not isinstance(ax_carpet, Axes):
            raise ValueError('Input must be a valid matplotlib Axis!')
        self.ax_carpet = ax_carpet
    plt.sca(self.ax_carpet)
    self.fig = plt.gcf()

    # vmin/vmax are controlled, because we rescale all to [0, 1]
    self.imshow_params_carpet = dict(interpolation='none', cmap='gray',
                                     aspect='auto', origin='lower', zorder=1)
    # should we control vmin=0.0, vmax=1.0 ??

    if not clustered:
        self.carpet_handle = self.ax_carpet.imshow(self.carpet,
                                                   **self.imshow_params_carpet)
    else:
        self.carpet_handle = self.ax_carpet.imshow(self.clustered_carpet,
                                                   **self.imshow_params_carpet)

    # TODO decorating axes with labels
    self.ax_carpet.set(xlabel=label_x_axis, ylabel=label_y_axis,
                       frame_on=False)
    self.ax_carpet.set_ylim(auto=True)

    return self.ax_carpet
0.004975
def git_clone(sub_repo, branch, commit=None, cwd=None, no_submodules=False):
    '''
    This clone mimicks the way Travis-CI clones a project's repo. So far
    Travis-CI is the most limiting in the sense of only fetching partial
    history of the repo.
    '''
    if not cwd:
        cwd = os.getcwd()
    root_dir = os.path.join(cwd, 'boostorg', sub_repo)
    if not os.path.exists(os.path.join(root_dir, '.git')):
        utils.check_call("git", "clone",
                         "--depth=1",
                         "--branch=%s" % (branch),
                         "https://github.com/boostorg/%s.git" % (sub_repo),
                         root_dir)
        os.chdir(root_dir)
    else:
        os.chdir(root_dir)
        utils.check_call("git", "pull",
                         # "--depth=1",  # Can't do depth as we get merge errors.
                         "--quiet", "--no-recurse-submodules")
    if commit:
        utils.check_call("git", "checkout", "-qf", commit)
    if os.path.exists(os.path.join('.git', 'modules')):
        if sys.platform == 'win32':
            utils.check_call('dir', os.path.join('.git', 'modules'))
        else:
            utils.check_call('ls', '-la', os.path.join('.git', 'modules'))
    if not no_submodules:
        utils.check_call("git", "submodule", "--quiet", "update",
                         "--quiet", "--init", "--recursive",
                         )
        utils.check_call("git", "submodule", "--quiet", "foreach", "git", "fetch")
    return root_dir
0.029489
def _initializeBucketMap(self, maxBuckets, offset):
    """
    Initialize the bucket map assuming the given number of maxBuckets.
    """
    # The first bucket index will be _maxBuckets / 2 and bucket indices will be
    # allowed to grow lower or higher as long as they don't become negative.
    # _maxBuckets is required because the current SDR Classifier assumes bucket
    # indices must be non-negative. This normally does not need to be changed
    # but if altered, should be set to an even number.
    self._maxBuckets = maxBuckets
    self.minIndex = self._maxBuckets / 2
    self.maxIndex = self._maxBuckets / 2

    # The scalar offset used to map scalar values to bucket indices. The middle
    # bucket will correspond to numbers in the range
    # [offset-resolution/2, offset+resolution/2).
    # The bucket index for a number x will be:
    # maxBuckets/2 + int( round( (x-offset)/resolution ) )
    self._offset = offset

    # This dictionary maps a bucket index into its bit representation
    # We initialize the class with a single bucket with index 0
    self.bucketMap = {}

    def _permutation(n):
        r = numpy.arange(n, dtype=numpy.uint32)
        self.random.shuffle(r)
        return r

    self.bucketMap[self.minIndex] = _permutation(self.n)[0:self.w]

    # How often we need to retry when generating valid encodings
    self.numTries = 0
0.002909
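The comments above spell out the scalar-to-bucket formula; a standalone sketch of just that mapping (the resolution, offset, and bucket-count values here are invented for illustration):

def bucket_index(x, offset=0.0, resolution=0.5, max_buckets=1000):
    # The middle bucket covers [offset - resolution/2, offset + resolution/2).
    return max_buckets // 2 + int(round((x - offset) / resolution))

print(bucket_index(0.0))  # 500, the middle bucket
print(bucket_index(1.0))  # 502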
def loads(content, ac_parser=None, ac_dict=None, ac_template=False,
          ac_context=None, **options):
    """
    :param content: Configuration file's content (a string)
    :param ac_parser: Forced parser type or ID or parser object
    :param ac_dict:
        callable (function or class) to make mapping object will be returned as
        a result or None. If not given or ac_dict is None, default mapping
        object used to store results is dict or
        :class:`collections.OrderedDict` if ac_ordered is True and selected
        backend can keep the order of items in mapping objects.
    :param ac_template: Assume configuration file may be a template file and
        try to compile it AAR if True
    :param ac_context: Context dict to instantiate template
    :param options:
        Optional keyword arguments. See also the description of 'options' in
        :func:`single_load` function.

    :return: Mapping object or any query result might be primitive objects
    :raises: ValueError, UnknownProcessorTypeError
    """
    if ac_parser is None:
        LOGGER.warning("ac_parser was not given but it's must to find correct "
                       "parser to load configurations from string.")
        return None

    psr = find(None, forced_type=ac_parser)
    schema = None
    ac_schema = options.get("ac_schema", None)
    if ac_schema is not None:
        options["ac_schema"] = None
        schema = loads(ac_schema, ac_parser=psr, ac_dict=ac_dict,
                       ac_template=ac_template, ac_context=ac_context,
                       **options)

    if ac_template:
        compiled = anyconfig.template.try_render(content=content,
                                                 ctx=ac_context, **options)
        if compiled is not None:
            content = compiled

    cnf = psr.loads(content, ac_dict=ac_dict, **options)
    cnf = _try_validate(cnf, schema, **options)
    return anyconfig.query.query(cnf, **options)
0.000508
def extract_helices_dssp(in_pdb):
    """Uses DSSP to find alpha-helices and extracts helices from a pdb file.

    Returns a length 3 list with a helix id, the chain id and a dict
    containing the coordinates of each residues CA.

    Parameters
    ----------
    in_pdb : string
        Path to a PDB file.
    """
    from ampal.pdb_parser import split_pdb_lines

    dssp_out = subprocess.check_output(
        [global_settings['dssp']['path'], in_pdb])
    helix = 0
    helices = []
    h_on = False
    for line in dssp_out.splitlines():
        dssp_line = line.split()
        try:
            if dssp_line[4] == 'H':
                if helix not in [x[0] for x in helices]:
                    helices.append(
                        [helix, dssp_line[2], {int(dssp_line[1]): None}])
                else:
                    helices[helix][2][int(dssp_line[1])] = None
                h_on = True
            else:
                if h_on:
                    helix += 1
                    h_on = False
        except IndexError:
            pass
    with open(in_pdb, 'r') as pdb:
        pdb_atoms = split_pdb_lines(pdb.read())
    for atom in pdb_atoms:
        for helix in helices:
            if (atom[2] == "CA") and (atom[5] == helix[1]) and (atom[6] in helix[2].keys()):
                helix[2][atom[6]] = tuple(atom[8:11])
    return helices
0.001463
def cookietostr(self):
    """Cookie values are bytes in Python 3. This function converts bytes to
    string with env.encoding (default: utf-8)."""
    self.cookies = dict((k, (v.decode(self.encoding) if not isinstance(v, str) else v))
                        for k, v in self.cookies.items())
    return self.cookies
0.016892
def fit(self, sequences, y=None):
    """Fit the kcenters clustering on the data

    Parameters
    ----------
    sequences : list of array-like, each of shape [sequence_length, n_features]
        A list of multivariate timeseries, or ``md.Trajectory``. Each
        sequence may have a different length, but they all must have the
        same number of features, or the same number of atoms if they are
        ``md.Trajectory``s.

    Returns
    -------
    self
    """
    MultiSequenceClusterMixin.fit(self, sequences)
    self.cluster_ids_ = self._split_indices(self.cluster_ids_)
    return self
0.004498
def get_raw(self, name=None):
    '''Shortcut for getting a :class:`~statsd.raw.Raw` instance

    :keyword name: See :func:`~statsd.client.Client.get_client`
    :type name: str
    '''
    return self.get_client(name=name, class_=statsd.Raw)
0.007605
def execute(tgt,
            fun,
            arg=(),
            timeout=None,
            tgt_type='glob',
            ret='',
            jid='',
            kwarg=None,
            **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Execute ``fun`` on all minions matched by ``tgt`` and ``tgt_type``.
    Parameter ``fun`` is the name of execution module function to call.

    This function should mainly be used as a helper for runner modules,
    in order to avoid redundant code.
    For example, when inside a runner one needs to execute a certain function
    on arbitrary groups of minions, only has to:

    .. code-block:: python

        ret1 = __salt__['salt.execute']('*', 'mod.fun')
        ret2 = __salt__['salt.execute']('my_nodegroup', 'mod2.fun2', tgt_type='nodegroup')

    It can also be used to schedule jobs directly on the master, for example:

    .. code-block:: yaml

        schedule:
            collect_bgp_stats:
                function: salt.execute
                args:
                    - edge-routers
                    - bgp.neighbors
                kwargs:
                    tgt_type: nodegroup
                days: 1
                returner: redis
    '''
    client = salt.client.get_local_client(__opts__['conf_file'])
    try:
        ret = client.cmd(tgt,
                         fun,
                         arg=arg,
                         timeout=timeout or __opts__['timeout'],
                         tgt_type=tgt_type,  # no warn_until, as this is introduced only in 2017.7.0
                         ret=ret,
                         jid=jid,
                         kwarg=kwarg,
                         **kwargs)
    except SaltClientError as client_error:
        log.error('Error while executing %s on %s (%s)', fun, tgt, tgt_type)
        log.error(client_error)
        return {}
    return ret
0.001605
def get_cookie_header(self, req):
    """
    :param req: object with httplib.Request interface

        Actually, it has to have `url` and `headers` attributes
    """
    mocked_req = MockRequest(req)
    self.cookiejar.add_cookie_header(mocked_req)
    return mocked_req.get_new_headers().get('Cookie')
0.006006
def latinize(mapping, bind, values):
    """ Transliterate a given string into the latin alphabet. """
    for v in values:
        if isinstance(v, six.string_types):
            v = transliterate(v)
        yield v
0.00463
def run(self):
    """Run robustness command."""
    reaction = self._get_objective()
    if not self._mm.has_reaction(reaction):
        self.fail('Specified biomass reaction is not in model: {}'.format(
            reaction))

    varying_reaction = self._args.varying
    if not self._mm.has_reaction(varying_reaction):
        self.fail('Specified varying reaction is not in model: {}'.format(
            varying_reaction))

    steps = self._args.steps
    if steps <= 0:
        self.argument_error('Invalid number of steps: {}\n'.format(steps))

    loop_removal = self._get_loop_removal_option()
    if loop_removal == 'tfba':
        solver = self._get_solver(integer=True)
    else:
        solver = self._get_solver()

    p = fluxanalysis.FluxBalanceProblem(self._mm, solver)
    if loop_removal == 'tfba':
        p.add_thermodynamic()

    try:
        p.check_constraints()
    except fluxanalysis.FluxBalanceError as e:
        self.report_flux_balance_error(e)

    # Determine minimum and maximum flux for varying reaction
    if self._args.maximum is None:
        p.maximize(varying_reaction)
        flux_max = p.get_flux(varying_reaction)
    else:
        flux_max = self._args.maximum

    if self._args.minimum is None:
        p.maximize({varying_reaction: -1})
        flux_min = p.get_flux(varying_reaction)
    else:
        flux_min = self._args.minimum

    if flux_min > flux_max:
        self.argument_error('Invalid flux range: {}, {}\n'.format(
            flux_min, flux_max))

    logger.info('Varying {} in {} steps between {} and {}'.format(
        varying_reaction, steps, flux_min, flux_max))

    start_time = time.time()

    handler_args = (
        self._mm, solver, loop_removal, self._args.all_reaction_fluxes)
    executor = self._create_executor(
        RobustnessTaskHandler, handler_args, cpus_per_worker=2)

    def iter_tasks():
        for i in range(steps):
            fixed_flux = flux_min + i*(flux_max - flux_min)/float(steps-1)
            constraint = varying_reaction, fixed_flux
            yield constraint, reaction

    # Run FBA on model at different fixed flux values
    with executor:
        for task, result in executor.imap_unordered(iter_tasks(), 16):
            (varying_reaction, fixed_flux), _ = task
            if result is None:
                logger.warning('No solution found for {} at {}'.format(
                    varying_reaction, fixed_flux))
            elif self._args.all_reaction_fluxes:
                for other_reaction in self._mm.reactions:
                    print('{}\t{}\t{}'.format(
                        other_reaction, fixed_flux,
                        result[other_reaction]))
            else:
                print('{}\t{}'.format(fixed_flux, result))

        executor.join()

    logger.info('Solving took {:.2f} seconds'.format(
        time.time() - start_time))
0.000636
def read(fnames, calculation_mode='', region_constraint='',
         ignore_missing_costs=(), asset_nodes=False, check_dupl=True,
         tagcol=None, by_country=False):
    """
    Call `Exposure.read(fname)` to get an :class:`Exposure` instance
    keeping all the assets in memory or
    `Exposure.read(fname, asset_nodes=True)` to get an iterator over
    Node objects (one Node for each asset).
    """
    if by_country:  # E??_ -> countrycode
        prefix2cc = countries.from_exposures(
            os.path.basename(f) for f in fnames)
    else:
        prefix = ''
    allargs = []
    tagcol = _minimal_tagcol(fnames, by_country)
    for i, fname in enumerate(fnames, 1):
        if by_country and len(fnames) > 1:
            prefix = prefix2cc['E%02d_' % i] + '_'
        elif len(fnames) > 1:
            prefix = 'E%02d_' % i
        else:
            prefix = ''
        allargs.append((fname, calculation_mode, region_constraint,
                        ignore_missing_costs, asset_nodes, check_dupl,
                        prefix, tagcol))
    exp = None
    for exposure in parallel.Starmap(
            Exposure.read_exp, allargs, distribute='no'):
        if exp is None:  # first time
            exp = exposure
            exp.description = 'Composite exposure[%d]' % len(fnames)
        else:
            assert exposure.cost_types == exp.cost_types
            assert exposure.occupancy_periods == exp.occupancy_periods
            assert (exposure.insurance_limit_is_absolute ==
                    exp.insurance_limit_is_absolute)
            assert exposure.retrofitted == exp.retrofitted
            assert exposure.area == exp.area
            exp.assets.extend(exposure.assets)
            exp.asset_refs.extend(exposure.asset_refs)
            exp.tagcol.extend(exposure.tagcol)
    exp.exposures = [os.path.splitext(os.path.basename(f))[0]
                     for f in fnames]
    return exp
0.001902
def browseprofilegui(profilelog):
    '''
    Browse interactively a profile log in GUI using RunSnakeRun and SquareMap
    '''
    # runsnakerun needs the wxPython lib; if it's not available then we can pass
    # if we don't want a GUI. RunSnakeRun is only used for GUI visualisation,
    # not for profiling (and you can still use pstats for console browsing).
    from runsnakerun import runsnake
    app = runsnake.RunSnakeRunApp(0)
    app.OnInit(profilelog)
    #app.OnInit()
    app.MainLoop()
0.008333
def list_resource_record_sets_by_zone_id_parser(e_root, connection, zone_id):
    """
    Parses the API responses for the
    :py:meth:`route53.connection.Route53Connection.list_resource_record_sets_by_zone_id`
    method.

    :param lxml.etree._Element e_root: The root node of the etree parsed
        response from the API.
    :param Route53Connection connection: The connection instance used to
        query the API.
    :param str zone_id: The zone ID of the HostedZone these rrsets belong to.
    :rtype: ResourceRecordSet
    :returns: A generator of fully formed ResourceRecordSet instances.
    """
    # The rest of the list pagination tags are handled higher up in the stack.
    # We'll just worry about the ResourceRecordSets tag, which has
    # ResourceRecordSet tags nested beneath it.
    e_rrsets = e_root.find('./{*}ResourceRecordSets')

    for e_rrset in e_rrsets:
        yield parse_rrset(e_rrset, connection, zone_id)
0.001057
def connect_xmlstream(
        jid,
        metadata,
        negotiation_timeout=60.,
        override_peer=[],
        loop=None,
        logger=logger):
    """
    Prepare and connect a :class:`aioxmpp.protocol.XMLStream` to a server
    responsible for the given `jid` and authenticate against that server
    using the SASL mechansims described in `metadata`.

    :param jid: Address of the user for which the connection is made.
    :type jid: :class:`aioxmpp.JID`
    :param metadata: Connection metadata for configuring the TLS usage.
    :type metadata: :class:`~.security_layer.SecurityLayer`
    :param negotiation_timeout: Timeout for each individual negotiation step.
    :type negotiation_timeout: :class:`float` in seconds
    :param override_peer: Sequence of connection options which take precedence
        over normal discovery methods.
    :type override_peer: sequence of (:class:`str`, :class:`int`,
        :class:`~.BaseConnector`) triples
    :param loop: asyncio event loop to use (defaults to current)
    :type loop: :class:`asyncio.BaseEventLoop`
    :param logger: Logger to use (defaults to module-wide logger)
    :type logger: :class:`logging.Logger`
    :raises ValueError: if the domain from the `jid` announces that XMPP is
        not supported at all.
    :raises aioxmpp.errors.TLSFailure: if all connection attempts fail and
        one of them is a :class:`~.TLSFailure`.
    :raises aioxmpp.errors.MultiOSError: if all connection attempts fail.
    :return: Transport, XML stream and the current stream features
    :rtype: tuple of (:class:`asyncio.BaseTransport`, :class:`~.XMLStream`,
        :class:`~.nonza.StreamFeatures`)

    The part of the `metadata` specifying the use of TLS is applied. If the
    security layer does not mandate TLS, the resulting XML stream may not be
    using TLS. TLS is used whenever possible.

    The connection options in `override_peer` are tried before any
    standardised discovery of connection options is made. Only if all of them
    fail, automatic discovery of connection options is performed.

    `loop` may be a :class:`asyncio.BaseEventLoop` to use. Defaults to the
    current event loop.

    If the domain from the `jid` announces that XMPP is not supported at all,
    :class:`ValueError` is raised. If no options are returned from
    :func:`discover_connectors` and `override_peer` is empty,
    :class:`ValueError` is raised, too.

    If all connection attempts fail, :class:`aioxmpp.errors.MultiOSError` is
    raised. The error contains one exception for each of the options
    discovered as well as the elements from `override_peer` in the order they
    were tried.

    A TLS problem is treated like any other connection problem and the other
    connection options are considered. However, if *all* connection options
    fail and the set of encountered errors includes a TLS error, the TLS
    error is re-raised instead of raising a
    :class:`aioxmpp.errors.MultiOSError`.

    Return a triple ``(transport, xmlstream, features)``. `transport` the
    underlying :class:`asyncio.Transport` which is used for the `xmlstream`
    :class:`~.protocol.XMLStream` instance. `features` is the
    :class:`aioxmpp.nonza.StreamFeatures` instance describing the features of
    the stream.

    .. versionadded:: 0.6

    .. versionchanged:: 0.8

       The explicit raising of TLS errors has been introduced. Before, TLS
       errors were treated like any other connection error, possibly masking
       configuration problems.
    """
    loop = asyncio.get_event_loop() if loop is None else loop

    options = list(override_peer)

    exceptions = []

    result = yield from _try_options(
        options,
        exceptions,
        jid, metadata, negotiation_timeout, loop, logger,
    )
    if result is not None:
        return result

    options = list((yield from discover_connectors(
        jid.domain,
        loop=loop,
        logger=logger,
    )))

    result = yield from _try_options(
        options,
        exceptions,
        jid, metadata, negotiation_timeout, loop, logger,
    )
    if result is not None:
        return result

    if not options and not override_peer:
        raise ValueError("no options to connect to XMPP domain {!r}".format(
            jid.domain
        ))

    for exc in exceptions:
        if isinstance(exc, errors.TLSFailure):
            raise exc

    raise errors.MultiOSError(
        "failed to connect to XMPP domain {!r}".format(jid.domain),
        exceptions
    )
0.000217
def contributionStatus(self):
    """gets the contribution status of a user"""
    import time
    url = "%s/contributors/%s/activeContribution" % (self.root,
                                                     quote(self.contributorUID))
    params = {
        "agolUserToken": self._agolSH.token,
        "f": "json"
    }
    res = self._get(url=url,
                    param_dict=params,
                    proxy_url=self._proxy_url,
                    proxy_port=self._proxy_port)
    if 'Status' in res and \
            res['Status'] == 'start':
        return True
    return False
0.013378
def check(somestr, check=STRING, interchange=ALL):
    """
    Checks that some string, word or text are palindrome.
    Checking performs case-insensitive

    :param str somestr:
        It is some string that will be checked for palindrome
    :keyword int check:
        It is mode of checking. Follows modes are available:

        - STRING - means that checking of string performs as string.
          See more at help(palindromus.checkstring)
        - WORD - means that checking of string performs as word.
          See more at help(palindromus.checkword)
        - MULTILINE - means that checking of string performs as multiline
          palindrome. See more at help(palindromus.checkmultiline)
        - TEXT - means that checking of string performs as text.
          See more at help(palindromus.checktext)
        - SUPER - means that checking of string performs as superpalindrome.
          See more at help(palindromus.checksuper)

        The STRING-mode is default
    :keyword dict interchange:
        It is dictionary of interchangeable letters
    :except TypeError:
        If the checked string is not a string
    :except TypeError:
        If checking mode is not specified as integer
    :except ValueError:
        If value of checking mode is not valid. If value of checking mode
        in [STRING, WORD, MULTILINE, TEXT, SUPER] then it is valid
    :return bool:
    """
    # check invalid data types
    OnlyStringsCanBeChecked(somestr)
    if not isinstance(check, int):
        raise TypeError('keyword argument "check" must be an int')

    if check == STRING:
        # check that string is palindrome
        return checkstring(somestr, interchange=interchange)
    elif check == WORD:
        # check that word is palindrome
        return checkword(somestr, interchange=interchange)
    elif check == MULTILINE:
        # check that text is multiline palindrome
        return checkmultiline(somestr, interchange=interchange)
    elif check == TEXT:
        # check that text is palindrome
        return checktext(somestr, interchange=interchange)
    elif check == SUPER:
        # check that text is super palindrome
        return checksuper(somestr, interchange=interchange)
    else:
        # all other cases
        raise ValueError('Unknown mode %i' % check)
0.046275
def require_http_methods(request_methods):
    """
    Decorator to make a function view only accept particular request methods.
    Usage::

        @require_http_methods(["GET", "POST"])
        def function_view(request):
            # HTTP methods != GET or POST results in 405 error code response

    """
    if not isinstance(request_methods, (list, tuple)):
        raise ImproperlyConfigured(
            "require_http_methods decorator must be called "
            "with a list or tuple of strings. For example:\n\n"
            "    @require_http_methods(['GET', 'POST'])\n"
            "    def function_view(request):\n"
            "        ...\n")

    request_methods = list(map(str.upper, request_methods))
    for method in request_methods:
        if method not in HTTP_METHOD_NAMES:
            raise ImproperlyConfigured(
                "require_http_method called with '%s', "
                "which is not a valid HTTP method.\n" % (method,))

    if 'GET' in request_methods and 'HEAD' not in request_methods:
        request_methods.append('HEAD')
    if 'OPTIONS' not in request_methods:
        request_methods.append('OPTIONS')
    request_methods.sort()

    def decorator(func):
        @wraps(func, assigned=available_attrs(func))
        def inner(request, *args, **kwargs):
            if request.method == 'OPTIONS':
                response = HttpResponse()
                response['Allow'] = ', '.join(
                    [m.upper() for m in request_methods])
                response['Content-Length'] = '0'
                return response
            if request.method not in request_methods:
                logger.warning(
                    'Method Not Allowed (%s): %s',
                    request.method, request.path,
                    extra={
                        'status_code': 405,
                        'request': request
                    }
                )
                return HttpResponseNotAllowed(request_methods)
            return func(request, *args, **kwargs)
        return inner
    return decorator
0.000473
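One detail worth calling out in the decorator above: HEAD and OPTIONS get appended automatically. The normalization step in isolation, outside Django:

methods = [m.upper() for m in ['get', 'post']]
if 'GET' in methods and 'HEAD' not in methods:
    methods.append('HEAD')
if 'OPTIONS' not in methods:
    methods.append('OPTIONS')
print(sorted(methods))  # ['GET', 'HEAD', 'OPTIONS', 'POST']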
def put(self, route: str(), callback: object()):
    """
    Binds a PUT route with the given callback

    :rtype: object
    """
    self.__set_route('put', {route: callback})
    return RouteMapping
0.008929
def base_concrete_model(abstract, model):
    """
    Used in methods of abstract models to find the super-most concrete
    (non abstract) model in the inheritance chain that inherits from the
    given abstract model. This is so the methods in the abstract model can
    query data consistently across the correct concrete model.

    Consider the following::

        class Abstract(models.Model)

            class Meta:
                abstract = True

            def concrete(self):
                return base_concrete_model(Abstract, self)

        class Super(Abstract):
            pass

        class Sub(Super):
            pass

        sub = Sub.objects.create()
        sub.concrete() # returns Super

    In actual yacms usage, this allows methods in the ``Displayable`` and
    ``Orderable`` abstract models to access the ``Page`` instance when
    instances of custom content types, (eg: models that inherit from
    ``Page``) need to query the ``Page`` model to determine correct values
    for ``slug`` and ``_order`` which are only relevant in the context of the
    ``Page`` model and not the model of the custom content type.
    """
    if hasattr(model, 'objects'):
        # "model" is a model class
        return (model if model._meta.abstract else
                _base_concrete_model(abstract, model))
    # "model" is a model instance
    return (
        _base_concrete_model(abstract, model.__class__) or
        model.__class__)
0.000684
def run_job(self, job_id, array_id=None):
    """Overwrites the run-job command from the manager to extract the
    correct job id before calling base class implementation."""
    # get the unique job id from the given grid id
    self.lock()
    jobs = list(self.session.query(Job).filter(Job.id == job_id))
    if len(jobs) != 1:
        self.unlock()
        raise ValueError("Could not find job id '%d' in the database'" % job_id)
    job_id = jobs[0].unique
    self.unlock()

    # call base class implementation with the corrected job id
    return JobManager.run_job(self, job_id, array_id)
0.010152
def add_fields(self, field_dict):
    """Add a mapping of field names to PayloadField instances.

    :API: public
    """
    for key, field in field_dict.items():
        self.add_field(key, field)
0.010152
def keyPressEvent(self, keyEvent: QtGui.QKeyEvent):
    """
    Undo safe wrapper for the native ``keyPressEvent`` method.

    |Args|

    * ``keyEvent`` (**QKeyEvent**): the key event to process.

    |Returns|

    **None**

    |Raises|

    * **QtmacsArgumentError** if at least one argument has an invalid type.
    """
    undoObj = UndoInsert(self, keyEvent.text())
    self.qteUndoStack.push(undoObj)
0.004415
def get_points(self):
    """Returns a ketama compatible list of (position, nodename) tuples.
    """
    return [(k, self.runtime._ring[k]) for k in self.runtime._keys]
0.01105
def create_datacenter(datacenter_name, service_instance=None):
    '''
    Creates a datacenter.

    Supported proxies: esxdatacenter

    datacenter_name
        The datacenter name

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.create_datacenter dc1
    '''
    salt.utils.vmware.create_datacenter(service_instance, datacenter_name)
    return {'create_datacenter': True}
0.002045
def decrypt(crypt_text) -> str:
    """
    Use config.json key to decrypt
    """
    cipher = Fernet(current_app.config['KEY'])
    if not isinstance(crypt_text, bytes):
        crypt_text = str.encode(crypt_text)
    return cipher.decrypt(crypt_text).decode("utf-8")
0.006993
def get_environment() -> Environment:
    """
    Returns the jinja2 templating environment updated with the most recent
    cauldron environment configurations

    :return:
    """
    env = JINJA_ENVIRONMENT
    loader = env.loader

    resource_path = environ.configs.make_path(
        'resources', 'templates',
        override_key='template_path'
    )

    if not loader:
        env.filters['id'] = get_id
        env.filters['latex'] = get_latex

    if not loader or resource_path not in loader.searchpath:
        env.loader = FileSystemLoader(resource_path)

    return env
0.001704
def get_limit_action(self, criticity, stat_name=""):
    """Return the tuple (action, repeat) for the alert.

    - action is a command line
    - repeat is a bool
    """
    # Get the action for stat + header
    # Example: network_wlan0_rx_careful_action
    # Action key available ?
    ret = [(stat_name + '_' + criticity + '_action', False),
           (stat_name + '_' + criticity + '_action_repeat', True),
           (self.plugin_name + '_' + criticity + '_action', False),
           (self.plugin_name + '_' + criticity + '_action_repeat', True)]
    for r in ret:
        if r[0] in self._limits:
            return self._limits[r[0]], r[1]

    # No key found, then raise an error
    raise KeyError
0.002584
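The candidate list above encodes a precedence order: stat-specific keys are checked before plugin-wide ones. A worked illustration with hypothetical names:

stat_name, criticity, plugin_name = 'network_wlan0_rx', 'careful', 'network'
candidates = [stat_name + '_' + criticity + '_action',
              stat_name + '_' + criticity + '_action_repeat',
              plugin_name + '_' + criticity + '_action',
              plugin_name + '_' + criticity + '_action_repeat']
print(candidates[0])  # network_wlan0_rx_careful_action, tried first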
def _find_module(mod_name):
    """
    Iterate over each part instead of calling imp.find_module directly.
    This function is able to find submodules (e.g. scikit.tree)
    """
    path = None
    for part in mod_name.split('.'):
        if path is not None:
            path = [path]
        file, path, description = imp.find_module(part, path)
        if file is not None:
            file.close()
    return path, description
0.002315
def report(policies, start_date, options, output_fh, raw_output_fh=None):
    """Format a policy's extant records into a report."""
    regions = set([p.options.region for p in policies])
    policy_names = set([p.name for p in policies])
    formatter = Formatter(
        policies[0].resource_manager.resource_type,
        extra_fields=options.field,
        include_default_fields=not options.no_default_fields,
        include_region=len(regions) > 1,
        include_policy=len(policy_names) > 1
    )

    records = []
    for policy in policies:
        # initialize policy execution context for output access
        policy.ctx.initialize()
        if policy.ctx.output.type == 's3':
            policy_records = record_set(
                policy.session_factory,
                policy.ctx.output.config['netloc'],
                policy.ctx.output.config['path'].strip('/'),
                start_date)
        else:
            policy_records = fs_record_set(policy.ctx.log_dir, policy.name)

        log.debug("Found %d records for region %s", len(policy_records),
                  policy.options.region)

        for record in policy_records:
            record['policy'] = policy.name
            record['region'] = policy.options.region

        records += policy_records

    rows = formatter.to_csv(records)
    if options.format == 'csv':
        writer = UnicodeWriter(output_fh, formatter.headers())
        writer.writerow(formatter.headers())
        writer.writerows(rows)
    elif options.format == 'json':
        print(dumps(records, indent=2))
    else:
        # We special case CSV, and for other formats we pass to tabulate
        print(tabulate(rows, formatter.headers(), tablefmt=options.format))

    if raw_output_fh is not None:
        dumps(records, raw_output_fh, indent=2)
0.001112
def _show(self, res, err, prefix='', colored=False):
    """ Show result or error """
    if self.kind == 'local':
        what = res if not err else err
        print(what)
        return
    if self.kind == 'remote':
        if colored:
            red, green, reset = Fore.RED, Fore.GREEN, Fore.RESET
        else:
            red = green = reset = ''
        if err:
            what = prefix + red + 'remote err: {}'.format(err) + reset
        else:
            what = prefix + green + str(res) + reset
        print(what)
0.00339
def stop(self, precision=0):
    """ Stops the timer, adds it as an interval to :prop:intervals

        @precision: #int number of decimal places to round to

        -> #str formatted interval time
    """
    self._stop = time.perf_counter()
    return self.add_interval(precision)
0.006515
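A minimal standalone version of the same start/stop pattern built on time.perf_counter (variable names invented for illustration):

import time

start = time.perf_counter()
time.sleep(0.1)
elapsed = time.perf_counter() - start
print(round(elapsed, 2))  # roughly 0.1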
def eth_getBalance(self, address):
    """Get account balance.

    :param address:
    :return:
    """
    account = self.reader._get_account(address)
    return account.balance
0.009852
def getParticleInfo(self, modelId):
    """Return particle info for a specific modelId.

    Parameters:
    ---------------------------------------------------------------------
    modelId:  which model Id

    retval:  (particleState, modelId, errScore, completed, matured)
    """
    entry = self._allResults[self._modelIDToIdx[modelId]]
    return (entry['modelParams']['particleState'], modelId, entry['errScore'],
            entry['completed'], entry['matured'])
0.002123
def __build_signature(self, data, saml_type, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1):
    """
    Builds the Signature

    :param data: The Request data
    :type data: dict

    :param saml_type: The target URL the user should be redirected to
    :type saml_type: string  SAMLRequest | SAMLResponse

    :param sign_algorithm: Signature algorithm method
    :type sign_algorithm: string
    """
    assert saml_type in ('SAMLRequest', 'SAMLResponse')

    key = self.get_settings().get_sp_key()
    if not key:
        raise OneLogin_Saml2_Error(
            "Trying to sign the %s but can't load the SP private key." % saml_type,
            OneLogin_Saml2_Error.PRIVATE_KEY_NOT_FOUND
        )

    msg = self.__build_sign_query(data[saml_type],
                                  data.get('RelayState', None),
                                  sign_algorithm,
                                  saml_type)

    sign_algorithm_transform_map = {
        OneLogin_Saml2_Constants.DSA_SHA1: xmlsec.Transform.DSA_SHA1,
        OneLogin_Saml2_Constants.RSA_SHA1: xmlsec.Transform.RSA_SHA1,
        OneLogin_Saml2_Constants.RSA_SHA256: xmlsec.Transform.RSA_SHA256,
        OneLogin_Saml2_Constants.RSA_SHA384: xmlsec.Transform.RSA_SHA384,
        OneLogin_Saml2_Constants.RSA_SHA512: xmlsec.Transform.RSA_SHA512
    }
    sign_algorithm_transform = sign_algorithm_transform_map.get(
        sign_algorithm, xmlsec.Transform.RSA_SHA1)

    signature = OneLogin_Saml2_Utils.sign_binary(msg, key, sign_algorithm_transform,
                                                 self.__settings.is_debug_active())
    data['Signature'] = OneLogin_Saml2_Utils.b64encode(signature)
    data['SigAlg'] = sign_algorithm
0.003376
def open_scene(f, kwargs=None):
    """Opens the given JB_File

    :param f: the file to open
    :type f: :class:`jukeboxcore.filesys.JB_File`
    :param kwargs: keyword arguments for the command maya.cmds file.
                   defaultflags that are always used:

                     :open: ``True``

                   e.g. to force the open command use ``{'force'=True}``.
    :type kwargs: dict|None
    :returns: An action status. The returnvalue of the actionstatus is the opened mayafile
    :rtype: :class:`ActionStatus`
    :raises: None
    """
    defaultkwargs = {'open': True}
    if kwargs is None:
        kwargs = {}
    kwargs.update(defaultkwargs)
    fp = f.get_fullpath()
    mayafile = cmds.file(fp, **kwargs)
    msg = "Successfully opened file %s with arguments: %s" % (fp, kwargs)
    return ActionStatus(ActionStatus.SUCCESS, msg, returnvalue=mayafile)
0.003405
def configure(root_url, **kwargs):
    """
    Notice that `configure` can either apply to the default configuration
    or `Client.config`, which is the configuration used by the current
    thread since `Client` inherits from `threading.local`.
    """
    default = kwargs.pop('default', True)
    kwargs['client_agent'] = 'example-client/' + __version__
    if 'headers' not in kwargs:
        kwargs['headers'] = {}
    kwargs['headers']['Accept-Type'] = 'application/json'
    if default:
        default_config.reset(root_url, **kwargs)
    else:
        Client.config = wac.Config(root_url, **kwargs)
0.001639
def _get_classes(package_name, base_class):
    """
    search monits or works classes. Class must have 'name' attribute

    :param package_name: 'monits' or 'works'
    :param base_class: Monit or Work
    :return: tuple of tuples monit/work-name and class
    """
    classes = {}
    base_dir = os.getcwd()
    root_module_name = base_dir.split('/')[-1]
    package_dir = base_dir + '/%s' % package_name
    if os.path.isdir(package_dir):
        for module_path in os.listdir(package_dir):
            if not module_path.endswith('.py'):
                continue
            module_name = os.path.splitext(module_path)[0]
            module_full_name = '%s.%s.%s' % (root_module_name, package_name, module_name)
            __import__(module_full_name)
            work_module = sys.modules[module_full_name]
            for module_item in work_module.__dict__.values():
                if type(module_item) is type \
                        and issubclass(module_item, base_class) \
                        and module_item is not base_class \
                        and hasattr(module_item, 'name') and module_item.name:
                    classes.setdefault(module_item.name, []).append(module_item)

    # check no duplicated names
    for work_name, work_modules in classes.items():
        if len(work_modules) > 1:
            raise DuplicatedNameException('Modules %s have same name "%s"' % (
                ' and '.join(map(str, work_modules)), work_name
            ))

    # create immutable list of modules
    return tuple([(work_name, work_modules[0])
                  for work_name, work_modules in classes.items()])
0.003957
def compose(layers, bbox=None, layer_filter=None, color=None, **kwargs):
    """
    Compose layers to a single :py:class:`PIL.Image`.
    If the layers do not have visible pixels, the function returns `None`.

    Example::

        image = compose([layer1, layer2])

    In order to skip some layers, pass `layer_filter` function which
    should take `layer` as an argument and return `True` to keep the layer
    or return `False` to skip::

        image = compose(
            layers,
            layer_filter=lambda x: x.is_visible() and x.kind == 'type'
        )

    By default, visible layers are composed.

    .. note:: This function is experimental and does not guarantee
        Photoshop-quality rendering.

        Currently the following are ignored:

        - Adjustments layers
        - Layer effects
        - Blending mode (all blending modes become normal)

        Shape drawing is inaccurate if the PSD file is not saved with
        maximum compatibility.

    :param layers: a layer, or an iterable of layers.
    :param bbox: (left, top, bottom, right) tuple that specifies a region to
        compose. By default, all the visible area is composed. The origin
        is at the top-left corner of the PSD document.
    :param layer_filter: a callable that takes a layer and returns `bool`.
    :param color: background color in `int` or `tuple`.
    :return: :py:class:`PIL.Image` or `None`.
    """
    from PIL import Image

    if not hasattr(layers, '__iter__'):
        layers = [layers]

    def _default_filter(layer):
        return layer.is_visible()

    layer_filter = layer_filter or _default_filter
    valid_layers = [x for x in layers if layer_filter(x)]
    if len(valid_layers) == 0:
        return None

    if bbox is None:
        bbox = extract_bbox(valid_layers)
        if bbox == (0, 0, 0, 0):
            return None

    # Alpha must be forced to correctly blend.
    mode = get_pil_mode(valid_layers[0]._psd.color_mode, True)
    result = Image.new(
        mode, (bbox[2] - bbox[0], bbox[3] - bbox[1]),
        color=color if color is not None else 'white',
    )
    result.putalpha(0)

    for layer in valid_layers:
        if intersect(layer.bbox, bbox) == (0, 0, 0, 0):
            continue

        image = layer.compose(**kwargs)
        if image is None:
            continue

        logger.debug('Composing %s' % layer)
        offset = (layer.left - bbox[0], layer.top - bbox[1])
        result = _blend(result, image, offset)

    return result
0.000397
def filter_active(self, *args, **kwargs):
    """
    Return only the 'active' hits.

    How you count a hit/view will depend on personal choice: Should the
    same user/visitor *ever* be counted twice? After a week, or a month,
    or a year, should their view be counted again?

    The default is to consider a visitor's hit still 'active' if they
    return within the last seven days. After that the hit will be
    counted again. So if one person visits once a week for a year, they
    will add 52 hits to a given object.

    Change how long the expiration is by adding to settings.py:

    HITCOUNT_KEEP_HIT_ACTIVE = {'days' : 30, 'minutes' : 30}

    Accepts days, seconds, microseconds, milliseconds, minutes,
    hours, and weeks. It's creating a datetime.timedelta object.
    """
    grace = getattr(settings, 'HITCOUNT_KEEP_HIT_ACTIVE', {'days': 7})
    period = timezone.now() - timedelta(**grace)
    return self.filter(created__gte=period).filter(*args, **kwargs)
0.001887
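The settings override described in the docstring feeds straight into datetime.timedelta; a quick standalone check of that mechanic:

from datetime import timedelta

grace = {'days': 30, 'minutes': 30}
print(timedelta(**grace))  # 30 days, 0:30:00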
def show(self):
    """Controls if the viewlet should be rendered
    """
    url = self.request.getURL()
    # XXX: Hack to show the viewlet only on the AR base_view
    if not any(map(url.endswith, ["base_view", "manage_results"])):
        return False
    return self.attachments_view.user_can_add_attachments() or \
        self.attachments_view.user_can_update_attachments()
0.007229
def parse_input(tagged_string, disable_colors, keep_tags):
    """Perform the actual conversion of tags to ANSI escaped codes.

    Provides a version of the input without any colors for len() and other methods.

    :param str tagged_string: The input unicode value.
    :param bool disable_colors: Strip all colors in both outputs.
    :param bool keep_tags: Skip parsing curly bracket tags into ANSI escape sequences.

    :return: 2-item tuple. First item is the parsed output. Second item is a
        version of the input without any colors.
    :rtype: tuple
    """
    codes = ANSICodeMapping(tagged_string)
    output_colors = getattr(tagged_string, 'value_colors', tagged_string)

    # Convert: '{b}{red}' -> '\033[1m\033[31m'
    if not keep_tags:
        for tag, replacement in (('{' + k + '}', '' if v is None else '\033[%dm' % v)
                                 for k, v in codes.items()):
            output_colors = output_colors.replace(tag, replacement)

    # Strip colors.
    output_no_colors = RE_ANSI.sub('', output_colors)
    if disable_colors:
        return output_no_colors, output_no_colors

    # Combine: '\033[1m\033[31m' -> '\033[1;31m'
    while True:
        simplified = RE_COMBINE.sub(r'\033[\1;\2m', output_colors)
        if simplified == output_colors:
            break
        output_colors = simplified

    # Prune: '\033[31;32;33;34;35m' -> '\033[35m'
    output_colors = prune_overridden(output_colors)

    # Deduplicate: '\033[1;mT\033[1;mE\033[1;mS\033[1;mT' -> '\033[1;mTEST'
    previous_escape = None
    segments = list()
    for item in (i for i in RE_SPLIT.split(output_colors) if i):
        if RE_SPLIT.match(item):
            if item != previous_escape:
                segments.append(item)
            previous_escape = item
        else:
            segments.append(item)
    output_colors = ''.join(segments)

    return output_colors, output_no_colors
0.002657
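A hedged illustration of just the tag-to-ANSI substitution step; the tag-to-number mapping here is a hand-rolled stand-in for the library's ANSICodeMapping:

codes = {'b': 1, 'red': 31, '/all': 0}
s = '{b}{red}error{/all}'
for tag, num in codes.items():
    s = s.replace('{' + tag + '}', '\033[%dm' % num)
print(repr(s))  # '\x1b[1m\x1b[31merror\x1b[0m'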
def platform(cls):
    """ What Operating System (and sub-system like glibc / musl) """
    system_name = platform.system()
    if system_name == "Linux":
        libc = cls.libc()
        return "unknown-linux-{libc}".format(libc=libc)
    elif system_name == "Darwin":
        return "apple-darwin"
    else:
        return "unknown"
0.005168
def create_service_from_endpoint(endpoint, service_type, title=None, abstract=None, catalog=None):
    """
    Create a service from an endpoint if it does not already exist.
    """
    from models import Service
    if Service.objects.filter(url=endpoint, catalog=catalog).count() == 0:
        # check if endpoint is valid
        request = requests.get(endpoint)
        if request.status_code == 200:
            LOGGER.debug('Creating a %s service for endpoint=%s catalog=%s' % (service_type, endpoint, catalog))
            service = Service(
                type=service_type, url=endpoint, title=title, abstract=abstract,
                csw_type='service', catalog=catalog
            )
            service.save()
            return service
        else:
            LOGGER.warning('This endpoint is invalid, status code is %s' % request.status_code)
    else:
        LOGGER.warning('A service for this endpoint %s in catalog %s already exists' % (endpoint, catalog))
    return None
0.005842
def transitions(self):
    """Dense [k-1]x4 transition frequency matrix"""
    if self._transitions is not None:
        return self._transitions
    # Normalise raw counts row-wise into frequencies.
    transitions = self.array.astype(float)
    transitions /= transitions.sum(1)[:, np.newaxis]
    self._transitions = transitions
    return transitions
0.006042
def structured_traceback(self, etype, evalue, etb, tb_offset=None,
                         context=5):
    """Return a nice text document describing the traceback."""

    tb_offset = self.tb_offset if tb_offset is None else tb_offset

    # some locals
    try:
        etype = etype.__name__
    except AttributeError:
        pass
    Colors = self.Colors   # just a shorthand + quicker name lookup
    ColorsNormal = Colors.Normal  # used a lot
    col_scheme = self.color_scheme_table.active_scheme_name
    indent = ' '*INDENT_SIZE
    em_normal = '%s\n%s%s' % (Colors.valEm, indent, ColorsNormal)
    undefined = '%sundefined%s' % (Colors.em, ColorsNormal)
    exc = '%s%s%s' % (Colors.excName, etype, ColorsNormal)

    # some internal-use functions
    def text_repr(value):
        """Hopefully pretty robust repr equivalent."""
        # this is pretty horrible but should always return *something*
        try:
            return pydoc.text.repr(value)
        except KeyboardInterrupt:
            raise
        except:
            try:
                return repr(value)
            except KeyboardInterrupt:
                raise
            except:
                try:
                    # all still in an except block so we catch
                    # getattr raising
                    name = getattr(value, '__name__', None)
                    if name:
                        # ick, recursion
                        return text_repr(name)
                    klass = getattr(value, '__class__', None)
                    if klass:
                        return '%s instance' % text_repr(klass)
                except KeyboardInterrupt:
                    raise
                except:
                    return 'UNRECOVERABLE REPR FAILURE'

    def eqrepr(value, repr=text_repr):
        return '=%s' % repr(value)

    def nullrepr(value, repr=text_repr):
        return ''

    # meat of the code begins
    try:
        etype = etype.__name__
    except AttributeError:
        pass

    if self.long_header:
        # Header with the exception type, python version, and date
        pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
        date = time.ctime(time.time())

        head = '%s%s%s\n%s%s%s\n%s' % (Colors.topline, '-'*75, ColorsNormal,
                                       exc, ' '*(75-len(str(etype))-len(pyver)),
                                       pyver, date.rjust(75))
        head += "\nA problem occurred executing Python code. Here is the sequence of function"\
                "\ncalls leading up to the error, with the most recent (innermost) call last."
    else:
        # Simplified header
        head = '%s%s%s\n%s%s' % (Colors.topline, '-'*75, ColorsNormal, exc,
                                 'Traceback (most recent call last)'.
                                 rjust(75 - len(str(etype))))
    frames = []
    # Flush cache before calling inspect. This helps alleviate some of the
    # problems with python 2.3's inspect.py.
    ##self.check_cache()
    # Drop topmost frames if requested
    try:
        # Try the default getinnerframes and Alex's: Alex's fixes some
        # problems, but it generates empty tracebacks for console errors
        # (5 blank lines) where none should be returned.
        #records = inspect.getinnerframes(etb, context)[tb_offset:]
        #print 'python records:', records # dbg
        records = _fixed_getinnerframes(etb, context, tb_offset)
        #print 'alex records:', records # dbg
    except:
        # FIXME: I've been getting many crash reports from python 2.3
        # users, traceable to inspect.py. If I can find a small test-case
        # to reproduce this, I should either write a better workaround or
        # file a bug report against inspect (if that's the real problem).
        # So far, I haven't been able to find an isolated example to
        # reproduce the problem.
        inspect_error()
        traceback.print_exc(file=self.ostream)
        info('\nUnfortunately, your original traceback cannot be constructed.\n')
        return ''

    # build some color string templates outside these nested loops
    tpl_link = '%s%%s%s' % (Colors.filenameEm, ColorsNormal)
    tpl_call = 'in %s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
    tpl_call_fail = 'in %s%%s%s(***failed resolving arguments***)%s' % \
                    (Colors.vName, Colors.valEm, ColorsNormal)
    tpl_local_var = '%s%%s%s' % (Colors.vName, ColorsNormal)
    tpl_global_var = '%sglobal%s %s%%s%s' % (Colors.em, ColorsNormal,
                                             Colors.vName, ColorsNormal)
    tpl_name_val = '%%s %s= %%s%s' % (Colors.valEm, ColorsNormal)
    tpl_line = '%s%%s%s %%s' % (Colors.lineno, ColorsNormal)
    tpl_line_em = '%s%%s%s %%s%s' % (Colors.linenoEm, Colors.line,
                                     ColorsNormal)

    # now, loop over all records printing context and info
    abspath = os.path.abspath
    for frame, file, lnum, func, lines, index in records:
        #print '*** record:',file,lnum,func,lines,index # dbg
        if not file:
            file = '?'
        elif not(file.startswith("<") and file.endswith(">")):
            # Guess that filenames like <string> aren't real filenames, so
            # don't call abspath on them.
            try:
                file = abspath(file)
            except OSError:
                # Not sure if this can still happen: abspath now works with
                # file names like <string>
                pass
        link = tpl_link % file
        args, varargs, varkw, locals = inspect.getargvalues(frame)

        if func == '?':
            call = ''
        else:
            # Decide whether to include variable details or not
            var_repr = self.include_vars and eqrepr or nullrepr
            try:
                call = tpl_call % (func, inspect.formatargvalues(args,
                                            varargs, varkw,
                                            locals, formatvalue=var_repr))
            except KeyError:
                # This happens in situations like errors inside generator
                # expressions, where local variables are listed in the
                # line, but can't be extracted from the frame. I'm not
                # 100% sure this isn't actually a bug in inspect itself,
                # but since there's no info for us to compute with, the
                # best we can do is report the failure and move on. Here
                # we must *not* call any traceback construction again,
                # because that would mess up use of %debug later on. So we
                # simply report the failure and move on. The only
                # limitation will be that this frame won't have locals
                # listed in the call signature. Quite subtle problem...
                # I can't think of a good way to validate this in a unit
                # test, but running a script consisting of:
                #  dict( (k,v.strip()) for (k,v) in range(10) )
                # will illustrate the error, if this exception catch is
                # disabled.
                call = tpl_call_fail % func

        # Don't attempt to tokenize binary files.
        if file.endswith(('.so', '.pyd', '.dll')):
            frames.append('%s %s\n' % (link, call))
            continue
        elif file.endswith(('.pyc', '.pyo')):
            # Look up the corresponding source file.
            file = pyfile.source_from_cache(file)

        def linereader(file=file, lnum=[lnum], getline=linecache.getline):
            line = getline(file, lnum[0])
            lnum[0] += 1
            return line

        # Build the list of names on this line of code where the exception
        # occurred.
        try:
            names = []
            name_cont = False

            for token_type, token, start, end, line in generate_tokens(linereader):
                # build composite names
                if token_type == tokenize.NAME and token not in keyword.kwlist:
                    if name_cont:
                        # Continuation of a dotted name
                        try:
                            names[-1].append(token)
                        except IndexError:
                            names.append([token])
                        name_cont = False
                    else:
                        # Regular new names. We append everything, the caller
                        # will be responsible for pruning the list later. It's
                        # very tricky to try to prune as we go, b/c composite
                        # names can fool us. The pruning at the end is easy
                        # to do (or the caller can print a list with repeated
                        # names if so desired).
                        names.append([token])
                elif token == '.':
                    name_cont = True
                elif token_type == tokenize.NEWLINE:
                    break

        except (IndexError, UnicodeDecodeError):
            # signals exit of tokenizer
            pass
        except tokenize.TokenError as msg:
            _m = ("An unexpected error occurred while tokenizing input\n"
                  "The following traceback may be corrupted or invalid\n"
                  "The error message is: %s\n" % msg)
            error(_m)

        # Join composite names (e.g. "dict.fromkeys")
        names = ['.'.join(n) for n in names]
        # prune names list of duplicates, but keep the right order
        unique_names = uniq_stable(names)

        # Start loop over vars
        lvals = []
        if self.include_vars:
            for name_full in unique_names:
                name_base = name_full.split('.', 1)[0]
                if name_base in frame.f_code.co_varnames:
                    if name_base in locals:
                        try:
                            value = repr(eval(name_full, locals))
                        except:
                            value = undefined
                    else:
                        value = undefined
                    name = tpl_local_var % name_full
                else:
                    if name_base in frame.f_globals:
                        try:
                            value = repr(eval(name_full, frame.f_globals))
                        except:
                            value = undefined
                    else:
                        value = undefined
                    name = tpl_global_var % name_full
                lvals.append(tpl_name_val % (name, value))
        if lvals:
            lvals = '%s%s' % (indent, em_normal.join(lvals))
        else:
            lvals = ''

        level = '%s %s\n' % (link, call)

        if index is None:
            frames.append(level)
        else:
            frames.append('%s%s' % (level, ''.join(
                _format_traceback_lines(lnum, index, lines, Colors, lvals,
                                        col_scheme))))

    # Get (safely) a string form of the exception info
    try:
        etype_str, evalue_str = map(str, (etype, evalue))
    except:
        # User exception is improperly defined.
        etype, evalue = str, sys.exc_info()[:2]
        etype_str, evalue_str = map(str, (etype, evalue))
    # ... and format it
    exception = ['%s%s%s: %s' % (Colors.excName, etype_str,
                                 ColorsNormal, evalue_str)]
    if (not py3compat.PY3) and type(evalue) is types.InstanceType:
        try:
            names = [w for w in dir(evalue) if isinstance(w, basestring)]
        except:
            # Every now and then, an object with funny internals blows up
            # when dir() is called on it. We do the best we can to report
            # the problem and continue
            _m = '%sException reporting error (object with broken dir())%s:'
            exception.append(_m % (Colors.excName, ColorsNormal))
            etype_str, evalue_str = map(str, sys.exc_info()[:2])
            exception.append('%s%s%s: %s' % (Colors.excName, etype_str,
                                             ColorsNormal, evalue_str))
            names = []
        for name in names:
            value = text_repr(getattr(evalue, name))
            exception.append('\n%s%s = %s' % (indent, name, value))

    # vds: >>
    if records:
        filepath, lnum = records[-1][1:3]
        #print "file:", str(file), "linenb", str(lnum) # dbg
        filepath = os.path.abspath(filepath)
        ipinst = ipapi.get()
        if ipinst is not None:
            ipinst.hooks.synchronize_with_editor(filepath, lnum, 0)
    # vds: <<

    # return all our info assembled as a single string
    # return '%s\n\n%s\n%s' % (head,'\n'.join(frames),''.join(exception[0]) )
    return [head] + frames + [''.join(exception[0])]
0.007238
def set(self, key, value): """Set a configuration property.""" # Try to set self._jconf first if JVM is created, set self._conf if JVM is not created yet. if self._jconf is not None: self._jconf.set(key, unicode(value)) else: self._conf[key] = unicode(value) return self
0.008982
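Because set() returns self, configuration calls chain fluently before any JVM exists; a minimal sketch:

conf = (SparkConf()
        .set("spark.app.name", "demo")
        .set("spark.executor.memory", "2g"))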
def template(filename):
    """ Decorator that renders the wrapped view's return value with the
    given template file. """
    def method_wrapper(method):
        @wraps(method)
        def jinja_wrapper(*args, **kwargs):
            ret = method(*args, **kwargs)
            return render_template(filename, ret)

        return jinja_wrapper

    return method_wrapper
0.003311
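A hypothetical view using the decorator above: the wrapped function returns a context object, and the decorator hands it to render_template together with the template filename.

@template('user.html')
def show_user(user_id):
    # load_user is a hypothetical helper; its result becomes the template context.
    return {'user': load_user(user_id)}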
def _equivalent(self, other):
    """Compare two entities of the same class, excluding keys."""
    if other.__class__ is not self.__class__:  # TODO: What about subclasses?
        raise NotImplementedError('Cannot compare different model classes. '
                                  '%s is not %s' % (self.__class__.__name__,
                                                    other.__class__.__name__))
    if set(self._projection) != set(other._projection):
        return False
    # It's all about determining inequality early.
    if len(self._properties) != len(other._properties):
        return False  # Can only happen for Expandos.
    my_prop_names = set(self._properties.iterkeys())
    their_prop_names = set(other._properties.iterkeys())
    if my_prop_names != their_prop_names:
        return False  # Again, only possible for Expandos.
    if self._projection:
        my_prop_names = set(self._projection)
    for name in my_prop_names:
        if '.' in name:
            name, _ = name.split('.', 1)
        my_value = self._properties[name]._get_value(self)
        their_value = other._properties[name]._get_value(other)
        if my_value != their_value:
            return False
    return True
0.008396
def token(self, value):
    """
    Set the Token of the message.

    :type value: String
    :param value: the Token
    :raise AttributeError: if value is longer than 256 characters
    """
    if value is None:
        self._token = value
        return
    if not isinstance(value, str):
        value = str(value)
    if len(value) > 256:
        raise AttributeError
    self._token = value
0.004525
def get_connection(self, name): """Returns the properties for a connection name This method will return the settings for the configuration specified by name. Note that the name argument should only be the name. For instance, give the following eapi.conf file .. code-block:: ini [connection:veos01] transport: http The name to use to retrieve the configuration would be veos01 >>> pyeapi.client.config.get_connection('veos01') Args: name (str): The name of the connection to return Returns: A Python dictionary object of key/value pairs that represent the node configuration. If the name provided in the argument is not found, then None is returned. """ name = 'connection:{}'.format(name) if not self.has_section(name): return None return dict(self.items(name))
0.002058
def write_header(term='bash', tree_dir=None, name=None):
    ''' Write proper file header in a given shell format

    Parameters:
        term (str):
            The type of shell header to write, can be "bash", "tsch", or "modules"
        tree_dir (str):
            The path to this repository
        name (str):
            The name of the configuration

    Returns:
        A string header to insert
    '''

    assert term in ['bash', 'tsch', 'modules'], 'term must be either bash, tsch, or modules'
    product_dir = tree_dir.rstrip('/')
    base = 'export' if term == 'bash' else 'setenv'

    if term != 'modules':
        hdr = """# Set up tree/{0} for {1}
{2} TREE_DIR {3}
{2} TREE_VER {1}
{2} PATH $TREE_DIR/bin:$PATH
{2} PYTHONPATH $TREE_DIR/python:$PYTHONPATH
""".format(name, term, base, product_dir)
    else:
        hdr = """#%Module1.0
proc ModulesHelp {{ }} {{
    global product version
    puts stderr "This module adds $product/$version to various paths"
}}
set name tree
set product tree
set version {1}
conflict $product
module-whatis "Sets up $product/$version in your environment"

set PRODUCT_DIR {0}
setenv [string toupper $product]_DIR $PRODUCT_DIR
setenv [string toupper $product]_VER $version
prepend-path PATH $PRODUCT_DIR/bin
prepend-path PYTHONPATH $PRODUCT_DIR/python
""".format(product_dir, name)

    return hdr.strip()
0.002152
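A sketch of the bash branch, keeping the template's own `export NAME value` spelling; the paths and configuration name are hypothetical.

hdr = write_header(term='bash', tree_dir='/opt/tree/', name='dr15')
print(hdr)
# Produces lines such as:
#   # Set up tree/dr15 for bash
#   export TREE_DIR /opt/tree
#   export TREE_VER dr15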
def getHourTable(date, pos): """ Returns an HourTable object. """ table = hourTable(date, pos) return HourTable(table, date)
0.007353
def _get_user_ns_object(shell, path): """Get object from the user namespace, given a path containing zero or more dots. Return None if the path is not valid. """ parts = path.split('.', 1) name, attr = parts[0], parts[1:] if name in shell.user_ns: if attr: try: return _getattr(shell.user_ns[name], attr[0]) except AttributeError: return None else: return shell.user_ns[name] return None
0.002
def render_koji(self):
    """
    if there is a yum repo in user params, don't pick stuff from koji
    """
    phase = 'prebuild_plugins'
    plugin = 'koji'
    if not self.pt.has_plugin_conf(phase, plugin):
        return

    if self.user_params.yum_repourls.value:
        self.pt.remove_plugin(phase, plugin, 'there is a yum repo user parameter')
    elif not self.pt.set_plugin_arg_valid(phase, plugin, "target",
                                          self.user_params.koji_target.value):
        self.pt.remove_plugin(phase, plugin, 'no koji target supplied in user parameters')
0.007862
def _get_updated_rows(self, auth, function):
    """ Get rows updated by last update query

        * `function` [function]
            Function to use for searching (one of the search_* functions).

        Helper function used to fetch all rows which were updated by the
        latest UPDATE ... RETURNING id query.
    """
    # Get dicts for all rows which were edited by building a query for
    # search_*. Each row returned from UPDATE ... RETURNING id gives us one
    # query part (qp); these are then combined into one big query for the
    # search_* API call.
    qps = []
    for row in self._curs_pg:
        qps.append(
            {
                'operator': 'equals',
                'val1': 'id',
                'val2': row['id']
            }
        )

    # if we didn't update anything, return an empty list
    if len(qps) == 0:
        return []

    # fetch list of objects based on IDs
    q = qps[0]
    for qp in qps[1:]:
        q = {
            'operator': 'or',
            'val1': q,
            'val2': qp
        }
    updated = function(auth, q, { 'max_result': 10000 })['result']
    return updated
0.003167
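For three updated rows the folding loop above yields a left-leaning 'or' tree, equivalent to (id=1 OR id=2) OR id=3:

q = {
    'operator': 'or',
    'val1': {
        'operator': 'or',
        'val1': {'operator': 'equals', 'val1': 'id', 'val2': 1},
        'val2': {'operator': 'equals', 'val1': 'id', 'val2': 2},
    },
    'val2': {'operator': 'equals', 'val1': 'id', 'val2': 3},
}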
def validate_and_decode(jwt_bu64, cert_obj): """Example for validating the signature of a JWT using only the cryptography library. Note that this does NOT validate the claims in the claim set. """ public_key = cert_obj.public_key() message = '.'.join(d1_common.cert.jwt.get_bu64_tup(jwt_bu64)[:2]) signature = d1_common.cert.jwt.get_jwt_tup(jwt_bu64)[2] try: public_key.verify( signature, message, cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15(), cryptography.hazmat.primitives.hashes.SHA256(), ) except cryptography.exceptions.InvalidSignature as e: raise Exception('Signature is invalid. error="{}"'.format(str(e))) return d1_common.cert.jwt.get_jwt_dict(jwt_bu64)
0.002522
def create_certificate_signing_request(self, body, **kwargs):
    """
    create a CertificateSigningRequest

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_certificate_signing_request(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1beta1CertificateSigningRequest body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :return: V1beta1CertificateSigningRequest
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.create_certificate_signing_request_with_http_info(body, **kwargs)
    else:
        (data) = self.create_certificate_signing_request_with_http_info(body, **kwargs)
        return data
0.004507
def _parallel_exec(self, hosts):
    ''' handles multiprocessing when more than 1 fork is required '''

    if not hosts:
        return
    p = multiprocessing.Pool(self.forks)

    results = []
    #results = p.map(multiprocessing_runner, hosts) # can't handle keyboard interrupt
    results = p.map_async(multiprocessing_runner, hosts).get(9999999)
    p.close()
    p.join()
    return results
0.009195
def _convert_rename(self, fc):
    """Convert a FileRenameCommand into a new FileCommand.

    :return: None if the rename is being ignored, otherwise a
      new FileCommand based on whether the old and new paths
      are inside or outside of the interesting locations.
    """
    old = fc.old_path
    new = fc.new_path
    keep_old = self._path_to_be_kept(old)
    keep_new = self._path_to_be_kept(new)
    if keep_old and keep_new:
        fc.old_path = self._adjust_for_new_root(old)
        fc.new_path = self._adjust_for_new_root(new)
        return fc
    elif keep_old:
        # The file has been renamed to a non-interesting location.
        # Delete it!
        old = self._adjust_for_new_root(old)
        return commands.FileDeleteCommand(old)
    elif keep_new:
        # The file has been renamed into an interesting location
        # We really ought to add it but we don't currently buffer
        # the contents of all previous files and probably never want
        # to. Maybe fast-import-info needs to be extended to
        # remember all renames and a config file can be passed
        # into here a la fast-import?
        self.warning("cannot turn rename of %s into an add of %s yet" %
            (old, new))
    return None
0.00219
def __find_pair_clusters(self, clusters):
    """!
    @brief Returns pair of clusters that are best candidates for merging in line with goodness measure.
           The pair of clusters for which the above goodness measure is maximum is the best pair of clusters to be merged.

    @param[in] clusters (list): List of clusters that have been allocated during processing, each cluster is represented by list of indexes of points from the input data set.

    @return (list) List that contains two indexes of clusters (from list 'clusters') that should be merged on this step.
            It can be equal to [-1, -1] when there are no links between clusters.

    """

    maximum_goodness = 0.0
    cluster_indexes = [-1, -1]

    for i in range(0, len(clusters)):
        for j in range(i + 1, len(clusters)):
            goodness = self.__calculate_goodness(clusters[i], clusters[j])
            if goodness > maximum_goodness:
                maximum_goodness = goodness
                cluster_indexes = [i, j]

    return cluster_indexes
0.015228
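A hypothetical outer loop (inside the same class) showing how the pair finder is typically consumed in agglomerative clustering; `number_clusters` is an assumed stopping parameter.

while len(clusters) > number_clusters:
    i, j = self.__find_pair_clusters(clusters)
    if (i, j) == (-1, -1):
        break  # no links left between any pair of clusters
    clusters[i] += clusters[j]  # merge cluster j into cluster i
    clusters.pop(j)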
def back_bfs(self, start, end=None): """ Returns a list of nodes in some backward BFS order. Starting from the start node the breadth first search proceeds along incoming edges. """ return [node for node, step in self._iterbfs(start, end, forward=False)]
0.009901
def explainParam(self, param): """ Explains a single param and returns its name, doc, and optional default value and user-supplied value in a string. """ param = self._resolveParam(param) values = [] if self.isDefined(param): if param in self._defaultParamMap: values.append("default: %s" % self._defaultParamMap[param]) if param in self._paramMap: values.append("current: %s" % self._paramMap[param]) else: values.append("undefined") valueStr = "(" + ", ".join(values) + ")" return "%s: %s %s" % (param.name, param.doc, valueStr)
0.00295
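With pyspark's LogisticRegression, for example, the "(default: ..., current: ...)" suffix built above renders like this:

from pyspark.ml.classification import LogisticRegression

lr = LogisticRegression(maxIter=5)
print(lr.explainParam('maxIter'))
# maxIter: max number of iterations (>= 0). (default: 100, current: 5)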
def set_curves(self, curves):
    u'''
    Set supported curves by name, nid or nist.

    :param str | tuple(int) curves: Example "secp384r1:secp256k1", (715, 714), "P-384", "K-409:B-409:K-571", ...
    :return: 1 for success and 0 for failure
    '''
    retVal = None

    if isinstance(curves, str):
        retVal = SSL_CTX_set1_curves_list(self._ctx, curves)
    elif isinstance(curves, tuple):
        retVal = SSL_CTX_set1_curves(self._ctx, curves, len(curves))

    # Pass the OpenSSL return code (1 success, 0 failure) through to the
    # caller; None means neither branch matched the input type.
    return retVal
0.005747
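Usage sketch on a hypothetical context object exposing set_curves(); both spellings come from the docstring above.

ctx.set_curves('secp384r1:secp256k1')  # colon-separated curve names
ctx.set_curves((715, 714))             # or numeric NIDs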
async def authenticate_with_device(atv): """Perform device authentication and print credentials.""" credentials = await atv.airplay.generate_credentials() await atv.airplay.load_credentials(credentials) try: await atv.airplay.start_authentication() pin = input('PIN Code: ') await atv.airplay.finish_authentication(pin) print('Credentials: {0}'.format(credentials)) except exceptions.DeviceAuthenticationError: print('Failed to authenticate', file=sys.stderr)
0.001919
def get_categories(self, job_id, category_id=None, body=None, params=None): """ `<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html>`_ :arg job_id: The name of the job :arg category_id: The identifier of the category definition of interest :arg body: Category selection details if not provided in URI :arg from_: skips a number of categories :arg size: specifies a max number of categories to get """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") return self.transport.perform_request( "GET", _make_path( "_ml", "anomaly_detectors", job_id, "results", "categories", category_id ), params=params, body=body, )
0.004582
def submit_fatjar(cl_args, unknown_args, tmp_dir):
  '''
  We use the packer to make a package for the jar and dump it
  to a well-known location. We then run the main method of the
  class with the specified arguments. We pass arguments as an
  environment variable HERON_OPTIONS.

  This will run the jar file with the topology_class_name. The submitter
  inside will write out the topology defn file to a location that
  we specify. Then we write the topology defn file to a well-known
  location. We then write to appropriate places in zookeeper
  and launch the scheduler jobs.

  :param cl_args: parsed command-line arguments
  :param unknown_args: extra arguments handed through to the topology's main class
  :param tmp_dir: temporary directory for the topology definition file
  :return:
  '''
  # execute main of the topology to create the topology definition
  topology_file = cl_args['topology-file-name']

  main_class = cl_args['topology-class-name']

  res = execute.heron_class(
      class_name=main_class,
      lib_jars=config.get_heron_libs(jars.topology_jars()),
      extra_jars=[topology_file],
      args=tuple(unknown_args),
      java_defines=cl_args['topology_main_jvm_property'])

  result.render(res)

  if not result.is_successful(res):
    err_context = ("Failed to create topology definition " \
      "file when executing class '%s' of file '%s'") % (main_class, topology_file)
    res.add_context(err_context)
    return res

  results = launch_topologies(cl_args, topology_file, tmp_dir)

  return results
0.010079
def patch_ref(self, sha): """ Patch reference on the origin master branch :param sha: Sha to use for the branch :return: Status of success :rtype: str or self.ProxyError """ uri = "{api}/repos/{origin}/git/refs/heads/{branch}".format( api=self.github_api_url, origin=self.origin, branch=self.master_fork ) data = { "sha": sha, "force": True } reply = self.request( "PATCH", uri, data=data ) if reply.status_code == 200: dic = json.loads(reply.content.decode("utf-8")) return dic["object"]["sha"] else: dic = json.loads(reply.content.decode("utf-8")) return self.ProxyError( reply.status_code, (dic, "message"), step="patch", context={ "uri": uri, "data": data } )
0.001916
def _onSize(self, evt):
    """
    Called when a wx.SizeEvent is generated.

    In this application we attempt to resize to fit the window, so it
    is better to take the performance hit and redraw the whole window.
    """

    DEBUG_MSG("_onSize()", 2, self)
    # Create a new, correctly sized bitmap
    self._width, self._height = self.GetClientSize()
    self.bitmap = wx.EmptyBitmap(self._width, self._height)
    self._isDrawn = False

    if self._width <= 1 or self._height <= 1:
        return  # Empty figure

    dpival = self.figure.dpi
    winch = self._width / dpival
    hinch = self._height / dpival
    self.figure.set_size_inches(winch, hinch)

    # Rendering will happen on the associated paint event
    # so no need to do anything here except to make sure
    # the whole background is repainted.
    self.Refresh(eraseBackground=False)
    FigureCanvasBase.resize_event(self)
0.005176
def create_log_stream(awsclient, log_group_name, log_stream_name):
    """Creates a log stream for the specified log group.

    :param log_group_name: log group name
    :param log_stream_name: log stream name
    :return: response from the CloudWatch Logs create_log_stream call
    """
    client_logs = awsclient.get_client('logs')
    response = client_logs.create_log_stream(
        logGroupName=log_group_name,
        logStreamName=log_stream_name
    )
    return response
0.002463
def from_json(cls, data, result=None):
    """
    Create new RelationMember element from JSON data

    :param data: Element data from JSON
    :type data: Dict
    :param result: The result this element belongs to
    :type result: overpy.Result
    :return: New instance of RelationMember
    :rtype: overpy.RelationMember
    :raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
    """
    if data.get("type") != cls._type_value:
        raise exception.ElementDataWrongType(
            type_expected=cls._type_value,
            type_provided=data.get("type")
        )

    ref = data.get("ref")
    role = data.get("role")

    attributes = {}
    ignore = ["geometry", "type", "ref", "role"]
    for n, v in data.items():
        if n in ignore:
            continue
        attributes[n] = v

    geometry = data.get("geometry")
    if isinstance(geometry, list):
        geometry_orig = geometry
        geometry = []
        for v in geometry_orig:
            geometry.append(
                RelationWayGeometryValue(
                    lat=v.get("lat"),
                    lon=v.get("lon")
                )
            )
    else:
        geometry = None

    return cls(
        attributes=attributes,
        geometry=geometry,
        ref=ref,
        role=role,
        result=result
    )
0.001956
def get_cozy_param(param):
    '''
    Get a parameter from the Cozy configuration
    '''
    try:
        req = curl_couchdb('/cozy/_design/cozyinstance/_view/all')
        rows = req.json()['rows']
        if len(rows) == 0:
            return None
        else:
            return rows[0].get('value', {}).get(param, None)
    except Exception:
        return None
0.00565