Columns: text (string, lengths 78 to 104k) | score (float64, range 0 to 0.18)
def flush_evicted_objects_unsafe():
    """This removes some critical state from the Redis shards.

    In a multitenant environment, this will flush metadata for all jobs, which
    may be undesirable.

    This removes all of the metadata for objects that have been evicted. This
    can be used to try to address out-of-memory errors caused by the
    accumulation of metadata in Redis. However, after running this command,
    fault tolerance will most likely not work.
    """
    ray.worker.global_worker.check_connected()
    for shard_index in range(len(ray.global_state.redis_clients)):
        _flush_evicted_objects_unsafe_shard(shard_index)
0.001531
def _lenstr(ins):
    ''' Returns string length
    '''
    (tmp1, output) = _str_oper(ins.quad[2], no_exaf=True)
    if tmp1:
        output.append('push hl')

    output.append('call __STRLEN')
    output.extend(_free_sequence(tmp1))
    output.append('push hl')
    REQUIRES.add('strlen.asm')
    return output
0.003195
def do_action(self, action):
    """Execute action, add a new tile, update the score & return the reward."""
    temp_state = np.rot90(self._state, action)
    reward = self._do_action_left(temp_state)
    self._state = np.rot90(temp_state, -action)
    self._score += reward

    self.add_random_tile()

    return reward
0.008547
def get_function_spec(name):
    """Return a dictionary with the specification of a function:
    parameter names and defaults (value, bounds, scale, etc.).

    Returns
    -------
    par_names : list
        List of parameter names for this function.

    norm_par : str
        Name of normalization parameter.

    default : dict
        Parameter defaults dictionary.
    """
    if not hasattr(get_function_spec, 'fndict'):
        modelfile = os.path.join('$FERMIPY_ROOT', 'data', 'models.yaml')
        modelfile = os.path.expandvars(modelfile)
        get_function_spec.fndict = yaml.load(open(modelfile))

    if not name in get_function_spec.fndict.keys():
        raise Exception('Invalid Function Name: %s' % name)

    return get_function_spec.fndict[name]
0.002488
def _calc(self, x, y): """ List based implementation of binary tree algorithm for concordance measure after :cite:`Christensen2005`. """ x = np.array(x) y = np.array(y) n = len(y) perm = list(range(n)) perm.sort(key=lambda a: (x[a], y[a])) vals = y[perm] ExtraY = 0 ExtraX = 0 ACount = 0 BCount = 0 CCount = 0 DCount = 0 ECount = 0 DCount = 0 Concordant = 0 Discordant = 0 # ids for left child li = [None] * (n - 1) # ids for right child ri = [None] * (n - 1) # number of left descendants for a node ld = np.zeros(n) # number of values equal to value i nequal = np.zeros(n) for i in range(1, n): NumBefore = 0 NumEqual = 1 root = 0 x0 = x[perm[i - 1]] y0 = y[perm[i - 1]] x1 = x[perm[i]] y1 = y[perm[i]] if x0 != x1: DCount = 0 ECount = 1 else: if y0 == y1: ECount += 1 else: DCount += ECount ECount = 1 root = 0 inserting = True while inserting: current = y[perm[i]] if current > y[perm[root]]: # right branch NumBefore += 1 + ld[root] + nequal[root] if ri[root] is None: # insert as right child to root ri[root] = i inserting = False else: root = ri[root] elif current < y[perm[root]]: # increment number of left descendants ld[root] += 1 if li[root] is None: # insert as left child to root li[root] = i inserting = False else: root = li[root] elif current == y[perm[root]]: NumBefore += ld[root] NumEqual += nequal[root] + 1 nequal[root] += 1 inserting = False ACount = NumBefore - DCount BCount = NumEqual - ECount CCount = i - (ACount + BCount + DCount + ECount - 1) ExtraY += DCount ExtraX += BCount Concordant += ACount Discordant += CCount cd = Concordant + Discordant num = Concordant - Discordant tau = num / np.sqrt((cd + ExtraX) * (cd + ExtraY)) v = (4. * n + 10) / (9. * n * (n - 1)) z = tau / np.sqrt(v) pval = erfc(np.abs(z) / 1.4142136) # follow scipy return tau, pval, Concordant, Discordant, ExtraX, ExtraY
0.000675
def def_cmd(name=None, short=None, fn=None, usage=None, help=None):
    """
    Define a command.
    """
    command = Command(name=name, short=short, fn=fn, usage=usage, help=help)
    Command.register(command)
0.004717
def restore_memory_snapshot(self, snapshot, bSkipMappedFiles = True, bSkipOnError = False): """ Attempts to restore the memory state as it was when the given snapshot was taken. @warning: Currently only the memory contents, state and protect bits are restored. Under some circumstances this method may fail (for example if memory was freed and then reused by a mapped file). @type snapshot: list( L{win32.MemoryBasicInformation} ) @param snapshot: Memory snapshot returned by L{take_memory_snapshot}. Snapshots returned by L{generate_memory_snapshot} don't work here. @type bSkipMappedFiles: bool @param bSkipMappedFiles: C{True} to avoid restoring the contents of memory mapped files, C{False} otherwise. Use with care! Setting this to C{False} can cause undesired side effects - changes to memory mapped files may be written to disk by the OS. Also note that most mapped files are typically executables and don't change, so trying to restore their contents is usually a waste of time. @type bSkipOnError: bool @param bSkipOnError: C{True} to issue a warning when an error occurs during the restoration of the snapshot, C{False} to stop and raise an exception instead. Use with care! Setting this to C{True} will cause the debugger to falsely believe the memory snapshot has been correctly restored. @raise WindowsError: An error occured while restoring the snapshot. @raise RuntimeError: An error occured while restoring the snapshot. @raise TypeError: A snapshot of the wrong type was passed. """ if not snapshot or not isinstance(snapshot, list) \ or not isinstance(snapshot[0], win32.MemoryBasicInformation): raise TypeError( "Only snapshots returned by " \ "take_memory_snapshot() can be used here." ) # Get the process handle. hProcess = self.get_handle( win32.PROCESS_VM_WRITE | win32.PROCESS_VM_OPERATION | win32.PROCESS_SUSPEND_RESUME | win32.PROCESS_QUERY_INFORMATION ) # Freeze the process. self.suspend() try: # For each memory region in the snapshot... for old_mbi in snapshot: # If the region matches, restore it directly. new_mbi = self.mquery(old_mbi.BaseAddress) if new_mbi.BaseAddress == old_mbi.BaseAddress and \ new_mbi.RegionSize == old_mbi.RegionSize: self.__restore_mbi(hProcess, new_mbi, old_mbi, bSkipMappedFiles) # If the region doesn't match, restore it page by page. else: # We need a copy so we don't corrupt the snapshot. old_mbi = win32.MemoryBasicInformation(old_mbi) # Get the overlapping range of pages. old_start = old_mbi.BaseAddress old_end = old_start + old_mbi.RegionSize new_start = new_mbi.BaseAddress new_end = new_start + new_mbi.RegionSize if old_start > new_start: start = old_start else: start = new_start if old_end < new_end: end = old_end else: end = new_end # Restore each page in the overlapping range. step = MemoryAddresses.pageSize old_mbi.RegionSize = step new_mbi.RegionSize = step address = start while address < end: old_mbi.BaseAddress = address new_mbi.BaseAddress = address self.__restore_mbi(hProcess, new_mbi, old_mbi, bSkipMappedFiles, bSkipOnError) address = address + step # Resume execution. finally: self.resume()
0.004265
def _check_curtailment_target(curtailment, curtailment_target,
                              curtailment_key):
    """
    Raises an error if curtailment target was not met in any time step.

    Parameters
    -----------
    curtailment : :pandas:`pandas:DataFrame<dataframe>`
        Dataframe containing the curtailment in kW per generator and time
        step. Index is a :pandas:`pandas.DatetimeIndex<datetimeindex>`,
        columns are the generator representatives.
    curtailment_target : :pandas:`pandas.Series<series>`
        The curtailment in kW that was to be distributed amongst the
        generators. Index of the series is a
        :pandas:`pandas.DatetimeIndex<datetimeindex>`.
    curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
        The technology and weather cell ID if :obj:`tuple` or only the
        technology if :obj:`str` the curtailment was specified for.

    """
    if not (abs(curtailment.sum(axis=1) - curtailment_target) < 1e-1).all():
        message = 'Curtailment target not met for {}.'.format(curtailment_key)
        logging.error(message)
        raise TypeError(message)
0.000882
def show_all(key):
    '''
    Context:
        - abbr
        - metadata
        - bill
        - sources
        - nav_active

    Templates:
        - billy/web/public/bill_all_{key}.html
          - where key is passed in, like "actions", etc.
    '''
    def func(request, abbr, session, bill_id, key):
        # get fixed version
        fixed_bill_id = fix_bill_id(bill_id)
        # redirect if URL's id isn't fixed id without spaces
        if fixed_bill_id.replace(' ', '') != bill_id:
            return redirect('bill', abbr=abbr, session=session,
                            bill_id=fixed_bill_id.replace(' ', ''))
        bill = db.bills.find_one({settings.LEVEL_FIELD: abbr,
                                  'session': session,
                                  'bill_id': fixed_bill_id})
        if bill is None:
            raise Http404('no bill found {0} {1} {2}'.format(abbr, session,
                                                             bill_id))
        return render(request, templatename('bill_all_%s' % key),
                      dict(abbr=abbr,
                           metadata=Metadata.get_object(abbr),
                           bill=bill,
                           sources=bill['sources'],
                           nav_active='bills'))
    return func
0.000803
def intersection(self, other):
    """
    Return the intersection between this time interval
    and the given time interval, or ``None`` if the two intervals do not overlap.

    :rtype: :class:`~aeneas.exacttiming.TimeInterval` or ``NoneType``
    """
    relative_position = self.relative_position_of(other)
    if relative_position in [
        self.RELATIVE_POSITION_PP_C,
        self.RELATIVE_POSITION_PI_LC,
        self.RELATIVE_POSITION_PI_LG,
        self.RELATIVE_POSITION_PI_CG,
        self.RELATIVE_POSITION_IP_B,
        self.RELATIVE_POSITION_II_LB,
    ]:
        return TimeInterval(begin=self.begin, end=self.begin)
    if relative_position in [
        self.RELATIVE_POSITION_IP_E,
        self.RELATIVE_POSITION_II_EG,
    ]:
        return TimeInterval(begin=self.end, end=self.end)
    if relative_position in [
        self.RELATIVE_POSITION_II_BI,
        self.RELATIVE_POSITION_II_BE,
        self.RELATIVE_POSITION_II_II,
        self.RELATIVE_POSITION_II_IE,
    ]:
        return TimeInterval(begin=other.begin, end=other.end)
    if relative_position in [
        self.RELATIVE_POSITION_IP_I,
        self.RELATIVE_POSITION_II_LI,
        self.RELATIVE_POSITION_II_LE,
        self.RELATIVE_POSITION_II_LG,
        self.RELATIVE_POSITION_II_BG,
        self.RELATIVE_POSITION_II_IG,
    ]:
        begin = max(self.begin, other.begin)
        end = min(self.end, other.end)
        return TimeInterval(begin=begin, end=end)
    return None
0.001223
def num_to_var_int(x):
    """
    (bitcoin-specific): convert an integer into a variable-length integer
    """
    x = int(x)
    if x < 253:
        return from_int_to_byte(x)
    elif x < 65536:
        return from_int_to_byte(253) + encode(x, 256, 2)[::-1]
    elif x < 4294967296:
        return from_int_to_byte(254) + encode(x, 256, 4)[::-1]
    else:
        return from_int_to_byte(255) + encode(x, 256, 8)[::-1]
0.002353
def alignments(self): """ Get alignments from the SAM/BAM file, subject to filtering. """ referenceIds = self.referenceIds dropUnmapped = self.dropUnmapped dropSecondary = self.dropSecondary dropSupplementary = self.dropSupplementary dropDuplicates = self.dropDuplicates keepQCFailures = self.keepQCFailures storeQueryIds = self.storeQueryIds filterRead = self.filterRead minScore = self.minScore maxScore = self.maxScore scoreTag = self.scoreTag if storeQueryIds: self.queryIds = queryIds = set() lastAlignment = None count = 0 with samfile(self.filename) as samAlignment: for count, alignment in enumerate(samAlignment.fetch(), start=1): if storeQueryIds: queryIds.add(alignment.query_name) if minScore is not None or maxScore is not None: try: score = alignment.get_tag(scoreTag) except KeyError: continue else: if ((minScore is not None and score < minScore) or (maxScore is not None and score > maxScore)): continue # Secondary and supplementary alignments may have a '*' # (pysam returns this as None) SEQ field, indicating that # the previous sequence should be used. This is best # practice according to section 2.5.2 of # https://samtools.github.io/hts-specs/SAMv1.pdf So we use # the last alignment query and quality strings if we get # None as a query sequence. if alignment.query_sequence is None: if lastAlignment is None: raise InvalidSAM( 'pysam produced an alignment (number %d) with no ' 'query sequence without previously giving an ' 'alignment with a sequence.' % count) # Use the previous query sequence and quality. I'm not # making the call to _hardClip dependent on # alignment.cigartuples (as in the else clause below) # because I don't think it's possible for # alignment.cigartuples to be None in this case. If we # have a second match on a query, then it must be # aligned to something (i.e., it cannot be unmapped # with no CIGAR string). The assertion will tell us if # this is ever not the case. assert alignment.cigartuples (alignment.query_sequence, alignment.query_qualities, _) = _hardClip( lastAlignment.query_sequence, lastAlignment.query_qualities, alignment.cigartuples) else: lastAlignment = alignment if alignment.cigartuples: (alignment.query_sequence, alignment.query_qualities, _) = _hardClip( alignment.query_sequence, alignment.query_qualities, alignment.cigartuples) if ((filterRead is None or filterRead(Read(alignment.query_name, alignment.query_sequence, alignment.qual))) and not ( (referenceIds and alignment.reference_name not in referenceIds) or (alignment.is_unmapped and dropUnmapped) or (alignment.is_secondary and dropSecondary) or (alignment.is_supplementary and dropSupplementary) or (alignment.is_duplicate and dropDuplicates) or (alignment.is_qcfail and not keepQCFailures))): yield alignment self.alignmentCount = count
0.000466
def format_volume(citation_elements):
    """format volume number (roman numbers to arabic)

    When the volume number is expressed in roman numbers (CXXII),
    they are converted to their equivalent in arabic numbers (42)
    """
    re_roman = re.compile(re_roman_numbers + u'$', re.UNICODE)
    for el in citation_elements:
        if el['type'] == 'JOURNAL' and re_roman.match(el['volume']):
            el['volume'] = str(roman2arabic(el['volume'].upper()))
    return citation_elements
0.002028
def ensure_indirect_subclass(class_, of):
    """Check whether given is an indirect subclass of another,
    i.e. there exists at least intermediate base between ``of`` and ``class_``.

    :param class_: Class to check
    :param of: Superclass to check against

    :return: ``class_``, if the check succeeds
    :raise TypeError: When the check fails

    .. versionadded:: 0.0.4
    """
    if not is_indirect_subclass(class_, of):
        raise TypeError(
            "expected an indirect subclass of %r, got %s instead" % (
                of, class_.__name__))
    return class_
0.001706
def can_pp_seq_no_be_in_view(self, view_no, pp_seq_no):
    """
    Checks if the `pp_seq_no` could have been in view `view_no`. It will
    return False when the `pp_seq_no` belongs to a later view than `view_no`
    else will return True
    :return:
    """
    if view_no > self.viewNo:
        raise PlenumValueError(
            'view_no', view_no,
            "<= current view_no {}".format(self.viewNo),
            prefix=self
        )
    return view_no == self.viewNo or (
        view_no < self.viewNo and
        self.last_prepared_before_view_change and
        compare_3PC_keys(
            (view_no, pp_seq_no),
            self.last_prepared_before_view_change) >= 0)
0.005571
def _get_field_comment(field, separator=' - '):
    """
    Create SQL comment from field's title and description

    :param field: tableschema-py Field, with optional 'title' and 'description' values
    :param separator:
    :return:

    >>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': 'my_desc'}))
    'my_title - my_desc'
    >>> _get_field_comment(tableschema.Field({'title': 'my_title', 'description': None}))
    'my_title'
    >>> _get_field_comment(tableschema.Field({'title': '', 'description': 'my_description'}))
    'my_description'
    >>> _get_field_comment(tableschema.Field({}))
    ''
    """
    title = field.descriptor.get('title') or ''
    description = field.descriptor.get('description') or ''
    return _get_comment(description, title, separator)
0.006211
def save_chkpt_vars(dic, path):
    """
    Save variables in dic to path.

    Args:
        dic: {name: value}
        path: save as npz if the name ends with '.npz', otherwise save as a checkpoint.
    """
    logger.info("Variables to save to {}:".format(path))
    keys = sorted(list(dic.keys()))
    logger.info(pprint.pformat(keys))

    assert not path.endswith('.npy')
    if path.endswith('.npz'):
        np.savez_compressed(path, **dic)
    else:
        with tf.Graph().as_default(), \
                tf.Session() as sess:
            for k, v in six.iteritems(dic):
                k = get_op_tensor_name(k)[0]
                _ = tf.Variable(name=k, initial_value=v)  # noqa
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            saver.save(sess, path, write_meta_graph=False)
0.00237
def write_module_code(self, module_code):
    """write module-level template code, i.e. that which
    is enclosed in <%! %> tags in the template."""
    for n in module_code:
        self.printer.start_source(n.lineno)
        self.printer.write_indented_block(n.text)
0.00692
def do_execute(self):
    """
    The actual execution of the actor.

    :return: None if successful, otherwise error message
    :rtype: str
    """
    if isinstance(self.input.payload, Instances):
        inst = None
        data = self.input.payload
    elif isinstance(self.input.payload, Instance):
        inst = self.input.payload
        data = inst.dataset

    index = str(self.resolve_option("index"))
    unset = bool(self.resolve_option("unset"))
    if unset:
        data.no_class()
    else:
        if index == "first":
            data.class_is_first()
        elif index == "last":
            data.class_is_last()
        else:
            data.class_index = int(index) - 1

    if inst is None:
        self._output.append(Token(data))
    else:
        self._output.append(Token(inst))
    return None
0.002141
def query(self, query=None, callback=None): """Performs a query against the index using the passed lunr.Query object. If performing programmatic queries against the index, this method is preferred over `lunr.Index.search` so as to avoid the additional query parsing overhead. Args: query (lunr.Query): A configured Query to perform the search against, use `create_query` to get a preconfigured object or use `callback` for convenience. callback (callable): An optional function taking a single Query object result of `create_query` for further configuration. """ if query is None: query = self.create_query() if callback is not None: callback(query) if len(query.clauses) == 0: logger.warning( "Attempting a query with no clauses. Please add clauses by " "either using the `callback` argument or using `create_query` " "to create a preconfigured Query, manually adding clauses and " "passing it as the `query` argument." ) return [] # for each query clause # * process terms # * expand terms from token set # * find matching documents and metadata # * get document vectors # * score documents matching_fields = {} query_vectors = {field: Vector() for field in self.fields} term_field_cache = {} required_matches = {} prohibited_matches = defaultdict(set) for clause in query.clauses: # Unless the pipeline has been disabled for this term, which is # the case for terms with wildcards, we need to pass the clause # term through the search pipeline. A pipeline returns an array # of processed terms. Pipeline functions may expand the passed # term, which means we may end up performing multiple index lookups # for a single query term. if clause.use_pipeline: terms = self.pipeline.run_string(clause.term, {"fields": clause.fields}) else: terms = [clause.term] clause_matches = CompleteSet() for term in terms: # Each term returned from the pipeline needs to use the same # query clause object, e.g. the same boost and or edit distance # The simplest way to do this is to re-use the clause object # but mutate its term property. clause.term = term # From the term in the clause we create a token set which will # then be used to intersect the indexes token set to get a list # of terms to lookup in the inverted index term_token_set = TokenSet.from_clause(clause) expanded_terms = self.token_set.intersect(term_token_set).to_list() # If a term marked as required does not exist in the TokenSet # it is impossible for the search to return any matches. # We set all the field-scoped required matches set to empty # and stop examining further clauses if ( len(expanded_terms) == 0 and clause.presence == QueryPresence.REQUIRED ): for field in clause.fields: required_matches[field] = CompleteSet() break for expanded_term in expanded_terms: posting = self.inverted_index[expanded_term] term_index = posting["_index"] for field in clause.fields: # For each field that this query term is scoped by # (by default all fields are in scope) we need to get # all the document refs that have this term in that # field. # # The posting is the entry in the invertedIndex for the # matching term from above. field_posting = posting[field] matching_document_refs = field_posting.keys() term_field = expanded_term + "/" + field matching_documents_set = set(matching_document_refs) # If the presence of this term is required, ensure that # the matching documents are added to the set of # required matches for this clause. 
if clause.presence == QueryPresence.REQUIRED: clause_matches = clause_matches.union( matching_documents_set ) if field not in required_matches: required_matches[field] = CompleteSet() # If the presence of this term is prohibited, # ensure that the matching documents are added to the # set of prohibited matches for this field, creating # that set if it does not exist yet. elif clause.presence == QueryPresence.PROHIBITED: prohibited_matches[field] = prohibited_matches[field].union( matching_documents_set ) # prohibited matches should not be part of the # query vector used for similarity scoring and no # metadata should be extracted so we continue # to the next field continue # The query field vector is populated using the # term_index found for the term an a unit value with # the appropriate boost # Using upsert because there could already be an entry # in the vector for the term we are working with. # In that case we just add the scores together. query_vectors[field].upsert( term_index, clause.boost, lambda a, b: a + b ) # If we've already seen this term, field combo then # we've already collected the matching documents and # metadata, no need to go through all that again if term_field in term_field_cache: continue for matching_document_ref in matching_document_refs: # All metadata for this term/field/document triple # are then extracted and collected into an instance # of lunr.MatchData ready to be returned in the # query results matching_field_ref = FieldRef(matching_document_ref, field) metadata = field_posting[str(matching_document_ref)] if str(matching_field_ref) not in matching_fields: matching_fields[str(matching_field_ref)] = MatchData( expanded_term, field, metadata ) else: matching_fields[str(matching_field_ref)].add( expanded_term, field, metadata ) term_field_cache[term_field] = True # if the presence was required we need to update the required # matches field sets, we do this after all fields for the term # have collected their matches because the clause terms presence # is required in _any_ of the fields, not _all_ of the fields if clause.presence == QueryPresence.REQUIRED: for field in clause.fields: required_matches[field] = required_matches[field].intersection( clause_matches ) # We need to combine the field scoped required and prohibited # matching documents inot a global set of required and prohibited # matches all_required_matches = CompleteSet() all_prohibited_matches = set() for field in self.fields: if field in required_matches: all_required_matches = all_required_matches.intersection( required_matches[field] ) if field in prohibited_matches: all_prohibited_matches = all_prohibited_matches.union( prohibited_matches[field] ) matching_field_refs = matching_fields.keys() results = [] matches = {} # If the query is negated (only contains prohibited terms) # we need to get _all_ field_refs currently existing in the index. # This to avoid any costs of getting all field regs unnecessarily # Additionally, blank match data must be created to correctly populate # the results if query.is_negated(): matching_field_refs = list(self.field_vectors.keys()) for matching_field_ref in matching_field_refs: field_ref = FieldRef.from_string(matching_field_ref) matching_fields[matching_field_ref] = MatchData() for matching_field_ref in matching_field_refs: # Currently we have document fields that match the query, but we # need to return documents. The matchData and scores are combined # from multiple fields belonging to the same document. 
# # Scores are calculated by field, using the query vectors created # above, and combined into a final document score using addition. field_ref = FieldRef.from_string(matching_field_ref) doc_ref = field_ref.doc_ref if doc_ref not in all_required_matches or doc_ref in all_prohibited_matches: continue field_vector = self.field_vectors[matching_field_ref] score = query_vectors[field_ref.field_name].similarity(field_vector) try: doc_match = matches[doc_ref] doc_match["score"] += score doc_match["match_data"].combine(matching_fields[matching_field_ref]) except KeyError: match = { "ref": doc_ref, "score": score, "match_data": matching_fields[matching_field_ref], } matches[doc_ref] = match results.append(match) return sorted(results, key=lambda a: a["score"], reverse=True)
0.001076
def token(self) -> str:
    """
    Return token identifying role to indy-sdk.

    :return: token: 'STEWARD', 'TRUSTEE', 'TRUST_ANCHOR', or None (for USER)
    """
    return self.value[0] if self in (Role.USER, Role.ROLE_REMOVE) else self.name
0.015038
def call(self, fn, *args, **kwargs):
    """
    Like :meth:`call_async`, but block until the return value is available.
    Equivalent to::

        call_async(fn, *args, **kwargs).get().unpickle()

    :returns:
        The function's return value.
    :raises mitogen.core.CallError:
        An exception was raised in the remote context during execution.
    """
    receiver = self.call_async(fn, *args, **kwargs)
    return receiver.get().unpickle(throw_dead=False)
0.003883
def poll(self, batch_id, retry_seconds=None, back_off=None, timeout=None, halt_on_error=True): """Poll Batch status to ThreatConnect API. .. code-block:: javascript { "status": "Success", "data": { "batchStatus": { "id":3505, "status":"Completed", "errorCount":0, "successCount":0, "unprocessCount":0 } } } Args: batch_id (str): The ID returned from the ThreatConnect API for the current batch job. retry_seconds (int): The base number of seconds used for retries when job is not completed. back_off (float): A multiplier to use for backing off on each poll attempt when job has not completed. timeout (int, optional): The number of seconds before the poll should timeout. halt_on_error (bool, default:True): If True any exception will raise an error. Returns: dict: The batch status returned from the ThreatConnect API. """ # check global setting for override if self.halt_on_poll_error is not None: halt_on_error = self.halt_on_poll_error # initial poll interval if self._poll_interval is None and self._batch_data_count is not None: # calculate poll_interval base off the number of entries in the batch data # with a minimum value of 5 seconds. self._poll_interval = max(math.ceil(self._batch_data_count / 300), 5) elif self._poll_interval is None: # if not able to calculate poll_interval default to 15 seconds self._poll_interval = 15 # poll retry back_off factor if back_off is None: poll_interval_back_off = 2.5 else: poll_interval_back_off = float(back_off) # poll retry seconds if retry_seconds is None: poll_retry_seconds = 5 else: poll_retry_seconds = int(retry_seconds) # poll timeout if timeout is None: timeout = self.poll_timeout else: timeout = int(timeout) params = {'includeAdditional': 'true'} poll_count = 0 poll_time_total = 0 data = {} while True: poll_count += 1 poll_time_total += self._poll_interval time.sleep(self._poll_interval) self.tcex.log.info('Batch poll time: {} seconds'.format(poll_time_total)) try: # retrieve job status r = self.tcex.session.get('/v2/batch/{}'.format(batch_id), params=params) if not r.ok or 'application/json' not in r.headers.get('content-type', ''): self.tcex.handle_error(545, [r.status_code, r.text], halt_on_error) return data data = r.json() if data.get('status') != 'Success': self.tcex.handle_error(545, [r.status_code, r.text], halt_on_error) except Exception as e: self.tcex.handle_error(540, [e], halt_on_error) if data.get('data', {}).get('batchStatus', {}).get('status') == 'Completed': # store last 5 poll times to use in calculating average poll time modifier = poll_time_total * 0.7 self._poll_interval_times = self._poll_interval_times[-4:] + [modifier] weights = [1] poll_interval_time_weighted_sum = 0 for poll_interval_time in self._poll_interval_times: poll_interval_time_weighted_sum += poll_interval_time * weights[-1] # weights will be [1, 1.5, 2.25, 3.375, 5.0625] for all 5 poll times depending # on how many poll times are available. weights.append(weights[-1] * 1.5) # pop off the last weight so its not added in to the sum weights.pop() # calculate the weighted average of the last 5 poll times self._poll_interval = math.floor(poll_interval_time_weighted_sum / sum(weights)) if poll_count == 1: # if completed on first poll, reduce poll interval. 
self._poll_interval = self._poll_interval * 0.85 self.tcex.log.debug('Batch Status: {}'.format(data)) return data # update poll_interval for retry with max poll time of 20 seconds self._poll_interval = min( poll_retry_seconds + int(poll_count * poll_interval_back_off), 20 ) # time out poll to prevent App running indefinitely if poll_time_total >= timeout: self.tcex.handle_error(550, [timeout], True)
0.004387
def _report_container_count(self, containers_by_id):
    """Report container count per state"""
    m_func = FUNC_MAP[GAUGE][self.use_histogram]

    per_state_count = defaultdict(int)

    filterlambda = lambda ctr: not self._is_container_excluded(ctr)
    containers = list(filter(filterlambda, containers_by_id.values()))

    for ctr in containers:
        per_state_count[ctr.get('State', '')] += 1

    for state in per_state_count:
        if state:
            m_func(self, 'docker.container.count', per_state_count[state],
                   tags=['container_state:%s' % state.lower()])
0.006494
def Cross(width=3, color=0):
    """Draws a cross centered in the target area

    :param width: width of the lines of the cross in pixels
    :type width: int
    :param color: color of the lines of the cross
    :type color: pygame.Color
    """
    return Overlay(Line("h", width, color), Line("v", width, color))
0.003165
def copy_contents_to(self, destination):
    """
    Copies the contents of this directory to the given destination.
    Returns a Folder object that represents the moved directory.
    """
    logger.info("Copying contents of %s to %s" % (self, destination))
    target = Folder(destination)
    target.make()
    self._create_target_tree(target)
    dir_util.copy_tree(self.path, unicode(target))
    return target
0.004386
def get_xdg_home(self):
    # type: () -> str
    """
    Returns the value specified in the XDG_CONFIG_HOME environment variable
    or the appropriate default.
    """
    config_home = getenv('XDG_CONFIG_HOME', '')
    if config_home:
        self._log.debug('XDG_CONFIG_HOME is set to %r', config_home)
        return expanduser(join(config_home, self.group_name, self.app_name))
    return expanduser('~/.config/%s/%s' % (self.group_name, self.app_name))
0.008016
def load_bytecode(self, f):
    """Loads bytecode from a file or file like object."""
    # make sure the magic header is correct
    magic = f.read(len(bc_magic))
    if magic != bc_magic:
        self.reset()
        return
    # the source code of the file changed, we need to reload
    checksum = pickle.load(f)
    if self.checksum != checksum:
        self.reset()
        return
    self.code = marshal_load(f)
0.004292
def _feature_most_population(self, results):
    """
    Find the placename with the largest population and return its country.
    More population is a rough measure of importance.

    Paramaters
    ----------
    results: dict
        output of `query_geonames`

    Returns
    -------
    most_pop: str
        ISO code of country of place with largest population,
        or empty string if none
    """
    try:
        populations = [i['population'] for i in results['hits']['hits']]
        most_pop = results['hits']['hits'][np.array(populations).astype("int").argmax()]
        return most_pop['country_code3']
    except Exception as e:
        return ""
0.004032
def _addToBuffers(self, logname, data):
    """
    Add data to the buffer for logname
    Start a timer to send the buffers if BUFFER_TIMEOUT elapses.
    If adding data causes the buffer size to grow beyond BUFFER_SIZE, then
    the buffers will be sent.
    """
    n = len(data)

    self.buflen += n
    self.buffered.append((logname, data))
    if self.buflen > self.BUFFER_SIZE:
        self._sendBuffers()
    elif not self.sendBuffersTimer:
        self.sendBuffersTimer = self._reactor.callLater(
            self.BUFFER_TIMEOUT, self._bufferTimeout)
0.003247
def is_file(self, follow_symlinks=True):
    """
    Return True if this entry is a file or a symbolic link pointing to a
    file; return False if the entry is or points to a directory or other
    non-file entry, or if it doesn’t exist anymore.

    The result is cached on the os.DirEntry object.

    Args:
        follow_symlinks (bool): Follow symlinks.
            Not supported on cloud storage objects.

    Returns:
        bool: True if directory exists.
    """
    return self._system.isfile(
        path=self._path, client_kwargs=self._client_kwargs)
0.003241
def play_env_problem_randomly(env_problem, num_steps):
    """Plays the env problem by randomly sampling actions for `num_steps`."""
    # Reset all environments.
    env_problem.reset()

    # Play all environments, sampling random actions each time.
    for _ in range(num_steps):
        # Sample batch_size actions from the action space and stack them.
        actions = np.stack([env_problem.action_space.sample() for _ in range(
            env_problem.batch_size)])

        # Execute actions, observations are stored in `env_problem`.
        _, _, dones, _ = env_problem.step(actions)

        # Get the indices where we are done and reset those.
        env_problem.reset(indices=done_indices(dones))
0.008547
def save_all(self):
    """
    Save all editors.
    """
    initial_index = self.currentIndex()
    for i in range(self.count()):
        try:
            self.setCurrentIndex(i)
            self.save_current()
        except AttributeError:
            pass
    self.setCurrentIndex(initial_index)
0.005814
def context_loader(self, callback):
    """
    Decorate a method that receives a key id and returns an object or dict
    that will be available in the request context as g.cavage_context
    """
    if not callback or not callable(callback):
        raise Exception("Please pass in a callable that loads your context.")

    self.context_loader_callback = callback
    return callback
0.007194
def update(self, a, b, c, d):
    """
    Update contingency table with new values without creating a new object.
    """
    self.table.ravel()[:] = [a, b, c, d]
    self.N = self.table.sum()
0.009434
def get_target_state():
    """SDP target State.

    Returns the target state; allowed target states and time updated
    """
    sdp_state = SDPState()
    errval, errdict = _check_status(sdp_state)
    if errval == "error":
        LOG.debug(errdict['reason'])
        return dict(
            current_target_state="unknown",
            last_updated="unknown",
            reason=errdict['reason']
        )

    LOG.debug('Getting target state')
    target_state = sdp_state.target_state
    LOG.debug('Target state = %s', target_state)

    return dict(
        current_target_state=target_state,
        allowed_target_states=sdp_state.allowed_target_states[
            sdp_state.current_state],
        last_updated=sdp_state.target_timestamp.isoformat())
0.001312
def _loop_wrapper_func(func, args, shared_mem_run, shared_mem_pause, interval, sigint, sigterm, name, logging_level, conn_send, func_running, log_queue): """ to be executed as a separate process (that's why this functions is declared static) """ prefix = get_identifier(name) + ' ' global log log = logging.getLogger(__name__+".log_{}".format(get_identifier(name, bold=False))) log.setLevel(logging_level) log.addHandler(QueueHandler(log_queue)) sys.stdout = StdoutPipe(conn_send) log.debug("enter wrapper_func") SIG_handler_Loop(sigint, sigterm, log, prefix) func_running.value = True error = False while shared_mem_run.value: try: # in pause mode, simply sleep if shared_mem_pause.value: quit_loop = False else: # if not pause mode -> call func and see what happens try: quit_loop = func(*args) except LoopInterruptError: raise except Exception as e: log.error("error %s occurred in loop calling 'func(*args)'", type(e)) log.info("show traceback.print_exc()\n%s", traceback.format_exc()) error = True break if quit_loop is True: log.debug("loop stooped because func returned True") break time.sleep(interval) except LoopInterruptError: log.debug("quit wrapper_func due to InterruptedError") break func_running.value = False if error: sys.exit(-1) else: log.debug("wrapper_func terminates gracefully") # gets rid of the following warnings # Exception ignored in: <_io.FileIO name='/dev/null' mode='rb'> # ResourceWarning: unclosed file <_io.TextIOWrapper name='/dev/null' mode='r' encoding='UTF-8'> try: if mp.get_start_method() == "spawn": sys.stdin.close() except AttributeError: pass
0.005671
def _smooth_hpx_map(hpx_map, sigma):
    """ Smooth a healpix map using a Gaussian
    """
    if hpx_map.hpx.ordering == "NESTED":
        ring_map = hpx_map.swap_scheme()
    else:
        ring_map = hpx_map
    ring_data = ring_map.data.copy()
    nebins = len(hpx_map.data)
    smoothed_data = np.zeros((hpx_map.data.shape))
    for i in range(nebins):
        smoothed_data[i] = healpy.sphtfunc.smoothing(
            ring_data[i], sigma=np.radians(sigma), verbose=False)

    smoothed_data.clip(0., 1e99)
    smoothed_ring_map = HpxMap(smoothed_data, ring_map.hpx)
    if hpx_map.hpx.ordering == "NESTED":
        return smoothed_ring_map.swap_scheme()
    return smoothed_ring_map
0.002649
def add_config(self, cfg_or_file=None, **kw_conf):
    """
    Add a configuration entry to this ingredient/experiment.

    Can be called with a filename, a dictionary xor with keyword arguments.

    Supported formats for the config-file so far are: ``json``, ``pickle``
    and ``yaml``.

    The resulting dictionary will be converted into a
    :class:`~sacred.config_scope.ConfigDict`.

    :param cfg_or_file: Configuration dictionary of filename of config file
                        to add to this ingredient/experiment.
    :type cfg_or_file: dict or str
    :param kw_conf: Configuration entries to be added to this
                    ingredient/experiment.
    """
    self.configurations.append(self._create_config_dict(cfg_or_file, kw_conf))
0.002291
def generic_export(request, model_name=None):
    """
    Generic view configured through settings.TABLIB_MODELS

    Usage:
        1. Add the view to ``urlpatterns`` in ``urls.py``::

            url(r'export/(?P<model_name>[^/]+)/$',
                "django_tablib.views.generic_export"),

        2. Create the ``settings.TABLIB_MODELS`` dictionary using model names
           as keys the allowed lookup operators as values, if any::

            TABLIB_MODELS = {
                'myapp.simple': None,
                'myapp.related': {'simple__title': ('exact', 'iexact')},
            }

        3. Open ``/export/myapp.simple`` or
           ``/export/myapp.related/?simple__title__iexact=test``
    """
    if model_name not in settings.TABLIB_MODELS:
        raise Http404()

    model = get_model(*model_name.split(".", 2))
    if not model:
        raise ImproperlyConfigured(
            "Model {0} is in settings.TABLIB_MODELS but"
            " could not be loaded".format(model_name))

    qs = model._default_manager.all()

    # Filtering may be allowed based on TABLIB_MODELS:
    filter_settings = settings.TABLIB_MODELS[model_name]
    filters = {}

    for k, v in request.GET.items():
        try:
            # Allow joins (they'll be checked below) but chop off the trailing
            # lookup operator:
            rel, lookup_type = k.rsplit("__", 1)
        except ValueError:
            rel = k
            lookup_type = "exact"

        allowed_lookups = filter_settings.get(rel, None)

        if allowed_lookups is None:
            return HttpResponseBadRequest(
                "Filtering on {0} is not allowed".format(rel)
            )
        elif lookup_type not in allowed_lookups:
            return HttpResponseBadRequest(
                "{0} may only be filtered using {1}".format(
                    k, " ".join(allowed_lookups)))
        else:
            filters[str(k)] = v

    if filters:
        qs = qs.filter(**filters)

    return export(request, model=model, queryset=qs)
0.000494
def findConfigFile(cls, filename):
    """ Search the configuration path (specified via the NTA_CONF_PATH
    environment variable) for the given filename. If found, return the
    complete path to the file.

    :param filename: (string) name of file to locate
    """
    paths = cls.getConfigPaths()
    for p in paths:
        testPath = os.path.join(p, filename)
        if os.path.isfile(testPath):
            return os.path.join(p, filename)
0.006787
def save(self, filename="temp.pkl"):
    """
    Save TM in the filename specified above
    """
    output = open(filename, 'wb')
    cPickle.dump(self.tm, output, protocol=cPickle.HIGHEST_PROTOCOL)
0.005025
def is_acquired(self):
    """Check if the lock is acquired"""
    values = self.client.get(self.key)
    return six.b(self._uuid) in values
0.013158
def is_analysis_attachment_allowed(self, analysis):
    """Checks if the analysis
    """
    if analysis.getAttachmentOption() not in ["p", "r"]:
        return False
    if api.get_workflow_status_of(analysis) in ["retracted"]:
        return False
    return True
0.006803
def run(self, steps=10):
    """Executes up to `steps` instructions."""
    try:
        super(GeneticMachine, self).run(steps)
        self._error = False
    except StopIteration:
        self._error = False
    except Exception:
        self._error = True
0.006897
def register_controller(self, module, required=True, min_number=1): """Loads a controller module and returns its loaded devices. This is to be used in a mobly test class. Args: module: A module that follows the controller module interface. required: A bool. If True, failing to register the specified controller module raises exceptions. If False, the objects failed to instantiate will be skipped. min_number: An integer that is the minimum number of controller objects to be created. Default is one, since you should not register a controller module without expecting at least one object. Returns: A list of controller objects instantiated from controller_module, or None if no config existed for this controller and it was not a required controller. Raises: ControllerError: * The controller module has already been registered. * The actual number of objects instantiated is less than the * `min_number`. * `required` is True and no corresponding config can be found. * Any other error occurred in the registration process. """ verify_controller_module(module) # Use the module's name as the ref name module_ref_name = module.__name__.split('.')[-1] if module_ref_name in self._controller_objects: raise signals.ControllerError( 'Controller module %s has already been registered. It cannot ' 'be registered again.' % module_ref_name) # Create controller objects. module_config_name = module.MOBLY_CONTROLLER_CONFIG_NAME if module_config_name not in self.controller_configs: if required: raise signals.ControllerError( 'No corresponding config found for %s' % module_config_name) logging.warning( 'No corresponding config found for optional controller %s', module_config_name) return None try: # Make a deep copy of the config to pass to the controller module, # in case the controller module modifies the config internally. original_config = self.controller_configs[module_config_name] controller_config = copy.deepcopy(original_config) objects = module.create(controller_config) except: logging.exception( 'Failed to initialize objects for controller %s, abort!', module_config_name) raise if not isinstance(objects, list): raise signals.ControllerError( 'Controller module %s did not return a list of objects, abort.' % module_ref_name) # Check we got enough controller objects to continue. actual_number = len(objects) if actual_number < min_number: module.destroy(objects) raise signals.ControllerError( 'Expected to get at least %d controller objects, got %d.' % (min_number, actual_number)) # Save a shallow copy of the list for internal usage, so tests can't # affect internal registry by manipulating the object list. self._controller_objects[module_ref_name] = copy.copy(objects) logging.debug('Found %d objects for controller %s', len(objects), module_config_name) self._controller_modules[module_ref_name] = module return objects
0.001082
def __send_buffer(self):
    """ Sends the contents of self.__out_buffer to serial device
    :return: Number of bytes written
    """
    bytes_written = self.serial.write(self.__out_buffer.raw)
    if self.DEBUG_MODE:
        print("Wrote: '{}'".format(binascii.hexlify(self.__out_buffer.raw)))
    if bytes_written != len(self.__out_buffer):
        raise IOError("{} bytes written for output buffer of size {}".format(
            bytes_written, len(self.__out_buffer)))
    return bytes_written
0.008183
def session_path(cls, project, session):
    """Return a fully-qualified session string."""
    return google.api_core.path_template.expand(
        'projects/{project}/agent/sessions/{session}',
        project=project,
        session=session,
    )
0.007273
def get_backend_router_id_from_hostname(self, hostname):
    """Finds the backend router Id that matches the hostname given

    No way to use an objectFilter to find a backendRouter, so we have to
    search the hard way.
    """
    results = self.client.call('SoftLayer_Network_Pod', 'getAllObjects')
    return [result['backendRouterId'] for result in results
            if result['backendRouterName'] == hostname.lower()]
0.009281
def obj_for_name(obj_name):
    """
    Instantiate class or function
    :param obj_name: class or function name
    :return: instance of class or function
    """
    parts = obj_name.split('.')
    module = ".".join(parts[:-1])
    m = __import__( module )
    for comp in parts[1:]:
        m = getattr(m, comp)
    return m
0.009146
def md( self, url, width="original"): """*generate a multimarkdown image link viewable anywhere (no sign-in needed for private photos)* **Key Arguments:** - ``url`` -- the share URL for the flickr image (or just the unique photoid) - ``width`` -- the pixel width of the fully resolved image. Default *original*. [75, 100, 150, 240, 320, 500, 640, 800, 1024, 1600, 2048] **Return:** - ``md`` -- the image reference link in multi-markdown syntax **Usage:** To return the markdown markup for an image at a given Flickr share URL: .. code-block:: python from picaxe import picaxe Flickr = picaxe( log=log, settings=settings ) mdLink = Flickr.md( url="https://www.flickr.com/photos/92344916@N06/30455211086" width=1024 ) """ self.log.info('starting the ``md_image`` method') images, title, desc, photoId = self.get_photo_metadata(url) if len(title) == 0: tag = photoId else: tag = "%(title)s %(photoId)s" % locals() image = images[str(width)] if width == "original": pxWidth = 1024 else: pxWidth = width md = """![%(title)s][%(tag)s] [%(tag)s]: %(image)s title="%(title)s" width=600px """ % locals() self.log.info('completed the ``md_image`` method') return md
0.003766
def pick_keys(self, keys, use="", alg=""):
    """
    The assumption is that upper layer has made certain you only get
    keys you can use.

    :param alg: The crypto algorithm
    :param use: What the key should be used for
    :param keys: A list of JWK instances
    :return: A list of JWK instances that fulfill the requirements
    """
    if not alg:
        alg = self["alg"]

    if alg == "none":
        return []

    _k = self.alg2keytype(alg)
    if _k is None:
        logger.error("Unknown algorithm '%s'" % alg)
        raise ValueError('Unknown cryptography algorithm')

    logger.debug("Picking key by key type={0}".format(_k))
    _kty = [_k.lower(), _k.upper(), _k.lower().encode("utf-8"),
            _k.upper().encode("utf-8")]
    _keys = [k for k in keys if k.kty in _kty]
    try:
        _kid = self["kid"]
    except KeyError:
        try:
            _kid = self.jwt.headers["kid"]
        except (AttributeError, KeyError):
            _kid = None

    logger.debug("Picking key based on alg={0}, kid={1} and use={2}".format(
        alg, _kid, use))
    pkey = []
    for _key in _keys:
        logger.debug(
            "Picked: kid:{}, use:{}, kty:{}".format(
                _key.kid, _key.use, _key.kty))
        if _kid:
            if _kid != _key.kid:
                continue
        if use and _key.use and _key.use != use:
            continue
        if alg and _key.alg and _key.alg != alg:
            continue
        pkey.append(_key)
    return pkey
0.00179
def non_neighbors(graph, node, t=None):
    """Returns the non-neighbors of the node in the graph at time t.

    Parameters
    ----------
    graph : DyNetx graph
        Graph to find neighbors.

    node : node
        The node whose neighbors will be returned.

    t : snapshot id (default=None)
        If None the non-neighbors are identified on the flattened graph.

    Returns
    -------
    non_neighbors : iterator
        Iterator of nodes in the graph that are not neighbors of the node.
    """
    if graph.is_directed():
        values = chain(graph.predecessors(node, t=t), graph.successors(node, t=t))
    else:
        values = graph.neighbors(node, t=t)

    nbors = set(values) | {node}
    return (nnode for nnode in graph if nnode not in nbors)
0.00243
def value_to_db(self, value):
    """
    Returns field's single value prepared for saving into a database.
    """
    assert isinstance(value, datetime.date)
    assert not isinstance(value, datetime.datetime)
    try:
        value = value - datetime.date(year=1970, month=1, day=1)
    except OverflowError:
        raise tldap.exceptions.ValidationError("is too big a date")
    return str(value.days).encode("utf_8")
0.006696
def group_alleles_by_start_end_Xbp(arr, bp=28):
    """Group alleles by matching ends

    Args:
        arr (numpy.array): 2D int matrix of alleles
        bp (int): length of ends to group by

    Returns:
        dict of lists: key of start + end strings to list of indices of
        alleles with matching ends
    """
    starts = arr[:,0:bp]
    ends = arr[:,-bp:]
    starts_ends_idxs = defaultdict(list)
    l, seq_len = arr.shape
    for i in range(l):
        start_i = starts[i]
        end_i = ends[i]
        start_i_str = ''.join([str(x) for x in start_i])
        end_i_str = ''.join([str(x) for x in end_i])
        starts_ends_idxs[start_i_str + end_i_str].append(i)
    return starts_ends_idxs
0.00569
def get_romfile_path(game, inttype=Integrations.DEFAULT):
    """
    Return the path to a given game's romfile
    """
    for extension in EMU_EXTENSIONS.keys():
        possible_path = get_file_path(game, "rom" + extension, inttype)
        if possible_path:
            return possible_path

    raise FileNotFoundError("No romfiles found for game: %s" % game)
0.002747
def load_publickey(type, buffer):
    """
    Load a public key from a buffer.

    :param type: The file type (one of :data:`FILETYPE_PEM`,
        :data:`FILETYPE_ASN1`).
    :param buffer: The buffer the key is stored in.
    :type buffer: A Python string object, either unicode or bytestring.
    :return: The PKey object.
    :rtype: :class:`PKey`
    """
    if isinstance(buffer, _text_type):
        buffer = buffer.encode("ascii")

    bio = _new_mem_buf(buffer)

    if type == FILETYPE_PEM:
        evp_pkey = _lib.PEM_read_bio_PUBKEY(
            bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)
    elif type == FILETYPE_ASN1:
        evp_pkey = _lib.d2i_PUBKEY_bio(bio, _ffi.NULL)
    else:
        raise ValueError("type argument must be FILETYPE_PEM or FILETYPE_ASN1")

    if evp_pkey == _ffi.NULL:
        _raise_current_error()

    pkey = PKey.__new__(PKey)
    pkey._pkey = _ffi.gc(evp_pkey, _lib.EVP_PKEY_free)
    pkey._only_public = True
    return pkey
0.001035
def max_runs_reached(self):
    """
    :return: whether all file paths have been processed max_runs times
    """
    if self._max_runs == -1:  # Unlimited runs.
        return False
    for file_path in self._file_paths:
        if self._run_count[file_path] < self._max_runs:
            return False
    if self._run_count[self._heart_beat_key] < self._max_runs:
        return False
    return True
0.004474
def lock(instance_id, profile=None, **kwargs):
    '''
    Lock an instance

    instance_id
        ID of the instance to be locked

    CLI Example:

    .. code-block:: bash

        salt '*' nova.lock 1138
    '''
    conn = _auth(profile, **kwargs)
    return conn.lock(instance_id)
0.003472
def split_markers_from_line(line):
    # type: (AnyStr) -> Tuple[AnyStr, Optional[AnyStr]]
    """Split markers from a dependency"""
    if not any(line.startswith(uri_prefix) for uri_prefix in SCHEME_LIST):
        marker_sep = ";"
    else:
        marker_sep = "; "
    markers = None
    if marker_sep in line:
        line, markers = line.split(marker_sep, 1)
        markers = markers.strip() if markers else None
    return line, markers
0.002252
def _determine_doubled_obj(self):
    """Return the target object.

    Returns the object that should be treated as the target object. For
    partial doubles, this will be the same as ``self.obj``, but for pure
    doubles, it's pulled from the special ``_doubles_target`` attribute.

    :return: The object to be doubled.
    :rtype: object
    """
    if isinstance(self.obj, ObjectDouble):
        return self.obj._doubles_target
    else:
        return self.obj
0.007782
def build_job_configs(self, args): """Hook to build job configurations """ job_configs = {} ttype = args['ttype'] (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args) if targets_yaml is None: return job_configs config_yaml = 'config.yaml' config_override = args.get('config') if is_not_null(config_override): config_yaml = config_override targets = load_yaml(targets_yaml) nsims_job = args['nsims_job'] first_seed = args['seed'] nsims = args['nsims'] last_seed = first_seed + nsims base_config = dict(sim_profile=args['sim_profile'], roi_baseline=args['roi_baseline'], non_null_src=args['non_null_src'], sim=sim) for target_name, target_list in targets.items(): name_keys = dict(target_type=ttype, target_name=target_name, sim_name=sim, fullpath=True) simdir = NAME_FACTORY.sim_targetdir(**name_keys) config_path = os.path.join(simdir, config_yaml) job_config = base_config.copy() job_config.update(dict(config=config_path, profiles=target_list)) current_seed = first_seed while current_seed < last_seed: fullkey = "%s_%06i" % (target_name, current_seed) logfile = make_nfs_path(os.path.join(simdir, "%s_%s_%06i.log" % (self.linkname, target_name, current_seed))) if nsims_job <= 0 or current_seed + nsims_job >= last_seed: nsims_current = last_seed - current_seed else: nsims_current = nsims_job job_config.update(dict(seed=current_seed, nsims=nsims_current, logfile=logfile)) job_configs[fullkey] = job_config.copy() current_seed += nsims_current return job_configs
0.002231
def list_element_combinations_variadic( elements_specification ):
    """
    This function accepts a specification of lists of elements for each place
    in lists in the form of a list, the elements of which are lists of
    possible elements and returns a list of lists corresponding to the
    combinations of elements of the specification with varying numbers of
    elements.

    For example, the list elements specification [[10, 20], [30, 40], [50, 60]]
    yields the following lists:

    [10]
    [20]
    [10, 30]
    [10, 40]
    [20, 30]
    [20, 40]
    [10, 30, 50]
    [10, 30, 60]
    [10, 40, 50]
    [10, 40, 60]
    [20, 30, 50]
    [20, 30, 60]
    [20, 40, 50]
    [20, 40, 60]
    """
    lists = [
        list(list_generated)
        for index, element_specification in enumerate(elements_specification)
        for list_generated in itertools.product(*elements_specification[:index + 1])
    ]
    return lists
0.004376
def schemaValidateOneElement(self, elem):
    """Validate a branch of a tree, starting with the given @elem. """
    if elem is None:
        elem__o = None
    else:
        elem__o = elem._o
    ret = libxml2mod.xmlSchemaValidateOneElement(self._o, elem__o)
    return ret
0.014388
def wins(self, year):
    """Returns the # of regular season wins a team in a year.

    :year: The year for the season in question.
    :returns: The number of regular season wins.
    """
    schedule = self.schedule(year)
    if schedule.empty:
        return np.nan
    return schedule.query('week_num <= 17').is_win.sum()
0.005587
def register(name, validator):
    """Register a validator instance under the given ``name``."""
    if not isinstance(validator, Validator):
        raise TypeError("Validator instance expected, %s given" % validator.__class__)
    _NAMED_VALIDATORS[name] = validator
0.007463
def farthest(self, dt1, dt2, *dts):
    from functools import reduce
    """
    Get the farthest date from the instance.

    :type dt1: datetime.datetime
    :type dt2: datetime.datetime
    :type dts: list[datetime.datetime,]

    :rtype: DateTime
    """
    dt1 = pendulum.instance(dt1)
    dt2 = pendulum.instance(dt2)
    dts = [dt1, dt2] + [pendulum.instance(x) for x in dts]
    dts = [(abs(self - dt), dt) for dt in dts]

    return max(dts)[1]
0.003937
def save_model(self, steps):
    """
    Saves the model
    :param steps: The number of steps the model was trained for
    :return:
    """
    with self.graph.as_default():
        last_checkpoint = self.model_path + '/model-' + str(steps) + '.cptk'
        self.saver.save(self.sess, last_checkpoint)
        tf.train.write_graph(self.graph, self.model_path,
                             'raw_graph_def.pb', as_text=False)
0.006438
def gradient(self):
    """
    Derivative of the covariance matrix over the parameters of L.

    Returns
    -------
    Lu : ndarray
        Derivative of K over the lower triangular part of L.
    """
    L = self.L
    self._grad_Lu[:] = 0

    for i in range(len(self._tril1[0])):
        row = self._tril1[0][i]
        col = self._tril1[1][i]
        self._grad_Lu[row, :, i] = L[:, col]
        self._grad_Lu[:, row, i] += L[:, col]

    m = len(self._tril1[0])
    for i in range(len(self._diag[0])):
        row = self._diag[0][i]
        col = self._diag[1][i]
        self._grad_Lu[row, :, m + i] = L[row, col] * L[:, col]
        self._grad_Lu[:, row, m + i] += L[row, col] * L[:, col]

    return {"Lu": self._grad_Lu}
0.002448
def download(self, filename, representation, overwrite=False):
    """Download the resolved structure as a file.

    :param string filename: File path to save to
    :param string representation: Desired output representation
    :param bool overwrite: (Optional) Whether to allow overwriting of an existing file
    """
    download(self.input, filename, representation, overwrite,
             self.resolvers, self.get3d, **self.kwargs)
0.008889
def get_response(self, request, *args, **kwargs):
    '''Returns the redirect response for this exception.'''
    # normal process
    response = HttpResponseRedirect(self.redirect_to)
    response[REDIRECT_HEADER_KEY] = self.redirect_to
    return response
0.00722
def register_builtin_message_types():
    """Registers the built-in message types."""
    from .plain import PlainTextMessage
    from .email import EmailTextMessage, EmailHtmlMessage

    register_message_types(PlainTextMessage, EmailTextMessage, EmailHtmlMessage)
0.007576
def _event_for(self, elts):
    """Creates an Event that is set when the bundle with elts is sent."""
    event = Event()
    event.canceller = self._canceller_for(elts, event)
    return event
0.009569
def _readNamelist(currentlyIncluding, cache, namFilename, unique_glyphs):
    """ Detect infinite recursion and prevent it.

    This is an implementation detail of readNamelist.

    Raises NamelistRecursionError if namFilename is in the process of being
    included
    """
    # normalize
    filename = os.path.abspath(os.path.normcase(namFilename))
    if filename in currentlyIncluding:
        raise NamelistRecursionError(filename)
    currentlyIncluding.add(filename)
    try:
        result = __readNamelist(cache, filename, unique_glyphs)
    finally:
        currentlyIncluding.remove(filename)
    return result
0.017036
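A standalone sketch of the same re-entrancy guard, using a plain set and a hypothetical include callback in place of the Namelist machinery above.

def guarded_include(path, currently_including, include):
    # Refuse to include a file that is already being included further up the stack.
    if path in currently_including:
        raise RuntimeError("recursive include: %s" % path)
    currently_including.add(path)
    try:
        return include(path)
    finally:
        currently_including.remove(path)

print(guarded_include("a.nam", set(), lambda p: "contents of %s" % p))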
def send_keyevents(self, keyevent: int) -> None:
    '''Simulates typing keyevents.'''
    self._execute('-s', self.device_sn, 'shell',
                  'input', 'keyevent', str(keyevent))
0.00995
def get_user(self, user_id=None, username=None, email=None):
    """
    Returns the user specified by either ID, username or email.

    Since more than one user can have the same email address, searching by
    that term will return a list of 1 or more User objects. Searching by
    username or ID will return a single User.

    If a user_id that doesn't belong to the current account is searched
    for, a Forbidden exception is raised. When searching by username or
    email, a NotFound exception is raised if there is no matching user.
    """
    if user_id:
        uri = "/users/%s" % user_id
    elif username:
        uri = "/users?name=%s" % username
    elif email:
        uri = "/users?email=%s" % email
    else:
        raise ValueError("You must include one of 'user_id', "
                         "'username', or 'email' when calling get_user().")
    resp, resp_body = self.method_get(uri)
    if resp.status_code == 404:
        raise exc.NotFound("No such user exists.")
    users = resp_body.get("users", [])
    if users:
        return [User(self, user) for user in users]
    else:
        user = resp_body.get("user", {})
        if user:
            return User(self, user)
        else:
            raise exc.NotFound("No such user exists.")
0.002172
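A standalone sketch of the lookup-URI branching above (an ID becomes a path segment, username/email become query parameters); the helper name is hypothetical.

def build_user_uri(user_id=None, username=None, email=None):
    if user_id:
        return "/users/%s" % user_id
    if username:
        return "/users?name=%s" % username
    if email:
        return "/users?email=%s" % email
    raise ValueError("You must include one of 'user_id', 'username', or 'email'.")

print(build_user_uri(email="someone@example.com"))  # /users?email=someone@example.com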
def update_default(cls) -> 'TrustStoresRepository':
    """Update the default trust stores used by SSLyze.

    The latest stores will be downloaded from
    https://github.com/nabla-c0d3/trust_stores_observatory.
    """
    temp_path = mkdtemp()
    try:
        # Download the latest trust stores
        archive_path = join(temp_path, 'trust_stores_as_pem.tar.gz')
        urlretrieve(cls._UPDATE_URL, archive_path)

        # Extract the archive
        extract_path = join(temp_path, 'extracted')
        tarfile.open(archive_path).extractall(extract_path)

        # Copy the files to SSLyze and overwrite the existing stores
        shutil.rmtree(cls._DEFAULT_TRUST_STORES_PATH)
        shutil.copytree(extract_path, cls._DEFAULT_TRUST_STORES_PATH)
    finally:
        shutil.rmtree(temp_path)

    # Re-generate the default repo - not thread-safe
    cls._DEFAULT_REPOSITORY = cls(cls._DEFAULT_TRUST_STORES_PATH)
    return cls._DEFAULT_REPOSITORY
0.002915
def which_roles_can(self, name):
    """Return the roles that hold the permission ``name``, e.g. "Which roles can SendMail?"."""
    targetPermissionRecords = AuthPermission.objects(creator=self.client, name=name).first()
    return [{'role': group.role} for group in targetPermissionRecords.groups]
0.015936
def prj_view_seq(self, *args, **kwargs):
    """View the sequence currently selected in prj_seq_tablev.

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_prj:
        return
    i = self.prj_seq_tablev.currentIndex()
    item = i.internalPointer()
    if item:
        seq = item.internal_data()
        self.view_seq(seq)
0.005013
def fit(self, X, y, num_training_samples=None):
    """Use correlation data to train a model.

    First compute the correlation of the input data,
    and then normalize within subject if more than one sample in one
    subject, and then fit to a model defined by self.clf.

    Parameters
    ----------
    X: list of tuple (data1, data2)
        data1 and data2 are numpy array in shape [num_TRs, num_voxels]
        to be computed for correlation.
        They contain the activity data filtered by ROIs
        and prepared for correlation computation.
        Within list, all data1s must have the same num_voxels value,
        all data2s must have the same num_voxels value.
    y: 1D numpy array
        labels, len(X) equals len(y)
    num_training_samples: Optional[int]
        The number of samples used in the training.
        Set it to construct the kernel matrix portion by portion so the
        similarity vectors of the test data have to be computed here.
        Only set num_training_samples when sklearn.svm.SVC with
        precomputed kernel is used.
        If it is set, only those samples will be used to fit the model.

    Returns
    -------
    Classifier: self.
    """
    time1 = time.time()
    assert len(X) == len(y), \
        'the number of samples must be equal to the number of labels'
    for x in X:
        assert len(x) == 2, \
            'there must be two parts for each correlation computation'
    X1, X2 = zip(*X)
    if not (isinstance(self.clf, sklearn.svm.SVC)
            and self.clf.kernel == 'precomputed'):
        if num_training_samples is not None:
            num_training_samples = None
            logger.warn(
                'num_training_samples should not be set for classifiers '
                'other than SVM with precomputed kernels'
            )
    num_samples = len(X1)
    num_voxels1 = X1[0].shape[1]
    num_voxels2 = X2[0].shape[1]
    # make sure X1 always has more voxels
    if num_voxels1 < num_voxels2:
        X1, X2 = X2, X1
        num_voxels1, num_voxels2 = num_voxels2, num_voxels1
    self.num_voxels_ = num_voxels1
    self.num_features_ = num_voxels1 * num_voxels2
    self.num_samples_ = num_samples

    data = self._generate_training_data(X1, X2, num_training_samples)

    if num_training_samples is not None:
        self.test_raw_data_ = None
        self.test_data_ = data[num_training_samples:,
                               0:num_training_samples]
        # limit training to the data specified by num_training_samples
        data = data[0:num_training_samples, 0:num_training_samples]
    # training
    self.clf = self.clf.fit(data, y[0:num_training_samples])
    # set the test data
    if num_training_samples is None:
        self.test_raw_data_ = None
        self.test_data_ = None
    time2 = time.time()
    logger.info(
        'training done, takes %.2f s' % (time2 - time1)
    )
    return self
0.000618
def _build_line(colwidths, colaligns, linefmt):
    "Return a string which represents a horizontal line."
    if not linefmt:
        return None
    if hasattr(linefmt, "__call__"):
        return linefmt(colwidths, colaligns)
    else:
        begin, fill, sep, end = linefmt
        cells = [fill * w for w in colwidths]
        return _build_simple_row(cells, (begin, sep, end))
0.002625
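A toy demonstration of the (begin, fill, sep, end) tuple format assumed above; the join mirrors what the _build_simple_row helper is presumed to do.

begin, fill, sep, end = "+", "-", "+", "+"
colwidths = [3, 5, 2]
line = begin + sep.join(fill * w for w in colwidths) + end
print(line)  # +---+-----+--+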
def groups_roles(self, room_id=None, room_name=None, **kwargs):
    """Lists all user’s roles in the private group."""
    if room_id:
        return self.__call_api_get('groups.roles', roomId=room_id, kwargs=kwargs)
    elif room_name:
        return self.__call_api_get('groups.roles', roomName=room_name, kwargs=kwargs)
    else:
        raise RocketMissingParamException('roomId or room_name required')
0.009217
def verify(self, headers, serialized_request_env, deserialized_request_env):
    # type: (Dict[str, Any], str, RequestEnvelope) -> None
    """Verify if the input request signature and the body matches.

    The verify method retrieves the Signature Certificate Chain URL,
    validates the URL, retrieves the chain from the URL, validates the
    signing certificate, extract the public key, base64 decode the
    Signature and verifies if the hash value of the request body matches
    with the decrypted signature.

    :param headers: headers of the input POST request
    :type headers: Dict[str, Any]
    :param serialized_request_env: raw request envelope in the
        input POST request
    :type serialized_request_env: str
    :param deserialized_request_env: deserialized request envelope instance
        of the input POST request
    :type deserialized_request_env:
        :py:class:`ask_sdk_model.request_envelope.RequestEnvelope`
    :raises: :py:class:`VerificationException` if headers doesn't exist or
        verification fails
    """
    cert_url = headers.get(self._signature_cert_chain_url_key)
    signature = headers.get(self._signature_key)

    if cert_url is None or signature is None:
        raise VerificationException(
            "Missing Signature/Certificate for the skill request")

    cert_chain = self._retrieve_and_validate_certificate_chain(cert_url)
    self._valid_request_body(
        cert_chain, signature, serialized_request_env)
0.001874
def repo(name: str, owner: str) -> snug.Query[dict]:
    """a repository lookup by owner and name"""
    return json.loads((yield f'/repos/{owner}/{name}').content)
0.006098
def fix_groups(groups):
    """Takes care of strange group numbers."""
    _groups = []
    for g in groups:
        try:
            if not float(g) > 0:
                _groups.append(1000)
            else:
                _groups.append(int(g))
        except TypeError as e:
            logging.info("Error in reading group number (check your db)")
            logging.debug(g)
            logging.debug(e)
            _groups.append(1000)
    return _groups
0.00216
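A self-contained sketch of the same coercion rule: non-positive or unreadable group values fall back to the sentinel group 1000 (only TypeError is caught, matching the function above).

def coerce_group(g):
    try:
        return int(g) if float(g) > 0 else 1000
    except TypeError:
        return 1000

print([coerce_group(g) for g in [2, "3", 0, -1, None]])  # [2, 3, 1000, 1000, 1000]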
def set_video_stream_param(self, streamtype, resolution, bitrate,
                           framerate, gop, isvbr, callback=None):
    '''
    Set the video stream param of stream N

    streamtype(0~3): Stream N.
    resolution(0~4): 0 720P,
                     1 VGA(640*480),
                     2 VGA(640*360),
                     3 QVGA(320*240),
                     4 QVGA(320*180).
    bitrate: Bit rate of stream type N(20480~2097152).
    framerate: Frame rate of stream type N.
    GOP: P frames between 1 frame of stream type N.
         The suggested value is: X * framerate.
    isvbr: 0(Not in use currently), 1(In use).
    '''
    params = {'streamType': streamtype,
              'resolution': resolution,
              'bitRate': bitrate,
              'frameRate': framerate,
              'GOP': gop,
              'isVBR': isvbr}
    return self.execute_command('setVideoStreamParam', params, callback=callback)
0.007456
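A hypothetical call, assuming cam is an instance of the camera client class above: main stream 0 at 720P, roughly 2 Mbit/s, 30 fps, a GOP of two seconds of frames (2 * framerate), VBR enabled.

cam.set_video_stream_param(streamtype=0, resolution=0, bitrate=2000000,
                           framerate=30, gop=60, isvbr=1)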
def coerce(self, values):
    """Convert an iterable of literals to an iterable of options.

    Args:
        values (iterable or string): An iterable of raw values to convert
            into options. If the value is a string it is assumed to be a
            comma separated list and will be split before processing.

    Returns:
        iterable: An iterable of option values initialized with the raw
            values from `values`.

    Raises:
        TypeError: If `values` is not iterable or string.
        TypeError: If the underlying option raises a TypeError.
        ValueError: If the underlying option raises a ValueError.
    """
    if isinstance(values, compat.basestring):
        values = tuple(value.strip() for value in values.split(','))

    # Create a list of options to store each value.
    opt_iter = tuple(copy.deepcopy(self._option) for value in values)

    for opt_obj, val in compat.zip(opt_iter, values):
        opt_obj.__set__(None, val)

    return opt_iter
0.001823
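A minimal sketch of the comma-splitting step described in the docstring above.

raw = "alpha, beta ,gamma"
values = tuple(value.strip() for value in raw.split(','))
print(values)  # ('alpha', 'beta', 'gamma')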
def invoke_hook_bolt_fail(self, heron_tuple, fail_latency_ns):
    """invoke task hooks for every time bolt fails a tuple

    :type heron_tuple: HeronTuple
    :param heron_tuple: tuple that is failed
    :type fail_latency_ns: float
    :param fail_latency_ns: fail latency in nano seconds
    """
    if len(self.task_hooks) > 0:
        bolt_fail_info = BoltFailInfo(heron_tuple=heron_tuple,
                                      failing_task_id=self.get_task_id(),
                                      fail_latency_ms=fail_latency_ns *
                                      system_constants.NS_TO_MS)
        for task_hook in self.task_hooks:
            task_hook.bolt_fail(bolt_fail_info)
0.006192
def _update(self, conf_dict, base_name=None):
    """
    Updates the current configuration with the values in `conf_dict`.

    :param dict conf_dict: Dictionary of key value settings.
    :param str base_name: Base namespace for setting keys.
    """
    for name in conf_dict:
        # Skip private names
        if name.startswith('_'):
            continue
        value = conf_dict[name]
        # Skip Namespace if it's imported
        if value is Namespace:
            continue
        # Use a base namespace
        if base_name:
            name = base_name + '.' + name
        if isinstance(value, Namespace):
            for name, value in value.iteritems(name):
                self.set(name, value)
        # Automatically call any functions in the settings module, and if
        # they return a value other than None, that value becomes a setting
        elif callable(value):
            value = value()
            if value is not None:
                self.set(name, value)
        else:
            self.set(name, value)
0.001735
def enterEvent(self, event):
    """
    Shows the tracker item when the cursor enters this widget.
    """
    item = self.trackerItem()
    if item:
        item.setVisible(True)
0.030151
def draw_canvas():
    """Render the tkinter canvas based on the state of ``world``"""
    for x in range(len(world)):
        for y in range(len(world[x])):
            if world[x][y].value:
                color = world[x][y].color_alive.get_as_hex()
            else:
                color = world[x][y].color_dead.get_as_hex()
            canvas.itemconfig(canvas_grid[x][y], fill=color)
0.002558
def emit(self, record):
    """
    Override emit() method in handler parent for sending log to RESTful API
    """
    # avoid infinite recursion
    if record.name.startswith('requests'):
        return

    data, header = self._prepPayload(record)
    try:
        self.session.post(self._getEndpoint(),
                          data=data,
                          headers={'content-type': header})
    except Exception:
        self.handleError(record)
0.003914
def rpc_get_consensus_hashes(self, block_id_list, **con_info):
    """
    Return the consensus hashes at multiple block numbers.
    Return a dict mapping each block ID to its consensus hash.

    Returns {'status': True, 'consensus_hashes': dict} on success
    Returns {'error': ...} on error
    """
    if type(block_id_list) != list:
        return {'error': 'Invalid block heights', 'http_status': 400}

    if len(block_id_list) > 32:
        return {'error': 'Too many block heights', 'http_status': 400}

    for bid in block_id_list:
        if not check_block(bid):
            return {'error': 'Invalid block height', 'http_status': 400}

    db = get_db_state(self.working_dir)
    ret = {}
    for block_id in block_id_list:
        ret[block_id] = db.get_consensus_at(block_id)

    db.close()

    return self.success_response({'consensus_hashes': ret})
0.006309
def endswith(self, suffix, start=0, end=None):
    """Return True if ends with the specified suffix, False otherwise.

    With optional start, test beginning at that position. With optional end,
    stop comparing at that position. suffix can also be a tuple of strings
    to try.

    :param str suffix: Suffix to search.
    :param int start: Beginning position.
    :param int end: Stop comparison at this position.
    """
    args = [suffix, start] + ([] if end is None else [end])
    return self.value_no_colors.endswith(*args)
0.005282
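The wrapper above delegates to plain str.endswith; a quick demonstration of the suffix-tuple and start/end behaviour it documents.

s = "archive.tar.gz"
print(s.endswith(".gz"))            # True
print(s.endswith((".zip", ".gz")))  # True: any suffix in the tuple matches
print(s.endswith(".tar", 0, 11))    # True: compares against s[0:11] == "archive.tar"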
def logon(self, password='admin'):
    """
    Parameters
    ----------
    password : str
        default 'admin'

    Returns
    -------
    dict
    """
    r = self._basic_post(url='logon', data=password)
    return r.json()
0.007299
def rest(self, token, endpoint=None, timeout=None):
    """Obtain a metadata REST API client."""
    from . import rest
    return rest.SignalFxRestClient(
        token=token,
        endpoint=endpoint or self._api_endpoint,
        timeout=timeout or self._timeout)
0.006873
def align_blocks(source_sentences, target_sentences, params=LanguageIndependent):
    """Creates the sentence alignment of two blocks of texts (usually paragraphs).

    @param source_sentences: The list of source sentence lengths.
    @param target_sentences: The list of target sentence lengths.
    @param params: the sentence alignment parameters.
    @return: The sentence alignments, a list of index pairs.
    """
    alignment_types = list(params.PRIORS.keys())

    # there are always three rows in the history (with the last of them being
    # filled) and the rows are always |target_text| + 2, so that we never have
    # to do boundary checks
    D = [(len(target_sentences) + 2) * [0] for x in range(2)]

    # for the first sentence, only substitution, insertion or deletion are
    # allowed, and they are all equally likely ( == 1)
    D.append([0, 1])
    try:
        D[-2][1] = 1
        D[-2][2] = 1
    except:
        pass

    backlinks = {}

    for i in range(len(source_sentences)):
        for j in range(len(target_sentences)):
            m = []
            for a in alignment_types:
                k = D[-(1 + a[0])][j + 2 - a[1]]
                if k > 0:
                    p = k * align_probability(i, j, source_sentences,
                                              target_sentences, a, params)
                    m.append((p, a))

            if len(m) > 0:
                v = max(m)
                backlinks[(i, j)] = v[1]
                D[-1].append(v[0])
            else:
                backlinks[(i, j)] = (1, 1)
                D[-1].append(0)

        D.pop(0)
        D.append([0, 0])

    return trace(backlinks, source_sentences, target_sentences)
0.006555
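A hypothetical usage sketch, assuming this function's module (with align_probability, trace and the LanguageIndependent parameters) is importable as gale_church; note the inputs are per-sentence lengths, not the sentences themselves.

from gale_church import align_blocks  # hypothetical module name

source_lengths = [21, 35, 102]   # character lengths of the source sentences
target_lengths = [24, 33, 95]    # character lengths of the target sentences
print(align_blocks(source_lengths, target_lengths))
# e.g. [(0, 0), (1, 1), (2, 2)] for a mostly one-to-one alignment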
def open_upload_stream_with_id(self, file_id, filename,
                               chunk_size_bytes=None, metadata=None):
    """Opens a Stream that the application can write the contents of the
    file to.

    The user must specify the file id and filename, and can choose to add
    any additional information in the metadata field of the file document
    or modify the chunk size.
    For example::

      my_db = MongoClient().test
      fs = GridFSBucket(my_db)
      grid_in = fs.open_upload_stream_with_id(
          ObjectId(),
          "test_file",
          chunk_size_bytes=4,
          metadata={"contentType": "text/plain"})
      grid_in.write("data I want to store!")
      grid_in.close()  # uploaded on close

    Returns an instance of :class:`~gridfs.grid_file.GridIn`.

    Raises :exc:`~gridfs.errors.NoFile` if no such version of
    that file exists.
    Raises :exc:`~ValueError` if `filename` is not a string.

    :Parameters:
      - `file_id`: The id to use for this file. The id must not have
        already been used for another file.
      - `filename`: The name of the file to upload.
      - `chunk_size_bytes` (optional): The number of bytes per chunk of this
        file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`.
      - `metadata` (optional): User data for the 'metadata' field of the
        files collection document. If not provided the metadata field will
        be omitted from the files collection document.
    """
    validate_string("filename", filename)

    opts = {"_id": file_id,
            "filename": filename,
            "chunk_size": (chunk_size_bytes if chunk_size_bytes is not None
                           else self._chunk_size_bytes)}
    if metadata is not None:
        opts["metadata"] = metadata

    return GridIn(self._collection, **opts)
0.001014
def effectiveTagSet(self):
    """Return a :class:`~pyasn1.type.tag.TagSet` object of the currently
    initialized component or self (if |ASN.1| is tagged)."""
    if self.tagSet:
        return self.tagSet
    else:
        component = self.getComponent()
        return component.effectiveTagSet
0.009434
def load(self, filename, format_file='cloudupdrs'):
    """
    This is a general load data method where the format of data to load can be passed as a parameter,

    :param str filename: The path to load data from
    :param str format_file: format of the file. Default is CloudUPDRS. Set to mpower for mpower data.
    :return dataframe: data_frame.x, data_frame.y, data_frame.z: x, y, z components of the acceleration \
        data_frame.index is the datetime-like index
    """
    try:
        ts = load_data(filename, format_file)
        validator = CloudUPDRSDataFrameValidator()

        if validator.is_valid(ts):
            return ts
        else:
            logging.error('Error loading data, wrong format.')
            return None
    except IOError as e:
        ierr = "({}): {}".format(e.errno, e.strerror)
        logging.error("load data, file not found, I/O error %s", ierr)
    except ValueError as verr:
        logging.error("load data ValueError ->%s", verr)
    except:
        logging.error("Unexpected error on load data method: %s", sys.exc_info()[0])
0.005863