Dataset columns: text (string, lengths 78 to 104k characters), score (float64, range 0 to 0.18).
def delete_wish_list_by_id(cls, wish_list_id, **kwargs):
    """Delete WishList

    Delete an instance of WishList by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.delete_wish_list_by_id(wish_list_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str wish_list_id: ID of wishList to delete. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._delete_wish_list_by_id_with_http_info(wish_list_id, **kwargs)
    else:
        (data) = cls._delete_wish_list_by_id_with_http_info(wish_list_id, **kwargs)
        return data
0.00444
def init_ui(self):
    """Setup control widget UI."""
    self.control_layout = QHBoxLayout()
    self.setLayout(self.control_layout)

    self.reset_button = QPushButton()
    self.reset_button.setFixedSize(40, 40)
    self.reset_button.setIcon(QtGui.QIcon(WIN_PATH))

    self.game_timer = QLCDNumber()
    self.game_timer.setStyleSheet("QLCDNumber {color: red;}")
    self.game_timer.setFixedWidth(100)

    self.move_counter = QLCDNumber()
    self.move_counter.setStyleSheet("QLCDNumber {color: red;}")
    self.move_counter.setFixedWidth(100)

    self.control_layout.addWidget(self.game_timer)
    self.control_layout.addWidget(self.reset_button)
    self.control_layout.addWidget(self.move_counter)
0.002621
def adjustRange(self, recursive=True): """ Adjust the start and end ranges for this item based on the limits from its children. This method will only apply to group items. :param recursive | <bool> """ if ( self.adjustmentsBlocked('range') ): return if ( self.itemStyle() == self.ItemStyle.Group ): dateStart = self.dateStart() dateEnd = self.dateEnd() first = True for c in range(self.childCount()): child = self.child(c) if ( first ): dateStart = child.dateStart() dateEnd = child.dateEnd() first = False else: dateStart = min(child.dateStart(), dateStart) dateEnd = max(child.dateEnd(), dateEnd) self._dateStart = dateStart self._dateEnd = dateEnd self.sync() if ( self.parent() and recursive ): self.parent().adjustRange(True)
0.020886
def summarize_tensors(tensor_dict, tag=None):
    """Summarize the tensors.

    Args:
        tensor_dict: a dictionary of tensors.
        tag: name scope of the summary; defaults to tensors/.
    """
    if tag is None:
        tag = "tensors/"
    for t_name in list(tensor_dict):
        t = tensor_dict[t_name]
        tf.summary.histogram(tag + t_name, t)
0.012012
def get_missing_value_key(d):
    """
    Get the Missing Value entry from a table of data. If none is found, try the columns.
    If still none found, prompt user.
    :param dict d: Table of data
    :return str _mv: Missing Value
    """
    # Use None as the sentinel; the original initialized _mv to "nan", which made
    # the `if not _mv` check below always false and skipped the column-level search.
    _mv = None

    # Attempt to find a table-level missing value key
    try:
        # check for missing value key at the table root
        _mv = d["missingValue"]
    except KeyError as e:
        logger_misc.info("get_missing_value: No missing value key found: {}".format(e))
    except AttributeError as e:
        logger_misc.warn("get_missing_value: Column is wrong data type: {}".format(e))

    # No table-level missing value found. Attempt to find a column-level missing value key
    if not _mv:
        try:
            # loop for each column of data, searching for a missing value key
            for k, v in d["columns"].items():
                # found a column with a missing value key. Store it and exit the loop.
                _mv = v["missingValue"]
                break
        except KeyError:
            # There are no columns in this table. We've got bigger problems!
            pass

    # No table-level or column-level missing value. Fall back to the "nan" default.
    # (The interactive prompt below is kept disabled, as in the original.)
    # if not _mv:
    #     print("No 'missingValue' key provided. Please type the missingValue used in this file: {}\n".format(filename))
    #     _mv = input("missingValue: ")
    return _mv if _mv is not None else "nan"
0.005373
def get_alt_lengths(self): """Returns the longest length of the variant. For deletions, return is negative, SNPs return 0, and insertions are +. None return corresponds to no variant in interval for specified individual """ #this is a hack to store the # of individuals without having to actually store it out = [] for i in six.moves.range(len(self.genotype)): valid_alt = self.get_alt_length(individual=i) if not valid_alt: out.append(None) else: out.append(max(valid_alt)-len(self.ref)) return out
0.009494
def list_documents(project_id, knowledge_base_id): """Lists the Documents belonging to a Knowledge base. Args: project_id: The GCP project linked with the agent. knowledge_base_id: Id of the Knowledge base.""" import dialogflow_v2beta1 as dialogflow client = dialogflow.DocumentsClient() knowledge_base_path = client.knowledge_base_path(project_id, knowledge_base_id) print('Documents for Knowledge Id: {}'.format(knowledge_base_id)) for document in client.list_documents(knowledge_base_path): print(' - Display Name: {}'.format(document.display_name)) print(' - Knowledge ID: {}'.format(document.name)) print(' - MIME Type: {}'.format(document.mime_type)) print(' - Knowledge Types:') for knowledge_type in document.knowledge_types: print(' - {}'.format(KNOWLEDGE_TYPES[knowledge_type])) print(' - Source: {}\n'.format(document.content_uri))
0.000996
def register(self, typedef): """ Add the typedef to this engine if it is compatible. After registering a :class:`~TypeDefinition`, it will not be bound until :meth:`~TypeEngine.bind` is next called. Nothing will happen when register is called with a typedef that is pending binding or already bound. Otherwise, the engine will ensure it is compatible with the type using :meth:`~TypeEngine.is_compatible` before adding it to the set of unbound types. Parameters ---------- typedef : :class:`~TypeDefinition` The typedef to register with this engine Raises ------ exc : :class:`ValueError` If :meth:`~TypeEngine.is_compatible` is falsey """ if typedef in self.bound_types: return if not self.is_compatible(typedef): raise ValueError("Incompatible type {} for engine {}".format( typedef, self)) if typedef not in self.unbound_types: self.unbound_types.add(typedef) typedef._register(self)
0.001779
def _apply(self, method_name, *args, **kwargs):
    """Call ``method_name`` with args and kwargs on each member.

    Returns a sequence of return values.
    """
    return [
        getattr(member, method_name)(*args, **kwargs)
        for member in self.forms
    ]
0.006711
def _cc(self):
    """
    implementation of the efficient bilayer cross counting by insert-sort
    (see Barth & Mutzel paper "Simple and Efficient Bilayer Cross Counting")
    """
    g = self.layout.grx
    P = []
    for v in self:
        P.extend(sorted([g[x].pos for x in self._neighbors(v)]))
    # count inversions in P:
    s = []
    count = 0
    for i, p in enumerate(P):
        j = bisect(s, p)
        if j < i:
            count += (i - j)
        s.insert(j, p)
    return count
0.018622
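The inversion-counting core of _cc above stands alone; here is a minimal, self-contained sketch of that insert-sort counting step (the function name and test values are illustrative, not from the source):

from bisect import bisect

def count_inversions(P):
    # Insert each position into a sorted list; the elements already placed
    # to its right (i - j of them) each form one inversion, i.e. one crossing.
    s, count = [], 0
    for i, p in enumerate(P):
        j = bisect(s, p)
        count += i - j
        s.insert(j, p)
    return count

assert count_inversions([2, 1, 3, 0]) == 4  # pairs (2,1), (2,0), (1,0), (3,0)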
def widont_html(value): """ Add an HTML non-breaking space between the final two words at the end of (and in sentences just outside of) block level tags to avoid "widowed" words. Examples: >>> print(widont_html('<h2>Here is a simple example </h2> <p>Single</p>')) <h2>Here is a simple&nbsp;example </h2> <p>Single</p> >>> print(widont_html('<p>test me<br /> out</p><h2>Ok?</h2>Not in a p<p title="test me">and this</p>')) <p>test&nbsp;me<br /> out</p><h2>Ok?</h2>Not in a&nbsp;p<p title="test me">and&nbsp;this</p> >>> print(widont_html('leading text <p>test me out</p> trailing text')) leading&nbsp;text <p>test me&nbsp;out</p> trailing&nbsp;text """ def replace(matchobj): return force_text('%s&nbsp;%s%s' % matchobj.groups()) return re_widont_html.sub(replace, force_text(value))
0.004667
def strings_equal(s1, s2):
    """
    Timing-attack resistant string comparison.

    Normal comparison using == will short-circuit on the first mismatching
    character. This avoids that by scanning the whole string, though we
    still reveal to a timing attack whether the strings are the same
    length.
    """
    try:
        s1 = unicodedata.normalize('NFKC', str(s1))
        s2 = unicodedata.normalize('NFKC', str(s2))
    except:
        # Python 2 fallback: str() can fail on non-ASCII unicode input.
        s1 = unicodedata.normalize('NFKC', unicode(s1))
        s2 = unicodedata.normalize('NFKC', unicode(s2))
    return compare_digest(s1, s2)
0.003396
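For reference, the constant-time comparison that strings_equal delegates to is hmac.compare_digest; a small sanity check with illustrative values:

from hmac import compare_digest

# compare_digest scans both inputs fully rather than stopping at the first
# mismatch, so timing does not reveal where the inputs diverge.
assert compare_digest("secret-token", "secret-token")
assert not compare_digest("secret-token", "secret-tokem")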
def updateFontPicker(self): """ Updates the font picker widget to the current font settings. """ font = self.currentFont() self._fontPickerWidget.setPointSize(font.pointSize()) self._fontPickerWidget.setCurrentFamily(font.family())
0.007018
def detect_encoding(self, path): """ For the implementation of encoding definitions in Python, look at: - http://www.python.org/dev/peps/pep-0263/ .. note:: code taken and adapted from ```jedi.common.source_to_unicode.detect_encoding``` """ with open(path, 'rb') as file: source = file.read() # take care of line encodings (not in jedi) source = source.replace(b'\r', b'') source_str = str(source).replace('\\n', '\n') byte_mark = ast.literal_eval(r"b'\xef\xbb\xbf'") if source.startswith(byte_mark): # UTF-8 byte-order mark return 'utf-8' first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', source_str).group(0) possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)", first_two_lines) if possible_encoding: return possible_encoding.group(1) return 'UTF-8'
0.002026
def authorization_code(self, request, data, client): """ Handle ``grant_type=authorization_code`` requests as defined in :rfc:`4.1.3`. """ grant = self.get_authorization_code_grant(request, request.POST, client) if constants.SINGLE_ACCESS_TOKEN: at = self.get_access_token(request, grant.user, grant.scope, client) else: at = self.create_access_token(request, grant.user, grant.scope, client) rt = self.create_refresh_token(request, grant.user, grant.scope, at, client) self.invalidate_grant(grant) return self.access_token_response(at)
0.010264
def clause_indices(self): """The list of clause indices in ``words`` layer. The indices are unique only in the boundary of a single sentence. """ if not self.is_tagged(CLAUSE_ANNOTATION): self.tag_clause_annotations() return [word.get(CLAUSE_IDX, None) for word in self[WORDS]]
0.006079
def setup_logging(self): """Setup python logging handler.""" date_format = '%Y-%m-%dT%H:%M:%S' log_format = '%(asctime)s %(levelname)s: %(message)s' if self.opts.verbose: lvl = logging.DEBUG else: lvl = logging.INFO # Requests is a bit chatty logging.getLogger('requests').setLevel('WARNING') self.logger.setLevel(lvl) stdout = logging.StreamHandler(sys.stdout) stdout.setLevel(lvl) formatter = logging.Formatter(log_format, date_format) stdout.setFormatter(formatter) self.logger.addHandler(stdout) # Decided not to use stderr # stderr = logging.StreamHandler(sys.stderr) # stderr.setLevel(logging.ERROR) # Error and above go to both stdout & stderr # formatter = logging.Formatter(log_format, date_format) # stderr.setFormatter(formatter) # self.logger.addHandler(stderr) log = self.opts.log or self.config['crony'].get('log_file') if log: logfile = logging.FileHandler(log) logfile.setLevel(lvl) formatter = logging.Formatter(log_format, date_format) logfile.setFormatter(formatter) self.logger.addHandler(logfile) if self.sentry_client: sentry = SentryHandler(self.sentry_client) sentry.setLevel(logging.ERROR) self.logger.addHandler(sentry) self.logger.debug('Logging setup complete.')
0.001987
def data(self, data): """Called for text between tags""" if self.state == STATE_SOURCE_ID: self.context.audit_record.source_id = int(data) # Audit ids can be 64 bits elif self.state == STATE_DATETIME: dt = datetime.datetime.strptime(data, "%Y-%m-%dT%H:%M:%S") self.get_parent_element().datetimestamp = dt elif self.state == STATE_REASON_FOR_CHANGE: self.context.audit_record.reason_for_change = data.strip() or None # Convert a result of '' to None. self.state = STATE_NONE
0.007105
def long_encode(input, errors='strict'):
    """Transliterate to 8 bit using as many letters as needed.

    For example, \u00e4 LATIN SMALL LETTER A WITH DIAERESIS ``ä`` will
    be replaced with ``ae``.
    """
    if not isinstance(input, text_type):
        input = text_type(input, sys.getdefaultencoding(), errors)
    length = len(input)
    input = unicodedata.normalize('NFKC', input)
    return input.translate(long_table), length
0.002268
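long_encode depends on an external long_table, so it is not runnable in isolation, but the NFKC normalization step it performs first can be shown on its own:

import unicodedata

# NFKC folds compatibility characters before the translation table is applied:
assert unicodedata.normalize('NFKC', '\ufb01') == 'fi'   # LATIN SMALL LIGATURE FI
assert unicodedata.normalize('NFKC', '\u2460') == '1'    # CIRCLED DIGIT ONE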
def dotted(self):
    """ Returns dotted-decimal representation """
    obj = libcrypto.OBJ_nid2obj(self.nid)
    buf = create_string_buffer(256)
    libcrypto.OBJ_obj2txt(buf, 256, obj, 1)
    if pyver == 2:
        return buf.value
    else:
        return buf.value.decode('ascii')
0.00639
def mark_broker_action_done(action, rid=None, unit=None): """Mark action as having been completed. @param action: name of action to be performed @returns None """ rdata = relation_get(rid, unit) or {} broker_rsp = rdata.get(get_broker_rsp_key()) if not broker_rsp: return rsp = CephBrokerRsp(broker_rsp) unit_name = local_unit().partition('/')[2] key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) kvstore = kv() kvstore.set(key=key, value=rsp.request_id) kvstore.flush()
0.001832
def ask(question, default_answer=False, default_answer_str="no"):
    """
    Ask for user input.

    This asks a yes/no question with a preset default. You can bypass the
    user input and fetch the default answer by setting the ``CI`` or
    ``TEST`` environment variables (see ``should_ignore_tty`` below).

    Args:
        question: The question to ask on stdout.
        default_answer: The default value to return.
        default_answer_str:
            The default answer string that we present to the user.

    Tests:
        >>> os.putenv("TEST", "yes"); ask("Test?", default_answer=True)
        True
        >>> os.putenv("TEST", "yes"); ask("Test?", default_answer=False)
        False
    """
    response = default_answer

    def should_ignore_tty():
        """
        Check, if we want to ignore an opened tty result.
        """
        ret_to_bool = {"yes": True, "no": False, "true": True, "false": False}
        envs = [os.getenv("CI", default="no"), os.getenv("TEST", default="no")]
        vals = [ret_to_bool[val] for val in envs if val in ret_to_bool]
        return any(vals)

    ignore_stdin_istty = should_ignore_tty()
    has_tty = sys.stdin.isatty() and not ignore_stdin_istty
    if has_tty:
        response = query_yes_no(question, default_answer_str)
    else:
        LOG.debug("NoTTY: %s -> %s", question, response)

    return response
0.000768
def quick_search(self, name, platform=None, sort_by=None, desc=True): """ Quick search method that allows you to search for a game using only the title and the platform :param name: string :param platform: int :param sort_by: string :param desc: bool :return: pybomb.clients.Response """ if platform is None: query_filter = "name:{0}".format(name) else: query_filter = "name:{0},platforms:{1}".format(name, platform) search_params = {"filter": query_filter} if sort_by is not None: self._validate_sort_field(sort_by) if desc: direction = self.SORT_ORDER_DESCENDING else: direction = self.SORT_ORDER_ASCENDING search_params["sort"] = "{0}:{1}".format(sort_by, direction) response = self._query(search_params) return response
0.002096
def save(self, obj): """Save current instance - as per the gludb spec.""" cur = self._conn().cursor() tabname = obj.__class__.get_table_name() index_names = obj.__class__.index_names() or [] col_names = ['id', 'value'] + index_names value_holders = ['%s'] * len(col_names) updates = ['%s = EXCLUDED.%s' % (cn, cn) for cn in col_names[1:]] if not obj.id: id = uuid() obj.id = id query = 'insert into {0} ({1}) values ({2}) on conflict(id) do update set {3};'.format( tabname, ','.join(col_names), ','.join(value_holders), ','.join(updates), ) values = [obj.id, obj.to_data()] index_vals = obj.indexes() or {} values += [index_vals.get(name, 'NULL') for name in index_names] with self._conn() as conn: with conn.cursor() as cur: cur.execute(query, tuple(values))
0.003061
def serialize(self, include_class=True, save_dynamic=False, **kwargs): """Serializes a **HasProperties** instance to dictionary This uses the Property serializers to serialize all Property values to a JSON-compatible dictionary. Properties that are undefined are not included. If the **HasProperties** instance contains a reference to itself, a :code:`properties.SelfReferenceError` will be raised. **Parameters**: * **include_class** - If True (the default), the name of the class will also be saved to the serialized dictionary under key :code:`'__class__'` * **save_dynamic** - If True, dynamic properties are written to the serialized dict (default: False). * Any other keyword arguments will be passed through to the Property serializers. """ if getattr(self, '_getting_serialized', False): raise utils.SelfReferenceError('Object contains unserializable ' 'self reference') self._getting_serialized = True try: kwargs.update({ 'include_class': include_class, 'save_dynamic': save_dynamic }) if save_dynamic: prop_source = self._props else: prop_source = self._backend data = ( (key, self._props[key].serialize(getattr(self, key), **kwargs)) for key in prop_source ) json_dict = {k: v for k, v in data if v is not None} if include_class: json_dict.update({'__class__': self.__class__.__name__}) return json_dict finally: self._getting_serialized = False
0.001112
def parse(fp): """ Parse the contents of the `~io.IOBase.readline`-supporting file-like object ``fp`` as a simple line-oriented ``.properties`` file and return a generator of ``(key, value, original_lines)`` triples for every entry in ``fp`` (including duplicate keys) in order of occurrence. The third element of each triple is the concatenation of the unmodified lines in ``fp`` (including trailing newlines) from which the key and value were extracted. The generator also includes comments and blank/all-whitespace lines found in ``fp``, one triple per line, with the first two elements of the triples set to `None`. This is the only way to extract comments from a ``.properties`` file with this library. ``fp`` may be either a text or binary filehandle, with or without universal newlines enabled. If it is a binary filehandle, its contents are decoded as Latin-1. .. versionchanged:: 0.5.0 Invalid ``\\uXXXX`` escape sequences will now cause an `InvalidUEscapeError` to be raised :param fp: the file from which to read the ``.properties`` document :type fp: file-like object :rtype: generator of triples of text strings :raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence occurs in the input """ def lineiter(): while True: ln = fp.readline() if isinstance(ln, binary_type): ln = ln.decode('iso-8859-1') if ln == '': return for l in ascii_splitlines(ln): yield l liter = lineiter() for source in liter: line = source if re.match(r'^[ \t\f]*(?:[#!]|\r?\n?$)', line): yield (None, None, source) continue line = line.lstrip(' \t\f').rstrip('\r\n') while re.search(r'(?<!\\)(?:\\\\)*\\$', line): line = line[:-1] nextline = next(liter, '') source += nextline line += nextline.lstrip(' \t\f').rstrip('\r\n') if line == '': # series of otherwise-blank lines with continuations yield (None, None, source) continue m = re.search(r'(?<!\\)(?:\\\\)*([ \t\f]*[=:]|[ \t\f])[ \t\f]*', line) if m: yield (unescape(line[:m.start(1)]),unescape(line[m.end():]),source) else: yield (unescape(line), '', source)
0.001647
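The trickiest part of parse above is the key/value separator regex; this standalone check (the sample line is illustrative) confirms that escaped '=' characters are skipped and the first unescaped separator wins:

import re

sep = re.compile(r'(?<!\\)(?:\\\\)*([ \t\f]*[=:]|[ \t\f])[ \t\f]*')
line = r'key\=with\=escapes = value'
m = sep.search(line)
assert line[:m.start(1)] == r'key\=with\=escapes'
assert line[m.end():] == 'value'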
def showGroupMenu( self ): """ Displays the group menu to the user for modification. """ group_active = self.isGroupingActive() group_by = self.groupBy() menu = XMenu(self) menu.setTitle('Grouping Options') menu.setShowTitle(True) menu.addAction('Edit Advanced Grouping') menu.addSeparator() action = menu.addAction('No Grouping') action.setCheckable(True) action.setChecked(not group_active) action = menu.addAction('Advanced') action.setCheckable(True) action.setChecked(group_by == self.GroupByAdvancedKey and group_active) if ( group_by == self.GroupByAdvancedKey ): font = action.font() font.setBold(True) action.setFont(font) menu.addSeparator() # add dynamic options from the table schema tableType = self.tableType() if ( tableType ): columns = tableType.schema().columns() columns.sort(key = lambda x: x.displayName()) for column in columns: action = menu.addAction(column.displayName()) action.setCheckable(True) action.setChecked(group_by == column.displayName() and group_active) if ( column.displayName() == group_by ): font = action.font() font.setBold(True) action.setFont(font) point = QPoint(0, self.uiGroupOptionsBTN.height()) action = menu.exec_(self.uiGroupOptionsBTN.mapToGlobal(point)) if ( not action ): return elif ( action.text() == 'Edit Advanced Grouping' ): print 'edit advanced grouping options' elif ( action.text() == 'No Grouping' ): self.setGroupingActive(False) elif ( action.text() == 'Advanced' ): self.uiGroupBTN.blockSignals(True) self.setGroupBy(self.GroupByAdvancedKey) self.setGroupingActive(True) self.uiGroupBTN.blockSignals(False) self.refreshResults() else: self.uiGroupBTN.blockSignals(True) self.setGroupBy(nativestring(action.text())) self.setGroupingActive(True) self.uiGroupBTN.blockSignals(False) self.refreshResults()
0.013138
def write_file(file_path, txt, **kws):
    """Write text content to a file.

    :param str file_path: Path of the file to write.
    :param str txt: Text content to be written.
    """
    if not os.path.exists(file_path):
        upDir = os.path.dirname(file_path)
        if not os.path.isdir(upDir):
            os.makedirs(upDir)

    kw = {"mode": "w", "encoding": "utf-8"}
    if kws:
        for k, v in kws.items():
            kw[k] = v
    with open(file_path, **kw) as afile:
        afile.write(txt)
0.00885
def _read_gtf(gtf): """ Load GTF file with precursor positions on genome """ if not gtf: return gtf db = defaultdict(list) with open(gtf) as in_handle: for line in in_handle: if line.startswith("#"): continue cols = line.strip().split("\t") name = [n.split("=")[1] for n in cols[-1].split(";") if n.startswith("Name")] chrom, start, end, strand = cols[0], cols[3], cols[4], cols[6] if cols[2] == "miRNA_primary_transcript": db[name[0]].append([chrom, int(start), int(end), strand]) return db
0.00319
def close_handle(self): """ Closes the handle to the module. @note: Normally you don't need to call this method. All handles created by I{WinAppDbg} are automatically closed when the garbage collector claims them. So unless you've been tinkering with it, setting L{hFile} to C{None} should be enough. """ try: if hasattr(self.hFile, 'close'): self.hFile.close() elif self.hFile not in (None, win32.INVALID_HANDLE_VALUE): win32.CloseHandle(self.hFile) finally: self.hFile = None
0.003175
def configure_all_loggers_for_colour(remove_existing: bool = True) -> None:
    """
    Applies a preconfigured datetime/colour scheme to ALL loggers.

    Should ONLY be called from the ``if __name__ == 'main'`` script;
    see https://docs.python.org/3.4/howto/logging.html#library-config.

    Generally MORE SENSIBLE just to apply a handler to the root logger.

    Args:
        remove_existing: remove existing handlers from logger first?
    """
    handler = get_colour_handler()
    apply_handler_to_all_logs(handler, remove_existing=remove_existing)
0.001789
def select_color(cpcolor, evalcolor, idx=0): """ Selects item color for plotting. :param cpcolor: color for control points grid item :type cpcolor: str, list, tuple :param evalcolor: color for evaluated points grid item :type evalcolor: str, list, tuple :param idx: index of the current geometry object :type idx: int :return: a list of color values :rtype: list """ # Random colors by default color = utilities.color_generator() # Constant color for control points grid if isinstance(cpcolor, str): color[0] = cpcolor # User-defined color for control points grid if isinstance(cpcolor, (list, tuple)): color[0] = cpcolor[idx] # Constant color for evaluated points grid if isinstance(evalcolor, str): color[1] = evalcolor # User-defined color for evaluated points grid if isinstance(evalcolor, (list, tuple)): color[1] = evalcolor[idx] return color
0.001031
def _set_blob_properties(self, ud): # type: (Uploader, blobxfer.models.upload.Descriptor) -> None """Set blob properties (md5, cache control) :param Uploader self: this :param blobxfer.models.upload.Descriptor ud: upload descriptor """ if ud.requires_non_encrypted_md5_put: digest = blobxfer.util.base64_encode_as_string(ud.md5.digest()) else: digest = None blobxfer.operations.azure.blob.set_blob_properties(ud.entity, digest) if blobxfer.util.is_not_empty(ud.entity.replica_targets): for ase in ud.entity.replica_targets: blobxfer.operations.azure.blob.set_blob_properties(ase, digest)
0.004219
def multi_ifo_coherent_job_setup(workflow, out_files, curr_exe_job, science_segs, datafind_outs, output_dir, parents=None, slide_dict=None, tags=None): """ Method for setting up coherent inspiral jobs. """ if tags is None: tags = [] data_seg, job_valid_seg = curr_exe_job.get_valid_times() curr_out_files = FileList([]) if 'IPN' in datafind_outs[-1].description \ and 'bank_veto_bank' in datafind_outs[-2].description: ipn_sky_points = datafind_outs[-1] bank_veto = datafind_outs[-2] frame_files = datafind_outs[:-2] else: ipn_sky_points = None bank_veto = datafind_outs[-1] frame_files = datafind_outs[:-1] split_bank_counter = 0 if curr_exe_job.injection_file is None: for split_bank in parents: tag = list(tags) tag.append(split_bank.tag_str) node = curr_exe_job.create_node(data_seg, job_valid_seg, parent=split_bank, dfParents=frame_files, bankVetoBank=bank_veto, ipn_file=ipn_sky_points, slide=slide_dict, tags=tag) workflow.add_node(node) split_bank_counter += 1 curr_out_files.extend(node.output_files) else: for inj_file in curr_exe_job.injection_file: for split_bank in parents: tag = list(tags) tag.append(inj_file.tag_str) tag.append(split_bank.tag_str) node = curr_exe_job.create_node(data_seg, job_valid_seg, parent=split_bank, inj_file=inj_file, tags=tag, dfParents=frame_files, bankVetoBank=bank_veto, ipn_file=ipn_sky_points) workflow.add_node(node) split_bank_counter += 1 curr_out_files.extend(node.output_files) # FIXME: Here we remove PSD files if they are coming # through. This should be done in a better way. On # to-do list. curr_out_files = [i for i in curr_out_files if 'PSD_FILE'\ not in i.tags] out_files += curr_out_files return out_files
0.003549
def pdf_split( input: str, output: str, stepsize: int = 1, sequence: [int] = None ): """ Split the input file in multiple output files :param input: name of the input file :param output: name of the output files :param stepsize: how many pages per file, only if sequence is None :param sequence: list with number of pages per file """ output = output or os.path.splitext(input)[0] if not os.path.isfile(input): print("Error. The file '%s' does not exist." % input) return with open(input, "rb") as inputfile: reader = PdfFileReader(inputfile) pagenr = 0 outputfile = None if sequence is None: for i, page in enumerate(reader.pages): if not i % stepsize: pagenr += 1 outputfile = open(output + "_%i.pdf" % pagenr, "wb") writer = PdfFileWriter() writer.addPage(page) if not (i + 1) % stepsize: writer.write(outputfile) outputfile.close() else: sequence = map(int, sequence) iter_pages = iter(reader.pages) for filenr, pagecount in enumerate(sequence): with open( output + "_%i.pdf" % (filenr + 1), "wb" ) as outputfile: writer = PdfFileWriter() for i in range(pagecount): try: page = next(iter_pages) writer.addPage(page) except StopIteration: writer.write(outputfile) return writer.write(outputfile) if not outputfile.closed: writer.write(outputfile) outputfile.close()
0.000534
def getAggShocks(self):
    '''
    Returns aggregate state variables and shocks for this period. The
    capital-to-labor ratio is irrelevant and thus treated as constant, and
    the wage and interest rates are also constant. However, aggregate
    shocks are assigned from a prespecified history.

    Parameters
    ----------
    None

    Returns
    -------
    AggVarsNow : CobbDouglasAggVars
        Aggregate state and shock variables for this period.
    '''
    # Get this period's aggregate shocks
    PermShkAggNow = self.PermShkAggHist[self.Shk_idx]
    TranShkAggNow = self.TranShkAggHist[self.Shk_idx]
    self.Shk_idx += 1

    # Factor prices are constant
    RfreeNow = self.Rfunc(1.0/PermShkAggNow)
    wRteNow = self.wFunc(1.0/PermShkAggNow)

    # Aggregates are irrelevant
    AaggNow = 1.0
    MaggNow = 1.0
    KtoLnow = 1.0/PermShkAggNow

    # Package the results into an object and return it
    AggVarsNow = CobbDouglasAggVars(MaggNow, AaggNow, KtoLnow, RfreeNow, wRteNow,
                                    PermShkAggNow, TranShkAggNow)
    return AggVarsNow
0.011295
def of_cls(self):
    """
    DON'T USE. Will be deleted soon. Use ``self.of``!
    """
    if isinstance(self.of, six.string_types):
        warnings.warn('When using Range programmatically, don\'t pass the "of" param as a string!')
        return Register.get_task_cls(self.of)
    return self.of
0.009494
def generic_loss(top_out, targets, model_hparams, vocab_size, weights_fn): """Compute loss numerator and denominator for one shard of output.""" del vocab_size # unused arg logits = top_out logits = common_attention.maybe_upcast(logits, hparams=model_hparams) cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.0) return common_layers.padded_cross_entropy( logits, targets, model_hparams.label_smoothing, cutoff=cutoff, weights_fn=weights_fn)
0.014056
def configure_proxy(self, curl_object):
    """configure pycurl proxy settings"""
    curl_object.setopt(curl_object.PROXY, self._proxy_hostname)
    curl_object.setopt(curl_object.PROXYPORT, self._proxy_port)
    curl_object.setopt(curl_object.PROXYTYPE, curl_object.PROXYTYPE_SOCKS5)
    if self._proxy_user and self._proxy_passwd:
        # The original interpolated self._proxy_port here, which would send
        # "user:port" as the credentials; the password is what is intended.
        curl_object.setopt(curl_object.PROXYUSERPWD,
                           '%s:%s' % (self._proxy_user, self._proxy_passwd))
0.00655
def set_coupl_old(self): """ Using the adjacency matrix, sample a coupling matrix. """ if self.model == 'krumsiek11' or self.model == 'var': # we already built the coupling matrix in set_coupl20() return self.Coupl = np.zeros((self.dim,self.dim)) for i in range(self.Adj.shape[0]): for j,a in enumerate(self.Adj[i]): # if there is a 1 in Adj, specify co and antiregulation # and strength of regulation if a != 0: co_anti = np.random.randint(2) # set a lower bound for the coupling parameters # they ought not to be smaller than 0.1 # and not be larger than 0.4 self.Coupl[i,j] = 0.0*np.random.rand() + 0.1 # set sign for coupling if co_anti == 1: self.Coupl[i,j] *= -1 # enforce certain requirements on models if self.model == 1: self.coupl_model1() elif self.model == 5: self.coupl_model5() elif self.model in [6,7]: self.coupl_model6() elif self.model in [8,9,10]: self.coupl_model8() # output if self.verbosity > 1: settings.m(0,self.Coupl)
0.008172
def get_weather(test=False): """ Returns weather reports from the dataset. """ if _Constants._TEST or test: rows = _Constants._DATABASE.execute("SELECT data FROM weather LIMIT {hardware}".format( hardware=_Constants._HARDWARE)) data = [r[0] for r in rows] data = [_Auxiliary._byteify(_json.loads(r)) for r in data] return _Auxiliary._byteify(data) else: rows = _Constants._DATABASE.execute("SELECT data FROM weather".format( hardware=_Constants._HARDWARE)) data = [r[0] for r in rows] data = [_Auxiliary._byteify(_json.loads(r)) for r in data] return _Auxiliary._byteify(data)
0.00838
def wait(self, timeout=None):
    # type: (Optional[int]) -> None
    """Wait on the long running operation for a specified length of time.
    You can check whether this call ended due to the timeout with the
    "done()" method.

    :param int timeout: Period of time to wait for the long running
     operation to complete (in seconds).
    :raises CloudError: Server problem with the query.
    """
    if self._thread is None:
        return
    self._thread.join(timeout=timeout)
    try:
        # Let's handle possible None in forgiveness here
        raise self._exception  # type: ignore
    except TypeError:  # Was None
        pass
0.005714
def getlist(self, key, delimiter=',', **kwargs):
    """
    Gets the setting value as a :class:`list`; it splits the string using
    ``delimiter``.

    :param str delimiter: split the value using this delimiter

    :rtype: list
    """
    value = self.get(key, **kwargs)
    if value is None:
        return value

    if isinstance(value, str):
        value = value.strip()
        if value.startswith('[') and value.endswith(']'):
            return self.getserialized(key)
        return [p.strip(' ') for p in value.split(delimiter)]

    return list(value)
0.008026
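A standalone look at the delimiter branch of getlist (the sample value is illustrative):

value = ' a, b ,c '
# strip the whole string, split on the delimiter, then strip each piece:
assert [p.strip(' ') for p in value.strip().split(',')] == ['a', 'b', 'c']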
def _from_hex_digest(digest):
    """Convert hex digest to sequence of bytes."""
    # Python 2 idiom: chr() produces byte-sized str characters here.
    return "".join(
        [chr(int(digest[x : x + 2], 16)) for x in range(0, len(digest), 2)]
    )
0.010989
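The chr/join idiom above is Python 2 style (in Python 3 it would build a str, not bytes); the modern equivalent is a one-liner:

digest = "deadbeef"
assert bytes.fromhex(digest) == b"\xde\xad\xbe\xef"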
def run_job(self,
            job,             # type: Union[JobBase, WorkflowJob, None]
            runtime_context  # type: RuntimeContext
            ):               # type: (...) -> None
    """ Execute a single Job in a separate thread. """
    if job is not None:
        with self.pending_jobs_lock:
            self.pending_jobs.append(job)

    with self.pending_jobs_lock:
        n = 0
        while (n+1) <= len(self.pending_jobs):
            job = self.pending_jobs[n]
            if isinstance(job, JobBase):
                if ((job.builder.resources["ram"]) > self.max_ram or
                        (job.builder.resources["cores"]) > self.max_cores):
                    # The original passed six arguments to a four-placeholder
                    # format string; only the host maxima belong here.
                    _logger.error(
                        'Job "%s" cannot be run, requests more resources (%s) '
                        'than available on this host (max ram %d, max cores %d)',
                        job.name, job.builder.resources,
                        self.max_ram, self.max_cores)
                    self.pending_jobs.remove(job)
                    return

                if ((self.allocated_ram + job.builder.resources["ram"]) > self.max_ram or
                        (self.allocated_cores + job.builder.resources["cores"]) > self.max_cores):
                    _logger.debug(
                        'Job "%s" cannot run yet, resources (%s) are not '
                        'available (already allocated ram is %d, allocated cores is %d, '
                        'max ram %d, max cores %d)',
                        job.name, job.builder.resources,
                        self.allocated_ram, self.allocated_cores,
                        self.max_ram, self.max_cores)
                    n += 1
                    continue

            thread = threading.Thread(target=self._runner, args=(job, runtime_context))
            thread.daemon = True
            self.threads.add(thread)
            if isinstance(job, JobBase):
                self.allocated_ram += job.builder.resources["ram"]
                self.allocated_cores += job.builder.resources["cores"]
            thread.start()
            self.pending_jobs.remove(job)
0.004702
def data_dirpath(task=None, **kwargs):
    """Get the path of the corresponding data directory.

    Parameters
    ----------
    task : str, optional
        The task for which datasets in the desired directory are used for.
        If not given, a path for the corresponding task-agnostic directory
        is returned.
    **kwargs : extra keyword arguments
        Extra keyword arguments, representing additional attributes of the
        datasets, are used to generate additional sub-folders on the path.
        For example, providing 'lang=en' will result in a path such as
        '/barn_base_dir/regression/lang_en/mydataset.csv'. Hierarchy always
        matches lexicographical order of keyword argument names, so
        'lang=en' and 'animal=dog' will result in a path such as
        'barn_base_dir/task_name/animal_dog/lang_en/dset.csv'.

    Returns
    -------
    str
        The path to the desired dir.
    """
    path = _base_dir()
    if task:
        path = os.path.join(path, _snail_case(task))
    for k, v in sorted(kwargs.items()):
        subdir_name = '{}_{}'.format(_snail_case(k), _snail_case(v))
        path = os.path.join(path, subdir_name)
    os.makedirs(path, exist_ok=True)
    return path
0.000812
def model_page(self, request, app_label, model_name, rest_of_url=None):
    """
    Handles the model-specific functionality of the databrowse site,
    delegating to the appropriate ModelDatabrowse class.
    """
    try:
        model = get_model(app_label, model_name)
    except LookupError:
        model = None
    if model is None:
        raise http.Http404("App %r, model %r, not found." %
                           (app_label, model_name))
    try:
        databrowse_class = self.registry[model]
    except KeyError:
        raise http.Http404("This model exists but has not been registered "
                           "with databrowse.")
    return databrowse_class(model, self).root(request, rest_of_url)
0.002538
def image_height(image): """ Returns the height of the image found at the path supplied by `image` relative to your project's images directory. """ image_size_cache = _get_cache('image_size_cache') if not Image: raise SassMissingDependency('PIL', 'image manipulation') filepath = String.unquoted(image).value path = None try: height = image_size_cache[filepath][1] except KeyError: height = 0 IMAGES_ROOT = _images_root() if callable(IMAGES_ROOT): try: _file, _storage = list(IMAGES_ROOT(filepath))[0] except IndexError: pass else: path = _storage.open(_file) else: _path = os.path.join(IMAGES_ROOT, filepath.strip(os.sep)) if os.path.exists(_path): path = open(_path, 'rb') if path: image = Image.open(path) size = image.size height = size[1] image_size_cache[filepath] = size return Number(height, 'px')
0.000929
def build_java_docs(app): """build java docs and then move the outdir""" java_path = app.builder.srcdir + '/../scala-package' java_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep \"\/javaapi\" | egrep -v \"Suite\"' java_doc_classpath = ':'.join([ '`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `', '`find macros -name "*.jar" | tr "\\n" ":" `', '`find core -name "*.jar" | tr "\\n" ":" `', '`find infer -name "*.jar" | tr "\\n" ":" `' ]) _run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation' .format(java_path, java_doc_sources, java_doc_classpath)) dest_path = app.builder.outdir + '/api/java/docs' _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) javadocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html'] for doc_file in javadocs: _run_cmd('cd ' + java_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0')
0.008841
def _parse_api_time(timestr):
    """
    Typical expiration times returned from the auth server are in this format:
        2012-05-02T14:27:40.000-05:00
    They can also be returned as a UTC value in this format:
        2012-05-02T14:27:40.000Z
    This method returns a proper datetime object from either of these formats.
    """
    try:
        reg_groups = API_DATE_PATTERN.match(timestr).groups()
        yr, mth, dy, hr, mn, sc, off_sign, off_hr, off_mn = reg_groups
    except AttributeError:
        # UTC dates don't show offsets.
        utc_groups = UTC_API_DATE_PATTERN.match(timestr).groups()
        yr, mth, dy, hr, mn, sc = utc_groups
        off_sign = "+"
        off_hr = off_mn = 0
    base_dt = datetime.datetime(int(yr), int(mth), int(dy), int(hr),
                                int(mn), int(sc))
    delta = datetime.timedelta(hours=int(off_hr), minutes=int(off_mn))
    if off_sign == "+":
        # Time is greater than UTC
        ret = base_dt - delta
    else:
        ret = base_dt + delta
    return ret
0.002641
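The two regex patterns are defined outside this snippet; under an assumed shape for API_DATE_PATTERN (a hypothetical reconstruction, not the library's actual definition), the offset arithmetic can be checked end to end:

import datetime
import re

# Assumed nine-group shape matching the documented example format:
API_DATE_PATTERN = re.compile(
    r"(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})\.\d+([-+])(\d{2}):(\d{2})$")

groups = API_DATE_PATTERN.match("2012-05-02T14:27:40.000-05:00").groups()
yr, mth, dy, hr, mn, sc, off_sign, off_hr, off_mn = groups
base_dt = datetime.datetime(int(yr), int(mth), int(dy), int(hr), int(mn), int(sc))
delta = datetime.timedelta(hours=int(off_hr), minutes=int(off_mn))
# off_sign is '-': the stamp is behind UTC, so the function adds the offset back
assert base_dt + delta == datetime.datetime(2012, 5, 2, 19, 27, 40)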
def remove_file(self, relativePath, name=None, removeFromSystem=False): """ Remove file from repository. :Parameters: #. relativePath (string): The relative to the repository path of the directory where the file should be dumped. If relativePath does not exist, it will be created automatically. #. name (string): The file name. If None is given, name will be split from relativePath. #. removeFromSystem (boolean): Whether to also remove directory and all files from the system.\n Only files saved in the repository will be removed and empty left directories. """ # get relative path normalized relativePath = os.path.normpath(relativePath) if relativePath == '.': relativePath = '' assert name != '.pyrepinfo', "'.pyrepinfo' is not allowed as file name in main repository directory" assert name != '.pyrepstate', "'.pyrepstate' is not allowed as file name in main repository directory" assert name != '.pyreplock', "'.pyreplock' is not allowed as file name in main repository directory" if name is None: assert len(relativePath), "name must be given when relative path is given as empty string or as a simple dot '.'" relativePath, name = os.path.split(relativePath) # get file info dict dirInfoDict, errorMessage = self.get_directory_info(relativePath) assert dirInfoDict is not None, errorMessage # check directory in repository assert name in dict.__getitem__(dirInfoDict, "files"), "file '%s' is not found in repository relative path '%s'"%(name, relativePath) # remove file from repo dict.__getitem__(dirInfoDict, "files").pop(name) # remove file from system if removeFromSystem: ap = os.path.join(self.__path, relativePath, name ) if os.path.isfile(ap): os.remove( ap ) # save repository self.save()
0.007306
def model_definition_factory(base_model_definition, **kwargs):
    """
    Provides an iterator over passed-in configuration values, allowing for
    easy exploration of models.

    Parameters:
    ___________
    base_model_definition: The base `ModelDefinition` to augment
    kwargs: Can be any keyword accepted by `ModelDefinition`. Values should
        be iterables.
    """
    if not kwargs:
        # The original yielded an undefined name `config` here.
        yield base_model_definition
    else:
        for param in kwargs:
            if not hasattr(base_model_definition, param):
                raise ValueError("'%s' is not a valid configuration parameter" % param)
        for raw_params in itertools.product(*kwargs.values()):
            new_definition = copy.copy(base_model_definition)
            new_definition.update(dict(zip(kwargs.keys(), raw_params)))
            yield new_definition
0.002328
def _listAttrGroupMembers(self, attrGroup): """ Returns a list of all members in the attribute group. """ from inspect import getmembers, ismethod methods = getmembers(self, ismethod) group_prefix = attrGroup + '_' group_len = len(group_prefix) group_members = [method[0][group_len:] for method in methods if method[0].startswith(group_prefix)] return group_members
0.004107
def get_keys(src, dst, keys): """ Copies the value of keys from source object to dest object :param src: :param dst: :param keys: :return: """ for key in keys: #dst[no_camel(key)] = src[key] if key in src else None dst[key] = src[key] if key in src else None
0.006515
def get_onionoo_details(self, agent): """ Requests the 'details' document from onionoo.torproject.org via the given `twisted.web.iweb.IAgent` -- you can get a suitable instance to pass here by calling either :meth:`txtorcon.Tor.web_agent` or :meth:`txtorcon.Circuit.web_agent`. """ # clearnet: 'https://onionoo.torproject.org/details?lookup={}' uri = 'http://tgel7v4rpcllsrk2.onion/details?lookup={}'.format(self.id_hex[1:]).encode('ascii') resp = yield agent.request(b'GET', uri) if resp.code != 200: raise RuntimeError( 'Failed to lookup relay details for {}'.format(self.id_hex) ) body = yield readBody(resp) data = json.loads(body.decode('ascii')) if len(data['relays']) != 1: raise RuntimeError( 'Got multiple relays for {}'.format(self.id_hex) ) relay_data = data['relays'][0] if relay_data['fingerprint'].lower() != self.id_hex[1:].lower(): raise RuntimeError( 'Expected "{}" but got data for "{}"'.format(self.id_hex, relay_data['fingerprint']) ) returnValue(relay_data)
0.004072
def copyFile(input, output, replace=None): """Copy a file whole from input to output.""" _found = findFile(output) if not _found or (_found and replace): shutil.copy2(input, output)
0.00495
def _write_box_information(xml_file, structure, ref_distance):
    """Write box information.

    Parameters
    ----------
    xml_file : file object
        The file object of the hoomdxml file being written
    structure : parmed.Structure
        Parmed structure object
    ref_distance : float, default=1.0
        Reference distance for conversion to reduced units
    """
    if np.allclose(structure.box[3:6], np.array([90, 90, 90])):
        box_str = '<box units="sigma" Lx="{}" Ly="{}" Lz="{}"/>\n'
        xml_file.write(box_str.format(*structure.box[:3] / ref_distance))
    else:
        a, b, c = structure.box[0:3] / ref_distance
        alpha, beta, gamma = np.radians(structure.box[3:6])

        lx = a
        xy = b * np.cos(gamma)
        xz = c * np.cos(beta)
        ly = np.sqrt(b**2 - xy**2)
        yz = (b*c*np.cos(alpha) - xy*xz) / ly
        lz = np.sqrt(c**2 - xz**2 - yz**2)

        box_str = '<box units="sigma" Lx="{}" Ly="{}" Lz="{}" xy="{}" xz="{}" yz="{}"/>\n'
        xml_file.write(box_str.format(lx, ly, lz, xy, xz, yz))
0.001887
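A standalone check of the triclinic conversion above: for a cubic cell the tilt factors must vanish and the box lengths must equal the cell edges.

import numpy as np

a, b, c = 4.0, 4.0, 4.0
alpha, beta, gamma = np.radians([90.0, 90.0, 90.0])
lx = a
xy = b * np.cos(gamma)
xz = c * np.cos(beta)
ly = np.sqrt(b**2 - xy**2)
yz = (b * c * np.cos(alpha) - xy * xz) / ly
lz = np.sqrt(c**2 - xz**2 - yz**2)
assert np.allclose([lx, ly, lz], 4.0) and np.allclose([xy, xz, yz], 0.0)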
def fetch_column(self, sql, *args, **kwargs): """Executes an SQL SELECT query and returns the first column of the first row or `None`. :param sql: statement to execute :param args: parameters iterable :param kwargs: parameters iterable :return: the first row of the first column or `None` """ with self.locked() as conn: return conn.query(sql, *args, **kwargs).fetch_column()
0.006757
def _parse_json(self, page, exactly_one=True): '''Returns location, (latitude, longitude) from json feed.''' places = page.get('results', []) if not len(places): self._check_status(page.get('status')) return None def parse_place(place): '''Get the location, lat, lng from a single json place.''' location = place.get('formatted_address') latitude = place['geometry']['location']['lat'] longitude = place['geometry']['location']['lng'] return Location(location, (latitude, longitude), place) if exactly_one: return parse_place(places[0]) else: return [parse_place(place) for place in places]
0.00267
def set_chain_info(self, chain_id, chain_name, num_groups): """Set the chain information. :param chain_id: the asym chain id from mmCIF :param chain_name: the auth chain id from mmCIF :param num_groups: the number of groups this chain has """ self.chain_id_list.append(chain_id) self.chain_name_list.append(chain_name) self.groups_per_chain.append(num_groups)
0.004728
def nonspeech_fragments(self): """ Iterates through the nonspeech fragments in the list (which are sorted). :rtype: generator of (int, :class:`~aeneas.syncmap.SyncMapFragment`) """ for i, fragment in enumerate(self.__fragments): if fragment.fragment_type == SyncMapFragment.NONSPEECH: yield (i, fragment)
0.005249
def do_gate_matrix(self, matrix: np.ndarray, qubits: Sequence[int]) -> 'AbstractQuantumSimulator': """ Apply an arbitrary unitary; not necessarily a named gate. :param matrix: The unitary matrix to apply. No checks are done :param qubits: A list of qubits to apply the unitary to. :return: ``self`` to support method chaining. """ unitary = lifted_gate_matrix(matrix=matrix, qubit_inds=qubits, n_qubits=self.n_qubits) self.density = unitary.dot(self.density).dot(np.conj(unitary).T) return self
0.00678
def create_group(self, name): """ Create group :param name: Group name """ parameters = { 'name': name } url = self.TEAM_GROUPS_URL connection = Connection(self.token) connection.set_url(self.production, url) connection.add_params(parameters) return connection.post_request()
0.005291
def filter_single_need(need, filter_string=""): """ Checks if a single need/need_part passes a filter_string :param need: need or need_part :param filter_string: string, which is used as input for eval() :return: True, if need as passed the filter_string, else False """ filter_context = need.copy() filter_context["search"] = re.search result = False try: result = bool(eval(filter_string, None, filter_context)) except Exception as e: raise NeedInvalidFilter("Filter {0} not valid: Error: {1}".format(filter_string, e)) return result
0.003333
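A minimal standalone demo of the eval-based filtering used by filter_single_need (the need fields here are illustrative):

import re

need = {"status": "open", "title": "Parser rewrite"}
context = dict(need, search=re.search)   # mirrors filter_context above
assert eval('status == "open" and search("rewrite", title)', None, context)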
def block_matrix(A, B, C, D):
    r"""Generate the operator matrix with quadrants

    .. math::

        \begin{pmatrix} A & B \\ C & D \end{pmatrix}

    Args:
        A (Matrix): Matrix of shape ``(n, m)``
        B (Matrix): Matrix of shape ``(n, k)``
        C (Matrix): Matrix of shape ``(l, m)``
        D (Matrix): Matrix of shape ``(l, k)``

    Returns:
        Matrix: The combined block matrix ``[[A, B], [C, D]]``.
    """
    return vstackm((hstackm((A, B)), hstackm((C, D))))
0.002062
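vstackm and hstackm are this library's own helpers; the same quadrant assembly in plain NumPy (an illustrative analogue, not the library's code) is:

import numpy as np

A = np.eye(2)
B = np.zeros((2, 3))
C = np.zeros((3, 2))
D = np.eye(3)
M = np.block([[A, B], [C, D]])  # stacks the four quadrants in one call
assert M.shape == (5, 5) and np.allclose(M, np.eye(5))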
def base_warfare(name, bases, attributes): """ Adds any number of attributes to an existing class. :param name: Name. :type name: unicode :param bases: Bases. :type bases: list :param attributes: Attributes. :type attributes: dict :return: Base. :rtype: object """ assert len(bases) == 1, "{0} | '{1}' object has multiple bases!".format(__name__, name) base = foundations.common.get_first_item(bases) for name, value in attributes.iteritems(): if name != "__metaclass__": setattr(base, name, value) return base
0.003373
def setMonospace(self): """ Fix the fonts of the first 32 styles to a mono space one. |Args| * **None** |Returns| **None** |Raises| * **None** """ font = bytes('courier new', 'utf-8') for ii in range(32): self.SendScintilla(self.SCI_STYLESETFONT, ii, font)
0.005525
def sg_train_func(func):
    r""" Decorates a function `func` as sg_train_func.

    Args:
        func: A function to decorate
    """
    @wraps(func)
    def wrapper(**kwargs):
        r""" Manages arguments of `tf.sg_opt`.

        Args:
            **kwargs:
                lr: A Python Scalar (optional). Learning rate. Default is .001.
                save_dir: A string. The root path to which checkpoint and
                  log files are saved. Default is `asset/train`.
                max_ep: A positive integer. Maximum number of epochs. Default is 1000.
                ep_size: A positive integer. Number of Total batches in an epoch.
                  For proper display of log. Default is 1e5.
                save_interval: A Python scalar. The interval of saving checkpoint files.
                  By default, for every 600 seconds, a checkpoint file is written.
                log_interval: A Python scalar. The interval of recording logs.
                  By default, for every 60 seconds, logging is executed.
                max_keep: A positive integer. Maximum number of recent
                  checkpoints to keep. Default is 5.
                keep_interval: A Python scalar. How often to keep checkpoints.
                  Default is 1 hour.
                eval_metric: A list of tensors containing the value to evaluate.
                  Default is [].
                tqdm: Boolean. If True (Default), progress bars are shown.
                  If False, a series of loss will be shown on the console.
        """
        opt = tf.sg_opt(kwargs)

        # default training options
        opt += tf.sg_opt(lr=0.001,
                         save_dir='asset/train',
                         max_ep=1000, ep_size=100000,
                         save_interval=600, log_interval=60,
                         eval_metric=[],
                         max_keep=5, keep_interval=1,
                         tqdm=True)

        # training epoch and loss
        epoch, loss = -1, None

        # checkpoint saver
        saver = tf.train.Saver(max_to_keep=opt.max_keep,
                               keep_checkpoint_every_n_hours=opt.keep_interval)

        # add evaluation summary
        for m in opt.eval_metric:
            tf.sg_summary_metric(m)

        # summary writer
        log_dir = opt.save_dir + '/run-%02d%02d-%02d%02d' % tuple(time.localtime(time.time()))[1:5]
        summary_writer = tf.summary.FileWriter(log_dir)

        # console logging function
        def console_log(sess_):
            if epoch >= 0:
                tf.sg_info('\tEpoch[%03d:gs=%d] - loss = %s' %
                           (epoch, sess_.run(tf.sg_global_step()),
                            ('NA' if loss is None else '%8.6f' % loss)))

        # create supervisor
        sv = tf.train.Supervisor(logdir=opt.save_dir,
                                 saver=saver,
                                 save_model_secs=opt.save_interval,
                                 summary_writer=summary_writer,
                                 save_summaries_secs=opt.log_interval,
                                 global_step=tf.sg_global_step(),
                                 local_init_op=tf.sg_phase().assign(True))

        # create session
        with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            # console logging loop
            if not opt.tqdm:
                sv.loop(opt.log_interval, console_log, args=(sess, ))

            # get start epoch
            _step = sess.run(tf.sg_global_step())
            ep = _step // opt.ep_size

            # check if already finished
            if ep <= opt.max_ep:
                # logging
                tf.sg_info('Training started from epoch[%03d]-step[%d].' % (ep, _step))

                # epoch loop
                for ep in range(ep, opt.max_ep + 1):
                    # update epoch info
                    start_step = sess.run(tf.sg_global_step()) % opt.ep_size
                    epoch = ep

                    # create progressbar iterator
                    if opt.tqdm:
                        iterator = tqdm(range(start_step, opt.ep_size),
                                        total=opt.ep_size, initial=start_step,
                                        desc='train', ncols=70, unit='b', leave=False)
                    else:
                        iterator = range(start_step, opt.ep_size)

                    # batch loop
                    for _ in iterator:
                        # exit loop
                        if sv.should_stop():
                            break

                        # call train function
                        batch_loss = func(sess, opt)

                        # loss history update
                        if batch_loss is not None and \
                                not np.isnan(batch_loss.all()) and not np.isinf(batch_loss.all()):
                            if loss is None:
                                loss = np.mean(batch_loss)
                            else:
                                loss = loss * 0.9 + np.mean(batch_loss) * 0.1

                    # log epoch information
                    console_log(sess)

                # save last version
                saver.save(sess, opt.save_dir + '/model.ckpt',
                           global_step=sess.run(tf.sg_global_step()))

                # logging
                tf.sg_info('Training finished at epoch[%d]-step[%d].' %
                           (ep, sess.run(tf.sg_global_step())))
            else:
                tf.sg_info('Training already finished at epoch[%d]-step[%d].' %
                           (ep - 1, sess.run(tf.sg_global_step())))

    return wrapper
0.002884
def _deploy(self): """Deploy environment and wait for all hooks to finish executing.""" timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900)) try: self.d.setup(timeout=timeout) self.d.sentry.wait(timeout=timeout) except amulet.helpers.TimeoutError: amulet.raise_status( amulet.FAIL, msg="Deployment timed out ({}s)".format(timeout) ) except Exception: raise
0.004049
def call_frog(text): """ Call frog on the text and return (sent, offset, word, lemma, pos, morphofeat) tuples """ host, port = os.environ.get('FROG_HOST', 'localhost:9887').split(":") frogclient = FrogClient(host, port, returnall=True) sent = 1 offset = 0 for word, lemma, morph, morphofeat, ner, chunk, _p1, _p2 in frogclient.process(text): if word is None: sent += 1 else: pos = _POSMAP[morphofeat.split("(")[0]] yield Token(sent, offset, word, lemma, pos, morphofeat, ner, chunk) offset += len(word)
0.006633
def _createConnection(self, connections): """ Create GSSHAPY Connection Objects Method """ for c in connections: # Create GSSHAPY Connection object connection = Connection(slinkNumber=c['slinkNumber'], upSjuncNumber=c['upSjunc'], downSjuncNumber=c['downSjunc']) # Associate Connection with StormPipeNetworkFile connection.stormPipeNetworkFile = self
0.003976
def add_lv_load_area(self, lv_load_area): # TODO: check docstring """Adds a LV load_area to _lv_load_areas if not already existing Args ---- lv_load_area: :shapely:`Shapely Polygon object<polygons>` Descr """ self._lv_load_areas.append(lv_load_area) if not isinstance(lv_load_area, MVCableDistributorDing0): self.peak_load += lv_load_area.peak_load
0.008989
def redirect_stdout(self, enabled=True, log_level=logging.INFO): """ Redirect sys.stdout to file-like object. """ if enabled: if self.__stdout_wrapper: self.__stdout_wrapper.update_log_level(log_level=log_level) else: self.__stdout_wrapper = StdOutWrapper(logger=self, log_level=log_level) self.__stdout_stream = self.__stdout_wrapper else: self.__stdout_stream = _original_stdout # Assign the new stream to sys.stdout sys.stdout = self.__stdout_stream
0.005085
def train(self, conversation): """ Train the chat bot based on the provided list of statements that represents a single conversation. """ previous_statement_text = None previous_statement_search_text = '' statements_to_create = [] for conversation_count, text in enumerate(conversation): if self.show_training_progress: utils.print_progress_bar( 'List Trainer', conversation_count + 1, len(conversation) ) statement_search_text = self.chatbot.storage.tagger.get_bigram_pair_string(text) statement = self.get_preprocessed_statement( Statement( text=text, search_text=statement_search_text, in_response_to=previous_statement_text, search_in_response_to=previous_statement_search_text, conversation='training' ) ) previous_statement_text = statement.text previous_statement_search_text = statement_search_text statements_to_create.append(statement) self.chatbot.storage.create_many(statements_to_create)
0.002368
def words(self, fileids=None) -> Generator[str, str, None]: """ Provide the words of the corpus; skipping any paragraphs flagged by keywords to the main class constructor :param fileids: :return: words, including punctuation, one by one """ for sentence in self.sents(fileids): words = self._word_tokenizer.tokenize(sentence) for word in words: yield word
0.006652
def _compute_permanent_id(private_key): """ Internal helper. Return an authenticated service's permanent ID given an RSA private key object. The permanent ID is the base32 encoding of the SHA1 hash of the first 10 bytes (80 bits) of the public key. """ pub = private_key.public_key() p = pub.public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.PKCS1 ) z = ''.join(p.decode('ascii').strip().split('\n')[1:-1]) b = base64.b64decode(z) h1 = hashlib.new('sha1') h1.update(b) permanent_id = h1.digest()[:10] return base64.b32encode(permanent_id).lower().decode('ascii')
0.001484
def area(p):
    """Area of a polygon

    :param p: list of the points taken in any orientation,
              p[0] can differ from p[-1]
    :returns: area
    :complexity: linear
    """
    A = 0
    for i in range(len(p)):
        A += p[i - 1][0] * p[i][1] - p[i][0] * p[i - 1][1]
    return A / 2.
0.003279
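A quick check of the shoelace formula above, assuming area is in scope; note the result is signed, so a clockwise orientation flips the sign:

square = [(0, 0), (1, 0), (1, 1), (0, 1)]   # counter-clockwise unit square
assert area(square) == 1.0
assert area(list(reversed(square))) == -1.0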
def _make_headers(config, kwargs): """ Replace the kwargs with one where the headers include our user-agent """ headers = kwargs.get('headers') headers = headers.copy() if headers is not None else {} headers['User-Agent'] = config.args.user_agent kwargs = kwargs.copy() kwargs['headers'] = headers return kwargs
0.005865
def analyze(self, config_string=None): """ Analyze the given container and return the corresponding job object. On error, it will return ``None``. :param string config_string: the configuration string generated by wizard :rtype: :class:`~aeneas.job.Job` or ``None`` """ try: if config_string is not None: self.log(u"Analyzing container with the given config string") return self._analyze_txt_config(config_string=config_string) elif self.container.has_config_xml: self.log(u"Analyzing container with XML config file") return self._analyze_xml_config(config_contents=None) elif self.container.has_config_txt: self.log(u"Analyzing container with TXT config file") return self._analyze_txt_config(config_string=None) else: self.log(u"No configuration file in this container, returning None") except (OSError, KeyError, TypeError) as exc: self.log_exc(u"An unexpected error occurred while analyzing", exc, True, None) return None
0.004241
def list_files(self, extensions=None): """ List the ports contents by file type or all. :param extensions: string extensions, single string or list of extensions. :return: A list of full path names of each file. """ if self.type.lower() != 'directory': raise ValueError("Port type is not == directory") filesystem_location = self.path for root, dirs, files in os.walk(filesystem_location): if extensions is None: return [os.path.join(root, f) for f in files] elif not isinstance(extensions, list): extensions = [extensions] subset_files = [] for f in files: for extension in extensions: if f.lower().endswith(extension.lower()): subset_files.append(os.path.join(root, f)) break return subset_files
0.003148
def _six_fail_hook(modname): """Fix six.moves imports due to the dynamic nature of this class. Construct a pseudo-module which contains all the necessary imports for six :param modname: Name of failed module :type modname: str :return: An astroid module :rtype: nodes.Module """ attribute_of = modname != "six.moves" and modname.startswith("six.moves") if modname != "six.moves" and not attribute_of: raise AstroidBuildingError(modname=modname) module = AstroidBuilder(MANAGER).string_build(_IMPORTS) module.name = "six.moves" if attribute_of: # Facilitate import of submodules in Moves start_index = len(module.name) attribute = modname[start_index:].lstrip(".").replace(".", "_") try: import_attr = module.getattr(attribute)[0] except AttributeInferenceError: raise AstroidBuildingError(modname=modname) if isinstance(import_attr, nodes.Import): submodule = MANAGER.ast_from_module_name(import_attr.names[0][0]) return submodule # Let dummy submodule imports pass through # This will cause an Uninferable result, which is okay return module
0.000821
def delete_topology(self, topologyName):
    """ delete topology """
    path = self.get_topology_path(topologyName)
    LOG.info("Removing topology: {0} from path: {1}".format(
        topologyName, path))
    try:
        self.client.delete(path)
        return True
    except NoNodeError:
        raise_(StateException("NoNodeError while deleting topology",
                              StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[2])
    except NotEmptyError:
        raise_(StateException("NotEmptyError while deleting topology",
                              StateException.EX_TYPE_NOT_EMPTY_ERROR), sys.exc_info()[2])
    except ZookeeperError:
        raise_(StateException("ZookeeperError while deleting topology",
                              StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[2])
    except Exception:
        # Just re-raise the exception.
        raise
0.012571
def main_lstm_generate_text():
    """Generate text by synced sequence input and output."""
    # rnn model and update (description: see tutorial_ptb_lstm.py)
    init_scale = 0.1
    learning_rate = 1.0
    max_grad_norm = 5
    sequence_length = 20
    hidden_size = 200
    max_epoch = 4
    max_max_epoch = 100
    lr_decay = 0.9
    batch_size = 20
    top_k_list = [1, 3, 5, 10]
    print_length = 30

    model_file_name = "model_generate_text.npz"

    # ===== Prepare Data
    words = customized_read_words(input_fpath="data/trump/trump_text.txt")

    vocab = tl.nlp.create_vocab([words], word_counts_output_file='vocab.txt', min_word_count=1)
    vocab = tl.nlp.Vocabulary('vocab.txt', unk_word="<UNK>")
    vocab_size = vocab.unk_id + 1
    train_data = [vocab.word_to_id(word) for word in words]

    # Set the seed to generate sentence.
    seed = "it is a"
    # seed = basic_clean_str(seed).split()
    seed = nltk.tokenize.word_tokenize(seed)
    print('seed : %s' % seed)

    sess = tf.InteractiveSession()

    # ===== Define model
    input_data = tf.placeholder(tf.int32, [batch_size, sequence_length])
    targets = tf.placeholder(tf.int32, [batch_size, sequence_length])
    # Testing (evaluation), for generating text
    input_data_test = tf.placeholder(tf.int32, [1, 1])

    def inference(x, is_train, sequence_length, reuse=None):
        """If reuse is True, the inferences use the existing parameters,
        so different inferences share the same parameters.
        """
        print("\nsequence_length: %d, is_train: %s, reuse: %s" % (sequence_length, is_train, reuse))
        rnn_init = tf.random_uniform_initializer(-init_scale, init_scale)
        with tf.variable_scope("model", reuse=reuse):
            network = EmbeddingInputlayer(x, vocab_size, hidden_size, rnn_init, name='embedding')
            network = RNNLayer(
                network, cell_fn=tf.contrib.rnn.BasicLSTMCell, cell_init_args={
                    'forget_bias': 0.0,
                    'state_is_tuple': True
                }, n_hidden=hidden_size, initializer=rnn_init, n_steps=sequence_length, return_last=False,
                return_seq_2d=True, name='lstm1'
            )
            lstm1 = network
            network = DenseLayer(network, vocab_size, W_init=rnn_init, b_init=rnn_init, act=None, name='output')
        return network, lstm1

    # Inference for training
    network, lstm1 = inference(input_data, is_train=True, sequence_length=sequence_length, reuse=None)
    # Inference for generating text, sequence_length=1
    network_test, lstm1_test = inference(input_data_test, is_train=False, sequence_length=1, reuse=True)
    y_linear = network_test.outputs
    y_soft = tf.nn.softmax(y_linear)
    # y_id = tf.argmax(tf.nn.softmax(y), 1)

    # ===== Define train ops
    def loss_fn(outputs, targets, batch_size, sequence_length):
        # Returns the cross-entropy cost of two sequences; the softmax is
        # applied internally.
        # outputs : 2D tensor [n_examples, n_outputs]
        # targets : 2D tensor [n_examples, n_outputs]
        # n_examples = batch_size * sequence_length
        # so the cost is the averaged cost of each mini-batch (concurrent process).
        loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [outputs], [tf.reshape(targets, [-1])], [tf.ones([batch_size * sequence_length])]
        )
        cost = tf.reduce_sum(loss) / batch_size
        return cost

    # Cost for training
    cost = loss_fn(network.outputs, targets, batch_size, sequence_length)
    # Truncated backpropagation for training
    with tf.variable_scope('learning_rate'):
        lr = tf.Variable(0.0, trainable=False)
    # You can get all trainable parameters as follows:
    # tvars = tf.trainable_variables()
    # Alternatively, you can specify the parameters for training as follows:
    # tvars = network.all_params      # all parameters
    # tvars = network.all_params[1:]  # parameters except the embedding matrix
    # Train the whole network.
    tvars = network.all_params
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), max_grad_norm)
    optimizer = tf.train.GradientDescentOptimizer(lr)
    train_op = optimizer.apply_gradients(zip(grads, tvars))

    # ===== Training
    sess.run(tf.global_variables_initializer())

    print("\nStart learning a model to generate text")
    for i in range(max_max_epoch):
        # decrease the learning_rate after ``max_epoch``, by multiplying lr_decay.
        new_lr_decay = lr_decay**max(i - max_epoch, 0.0)
        sess.run(tf.assign(lr, learning_rate * new_lr_decay))

        print("Epoch: %d/%d Learning rate: %.8f" % (i + 1, max_max_epoch, sess.run(lr)))
        epoch_size = ((len(train_data) // batch_size) - 1) // sequence_length

        start_time = time.time()
        costs = 0.0
        iters = 0
        # reset all states at the beginning of every epoch
        state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)
        for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data, batch_size, sequence_length)):
            _cost, state1, _ = sess.run(
                [cost, lstm1.final_state, train_op], feed_dict={
                    input_data: x,
                    targets: y,
                    lstm1.initial_state: state1
                }
            )
            costs += _cost
            iters += sequence_length

            if step % (epoch_size // 10) == 1:
                print(
                    "%.3f perplexity: %.3f speed: %.0f wps" %
                    (step * 1.0 / epoch_size, np.exp(costs / iters), iters * batch_size / (time.time() - start_time))
                )
        train_perplexity = np.exp(costs / iters)
        # print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
        print("Epoch: %d/%d Train Perplexity: %.3f" % (i + 1, max_max_epoch, train_perplexity))

        # for diversity in diversity_list:
        # testing: sample from top k words
        for top_k in top_k_list:
            # Testing: generate some text from a given seed.
            state1 = tl.layers.initialize_rnn_state(lstm1_test.initial_state)
            # state2 = tl.layers.initialize_rnn_state(lstm2_test.initial_state)
            outs_id = [vocab.word_to_id(w) for w in seed]
            # feed the seed to initialize the state for generation.
            for ids in outs_id[:-1]:
                a_id = np.asarray(ids).reshape(1, 1)
                # run the state tensor directly so state1 stays a state tuple
                # that can be fed back in on the next step.
                state1 = sess.run(
                    lstm1_test.final_state, feed_dict={
                        input_data_test: a_id,
                        lstm1_test.initial_state: state1
                    }
                )
            # feed the last word of the seed, then start generating the sentence.
            a_id = outs_id[-1]
            for _ in range(print_length):
                a_id = np.asarray(a_id).reshape(1, 1)
                out, state1 = sess.run(
                    [y_soft, lstm1_test.final_state], feed_dict={
                        input_data_test: a_id,
                        lstm1_test.initial_state: state1
                    }
                )
                # Without sampling
                # a_id = np.argmax(out[0])
                # Sample from all words; if vocab_size is large,
                # this may cause numeric errors.
                # a_id = tl.nlp.sample(out[0], diversity)
                # Sample from the top k words.
                a_id = tl.nlp.sample_top(out[0], top_k=top_k)
                outs_id.append(a_id)

            sentence = [vocab.id_to_word(w) for w in outs_id]
            sentence = " ".join(sentence)
            # print(diversity, ':', sentence)
            print(top_k, ':', sentence)

    print("Save model")
    tl.files.save_npz(network_test.all_params, name=model_file_name)
0.001916
def GetArtifactKnowledgeBase(client_obj, allow_uninitialized=False):
  """This generates an artifact knowledge base from a GRR client.

  Args:
    client_obj: A GRRClient object which is opened for reading.
    allow_uninitialized: If True we accept an uninitialized knowledge_base.

  Returns:
    A KnowledgeBase semantic value.

  Raises:
    ArtifactProcessingError: If called when the knowledge base has not been
    initialized.
    KnowledgeBaseUninitializedError: If we failed to initialize the knowledge
    base.

  This is needed so that the artifact library has a standardized interface to
  the data that is actually stored in the GRRClient object in the GRR
  datastore.

  We expect that the client KNOWLEDGE_BASE is already filled out through the
  KnowledgeBaseInitialization flow, but attempt to make some intelligent
  guesses if things failed.
  """
  client_schema = client_obj.Schema
  kb = client_obj.Get(client_schema.KNOWLEDGE_BASE)
  if not allow_uninitialized:
    if not kb:
      raise artifact_utils.KnowledgeBaseUninitializedError(
          "KnowledgeBase empty for %s." % client_obj.urn)
    if not kb.os:
      raise artifact_utils.KnowledgeBaseAttributesMissingError(
          "KnowledgeBase missing OS for %s. Knowledgebase content: %s" %
          (client_obj.urn, kb))
  if not kb:
    kb = client_schema.KNOWLEDGE_BASE()

  SetCoreGRRKnowledgeBaseValues(kb, client_obj)

  if kb.os == "Windows":
    # Add fallback values.
    if not kb.environ_allusersappdata and kb.environ_allusersprofile:
      # Guess if we don't have it already.
      if kb.os_major_version >= 6:
        kb.environ_allusersappdata = u"c:\\programdata"
        kb.environ_allusersprofile = u"c:\\programdata"
      else:
        kb.environ_allusersappdata = (u"c:\\documents and settings\\All Users\\"
                                      "Application Data")
        kb.environ_allusersprofile = u"c:\\documents and settings\\All Users"

  return kb
0.007117
def _TemplateNamesToFiles(self, template_str):
    """Parses a colon-separated string of template names into a list of file handles."""
    template_list = template_str.split(":")
    template_files = []
    try:
        for tmplt in template_list:
            template_files.append(open(os.path.join(self.template_dir, tmplt), "r"))
    except:  # noqa
        # Close any handles already opened before re-raising.
        for tmplt in template_files:
            tmplt.close()
        raise
    return template_files
0.00625
def get_setter(self, oid):
    """
    Retrieve the setter registered for an OID, falling back to the setter
    of its longest registered parent prefix, or the default setter.
    """
    if oid in self.setter:
        return self.setter[oid]
    parents = [
        poid for poid in list(self.setter.keys())
        if oid.startswith(poid)
    ]
    if parents:
        return self.setter[max(parents)]
    return self.default_setter
0.044444
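A sketch of the prefix fallback, assuming `setter` is a plain dict keyed by OID prefix strings and the function above is in scope at module level (the snippet does not show how the table is built). Note that plain `startswith` matching would also treat '1.3.6.1' as a parent of '1.3.6.10', which a dotted-component comparison would reject:

class _DemoMib:
    default_setter = 'default'
    setter = {
        '1.3.6.1': 'set_internet_subtree',
        '1.3.6.1.2.1': 'set_mib2_subtree',
    }
    get_setter = get_setter  # reuse the function above as a method

d = _DemoMib()
print(d.get_setter('1.3.6.1.2.1.1.5'))  # set_mib2_subtree (longest prefix wins)
print(d.get_setter('1.3.6.1.4.1'))      # set_internet_subtree
print(d.get_setter('2.16.840'))         # default (no registered prefix)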
def has_trivial_constructor(class_):
    """Return a reference to the class's public trivial constructor,
    or None if it does not have one."""
    class_ = class_traits.get_declaration(class_)
    trivial = find_trivial_constructor(class_)
    if trivial and trivial.access_type == 'public':
        return trivial
    return None
0.003115
def get(self):
    """
    Reloads the measurements from the backing store.
    :return: 200 if the reload succeeds, 500 otherwise.
    """
    try:
        self._measurementController.reloadCompletedMeasurements()
        return None, 200
    except Exception:
        logger.exception("Failed to reload measurements")
        return str(sys.exc_info()), 500
0.008287
def _host(): """Get the Host from the most recent HTTP request.""" host_and_port = request.urlparts[1] try: host, _ = host_and_port.split(':') except ValueError: # No port yet. Host defaults to '127.0.0.1' in bottle.request. return DEFAULT_BIND return host or DEFAULT_BIND
0.003165
def record_list_projects(object_id, input_params={}, always_retry=True, **kwargs): """ Invokes the /record-xxxx/listProjects API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Cloning#API-method%3A-%2Fclass-xxxx%2FlistProjects """ return DXHTTPRequest('/%s/listProjects' % object_id, input_params, always_retry=always_retry, **kwargs)
0.010336
def handle(self): """ Executes the command """ database = self.option("database") repository = DatabaseMigrationRepository(self.resolver, "migrations") repository.set_source(database) repository.create_repository() self.info("Migration table created successfully")
0.006061
def extract(self, searches, tree=None, as_dict=True): """ >>> foo = pdf.extract([['pages', 'LTPage']]) >>> foo {'pages': [<LTPage>, <LTPage>]} >>> pdf.extract([['bar', ':in_bbox("100,100,400,400")']], foo['pages'][0]) {'bar': [<LTTextLineHorizontal>, <LTTextBoxHorizontal>,... """ if self.tree is None or self.pq is None: self.load() if tree is None: pq = self.pq else: pq = PyQuery(tree, css_translator=PDFQueryTranslator()) results = [] formatter = None parent = pq for search in searches: if len(search) < 3: search = list(search) + [formatter] key, search, tmp_formatter = search if key == 'with_formatter': if isinstance(search, six.string_types): # is a pyquery method name, e.g. 'text' formatter = lambda o, search=search: getattr(o, search)() elif hasattr(search, '__call__') or not search: # is a method, or None to end formatting formatter = search else: raise TypeError("Formatter should be either a pyquery " "method name or a callable function.") elif key == 'with_parent': parent = pq(search) if search else pq else: try: result = parent("*").filter(search) if \ hasattr(search, '__call__') else parent(search) except cssselect.SelectorSyntaxError as e: raise cssselect.SelectorSyntaxError( "Error applying selector '%s': %s" % (search, e)) if tmp_formatter: result = tmp_formatter(result) results += result if type(result) == tuple else [[key, result]] if as_dict: results = dict(results) return results
0.001898
def git_clone(target_dir, repo_location, branch_or_tag=None, verbose=True): """Clone repo at repo_location to target_dir and checkout branch_or_tag. If branch_or_tag is not specified, the HEAD of the primary branch of the cloned repo is checked out. """ target_dir = pipes.quote(target_dir) command = ['git', 'clone'] if verbose: command.append('--verbose') if os.path.isdir(repo_location): command.append('--no-hardlinks') command.extend([pipes.quote(repo_location), target_dir]) if branch_or_tag: command.extend(['--branch', branch_or_tag]) return execute_git_command(command)
0.001546
def VEXTRACTF128(cpu, dest, src, offset):
    """Extract Packed Floating-Point Values

    Extracts 128 bits of packed floating-point values from the source
    operand (second operand) at the 128-bit offset selected by imm8[0]
    into the destination operand (first operand). The destination may be
    either an XMM register or a 128-bit memory location.
    """
    offset = offset.read()
    dest.write(Operators.EXTRACT(src.read(), offset * 128, (offset + 1) * 128))
0.006036
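The lane arithmetic can be illustrated in plain Python, modeling extraction as a shift-and-mask over a 256-bit integer (an assumption about EXTRACT's offset/width semantics, not taken from the snippet):

# EXTRACT modeled as shift-and-mask over a 256-bit Python integer.
def extract_bits(value, offset_bits, size_bits):
    return (value >> offset_bits) & ((1 << size_bits) - 1)

ymm = (0xAAAA << 128) | 0xBBBB  # two 128-bit lanes packed into 256 bits
print(hex(extract_bits(ymm, 0 * 128, 128)))  # 0xbbbb -- imm8[0] == 0: low lane
print(hex(extract_bits(ymm, 1 * 128, 128)))  # 0xaaaa -- imm8[0] == 1: high lane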
def value(self, value, *args, **kwargs):
    """
    Parse a string value and return the corresponding datetime,
    or None if it does not match the configured format.
    """
    from datetime import datetime

    value = self.obj.value(value, *args, **kwargs)
    try:
        rv = datetime.strptime(value, self.format)
    except ValueError as _:  # noqa
        rv = None
    return rv
0.005333
def secure(func_or_obj, check_permissions_for_obj=None):
    """
    This method secures a method or class depending on invocation.

    To decorate a method use one argument:
        @secure(<check_permissions_method>)

    To secure a class, invoke with two arguments:
        secure(<obj instance>, <check_permissions_method>)
    """
    if _allowed_check_permissions_types(func_or_obj):
        return _secure_method(func_or_obj)
    else:
        if not _allowed_check_permissions_types(check_permissions_for_obj):
            msg = "When securing an object, secure() requires the " + \
                  "second argument to be a method"
            raise TypeError(msg)
        return _SecuredAttribute(func_or_obj, check_permissions_for_obj)
0.001339
def get_all_context_names(context_num): """Based on the nucleotide base context number, return a list of strings representing each context. Parameters ---------- context_num : int number representing the amount of nucleotide base context to use. Returns ------- a list of strings containing the names of the base contexts """ if context_num == 0: return ['None'] elif context_num == 1: return ['A', 'C', 'T', 'G'] elif context_num == 1.5: return ['C*pG', 'CpG*', 'TpC*', 'G*pA', 'A', 'C', 'T', 'G'] elif context_num == 2: dinucs = list(set( [d1+d2 for d1 in 'ACTG' for d2 in 'ACTG'] )) return dinucs elif context_num == 3: trinucs = list(set( [t1+t2+t3 for t1 in 'ACTG' for t2 in 'ACTG' for t3 in 'ACTG'] )) return trinucs
0.001032
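A quick check of the expected counts — 4**n names for whole-number context sizes, plus the special 8-name CpG/TpC set for 1.5 — assuming the function above is in scope:

for n in (0, 1, 1.5, 2, 3):
    names = get_all_context_names(n)
    print(n, len(names))  # 0 -> 1, 1 -> 4, 1.5 -> 8, 2 -> 16, 3 -> 64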
def raise_on_failure(mainfunc): """raise if and only if mainfunc fails""" try: errors = mainfunc() if errors: exit(errors) except CalledProcessError as error: exit(error.returncode) except SystemExit as error: if error.code: raise except KeyboardInterrupt: # I don't plan to test-cover this. :pragma:nocover: exit(1)
0.004975
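A hypothetical entry point wired through the helper above; any truthy return value becomes the process exit status via exit(errors):

def main():
    problems = []  # a real entry point would collect failures here
    return len(problems)  # falsy return -> clean exit

raise_on_failure(main)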
def package_config(path, template='__config__.ini.TEMPLATE', config_name='__config__.ini', **params): """configure the module at the given path with a config template and file. path = the filesystem path to the given module template = the config template filename within that path config_name = the config filename within that path params = a dict containing config params, which are found in the template using %(key)s. """ config_fns = [] template_fns = rglob(path, template) for template_fn in template_fns: config_template = ConfigTemplate(fn=template_fn) config = config_template.render( fn=os.path.join(os.path.dirname(template_fn), config_name), prompt=True, path=path, **params) config.write() config_fns.append(config.fn) log.info('wrote %r' % config) return config_fns
0.004296
def lca(root, p, q): """ :type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode """ if root is None or root is p or root is q: return root left = lca(root.left, p, q) right = lca(root.right, p, q) if left is not None and right is not None: return root return left if left else right
0.00277
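A small worked example on a hand-built tree; this `TreeNode` is a minimal stand-in for whatever node class the snippet assumes:

# Minimal stand-in node class; only val/left/right are needed.
class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

root = TreeNode(3)
root.left, root.right = TreeNode(5), TreeNode(1)
root.left.left, root.left.right = TreeNode(6), TreeNode(2)

print(lca(root, root.left.left, root.left.right).val)  # 5: split point of 6 and 2
print(lca(root, root.left, root.right).val)            # 3: root separates 5 and 1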