signature | body | docstring | id |
---|---|---|---|
@property<EOL><INDENT>def cursor(self):<DEDENT> | return self.get("<STR_LIT>", b"<STR_LIT>")<EOL> | bytes: The url-safe cursor for a query. | f585:c1:m6 |
@property<EOL><INDENT>def cursor(self):<DEDENT> | return self._options.cursor<EOL> | str: The url-safe cursor for the next batch of results. | f585:c2:m1 |
@property<EOL><INDENT>def has_more(self):<DEDENT> | return not self._complete<EOL> | bool: Whether or not there are more results. | f585:c2:m2 |
@property<EOL><INDENT>def cursor(self):<DEDENT> | return self._cursor<EOL> | str: The url-safe cursor for the next page of results. | f585:c3:m1 |
@property<EOL><INDENT>def has_more(self):<DEDENT> | return self._resultset.has_more<EOL> | bool: Whether or not there are more pages. | f585:c4:m1 |
@property<EOL><INDENT>def cursor(self):<DEDENT> | return self._resultset.cursor<EOL> | str: The url-safe cursor for the next page of results. | f585:c4:m2 |
def fetch_next_page(self): | for page in self:<EOL><INDENT>return page<EOL><DEDENT>else:<EOL><INDENT>return Page(self._resultset.cursor, iter(()))<EOL><DEDENT> | Fetch the next Page of results.
Returns:
Page: The next page of results. | f585:c4:m3 |
def select(self, *projection): | return self._replace(projection=_prepare_projection(projection))<EOL> | Return a new query with its projection replaced.
Parameters:
\*projection(str): The fields to project.
Returns:
Query: The derived Query. | f585:c5:m1 |
def where(self, *filters): | return self._replace(filters=filters)<EOL> | Return a new query, replacing the current set of filters.
Parameters:
\*filters(PropertyFilter): The filters to add.
Returns:
Query: The derived Query. | f585:c5:m2 |
def and_where(self, *filters): | return self._replace(filters=self.filters + filters)<EOL> | Return a new query, combining the given filters with the
current query's filters to form an "and".
Parameters:
\*filters(PropertyFilter): The filters to add.
Returns:
Query: The derived Query. | f585:c5:m3 |
def order_by(self, *orders): | return self._replace(orders=self.orders + orders)<EOL> | Returns a new query containing an additional set of orders.
Parameters:
\*orders(str): The sort orders to add.
Returns:
Query: The derived Query. | f585:c5:m4 |
def with_ancestor(self, ancestor): | return self._replace(ancestor=ancestor)<EOL> | Returns a new query with its ancestor updated.
Parameters:
ancestor(anom.Key): The new ancestor.
Returns:
Query: The derived Query. | f585:c5:m5 |
def with_namespace(self, namespace): | return self._replace(namespace=namespace)<EOL> | Returns a new query with its namespace updated.
Parameters:
namespace(str): The new namespace.
Returns:
Query: The derived Query. | f585:c5:m6 |
def with_offset(self, offset): | return self._replace(offset=offset)<EOL> | Returns a new query with its offset updated.
Parameters:
offset(int): The new offset.
Returns:
Query: The derived Query. | f585:c5:m7 |
def with_limit(self, limit): | return self._replace(limit=limit)<EOL> | Returns a new query with its limit updated.
Parameters:
limit(int): The new limit.
Returns:
Query: The derived Query. | f585:c5:m8 |
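The builders above (`select`, `where`, `and_where`, `order_by`, and the `with_*` family) each return a derived query, so they compose by chaining. A minimal sketch of that style, assuming a hypothetical anom-style model `Person` with an `age` property:

```python
# Hypothetical model and property names; every call returns a new Query.
query = (Person.query()
         .where(Person.age >= 21)     # replace the current filters
         .order_by("-age")            # append a descending sort order
         .with_namespace("tenant-a")  # scope the query to a namespace
         .with_limit(10))             # cap the number of results
```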
def count(self, *, page_size=DEFAULT_BATCH_SIZE, **options): | entities = <NUM_LIT:0><EOL>options = QueryOptions(self).replace(keys_only=True)<EOL>for page in self.paginate(page_size=page_size, **options):<EOL><INDENT>entities += len(list(page))<EOL><DEDENT>return entities<EOL> | Counts the number of entities that match this query.
Note:
Since Datastore doesn't provide a native way to count
entities by query, this method paginates through all the
entities' keys and counts them.
Parameters:
\**options(QueryOptions, optional)
Returns:
int: The number of entities. | f585:c5:m9 |
def delete(self, *, page_size=DEFAULT_BATCH_SIZE, **options): | from .model import delete_multi<EOL>deleted = <NUM_LIT:0><EOL>options = QueryOptions(self).replace(keys_only=True)<EOL>for page in self.paginate(page_size=page_size, **options):<EOL><INDENT>keys = list(page)<EOL>deleted += len(keys)<EOL>delete_multi(keys)<EOL><DEDENT>return deleted<EOL> | Deletes all the entities that match this query.
Note:
Since Datastore doesn't provide a native way to delete
entities by query, this method paginates through all the
entities' keys and issues a single delete_multi call per
page.
Parameters:
\**options(QueryOptions, optional)
Returns:
int: The number of deleted entities. | f585:c5:m10 |
def get(self, **options): | sub_query = self.with_limit(<NUM_LIT:1>)<EOL>options = QueryOptions(sub_query).replace(batch_size=<NUM_LIT:1>)<EOL>for result in sub_query.run(**options):<EOL><INDENT>return result<EOL><DEDENT>return None<EOL> | Run this query and get the first result.
Parameters:
\**options(QueryOptions, optional)
Returns:
Model: An entity or None if there were no results. | f585:c5:m11 |
def run(self, **options): | return Resultset(self._prepare(), QueryOptions(self, **options))<EOL> | Run this query and return a result iterator.
Parameters:
\**options(QueryOptions, optional)
Returns:
Resultset: An iterator for this query's results. | f585:c5:m12 |
def paginate(self, *, page_size, **options): | return Pages(self._prepare(), page_size, QueryOptions(self, **options))<EOL> | Run this query and return a page iterator.
Parameters:
page_size(int): The number of entities to fetch per page.
\**options(QueryOptions, optional)
Returns:
Pages: An iterator for this query's pages of results. | f585:c5:m13 |
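`run`, `get`, and `paginate` above are the three ways to execute a built query; a hedged usage sketch, reusing the hypothetical `query` from the chaining example:

```python
first = query.get()        # first matching entity, or None (limit forced to 1)
for entity in query.run(batch_size=100):  # lazy Resultset over all matches
    process(entity)                       # process() is a placeholder
pages = query.paginate(page_size=50)      # Pages iterator, one Page per fetch
for page in pages:
    cursor = pages.cursor  # url-safe cursor for resuming at the next page
```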
def set_default_namespace(namespace=None): | global _default_namespace<EOL>_default_namespace = namespace or "<STR_LIT>"<EOL>return _default_namespace<EOL> | Set the global default namespace.
Parameters:
namespace(str): The namespace to set as the global default.
Returns:
str: The default namespace that was set. | f587:m0 |
def get_namespace(): | try:<EOL><INDENT>return _namespace.current<EOL><DEDENT>except AttributeError:<EOL><INDENT>return _default_namespace<EOL><DEDENT> | str: The namespace for the current thread. | f587:m1 |
def set_namespace(namespace=None): | if namespace is None:<EOL><INDENT>try:<EOL><INDENT>del _namespace.current<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>else:<EOL><INDENT>_namespace.current = namespace<EOL><DEDENT> | Set the current thread-local default namespace. If namespace
is None, then the thread-local namespace value is removed, forcing
`get_namespace()` to return the global default namespace on
subsequent calls.
Parameters:
namespace(str): namespace to set as the current thread-local
default.
Returns:
None | f587:m2 |
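A short illustration of the getter/setter pair, in the doctest style these docstrings already use (assuming the global default namespace is the empty string):

>>> set_namespace("reports")
>>> get_namespace()
'reports'
>>> set_namespace(None)  # removes the thread-local value...
>>> get_namespace()      # ...so the global default is returned
''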
@contextmanager<EOL>def namespace(namespace): | try:<EOL><INDENT>current_namespace = _namespace.current<EOL><DEDENT>except AttributeError:<EOL><INDENT>current_namespace = None<EOL><DEDENT>set_namespace(namespace)<EOL>try:<EOL><INDENT>yield<EOL><DEDENT>finally:<EOL><INDENT>set_namespace(current_namespace)<EOL><DEDENT> | Context manager for stacking the current thread-local default
namespace. Exiting the context sets the thread-local default
namespace back to the previously-set namespace. If there is no
previous namespace, then the thread-local namespace is cleared.
Example:
>>> with namespace("foo"):
... with namespace("bar"):
... assert get_namespace() == "bar"
... assert get_namespace() == "foo"
>>> assert get_namespace() == ""
Parameters:
namespace(str): namespace to set as the current thread-local
default.
Returns:
None | f587:m3 |
def is_default(entity, prop, name): | return getattr(entity, name) == prop.default<EOL> | bool: True if the value of a property is equal to its default. | f588:m0 |
def is_not_default(entity, prop, name): | return getattr(entity, name) != prop.default<EOL> | bool: True if the value of a property is not equal to its default. | f588:m1 |
def is_empty(entity, prop, name): | return name not in entity._data<EOL> | bool: True if the value of a property is not set. | f588:m2 |
def is_not_empty(entity, prop, name): | return name in entity._data<EOL> | bool: True if the value of a property is set. | f588:m3 |
def is_none(entity, prop, name): | return is_not_empty(entity, prop, name) and getattr(entity, name) is None<EOL> | bool: True if the value of a property is None. | f588:m4 |
def is_not_none(entity, prop, name): | return is_not_empty(entity, prop, name) and getattr(entity, name) is not None<EOL> | bool: True if the value of a property is not None. | f588:m5 |
def is_true(entity, prop, name): | return is_not_empty(entity, prop, name) and bool(getattr(entity, name))<EOL> | bool: True if the value of a property is True. | f588:m6 |
def is_false(entity, prop, name): | return is_not_empty(entity, prop, name) and not bool(getattr(entity, name))<EOL> | bool: True if the value of a property is False. | f588:m7 |
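All eight predicates share the `(entity, prop, name)` calling convention and only inspect `entity._data` and `prop.default`. A hedged sketch of direct use, with a hypothetical `Entity` model exposing an optional `name` property:

```python
# Hypothetical model; attribute access mirrors the getattr calls above.
entity = Entity()
assert is_empty(entity, Entity.name, "name")      # nothing stored yet
entity.name = "example"
assert is_not_empty(entity, Entity.name, "name")  # a value is now present
assert is_true(entity, Entity.name, "name")       # non-empty string is truthy
```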
def rate(base, target, error_log=None): | if base == target:<EOL><INDENT>return decimal.Decimal(<NUM_LIT>)<EOL><DEDENT>services = [yahoo, fixer, ecb]<EOL>if error_log is None:<EOL><INDENT>error_log = _error_log<EOL><DEDENT>for fn in services:<EOL><INDENT>try:<EOL><INDENT>return fn(base, target)<EOL><DEDENT>except Exception as e:<EOL><INDENT>error_log(e)<EOL><DEDENT><DEDENT>return None<EOL> | Get current exchange rate.
:param base: A base currency
:param target: Convert to the target currency
:param error_log: A callable function to track the exception
It parses the current exchange rate from these services:
1) Yahoo finance
2) fixer.io
3) European Central Bank
It will fall back to the next service when the previous one is not available.
The exchange rate is a decimal number. If `None` is returned, it means
the parsing failed::
>>> import exchange
>>> exchange.rate('USD', 'CNY')
Decimal('6.2045') | f591:m0 |
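Because `error_log` receives each exception raised by a failing service, any callable works there; for instance, collecting failures in a list (a usage sketch, not part of the library's docs):

>>> errors = []
>>> usd_cny = exchange.rate('USD', 'CNY', error_log=errors.append)
>>> # any service that raised before one succeeded is now in `errors`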
def yahoo(base, target): | api_url = '<STR_LIT>'<EOL>resp = requests.get(<EOL>api_url,<EOL>params={<EOL>'<STR_LIT:e>': '<STR_LIT>',<EOL>'<STR_LIT:f>': '<STR_LIT>',<EOL>'<STR_LIT:s>': '<STR_LIT>'.format(base, target)<EOL>},<EOL>timeout=<NUM_LIT:1>,<EOL>)<EOL>value = resp.text.split('<STR_LIT:U+002C>', <NUM_LIT:2>)[<NUM_LIT:1>]<EOL>return decimal.Decimal(value)<EOL> | Parse data from Yahoo. | f591:m2 |
def fixer(base, target): | api_url = '<STR_LIT>'<EOL>resp = requests.get(<EOL>api_url,<EOL>params={<EOL>'<STR_LIT>': base,<EOL>'<STR_LIT>': target,<EOL>},<EOL>timeout=<NUM_LIT:1>,<EOL>)<EOL>data = resp.json()<EOL>return decimal.Decimal(data['<STR_LIT>'][target])<EOL> | Parse data from fixer.io. | f591:m3 |
def ecb(base, target): | api_url = '<STR_LIT>'<EOL>resp = requests.get(api_url, timeout=<NUM_LIT:1>)<EOL>text = resp.text<EOL>def _find_rate(symbol):<EOL><INDENT>if symbol == '<STR_LIT>':<EOL><INDENT>return decimal.Decimal(<NUM_LIT>)<EOL><DEDENT>m = re.findall(r"<STR_LIT>" % symbol, text)<EOL>return decimal.Decimal(m[<NUM_LIT:0>])<EOL><DEDENT>return _find_rate(target) / _find_rate(base)<EOL> | Parse data from European Central Bank. | f591:m4 |
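Since the ECB feed quotes every currency against EUR, `_find_rate(target) / _find_rate(base)` derives the cross rate. For example, with hypothetical quotes EUR→USD = 1.10 and EUR→GBP = 0.88, the USD→GBP rate is 0.88 / 1.10 = 0.80.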
def get_type(self): | return self.__typ<EOL> | Returns type | f595:c0:m1 |
def get_accession_number(self): | return self.__accession_number<EOL> | Returns accession number | f595:c0:m2 |
def get_source(self): | return self.__source<EOL> | Returns source | f595:c0:m3 |
def get_species_text(self): | return self.__species_text<EOL> | Returns species_text | f596:c0:m1 |
def get_component_text(self): | return self.__component_text<EOL> | Returns component_text | f596:c0:m2 |
def get_component_accession(self): | return self.__component_accession<EOL> | Returns component_accession | f596:c0:m3 |
def get_strain_text(self): | return self.__strain_text<EOL> | Returns strain_text | f596:c0:m4 |
def get_strain_accession(self): | return self.__strain_accession<EOL> | Returns strain_accession | f596:c0:m5 |
def get_source_type(self): | return self.__source_type<EOL> | Returns source_type | f596:c0:m6 |
def get_source_accession(self): | return self.__source_accession<EOL> | Returns source_accession | f596:c0:m7 |
def get_comments(self): | return self.__comments<EOL> | Returns comments | f596:c0:m8 |
def __get_species_accession(self): | return self.__species_accession<EOL> | Returns species_accession | f596:c0:m9 |
def get_datatype(self): | return self.__datatype<EOL> | Returns datatype | f597:c0:m1 |
def get_text(self): | return self.__text<EOL> | Returns text | f597:c0:m2 |
def get_created_on(self): | return self.__created_on<EOL> | Returns created_on | f597:c0:m3 |
def __get_datatype_id(self): | return self.__datatype_id<EOL> | Returns datatype_id | f597:c0:m4 |
def get_name(self): | return self.__name<EOL> | Returns name | f598:c0:m1 |
def get_type(self): | return self.__typ<EOL> | Returns type | f598:c0:m2 |
def get_adapted(self): | return self.__adapted<EOL> | Returns adapted | f598:c0:m3 |
def get_language(self): | return self.__language<EOL> | Returns language | f598:c0:m4 |
def get_source(self): | return self.__source<EOL> | Returns source | f598:c0:m5 |
def set_download_cache_path(path): | __DOWNLOAD_PARAMS['<STR_LIT:path>'] = path<EOL> | Sets download cache path. | f599:m0 |
def set_auto_update(auto_update): | __DOWNLOAD_PARAMS['<STR_LIT>'] = auto_update<EOL> | Sets auto update flag. | f599:m1 |
def get_formulae(chebi_id): | if len(__FORMULAE) == <NUM_LIT:0>:<EOL><INDENT>__parse_chemical_data()<EOL><DEDENT>return __FORMULAE[chebi_id] if chebi_id in __FORMULAE else []<EOL> | Returns formulae | f599:m2 |
def get_all_formulae(chebi_ids): | all_formulae = [get_formulae(chebi_id) for chebi_id in chebi_ids]<EOL>return [x for sublist in all_formulae for x in sublist]<EOL> | Returns all formulae | f599:m3 |
def get_mass(chebi_id): | if len(__MASSES) == <NUM_LIT:0>:<EOL><INDENT>__parse_chemical_data()<EOL><DEDENT>return __MASSES[chebi_id] if chebi_id in __MASSES else float('<STR_LIT>')<EOL> | Returns mass | f599:m4 |
def get_charge(chebi_id): | if len(__CHARGES) == <NUM_LIT:0>:<EOL><INDENT>__parse_chemical_data()<EOL><DEDENT>return __CHARGES[chebi_id] if chebi_id in __CHARGES else float('<STR_LIT>')<EOL> | Returns charge | f599:m5 |
def __parse_chemical_data(): | filename = get_file('<STR_LIT>')<EOL>with io.open(filename, '<STR_LIT:r>', encoding='<STR_LIT>') as textfile:<EOL><INDENT>next(textfile)<EOL>for line in textfile:<EOL><INDENT>tokens = line.strip().split('<STR_LIT:\t>')<EOL>if tokens[<NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>chebi_id = int(tokens[<NUM_LIT:1>])<EOL>if chebi_id not in __FORMULAE:<EOL><INDENT>__FORMULAE[chebi_id] = []<EOL><DEDENT>form = Formula(tokens[<NUM_LIT:4>], tokens[<NUM_LIT:2>])<EOL>__FORMULAE[chebi_id].append(form)<EOL><DEDENT>elif tokens[<NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>__MASSES[int(tokens[<NUM_LIT:1>])] = float(tokens[<NUM_LIT:4>])<EOL><DEDENT>elif tokens[<NUM_LIT:3>] == '<STR_LIT>':<EOL><INDENT>__CHARGES[int(tokens[<NUM_LIT:1>])] = int(tokens[<NUM_LIT:4>]<EOL>if tokens[<NUM_LIT:4>][-<NUM_LIT:1>] != '<STR_LIT:->'<EOL>else '<STR_LIT:->' + tokens[<NUM_LIT:4>][:-<NUM_LIT:1>])<EOL><DEDENT><DEDENT><DEDENT> | Gets and parses file | f599:m6 |
def get_comments(chebi_id): | if len(__COMMENTS) == <NUM_LIT:0>:<EOL><INDENT>__parse_comments()<EOL><DEDENT>return __COMMENTS[chebi_id] if chebi_id in __COMMENTS else []<EOL> | Returns comments | f599:m7 |
def get_all_comments(chebi_ids): | all_comments = [get_comments(chebi_id) for chebi_id in chebi_ids]<EOL>return [x for sublist in all_comments for x in sublist]<EOL> | Returns all comments | f599:m8 |
def __parse_comments(): | filename = get_file('<STR_LIT>')<EOL>with io.open(filename, '<STR_LIT:r>', encoding='<STR_LIT>') as textfile:<EOL><INDENT>next(textfile)<EOL>for line in textfile:<EOL><INDENT>tokens = line.strip().split('<STR_LIT:\t>')<EOL>chebi_id = int(tokens[<NUM_LIT:1>])<EOL>if chebi_id not in __COMMENTS:<EOL><INDENT>__COMMENTS[chebi_id] = []<EOL><DEDENT>com = Comment(tokens[<NUM_LIT:3>],<EOL>tokens[<NUM_LIT:4>],<EOL>tokens[<NUM_LIT:5>],<EOL>datetime.datetime.strptime(tokens[<NUM_LIT:2>], '<STR_LIT>'))<EOL>__COMMENTS[chebi_id].append(com)<EOL><DEDENT><DEDENT> | Gets and parses file | f599:m9 |
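From `get_formulae` through the relation accessors below, the module uses one lazy-load pattern: the first call parses a downloaded flat file into a module-level dict, and every later call is a plain lookup. A generic sketch of the pattern, with hypothetical names:

```python
_CACHE = {}  # module-level store, filled exactly once

def _parse_file():
    # Hypothetical parse step: read the flat file and populate _CACHE.
    _CACHE[15377] = 'example value'

def get_value(key):
    if len(_CACHE) == 0:    # first call triggers the (expensive) parse
        _parse_file()
    return _CACHE.get(key)  # later calls are dict lookups; None if absent
```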
def get_compound_origins(chebi_id): | if len(__COMPOUND_ORIGINS) == <NUM_LIT:0>:<EOL><INDENT>__parse_compound_origins()<EOL><DEDENT>return __COMPOUND_ORIGINS[chebi_id] if chebi_id in __COMPOUND_ORIGINS else []<EOL> | Returns compound origins | f599:m10 |
def get_all_compound_origins(chebi_ids): | all_compound_origins = [get_compound_origins(chebi_id)<EOL>for chebi_id in chebi_ids]<EOL>return [x for sublist in all_compound_origins for x in sublist]<EOL> | Returns all compound origins | f599:m11 |
def __parse_compound_origins(): | filename = get_file('<STR_LIT>')<EOL>with io.open(filename, '<STR_LIT:r>', encoding='<STR_LIT>') as textfile:<EOL><INDENT>next(textfile)<EOL>for line in textfile:<EOL><INDENT>tokens = line.strip().split('<STR_LIT:\t>')<EOL>if len(tokens) > <NUM_LIT:10>:<EOL><INDENT>chebi_id = int(tokens[<NUM_LIT:1>])<EOL>if chebi_id not in __COMPOUND_ORIGINS:<EOL><INDENT>__COMPOUND_ORIGINS[chebi_id] = []<EOL><DEDENT>comp_orig = CompoundOrigin(tokens[<NUM_LIT:2>], tokens[<NUM_LIT:3>],<EOL>tokens[<NUM_LIT:4>], tokens[<NUM_LIT:5>],<EOL>tokens[<NUM_LIT:6>], tokens[<NUM_LIT:7>],<EOL>tokens[<NUM_LIT:8>], tokens[<NUM_LIT:9>],<EOL>tokens[<NUM_LIT:10>])<EOL>__COMPOUND_ORIGINS[chebi_id].append(comp_orig)<EOL><DEDENT><DEDENT><DEDENT> | Gets and parses file | f599:m12 |
def get_status(chebi_id): | if len(__STATUSES) == <NUM_LIT:0>:<EOL><INDENT>__parse_compounds()<EOL><DEDENT>return __STATUSES[chebi_id] if chebi_id in __STATUSES else None<EOL> | Returns status | f599:m13 |
def get_source(chebi_id): | if len(__SOURCES) == <NUM_LIT:0>:<EOL><INDENT>__parse_compounds()<EOL><DEDENT>return __SOURCES[chebi_id] if chebi_id in __SOURCES else None<EOL> | Returns source | f599:m14 |
def get_parent_id(chebi_id): | if len(__PARENT_IDS) == <NUM_LIT:0>:<EOL><INDENT>__parse_compounds()<EOL><DEDENT>return __PARENT_IDS[chebi_id] if chebi_id in __PARENT_IDS else float('<STR_LIT>')<EOL> | Returns parent id | f599:m15 |
def get_all_ids(chebi_id): | if len(__ALL_IDS) == <NUM_LIT:0>:<EOL><INDENT>__parse_compounds()<EOL><DEDENT>return __ALL_IDS[chebi_id] if chebi_id in __ALL_IDS else []<EOL> | Returns all ids | f599:m16 |
def get_name(chebi_id): | if len(__NAMES) == <NUM_LIT:0>:<EOL><INDENT>__parse_compounds()<EOL><DEDENT>return __NAMES[chebi_id] if chebi_id in __NAMES else None<EOL> | Returns name | f599:m17 |
def get_definition(chebi_id): | if len(__DEFINITIONS) == <NUM_LIT:0>:<EOL><INDENT>__parse_compounds()<EOL><DEDENT>return __DEFINITIONS[chebi_id] if chebi_id in __DEFINITIONS else None<EOL> | Returns definition | f599:m18 |
def get_modified_on(chebi_id): | if len(__MODIFIED_ONS) == <NUM_LIT:0>:<EOL><INDENT>__parse_compounds()<EOL><DEDENT>return __MODIFIED_ONS[chebi_id] if chebi_id in __MODIFIED_ONS else None<EOL> | Returns modified on | f599:m19 |
def get_all_modified_on(chebi_ids): | all_modified_ons = [get_modified_on(chebi_id) for chebi_id in chebi_ids]<EOL>all_modified_ons = [modified_on for modified_on in all_modified_ons<EOL>if modified_on is not None]<EOL>return None if len(all_modified_ons) == <NUM_LIT:0> else sorted(all_modified_ons)[-<NUM_LIT:1>]<EOL> | Returns all modified on | f599:m20 |
def get_created_by(chebi_id): | if len(__CREATED_BYS) == <NUM_LIT:0>:<EOL><INDENT>__parse_compounds()<EOL><DEDENT>return __CREATED_BYS[chebi_id] if chebi_id in __CREATED_BYS else None<EOL> | Returns created by | f599:m21 |
def get_star(chebi_id): | if len(__STARS) == <NUM_LIT:0>:<EOL><INDENT>__parse_compounds()<EOL><DEDENT>return __STARS[chebi_id] if chebi_id in __STARS else float('<STR_LIT>')<EOL> | Returns star | f599:m22 |
def __parse_compounds(): | filename = get_file('<STR_LIT>')<EOL>with io.open(filename, '<STR_LIT:r>', encoding='<STR_LIT>') as textfile:<EOL><INDENT>next(textfile)<EOL>for line in textfile:<EOL><INDENT>tokens = line.strip().split('<STR_LIT:\t>')<EOL>chebi_id = int(tokens[<NUM_LIT:0>])<EOL>__STATUSES[chebi_id] = tokens[<NUM_LIT:1>]<EOL>__SOURCES[chebi_id] = tokens[<NUM_LIT:3>]<EOL>parent_id_token = tokens[<NUM_LIT:4>]<EOL>__PARENT_IDS[chebi_id] = float('<STR_LIT>') if parent_id_token == '<STR_LIT:null>' else int(parent_id_token)<EOL>__put_all_ids(chebi_id, chebi_id)<EOL>if parent_id_token != '<STR_LIT:null>':<EOL><INDENT>parent_id = int(parent_id_token)<EOL>__put_all_ids(parent_id, chebi_id)<EOL><DEDENT>__NAMES[chebi_id] = None if tokens[<NUM_LIT:5>] == '<STR_LIT:null>' else tokens[<NUM_LIT:5>]<EOL>__DEFINITIONS[chebi_id] = None if tokens[<NUM_LIT:6>] == '<STR_LIT:null>' else tokens[<NUM_LIT:6>]<EOL>__MODIFIED_ONS[chebi_id] = None if tokens[<NUM_LIT:7>] == '<STR_LIT:null>' else datetime.datetime.strptime(tokens[<NUM_LIT:7>], '<STR_LIT>')<EOL>__CREATED_BYS[chebi_id] = None if tokens[<NUM_LIT:8>] == '<STR_LIT:null>' or len(tokens) == <NUM_LIT:9> else tokens[<NUM_LIT:8>]<EOL>__STARS[chebi_id] = float('<STR_LIT>') if tokens[<NUM_LIT:9> if len(tokens) > <NUM_LIT:9> else <NUM_LIT:8>] == '<STR_LIT:null>' else int(tokens[<NUM_LIT:9> if len(tokens) > <NUM_LIT:9> else <NUM_LIT:8>])<EOL><DEDENT><DEDENT> | Gets and parses file | f599:m23 |
def __put_all_ids(parent_id, child_id): | if parent_id in __ALL_IDS:<EOL><INDENT>__ALL_IDS[parent_id].append(child_id)<EOL><DEDENT>else:<EOL><INDENT>__ALL_IDS[parent_id] = [child_id]<EOL><DEDENT> | Appends child_id to the list of ids stored against parent_id | f599:m24 |
def get_database_accessions(chebi_id): | if len(__DATABASE_ACCESSIONS) == <NUM_LIT:0>:<EOL><INDENT>__parse_database_accessions()<EOL><DEDENT>return __DATABASE_ACCESSIONS[chebi_id] if chebi_id in __DATABASE_ACCESSIONS else []<EOL> | Returns database accession | f599:m25 |
def get_all_database_accessions(chebi_ids): | all_database_accessions = [get_database_accessions(chebi_id)<EOL>for chebi_id in chebi_ids]<EOL>return [x for sublist in all_database_accessions for x in sublist]<EOL> | Returns all database accessions | f599:m26 |
def __parse_database_accessions(): | filename = get_file('<STR_LIT>')<EOL>with io.open(filename, '<STR_LIT:r>', encoding='<STR_LIT>') as textfile:<EOL><INDENT>next(textfile)<EOL>for line in textfile:<EOL><INDENT>tokens = line.strip().split('<STR_LIT:\t>')<EOL>chebi_id = int(tokens[<NUM_LIT:1>])<EOL>if chebi_id not in __DATABASE_ACCESSIONS:<EOL><INDENT>__DATABASE_ACCESSIONS[chebi_id] = []<EOL><DEDENT>dat_acc = DatabaseAccession(tokens[<NUM_LIT:3>], tokens[<NUM_LIT:4>], tokens[<NUM_LIT:2>])<EOL>__DATABASE_ACCESSIONS[chebi_id].append(dat_acc)<EOL><DEDENT><DEDENT> | Gets and parses file | f599:m27 |
def get_inchi(chebi_id): | if len(__INCHIS) == <NUM_LIT:0>:<EOL><INDENT>__parse_inchi()<EOL><DEDENT>return __INCHIS[chebi_id] if chebi_id in __INCHIS else None<EOL> | Returns InChI string | f599:m28 |
def __parse_inchi(): | filename = get_file('<STR_LIT>')<EOL>with io.open(filename, '<STR_LIT:r>', encoding='<STR_LIT>') as textfile:<EOL><INDENT>next(textfile)<EOL>for line in textfile:<EOL><INDENT>tokens = line.strip().split('<STR_LIT:\t>')<EOL>__INCHIS[int(tokens[<NUM_LIT:0>])] = tokens[<NUM_LIT:1>]<EOL><DEDENT><DEDENT> | Gets and parses file | f599:m29 |
def get_names(chebi_id): | if len(__ALL_NAMES) == <NUM_LIT:0>:<EOL><INDENT>__parse_names()<EOL><DEDENT>return __ALL_NAMES[chebi_id] if chebi_id in __ALL_NAMES else []<EOL> | Returns names | f599:m30 |
def get_all_names(chebi_ids): | all_names = [get_names(chebi_id) for chebi_id in chebi_ids]<EOL>return [x for sublist in all_names for x in sublist]<EOL> | Returns all names | f599:m31 |
def __parse_names(): | filename = get_file('<STR_LIT>')<EOL>with io.open(filename, '<STR_LIT:r>', encoding='<STR_LIT>') as textfile:<EOL><INDENT>next(textfile)<EOL>for line in textfile:<EOL><INDENT>tokens = line.strip().split('<STR_LIT:\t>')<EOL>chebi_id = int(tokens[<NUM_LIT:1>])<EOL>if chebi_id not in __ALL_NAMES:<EOL><INDENT>__ALL_NAMES[chebi_id] = []<EOL><DEDENT>nme = Name(tokens[<NUM_LIT:4>],<EOL>tokens[<NUM_LIT:2>],<EOL>tokens[<NUM_LIT:3>],<EOL>tokens[<NUM_LIT:5>] == '<STR_LIT:T>',<EOL>tokens[<NUM_LIT:6>])<EOL>__ALL_NAMES[chebi_id].append(nme)<EOL><DEDENT><DEDENT> | Gets and parses file | f599:m32 |
def get_references(chebi_ids): | references = []<EOL>chebi_ids = [str(chebi_id) for chebi_id in chebi_ids]<EOL>filename = get_file('<STR_LIT>')<EOL>with io.open(filename, '<STR_LIT:r>', encoding='<STR_LIT>') as textfile:<EOL><INDENT>next(textfile)<EOL>for line in textfile:<EOL><INDENT>tokens = line.strip().split('<STR_LIT:\t>')<EOL>if tokens[<NUM_LIT:0>] in chebi_ids:<EOL><INDENT>if len(tokens) > <NUM_LIT:3>:<EOL><INDENT>ref = Reference(tokens[<NUM_LIT:1>], tokens[<NUM_LIT:2>], tokens[<NUM_LIT:3>],<EOL>tokens[<NUM_LIT:4>])<EOL><DEDENT>else:<EOL><INDENT>ref = Reference(tokens[<NUM_LIT:1>], tokens[<NUM_LIT:2>])<EOL><DEDENT>references.append(ref)<EOL><DEDENT><DEDENT><DEDENT>return references<EOL> | Returns references | f599:m33 |
def get_outgoings(chebi_id): | if len(__OUTGOINGS) == <NUM_LIT:0>:<EOL><INDENT>__parse_relation()<EOL><DEDENT>return __OUTGOINGS[chebi_id] if chebi_id in __OUTGOINGS else []<EOL> | Returns outgoings | f599:m34 |
def get_all_outgoings(chebi_ids): | all_outgoings = [get_outgoings(chebi_id) for chebi_id in chebi_ids]<EOL>return [x for sublist in all_outgoings for x in sublist]<EOL> | Returns all outgoings | f599:m35 |
def get_incomings(chebi_id): | if len(__INCOMINGS) == <NUM_LIT:0>:<EOL><INDENT>__parse_relation()<EOL><DEDENT>return __INCOMINGS[chebi_id] if chebi_id in __INCOMINGS else []<EOL> | Returns incomings | f599:m36 |
def get_all_incomings(chebi_ids): | all_incomings = [get_incomings(chebi_id) for chebi_id in chebi_ids]<EOL>return [x for sublist in all_incomings for x in sublist]<EOL> | Returns all incomings | f599:m37 |
def __parse_relation(): | relation_filename = get_file('<STR_LIT>')<EOL>vertice_filename = get_file('<STR_LIT>')<EOL>relation_textfile = open(relation_filename, '<STR_LIT:r>')<EOL>vertice_textfile = open(vertice_filename, '<STR_LIT:r>')<EOL>vertices = {}<EOL>next(vertice_textfile)<EOL>for line in vertice_textfile:<EOL><INDENT>tokens = line.strip().split('<STR_LIT:\t>')<EOL>vertices[tokens[<NUM_LIT:0>]] = tokens[<NUM_LIT:1>]<EOL><DEDENT>next(relation_textfile)<EOL>for line in relation_textfile:<EOL><INDENT>tokens = line.strip().split('<STR_LIT:\t>')<EOL>source_chebi_id = int(vertices[tokens[<NUM_LIT:3>]])<EOL>target_chebi_id = int(vertices[tokens[<NUM_LIT:2>]])<EOL>typ = tokens[<NUM_LIT:1>]<EOL>if source_chebi_id not in __OUTGOINGS:<EOL><INDENT>__OUTGOINGS[source_chebi_id] = []<EOL><DEDENT>if target_chebi_id not in __INCOMINGS:<EOL><INDENT>__INCOMINGS[target_chebi_id] = []<EOL><DEDENT>target_relation = Relation(typ, str(target_chebi_id), tokens[<NUM_LIT:4>])<EOL>source_relation = Relation(typ, str(source_chebi_id), tokens[<NUM_LIT:4>])<EOL>__OUTGOINGS[source_chebi_id].append(target_relation)<EOL>__INCOMINGS[target_chebi_id].append(source_relation)<EOL><DEDENT> | Gets and parses file | f599:m38 |
def get_inchi_key(chebi_id): | if len(__INCHI_KEYS) == <NUM_LIT:0>:<EOL><INDENT>__parse_structures()<EOL><DEDENT>return __INCHI_KEYS[chebi_id] if chebi_id in __INCHI_KEYS else None<EOL> | Returns InChI key | f599:m39 |
def get_smiles(chebi_id): | if len(__SMILES) == <NUM_LIT:0>:<EOL><INDENT>__parse_structures()<EOL><DEDENT>return __SMILES[chebi_id] if chebi_id in __SMILES else None<EOL> | Returns SMILES string | f599:m40 |
def get_mol(chebi_id): | chebi_id_regexp = '<STR_LIT>' + str(chebi_id) + '<STR_LIT>'<EOL>mol_file_end_regexp = '<STR_LIT>'<EOL>this_structure = []<EOL>filename = get_file('<STR_LIT>')<EOL>with io.open(filename, '<STR_LIT:r>', encoding='<STR_LIT>') as textfile:<EOL><INDENT>in_chebi_id = False<EOL>next(textfile)<EOL>for line in textfile:<EOL><INDENT>if in_chebi_id or line[<NUM_LIT:0>].isdigit():<EOL><INDENT>if re.match(chebi_id_regexp, line) and int(line.split('<STR_LIT:U+002C>')[<NUM_LIT:0>]) in __get_default_structure_ids():<EOL><INDENT>tokens = line.strip().split('<STR_LIT:U+002C>')<EOL>in_chebi_id = True<EOL>this_structure = []<EOL>this_structure.append('<STR_LIT:U+002C>'.join(tokens[<NUM_LIT:2>:])<EOL>.replace('<STR_LIT>', '<STR_LIT>'))<EOL>this_structure.append('<STR_LIT:\n>')<EOL><DEDENT>elif in_chebi_id:<EOL><INDENT>if re.match(mol_file_end_regexp, line):<EOL><INDENT>tokens = line.strip().split('<STR_LIT:U+002C>')<EOL>this_structure.append(tokens[<NUM_LIT:0>].replace('<STR_LIT>', '<STR_LIT>'))<EOL>return Structure('<STR_LIT>'.join(this_structure),<EOL>Structure.mol,<EOL>int(tokens[<NUM_LIT:2>][<NUM_LIT:0>]))<EOL><DEDENT>else:<EOL><INDENT>this_structure.append(line)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>return None<EOL> | Returns mol | f599:m41 |
def get_mol_filename(chebi_id): | mol = get_mol(chebi_id)<EOL>if mol is None:<EOL><INDENT>return None<EOL><DEDENT>file_descriptor, mol_filename = tempfile.mkstemp(str(chebi_id) +<EOL>'<STR_LIT:_>', '<STR_LIT>')<EOL>mol_file = open(mol_filename, '<STR_LIT:w>')<EOL>mol_file.write(mol.get_structure())<EOL>mol_file.close()<EOL>os.close(file_descriptor)<EOL>return mol_filename<EOL> | Returns mol file | f599:m42 |
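A hedged end-to-end sketch of the structure accessors above (15377 is used purely as an example ChEBI id; in the real data files it identifies water):

```python
inchi_key = get_inchi_key(15377)    # None if the id has no stored structure
smiles = get_smiles(15377)
mol = get_mol(15377)                # Structure object, or None if not found
if mol is not None:
    path = get_mol_filename(15377)  # MOL block written to a temporary file
    print(path, inchi_key, smiles)
```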