Each record below describes one Python function mined from a public GitHub repository. Fields per record: repo | path | func_name | language (python for every row) | partition (train for every row), followed by the commit sha, the source url, and the function's code with its docstring.

repo: CybOXProject/mixbox | path: mixbox/entities.py | func_name: Entity.from_json | language: python | partition: train
sha: 9097dae7a433f5b98c18171c4a5598f69a7d30af
url: https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/entities.py#L490-L497

```python
def from_json(cls, json_doc):
    """Parse a JSON string and build an entity."""
    try:
        d = json.load(json_doc)
    except AttributeError:  # catch the read() error
        d = json.loads(json_doc)
    return cls.from_dict(d)
```

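The try/except is a duck-typing dispatch: `json.load` expects a file-like object and raises `AttributeError` when its argument has no `read()` method, at which point the plain-string path takes over. A minimal, self-contained sketch of the same trick using only the standard library:

```python
import io
import json

def load_json(json_doc):
    # Same dispatch as Entity.from_json: try the file-like path first,
    # fall back to treating the argument as a plain JSON string.
    try:
        return json.load(json_doc)
    except AttributeError:  # json_doc has no read() method
        return json.loads(json_doc)

doc = '{"id": "example:Observable-1"}'
assert load_json(doc) == load_json(io.StringIO(doc)) == {"id": "example:Observable-1"}
```
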
repo: CybOXProject/mixbox | path: mixbox/entities.py | func_name: EntityList._multiple_field | language: python | partition: train
sha: 9097dae7a433f5b98c18171c4a5598f69a7d30af
url: https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/entities.py#L594-L628

```python
def _multiple_field(cls):
    """Return the "multiple" TypedField associated with this EntityList.

    This also lazily sets the ``_entitylist_multifield`` value if it
    hasn't been set yet. This is set to a tuple containing one item because
    if we set the class attribute to the TypedField, we would effectively
    add a TypedField descriptor to the class, which we don't want.

    Raises:
        AssertionError: If there is more than one multiple TypedField
            or the TypedField type_ is not a subclass of Entity.
    """
    klassdict = cls.__dict__

    try:
        # Checking for cls._entitylist_multifield would return any inherited
        # values, so we check the class __dict__ explicitly.
        return klassdict["_entitylist_multifield"][0]
    except (KeyError, IndexError, TypeError):
        from . import fields

        multifield_tuple = tuple(fields.find(cls, multiple=True))
        assert len(multifield_tuple) == 1

        # Make sure that the multiple field actually has an Entity type.
        multifield = multifield_tuple[0]
        assert issubclass(multifield.type_, Entity)

        # Store aside the multiple field. We wrap it in a tuple because
        # just doing ``cls._entitylist_multifield = multifield`` would
        # assign another TypedField descriptor to this class. We don't
        # want that.
        cls._entitylist_multifield = multifield_tuple

        # Return the multiple TypedField
        return multifield_tuple[0]
```

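The tuple wrapping matters because of Python's descriptor protocol: assigning a TypedField instance directly to a class attribute would install it as another live descriptor, while a tuple is inert and merely stores it. A self-contained illustration (the `Field` class here is a stand-in for mixbox's TypedField, not its real implementation):

```python
class Field:
    """Stand-in for a TypedField-style descriptor."""
    def __get__(self, obj, objtype=None):
        return "descriptor fired"

class Widget:
    name = Field()

field = Widget.__dict__["name"]    # the raw descriptor object

Widget._cached = field             # installs a SECOND descriptor...
print(Widget._cached)              # -> "descriptor fired" (the object is hidden)

Widget._cached = (field,)          # ...while a tuple is not a descriptor
print(Widget._cached[0] is field)  # -> True (the Field instance survives)
```
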
repo: CybOXProject/mixbox | path: mixbox/entities.py | func_name: NamespaceCollector._finalize_namespaces | language: python | partition: train
sha: 9097dae7a433f5b98c18171c4a5598f69a7d30af
url: https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/entities.py#L796-L856

```python
def _finalize_namespaces(self, ns_dict=None):
    """Returns a dictionary of namespaces to be exported with an XML
    document.

    This loops over all the namespaces that were discovered and built
    during the execution of ``collect()`` and
    ``_parse_collected_classes()`` and attempts to merge them all.

    Raises:
        .namespaces.DuplicatePrefixError: If a namespace prefix was
            mapped to more than one namespace.
        .namespaces.NoPrefixesError: If a namespace was collected that is
            not mapped to a prefix.
    """
    if ns_dict:
        # Add the user's entries to our set
        for ns, alias in six.iteritems(ns_dict):
            self._collected_namespaces.add_namespace_uri(ns, alias)

    # Add the ID namespaces
    self._collected_namespaces.add_namespace_uri(
        ns_uri=idgen.get_id_namespace(),
        prefix=idgen.get_id_namespace_alias()
    )

    # Remap the example namespace to the one expected by the APIs if the
    # sample example namespace is found.
    self._fix_example_namespace()

    # Add _input_namespaces
    for prefix, uri in six.iteritems(self._input_namespaces):
        self._collected_namespaces.add_namespace_uri(uri, prefix)

    # Add some default XML namespaces to make sure they're there.
    self._collected_namespaces.import_from(namespaces.XML_NAMESPACES)

    # python-stix's generateDS-generated binding classes can't handle
    # default namespaces. So make sure there are no preferred defaults in
    # the set. Get prefixes from the global namespace set if we have to.
    for ns_uri in self._collected_namespaces.namespace_uris:
        preferred_prefix = self._collected_namespaces.preferred_prefix_for_namespace(ns_uri)

        if preferred_prefix:
            continue

        # No preferred prefix set for namespace. Try to assign one.
        prefixes = self._collected_namespaces.get_prefixes(ns_uri)

        if prefixes:
            prefix = next(iter(prefixes))
        else:
            prefix = namespaces.lookup_name(ns_uri)

        if prefix is None:
            raise namespaces.NoPrefixesError(ns_uri)

        self._collected_namespaces.set_preferred_prefix_for_namespace(
            ns_uri=ns_uri,
            prefix=prefix,
            add_if_not_exist=True
        )
```

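The loop at the end encodes a three-step fallback for choosing a prefix: keep an explicit preferred prefix, else reuse any prefix already registered for the URI, else consult the global lookup table, and fail only when all three come up empty. A distilled standalone version of that decision chain (plain dicts stand in for the collector's namespace set; the names are illustrative, not the mixbox API):

```python
def pick_prefix(ns_uri, preferred, known, global_registry):
    """Choose a prefix for ns_uri using the same fallback order as above."""
    if preferred.get(ns_uri):              # 1. explicit preference wins
        return preferred[ns_uri]
    if known.get(ns_uri):                  # 2. reuse any registered prefix
        return next(iter(known[ns_uri]))
    prefix = global_registry.get(ns_uri)   # 3. fall back to the global table
    if prefix is None:
        raise LookupError("no prefix available for %s" % ns_uri)
    return prefix

registry = {"http://www.w3.org/2001/XMLSchema-instance": "xsi"}
assert pick_prefix("http://www.w3.org/2001/XMLSchema-instance", {}, {}, registry) == "xsi"
```
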
repo: inveniosoftware/invenio-communities | path: invenio_communities/views/api.py | func_name: CommunitiesResource.get | language: python | partition: train
sha: 5c4de6783724d276ae1b6dd13a399a9e22fadc7a
url: https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/views/api.py#L80-L129

```python
def get(self, query, sort, page, size):
    """Get a list of all the communities.

    .. http:get:: /communities

        Returns a JSON list with all the communities.

        **Request**:

        .. sourcecode:: http

            GET /communities HTTP/1.1
            Accept: application/json
            Content-Type: application/json
            Host: localhost:5000

        :reqheader Content-Type: application/json

        **Response**:

        .. sourcecode:: http

            HTTP/1.0 200 OK
            Content-Length: 334
            Content-Type: application/json

            [
                {
                    "id": "comm1"
                },
                {
                    "id": "comm2"
                }
            ]

        :resheader Content-Type: application/json
        :statuscode 200: no error
    """
    urlkwargs = {
        'q': query,
        'sort': sort,
        'size': size,
    }

    communities = Community.filter_communities(query, sort)
    page = communities.paginate(page, size)
    links = default_links_pagination_factory(page, urlkwargs)

    links_headers = map(lambda key: ('link', 'ref="{0}" href="{1}"'.format(
        key, links[key])), links)

    return self.make_response(
        page,
        headers=links_headers,
        links_item_factory=default_links_item_factory,
        page=page,
        urlkwargs=urlkwargs,
        links_pagination_factory=default_links_pagination_factory,
    )
```

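A hedged client-side sketch of exercising this endpoint with `requests` (the host, port, and the q/sort/page/size parameter names follow the docstring and `urlkwargs` above; a real deployment will differ):

```python
import requests

resp = requests.get(
    "http://localhost:5000/communities",
    params={"q": "physics", "sort": "title", "page": 1, "size": 10},
    headers={"Accept": "application/json"},
)
resp.raise_for_status()

for community in resp.json():
    print(community["id"])

# Pagination is advertised through the repeated `link` headers built by
# default_links_pagination_factory, e.g. link: ref="next" href="...".
print(resp.headers.get("link"))
```
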
repo: inveniosoftware/invenio-communities | path: invenio_communities/views/api.py | func_name: CommunityDetailsResource.get | language: python | partition: train
sha: 5c4de6783724d276ae1b6dd13a399a9e22fadc7a
url: https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/views/api.py#L143-L185

```python
def get(self, community_id):
    """Get the details of the specified community.

    .. http:get:: /communities/(string:id)

        Returns a JSON dictionary with the details of the specified
        community.

        **Request**:

        .. sourcecode:: http

            GET /communities/comm1 HTTP/1.1
            Accept: application/json
            Content-Type: application/json
            Host: localhost:5000

        :reqheader Content-Type: application/json
        :query string id: ID of a specific community to get more
            information.

        **Response**:

        .. sourcecode:: http

            HTTP/1.0 200 OK
            Content-Length: 334
            Content-Type: application/json

            {
                "id_user": 1,
                "description": "",
                "title": "",
                "created": "2016-04-05T14:56:37.051462",
                "id": "comm1",
                "page": "",
                "curation_policy": ""
            }

        :resheader Content-Type: application/json
        :statuscode 200: no error
        :statuscode 404: page not found
    """
    community = Community.get(community_id)
    if not community:
        abort(404)

    etag = community.version_id
    self.check_etag(etag)

    response = self.make_response(
        community, links_item_factory=default_links_item_factory)
    response.set_etag(etag)

    return response
```

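Because the handler uses the community's `version_id` as an ETag and calls `check_etag()`, clients can revalidate cheaply. A sketch of a conditional GET, assuming `check_etag` implements standard `If-None-Match` handling:

```python
import requests

url = "http://localhost:5000/communities/comm1"

first = requests.get(url, headers={"Accept": "application/json"})
etag = first.headers.get("ETag")

# Replay with If-None-Match: the server can answer 304 Not Modified
# for as long as the community's version_id is unchanged.
second = requests.get(url, headers={"Accept": "application/json",
                                    "If-None-Match": etag})
print(second.status_code)  # 304 expected while the record is unchanged
```
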
repo: OpenTreeOfLife/peyotl | path: peyotl/phylesystem/phylesystem_umbrella.py | func_name: Phylesystem | language: python | partition: train
sha: 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0
url: https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/phylesystem/phylesystem_umbrella.py#L273-L304

```python
def Phylesystem(repos_dict=None,
                repos_par=None,
                with_caching=True,
                repo_nexml2json=None,
                git_ssh=None,
                pkey=None,
                git_action_class=PhylesystemGitAction,
                mirror_info=None,
                new_study_prefix=None,
                infrastructure_commit_author='OpenTree API <api@opentreeoflife.org>'):
    """Factory function for a _Phylesystem object.

    A wrapper around the _Phylesystem class instantiation for
    the most common use case: a singleton _Phylesystem.
    If you need distinct _Phylesystem objects, you'll need to
    call that class directly.
    """
    if not repo_nexml2json:
        repo_nexml2json = get_config_setting('phylesystem', 'repo_nexml2json')
    global _THE_PHYLESYSTEM
    if _THE_PHYLESYSTEM is None:
        _THE_PHYLESYSTEM = _Phylesystem(repos_dict=repos_dict,
                                        repos_par=repos_par,
                                        with_caching=with_caching,
                                        repo_nexml2json=repo_nexml2json,
                                        git_ssh=git_ssh,
                                        pkey=pkey,
                                        git_action_class=git_action_class,
                                        mirror_info=mirror_info,
                                        new_study_prefix=new_study_prefix,
                                        infrastructure_commit_author=infrastructure_commit_author)
    return _THE_PHYLESYSTEM
```

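The factory is a module-level singleton: the first call builds `_THE_PHYLESYSTEM`, and every later call returns that same instance, silently ignoring any different arguments. A standalone sketch of the pattern and its main caveat:

```python
_THE_INSTANCE = None

def get_instance(config=None):
    """Build the heavyweight object once; afterwards, return the cache."""
    global _THE_INSTANCE
    if _THE_INSTANCE is None:
        _THE_INSTANCE = {"config": config}  # stand-in for _Phylesystem(...)
    return _THE_INSTANCE

a = get_instance(config="first")
b = get_instance(config="ignored")  # arguments after the first call are ignored
assert a is b and a["config"] == "first"
```
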
repo: SpotlightData/preprocessing | path: preprocessing/text.py | func_name: convert_html_entities | language: python | partition: train
sha: 180c6472bc2642afbd7a1ece08d0b0d14968a708
url: https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L33-L51

```python
def convert_html_entities(text_string):
    '''
    Converts HTML5 character references within text_string to their corresponding unicode characters
    and returns converted string as type str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return html.unescape(text_string).replace("&quot;", "'")
    else:
        raise InputError("string not passed as argument for text_string")
```

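`html.unescape` already covers the full HTML5 named-reference table; the trailing `replace` additionally maps any literal `&quot;` that survives one round of unescaping (as happens with double-escaped input) to an apostrophe. A check of that behavior with the standard library alone:

```python
import html

print(html.unescape("Caf&eacute; &amp; bar"))
# -> Café & bar

# Double-escaped quotes come out of one unescape pass as "&quot;",
# which the extra .replace("&quot;", "'") then turns into apostrophes:
print(html.unescape("said &amp;quot;hi&amp;quot;").replace("&quot;", "'"))
# -> said 'hi'
```
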
repo: SpotlightData/preprocessing | path: preprocessing/text.py | func_name: convert_ligatures | language: python | partition: train
sha: 180c6472bc2642afbd7a1ece08d0b0d14968a708
url: https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L53-L73

```python
def convert_ligatures(text_string):
    '''
    Converts Latin character references within text_string to their corresponding unicode characters
    and returns converted string as type str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a string or NoneType not be passed as an argument
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        for i in range(0, len(LIGATURES)):
            text_string = text_string.replace(LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
        return text_string
    else:
        raise InputError("none type or string not passed as an argument")
```

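A runnable sketch of the same table-driven replacement, with a two-entry stand-in for the package's bundled `LIGATURES` mapping (the real table is larger and ships with the library):

```python
LIGATURES = {
    "0": {"ligature": "\ufb01", "term": "fi"},  # the single glyph ﬁ
    "1": {"ligature": "\ufb02", "term": "fl"},  # the single glyph ﬂ
}

def convert_ligatures(text_string):
    for i in range(0, len(LIGATURES)):
        text_string = text_string.replace(
            LIGATURES[str(i)]["ligature"], LIGATURES[str(i)]["term"])
    return text_string

print(convert_ligatures("\ufb01nancial re\ufb02ex"))  # -> financial reflex
```
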
repo: SpotlightData/preprocessing | path: preprocessing/text.py | func_name: correct_spelling | language: python | partition: train
sha: 180c6472bc2642afbd7a1ece08d0b0d14968a708
url: https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L75-L98

```python
def correct_spelling(text_string):
    '''
    Splits string and converts words not found within a pre-built dictionary to their
    most likely actual word based on a relative probability dictionary. Returns edited
    string as type str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a string or NoneType not be passed as an argument
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        word_list = text_string.split()
        spellchecked_word_list = []
        for word in word_list:
            spellchecked_word_list.append(spellcheck.correct_word(word))
        return " ".join(spellchecked_word_list)
    else:
        raise InputError("none type or string not passed as an argument")
```

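A hedged usage sketch, assuming the module is importable as `preprocessing.text` (as its own docstrings suggest). Exact corrections depend on the package's bundled frequency dictionary, so the output shown is plausible rather than guaranteed:

```python
from preprocessing import text

print(text.correct_spelling("speling mistaks get corected"))
# plausible output: "spelling mistakes get corrected"
```
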
repo: SpotlightData/preprocessing | path: preprocessing/text.py | func_name: create_sentence_list | language: python | partition: train
sha: 180c6472bc2642afbd7a1ece08d0b0d14968a708
url: https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L100-L118

```python
def create_sentence_list(text_string):
    '''
    Splits text_string into a list of sentences based on NLTK's english.pickle tokenizer, and
    returns said list as type list of str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return []
    elif isinstance(text_string, str):
        return SENTENCE_TOKENIZER.tokenize(text_string)
    else:
        raise InputError("non-string passed as argument for create_sentence_list")
```

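A usage sketch; NLTK's pretrained Punkt model (english.pickle) is what keeps abbreviations like "Dr." from being treated as sentence boundaries:

```python
from preprocessing import text

print(text.create_sentence_list("Dr. Smith arrived. He was late!"))
# expected: ['Dr. Smith arrived.', 'He was late!']
```
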
repo: SpotlightData/preprocessing | path: preprocessing/text.py | func_name: keyword_tokenize | language: python | partition: train
sha: 180c6472bc2642afbd7a1ece08d0b0d14968a708
url: https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L120-L138

```python
def keyword_tokenize(text_string):
    '''
    Extracts keywords from text_string using NLTK's list of English stopwords, ignoring words of a
    length smaller than 3, and returns the new string as type str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return " ".join([word for word in KEYWORD_TOKENIZER.tokenize(text_string)
                         if word not in STOPWORDS and len(word) >= 3])
    else:
        raise InputError("string not passed as argument for text_string")
```

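A usage sketch: stopwords and tokens shorter than three characters are dropped, everything else is kept in order:

```python
from preprocessing import text

print(text.keyword_tokenize("the cat sat on a mat by the door"))
# expected: "cat sat mat door"
# ("the", "on", "a", "by" are NLTK stopwords and/or shorter than 3 chars)
```
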
repo: SpotlightData/preprocessing | path: preprocessing/text.py | func_name: lemmatize | language: python | partition: train
sha: 180c6472bc2642afbd7a1ece08d0b0d14968a708
url: https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L140-L157

```python
def lemmatize(text_string):
    '''
    Returns base form of text_string using NLTK's WordNetLemmatizer as type str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return LEMMATIZER.lemmatize(text_string)
    else:
        raise InputError("string not passed as primary argument")
```

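A usage sketch; note that `WordNetLemmatizer.lemmatize` operates on a single word, so multi-word strings pass through largely unchanged and should be tokenized first:

```python
from preprocessing import text

print(text.lemmatize("geese"))    # -> "goose"
print(text.lemmatize("corpora"))  # -> "corpus"
```
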
repo: SpotlightData/preprocessing | path: preprocessing/text.py | func_name: lowercase | language: python | partition: train
sha: 180c6472bc2642afbd7a1ece08d0b0d14968a708
url: https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L159-L176

```python
def lowercase(text_string):
    '''
    Converts text_string into lowercase and returns the converted string as type str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return text_string.lower()
    else:
        raise InputError("string not passed as argument for text_string")
```

repo: SpotlightData/preprocessing | path: preprocessing/text.py | func_name: preprocess_text | language: python | partition: train
sha: 180c6472bc2642afbd7a1ece08d0b0d14968a708
url: https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L178-L208

```python
def preprocess_text(text_string, function_list):
    '''
    Given each function within function_list, applies the order of functions put forward onto
    text_string, returning the processed string as type str.

    Keyword argument:

    - function_list: list of functions available in preprocessing.text
    - text_string: string instance

    Exceptions raised:

    - FunctionError: occurs should an invalid function be passed within the list of functions
    - InputError: occurs should text_string be non-string, or function_list be non-list
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        if isinstance(function_list, list):
            for func in function_list:
                try:
                    text_string = func(text_string)
                except (NameError, TypeError):
                    raise FunctionError("invalid function passed as element of function_list")
                except:
                    raise
            return text_string
        else:
            raise InputError("list of functions not passed as argument for function_list")
    else:
        raise InputError("string not passed as argument for text_string")
```

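A runnable pipeline sketch that chains several of the simple cleaners defined in this module (lowercase, remove_urls, remove_numbers, remove_whitespace); the expected output follows directly from each function's regex:

```python
from preprocessing import text

raw = "Visit https://example.com NOW!!!   Prices   from 15,000 dollars."
clean = text.preprocess_text(raw, [
    text.remove_urls,        # strips the http...-prefixed token
    text.lowercase,
    text.remove_numbers,     # drops digit runs such as "15,000"
    text.remove_whitespace,  # collapses leftover runs of spaces
])
print(clean)
# expected: "visit now!!! prices from dollars."
```
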
repo: SpotlightData/preprocessing | path: preprocessing/text.py | func_name: remove_esc_chars | language: python | partition: train
sha: 180c6472bc2642afbd7a1ece08d0b0d14968a708
url: https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L210-L227

```python
def remove_esc_chars(text_string):
    '''
    Removes any escape character within text_string and returns the new string as type str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return " ".join(re.sub(r'\\\w', "", text_string).split())
    else:
        raise InputError("string not passed as argument")
```

repo: SpotlightData/preprocessing | path: preprocessing/text.py | func_name: remove_numbers | language: python | partition: train
sha: 180c6472bc2642afbd7a1ece08d0b0d14968a708
url: https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L229-L246

```python
def remove_numbers(text_string):
    '''
    Removes any digit value discovered within text_string and returns the new string as type str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return " ".join(re.sub(r'\b[\d.\/,]+', "", text_string).split())
    else:
        raise InputError("string not passed as argument")
```

repo: SpotlightData/preprocessing | path: preprocessing/text.py | func_name: remove_number_words | language: python | partition: train
sha: 180c6472bc2642afbd7a1ece08d0b0d14968a708
url: https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L248-L268

```python
def remove_number_words(text_string):
    '''
    Removes any integer represented as a word within text_string and returns the new string as
    type str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        for word in NUMBER_WORDS:
            text_string = re.sub(r'[\S]*\b' + word + r'[\S]*', "", text_string)
        return " ".join(text_string.split())
    else:
        raise InputError("string not passed as argument")
```

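The two number-removers are complementary: `remove_numbers` targets digit runs, while `remove_number_words` targets spelled-out numbers. A contrast sketch, assuming the package's bundled `NUMBER_WORDS` list contains "two":

```python
from preprocessing import text

s = "two of the 3 reports cover 2019"
print(text.remove_numbers(s))       # -> "two of the reports cover"
print(text.remove_number_words(s))  # -> "of the 3 reports cover 2019"
```
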
repo: SpotlightData/preprocessing | path: preprocessing/text.py | func_name: remove_urls | language: python | partition: train
sha: 180c6472bc2642afbd7a1ece08d0b0d14968a708
url: https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L313-L330

```python
def remove_urls(text_string):
    '''
    Removes all URLs within text_string and returns the new string as type str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a non-string argument be passed
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return " ".join(re.sub(r'http\S+', "", text_string).split())
    else:
        raise InputError("string not passed as argument")
```

repo: SpotlightData/preprocessing | path: preprocessing/text.py | func_name: remove_whitespace | language: python | partition: train
sha: 180c6472bc2642afbd7a1ece08d0b0d14968a708
url: https://github.com/SpotlightData/preprocessing/blob/180c6472bc2642afbd7a1ece08d0b0d14968a708/preprocessing/text.py#L332-L349

```python
def remove_whitespace(text_string):
    '''
    Removes all whitespace found within text_string and returns new string as type str.

    Keyword argument:

    - text_string: string instance

    Exceptions raised:

    - InputError: occurs should a string or NoneType not be passed as an argument
    '''
    if text_string is None or text_string == "":
        return ""
    elif isinstance(text_string, str):
        return " ".join(text_string.split())
    else:
        raise InputError("none type or string not passed as an argument")
```

repo: romaryd/python-logging-mixin | path: loggingmixin/__init__.py | func_name: WrappedLogger.log | language: python | partition: train
sha: 8ac77df5731b607e6ff9ef762e71398cb5a892ea
url: https://github.com/romaryd/python-logging-mixin/blob/8ac77df5731b607e6ff9ef762e71398cb5a892ea/loggingmixin/__init__.py#L90-L99

```python
def log(self, level, message, *args, **kwargs):
    """
    This is the primary method to override to ensure logging with extra
    options gets correctly specified.
    """
    extra = self.extras.copy()
    extra.update(kwargs.pop('extra', {}))

    kwargs['extra'] = extra
    self.logger.log(level, message, *args, **kwargs)
```

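The merge order is the point: instance-level `extras` supply defaults and the per-call `extra` dict overrides them. The same mechanics shown with only the standard library, where `extra` keys become record attributes usable in the format string:

```python
import logging

logging.basicConfig(format="%(levelname)s %(user)s: %(message)s")
logger = logging.getLogger("demo")

instance_extras = {"user": "system"}

def log(level, message, **kwargs):
    # Mirror WrappedLogger.log: copy the defaults, let the caller win.
    extra = instance_extras.copy()
    extra.update(kwargs.pop("extra", {}))
    logger.log(level, message, extra=extra, **kwargs)

log(logging.WARNING, "disk nearly full")                     # WARNING system: ...
log(logging.WARNING, "login failed", extra={"user": "bob"})  # WARNING bob: ...
```
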
romaryd/python-logging-mixin | loggingmixin/__init__.py | WrappedLogger.warning | def warning(self, message, *args, **kwargs):
"""
Specialized warnings system. If a warning subclass is passed into
the keyword arguments and raise_warnings is True - the warnning will
be passed to the warnings module.
"""
warncls = kwargs.pop('warning', None)
if warncls and self.raise_warnings:
warnings.warn(message, warncls)
return self.log(logging.WARNING, message, *args, **kwargs) | python | def warning(self, message, *args, **kwargs):
"""
Specialized warnings system. If a warning subclass is passed into
the keyword arguments and raise_warnings is True - the warnning will
be passed to the warnings module.
"""
warncls = kwargs.pop('warning', None)
if warncls and self.raise_warnings:
warnings.warn(message, warncls)
return self.log(logging.WARNING, message, *args, **kwargs) | [
"def",
"warning",
"(",
"self",
",",
"message",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warncls",
"=",
"kwargs",
".",
"pop",
"(",
"'warning'",
",",
"None",
")",
"if",
"warncls",
"and",
"self",
".",
"raise_warnings",
":",
"warnings",
".",
"warn",
"(",
"message",
",",
"warncls",
")",
"return",
"self",
".",
"log",
"(",
"logging",
".",
"WARNING",
",",
"message",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Specialized warnings system. If a warning subclass is passed into
the keyword arguments and raise_warnings is True - the warning will
be passed to the warnings module. | [
"Specialized",
"warnings",
"system",
".",
"If",
"a",
"warning",
"subclass",
"is",
"passed",
"into",
"the",
"keyword",
"arguments",
"and",
"raise_warnings",
"is",
"True",
"-",
"the",
"warnning",
"will",
"be",
"passed",
"to",
"the",
"warnings",
"module",
"."
] | 8ac77df5731b607e6ff9ef762e71398cb5a892ea | https://github.com/romaryd/python-logging-mixin/blob/8ac77df5731b607e6ff9ef762e71398cb5a892ea/loggingmixin/__init__.py#L107-L117 | train |
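
A self-contained sketch of the wrapper pattern from the two records above. The constructor shape is an assumption (the packaged class may differ), but `log` and `warning` mirror the record bodies:

import logging
import warnings

class WrappedLoggerSketch(object):
    # Hypothetical constructor; the real loggingmixin class may take different args.
    def __init__(self, logger, raise_warnings=True, **extras):
        self.logger = logger
        self.raise_warnings = raise_warnings
        self.extras = extras

    def log(self, level, message, *args, **kwargs):
        # Merge per-call extras over the instance-level extras, as in the record.
        extra = self.extras.copy()
        extra.update(kwargs.pop('extra', {}))
        kwargs['extra'] = extra
        self.logger.log(level, message, *args, **kwargs)

    def warning(self, message, *args, **kwargs):
        # Optionally route the message through the warnings module first.
        warncls = kwargs.pop('warning', None)
        if warncls and self.raise_warnings:
            warnings.warn(message, warncls)
        return self.log(logging.WARNING, message, *args, **kwargs)

logging.basicConfig(level=logging.DEBUG)
WrappedLoggerSketch(logging.getLogger("demo")).warning("disk nearly full", warning=ResourceWarning)
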
romaryd/python-logging-mixin | loggingmixin/__init__.py | ServiceLogger.log | def log(self, level, message, *args, **kwargs):
"""
Provide current user as extra context to the logger
"""
extra = kwargs.pop('extra', {})
extra.update({
'user': self.user
})
kwargs['extra'] = extra
super(ServiceLogger, self).log(level, message, *args, **kwargs) | python | def log(self, level, message, *args, **kwargs):
"""
Provide current user as extra context to the logger
"""
extra = kwargs.pop('extra', {})
extra.update({
'user': self.user
})
kwargs['extra'] = extra
super(ServiceLogger, self).log(level, message, *args, **kwargs) | [
"def",
"log",
"(",
"self",
",",
"level",
",",
"message",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"extra",
"=",
"kwargs",
".",
"pop",
"(",
"'extra'",
",",
"{",
"}",
")",
"extra",
".",
"update",
"(",
"{",
"'user'",
":",
"self",
".",
"user",
"}",
")",
"kwargs",
"[",
"'extra'",
"]",
"=",
"extra",
"super",
"(",
"ServiceLogger",
",",
"self",
")",
".",
"log",
"(",
"level",
",",
"message",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Provide current user as extra context to the logger | [
"Provide",
"current",
"user",
"as",
"extra",
"context",
"to",
"the",
"logger"
] | 8ac77df5731b607e6ff9ef762e71398cb5a892ea | https://github.com/romaryd/python-logging-mixin/blob/8ac77df5731b607e6ff9ef762e71398cb5a892ea/loggingmixin/__init__.py#L147-L157 | train |
romaryd/python-logging-mixin | loggingmixin/__init__.py | LoggingMixin.logger | def logger(self):
"""
Instantiates and returns a ServiceLogger instance
"""
if not hasattr(self, '_logger') or not self._logger:
self._logger = ServiceLogger()
return self._logger | python | def logger(self):
"""
Instantiates and returns a ServiceLogger instance
"""
if not hasattr(self, '_logger') or not self._logger:
self._logger = ServiceLogger()
return self._logger | [
"def",
"logger",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_logger'",
")",
"or",
"not",
"self",
".",
"_logger",
":",
"self",
".",
"_logger",
"=",
"ServiceLogger",
"(",
")",
"return",
"self",
".",
"_logger"
] | Instantiates and returns a ServiceLogger instance | [
"Instantiates",
"and",
"returns",
"a",
"ServiceLogger",
"instance"
] | 8ac77df5731b607e6ff9ef762e71398cb5a892ea | https://github.com/romaryd/python-logging-mixin/blob/8ac77df5731b607e6ff9ef762e71398cb5a892ea/loggingmixin/__init__.py#L166-L172 | train |
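
A hedged sketch of consuming the mixin above. It assumes the package is importable as `loggingmixin` (per the record path) and that `logger` is exposed as a property in the packaged source (any decorator sits outside the extracted span):

import logging
from loggingmixin import LoggingMixin  # assumption: importable per the record path

class Worker(LoggingMixin):
    def run(self):
        # First access lazily creates and caches a ServiceLogger instance.
        self.logger.log(logging.INFO, "starting run")

w = Worker()
assert w.logger is w.logger  # cached on the instance after first access
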
OpenTreeOfLife/peyotl | tutorials/ot-oti-find-studies.py | ot_find_studies | def ot_find_studies(arg_dict, exact=True, verbose=False, oti_wrapper=None):
"""Uses a peyotl wrapper around an Open Tree web service to get a list of studies
    including values `value` for a given property to be searched on `property`.
    The oti_wrapper can be None (in which case the default wrapper from peyotl.sugar will be used).
All other arguments correspond to the arguments of the web-service call.
"""
if oti_wrapper is None:
from peyotl.sugar import oti
oti_wrapper = oti
return oti_wrapper.find_studies(arg_dict,
exact=exact,
verbose=verbose,
wrap_response=True) | python | def ot_find_studies(arg_dict, exact=True, verbose=False, oti_wrapper=None):
"""Uses a peyotl wrapper around an Open Tree web service to get a list of studies
    including values `value` for a given property to be searched on `property`.
    The oti_wrapper can be None (in which case the default wrapper from peyotl.sugar will be used).
All other arguments correspond to the arguments of the web-service call.
"""
if oti_wrapper is None:
from peyotl.sugar import oti
oti_wrapper = oti
return oti_wrapper.find_studies(arg_dict,
exact=exact,
verbose=verbose,
wrap_response=True) | [
"def",
"ot_find_studies",
"(",
"arg_dict",
",",
"exact",
"=",
"True",
",",
"verbose",
"=",
"False",
",",
"oti_wrapper",
"=",
"None",
")",
":",
"if",
"oti_wrapper",
"is",
"None",
":",
"from",
"peyotl",
".",
"sugar",
"import",
"oti",
"oti_wrapper",
"=",
"oti",
"return",
"oti_wrapper",
".",
"find_studies",
"(",
"arg_dict",
",",
"exact",
"=",
"exact",
",",
"verbose",
"=",
"verbose",
",",
"wrap_response",
"=",
"True",
")"
] | Uses a peyotl wrapper around an Open Tree web service to get a list of studies
including values `value` for a given property to be searched on `property`.
The oti_wrapper can be None (in which case the default wrapper from peyotl.sugar will be used).
All other arguments correspond to the arguments of the web-service call. | [
"Uses",
"a",
"peyotl",
"wrapper",
"around",
"an",
"Open",
"Tree",
"web",
"service",
"to",
"get",
"a",
"list",
"of",
"studies",
"including",
"values",
"value",
"for",
"a",
"given",
"property",
"to",
"be",
"searched",
"on",
"porperty",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/tutorials/ot-oti-find-studies.py#L12-L25 | train |
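
An illustrative call to the helper above, assuming `peyotl` is installed, the function is in scope, and the Open Tree oti service is reachable; the query dict mirrors the demo in the `main` record that follows. Iterating the wrapped response for matched studies is an assumption:

found = ot_find_studies({'ot:studyId': 'ot_308'}, exact=True, verbose=False)
for study in found:  # assumption: the wrapped response iterates over study matches
    print(study)
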
OpenTreeOfLife/peyotl | tutorials/ot-oti-find-studies.py | main | def main(argv):
"""This function sets up a command-line option parser and then calls print_matching_trees
to do all of the real work.
"""
import argparse
description = 'Uses Open Tree of Life web services to try to find a tree with the value property pair specified. ' \
'setting --fuzzy will allow fuzzy matching'
parser = argparse.ArgumentParser(prog='ot-get-tree', description=description)
parser.add_argument('arg_dict', type=json.loads, help='name(s) for which we will try to find OTT IDs')
parser.add_argument('--property', default=None, type=str, required=False)
parser.add_argument('--fuzzy', action='store_true', default=False,
required=False) # exact matching and verbose not working atm...
parser.add_argument('--verbose', action='store_true', default=False, required=False)
try:
args = parser.parse_args(argv)
arg_dict = args.arg_dict
exact = not args.fuzzy
verbose = args.verbose
except:
arg_dict = {'ot:studyId': 'ot_308'}
sys.stderr.write('Running a demonstration query with {}\n'.format(arg_dict))
exact = True
verbose = False
print_matching_studies(arg_dict, exact=exact, verbose=verbose) | python | def main(argv):
"""This function sets up a command-line option parser and then calls print_matching_trees
to do all of the real work.
"""
import argparse
description = 'Uses Open Tree of Life web services to try to find a tree with the value property pair specified. ' \
'setting --fuzzy will allow fuzzy matching'
parser = argparse.ArgumentParser(prog='ot-get-tree', description=description)
parser.add_argument('arg_dict', type=json.loads, help='name(s) for which we will try to find OTT IDs')
parser.add_argument('--property', default=None, type=str, required=False)
parser.add_argument('--fuzzy', action='store_true', default=False,
required=False) # exact matching and verbose not working atm...
parser.add_argument('--verbose', action='store_true', default=False, required=False)
try:
args = parser.parse_args(argv)
arg_dict = args.arg_dict
exact = not args.fuzzy
verbose = args.verbose
except:
arg_dict = {'ot:studyId': 'ot_308'}
sys.stderr.write('Running a demonstration query with {}\n'.format(arg_dict))
exact = True
verbose = False
print_matching_studies(arg_dict, exact=exact, verbose=verbose) | [
"def",
"main",
"(",
"argv",
")",
":",
"import",
"argparse",
"description",
"=",
"'Uses Open Tree of Life web services to try to find a tree with the value property pair specified. '",
"'setting --fuzzy will allow fuzzy matching'",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"'ot-get-tree'",
",",
"description",
"=",
"description",
")",
"parser",
".",
"add_argument",
"(",
"'arg_dict'",
",",
"type",
"=",
"json",
".",
"loads",
",",
"help",
"=",
"'name(s) for which we will try to find OTT IDs'",
")",
"parser",
".",
"add_argument",
"(",
"'--property'",
",",
"default",
"=",
"None",
",",
"type",
"=",
"str",
",",
"required",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"'--fuzzy'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"required",
"=",
"False",
")",
"# exact matching and verbose not working atm...",
"parser",
".",
"add_argument",
"(",
"'--verbose'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"required",
"=",
"False",
")",
"try",
":",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
")",
"arg_dict",
"=",
"args",
".",
"arg_dict",
"exact",
"=",
"not",
"args",
".",
"fuzzy",
"verbose",
"=",
"args",
".",
"verbose",
"except",
":",
"arg_dict",
"=",
"{",
"'ot:studyId'",
":",
"'ot_308'",
"}",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Running a demonstration query with {}\\n'",
".",
"format",
"(",
"arg_dict",
")",
")",
"exact",
"=",
"True",
"verbose",
"=",
"False",
"print_matching_studies",
"(",
"arg_dict",
",",
"exact",
"=",
"exact",
",",
"verbose",
"=",
"verbose",
")"
] | This function sets up a command-line option parser and then calls print_matching_studies
to do all of the real work. | [
"This",
"function",
"sets",
"up",
"a",
"command",
"-",
"line",
"option",
"parser",
"and",
"then",
"calls",
"print_matching_trees",
"to",
"do",
"all",
"of",
"the",
"real",
"work",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/tutorials/ot-oti-find-studies.py#L36-L59 | train |
OpenTreeOfLife/peyotl | tutorials/ot-taxo-mrca-to-root.py | main | def main(argv):
"""This function sets up a command-line option parser and then calls
to do all of the real work.
"""
import argparse
import codecs
# have to be ready to deal with utf-8 names
out = codecs.getwriter('utf-8')(sys.stdout)
description = '''Takes a series of at least 2 OTT ids and reports the OTT of their least inclusive taxonomic ancestor and that taxon's ancestors.'''
parser = argparse.ArgumentParser(prog='ot-taxo-mrca-to-root', description=description)
parser.add_argument('ids', nargs='+', type=int, help='OTT IDs')
args = parser.parse_args(argv)
id_list = args.ids
last_id = id_list.pop()
anc_list = get_taxonomic_ancestor_ids(last_id)
common_anc = set(anc_list)
for curr_id in id_list:
curr_anc_set = set(get_taxonomic_ancestor_ids(curr_id))
common_anc &= curr_anc_set
if not common_anc:
break
for anc_id in anc_list:
if anc_id in common_anc:
out.write('{}\n'.format(anc_id)) | python | def main(argv):
"""This function sets up a command-line option parser and then calls
to do all of the real work.
"""
import argparse
import codecs
# have to be ready to deal with utf-8 names
out = codecs.getwriter('utf-8')(sys.stdout)
description = '''Takes a series of at least 2 OTT ids and reports the OTT of their least inclusive taxonomic ancestor and that taxon's ancestors.'''
parser = argparse.ArgumentParser(prog='ot-taxo-mrca-to-root', description=description)
parser.add_argument('ids', nargs='+', type=int, help='OTT IDs')
args = parser.parse_args(argv)
id_list = args.ids
last_id = id_list.pop()
anc_list = get_taxonomic_ancestor_ids(last_id)
common_anc = set(anc_list)
for curr_id in id_list:
curr_anc_set = set(get_taxonomic_ancestor_ids(curr_id))
common_anc &= curr_anc_set
if not common_anc:
break
for anc_id in anc_list:
if anc_id in common_anc:
out.write('{}\n'.format(anc_id)) | [
"def",
"main",
"(",
"argv",
")",
":",
"import",
"argparse",
"import",
"codecs",
"# have to be ready to deal with utf-8 names",
"out",
"=",
"codecs",
".",
"getwriter",
"(",
"'utf-8'",
")",
"(",
"sys",
".",
"stdout",
")",
"description",
"=",
"'''Takes a series of at least 2 OTT ids and reports the OTT of their least inclusive taxonomic ancestor and that taxon's ancestors.'''",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"'ot-taxo-mrca-to-root'",
",",
"description",
"=",
"description",
")",
"parser",
".",
"add_argument",
"(",
"'ids'",
",",
"nargs",
"=",
"'+'",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'OTT IDs'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"argv",
")",
"id_list",
"=",
"args",
".",
"ids",
"last_id",
"=",
"id_list",
".",
"pop",
"(",
")",
"anc_list",
"=",
"get_taxonomic_ancestor_ids",
"(",
"last_id",
")",
"common_anc",
"=",
"set",
"(",
"anc_list",
")",
"for",
"curr_id",
"in",
"id_list",
":",
"curr_anc_set",
"=",
"set",
"(",
"get_taxonomic_ancestor_ids",
"(",
"curr_id",
")",
")",
"common_anc",
"&=",
"curr_anc_set",
"if",
"not",
"common_anc",
":",
"break",
"for",
"anc_id",
"in",
"anc_list",
":",
"if",
"anc_id",
"in",
"common_anc",
":",
"out",
".",
"write",
"(",
"'{}\\n'",
".",
"format",
"(",
"anc_id",
")",
")"
] | This function sets up a command-line option parser and then calls
to do all of the real work. | [
"This",
"function",
"sets",
"up",
"a",
"command",
"-",
"line",
"option",
"parser",
"and",
"then",
"calls",
"to",
"do",
"all",
"of",
"the",
"real",
"work",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/tutorials/ot-taxo-mrca-to-root.py#L23-L46 | train |
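
The web-service plumbing aside, the core of `main` above is an ordered set intersection; a standalone sketch with a hypothetical helper name and plain integer ids:

def mrca_to_root(ancestor_lists):
    # Each inner list runs tip-ward ancestor first, root last.
    ordered = ancestor_lists[0]
    common = set(ordered)
    for anc in ancestor_lists[1:]:
        common &= set(anc)
    # Filtering one ordered list preserves MRCA-to-root order.
    return [a for a in ordered if a in common]

print(mrca_to_root([[5, 4, 2, 1], [6, 4, 2, 1]]))  # -> [4, 2, 1]
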
CybOXProject/mixbox | mixbox/datautils.py | is_sequence | def is_sequence(value):
"""Determine if a value is a sequence type.
Returns:
``True`` if `value` is a sequence type (e.g., ``list``, or ``tuple``).
String types will return ``False``.
NOTE: On Python 3, strings have the __iter__ defined, so a simple hasattr
check is insufficient.
"""
return (hasattr(value, "__iter__") and not
isinstance(value, (six.string_types, six.binary_type))) | python | def is_sequence(value):
"""Determine if a value is a sequence type.
Returns:
``True`` if `value` is a sequence type (e.g., ``list``, or ``tuple``).
String types will return ``False``.
NOTE: On Python 3, strings have the __iter__ defined, so a simple hasattr
check is insufficient.
"""
return (hasattr(value, "__iter__") and not
isinstance(value, (six.string_types, six.binary_type))) | [
"def",
"is_sequence",
"(",
"value",
")",
":",
"return",
"(",
"hasattr",
"(",
"value",
",",
"\"__iter__\"",
")",
"and",
"not",
"isinstance",
"(",
"value",
",",
"(",
"six",
".",
"string_types",
",",
"six",
".",
"binary_type",
")",
")",
")"
] | Determine if a value is a sequence type.
Returns:
``True`` if `value` is a sequence type (e.g., ``list``, or ``tuple``).
String types will return ``False``.
NOTE: On Python 3, strings have the __iter__ defined, so a simple hasattr
check is insufficient. | [
"Determine",
"if",
"a",
"value",
"is",
"a",
"sequence",
"type",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/datautils.py#L12-L23 | train |
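
Quick checks for the predicate above, assuming `mixbox` (and its `six` dependency) is installed:

from mixbox.datautils import is_sequence  # assumption: mixbox installed

assert is_sequence([1, 2])       # lists qualify
assert is_sequence((1, 2))       # tuples qualify
assert not is_sequence("abc")    # strings iterate but are excluded
assert not is_sequence(b"abc")   # bytes likewise
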
CybOXProject/mixbox | mixbox/datautils.py | import_class | def import_class(classpath):
"""Import the class referred to by the fully qualified class path.
Args:
classpath: A full "foo.bar.MyClass" path to a class definition.
Returns:
The class referred to by the classpath.
Raises:
ImportError: If an error occurs while importing the module.
        AttributeError: If the class does not exist in the imported module.
"""
modname, classname = classpath.rsplit(".", 1)
module = importlib.import_module(modname)
klass = getattr(module, classname)
return klass | python | def import_class(classpath):
"""Import the class referred to by the fully qualified class path.
Args:
classpath: A full "foo.bar.MyClass" path to a class definition.
Returns:
The class referred to by the classpath.
Raises:
ImportError: If an error occurs while importing the module.
        AttributeError: If the class does not exist in the imported module.
"""
modname, classname = classpath.rsplit(".", 1)
module = importlib.import_module(modname)
klass = getattr(module, classname)
return klass | [
"def",
"import_class",
"(",
"classpath",
")",
":",
"modname",
",",
"classname",
"=",
"classpath",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"modname",
")",
"klass",
"=",
"getattr",
"(",
"module",
",",
"classname",
")",
"return",
"klass"
] | Import the class referred to by the fully qualified class path.
Args:
classpath: A full "foo.bar.MyClass" path to a class definition.
Returns:
The class referred to by the classpath.
Raises:
ImportError: If an error occurs while importing the module.
AttributeError: If the class does not exist in the imported module. | [
"Import",
"the",
"class",
"referred",
"to",
"by",
"the",
"fully",
"qualified",
"class",
"path",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/datautils.py#L26-L42 | train |
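
A short usage example for `import_class` above, assuming `mixbox` is installed:

from mixbox.datautils import import_class  # assumption: mixbox installed

OrderedDict = import_class("collections.OrderedDict")
print(OrderedDict().__class__.__name__)  # -> 'OrderedDict'
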
CybOXProject/mixbox | mixbox/datautils.py | resolve_class | def resolve_class(classref):
"""Attempt to return a Python class for the input class reference.
If `classref` is a class or None, return it. If `classref` is a
python classpath (e.g., "foo.bar.MyClass") import the class and return
it.
Args:
classref: A fully-qualified Python path to class, or a Python class.
Returns:
A class.
"""
if classref is None:
return None
elif isinstance(classref, six.class_types):
return classref
elif isinstance(classref, six.string_types):
return import_class(classref)
else:
raise ValueError("Unable to resolve class for '%s'" % classref) | python | def resolve_class(classref):
"""Attempt to return a Python class for the input class reference.
If `classref` is a class or None, return it. If `classref` is a
python classpath (e.g., "foo.bar.MyClass") import the class and return
it.
Args:
classref: A fully-qualified Python path to class, or a Python class.
Returns:
A class.
"""
if classref is None:
return None
elif isinstance(classref, six.class_types):
return classref
elif isinstance(classref, six.string_types):
return import_class(classref)
else:
raise ValueError("Unable to resolve class for '%s'" % classref) | [
"def",
"resolve_class",
"(",
"classref",
")",
":",
"if",
"classref",
"is",
"None",
":",
"return",
"None",
"elif",
"isinstance",
"(",
"classref",
",",
"six",
".",
"class_types",
")",
":",
"return",
"classref",
"elif",
"isinstance",
"(",
"classref",
",",
"six",
".",
"string_types",
")",
":",
"return",
"import_class",
"(",
"classref",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unable to resolve class for '%s'\"",
"%",
"classref",
")"
] | Attempt to return a Python class for the input class reference.
If `classref` is a class or None, return it. If `classref` is a
python classpath (e.g., "foo.bar.MyClass") import the class and return
it.
Args:
classref: A fully-qualified Python path to class, or a Python class.
Returns:
A class. | [
"Attempt",
"to",
"return",
"a",
"Python",
"class",
"for",
"the",
"input",
"class",
"reference",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/datautils.py#L45-L65 | train |
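
Quick checks covering the three branches of `resolve_class` above, assuming `mixbox` is installed:

from mixbox.datautils import resolve_class  # assumption: mixbox installed

assert resolve_class(None) is None
assert resolve_class(dict) is dict  # classes pass straight through
assert resolve_class("collections.OrderedDict").__name__ == "OrderedDict"
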
CybOXProject/mixbox | mixbox/datautils.py | needkwargs | def needkwargs(*argnames):
"""Function decorator which checks that the decorated function is called
with a set of required kwargs.
Args:
*argnames: String keyword argument names.
Raises:
ValueError: If a required kwarg is missing in the decorated function
call.
"""
required = set(argnames)
def decorator(func):
def inner(*args, **kwargs):
missing = required - set(kwargs)
if missing:
err = "%s kwargs are missing." % list(missing)
raise ValueError(err)
return func(*args, **kwargs)
return inner
return decorator | python | def needkwargs(*argnames):
"""Function decorator which checks that the decorated function is called
with a set of required kwargs.
Args:
*argnames: String keyword argument names.
Raises:
ValueError: If a required kwarg is missing in the decorated function
call.
"""
required = set(argnames)
def decorator(func):
def inner(*args, **kwargs):
missing = required - set(kwargs)
if missing:
err = "%s kwargs are missing." % list(missing)
raise ValueError(err)
return func(*args, **kwargs)
return inner
return decorator | [
"def",
"needkwargs",
"(",
"*",
"argnames",
")",
":",
"required",
"=",
"set",
"(",
"argnames",
")",
"def",
"decorator",
"(",
"func",
")",
":",
"def",
"inner",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"missing",
"=",
"required",
"-",
"set",
"(",
"kwargs",
")",
"if",
"missing",
":",
"err",
"=",
"\"%s kwargs are missing.\"",
"%",
"list",
"(",
"missing",
")",
"raise",
"ValueError",
"(",
"err",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"inner",
"return",
"decorator"
] | Function decorator which checks that the decorated function is called
with a set of required kwargs.
Args:
*argnames: String keyword argument names.
Raises:
ValueError: If a required kwarg is missing in the decorated function
call. | [
"Function",
"decorator",
"which",
"checks",
"that",
"the",
"decorated",
"function",
"is",
"called",
"with",
"a",
"set",
"of",
"required",
"kwargs",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/datautils.py#L83-L104 | train |
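
A usage example for the decorator above, assuming `mixbox` is installed; the decorated function name and kwargs are illustrative:

from mixbox.datautils import needkwargs  # assumption: mixbox installed

@needkwargs("host", "port")
def connect(**kwargs):
    return (kwargs["host"], kwargs["port"])

print(connect(host="localhost", port=3551))  # -> ('localhost', 3551)
# connect(host="localhost") would raise ValueError: ['port'] kwargs are missing.
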
flyte/apcaccess | apcaccess/status.py | get | def get(host="localhost", port=3551, timeout=30):
"""
Connect to the APCUPSd NIS and request its status.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
sock.connect((host, port))
sock.send(CMD_STATUS)
buffr = ""
while not buffr.endswith(EOF):
buffr += sock.recv(BUFFER_SIZE).decode()
sock.close()
return buffr | python | def get(host="localhost", port=3551, timeout=30):
"""
Connect to the APCUPSd NIS and request its status.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
sock.connect((host, port))
sock.send(CMD_STATUS)
buffr = ""
while not buffr.endswith(EOF):
buffr += sock.recv(BUFFER_SIZE).decode()
sock.close()
return buffr | [
"def",
"get",
"(",
"host",
"=",
"\"localhost\"",
",",
"port",
"=",
"3551",
",",
"timeout",
"=",
"30",
")",
":",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"sock",
".",
"settimeout",
"(",
"timeout",
")",
"sock",
".",
"connect",
"(",
"(",
"host",
",",
"port",
")",
")",
"sock",
".",
"send",
"(",
"CMD_STATUS",
")",
"buffr",
"=",
"\"\"",
"while",
"not",
"buffr",
".",
"endswith",
"(",
"EOF",
")",
":",
"buffr",
"+=",
"sock",
".",
"recv",
"(",
"BUFFER_SIZE",
")",
".",
"decode",
"(",
")",
"sock",
".",
"close",
"(",
")",
"return",
"buffr"
] | Connect to the APCUPSd NIS and request its status. | [
"Connect",
"to",
"the",
"APCUPSd",
"NIS",
"and",
"request",
"its",
"status",
"."
] | 0c8a5d5e4ba1c07110e411b4ffea4ddccef4829a | https://github.com/flyte/apcaccess/blob/0c8a5d5e4ba1c07110e411b4ffea4ddccef4829a/apcaccess/status.py#L31-L43 | train |
flyte/apcaccess | apcaccess/status.py | strip_units_from_lines | def strip_units_from_lines(lines):
"""
Removes all units from the ends of the lines.
"""
for line in lines:
for unit in ALL_UNITS:
if line.endswith(" %s" % unit):
line = line[:-1-len(unit)]
yield line | python | def strip_units_from_lines(lines):
"""
Removes all units from the ends of the lines.
"""
for line in lines:
for unit in ALL_UNITS:
if line.endswith(" %s" % unit):
line = line[:-1-len(unit)]
yield line | [
"def",
"strip_units_from_lines",
"(",
"lines",
")",
":",
"for",
"line",
"in",
"lines",
":",
"for",
"unit",
"in",
"ALL_UNITS",
":",
"if",
"line",
".",
"endswith",
"(",
"\" %s\"",
"%",
"unit",
")",
":",
"line",
"=",
"line",
"[",
":",
"-",
"1",
"-",
"len",
"(",
"unit",
")",
"]",
"yield",
"line"
] | Removes all units from the ends of the lines. | [
"Removes",
"all",
"units",
"from",
"the",
"ends",
"of",
"the",
"lines",
"."
] | 0c8a5d5e4ba1c07110e411b4ffea4ddccef4829a | https://github.com/flyte/apcaccess/blob/0c8a5d5e4ba1c07110e411b4ffea4ddccef4829a/apcaccess/status.py#L69-L77 | train |
flyte/apcaccess | apcaccess/status.py | print_status | def print_status(raw_status, strip_units=False):
"""
Print the status to stdout in the same format as the original apcaccess.
"""
lines = split(raw_status)
if strip_units:
lines = strip_units_from_lines(lines)
for line in lines:
print(line) | python | def print_status(raw_status, strip_units=False):
"""
Print the status to stdout in the same format as the original apcaccess.
"""
lines = split(raw_status)
if strip_units:
lines = strip_units_from_lines(lines)
for line in lines:
print(line) | [
"def",
"print_status",
"(",
"raw_status",
",",
"strip_units",
"=",
"False",
")",
":",
"lines",
"=",
"split",
"(",
"raw_status",
")",
"if",
"strip_units",
":",
"lines",
"=",
"strip_units_from_lines",
"(",
"lines",
")",
"for",
"line",
"in",
"lines",
":",
"print",
"(",
"line",
")"
] | Print the status to stdout in the same format as the original apcaccess. | [
"Print",
"the",
"status",
"to",
"stdout",
"in",
"the",
"same",
"format",
"as",
"the",
"original",
"apcaccess",
"."
] | 0c8a5d5e4ba1c07110e411b4ffea4ddccef4829a | https://github.com/flyte/apcaccess/blob/0c8a5d5e4ba1c07110e411b4ffea4ddccef4829a/apcaccess/status.py#L80-L88 | train |
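
An end-to-end sketch tying the three apcaccess records together; it assumes the package is installed and an apcupsd NIS is actually listening on the defaults shown in `get`:

from apcaccess import status  # assumption: module path per the records

raw = status.get(host="localhost", port=3551, timeout=30)
status.print_status(raw, strip_units=True)  # CLI-style output with units stripped
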
OpenTreeOfLife/peyotl | peyotl/api/taxomachine.py | _TaxomachineAPIWrapper.get_cached_parent_for_taxon | def get_cached_parent_for_taxon(self, child_taxon):
"""If the taxa are being cached, this call will create a the lineage "spike" for taxon child_taxon
Expecting child_taxon to have a non-empty _taxonomic_lineage with response dicts that can create
an ancestral TaxonWrapper.
"""
if self._ott_id2taxon is None:
resp = child_taxon._taxonomic_lineage[0]
tl = child_taxon._taxonomic_lineage[1:]
assert 'taxonomic_lineage' not in resp
resp['taxonomic_lineage'] = tl
return TaxonWrapper(taxonomy=child_taxon.taxonomy,
taxomachine_wrapper=self._wr,
prop_dict=resp) # TODO recursive (indirectly)
else:
anc = []
prev = None
for resp in reversed(child_taxon._taxonomic_lineage):
ott_id = resp['ot:ottId']
curr = self._ott_id2taxon.get(ott_id)
if curr is None:
assert 'taxonomic_lineage' not in resp
assert 'parent' not in resp
resp['parent'] = prev
resp['taxonomic_lineage'] = anc
curr = TaxonWrapper(taxonomy=child_taxon.taxonomy,
taxomachine_wrapper=self._wr,
prop_dict=resp)
elif curr._parent is None and prev is not None:
curr._parent = prev
prev = curr
anc.insert(0, curr)
return prev | python | def get_cached_parent_for_taxon(self, child_taxon):
"""If the taxa are being cached, this call will create a the lineage "spike" for taxon child_taxon
Expecting child_taxon to have a non-empty _taxonomic_lineage with response dicts that can create
an ancestral TaxonWrapper.
"""
if self._ott_id2taxon is None:
resp = child_taxon._taxonomic_lineage[0]
tl = child_taxon._taxonomic_lineage[1:]
assert 'taxonomic_lineage' not in resp
resp['taxonomic_lineage'] = tl
return TaxonWrapper(taxonomy=child_taxon.taxonomy,
taxomachine_wrapper=self._wr,
prop_dict=resp) # TODO recursive (indirectly)
else:
anc = []
prev = None
for resp in reversed(child_taxon._taxonomic_lineage):
ott_id = resp['ot:ottId']
curr = self._ott_id2taxon.get(ott_id)
if curr is None:
assert 'taxonomic_lineage' not in resp
assert 'parent' not in resp
resp['parent'] = prev
resp['taxonomic_lineage'] = anc
curr = TaxonWrapper(taxonomy=child_taxon.taxonomy,
taxomachine_wrapper=self._wr,
prop_dict=resp)
elif curr._parent is None and prev is not None:
curr._parent = prev
prev = curr
anc.insert(0, curr)
return prev | [
"def",
"get_cached_parent_for_taxon",
"(",
"self",
",",
"child_taxon",
")",
":",
"if",
"self",
".",
"_ott_id2taxon",
"is",
"None",
":",
"resp",
"=",
"child_taxon",
".",
"_taxonomic_lineage",
"[",
"0",
"]",
"tl",
"=",
"child_taxon",
".",
"_taxonomic_lineage",
"[",
"1",
":",
"]",
"assert",
"'taxonomic_lineage'",
"not",
"in",
"resp",
"resp",
"[",
"'taxonomic_lineage'",
"]",
"=",
"tl",
"return",
"TaxonWrapper",
"(",
"taxonomy",
"=",
"child_taxon",
".",
"taxonomy",
",",
"taxomachine_wrapper",
"=",
"self",
".",
"_wr",
",",
"prop_dict",
"=",
"resp",
")",
"# TODO recursive (indirectly)",
"else",
":",
"anc",
"=",
"[",
"]",
"prev",
"=",
"None",
"for",
"resp",
"in",
"reversed",
"(",
"child_taxon",
".",
"_taxonomic_lineage",
")",
":",
"ott_id",
"=",
"resp",
"[",
"'ot:ottId'",
"]",
"curr",
"=",
"self",
".",
"_ott_id2taxon",
".",
"get",
"(",
"ott_id",
")",
"if",
"curr",
"is",
"None",
":",
"assert",
"'taxonomic_lineage'",
"not",
"in",
"resp",
"assert",
"'parent'",
"not",
"in",
"resp",
"resp",
"[",
"'parent'",
"]",
"=",
"prev",
"resp",
"[",
"'taxonomic_lineage'",
"]",
"=",
"anc",
"curr",
"=",
"TaxonWrapper",
"(",
"taxonomy",
"=",
"child_taxon",
".",
"taxonomy",
",",
"taxomachine_wrapper",
"=",
"self",
".",
"_wr",
",",
"prop_dict",
"=",
"resp",
")",
"elif",
"curr",
".",
"_parent",
"is",
"None",
"and",
"prev",
"is",
"not",
"None",
":",
"curr",
".",
"_parent",
"=",
"prev",
"prev",
"=",
"curr",
"anc",
".",
"insert",
"(",
"0",
",",
"curr",
")",
"return",
"prev"
] | If the taxa are being cached, this call will create the lineage "spike" for taxon child_taxon
Expecting child_taxon to have a non-empty _taxonomic_lineage with response dicts that can create
an ancestral TaxonWrapper. | [
"If",
"the",
"taxa",
"are",
"being",
"cached",
"this",
"call",
"will",
"create",
"a",
"the",
"lineage",
"spike",
"for",
"taxon",
"child_taxon"
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/api/taxomachine.py#L373-L405 | train |
OpenTreeOfLife/peyotl | peyotl/api/taxon.py | TaxonWrapper.update_empty_fields | def update_empty_fields(self, **kwargs):
"""Updates the field of info about an OTU that might not be filled in by a match_names or taxon call."""
if self._is_deprecated is None:
self._is_deprecated = kwargs.get('is_deprecated')
if self._is_dubious is None:
self._is_dubious = kwargs.get('is_dubious')
if self._is_synonym is None:
self._is_synonym = kwargs.get('is_synonym')
if self._synonyms is _EMPTY_TUPLE:
self._synonyms = kwargs.get('synonyms')
if self._synonyms is None:
self._synonyms = _EMPTY_TUPLE
if self.rank is None:
self._rank = kwargs.get('rank')
if self._nomenclature_code:
self._nomenclature_code = kwargs.get('nomenclature_code')
if not self._unique_name:
self._unique_name = kwargs.get('unique_name')
if self._taxonomic_lineage is None:
self._taxonomic_lineage = kwargs.get('taxonomic_lineage')
if self._parent is None:
self._parent = kwargs.get('parent')
if self._parent is None and self._taxomachine_wrapper is not None and self._taxonomic_lineage:
self._fill_parent_attr() | python | def update_empty_fields(self, **kwargs):
"""Updates the field of info about an OTU that might not be filled in by a match_names or taxon call."""
if self._is_deprecated is None:
self._is_deprecated = kwargs.get('is_deprecated')
if self._is_dubious is None:
self._is_dubious = kwargs.get('is_dubious')
if self._is_synonym is None:
self._is_synonym = kwargs.get('is_synonym')
if self._synonyms is _EMPTY_TUPLE:
self._synonyms = kwargs.get('synonyms')
if self._synonyms is None:
self._synonyms = _EMPTY_TUPLE
if self.rank is None:
self._rank = kwargs.get('rank')
if self._nomenclature_code:
self._nomenclature_code = kwargs.get('nomenclature_code')
if not self._unique_name:
self._unique_name = kwargs.get('unique_name')
if self._taxonomic_lineage is None:
self._taxonomic_lineage = kwargs.get('taxonomic_lineage')
if self._parent is None:
self._parent = kwargs.get('parent')
if self._parent is None and self._taxomachine_wrapper is not None and self._taxonomic_lineage:
self._fill_parent_attr() | [
"def",
"update_empty_fields",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_is_deprecated",
"is",
"None",
":",
"self",
".",
"_is_deprecated",
"=",
"kwargs",
".",
"get",
"(",
"'is_deprecated'",
")",
"if",
"self",
".",
"_is_dubious",
"is",
"None",
":",
"self",
".",
"_is_dubious",
"=",
"kwargs",
".",
"get",
"(",
"'is_dubious'",
")",
"if",
"self",
".",
"_is_synonym",
"is",
"None",
":",
"self",
".",
"_is_synonym",
"=",
"kwargs",
".",
"get",
"(",
"'is_synonym'",
")",
"if",
"self",
".",
"_synonyms",
"is",
"_EMPTY_TUPLE",
":",
"self",
".",
"_synonyms",
"=",
"kwargs",
".",
"get",
"(",
"'synonyms'",
")",
"if",
"self",
".",
"_synonyms",
"is",
"None",
":",
"self",
".",
"_synonyms",
"=",
"_EMPTY_TUPLE",
"if",
"self",
".",
"rank",
"is",
"None",
":",
"self",
".",
"_rank",
"=",
"kwargs",
".",
"get",
"(",
"'rank'",
")",
"if",
"self",
".",
"_nomenclature_code",
":",
"self",
".",
"_nomenclature_code",
"=",
"kwargs",
".",
"get",
"(",
"'nomenclature_code'",
")",
"if",
"not",
"self",
".",
"_unique_name",
":",
"self",
".",
"_unique_name",
"=",
"kwargs",
".",
"get",
"(",
"'unique_name'",
")",
"if",
"self",
".",
"_taxonomic_lineage",
"is",
"None",
":",
"self",
".",
"_taxonomic_lineage",
"=",
"kwargs",
".",
"get",
"(",
"'taxonomic_lineage'",
")",
"if",
"self",
".",
"_parent",
"is",
"None",
":",
"self",
".",
"_parent",
"=",
"kwargs",
".",
"get",
"(",
"'parent'",
")",
"if",
"self",
".",
"_parent",
"is",
"None",
"and",
"self",
".",
"_taxomachine_wrapper",
"is",
"not",
"None",
"and",
"self",
".",
"_taxonomic_lineage",
":",
"self",
".",
"_fill_parent_attr",
"(",
")"
] | Updates the field of info about an OTU that might not be filled in by a match_names or taxon call. | [
"Updates",
"the",
"field",
"of",
"info",
"about",
"an",
"OTU",
"that",
"might",
"not",
"be",
"filled",
"in",
"by",
"a",
"match_names",
"or",
"taxon",
"call",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/api/taxon.py#L47-L70 | train |
OpenTreeOfLife/peyotl | scripts/nexson/prune_to_clean_mapped.py | _check_rev_dict | def _check_rev_dict(tree, ebt):
"""Verifyies that `ebt` is the inverse of the `edgeBySourceId` data member of `tree`"""
ebs = defaultdict(dict)
for edge in ebt.values():
source_id = edge['@source']
edge_id = edge['@id']
ebs[source_id][edge_id] = edge
assert ebs == tree['edgeBySourceId'] | python | def _check_rev_dict(tree, ebt):
"""Verifyies that `ebt` is the inverse of the `edgeBySourceId` data member of `tree`"""
ebs = defaultdict(dict)
for edge in ebt.values():
source_id = edge['@source']
edge_id = edge['@id']
ebs[source_id][edge_id] = edge
assert ebs == tree['edgeBySourceId'] | [
"def",
"_check_rev_dict",
"(",
"tree",
",",
"ebt",
")",
":",
"ebs",
"=",
"defaultdict",
"(",
"dict",
")",
"for",
"edge",
"in",
"ebt",
".",
"values",
"(",
")",
":",
"source_id",
"=",
"edge",
"[",
"'@source'",
"]",
"edge_id",
"=",
"edge",
"[",
"'@id'",
"]",
"ebs",
"[",
"source_id",
"]",
"[",
"edge_id",
"]",
"=",
"edge",
"assert",
"ebs",
"==",
"tree",
"[",
"'edgeBySourceId'",
"]"
] | Verifies that `ebt` is the inverse of the `edgeBySourceId` data member of `tree` | [
"Verifyies",
"that",
"ebt",
"is",
"the",
"inverse",
"of",
"the",
"edgeBySourceId",
"data",
"member",
"of",
"tree"
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/scripts/nexson/prune_to_clean_mapped.py#L42-L49 | train |
OpenTreeOfLife/peyotl | scripts/nexson/prune_to_clean_mapped.py | NexsonTreeWrapper._create_edge_by_target | def _create_edge_by_target(self):
"""creates a edge_by_target dict with the same edge objects as the edge_by_source.
Also adds an '@id' field to each edge."""
ebt = {}
for edge_dict in self._edge_by_source.values():
for edge_id, edge in edge_dict.items():
target_id = edge['@target']
edge['@id'] = edge_id
assert target_id not in ebt
ebt[target_id] = edge
# _check_rev_dict(self._tree, ebt)
return ebt | python | def _create_edge_by_target(self):
"""creates a edge_by_target dict with the same edge objects as the edge_by_source.
Also adds an '@id' field to each edge."""
ebt = {}
for edge_dict in self._edge_by_source.values():
for edge_id, edge in edge_dict.items():
target_id = edge['@target']
edge['@id'] = edge_id
assert target_id not in ebt
ebt[target_id] = edge
# _check_rev_dict(self._tree, ebt)
return ebt | [
"def",
"_create_edge_by_target",
"(",
"self",
")",
":",
"ebt",
"=",
"{",
"}",
"for",
"edge_dict",
"in",
"self",
".",
"_edge_by_source",
".",
"values",
"(",
")",
":",
"for",
"edge_id",
",",
"edge",
"in",
"edge_dict",
".",
"items",
"(",
")",
":",
"target_id",
"=",
"edge",
"[",
"'@target'",
"]",
"edge",
"[",
"'@id'",
"]",
"=",
"edge_id",
"assert",
"target_id",
"not",
"in",
"ebt",
"ebt",
"[",
"target_id",
"]",
"=",
"edge",
"# _check_rev_dict(self._tree, ebt)",
"return",
"ebt"
] | creates an edge_by_target dict with the same edge objects as the edge_by_source.
Also adds an '@id' field to each edge. | [
"creates",
"a",
"edge_by_target",
"dict",
"with",
"the",
"same",
"edge",
"objects",
"as",
"the",
"edge_by_source",
".",
"Also",
"adds",
"an"
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/scripts/nexson/prune_to_clean_mapped.py#L86-L97 | train |
OpenTreeOfLife/peyotl | scripts/nexson/prune_to_clean_mapped.py | NexsonTreeWrapper.prune_to_ingroup | def prune_to_ingroup(self):
"""Remove nodes and edges from tree if they are not the ingroup or a descendant of it."""
# Prune to just the ingroup
if not self._ingroup_node_id:
_LOG.debug('No ingroup node was specified.')
self._ingroup_node_id = self.root_node_id
elif self._ingroup_node_id != self.root_node_id:
self._do_prune_to_ingroup()
self.root_node_id = self._ingroup_node_id
else:
_LOG.debug('Ingroup node is root.')
return self.root_node_id | python | def prune_to_ingroup(self):
"""Remove nodes and edges from tree if they are not the ingroup or a descendant of it."""
# Prune to just the ingroup
if not self._ingroup_node_id:
_LOG.debug('No ingroup node was specified.')
self._ingroup_node_id = self.root_node_id
elif self._ingroup_node_id != self.root_node_id:
self._do_prune_to_ingroup()
self.root_node_id = self._ingroup_node_id
else:
_LOG.debug('Ingroup node is root.')
return self.root_node_id | [
"def",
"prune_to_ingroup",
"(",
"self",
")",
":",
"# Prune to just the ingroup",
"if",
"not",
"self",
".",
"_ingroup_node_id",
":",
"_LOG",
".",
"debug",
"(",
"'No ingroup node was specified.'",
")",
"self",
".",
"_ingroup_node_id",
"=",
"self",
".",
"root_node_id",
"elif",
"self",
".",
"_ingroup_node_id",
"!=",
"self",
".",
"root_node_id",
":",
"self",
".",
"_do_prune_to_ingroup",
"(",
")",
"self",
".",
"root_node_id",
"=",
"self",
".",
"_ingroup_node_id",
"else",
":",
"_LOG",
".",
"debug",
"(",
"'Ingroup node is root.'",
")",
"return",
"self",
".",
"root_node_id"
] | Remove nodes and edges from tree if they are not the ingroup or a descendant of it. | [
"Remove",
"nodes",
"and",
"edges",
"from",
"tree",
"if",
"they",
"are",
"not",
"the",
"ingroup",
"or",
"a",
"descendant",
"of",
"it",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/scripts/nexson/prune_to_clean_mapped.py#L119-L130 | train |
OpenTreeOfLife/peyotl | scripts/nexson/prune_to_clean_mapped.py | NexsonTreeWrapper.prune_clade | def prune_clade(self, node_id):
"""Prune `node_id` and the edges and nodes that are tipward of it.
Caller must delete the edge to node_id."""
to_del_nodes = [node_id]
while bool(to_del_nodes):
node_id = to_del_nodes.pop(0)
self._flag_node_as_del_and_del_in_by_target(node_id)
ebsd = self._edge_by_source.get(node_id)
if ebsd is not None:
child_edges = list(ebsd.values())
to_del_nodes.extend([i['@target'] for i in child_edges])
del self._edge_by_source[
node_id] | python | def prune_clade(self, node_id):
"""Prune `node_id` and the edges and nodes that are tipward of it.
Caller must delete the edge to node_id."""
to_del_nodes = [node_id]
while bool(to_del_nodes):
node_id = to_del_nodes.pop(0)
self._flag_node_as_del_and_del_in_by_target(node_id)
ebsd = self._edge_by_source.get(node_id)
if ebsd is not None:
child_edges = list(ebsd.values())
to_del_nodes.extend([i['@target'] for i in child_edges])
del self._edge_by_source[
node_id] | [
"def",
"prune_clade",
"(",
"self",
",",
"node_id",
")",
":",
"to_del_nodes",
"=",
"[",
"node_id",
"]",
"while",
"bool",
"(",
"to_del_nodes",
")",
":",
"node_id",
"=",
"to_del_nodes",
".",
"pop",
"(",
"0",
")",
"self",
".",
"_flag_node_as_del_and_del_in_by_target",
"(",
"node_id",
")",
"ebsd",
"=",
"self",
".",
"_edge_by_source",
".",
"get",
"(",
"node_id",
")",
"if",
"ebsd",
"is",
"not",
"None",
":",
"child_edges",
"=",
"list",
"(",
"ebsd",
".",
"values",
"(",
")",
")",
"to_del_nodes",
".",
"extend",
"(",
"[",
"i",
"[",
"'@target'",
"]",
"for",
"i",
"in",
"child_edges",
"]",
")",
"del",
"self",
".",
"_edge_by_source",
"[",
"node_id",
"]"
] | Prune `node_id` and the edges and nodes that are tipward of it.
Caller must delete the edge to node_id. | [
"Prune",
"node_id",
"and",
"the",
"edges",
"and",
"nodes",
"that",
"are",
"tipward",
"of",
"it",
".",
"Caller",
"must",
"delete",
"the",
"edge",
"to",
"node_id",
"."
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/scripts/nexson/prune_to_clean_mapped.py#L147-L159 | train |
OpenTreeOfLife/peyotl | scripts/nexson/prune_to_clean_mapped.py | NexsonTreeWrapper.suppress_deg_one_node | def suppress_deg_one_node(self, to_par_edge, nd_id, to_child_edge):
"""Deletes to_par_edge and nd_id. To be used when nd_id is an out-degree= 1 node"""
# circumvent the node with nd_id
to_child_edge_id = to_child_edge['@id']
par = to_par_edge['@source']
self._edge_by_source[par][to_child_edge_id] = to_child_edge
to_child_edge['@source'] = par
# make it a tip...
del self._edge_by_source[nd_id]
# delete it
self._del_tip(nd_id) | python | def suppress_deg_one_node(self, to_par_edge, nd_id, to_child_edge):
"""Deletes to_par_edge and nd_id. To be used when nd_id is an out-degree= 1 node"""
# circumvent the node with nd_id
to_child_edge_id = to_child_edge['@id']
par = to_par_edge['@source']
self._edge_by_source[par][to_child_edge_id] = to_child_edge
to_child_edge['@source'] = par
# make it a tip...
del self._edge_by_source[nd_id]
# delete it
self._del_tip(nd_id) | [
"def",
"suppress_deg_one_node",
"(",
"self",
",",
"to_par_edge",
",",
"nd_id",
",",
"to_child_edge",
")",
":",
"# circumvent the node with nd_id",
"to_child_edge_id",
"=",
"to_child_edge",
"[",
"'@id'",
"]",
"par",
"=",
"to_par_edge",
"[",
"'@source'",
"]",
"self",
".",
"_edge_by_source",
"[",
"par",
"]",
"[",
"to_child_edge_id",
"]",
"=",
"to_child_edge",
"to_child_edge",
"[",
"'@source'",
"]",
"=",
"par",
"# make it a tip...",
"del",
"self",
".",
"_edge_by_source",
"[",
"nd_id",
"]",
"# delete it",
"self",
".",
"_del_tip",
"(",
"nd_id",
")"
] | Deletes to_par_edge and nd_id. To be used when nd_id is an out-degree 1 node | [
"Deletes",
"to_par_edge",
"and",
"nd_id",
".",
"To",
"be",
"used",
"when",
"nd_id",
"is",
"an",
"out",
"-",
"degree",
"=",
"1",
"node"
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/scripts/nexson/prune_to_clean_mapped.py#L278-L288 | train |
palantir/typedjsonrpc | typedjsonrpc/method_info.py | MethodInfo.describe | def describe(self):
"""Describes the method.
:return: Description
:rtype: dict[str, object]
"""
return {
"name": self.name,
"params": self.params,
"returns": self.returns,
"description": self.description,
} | python | def describe(self):
"""Describes the method.
:return: Description
:rtype: dict[str, object]
"""
return {
"name": self.name,
"params": self.params,
"returns": self.returns,
"description": self.description,
} | [
"def",
"describe",
"(",
"self",
")",
":",
"return",
"{",
"\"name\"",
":",
"self",
".",
"name",
",",
"\"params\"",
":",
"self",
".",
"params",
",",
"\"returns\"",
":",
"self",
".",
"returns",
",",
"\"description\"",
":",
"self",
".",
"description",
",",
"}"
] | Describes the method.
:return: Description
:rtype: dict[str, object] | [
"Describes",
"the",
"method",
"."
] | 274218fcd236ff9643506caa629029c9ba25a0fb | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/method_info.py#L36-L47 | train |
palantir/typedjsonrpc | typedjsonrpc/method_info.py | MethodInfo.params | def params(self):
"""The parameters for this method in a JSON-compatible format
:rtype: list[dict[str, str]]
"""
return [{"name": p_name, "type": p_type.__name__}
for (p_name, p_type) in self.signature.parameter_types] | python | def params(self):
"""The parameters for this method in a JSON-compatible format
:rtype: list[dict[str, str]]
"""
return [{"name": p_name, "type": p_type.__name__}
for (p_name, p_type) in self.signature.parameter_types] | [
"def",
"params",
"(",
"self",
")",
":",
"return",
"[",
"{",
"\"name\"",
":",
"p_name",
",",
"\"type\"",
":",
"p_type",
".",
"__name__",
"}",
"for",
"(",
"p_name",
",",
"p_type",
")",
"in",
"self",
".",
"signature",
".",
"parameter_types",
"]"
] | The parameters for this method in a JSON-compatible format
:rtype: list[dict[str, str]] | [
"The",
"parameters",
"for",
"this",
"method",
"in",
"a",
"JSON",
"-",
"compatible",
"format"
] | 274218fcd236ff9643506caa629029c9ba25a0fb | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/method_info.py#L50-L56 | train |
palantir/typedjsonrpc | typedjsonrpc/method_info.py | MethodInfo.returns | def returns(self):
"""The return type for this method in a JSON-compatible format.
This handles the special case of ``None`` which allows ``type(None)`` also.
:rtype: str | None
"""
return_type = self.signature.return_type
none_type = type(None)
if return_type is not None and return_type is not none_type:
return return_type.__name__ | python | def returns(self):
"""The return type for this method in a JSON-compatible format.
This handles the special case of ``None`` which allows ``type(None)`` also.
:rtype: str | None
"""
return_type = self.signature.return_type
none_type = type(None)
if return_type is not None and return_type is not none_type:
return return_type.__name__ | [
"def",
"returns",
"(",
"self",
")",
":",
"return_type",
"=",
"self",
".",
"signature",
".",
"return_type",
"none_type",
"=",
"type",
"(",
"None",
")",
"if",
"return_type",
"is",
"not",
"None",
"and",
"return_type",
"is",
"not",
"none_type",
":",
"return",
"return_type",
".",
"__name__"
] | The return type for this method in a JSON-compatible format.
This handles the special case of ``None`` which allows ``type(None)`` also.
:rtype: str | None | [
"The",
"return",
"type",
"for",
"this",
"method",
"in",
"a",
"JSON",
"-",
"compatible",
"format",
"."
] | 274218fcd236ff9643506caa629029c9ba25a0fb | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/method_info.py#L59-L69 | train |
palantir/typedjsonrpc | typedjsonrpc/method_info.py | MethodSignature.create | def create(parameter_names, parameter_types, return_type):
"""Returns a signature object ensuring order of parameter names and types.
:param parameter_names: A list of ordered parameter names
:type parameter_names: list[str]
:param parameter_types: A dictionary of parameter names to types
:type parameter_types: dict[str, type]
:param return_type: The type the function returns
:type return_type: type
:rtype: MethodSignature
"""
ordered_pairs = [(name, parameter_types[name]) for name in parameter_names]
return MethodSignature(ordered_pairs, return_type) | python | def create(parameter_names, parameter_types, return_type):
"""Returns a signature object ensuring order of parameter names and types.
:param parameter_names: A list of ordered parameter names
:type parameter_names: list[str]
:param parameter_types: A dictionary of parameter names to types
:type parameter_types: dict[str, type]
:param return_type: The type the function returns
:type return_type: type
:rtype: MethodSignature
"""
ordered_pairs = [(name, parameter_types[name]) for name in parameter_names]
return MethodSignature(ordered_pairs, return_type) | [
"def",
"create",
"(",
"parameter_names",
",",
"parameter_types",
",",
"return_type",
")",
":",
"ordered_pairs",
"=",
"[",
"(",
"name",
",",
"parameter_types",
"[",
"name",
"]",
")",
"for",
"name",
"in",
"parameter_names",
"]",
"return",
"MethodSignature",
"(",
"ordered_pairs",
",",
"return_type",
")"
] | Returns a signature object ensuring order of parameter names and types.
:param parameter_names: A list of ordered parameter names
:type parameter_names: list[str]
:param parameter_types: A dictionary of parameter names to types
:type parameter_types: dict[str, type]
:param return_type: The type the function returns
:type return_type: type
:rtype: MethodSignature | [
"Returns",
"a",
"signature",
"object",
"ensuring",
"order",
"of",
"parameter",
"names",
"and",
"types",
"."
] | 274218fcd236ff9643506caa629029c9ba25a0fb | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/typedjsonrpc/method_info.py#L90-L102 | train |
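
A usage example for `MethodSignature.create` above; the `parameter_types` and `return_type` attributes are confirmed by the `params` and `returns` properties in the earlier records:

from typedjsonrpc.method_info import MethodSignature  # assumption: typedjsonrpc installed

sig = MethodSignature.create(["x", "y"], {"x": int, "y": str}, bool)
print(sig.parameter_types)  # -> [('x', <class 'int'>), ('y', <class 'str'>)]
print(sig.return_type)      # -> <class 'bool'>
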
OpenTreeOfLife/peyotl | peyotl/nexson_syntax/nexml2nexson.py | Nexml2Nexson._hbf_handle_child_elements | def _hbf_handle_child_elements(self, obj, ntl):
"""
Indirect recursion through _gen_hbf_el
"""
# accumulate a list of the children names in ko, and
        # a dictionary of tag to xml elements.
# repetition of a tag means that it will map to a list of
# xml elements
cd = {}
ko = []
ks = set()
for child in ntl:
k = child.nodeName
if k == 'meta' and (not self._badgerfish_style_conversion):
matk, matv = self._transform_meta_key_value(child)
if matk is not None:
_add_value_to_dict_bf(obj, matk, matv)
else:
if k not in ks:
ko.append(k)
ks.add(k)
_add_value_to_dict_bf(cd, k, child)
# Converts the child XML elements to dicts by recursion and
# adds these to the dict.
for k in ko:
v = _index_list_of_values(cd, k)
dcl = []
ct = None
for xc in v:
ct, dc = self._gen_hbf_el(xc)
dcl.append(dc)
            # this assertion will trip if the hacky stripping of namespaces
# results in a name clash among the tags of the children
assert ct not in obj
obj[ct] = dcl
# delete redundant about attributes that are used in XML, but not JSON (last rule of HoneyBadgerFish)
_cull_redundant_about(obj)
return obj | python | def _hbf_handle_child_elements(self, obj, ntl):
"""
Indirect recursion through _gen_hbf_el
"""
# accumulate a list of the children names in ko, and
        # a dictionary of tag to xml elements.
# repetition of a tag means that it will map to a list of
# xml elements
cd = {}
ko = []
ks = set()
for child in ntl:
k = child.nodeName
if k == 'meta' and (not self._badgerfish_style_conversion):
matk, matv = self._transform_meta_key_value(child)
if matk is not None:
_add_value_to_dict_bf(obj, matk, matv)
else:
if k not in ks:
ko.append(k)
ks.add(k)
_add_value_to_dict_bf(cd, k, child)
# Converts the child XML elements to dicts by recursion and
# adds these to the dict.
for k in ko:
v = _index_list_of_values(cd, k)
dcl = []
ct = None
for xc in v:
ct, dc = self._gen_hbf_el(xc)
dcl.append(dc)
            # this assertion will trip if the hacky stripping of namespaces
# results in a name clash among the tags of the children
assert ct not in obj
obj[ct] = dcl
# delete redundant about attributes that are used in XML, but not JSON (last rule of HoneyBadgerFish)
_cull_redundant_about(obj)
return obj | [
"def",
"_hbf_handle_child_elements",
"(",
"self",
",",
"obj",
",",
"ntl",
")",
":",
"# accumulate a list of the children names in ko, and",
"# the a dictionary of tag to xml elements.",
"# repetition of a tag means that it will map to a list of",
"# xml elements",
"cd",
"=",
"{",
"}",
"ko",
"=",
"[",
"]",
"ks",
"=",
"set",
"(",
")",
"for",
"child",
"in",
"ntl",
":",
"k",
"=",
"child",
".",
"nodeName",
"if",
"k",
"==",
"'meta'",
"and",
"(",
"not",
"self",
".",
"_badgerfish_style_conversion",
")",
":",
"matk",
",",
"matv",
"=",
"self",
".",
"_transform_meta_key_value",
"(",
"child",
")",
"if",
"matk",
"is",
"not",
"None",
":",
"_add_value_to_dict_bf",
"(",
"obj",
",",
"matk",
",",
"matv",
")",
"else",
":",
"if",
"k",
"not",
"in",
"ks",
":",
"ko",
".",
"append",
"(",
"k",
")",
"ks",
".",
"add",
"(",
"k",
")",
"_add_value_to_dict_bf",
"(",
"cd",
",",
"k",
",",
"child",
")",
"# Converts the child XML elements to dicts by recursion and",
"# adds these to the dict.",
"for",
"k",
"in",
"ko",
":",
"v",
"=",
"_index_list_of_values",
"(",
"cd",
",",
"k",
")",
"dcl",
"=",
"[",
"]",
"ct",
"=",
"None",
"for",
"xc",
"in",
"v",
":",
"ct",
",",
"dc",
"=",
"self",
".",
"_gen_hbf_el",
"(",
"xc",
")",
"dcl",
".",
"append",
"(",
"dc",
")",
"# this assertion will trip is the hacky stripping of namespaces",
"# results in a name clash among the tags of the children",
"assert",
"ct",
"not",
"in",
"obj",
"obj",
"[",
"ct",
"]",
"=",
"dcl",
"# delete redundant about attributes that are used in XML, but not JSON (last rule of HoneyBadgerFish)",
"_cull_redundant_about",
"(",
"obj",
")",
"return",
"obj"
] | Indirect recursion through _gen_hbf_el | [
"Indirect",
"recursion",
"through",
"_gen_hbf_el"
] | 5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0 | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/nexml2nexson.py#L169-L208 | train |
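
`_add_value_to_dict_bf` is used but not shown in this record; based on the comment that a repeated tag maps to a list, a plausible sketch of that accumulation rule (the name and behavior are inferred, not taken from the source):

def add_value_to_dict_bf(d, k, v):
    # First occurrence stores the bare value; repeats promote it to a list.
    existing = d.get(k)
    if existing is None:
        d[k] = v
    elif isinstance(existing, list):
        existing.append(v)
    else:
        d[k] = [existing, v]

d = {}
for v in ("a", "b", "c"):
    add_value_to_dict_bf(d, "tag", v)
print(d)  # -> {'tag': ['a', 'b', 'c']}
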
CybOXProject/mixbox | mixbox/xml.py | get_xml_parser | def get_xml_parser(encoding=None):
"""Returns an ``etree.ETCompatXMLParser`` instance."""
parser = etree.ETCompatXMLParser(
huge_tree=True,
remove_comments=True,
strip_cdata=False,
remove_blank_text=True,
resolve_entities=False,
encoding=encoding
)
return parser | python | def get_xml_parser(encoding=None):
"""Returns an ``etree.ETCompatXMLParser`` instance."""
parser = etree.ETCompatXMLParser(
huge_tree=True,
remove_comments=True,
strip_cdata=False,
remove_blank_text=True,
resolve_entities=False,
encoding=encoding
)
return parser | [
"def",
"get_xml_parser",
"(",
"encoding",
"=",
"None",
")",
":",
"parser",
"=",
"etree",
".",
"ETCompatXMLParser",
"(",
"huge_tree",
"=",
"True",
",",
"remove_comments",
"=",
"True",
",",
"strip_cdata",
"=",
"False",
",",
"remove_blank_text",
"=",
"True",
",",
"resolve_entities",
"=",
"False",
",",
"encoding",
"=",
"encoding",
")",
"return",
"parser"
] | Returns an ``etree.ETCompatXMLParser`` instance. | [
"Returns",
"an",
"etree",
".",
"ETCompatXMLParser",
"instance",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/xml.py#L34-L45 | train |
CybOXProject/mixbox | mixbox/xml.py | get_etree_root | def get_etree_root(doc, encoding=None):
"""Returns an instance of lxml.etree._Element for the given `doc` input.
Args:
doc: The input XML document. Can be an instance of
``lxml.etree._Element``, ``lxml.etree._ElementTree``, a file-like
object, or a string filename.
encoding: The character encoding of `doc`. If ``None``, an attempt
will be made to determine the character encoding by the XML
parser.
Returns:
An ``lxml.etree._Element`` instance for `doc`.
Raises:
IOError: If `doc` cannot be found.
lxml.ParseError: If `doc` is a malformed XML document.
"""
tree = get_etree(doc, encoding)
root = tree.getroot()
return root | python | def get_etree_root(doc, encoding=None):
"""Returns an instance of lxml.etree._Element for the given `doc` input.
Args:
doc: The input XML document. Can be an instance of
``lxml.etree._Element``, ``lxml.etree._ElementTree``, a file-like
object, or a string filename.
encoding: The character encoding of `doc`. If ``None``, an attempt
will be made to determine the character encoding by the XML
parser.
Returns:
An ``lxml.etree._Element`` instance for `doc`.
Raises:
IOError: If `doc` cannot be found.
lxml.ParseError: If `doc` is a malformed XML document.
"""
tree = get_etree(doc, encoding)
root = tree.getroot()
return root | [
"def",
"get_etree_root",
"(",
"doc",
",",
"encoding",
"=",
"None",
")",
":",
"tree",
"=",
"get_etree",
"(",
"doc",
",",
"encoding",
")",
"root",
"=",
"tree",
".",
"getroot",
"(",
")",
"return",
"root"
] | Returns an instance of lxml.etree._Element for the given `doc` input.
Args:
doc: The input XML document. Can be an instance of
``lxml.etree._Element``, ``lxml.etree._ElementTree``, a file-like
object, or a string filename.
encoding: The character encoding of `doc`. If ``None``, an attempt
will be made to determine the character encoding by the XML
parser.
Returns:
An ``lxml.etree._Element`` instance for `doc`.
Raises:
IOError: If `doc` cannot be found.
lxml.ParseError: If `doc` is a malformed XML document. | [
"Returns",
"an",
"instance",
"of",
"lxml",
".",
"etree",
".",
"_Element",
"for",
"the",
"given",
"doc",
"input",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/xml.py#L58-L80 | train |
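A hedged example of reading a root element from a file-like object, as the docstring describes (document content invented):

import io
from mixbox.xml import get_etree_root

doc = io.BytesIO(b"<report><entry id='1'/></report>")
root = get_etree_root(doc)
print(root.tag)           # 'report'
print(root[0].get('id'))  # '1'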
CybOXProject/mixbox | mixbox/xml.py | strip_cdata | def strip_cdata(text):
"""Removes all CDATA blocks from `text` if it contains them.
Note:
If the input text contains escaped XML characters outside of a
CDATA block, they will be unescaped.
Args:
text: A string containing one or more CDATA blocks.
Returns:
An XML unescaped string with CDATA block qualifiers removed.
"""
if not is_cdata(text):
return text
xml = "<e>{0}</e>".format(text)
node = etree.fromstring(xml)
return node.text | python | def strip_cdata(text):
"""Removes all CDATA blocks from `text` if it contains them.
Note:
If the input text contains escaped XML characters outside of a
CDATA block, they will be unescaped.
Args:
text: A string containing one or more CDATA blocks.
Returns:
An XML unescaped string with CDATA block qualifiers removed.
"""
if not is_cdata(text):
return text
xml = "<e>{0}</e>".format(text)
node = etree.fromstring(xml)
return node.text | [
"def",
"strip_cdata",
"(",
"text",
")",
":",
"if",
"not",
"is_cdata",
"(",
"text",
")",
":",
"return",
"text",
"xml",
"=",
"\"<e>{0}</e>\"",
".",
"format",
"(",
"text",
")",
"node",
"=",
"etree",
".",
"fromstring",
"(",
"xml",
")",
"return",
"node",
".",
"text"
] | Removes all CDATA blocks from `text` if it contains them.
Note:
If the input text contains escaped XML characters outside of a
CDATA block, they will be unescaped.
Args:
text: A string containing one or more CDATA blocks.
Returns:
An XML unescaped string with CDATA block qualifiers removed. | [
"Removes",
"all",
"CDATA",
"blocks",
"from",
"text",
"if",
"it",
"contains",
"them",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/xml.py#L114-L133 | train |
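A quick check of the CDATA-stripping behaviour described above (assumes is_cdata recognises the block; inputs invented):

from mixbox.xml import strip_cdata

print(strip_cdata("<![CDATA[<tag>&value</tag>]]>"))  # <tag>&value</tag>
print(strip_cdata("plain text"))                     # unchanged: no CDATA block present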
CybOXProject/mixbox | mixbox/typedlist.py | TypedList._is_valid | def _is_valid(self, value):
"""Return True if the input value is valid for insertion into the
inner list.
Args:
value: An object about to be inserted.
"""
# Entities have an istypeof method that can perform more sophisticated
# type checking.
if hasattr(self._type, "istypeof"):
return self._type.istypeof(value)
else:
return isinstance(value, self._type) | python | def _is_valid(self, value):
"""Return True if the input value is valid for insertion into the
inner list.
Args:
value: An object about to be inserted.
"""
# Entities have an istypeof method that can perform more sophisticated
# type checking.
if hasattr(self._type, "istypeof"):
return self._type.istypeof(value)
else:
return isinstance(value, self._type) | [
"def",
"_is_valid",
"(",
"self",
",",
"value",
")",
":",
"# Entities have an istypeof method that can perform more sophisticated",
"# type checking.",
"if",
"hasattr",
"(",
"self",
".",
"_type",
",",
"\"istypeof\"",
")",
":",
"return",
"self",
".",
"_type",
".",
"istypeof",
"(",
"value",
")",
"else",
":",
"return",
"isinstance",
"(",
"value",
",",
"self",
".",
"_type",
")"
] | Return True if the input value is valid for insertion into the
inner list.
Args:
value: An object about to be inserted. | [
"Return",
"True",
"if",
"the",
"input",
"value",
"is",
"valid",
"for",
"insertion",
"into",
"the",
"inner",
"list",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/typedlist.py#L40-L53 | train |
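The duck-typed check above can be sketched standalone; Watchable and its istypeof hook here are hypothetical stand-ins for mixbox Entity classes:

class Watchable:
    @classmethod
    def istypeof(cls, value):
        # accept anything exposing a watch() method, not just subclasses
        return callable(getattr(value, 'watch', None))

def is_valid(type_, value):
    if hasattr(type_, 'istypeof'):
        return type_.istypeof(value)
    return isinstance(value, type_)

print(is_valid(int, 3))        # True: plain isinstance path
print(is_valid(Watchable, 3))  # False: no watch() method, istypeof path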
CybOXProject/mixbox | mixbox/typedlist.py | TypedList._fix_value | def _fix_value(self, value):
"""Attempt to coerce value into the correct type.
Subclasses can override this function.
"""
try:
return self._castfunc(value)
except:
error = "Can't put '{0}' ({1}) into a {2}. Expected a {3} object."
error = error.format(
value, # Input value
type(value), # Type of input value
type(self), # Type of collection
self._type # Expected type of input value
)
six.reraise(TypeError, TypeError(error), sys.exc_info()[-1]) | python | def _fix_value(self, value):
"""Attempt to coerce value into the correct type.
Subclasses can override this function.
"""
try:
return self._castfunc(value)
except:
error = "Can't put '{0}' ({1}) into a {2}. Expected a {3} object."
error = error.format(
value, # Input value
type(value), # Type of input value
type(self), # Type of collection
self._type # Expected type of input value
)
six.reraise(TypeError, TypeError(error), sys.exc_info()[-1]) | [
"def",
"_fix_value",
"(",
"self",
",",
"value",
")",
":",
"try",
":",
"return",
"self",
".",
"_castfunc",
"(",
"value",
")",
"except",
":",
"error",
"=",
"\"Can't put '{0}' ({1}) into a {2}. Expected a {3} object.\"",
"error",
"=",
"error",
".",
"format",
"(",
"value",
",",
"# Input value",
"type",
"(",
"value",
")",
",",
"# Type of input value",
"type",
"(",
"self",
")",
",",
"# Type of collection",
"self",
".",
"_type",
"# Expected type of input value",
")",
"six",
".",
"reraise",
"(",
"TypeError",
",",
"TypeError",
"(",
"error",
")",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"-",
"1",
"]",
")"
] | Attempt to coerce value into the correct type.
Subclasses can override this function. | [
"Attempt",
"to",
"coerce",
"value",
"into",
"the",
"correct",
"type",
"."
] | 9097dae7a433f5b98c18171c4a5598f69a7d30af | https://github.com/CybOXProject/mixbox/blob/9097dae7a433f5b98c18171c4a5598f69a7d30af/mixbox/typedlist.py#L55-L70 | train |
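The six.reraise call above swaps in a friendlier TypeError while keeping the original traceback; a minimal sketch of the same pattern, with the cast function invented:

import sys
import six

def fix_value(value, castfunc=int):
    try:
        return castfunc(value)
    except Exception:
        error = "Can't coerce {0!r} with {1}".format(value, castfunc)
        # re-raise as TypeError but preserve the traceback of the failed cast
        six.reraise(TypeError, TypeError(error), sys.exc_info()[-1])

print(fix_value("42"))   # 42
# fix_value("oops")      # raises TypeError; traceback still points at int("oops")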
hsolbrig/pyjsg | pyjsg/parser_impl/jsg_pairdef_parser.py | JSGPairDef.members_entries | def members_entries(self, all_are_optional: Optional[bool] = False) -> List[Tuple[str, str]]:
""" Generate a list quoted raw name, signature type entries for this pairdef, recursively traversing
reference types
:param all_are_optional: If true, all types are forced optional
:return: raw name/ signature type for all elements in this pair
"""
if self._type_reference:
rval: List[Tuple[str, str]] = []
for n, t in self._context.reference(self._type_reference).members_entries(all_are_optional):
rval.append((n, self._ebnf.signature_cardinality(t, all_are_optional).format(name=n)))
return rval
else:
sig = self._ebnf.signature_cardinality(self._typ.reference_type(), all_are_optional)
return [(name, sig.format(name=name)) for name in self._names] | python | def members_entries(self, all_are_optional: Optional[bool] = False) -> List[Tuple[str, str]]:
""" Generate a list quoted raw name, signature type entries for this pairdef, recursively traversing
reference types
:param all_are_optional: If true, all types are forced optional
:return: raw name/ signature type for all elements in this pair
"""
if self._type_reference:
rval: List[Tuple[str, str]] = []
for n, t in self._context.reference(self._type_reference).members_entries(all_are_optional):
rval.append((n, self._ebnf.signature_cardinality(t, all_are_optional).format(name=n)))
return rval
else:
sig = self._ebnf.signature_cardinality(self._typ.reference_type(), all_are_optional)
return [(name, sig.format(name=name)) for name in self._names] | [
"def",
"members_entries",
"(",
"self",
",",
"all_are_optional",
":",
"Optional",
"[",
"bool",
"]",
"=",
"False",
")",
"->",
"List",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
":",
"if",
"self",
".",
"_type_reference",
":",
"rval",
":",
"List",
"[",
"Tuple",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"[",
"]",
"for",
"n",
",",
"t",
"in",
"self",
".",
"_context",
".",
"reference",
"(",
"self",
".",
"_type_reference",
")",
".",
"members_entries",
"(",
"all_are_optional",
")",
":",
"rval",
".",
"append",
"(",
"(",
"n",
",",
"self",
".",
"_ebnf",
".",
"signature_cardinality",
"(",
"t",
",",
"all_are_optional",
")",
".",
"format",
"(",
"name",
"=",
"n",
")",
")",
")",
"return",
"rval",
"else",
":",
"sig",
"=",
"self",
".",
"_ebnf",
".",
"signature_cardinality",
"(",
"self",
".",
"_typ",
".",
"reference_type",
"(",
")",
",",
"all_are_optional",
")",
"return",
"[",
"(",
"name",
",",
"sig",
".",
"format",
"(",
"name",
"=",
"name",
")",
")",
"for",
"name",
"in",
"self",
".",
"_names",
"]"
] | Generate a list of quoted raw name, signature type entries for this pairdef, recursively traversing
reference types
:param all_are_optional: If true, all types are forced optional
:return: raw name/ signature type for all elements in this pair | [
"Generate",
"a",
"list",
"quoted",
"raw",
"name",
"signature",
"type",
"entries",
"for",
"this",
"pairdef",
"recursively",
"traversing",
"reference",
"types"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_pairdef_parser.py#L43-L57 | train |
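The name/signature pairing in the non-reference branch boils down to formatting one cardinality template per declared name; a toy illustration (the template shape is invented, not pyjsg's real output):

sig = "{name}: typing.Optional[str]"   # hypothetical result of signature_cardinality
names = ["given", "family"]
print([(n, sig.format(name=n)) for n in names])
# [('given', 'given: typing.Optional[str]'), ('family', 'family: typing.Optional[str]')]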
hsolbrig/pyjsg | pyjsg/parser_impl/jsg_pairdef_parser.py | JSGPairDef._initializer_for | def _initializer_for(self, raw_name: str, cooked_name: str, prefix: Optional[str]) -> List[str]:
"""Create an initializer entry for the entry
:param raw_name: name unadjusted for python compatibility.
:param cooked_name: name that may or may not be python compatible
:param prefix: owner of the element - used when objects passed as arguments
:return: Initialization statements
"""
mt_val = self._ebnf.mt_value(self._typ)
rval = []
if is_valid_python(raw_name):
if prefix:
# If a prefix exists, the input has already been processed - no if clause is necessary
rval.append(f"self.{raw_name} = {prefix}.{raw_name}")
else:
cons = raw_name
rval.append(f"self.{raw_name} = {cons}")
elif is_valid_python(cooked_name):
if prefix:
rval.append(f"setattr(self, '{raw_name}', getattr({prefix}, '{raw_name}')")
else:
cons = f"{cooked_name} if {cooked_name} is not {mt_val} else _kwargs.get('{raw_name}', {mt_val})"
rval.append(f"setattr(self, '{raw_name}', {cons})")
else:
getter = f"_kwargs.get('{raw_name}', {mt_val})"
if prefix:
rval.append(f"setattr(self, '{raw_name}', getattr({prefix}, '{getter}')")
else:
rval.append(f"setattr(self, '{raw_name}', {getter})")
return rval | python | def _initializer_for(self, raw_name: str, cooked_name: str, prefix: Optional[str]) -> List[str]:
"""Create an initializer entry for the entry
:param raw_name: name unadjusted for python compatibility.
:param cooked_name: name that may or may not be python compatible
:param prefix: owner of the element - used when objects passed as arguments
:return: Initialization statements
"""
mt_val = self._ebnf.mt_value(self._typ)
rval = []
if is_valid_python(raw_name):
if prefix:
# If a prefix exists, the input has already been processed - no if clause is necessary
rval.append(f"self.{raw_name} = {prefix}.{raw_name}")
else:
cons = raw_name
rval.append(f"self.{raw_name} = {cons}")
elif is_valid_python(cooked_name):
if prefix:
rval.append(f"setattr(self, '{raw_name}', getattr({prefix}, '{raw_name}')")
else:
cons = f"{cooked_name} if {cooked_name} is not {mt_val} else _kwargs.get('{raw_name}', {mt_val})"
rval.append(f"setattr(self, '{raw_name}', {cons})")
else:
getter = f"_kwargs.get('{raw_name}', {mt_val})"
if prefix:
rval.append(f"setattr(self, '{raw_name}', getattr({prefix}, '{getter}')")
else:
rval.append(f"setattr(self, '{raw_name}', {getter})")
return rval | [
"def",
"_initializer_for",
"(",
"self",
",",
"raw_name",
":",
"str",
",",
"cooked_name",
":",
"str",
",",
"prefix",
":",
"Optional",
"[",
"str",
"]",
")",
"->",
"List",
"[",
"str",
"]",
":",
"mt_val",
"=",
"self",
".",
"_ebnf",
".",
"mt_value",
"(",
"self",
".",
"_typ",
")",
"rval",
"=",
"[",
"]",
"if",
"is_valid_python",
"(",
"raw_name",
")",
":",
"if",
"prefix",
":",
"# If a prefix exists, the input has already been processed - no if clause is necessary",
"rval",
".",
"append",
"(",
"f\"self.{raw_name} = {prefix}.{raw_name}\"",
")",
"else",
":",
"cons",
"=",
"raw_name",
"rval",
".",
"append",
"(",
"f\"self.{raw_name} = {cons}\"",
")",
"elif",
"is_valid_python",
"(",
"cooked_name",
")",
":",
"if",
"prefix",
":",
"rval",
".",
"append",
"(",
"f\"setattr(self, '{raw_name}', getattr({prefix}, '{raw_name}')\"",
")",
"else",
":",
"cons",
"=",
"f\"{cooked_name} if {cooked_name} is not {mt_val} else _kwargs.get('{raw_name}', {mt_val})\"",
"rval",
".",
"append",
"(",
"f\"setattr(self, '{raw_name}', {cons})\"",
")",
"else",
":",
"getter",
"=",
"f\"_kwargs.get('{raw_name}', {mt_val})\"",
"if",
"prefix",
":",
"rval",
".",
"append",
"(",
"f\"setattr(self, '{raw_name}', getattr({prefix}, '{getter}')\"",
")",
"else",
":",
"rval",
".",
"append",
"(",
"f\"setattr(self, '{raw_name}', {getter})\"",
")",
"return",
"rval"
] | Create an initializer entry for the entry
:param raw_name: name unadjusted for python compatibility.
:param cooked_name: name that may or may not be python compatible
:param prefix: owner of the element - used when objects passed as arguments
:return: Initialization statements | [
"Create",
"an",
"initializer",
"entry",
"for",
"the",
"entry"
] | 9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7 | https://github.com/hsolbrig/pyjsg/blob/9b2b8fa8e3b8448abe70b09f804a79f0f31b32b7/pyjsg/parser_impl/jsg_pairdef_parser.py#L96-L129 | train |
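The branching above hinges on is_valid_python, which pyjsg imports from elsewhere; a plausible stand-in (an assumption, not the library's exact code) is an identifier-plus-keyword check:

import keyword

def is_valid_python(name: str) -> bool:
    # usable as a bare attribute name only if it is a non-keyword identifier
    return name.isidentifier() and not keyword.iskeyword(name)

print(is_valid_python("height"))  # True  -> direct self.height assignment
print(is_valid_python("class"))   # False -> falls back to setattr()
print(is_valid_python("not-ok"))  # False -> fetched via _kwargs.get()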
PSPC-SPAC-buyandsell/von_agent | von_agent/agent/holder_prover.py | HolderProver._assert_link_secret | def _assert_link_secret(self, action: str):
"""
Raise AbsentLinkSecret if link secret is not set.
:param action: action requiring link secret
"""
if self._link_secret is None:
LOGGER.debug('HolderProver._assert_link_secret: action %s requires link secret but it is not set', action)
raise AbsentLinkSecret('Action {} requires link secret but it is not set'.format(action)) | python | def _assert_link_secret(self, action: str):
"""
Raise AbsentLinkSecret if link secret is not set.
:param action: action requiring link secret
"""
if self._link_secret is None:
LOGGER.debug('HolderProver._assert_link_secret: action %s requires link secret but it is not set', action)
raise AbsentLinkSecret('Action {} requires link secret but it is not set'.format(action)) | [
"def",
"_assert_link_secret",
"(",
"self",
",",
"action",
":",
"str",
")",
":",
"if",
"self",
".",
"_link_secret",
"is",
"None",
":",
"LOGGER",
".",
"debug",
"(",
"'HolderProver._assert_link_secret: action %s requires link secret but it is not set'",
",",
"action",
")",
"raise",
"AbsentLinkSecret",
"(",
"'Action {} requires link secret but it is not set'",
".",
"format",
"(",
"action",
")",
")"
] | Raise AbsentLinkSecret if link secret is not set.
:param action: action requiring link secret | [
"Raise",
"AbsentLinkSecret",
"if",
"link",
"secret",
"is",
"not",
"set",
"."
] | 0b1c17cca3bd178b6e6974af84dbac1dfce5cf45 | https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L90-L99 | train |
PSPC-SPAC-buyandsell/von_agent | von_agent/agent/holder_prover.py | HolderProver.rev_regs | def rev_regs(self) -> list:
"""
Return list of revocation registry identifiers for which HolderProver has tails files.
:return: list of revocation registry identifiers for which HolderProver has tails files
"""
LOGGER.debug('HolderProver.rev_regs >>>')
rv = [basename(f) for f in Tails.links(self._dir_tails)]
LOGGER.debug('HolderProver.rev_regs <<< %s', rv)
return rv | python | def rev_regs(self) -> list:
"""
Return list of revocation registry identifiers for which HolderProver has tails files.
:return: list of revocation registry identifiers for which HolderProver has tails files
"""
LOGGER.debug('HolderProver.rev_regs >>>')
rv = [basename(f) for f in Tails.links(self._dir_tails)]
LOGGER.debug('HolderProver.rev_regs <<< %s', rv)
return rv | [
"def",
"rev_regs",
"(",
"self",
")",
"->",
"list",
":",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.rev_regs >>>'",
")",
"rv",
"=",
"[",
"basename",
"(",
"f",
")",
"for",
"f",
"in",
"Tails",
".",
"links",
"(",
"self",
".",
"_dir_tails",
")",
"]",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.rev_regs <<< %s'",
",",
"rv",
")",
"return",
"rv"
] | Return list of revocation registry identifiers for which HolderProver has tails files.
:return: list of revocation registry identifiers for which HolderProver has tails files | [
"Return",
"list",
"of",
"revocation",
"registry",
"identifiers",
"for",
"which",
"HolderProver",
"has",
"tails",
"files",
"."
] | 0b1c17cca3bd178b6e6974af84dbac1dfce5cf45 | https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L539-L550 | train |
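Tails.links is not shown here, but the property amounts to collecting the basenames of per-registry link files; a rough standalone equivalent under an assumed tails directory layout:

import os
from glob import glob

dir_tails = '/tmp/demo-tails'  # hypothetical tails directory
rr_ids = [os.path.basename(f) for f in glob(os.path.join(dir_tails, '*'))]
print(rr_ids)                  # one entry per revocation registry identifier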
PSPC-SPAC-buyandsell/von_agent | von_agent/agent/holder_prover.py | HolderProver.create_cred_req | async def create_cred_req(self, cred_offer_json: str, cd_id: str) -> (str, str):
"""
Create credential request as HolderProver and store in wallet; return credential request json and metadata json.
Raise AbsentLinkSecret if link secret not set.
:param cred_offer_json: credential offer json
:param cd_id: credential definition identifier
:return: cred request json and corresponding metadata json as created and stored in wallet
"""
LOGGER.debug('HolderProver.create_cred_req >>> cred_offer_json: %s, cd_id: %s', cred_offer_json, cd_id)
self._assert_link_secret('create_cred_req')
# Check that ledger has schema on ledger where cred def expects - in case of pool reset with extant wallet
cred_def_json = await self.get_cred_def(cd_id)
schema_seq_no = int(json.loads(cred_def_json)['schemaId'])
schema_json = await self.get_schema(schema_seq_no)
schema = json.loads(schema_json)
if not schema:
LOGGER.debug(
'HolderProver.create_cred_req: <!< absent schema@#%s, cred req may be for another ledger',
schema_seq_no)
raise AbsentSchema('Absent schema@#{}, cred req may be for another ledger'.format(schema_seq_no))
(cred_req_json, cred_req_metadata_json) = await anoncreds.prover_create_credential_req(
self.wallet.handle,
self.did,
cred_offer_json,
cred_def_json,
self._link_secret)
rv = (cred_req_json, cred_req_metadata_json)
LOGGER.debug('HolderProver.create_cred_req <<< %s', rv)
return rv | python | async def create_cred_req(self, cred_offer_json: str, cd_id: str) -> (str, str):
"""
Create credential request as HolderProver and store in wallet; return credential request json and metadata json.
Raise AbsentLinkSecret if link secret not set.
:param cred_offer_json: credential offer json
:param cd_id: credential definition identifier
:return: cred request json and corresponding metadata json as created and stored in wallet
"""
LOGGER.debug('HolderProver.create_cred_req >>> cred_offer_json: %s, cd_id: %s', cred_offer_json, cd_id)
self._assert_link_secret('create_cred_req')
# Check that ledger has schema on ledger where cred def expects - in case of pool reset with extant wallet
cred_def_json = await self.get_cred_def(cd_id)
schema_seq_no = int(json.loads(cred_def_json)['schemaId'])
schema_json = await self.get_schema(schema_seq_no)
schema = json.loads(schema_json)
if not schema:
LOGGER.debug(
'HolderProver.create_cred_req: <!< absent schema@#%s, cred req may be for another ledger',
schema_seq_no)
raise AbsentSchema('Absent schema@#{}, cred req may be for another ledger'.format(schema_seq_no))
(cred_req_json, cred_req_metadata_json) = await anoncreds.prover_create_credential_req(
self.wallet.handle,
self.did,
cred_offer_json,
cred_def_json,
self._link_secret)
rv = (cred_req_json, cred_req_metadata_json)
LOGGER.debug('HolderProver.create_cred_req <<< %s', rv)
return rv | [
"async",
"def",
"create_cred_req",
"(",
"self",
",",
"cred_offer_json",
":",
"str",
",",
"cd_id",
":",
"str",
")",
"->",
"(",
"str",
",",
"str",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.create_cred_req >>> cred_offer_json: %s, cd_id: %s'",
",",
"cred_offer_json",
",",
"cd_id",
")",
"self",
".",
"_assert_link_secret",
"(",
"'create_cred_req'",
")",
"# Check that ledger has schema on ledger where cred def expects - in case of pool reset with extant wallet",
"cred_def_json",
"=",
"await",
"self",
".",
"get_cred_def",
"(",
"cd_id",
")",
"schema_seq_no",
"=",
"int",
"(",
"json",
".",
"loads",
"(",
"cred_def_json",
")",
"[",
"'schemaId'",
"]",
")",
"schema_json",
"=",
"await",
"self",
".",
"get_schema",
"(",
"schema_seq_no",
")",
"schema",
"=",
"json",
".",
"loads",
"(",
"schema_json",
")",
"if",
"not",
"schema",
":",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.create_cred_req: <!< absent schema@#%s, cred req may be for another ledger'",
",",
"schema_seq_no",
")",
"raise",
"AbsentSchema",
"(",
"'Absent schema@#{}, cred req may be for another ledger'",
".",
"format",
"(",
"schema_seq_no",
")",
")",
"(",
"cred_req_json",
",",
"cred_req_metadata_json",
")",
"=",
"await",
"anoncreds",
".",
"prover_create_credential_req",
"(",
"self",
".",
"wallet",
".",
"handle",
",",
"self",
".",
"did",
",",
"cred_offer_json",
",",
"cred_def_json",
",",
"self",
".",
"_link_secret",
")",
"rv",
"=",
"(",
"cred_req_json",
",",
"cred_req_metadata_json",
")",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.create_cred_req <<< %s'",
",",
"rv",
")",
"return",
"rv"
] | Create credential request as HolderProver and store in wallet; return credential request json and metadata json.
Raise AbsentLinkSecret if link secret not set.
:param cred_offer_json: credential offer json
:param cd_id: credential definition identifier
:return: cred request json and corresponding metadata json as created and stored in wallet | [
"Create",
"credential",
"request",
"as",
"HolderProver",
"and",
"store",
"in",
"wallet",
";",
"return",
"credential",
"json",
"and",
"metadata",
"json",
"."
] | 0b1c17cca3bd178b6e6974af84dbac1dfce5cf45 | https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L578-L612 | train |
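Since the method is a coroutine, callers drive it with an event loop; a hedged sketch in which holder, offer_json, and cd_id are placeholders for objects set up elsewhere with von_agent:

import asyncio

async def request_cred(holder, offer_json, cd_id):
    cred_req_json, metadata_json = await holder.create_cred_req(offer_json, cd_id)
    return cred_req_json, metadata_json

# asyncio.get_event_loop().run_until_complete(request_cred(holder, offer_json, cd_id))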
PSPC-SPAC-buyandsell/von_agent | von_agent/agent/holder_prover.py | HolderProver.load_cache | async def load_cache(self, archive: bool = False) -> int:
"""
Load caches and archive enough to go offline and be able to generate proof
on all credentials in wallet.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:return: cache load event timestamp (epoch seconds)
"""
LOGGER.debug('HolderProver.load_cache >>> archive: %s', archive)
rv = int(time())
box_ids = json.loads(await self.get_box_ids_json())
for s_id in box_ids['schema_id']:
with SCHEMA_CACHE.lock:
await self.get_schema(s_id)
for cd_id in box_ids['cred_def_id']:
with CRED_DEF_CACHE.lock:
await self.get_cred_def(cd_id)
for rr_id in box_ids['rev_reg_id']:
await self._get_rev_reg_def(rr_id)
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
if revo_cache_entry:
try:
await revo_cache_entry.get_delta_json(self._build_rr_delta_json, rv, rv)
except ClosedPool:
LOGGER.warning(
'Holder-Prover %s is offline from pool %s, cannot update revo cache reg delta for %s to %s',
self.wallet.name,
self.pool.name,
rr_id,
rv)
if archive:
Caches.archive(self.dir_cache)
LOGGER.debug('HolderProver.load_cache <<< %s', rv)
return rv | python | async def load_cache(self, archive: bool = False) -> int:
"""
Load caches and archive enough to go offline and be able to generate proof
on all credentials in wallet.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:return: cache load event timestamp (epoch seconds)
"""
LOGGER.debug('HolderProver.load_cache >>> archive: %s', archive)
rv = int(time())
box_ids = json.loads(await self.get_box_ids_json())
for s_id in box_ids['schema_id']:
with SCHEMA_CACHE.lock:
await self.get_schema(s_id)
for cd_id in box_ids['cred_def_id']:
with CRED_DEF_CACHE.lock:
await self.get_cred_def(cd_id)
for rr_id in box_ids['rev_reg_id']:
await self._get_rev_reg_def(rr_id)
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
if revo_cache_entry:
try:
await revo_cache_entry.get_delta_json(self._build_rr_delta_json, rv, rv)
except ClosedPool:
LOGGER.warning(
'Holder-Prover %s is offline from pool %s, cannot update revo cache reg delta for %s to %s',
self.wallet.name,
self.pool.name,
rr_id,
rv)
if archive:
Caches.archive(self.dir_cache)
LOGGER.debug('HolderProver.load_cache <<< %s', rv)
return rv | [
"async",
"def",
"load_cache",
"(",
"self",
",",
"archive",
":",
"bool",
"=",
"False",
")",
"->",
"int",
":",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.load_cache >>> archive: %s'",
",",
"archive",
")",
"rv",
"=",
"int",
"(",
"time",
"(",
")",
")",
"box_ids",
"=",
"json",
".",
"loads",
"(",
"await",
"self",
".",
"get_box_ids_json",
"(",
")",
")",
"for",
"s_id",
"in",
"box_ids",
"[",
"'schema_id'",
"]",
":",
"with",
"SCHEMA_CACHE",
".",
"lock",
":",
"await",
"self",
".",
"get_schema",
"(",
"s_id",
")",
"for",
"cd_id",
"in",
"box_ids",
"[",
"'cred_def_id'",
"]",
":",
"with",
"CRED_DEF_CACHE",
".",
"lock",
":",
"await",
"self",
".",
"get_cred_def",
"(",
"cd_id",
")",
"for",
"rr_id",
"in",
"box_ids",
"[",
"'rev_reg_id'",
"]",
":",
"await",
"self",
".",
"_get_rev_reg_def",
"(",
"rr_id",
")",
"with",
"REVO_CACHE",
".",
"lock",
":",
"revo_cache_entry",
"=",
"REVO_CACHE",
".",
"get",
"(",
"rr_id",
",",
"None",
")",
"if",
"revo_cache_entry",
":",
"try",
":",
"await",
"revo_cache_entry",
".",
"get_delta_json",
"(",
"self",
".",
"_build_rr_delta_json",
",",
"rv",
",",
"rv",
")",
"except",
"ClosedPool",
":",
"LOGGER",
".",
"warning",
"(",
"'Holder-Prover %s is offline from pool %s, cannot update revo cache reg delta for %s to %s'",
",",
"self",
".",
"wallet",
".",
"name",
",",
"self",
".",
"pool",
".",
"name",
",",
"rr_id",
",",
"rv",
")",
"if",
"archive",
":",
"Caches",
".",
"archive",
"(",
"self",
".",
"dir_cache",
")",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.load_cache <<< %s'",
",",
"rv",
")",
"return",
"rv"
] | Load caches and archive enough to go offline and be able to generate proof
on all credentials in wallet.
Return timestamp (epoch seconds) of cache load event, also used as subdirectory
for cache archives.
:return: cache load event timestamp (epoch seconds) | [
"Load",
"caches",
"and",
"archive",
"enough",
"to",
"go",
"offline",
"and",
"be",
"able",
"to",
"generate",
"proof",
"on",
"all",
"credentials",
"in",
"wallet",
"."
] | 0b1c17cca3bd178b6e6974af84dbac1dfce5cf45 | https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L649-L688 | train |
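A usage sketch for going offline: load and archive the caches once while still connected, then prove later from the archive (holder is a placeholder HolderProver wired up elsewhere):

import asyncio

async def prepare_offline(holder):
    # archive=True snapshots schema/cred-def/revocation caches under dir_cache
    timestamp = await holder.load_cache(archive=True)
    return timestamp

# asyncio.get_event_loop().run_until_complete(prepare_offline(holder))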
PSPC-SPAC-buyandsell/von_agent | von_agent/agent/holder_prover.py | HolderProver.get_creds | async def get_creds(self, proof_req_json: str, filt: dict = None, filt_dflt_incl: bool = False) -> (Set[str], str):
"""
Get credentials from HolderProver wallet corresponding to proof request and
filter criteria; return credential identifiers from wallet and credentials json.
Return empty set and empty production for no such credentials.
:param proof_req_json: proof request json as Verifier creates; has entries for proof request's
nonce, name, and version; plus credential's requested attributes, requested predicates. I.e.,
::
{
'nonce': string, # indy-sdk makes no semantic specification on this value
'name': string, # indy-sdk makes no semantic specification on this value
'version': numeric-string, # indy-sdk makes no semantic specification on this value
'requested_attributes': {
'<attr_uuid>': { # aka attr_referent, a proof-request local identifier
'name': string, # attribute name (matches case- and space-insensitively)
'restrictions': [ # optional
{
"schema_id": string, # optional
"schema_issuer_did": string, # optional
"schema_name": string, # optional
"schema_version": string, # optional
"issuer_did": string, # optional
"cred_def_id": string # optional
},
{
... # if more than one restriction given, combined disjunctively (i.e., via OR)
}
],
'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet
'from': int, # optional, epoch seconds
'to': int # optional, epoch seconds
}
},
...
},
'requested_predicates': {
'<pred_uuid>': { # aka predicate_referent, a proof-request local predicate identifier
'name': string, # attribute name (matches case- and space-insensitively)
'p_type': '>=',
'p_value': int, # predicate value
'restrictions': [ # optional
{
"schema_id": string, # optional
"schema_issuer_did": string, # optional
"schema_name": string, # optional
"schema_version": string, # optional
"issuer_did": string, # optional
"cred_def_id": string # optional
},
{
... # if more than one restriction given, combined disjunctively (i.e., via OR)
}
],
'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet
'from': int, # optional, epoch seconds
'to': int # optional, epoch seconds
}
},
...
},
'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet
'from': Optional<int>,
'to': Optional<int>
}
}
:param filt: filter for matching attribute-value pairs and predicates; dict mapping each
cred def id to dict (specify empty dict or none for no filter, matching all)
mapping attributes to values to match or compare. E.g.,
::
{
'Vx4E82R17q...:3:CL:16:0': {
'attr-match': {
'name': 'Alex',
'sex': 'M',
'favouriteDrink': None
},
'minima': { # if both attr-match and minima present, combined conjunctively (i.e., via AND)
'favouriteNumber' : 10,
'score': '100' # nicety: implementation converts to int for caller
},
},
'R17v42T4pk...:3:CL:19:0': {
'attr-match': {
'height': 175,
'birthdate': '1975-11-15' # combined conjunctively (i.e., via AND)
}
},
'Z9ccax812j...:3:CL:27:0': {
'attr-match': {} # match all attributes on this cred def
}
...
}
:param filt_dflt_incl: whether to include (True) all credentials from wallet that filter does not
identify by cred def, or to exclude (False) all such credentials
:return: tuple with (set of referents, creds json for input proof request);
empty set and empty production for no such credential
"""
LOGGER.debug('HolderProver.get_creds >>> proof_req_json: %s, filt: %s', proof_req_json, filt)
if filt is None:
filt = {}
rv = None
creds_json = await anoncreds.prover_get_credentials_for_proof_req(self.wallet.handle, proof_req_json)
creds = json.loads(creds_json)
cred_ids = set()
if filt:
for cd_id in filt:
try:
json.loads(await self.get_cred_def(cd_id))
except AbsentCredDef:
LOGGER.warning('HolderProver.get_creds: ignoring filter criterion, no cred def on %s', cd_id)
filt.pop(cd_id)
for inner_creds in {**creds['attrs'], **creds['predicates']}.values():
for cred in inner_creds: # cred is a dict in a list of dicts
cred_info = cred['cred_info']
if filt:
cred_cd_id = cred_info['cred_def_id']
if cred_cd_id not in filt:
if filt_dflt_incl:
cred_ids.add(cred_info['referent'])
continue
if 'attr-match' in (filt[cred_cd_id] or {}): # maybe filt[cred_cd_id]: None
if not {k: str(filt[cred_cd_id].get('attr-match', {})[k])
for k in filt[cred_cd_id].get('attr-match', {})}.items() <= cred_info['attrs'].items():
continue
if 'minima' in (filt[cred_cd_id] or {}): # maybe filt[cred_cd_id]: None
minima = filt[cred_cd_id].get('minima', {})
try:
if any((attr not in cred_info['attrs'])
or (int(cred_info['attrs'][attr]) < int(minima[attr]))
for attr in minima):
continue
except ValueError:
continue # int conversion failed - reject candidate
cred_ids.add(cred_info['referent'])
else:
cred_ids.add(cred_info['referent'])
if filt:
creds = json.loads(prune_creds_json(creds, cred_ids))
rv = (cred_ids, json.dumps(creds))
LOGGER.debug('HolderProver.get_creds <<< %s', rv)
return rv | python | async def get_creds(self, proof_req_json: str, filt: dict = None, filt_dflt_incl: bool = False) -> (Set[str], str):
"""
Get credentials from HolderProver wallet corresponding to proof request and
filter criteria; return credential identifiers from wallet and credentials json.
Return empty set and empty production for no such credentials.
:param proof_req_json: proof request json as Verifier creates; has entries for proof request's
nonce, name, and version; plus credential's requested attributes, requested predicates. I.e.,
::
{
'nonce': string, # indy-sdk makes no semantic specification on this value
'name': string, # indy-sdk makes no semantic specification on this value
'version': numeric-string, # indy-sdk makes no semantic specification on this value
'requested_attributes': {
'<attr_uuid>': { # aka attr_referent, a proof-request local identifier
'name': string, # attribute name (matches case- and space-insensitively)
'restrictions': [ # optional
{
"schema_id": string, # optional
"schema_issuer_did": string, # optional
"schema_name": string, # optional
"schema_version": string, # optional
"issuer_did": string, # optional
"cred_def_id": string # optional
},
{
... # if more than one restriction given, combined disjunctively (i.e., via OR)
}
],
'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet
'from': int, # optional, epoch seconds
'to': int # optional, epoch seconds
}
},
...
},
'requested_predicates': {
'<pred_uuid>': { # aka predicate_referent, a proof-request local predicate identifier
'name': string, # attribute name (matches case- and space-insensitively)
'p_type': '>=',
'p_value': int, # predicate value
'restrictions': [ # optional
{
"schema_id": string, # optional
"schema_issuer_did": string, # optional
"schema_name": string, # optional
"schema_version": string, # optional
"issuer_did": string, # optional
"cred_def_id": string # optional
},
{
... # if more than one restriction given, combined disjunctively (i.e., via OR)
}
],
'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet
'from': int, # optional, epoch seconds
'to': int # optional, epoch seconds
}
},
...
},
'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet
'from': Optional<int>,
'to': Optional<int>
}
}
:param filt: filter for matching attribute-value pairs and predicates; dict mapping each
cred def id to dict (specify empty dict or none for no filter, matching all)
mapping attributes to values to match or compare. E.g.,
::
{
'Vx4E82R17q...:3:CL:16:0': {
'attr-match': {
'name': 'Alex',
'sex': 'M',
'favouriteDrink': None
},
'minima': { # if both attr-match and minima present, combined conjunctively (i.e., via AND)
'favouriteNumber' : 10,
'score': '100' # nicety: implementation converts to int for caller
},
},
'R17v42T4pk...:3:CL:19:0': {
'attr-match': {
'height': 175,
'birthdate': '1975-11-15' # combined conjunctively (i.e., via AND)
}
},
'Z9ccax812j...:3:CL:27:0': {
'attr-match': {} # match all attributes on this cred def
}
...
}
:param filt_dflt_incl: whether to include (True) all credentials from wallet that filter does not
identify by cred def, or to exclude (False) all such credentials
:return: tuple with (set of referents, creds json for input proof request);
empty set and empty production for no such credential
"""
LOGGER.debug('HolderProver.get_creds >>> proof_req_json: %s, filt: %s', proof_req_json, filt)
if filt is None:
filt = {}
rv = None
creds_json = await anoncreds.prover_get_credentials_for_proof_req(self.wallet.handle, proof_req_json)
creds = json.loads(creds_json)
cred_ids = set()
if filt:
for cd_id in filt:
try:
json.loads(await self.get_cred_def(cd_id))
except AbsentCredDef:
LOGGER.warning('HolderProver.get_creds: ignoring filter criterion, no cred def on %s', cd_id)
filt.pop(cd_id)
for inner_creds in {**creds['attrs'], **creds['predicates']}.values():
for cred in inner_creds: # cred is a dict in a list of dicts
cred_info = cred['cred_info']
if filt:
cred_cd_id = cred_info['cred_def_id']
if cred_cd_id not in filt:
if filt_dflt_incl:
cred_ids.add(cred_info['referent'])
continue
if 'attr-match' in (filt[cred_cd_id] or {}): # maybe filt[cred_cd_id]: None
if not {k: str(filt[cred_cd_id].get('attr-match', {})[k])
for k in filt[cred_cd_id].get('attr-match', {})}.items() <= cred_info['attrs'].items():
continue
if 'minima' in (filt[cred_cd_id] or {}): # maybe filt[cred_cd_id]: None
minima = filt[cred_cd_id].get('minima', {})
try:
if any((attr not in cred_info['attrs'])
or (int(cred_info['attrs'][attr]) < int(minima[attr]))
for attr in minima):
continue
except ValueError:
continue # int conversion failed - reject candidate
cred_ids.add(cred_info['referent'])
else:
cred_ids.add(cred_info['referent'])
if filt:
creds = json.loads(prune_creds_json(creds, cred_ids))
rv = (cred_ids, json.dumps(creds))
LOGGER.debug('HolderProver.get_creds <<< %s', rv)
return rv | [
"async",
"def",
"get_creds",
"(",
"self",
",",
"proof_req_json",
":",
"str",
",",
"filt",
":",
"dict",
"=",
"None",
",",
"filt_dflt_incl",
":",
"bool",
"=",
"False",
")",
"->",
"(",
"Set",
"[",
"str",
"]",
",",
"str",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.get_creds >>> proof_req_json: %s, filt: %s'",
",",
"proof_req_json",
",",
"filt",
")",
"if",
"filt",
"is",
"None",
":",
"filt",
"=",
"{",
"}",
"rv",
"=",
"None",
"creds_json",
"=",
"await",
"anoncreds",
".",
"prover_get_credentials_for_proof_req",
"(",
"self",
".",
"wallet",
".",
"handle",
",",
"proof_req_json",
")",
"creds",
"=",
"json",
".",
"loads",
"(",
"creds_json",
")",
"cred_ids",
"=",
"set",
"(",
")",
"if",
"filt",
":",
"for",
"cd_id",
"in",
"filt",
":",
"try",
":",
"json",
".",
"loads",
"(",
"await",
"self",
".",
"get_cred_def",
"(",
"cd_id",
")",
")",
"except",
"AbsentCredDef",
":",
"LOGGER",
".",
"warning",
"(",
"'HolderProver.get_creds: ignoring filter criterion, no cred def on %s'",
",",
"cd_id",
")",
"filt",
".",
"pop",
"(",
"cd_id",
")",
"for",
"inner_creds",
"in",
"{",
"*",
"*",
"creds",
"[",
"'attrs'",
"]",
",",
"*",
"*",
"creds",
"[",
"'predicates'",
"]",
"}",
".",
"values",
"(",
")",
":",
"for",
"cred",
"in",
"inner_creds",
":",
"# cred is a dict in a list of dicts",
"cred_info",
"=",
"cred",
"[",
"'cred_info'",
"]",
"if",
"filt",
":",
"cred_cd_id",
"=",
"cred_info",
"[",
"'cred_def_id'",
"]",
"if",
"cred_cd_id",
"not",
"in",
"filt",
":",
"if",
"filt_dflt_incl",
":",
"cred_ids",
".",
"add",
"(",
"cred_info",
"[",
"'referent'",
"]",
")",
"continue",
"if",
"'attr-match'",
"in",
"(",
"filt",
"[",
"cred_cd_id",
"]",
"or",
"{",
"}",
")",
":",
"# maybe filt[cred_cd_id]: None",
"if",
"not",
"{",
"k",
":",
"str",
"(",
"filt",
"[",
"cred_cd_id",
"]",
".",
"get",
"(",
"'attr-match'",
",",
"{",
"}",
")",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"filt",
"[",
"cred_cd_id",
"]",
".",
"get",
"(",
"'attr-match'",
",",
"{",
"}",
")",
"}",
".",
"items",
"(",
")",
"<=",
"cred_info",
"[",
"'attrs'",
"]",
".",
"items",
"(",
")",
":",
"continue",
"if",
"'minima'",
"in",
"(",
"filt",
"[",
"cred_cd_id",
"]",
"or",
"{",
"}",
")",
":",
"# maybe filt[cred_cd_id]: None",
"minima",
"=",
"filt",
"[",
"cred_cd_id",
"]",
".",
"get",
"(",
"'minima'",
",",
"{",
"}",
")",
"try",
":",
"if",
"any",
"(",
"(",
"attr",
"not",
"in",
"cred_info",
"[",
"'attrs'",
"]",
")",
"or",
"(",
"int",
"(",
"cred_info",
"[",
"'attrs'",
"]",
"[",
"attr",
"]",
")",
"<",
"int",
"(",
"minima",
"[",
"attr",
"]",
")",
")",
"for",
"attr",
"in",
"minima",
")",
":",
"continue",
"except",
"ValueError",
":",
"continue",
"# int conversion failed - reject candidate",
"cred_ids",
".",
"add",
"(",
"cred_info",
"[",
"'referent'",
"]",
")",
"else",
":",
"cred_ids",
".",
"add",
"(",
"cred_info",
"[",
"'referent'",
"]",
")",
"if",
"filt",
":",
"creds",
"=",
"json",
".",
"loads",
"(",
"prune_creds_json",
"(",
"creds",
",",
"cred_ids",
")",
")",
"rv",
"=",
"(",
"cred_ids",
",",
"json",
".",
"dumps",
"(",
"creds",
")",
")",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.get_creds <<< %s'",
",",
"rv",
")",
"return",
"rv"
] | Get credentials from HolderProver wallet corresponding to proof request and
filter criteria; return credential identifiers from wallet and credentials json.
Return empty set and empty production for no such credentials.
:param proof_req_json: proof request json as Verifier creates; has entries for proof request's
nonce, name, and version; plus credential's requested attributes, requested predicates. I.e.,
::
{
'nonce': string, # indy-sdk makes no semantic specification on this value
'name': string, # indy-sdk makes no semantic specification on this value
'version': numeric-string, # indy-sdk makes no semantic specification on this value
'requested_attributes': {
'<attr_uuid>': { # aka attr_referent, a proof-request local identifier
'name': string, # attribute name (matches case- and space-insensitively)
'restrictions': [ # optional
{
"schema_id": string, # optional
"schema_issuer_did": string, # optional
"schema_name": string, # optional
"schema_version": string, # optional
"issuer_did": string, # optional
"cred_def_id": string # optional
},
{
... # if more than one restriction given, combined disjunctively (i.e., via OR)
}
],
'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet
'from': int, # optional, epoch seconds
'to': int # optional, epoch seconds
}
},
...
},
'requested_predicates': {
'<pred_uuid>': { # aka predicate_referent, a proof-request local predicate identifier
'name': string, # attribute name (matches case- and space-insensitively)
'p_type': '>=',
'p_value': int, # predicate value
'restrictions': [ # optional
{
"schema_id": string, # optional
"schema_issuer_did": string, # optional
"schema_name": string, # optional
"schema_version": string, # optional
"issuer_did": string, # optional
"cred_def_id": string # optional
},
{
... # if more than one restriction given, combined disjunctively (i.e., via OR)
}
],
'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet
'from': int, # optional, epoch seconds
'to': int # optional, epoch seconds
}
},
...
},
'non_revoked': { # optional - indy-sdk ignores when getting creds from wallet
'from': Optional<int>,
'to': Optional<int>
}
}
:param filt: filter for matching attribute-value pairs and predicates; dict mapping each
cred def id to dict (specify empty dict or none for no filter, matching all)
mapping attributes to values to match or compare. E.g.,
::
{
'Vx4E82R17q...:3:CL:16:0': {
'attr-match': {
'name': 'Alex',
'sex': 'M',
'favouriteDrink': None
},
'minima': { # if both attr-match and minima present, combined conjunctively (i.e., via AND)
'favouriteNumber' : 10,
'score': '100' # nicety: implementation converts to int for caller
},
},
'R17v42T4pk...:3:CL:19:0': {
'attr-match': {
'height': 175,
'birthdate': '1975-11-15' # combined conjunctively (i.e., via AND)
}
},
'Z9ccax812j...:3:CL:27:0': {
'attr-match': {} # match all attributes on this cred def
}
...
}
:param filt_dflt_incl: whether to include (True) all credentials from wallet that filter does not
identify by cred def, or to exclude (False) all such credentials
:return: tuple with (set of referents, creds json for input proof request);
empty set and empty production for no such credential | [
"Get",
"credentials",
"from",
"HolderProver",
"wallet",
"corresponding",
"to",
"proof",
"request",
"and",
"filter",
"criteria",
";",
"return",
"credential",
"identifiers",
"from",
"wallet",
"and",
"credentials",
"json",
".",
"Return",
"empty",
"set",
"and",
"empty",
"production",
"for",
"no",
"such",
"credentials",
"."
] | 0b1c17cca3bd178b6e6974af84dbac1dfce5cf45 | https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L784-L937 | train |
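A hedged example of the filter structure in action; the proof request json and cred def id are invented placeholders following the docstring's shape:

import json

filt = {
    'Vx4E82R17q...:3:CL:16:0': {          # placeholder cred def id
        'attr-match': {'name': 'Alex'},
        'minima': {'score': 80},          # AND-combined with attr-match
    }
}

async def matching_creds(holder, proof_req_json):
    cred_ids, creds_json = await holder.get_creds(proof_req_json, filt)
    return cred_ids, json.loads(creds_json)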
PSPC-SPAC-buyandsell/von_agent | von_agent/agent/holder_prover.py | HolderProver.get_creds_by_id | async def get_creds_by_id(self, proof_req_json: str, cred_ids: set) -> str:
"""
Get creds structure from HolderProver wallet by credential identifiers.
:param proof_req_json: proof request as per get_creds() above
:param cred_ids: set of credential identifiers of interest
:return: json with cred(s) for input credential identifier(s)
"""
LOGGER.debug('HolderProver.get_creds_by_id >>> proof_req_json: %s, cred_ids: %s', proof_req_json, cred_ids)
creds_json = await anoncreds.prover_get_credentials_for_proof_req(self.wallet.handle, proof_req_json)
# retain only creds of interest: find corresponding referents
rv_json = prune_creds_json(json.loads(creds_json), cred_ids)
LOGGER.debug('HolderProver.get_creds_by_id <<< %s', rv_json)
return rv_json | python | async def get_creds_by_id(self, proof_req_json: str, cred_ids: set) -> str:
"""
Get creds structure from HolderProver wallet by credential identifiers.
:param proof_req_json: proof request as per get_creds() above
:param cred_ids: set of credential identifiers of interest
:return: json with cred(s) for input credential identifier(s)
"""
LOGGER.debug('HolderProver.get_creds_by_id >>> proof_req_json: %s, cred_ids: %s', proof_req_json, cred_ids)
creds_json = await anoncreds.prover_get_credentials_for_proof_req(self.wallet.handle, proof_req_json)
# retain only creds of interest: find corresponding referents
rv_json = prune_creds_json(json.loads(creds_json), cred_ids)
LOGGER.debug('HolderProver.get_creds_by_id <<< %s', rv_json)
return rv_json | [
"async",
"def",
"get_creds_by_id",
"(",
"self",
",",
"proof_req_json",
":",
"str",
",",
"cred_ids",
":",
"set",
")",
"->",
"str",
":",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.get_creds_by_id >>> proof_req_json: %s, cred_ids: %s'",
",",
"proof_req_json",
",",
"cred_ids",
")",
"creds_json",
"=",
"await",
"anoncreds",
".",
"prover_get_credentials_for_proof_req",
"(",
"self",
".",
"wallet",
".",
"handle",
",",
"proof_req_json",
")",
"# retain only creds of interest: find corresponding referents",
"rv_json",
"=",
"prune_creds_json",
"(",
"json",
".",
"loads",
"(",
"creds_json",
")",
",",
"cred_ids",
")",
"LOGGER",
".",
"debug",
"(",
"'HolderProver.get_cred_by_referent <<< %s'",
",",
"rv_json",
")",
"return",
"rv_json"
] | Get creds structure from HolderProver wallet by credential identifiers.
:param proof_req_json: proof request as per get_creds() above
:param cred_ids: set of credential identifiers of interest
:return: json with cred(s) for input credential identifier(s) | [
"Get",
"creds",
"structure",
"from",
"HolderProver",
"wallet",
"by",
"credential",
"identifiers",
"."
] | 0b1c17cca3bd178b6e6974af84dbac1dfce5cf45 | https://github.com/PSPC-SPAC-buyandsell/von_agent/blob/0b1c17cca3bd178b6e6974af84dbac1dfce5cf45/von_agent/agent/holder_prover.py#L939-L955 | train |
palantir/typedjsonrpc | contrib/multi-module-example/typedjsonrpc_example/valid.py | histogram | def histogram(data):
"""Returns a histogram of your data.
:param data: The data to histogram
:type data: list[object]
:return: The histogram
:rtype: dict[object, int]
"""
ret = {}
for datum in data:
if datum in ret:
ret[datum] += 1
else:
ret[datum] = 1
return ret | python | def histogram(data):
"""Returns a histogram of your data.
:param data: The data to histogram
:type data: list[object]
:return: The histogram
:rtype: dict[object, int]
"""
ret = {}
for datum in data:
if datum in ret:
ret[datum] += 1
else:
ret[datum] = 1
return ret | [
"def",
"histogram",
"(",
"data",
")",
":",
"ret",
"=",
"{",
"}",
"for",
"datum",
"in",
"data",
":",
"if",
"datum",
"in",
"ret",
":",
"ret",
"[",
"datum",
"]",
"+=",
"1",
"else",
":",
"ret",
"[",
"datum",
"]",
"=",
"1",
"return",
"ret"
] | Returns a histogram of your data.
:param data: The data to histogram
:type data: list[object]
:return: The histogram
:rtype: dict[object, int] | [
"Returns",
"a",
"histogram",
"of",
"your",
"data",
"."
] | 274218fcd236ff9643506caa629029c9ba25a0fb | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/contrib/multi-module-example/typedjsonrpc_example/valid.py#L22-L36 | train |
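A quick check of the counting behaviour (assumes the example package is importable):

from typedjsonrpc_example.valid import histogram

print(histogram(['a', 'b', 'a', 'a']))  # {'a': 3, 'b': 1}
print(histogram([]))                    # {}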
palantir/typedjsonrpc | contrib/multi-module-example/typedjsonrpc_example/valid.py | print_data | def print_data(data):
"""Prints object key-value pairs in a custom format
:param data: The dict to print
:type data: dict
:rtype: None
"""
print(", ".join(["{}=>{}".format(key, value) for key, value in data])) | python | def print_data(data):
"""Prints object key-value pairs in a custom format
:param data: The dict to print
:type data: dict
:rtype: None
"""
print(", ".join(["{}=>{}".format(key, value) for key, value in data])) | [
"def",
"print_data",
"(",
"data",
")",
":",
"print",
"(",
"\", \"",
".",
"join",
"(",
"[",
"\"{}=>{}\"",
".",
"format",
"(",
"key",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"data",
"]",
")",
")"
] | Prints object key-value pairs in a custom format
:param data: The dict to print
:type data: dict
:rtype: None | [
"Prints",
"object",
"key",
"-",
"value",
"pairs",
"in",
"a",
"custom",
"format"
] | 274218fcd236ff9643506caa629029c9ba25a0fb | https://github.com/palantir/typedjsonrpc/blob/274218fcd236ff9643506caa629029c9ba25a0fb/contrib/multi-module-example/typedjsonrpc_example/valid.py#L65-L72 | train |
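With the .items() fix above, a dict renders as comma-separated key=>value pairs:

print_data({'a': 1, 'b': 2})  # a=>1, b=>2 (insertion order on Python 3.7+)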
yamins81/tabular | setup.py | subdir_findall | def subdir_findall(dir, subdir):
"""
Find all files in a subdirectory and return paths relative to dir
This is similar to (and uses) setuptools.findall
However, the paths returned are in the form needed for package_data
"""
strip_n = len(dir.split('/'))
path = '/'.join((dir, subdir))
return ['/'.join(s.split('/')[strip_n:]) for s in setuptools.findall(path)] | python | def subdir_findall(dir, subdir):
"""
Find all files in a subdirectory and return paths relative to dir
This is similar to (and uses) setuptools.findall
However, the paths returned are in the form needed for package_data
"""
strip_n = len(dir.split('/'))
path = '/'.join((dir, subdir))
return ['/'.join(s.split('/')[strip_n:]) for s in setuptools.findall(path)] | [
"def",
"subdir_findall",
"(",
"dir",
",",
"subdir",
")",
":",
"strip_n",
"=",
"len",
"(",
"dir",
".",
"split",
"(",
"'/'",
")",
")",
"path",
"=",
"'/'",
".",
"join",
"(",
"(",
"dir",
",",
"subdir",
")",
")",
"return",
"[",
"'/'",
".",
"join",
"(",
"s",
".",
"split",
"(",
"'/'",
")",
"[",
"strip_n",
":",
"]",
")",
"for",
"s",
"in",
"setuptools",
".",
"findall",
"(",
"path",
")",
"]"
] | Find all files in a subdirectory and return paths relative to dir
This is similar to (and uses) setuptools.findall
However, the paths returned are in the form needed for package_data | [
"Find",
"all",
"files",
"in",
"a",
"subdirectory",
"and",
"return",
"paths",
"relative",
"to",
"dir"
] | 1caf091c8c395960a9ad7078f95158b533cc52dd | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/setup.py#L82-L91 | train |
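The path arithmetic is easiest to see with concrete values; the findall results here are invented stand-ins for setuptools.findall output:

base, subdir = 'mypkg', 'resources'
strip_n = len(base.split('/'))  # 1
found = ['mypkg/resources/a.csv', 'mypkg/resources/deep/b.csv']
print(['/'.join(s.split('/')[strip_n:]) for s in found])
# ['resources/a.csv', 'resources/deep/b.csv'] -- the relative form package_data expects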
yamins81/tabular | setup.py | find_package_data | def find_package_data(packages):
"""
For a list of packages, find the package_data
This function scans the subdirectories of a package and considers all
non-submodule subdirectories as resources, including them in
the package_data
Returns a dictionary suitable for setup(package_data=<result>)
"""
package_data = {}
for package in packages:
package_data[package] = []
for subdir in find_subdirectories(package):
if '.'.join((package, subdir)) in packages: # skip submodules
logging.debug("skipping submodule %s/%s" % (package, subdir))
continue
if skip_tests and (subdir == 'tests'): # skip tests
logging.debug("skipping tests %s/%s" % (package, subdir))
continue
package_data[package] += subdir_findall(package_to_path(package), subdir)
return package_data | python | def find_package_data(packages):
"""
For a list of packages, find the package_data
This function scans the subdirectories of a package and considers all
non-submodule subdirectories as resources, including them in
the package_data
Returns a dictionary suitable for setup(package_data=<result>)
"""
package_data = {}
for package in packages:
package_data[package] = []
for subdir in find_subdirectories(package):
if '.'.join((package, subdir)) in packages: # skip submodules
logging.debug("skipping submodule %s/%s" % (package, subdir))
continue
if skip_tests and (subdir == 'tests'): # skip tests
logging.debug("skipping tests %s/%s" % (package, subdir))
continue
package_data[package] += subdir_findall(package_to_path(package), subdir)
return package_data | [
"def",
"find_package_data",
"(",
"packages",
")",
":",
"package_data",
"=",
"{",
"}",
"for",
"package",
"in",
"packages",
":",
"package_data",
"[",
"package",
"]",
"=",
"[",
"]",
"for",
"subdir",
"in",
"find_subdirectories",
"(",
"package",
")",
":",
"if",
"'.'",
".",
"join",
"(",
"(",
"package",
",",
"subdir",
")",
")",
"in",
"packages",
":",
"# skip submodules",
"logging",
".",
"debug",
"(",
"\"skipping submodule %s/%s\"",
"%",
"(",
"package",
",",
"subdir",
")",
")",
"continue",
"if",
"skip_tests",
"and",
"(",
"subdir",
"==",
"'tests'",
")",
":",
"# skip tests",
"logging",
".",
"debug",
"(",
"\"skipping tests %s/%s\"",
"%",
"(",
"package",
",",
"subdir",
")",
")",
"continue",
"package_data",
"[",
"package",
"]",
"+=",
"subdir_findall",
"(",
"package_to_path",
"(",
"package",
")",
",",
"subdir",
")",
"return",
"package_data"
] | For a list of packages, find the package_data
This function scans the subdirectories of a package and considers all
non-submodule subdirectories as resources, including them in
the package_data
Returns a dictionary suitable for setup(package_data=<result>) | [
"For",
"a",
"list",
"of",
"packages",
"find",
"the",
"package_data"
] | 1caf091c8c395960a9ad7078f95158b533cc52dd | https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/setup.py#L93-L114 | train |
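As the docstring says, the result plugs straight into setup(); a hedged sketch of a setup.py tail using it (metadata is placeholder):

import setuptools

packages = setuptools.find_packages()
setuptools.setup(
    name='example',
    packages=packages,
    package_data=find_package_data(packages),
)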
finklabs/metrics | metrics/metrics_utils.py | process_file_metrics | def process_file_metrics(context, file_processors):
"""Main routine for metrics."""
file_metrics = OrderedDict()
# TODO make available the includes and excludes feature
gitignore = []
if os.path.isfile('.gitignore'):
with open('.gitignore', 'r') as ifile:
gitignore = ifile.read().splitlines()
in_files = glob_files(context['root_dir'], context['in_file_names'], gitignore=gitignore)
# main loop
for in_file, key in in_files:
# print 'file %i: %s' % (i, in_file)
try:
with open(in_file, 'rb') as ifile:
code = ifile.read()
# lookup lexicographical scanner to use for this run
try:
lex = guess_lexer_for_filename(in_file, code, encoding='guess')
# encoding is 'guess', 'chardet', 'utf-8'
except:
pass
else:
token_list = lex.get_tokens(code) # parse code
file_metrics[key] = OrderedDict()
file_metrics[key].update(compute_file_metrics(file_processors, lex.name, key, token_list))
file_metrics[key]['language'] = lex.name
except IOError as e:
sys.stderr.writelines(str(e) + " -- Skipping input file.\n\n")
return file_metrics | python | def process_file_metrics(context, file_processors):
"""Main routine for metrics."""
file_metrics = OrderedDict()
# TODO make available the includes and excludes feature
gitignore = []
if os.path.isfile('.gitignore'):
with open('.gitignore', 'r') as ifile:
gitignore = ifile.read().splitlines()
in_files = glob_files(context['root_dir'], context['in_file_names'], gitignore=gitignore)
# main loop
for in_file, key in in_files:
# print 'file %i: %s' % (i, in_file)
try:
with open(in_file, 'rb') as ifile:
code = ifile.read()
# lookup lexicographical scanner to use for this run
try:
lex = guess_lexer_for_filename(in_file, code, encoding='guess')
# encoding is 'guess', 'chardet', 'utf-8'
except:
pass
else:
token_list = lex.get_tokens(code) # parse code
file_metrics[key] = OrderedDict()
file_metrics[key].update(compute_file_metrics(file_processors, lex.name, key, token_list))
file_metrics[key]['language'] = lex.name
except IOError as e:
sys.stderr.writelines(str(e) + " -- Skipping input file.\n\n")
return file_metrics | [
"def",
"process_file_metrics",
"(",
"context",
",",
"file_processors",
")",
":",
"file_metrics",
"=",
"OrderedDict",
"(",
")",
"# TODO make available the includes and excludes feature",
"gitignore",
"=",
"[",
"]",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"'.gitignore'",
")",
":",
"with",
"open",
"(",
"'.gitignore'",
",",
"'r'",
")",
"as",
"ifile",
":",
"gitignore",
"=",
"ifile",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"in_files",
"=",
"glob_files",
"(",
"context",
"[",
"'root_dir'",
"]",
",",
"context",
"[",
"'in_file_names'",
"]",
",",
"gitignore",
"=",
"gitignore",
")",
"# main loop",
"for",
"in_file",
",",
"key",
"in",
"in_files",
":",
"# print 'file %i: %s' % (i, in_file)",
"try",
":",
"with",
"open",
"(",
"in_file",
",",
"'rb'",
")",
"as",
"ifile",
":",
"code",
"=",
"ifile",
".",
"read",
"(",
")",
"# lookup lexicographical scanner to use for this run",
"try",
":",
"lex",
"=",
"guess_lexer_for_filename",
"(",
"in_file",
",",
"code",
",",
"encoding",
"=",
"'guess'",
")",
"# encoding is 'guess', chardet', 'utf-8'",
"except",
":",
"pass",
"else",
":",
"token_list",
"=",
"lex",
".",
"get_tokens",
"(",
"code",
")",
"# parse code",
"file_metrics",
"[",
"key",
"]",
"=",
"OrderedDict",
"(",
")",
"file_metrics",
"[",
"key",
"]",
".",
"update",
"(",
"compute_file_metrics",
"(",
"file_processors",
",",
"lex",
".",
"name",
",",
"key",
",",
"token_list",
")",
")",
"file_metrics",
"[",
"key",
"]",
"[",
"'language'",
"]",
"=",
"lex",
".",
"name",
"except",
"IOError",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"writelines",
"(",
"str",
"(",
"e",
")",
"+",
"\" -- Skipping input file.\\n\\n\"",
")",
"return",
"file_metrics"
] | Main routine for metrics. | [
"Main",
"routine",
"for",
"metrics",
"."
] | fd9974af498831664b9ae8e8f3834e1ec2e8a699 | https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/metrics_utils.py#L117-L150 | train |
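The lexer lookup in the loop is plain pygments; a runnable fragment showing what guess_lexer_for_filename returns (requires pygments; sample code invented):

from pygments.lexers import guess_lexer_for_filename

code = 'def add(a, b):\n    return a + b\n'
lex = guess_lexer_for_filename('example.py', code)
print(lex.name)                        # Python
print(list(lex.get_tokens(code))[:3])  # (token type, text) pairs fed to the processors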
finklabs/metrics | metrics/metrics_utils.py | process_build_metrics | def process_build_metrics(context, build_processors):
"""use processors to collect build metrics."""
build_metrics = OrderedDict()
# reset all processors
for p in build_processors:
p.reset()
# collect metrics from all processors
for p in build_processors:
build_metrics.update(p.build_metrics)
return build_metrics | python | def process_build_metrics(context, build_processors):
"""use processors to collect build metrics."""
build_metrics = OrderedDict()
# reset all processors
for p in build_processors:
p.reset()
# collect metrics from all processors
for p in build_processors:
build_metrics.update(p.build_metrics)
return build_metrics | [
"def",
"process_build_metrics",
"(",
"context",
",",
"build_processors",
")",
":",
"build_metrics",
"=",
"OrderedDict",
"(",
")",
"# reset all processors",
"for",
"p",
"in",
"build_processors",
":",
"p",
".",
"reset",
"(",
")",
"# collect metrics from all processors",
"for",
"p",
"in",
"build_processors",
":",
"build_metrics",
".",
"update",
"(",
"p",
".",
"build_metrics",
")",
"return",
"build_metrics"
] | use processors to collect build metrics. | [
"use",
"processors",
"to",
"collect",
"build",
"metrics",
"."
] | fd9974af498831664b9ae8e8f3834e1ec2e8a699 | https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/metrics_utils.py#L153-L165 | train |
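process_build_metrics relies on an implicit processor contract: each processor exposes reset() and a build_metrics mapping that is merged into the result. A hypothetical processor satisfying that contract (the class and metric names are invented for illustration):

from collections import OrderedDict

class BuildTimeProcessor(object):
    # Hypothetical: a real processor would update build_metrics
    # from hooks called elsewhere during the build.
    def reset(self):
        self.build_metrics = OrderedDict([('build_time_seconds', 0.0)])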
finklabs/metrics | metrics/metrics_utils.py | summary | def summary(processors, metrics, context):
"""Print the summary"""
# display aggregated metric values on language level
def display_header(processors, before='', after=''):
"""Display the header for the summary results."""
print(before, end=' ')
for processor in processors:
processor.display_header()
print(after)
def display_separator(processors, before='', after=''):
"""Display the header for the summary results."""
print(before, end=' ')
for processor in processors:
processor.display_separator()
print(after)
def display_metrics(processors, before='', after='', metrics=[]):
"""Display the header for the summary results."""
print(before, end=' ')
for processor in processors:
processor.display_metrics(metrics)
print(after)
summary = {}
for m in metrics:
lang = metrics[m]['language']
has_key = lang in summary
if not has_key:
summary[lang] = {'file_count': 0, 'language': lang}
summary[lang]['file_count'] += 1
for i in metrics[m]:
if i not in ['sloc', 'comments', 'mccabe']: # include metrics to be used
continue
if not has_key:
summary[lang][i] = 0
summary[lang][i] += metrics[m][i]
total = {'language': 'Total'}
for m in summary:
for i in summary[m]:
if i == 'language':
continue
if i not in total:
total[i] = 0
total[i] += summary[m][i]
print('Metrics Summary:')
display_header(processors, 'Files', '')
display_separator(processors, '-'*5, '')
for k in sorted(summary.keys(), key=str.lower):
display_metrics(processors, '%5d' %
summary[k]['file_count'], '', summary[k])
display_separator(processors, '-'*5, '')
display_metrics(processors, '%5d' % total['file_count'],
'', total) | python | def summary(processors, metrics, context):
"""Print the summary"""
# display aggregated metric values on language level
def display_header(processors, before='', after=''):
"""Display the header for the summary results."""
print(before, end=' ')
for processor in processors:
processor.display_header()
print(after)
def display_separator(processors, before='', after=''):
"""Display the header for the summary results."""
print(before, end=' ')
for processor in processors:
processor.display_separator()
print(after)
def display_metrics(processors, before='', after='', metrics=[]):
"""Display the header for the summary results."""
print(before, end=' ')
for processor in processors:
processor.display_metrics(metrics)
print(after)
summary = {}
for m in metrics:
lang = metrics[m]['language']
has_key = lang in summary
if not has_key:
summary[lang] = {'file_count': 0, 'language': lang}
summary[lang]['file_count'] += 1
for i in metrics[m]:
if i not in ['sloc', 'comments', 'mccabe']: # include metrics to be used
continue
if not has_key:
summary[lang][i] = 0
summary[lang][i] += metrics[m][i]
total = {'language': 'Total'}
for m in summary:
for i in summary[m]:
if i == 'language':
continue
if i not in total:
total[i] = 0
total[i] += summary[m][i]
print('Metrics Summary:')
display_header(processors, 'Files', '')
display_separator(processors, '-'*5, '')
for k in sorted(summary.keys(), key=str.lower):
display_metrics(processors, '%5d' %
summary[k]['file_count'], '', summary[k])
display_separator(processors, '-'*5, '')
display_metrics(processors, '%5d' % total['file_count'],
'', total) | [
"def",
"summary",
"(",
"processors",
",",
"metrics",
",",
"context",
")",
":",
"# display aggregated metric values on language level",
"def",
"display_header",
"(",
"processors",
",",
"before",
"=",
"''",
",",
"after",
"=",
"''",
")",
":",
"\"\"\"Display the header for the summary results.\"\"\"",
"print",
"(",
"before",
",",
"end",
"=",
"' '",
")",
"for",
"processor",
"in",
"processors",
":",
"processor",
".",
"display_header",
"(",
")",
"print",
"(",
"after",
")",
"def",
"display_separator",
"(",
"processors",
",",
"before",
"=",
"''",
",",
"after",
"=",
"''",
")",
":",
"\"\"\"Display the header for the summary results.\"\"\"",
"print",
"(",
"before",
",",
"end",
"=",
"' '",
")",
"for",
"processor",
"in",
"processors",
":",
"processor",
".",
"display_separator",
"(",
")",
"print",
"(",
"after",
")",
"def",
"display_metrics",
"(",
"processors",
",",
"before",
"=",
"''",
",",
"after",
"=",
"''",
",",
"metrics",
"=",
"[",
"]",
")",
":",
"\"\"\"Display the header for the summary results.\"\"\"",
"print",
"(",
"before",
",",
"end",
"=",
"' '",
")",
"for",
"processor",
"in",
"processors",
":",
"processor",
".",
"display_metrics",
"(",
"metrics",
")",
"print",
"(",
"after",
")",
"summary",
"=",
"{",
"}",
"for",
"m",
"in",
"metrics",
":",
"lang",
"=",
"metrics",
"[",
"m",
"]",
"[",
"'language'",
"]",
"has_key",
"=",
"lang",
"in",
"summary",
"if",
"not",
"has_key",
":",
"summary",
"[",
"lang",
"]",
"=",
"{",
"'file_count'",
":",
"0",
",",
"'language'",
":",
"lang",
"}",
"summary",
"[",
"lang",
"]",
"[",
"'file_count'",
"]",
"+=",
"1",
"for",
"i",
"in",
"metrics",
"[",
"m",
"]",
":",
"if",
"i",
"not",
"in",
"[",
"'sloc'",
",",
"'comments'",
",",
"'mccabe'",
"]",
":",
"# include metrics to be used",
"continue",
"if",
"not",
"has_key",
":",
"summary",
"[",
"lang",
"]",
"[",
"i",
"]",
"=",
"0",
"summary",
"[",
"lang",
"]",
"[",
"i",
"]",
"+=",
"metrics",
"[",
"m",
"]",
"[",
"i",
"]",
"total",
"=",
"{",
"'language'",
":",
"'Total'",
"}",
"for",
"m",
"in",
"summary",
":",
"for",
"i",
"in",
"summary",
"[",
"m",
"]",
":",
"if",
"i",
"==",
"'language'",
":",
"continue",
"if",
"i",
"not",
"in",
"total",
":",
"total",
"[",
"i",
"]",
"=",
"0",
"total",
"[",
"i",
"]",
"+=",
"summary",
"[",
"m",
"]",
"[",
"i",
"]",
"print",
"(",
"'Metrics Summary:'",
")",
"display_header",
"(",
"processors",
",",
"'Files'",
",",
"''",
")",
"display_separator",
"(",
"processors",
",",
"'-'",
"*",
"5",
",",
"''",
")",
"for",
"k",
"in",
"sorted",
"(",
"summary",
".",
"keys",
"(",
")",
",",
"key",
"=",
"str",
".",
"lower",
")",
":",
"display_metrics",
"(",
"processors",
",",
"'%5d'",
"%",
"summary",
"[",
"k",
"]",
"[",
"'file_count'",
"]",
",",
"''",
",",
"summary",
"[",
"k",
"]",
")",
"display_separator",
"(",
"processors",
",",
"'-'",
"*",
"5",
",",
"''",
")",
"display_metrics",
"(",
"processors",
",",
"'%5d'",
"%",
"total",
"[",
"'file_count'",
"]",
",",
"''",
",",
"total",
")"
] | Print the summary | [
"Print",
"the",
"summary"
] | fd9974af498831664b9ae8e8f3834e1ec2e8a699 | https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/metrics_utils.py#L168-L224 | train |
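The aggregation in summary reduces to: group files by language, count them, and sum the whitelisted metrics ('sloc', 'comments', 'mccabe'). The same logic in isolation, with made-up inputs:

file_metrics = {
    'a.py': {'language': 'Python', 'sloc': 120, 'comments': 30, 'mccabe': 14},
    'b.py': {'language': 'Python', 'sloc': 80, 'comments': 10, 'mccabe': 9},
    'c.c': {'language': 'C', 'sloc': 200, 'comments': 25, 'mccabe': 31},
}
agg = {}
for m in file_metrics.values():
    lang = agg.setdefault(m['language'],
                          {'file_count': 0, 'sloc': 0, 'comments': 0, 'mccabe': 0})
    lang['file_count'] += 1
    for key in ('sloc', 'comments', 'mccabe'):
        lang[key] += m[key]
# agg['Python'] -> {'file_count': 2, 'sloc': 200, 'comments': 40, 'mccabe': 23}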
rwl/pylon | examples/pyreto/thesis/ex6_1.py | get_portfolios3 | def get_portfolios3():
""" Returns portfolios with U12 and U20 generators removed and generators
of the same type at the same bus aggregated.
"""
g1 = [0]
g2 = [1]
g7 = [2]
g13 = [3]
g14 = [4] # sync cond
g15 = [5]
g16 = [6]
g18 = [7]
g21 = [8]
g22 = [9]
g23 = [10, 11]
portfolios = [g1 + g15 + g18,
g2 + g16 + g21,
g13 + g22,
g7 + g23]
passive = g14 # sync_cond
return portfolios, passive | python | def get_portfolios3():
""" Returns portfolios with U12 and U20 generators removed and generators
of the same type at the same bus aggregated.
"""
g1 = [0]
g2 = [1]
g7 = [2]
g13 = [3]
g14 = [4] # sync cond
g15 = [5]
g16 = [6]
g18 = [7]
g21 = [8]
g22 = [9]
g23 = [10, 11]
portfolios = [g1 + g15 + g18,
g2 + g16 + g21,
g13 + g22,
g7 + g23]
passive = g14 # sync_cond
return portfolios, passive | [
"def",
"get_portfolios3",
"(",
")",
":",
"g1",
"=",
"[",
"0",
"]",
"g2",
"=",
"[",
"1",
"]",
"g7",
"=",
"[",
"2",
"]",
"g13",
"=",
"[",
"3",
"]",
"g14",
"=",
"[",
"4",
"]",
"# sync cond",
"g15",
"=",
"[",
"5",
"]",
"g16",
"=",
"[",
"6",
"]",
"g18",
"=",
"[",
"7",
"]",
"g21",
"=",
"[",
"8",
"]",
"g22",
"=",
"[",
"9",
"]",
"g23",
"=",
"[",
"10",
",",
"11",
"]",
"portfolios",
"=",
"[",
"g1",
"+",
"g15",
"+",
"g18",
",",
"g2",
"+",
"g16",
"+",
"g21",
",",
"g13",
"+",
"g22",
",",
"g7",
"+",
"g23",
"]",
"passive",
"=",
"g14",
"# sync_cond",
"return",
"portfolios",
",",
"passive"
] | Returns portfolios with U12 and U20 generators removed and generators
of the same type at the same bus aggregated. | [
"Returns",
"portfolios",
"with",
"U12",
"and",
"U20",
"generators",
"removed",
"and",
"generators",
"of",
"the",
"same",
"type",
"at",
"the",
"same",
"bus",
"aggregated",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/ex6_1.py#L95-L118 | train |
pymoca/pymoca | src/pymoca/backends/xml/parser.py | ModelListener.call | def call(self, tag_name: str, *args, **kwargs):
"""Convenience method for calling methods with walker."""
if hasattr(self, tag_name):
getattr(self, tag_name)(*args, **kwargs) | python | def call(self, tag_name: str, *args, **kwargs):
"""Convenience method for calling methods with walker."""
if hasattr(self, tag_name):
getattr(self, tag_name)(*args, **kwargs) | [
"def",
"call",
"(",
"self",
",",
"tag_name",
":",
"str",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"tag_name",
")",
":",
"getattr",
"(",
"self",
",",
"tag_name",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Convenience method for calling methods with walker. | [
"Convenience",
"method",
"for",
"calling",
"methods",
"with",
"walker",
"."
] | 14b5eb7425e96689de6cc5c10f400895d586a978 | https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/parser.py#L85-L88 | train |
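ModelListener.call is a guarded dynamic dispatch: if the listener defines a method named after the tag it is invoked, otherwise the tag is ignored without error. The idiom in isolation (Listener and classDefinition are illustrative names):

class Listener(object):
    def classDefinition(self, name):
        print('entering', name)

    def call(self, tag_name, *args, **kwargs):
        if hasattr(self, tag_name):
            getattr(self, tag_name)(*args, **kwargs)

Listener().call('classDefinition', 'MyModel')  # dispatched to the method
Listener().call('unknownTag')                  # silently ignored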
pymoca/pymoca | src/pymoca/backends/xml/parser.py | ModelListener.der | def der(self, x: Sym):
"""Get the derivative of the variable, create it if it doesn't exist."""
name = 'der({:s})'.format(x.name())
if name not in self.scope['dvar'].keys():
self.scope['dvar'][name] = self.sym.sym(name, *x.shape)
self.scope['states'].append(x.name())
return self.scope['dvar'][name] | python | def der(self, x: Sym):
"""Get the derivative of the variable, create it if it doesn't exist."""
name = 'der({:s})'.format(x.name())
if name not in self.scope['dvar'].keys():
self.scope['dvar'][name] = self.sym.sym(name, *x.shape)
self.scope['states'].append(x.name())
return self.scope['dvar'][name] | [
"def",
"der",
"(",
"self",
",",
"x",
":",
"Sym",
")",
":",
"name",
"=",
"'der({:s})'",
".",
"format",
"(",
"x",
".",
"name",
"(",
")",
")",
"if",
"name",
"not",
"in",
"self",
".",
"scope",
"[",
"'dvar'",
"]",
".",
"keys",
"(",
")",
":",
"self",
".",
"scope",
"[",
"'dvar'",
"]",
"[",
"name",
"]",
"=",
"self",
".",
"sym",
".",
"sym",
"(",
"name",
",",
"*",
"x",
".",
"shape",
")",
"self",
".",
"scope",
"[",
"'states'",
"]",
".",
"append",
"(",
"x",
".",
"name",
"(",
")",
")",
"return",
"self",
".",
"scope",
"[",
"'dvar'",
"]",
"[",
"name",
"]"
] | Get the derivative of the variable, create it if it doesn't exist. | [
"Get",
"the",
"derivative",
"of",
"the",
"variable",
"create",
"it",
"if",
"it",
"doesn",
"t",
"exist",
"."
] | 14b5eb7425e96689de6cc5c10f400895d586a978 | https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/parser.py#L94-L100 | train |
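der memoizes derivative symbols under keys like 'der(x)' and records the base variable as a state the first time its derivative is requested. A CasADi-free sketch of the lazy-creation pattern (make_sym stands in for self.sym.sym and just returns a placeholder string):

scope = {'dvar': {}, 'states': []}

def der(var_name, make_sym=lambda n: '<sym {}>'.format(n)):
    name = 'der({:s})'.format(var_name)
    if name not in scope['dvar']:
        scope['dvar'][name] = make_sym(name)  # create the symbol on first use
        scope['states'].append(var_name)      # the variable is now a state
    return scope['dvar'][name]

der('x')
der('x')
assert scope['states'] == ['x']  # registered once, not twice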
pymoca/pymoca | src/pymoca/backends/xml/parser.py | ModelListener.noise_gaussian | def noise_gaussian(self, mean, std):
"""Create a gaussian noise variable"""
assert std > 0
ng = self.sym.sym('ng_{:d}'.format(len(self.scope['ng'])))
self.scope['ng'].append(ng)
return mean + std*ng | python | def noise_gaussian(self, mean, std):
"""Create a gaussian noise variable"""
assert std > 0
ng = self.sym.sym('ng_{:d}'.format(len(self.scope['ng'])))
self.scope['ng'].append(ng)
return mean + std*ng | [
"def",
"noise_gaussian",
"(",
"self",
",",
"mean",
",",
"std",
")",
":",
"assert",
"std",
">",
"0",
"ng",
"=",
"self",
".",
"sym",
".",
"sym",
"(",
"'ng_{:d}'",
".",
"format",
"(",
"len",
"(",
"self",
".",
"scope",
"[",
"'ng'",
"]",
")",
")",
")",
"self",
".",
"scope",
"[",
"'ng'",
"]",
".",
"append",
"(",
"ng",
")",
"return",
"mean",
"+",
"std",
"*",
"ng"
] | Create a gaussian noise variable | [
"Create",
"a",
"gaussian",
"noise",
"variable"
] | 14b5eb7425e96689de6cc5c10f400895d586a978 | https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/parser.py#L126-L131 | train |
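The expression returned by noise_gaussian is an affine transform of a unit Gaussian: if ng ~ N(0, 1), then mean + std*ng ~ N(mean, std**2), which is why a single standard-normal symbol per call suffices. A quick numeric check with NumPy:

import numpy as np

rng = np.random.default_rng(0)
ng = rng.standard_normal(100000)      # plays the role of one 'ng' symbol
samples = 2.0 + 0.5 * ng
print(samples.mean(), samples.std())  # close to 2.0 and 0.5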
pymoca/pymoca | src/pymoca/backends/xml/parser.py | ModelListener.noise_uniform | def noise_uniform(self, lower_bound, upper_bound):
"""Create a uniform noise variable"""
assert upper_bound > lower_bound
nu = self.sym.sym('nu_{:d}'.format(len(self.scope['nu'])))
self.scope['nu'].append(nu)
return lower_bound + nu*(upper_bound - lower_bound) | python | def noise_uniform(self, lower_bound, upper_bound):
"""Create a uniform noise variable"""
assert upper_bound > lower_bound
nu = self.sym.sym('nu_{:d}'.format(len(self.scope['nu'])))
self.scope['nu'].append(nu)
return lower_bound + nu*(upper_bound - lower_bound) | [
"def",
"noise_uniform",
"(",
"self",
",",
"lower_bound",
",",
"upper_bound",
")",
":",
"assert",
"upper_bound",
">",
"lower_bound",
"nu",
"=",
"self",
".",
"sym",
".",
"sym",
"(",
"'nu_{:d}'",
".",
"format",
"(",
"len",
"(",
"self",
".",
"scope",
"[",
"'nu'",
"]",
")",
")",
")",
"self",
".",
"scope",
"[",
"'nu'",
"]",
".",
"append",
"(",
"nu",
")",
"return",
"lower_bound",
"+",
"nu",
"*",
"(",
"upper_bound",
"-",
"lower_bound",
")"
] | Create a uniform noise variable | [
"Create",
"a",
"uniform",
"noise",
"variable"
] | 14b5eb7425e96689de6cc5c10f400895d586a978 | https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/parser.py#L133-L138 | train |
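Similarly, noise_uniform maps a unit-uniform draw onto the requested interval: if nu ~ U(0, 1), then lower_bound + nu*(upper_bound - lower_bound) ~ U(lower_bound, upper_bound). A quick numeric check:

import numpy as np

rng = np.random.default_rng(0)
nu = rng.uniform(size=100000)          # plays the role of one 'nu' symbol
samples = -1.0 + nu * (3.0 - (-1.0))   # ~ U(-1, 3)
print(samples.min(), samples.max())    # close to -1.0 and 3.0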
pymoca/pymoca | src/pymoca/backends/xml/parser.py | ModelListener.log | def log(self, *args, **kwargs):
"""Convenience function for printing indenting debug output."""
if self.verbose:
print(' ' * self.depth, *args, **kwargs) | python | def log(self, *args, **kwargs):
"""Convenience function for printing indenting debug output."""
if self.verbose:
print(' ' * self.depth, *args, **kwargs) | [
"def",
"log",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"' '",
"*",
"self",
".",
"depth",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Convenience function for printing indented debug output. | [
"Convenience",
"function",
"for",
"printing",
"indenting",
"debug",
"output",
"."
] | 14b5eb7425e96689de6cc5c10f400895d586a978 | https://github.com/pymoca/pymoca/blob/14b5eb7425e96689de6cc5c10f400895d586a978/src/pymoca/backends/xml/parser.py#L158-L161 | train |
rwl/pylon | examples/pyreto/thesis/common.py | get_case6ww | def get_case6ww():
""" Returns the 6 bus case from Wood & Wollenberg PG&C.
"""
path = os.path.dirname(pylon.__file__)
path = os.path.join(path, "test", "data")
path = os.path.join(path, "case6ww", "case6ww.pkl")
case = pylon.Case.load(path)
case.generators[0].p_cost = (0.0, 4.0, 200.0)
case.generators[1].p_cost = (0.0, 3.0, 200.0)
# case.generators[0].p_cost = (0.0, 5.1, 200.0) # 10%
# case.generators[1].p_cost = (0.0, 4.5, 200.0) # 30%
case.generators[2].p_cost = (0.0, 6.0, 200.0) # passive
# case.generators[0].c_shutdown = 100.0
# case.generators[1].c_shutdown = 100.0
# case.generators[2].c_shutdown = 100.0
case.generators[0].p_min = 0.0 # TODO: Unit-decommitment.
case.generators[1].p_min = 0.0
case.generators[2].p_min = 0.0
case.generators[0].p_max = 110.0
case.generators[1].p_max = 110.0
case.generators[2].p_max = 220.0 # passive
# FIXME: Correct generator naming order.
for g in case.generators:
g.name
#pyreto.util.plotGenCost(case.generators)
return case | python | def get_case6ww():
""" Returns the 6 bus case from Wood & Wollenberg PG&C.
"""
path = os.path.dirname(pylon.__file__)
path = os.path.join(path, "test", "data")
path = os.path.join(path, "case6ww", "case6ww.pkl")
case = pylon.Case.load(path)
case.generators[0].p_cost = (0.0, 4.0, 200.0)
case.generators[1].p_cost = (0.0, 3.0, 200.0)
# case.generators[0].p_cost = (0.0, 5.1, 200.0) # 10%
# case.generators[1].p_cost = (0.0, 4.5, 200.0) # 30%
case.generators[2].p_cost = (0.0, 6.0, 200.0) # passive
# case.generators[0].c_shutdown = 100.0
# case.generators[1].c_shutdown = 100.0
# case.generators[2].c_shutdown = 100.0
case.generators[0].p_min = 0.0 # TODO: Unit-decommitment.
case.generators[1].p_min = 0.0
case.generators[2].p_min = 0.0
case.generators[0].p_max = 110.0
case.generators[1].p_max = 110.0
case.generators[2].p_max = 220.0 # passive
# FIXME: Correct generator naming order.
for g in case.generators:
g.name
#pyreto.util.plotGenCost(case.generators)
return case | [
"def",
"get_case6ww",
"(",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"pylon",
".",
"__file__",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"test\"",
",",
"\"data\"",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"case6ww\"",
",",
"\"case6ww.pkl\"",
")",
"case",
"=",
"pylon",
".",
"Case",
".",
"load",
"(",
"path",
")",
"case",
".",
"generators",
"[",
"0",
"]",
".",
"p_cost",
"=",
"(",
"0.0",
",",
"4.0",
",",
"200.0",
")",
"case",
".",
"generators",
"[",
"1",
"]",
".",
"p_cost",
"=",
"(",
"0.0",
",",
"3.0",
",",
"200.0",
")",
"# case.generators[0].p_cost = (0.0, 5.1, 200.0) # 10%",
"# case.generators[1].p_cost = (0.0, 4.5, 200.0) # 30%",
"case",
".",
"generators",
"[",
"2",
"]",
".",
"p_cost",
"=",
"(",
"0.0",
",",
"6.0",
",",
"200.0",
")",
"# passive",
"# case.generators[0].c_shutdown = 100.0",
"# case.generators[1].c_shutdown = 100.0",
"# case.generators[2].c_shutdown = 100.0",
"case",
".",
"generators",
"[",
"0",
"]",
".",
"p_min",
"=",
"0.0",
"# TODO: Unit-decommitment.",
"case",
".",
"generators",
"[",
"1",
"]",
".",
"p_min",
"=",
"0.0",
"case",
".",
"generators",
"[",
"2",
"]",
".",
"p_min",
"=",
"0.0",
"case",
".",
"generators",
"[",
"0",
"]",
".",
"p_max",
"=",
"110.0",
"case",
".",
"generators",
"[",
"1",
"]",
".",
"p_max",
"=",
"110.0",
"case",
".",
"generators",
"[",
"2",
"]",
".",
"p_max",
"=",
"220.0",
"# passive",
"# FIXME: Correct generator naming order.",
"for",
"g",
"in",
"case",
".",
"generators",
":",
"g",
".",
"name",
"#pyreto.util.plotGenCost(case.generators)",
"return",
"case"
] | Returns the 6 bus case from Wood & Wollenberg PG&C. | [
"Returns",
"the",
"6",
"bus",
"case",
"from",
"Wood",
"&",
"Wollenberg",
"PG&C",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L36-L70 | train |
rwl/pylon | examples/pyreto/thesis/common.py | get_case24_ieee_rts | def get_case24_ieee_rts():
""" Returns the 24 bus IEEE Reliability Test System.
"""
path = os.path.dirname(pylon.__file__)
path = os.path.join(path, "test", "data")
path = os.path.join(path, "case24_ieee_rts", "case24_ieee_rts.pkl")
case = pylon.Case.load(path)
# FIXME: Correct generator naming order.
for g in case.generators:
g.name
return case | python | def get_case24_ieee_rts():
""" Returns the 24 bus IEEE Reliability Test System.
"""
path = os.path.dirname(pylon.__file__)
path = os.path.join(path, "test", "data")
path = os.path.join(path, "case24_ieee_rts", "case24_ieee_rts.pkl")
case = pylon.Case.load(path)
# FIXME: Correct generator naming order.
for g in case.generators:
g.name
return case | [
"def",
"get_case24_ieee_rts",
"(",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"pylon",
".",
"__file__",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"test\"",
",",
"\"data\"",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"\"case24_ieee_rts\"",
",",
"\"case24_ieee_rts.pkl\"",
")",
"case",
"=",
"pylon",
".",
"Case",
".",
"load",
"(",
"path",
")",
"# FIXME: Correct generator naming order.",
"for",
"g",
"in",
"case",
".",
"generators",
":",
"g",
".",
"name",
"return",
"case"
] | Returns the 24 bus IEEE Reliability Test System. | [
"Returns",
"the",
"24",
"bus",
"IEEE",
"Reliability",
"Test",
"System",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L107-L120 | train |
rwl/pylon | examples/pyreto/thesis/common.py | get_discrete_task_agent | def get_discrete_task_agent(generators, market, nStates, nOffer, markups,
withholds, maxSteps, learner, Pd0=None, Pd_min=0.0):
""" Returns a tuple of task and agent for the given learner.
"""
env = pyreto.discrete.MarketEnvironment(generators, market,
numStates=nStates,
numOffbids=nOffer,
markups=markups,
withholds=withholds,
Pd0=Pd0,
Pd_min=Pd_min)
task = pyreto.discrete.ProfitTask(env, maxSteps=maxSteps)
nActions = len(env._allActions)
module = ActionValueTable(numStates=nStates, numActions=nActions)
agent = LearningAgent(module, learner)
return task, agent | python | def get_discrete_task_agent(generators, market, nStates, nOffer, markups,
withholds, maxSteps, learner, Pd0=None, Pd_min=0.0):
""" Returns a tuple of task and agent for the given learner.
"""
env = pyreto.discrete.MarketEnvironment(generators, market,
numStates=nStates,
numOffbids=nOffer,
markups=markups,
withholds=withholds,
Pd0=Pd0,
Pd_min=Pd_min)
task = pyreto.discrete.ProfitTask(env, maxSteps=maxSteps)
nActions = len(env._allActions)
module = ActionValueTable(numStates=nStates, numActions=nActions)
agent = LearningAgent(module, learner)
return task, agent | [
"def",
"get_discrete_task_agent",
"(",
"generators",
",",
"market",
",",
"nStates",
",",
"nOffer",
",",
"markups",
",",
"withholds",
",",
"maxSteps",
",",
"learner",
",",
"Pd0",
"=",
"None",
",",
"Pd_min",
"=",
"0.0",
")",
":",
"env",
"=",
"pyreto",
".",
"discrete",
".",
"MarketEnvironment",
"(",
"generators",
",",
"market",
",",
"numStates",
"=",
"nStates",
",",
"numOffbids",
"=",
"nOffer",
",",
"markups",
"=",
"markups",
",",
"withholds",
"=",
"withholds",
",",
"Pd0",
"=",
"Pd0",
",",
"Pd_min",
"=",
"Pd_min",
")",
"task",
"=",
"pyreto",
".",
"discrete",
".",
"ProfitTask",
"(",
"env",
",",
"maxSteps",
"=",
"maxSteps",
")",
"nActions",
"=",
"len",
"(",
"env",
".",
"_allActions",
")",
"module",
"=",
"ActionValueTable",
"(",
"numStates",
"=",
"nStates",
",",
"numActions",
"=",
"nActions",
")",
"agent",
"=",
"LearningAgent",
"(",
"module",
",",
"learner",
")",
"return",
"task",
",",
"agent"
] | Returns a tuple of task and agent for the given learner. | [
"Returns",
"a",
"tuple",
"of",
"task",
"and",
"agent",
"for",
"the",
"given",
"learner",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L166-L184 | train |
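A hedged wiring sketch for get_discrete_task_agent, assuming pylon, pyreto and PyBrain are importable and reusing get_case6ww from this module; every numeric argument is illustrative, not a recommendation, and SmartMarket may need keyword arguments such as priceCap in practice (compare the experiment builders elsewhere in this file):

import pyreto
from pybrain.rl.learners import Q

case = get_case6ww()
market = pyreto.SmartMarket(case)
learner = Q(0.3, 0.99)  # alpha, gamma
task, agent = get_discrete_task_agent(
    case.generators[0:1], market, nStates=3, nOffer=1,
    markups=(0.0, 10.0, 20.0), withholds=(0.0,),
    maxSteps=24, learner=learner)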
rwl/pylon | examples/pyreto/thesis/common.py | get_zero_task_agent | def get_zero_task_agent(generators, market, nOffer, maxSteps):
""" Returns a task-agent tuple whose action is always zero.
"""
env = pyreto.discrete.MarketEnvironment(generators, market, nOffer)
task = pyreto.discrete.ProfitTask(env, maxSteps=maxSteps)
agent = pyreto.util.ZeroAgent(env.outdim, env.indim)
return task, agent | python | def get_zero_task_agent(generators, market, nOffer, maxSteps):
""" Returns a task-agent tuple whose action is always zero.
"""
env = pyreto.discrete.MarketEnvironment(generators, market, nOffer)
task = pyreto.discrete.ProfitTask(env, maxSteps=maxSteps)
agent = pyreto.util.ZeroAgent(env.outdim, env.indim)
return task, agent | [
"def",
"get_zero_task_agent",
"(",
"generators",
",",
"market",
",",
"nOffer",
",",
"maxSteps",
")",
":",
"env",
"=",
"pyreto",
".",
"discrete",
".",
"MarketEnvironment",
"(",
"generators",
",",
"market",
",",
"nOffer",
")",
"task",
"=",
"pyreto",
".",
"discrete",
".",
"ProfitTask",
"(",
"env",
",",
"maxSteps",
"=",
"maxSteps",
")",
"agent",
"=",
"pyreto",
".",
"util",
".",
"ZeroAgent",
"(",
"env",
".",
"outdim",
",",
"env",
".",
"indim",
")",
"return",
"task",
",",
"agent"
] | Returns a task-agent tuple whose action is always zero. | [
"Returns",
"a",
"task",
"-",
"agent",
"tuple",
"whose",
"action",
"is",
"always",
"zero",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L211-L217 | train |
rwl/pylon | examples/pyreto/thesis/common.py | get_neg_one_task_agent | def get_neg_one_task_agent(generators, market, nOffer, maxSteps):
""" Returns a task-agent tuple whose action is always minus one.
"""
env = pyreto.discrete.MarketEnvironment(generators, market, nOffer)
task = pyreto.discrete.ProfitTask(env, maxSteps=maxSteps)
agent = pyreto.util.NegOneAgent(env.outdim, env.indim)
return task, agent | python | def get_neg_one_task_agent(generators, market, nOffer, maxSteps):
""" Returns a task-agent tuple whose action is always minus one.
"""
env = pyreto.discrete.MarketEnvironment(generators, market, nOffer)
task = pyreto.discrete.ProfitTask(env, maxSteps=maxSteps)
agent = pyreto.util.NegOneAgent(env.outdim, env.indim)
return task, agent | [
"def",
"get_neg_one_task_agent",
"(",
"generators",
",",
"market",
",",
"nOffer",
",",
"maxSteps",
")",
":",
"env",
"=",
"pyreto",
".",
"discrete",
".",
"MarketEnvironment",
"(",
"generators",
",",
"market",
",",
"nOffer",
")",
"task",
"=",
"pyreto",
".",
"discrete",
".",
"ProfitTask",
"(",
"env",
",",
"maxSteps",
"=",
"maxSteps",
")",
"agent",
"=",
"pyreto",
".",
"util",
".",
"NegOneAgent",
"(",
"env",
".",
"outdim",
",",
"env",
".",
"indim",
")",
"return",
"task",
",",
"agent"
] | Returns a task-agent tuple whose action is always minus one. | [
"Returns",
"a",
"task",
"-",
"agent",
"tuple",
"whose",
"action",
"is",
"always",
"minus",
"one",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L220-L226 | train |
rwl/pylon | examples/pyreto/thesis/common.py | run_experiment | def run_experiment(experiment, roleouts, episodes, in_cloud=False,
dynProfile=None):
""" Runs the given experiment and returns the results.
"""
def run():
if dynProfile is None:
maxsteps = len(experiment.profile) # episode length
else:
maxsteps = dynProfile.shape[1]
na = len(experiment.agents)
ni = roleouts * episodes * maxsteps
all_action = zeros((na, 0))
all_reward = zeros((na, 0))
epsilon = zeros((na, ni)) # exploration rate
# Converts to action vector in percentage markup values.
vmarkup = vectorize(get_markup)
for roleout in range(roleouts):
if dynProfile is not None:
# Apply new load profile before each roleout (week).
i = roleout * episodes # index of first profile value
experiment.profile = dynProfile[i:i + episodes, :]
# print "PROFILE:", experiment.profile, episodes
experiment.doEpisodes(episodes) # number of samples per learning step
nei = episodes * maxsteps # num interactions per roleout
epi_action = zeros((0, nei))
epi_reward = zeros((0, nei))
for i, (task, agent) in \
enumerate(zip(experiment.tasks, experiment.agents)):
action = copy(agent.history["action"])
reward = copy(agent.history["reward"])
for j in range(nei):
if isinstance(agent.learner, DirectSearchLearner):
action[j, :] = task.denormalize(action[j, :])
k = nei * roleout
epsilon[i, k:k + nei] = agent.learner.explorer.sigma[0]
elif isinstance(agent.learner, ValueBasedLearner):
action[j, :] = vmarkup(action[j, :], task)
k = nei * roleout
epsilon[i, k:k + nei] = agent.learner.explorer.epsilon
else:
action = vmarkup(action, task)
# FIXME: Only stores action[0] for all interactions.
epi_action = c_[epi_action.T, action[:, 0].flatten()].T
epi_reward = c_[epi_reward.T, reward.flatten()].T
if hasattr(agent, "module"):
print "PARAMS:", agent.module.params
agent.learn()
agent.reset()
all_action = c_[all_action, epi_action]
all_reward = c_[all_reward, epi_reward]
return all_action, all_reward, epsilon
if in_cloud:
import cloud
job_id = cloud.call(run, _high_cpu=False)
result = cloud.result(job_id)
all_action, all_reward, epsilon = result
else:
all_action, all_reward, epsilon = run()
return all_action, all_reward, epsilon | python | def run_experiment(experiment, roleouts, episodes, in_cloud=False,
dynProfile=None):
""" Runs the given experiment and returns the results.
"""
def run():
if dynProfile is None:
maxsteps = len(experiment.profile) # episode length
else:
maxsteps = dynProfile.shape[1]
na = len(experiment.agents)
ni = roleouts * episodes * maxsteps
all_action = zeros((na, 0))
all_reward = zeros((na, 0))
epsilon = zeros((na, ni)) # exploration rate
# Converts to action vector in percentage markup values.
vmarkup = vectorize(get_markup)
for roleout in range(roleouts):
if dynProfile is not None:
# Apply new load profile before each roleout (week).
i = roleout * episodes # index of first profile value
experiment.profile = dynProfile[i:i + episodes, :]
# print "PROFILE:", experiment.profile, episodes
experiment.doEpisodes(episodes) # number of samples per learning step
nei = episodes * maxsteps # num interactions per roleout
epi_action = zeros((0, nei))
epi_reward = zeros((0, nei))
for i, (task, agent) in \
enumerate(zip(experiment.tasks, experiment.agents)):
action = copy(agent.history["action"])
reward = copy(agent.history["reward"])
for j in range(nei):
if isinstance(agent.learner, DirectSearchLearner):
action[j, :] = task.denormalize(action[j, :])
k = nei * roleout
epsilon[i, k:k + nei] = agent.learner.explorer.sigma[0]
elif isinstance(agent.learner, ValueBasedLearner):
action[j, :] = vmarkup(action[j, :], task)
k = nei * roleout
epsilon[i, k:k + nei] = agent.learner.explorer.epsilon
else:
action = vmarkup(action, task)
# FIXME: Only stores action[0] for all interactions.
epi_action = c_[epi_action.T, action[:, 0].flatten()].T
epi_reward = c_[epi_reward.T, reward.flatten()].T
if hasattr(agent, "module"):
print "PARAMS:", agent.module.params
agent.learn()
agent.reset()
all_action = c_[all_action, epi_action]
all_reward = c_[all_reward, epi_reward]
return all_action, all_reward, epsilon
if in_cloud:
import cloud
job_id = cloud.call(run, _high_cpu=False)
result = cloud.result(job_id)
all_action, all_reward, epsilon = result
else:
all_action, all_reward, epsilon = run()
return all_action, all_reward, epsilon | [
"def",
"run_experiment",
"(",
"experiment",
",",
"roleouts",
",",
"episodes",
",",
"in_cloud",
"=",
"False",
",",
"dynProfile",
"=",
"None",
")",
":",
"def",
"run",
"(",
")",
":",
"if",
"dynProfile",
"is",
"None",
":",
"maxsteps",
"=",
"len",
"(",
"experiment",
".",
"profile",
")",
"# episode length",
"else",
":",
"maxsteps",
"=",
"dynProfile",
".",
"shape",
"[",
"1",
"]",
"na",
"=",
"len",
"(",
"experiment",
".",
"agents",
")",
"ni",
"=",
"roleouts",
"*",
"episodes",
"*",
"maxsteps",
"all_action",
"=",
"zeros",
"(",
"(",
"na",
",",
"0",
")",
")",
"all_reward",
"=",
"zeros",
"(",
"(",
"na",
",",
"0",
")",
")",
"epsilon",
"=",
"zeros",
"(",
"(",
"na",
",",
"ni",
")",
")",
"# exploration rate",
"# Converts to action vector in percentage markup values.",
"vmarkup",
"=",
"vectorize",
"(",
"get_markup",
")",
"for",
"roleout",
"in",
"range",
"(",
"roleouts",
")",
":",
"if",
"dynProfile",
"is",
"not",
"None",
":",
"# Apply new load profile before each roleout (week).",
"i",
"=",
"roleout",
"*",
"episodes",
"# index of first profile value",
"experiment",
".",
"profile",
"=",
"dynProfile",
"[",
"i",
":",
"i",
"+",
"episodes",
",",
":",
"]",
"# print \"PROFILE:\", experiment.profile, episodes",
"experiment",
".",
"doEpisodes",
"(",
"episodes",
")",
"# number of samples per learning step",
"nei",
"=",
"episodes",
"*",
"maxsteps",
"# num interactions per role",
"epi_action",
"=",
"zeros",
"(",
"(",
"0",
",",
"nei",
")",
")",
"epi_reward",
"=",
"zeros",
"(",
"(",
"0",
",",
"nei",
")",
")",
"for",
"i",
",",
"(",
"task",
",",
"agent",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"experiment",
".",
"tasks",
",",
"experiment",
".",
"agents",
")",
")",
":",
"action",
"=",
"copy",
"(",
"agent",
".",
"history",
"[",
"\"action\"",
"]",
")",
"reward",
"=",
"copy",
"(",
"agent",
".",
"history",
"[",
"\"reward\"",
"]",
")",
"for",
"j",
"in",
"range",
"(",
"nei",
")",
":",
"if",
"isinstance",
"(",
"agent",
".",
"learner",
",",
"DirectSearchLearner",
")",
":",
"action",
"[",
"j",
",",
":",
"]",
"=",
"task",
".",
"denormalize",
"(",
"action",
"[",
"j",
",",
":",
"]",
")",
"k",
"=",
"nei",
"*",
"roleout",
"epsilon",
"[",
"i",
",",
"k",
":",
"k",
"+",
"nei",
"]",
"=",
"agent",
".",
"learner",
".",
"explorer",
".",
"sigma",
"[",
"0",
"]",
"elif",
"isinstance",
"(",
"agent",
".",
"learner",
",",
"ValueBasedLearner",
")",
":",
"action",
"[",
"j",
",",
":",
"]",
"=",
"vmarkup",
"(",
"action",
"[",
"j",
",",
":",
"]",
",",
"task",
")",
"k",
"=",
"nei",
"*",
"roleout",
"epsilon",
"[",
"i",
",",
"k",
":",
"k",
"+",
"nei",
"]",
"=",
"agent",
".",
"learner",
".",
"explorer",
".",
"epsilon",
"else",
":",
"action",
"=",
"vmarkup",
"(",
"action",
",",
"task",
")",
"# FIXME: Only stores action[0] for all interactions.",
"epi_action",
"=",
"c_",
"[",
"epi_action",
".",
"T",
",",
"action",
"[",
":",
",",
"0",
"]",
".",
"flatten",
"(",
")",
"]",
".",
"T",
"epi_reward",
"=",
"c_",
"[",
"epi_reward",
".",
"T",
",",
"reward",
".",
"flatten",
"(",
")",
"]",
".",
"T",
"if",
"hasattr",
"(",
"agent",
",",
"\"module\"",
")",
":",
"print",
"\"PARAMS:\"",
",",
"agent",
".",
"module",
".",
"params",
"agent",
".",
"learn",
"(",
")",
"agent",
".",
"reset",
"(",
")",
"all_action",
"=",
"c_",
"[",
"all_action",
",",
"epi_action",
"]",
"all_reward",
"=",
"c_",
"[",
"all_reward",
",",
"epi_reward",
"]",
"return",
"all_action",
",",
"all_reward",
",",
"epsilon",
"if",
"in_cloud",
":",
"import",
"cloud",
"job_id",
"=",
"cloud",
".",
"call",
"(",
"run",
",",
"_high_cpu",
"=",
"False",
")",
"result",
"=",
"cloud",
".",
"result",
"(",
"job_id",
")",
"all_action",
",",
"all_reward",
",",
"epsilon",
"=",
"result",
"else",
":",
"all_action",
",",
"all_reward",
",",
"epsilon",
"=",
"run",
"(",
")",
"return",
"all_action",
",",
"all_reward",
",",
"epsilon"
] | Runs the given experiment and returns the results. | [
"Runs",
"the",
"given",
"experiment",
"and",
"returns",
"the",
"results",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L241-L314 | train |
rwl/pylon | examples/pyreto/thesis/common.py | get_full_year | def get_full_year():
""" Returns percentages of peak load for all hours of the year.
@return:
Numpy array of doubles with length 8736.
"""
weekly = get_weekly()
daily = get_daily()
hourly_winter_wkdy, hourly_winter_wknd = get_winter_hourly()
hourly_summer_wkdy, hourly_summer_wknd = get_summer_hourly()
hourly_spring_autumn_wkdy, hourly_spring_autumn_wknd = \
get_spring_autumn_hourly()
fullyear = zeros(364 * 24)
c = 0
l = [(0, 7, hourly_winter_wkdy, hourly_winter_wknd),
(8, 16, hourly_spring_autumn_wkdy, hourly_spring_autumn_wknd),
(17, 29, hourly_summer_wkdy, hourly_summer_wknd),
(30, 42, hourly_spring_autumn_wkdy, hourly_spring_autumn_wknd),
(43, 51, hourly_winter_wkdy, hourly_winter_wknd)]
for start, end, wkdy, wknd in l:
for w in weekly[start:end + 1]:
for d in daily[:5]:
for h in wkdy:
fullyear[c] = w * (d / 100.0) * (h / 100.0)
c += 1
for d in daily[5:]:
for h in wknd:
fullyear[c] = w * (d / 100.0) * (h / 100.0)
c += 1
return fullyear | python | def get_full_year():
""" Returns percentages of peak load for all hours of the year.
@return:
Numpy array of doubles with length 8736.
"""
weekly = get_weekly()
daily = get_daily()
hourly_winter_wkdy, hourly_winter_wknd = get_winter_hourly()
hourly_summer_wkdy, hourly_summer_wknd = get_summer_hourly()
hourly_spring_autumn_wkdy, hourly_spring_autumn_wknd = \
get_spring_autumn_hourly()
fullyear = zeros(364 * 24)
c = 0
l = [(0, 7, hourly_winter_wkdy, hourly_winter_wknd),
(8, 16, hourly_spring_autumn_wkdy, hourly_spring_autumn_wknd),
(17, 29, hourly_summer_wkdy, hourly_summer_wknd),
(30, 42, hourly_spring_autumn_wkdy, hourly_spring_autumn_wknd),
(43, 51, hourly_winter_wkdy, hourly_winter_wknd)]
for start, end, wkdy, wknd in l:
for w in weekly[start:end + 1]:
for d in daily[:5]:
for h in wkdy:
fullyear[c] = w * (d / 100.0) * (h / 100.0)
c += 1
for d in daily[5:]:
for h in wknd:
fullyear[c] = w * (d / 100.0) * (h / 100.0)
c += 1
return fullyear | [
"def",
"get_full_year",
"(",
")",
":",
"weekly",
"=",
"get_weekly",
"(",
")",
"daily",
"=",
"get_daily",
"(",
")",
"hourly_winter_wkdy",
",",
"hourly_winter_wknd",
"=",
"get_winter_hourly",
"(",
")",
"hourly_summer_wkdy",
",",
"hourly_summer_wknd",
"=",
"get_summer_hourly",
"(",
")",
"hourly_spring_autumn_wkdy",
",",
"hourly_spring_autumn_wknd",
"=",
"get_spring_autumn_hourly",
"(",
")",
"fullyear",
"=",
"zeros",
"(",
"364",
"*",
"24",
")",
"c",
"=",
"0",
"l",
"=",
"[",
"(",
"0",
",",
"7",
",",
"hourly_winter_wkdy",
",",
"hourly_winter_wknd",
")",
",",
"(",
"8",
",",
"16",
",",
"hourly_spring_autumn_wkdy",
",",
"hourly_spring_autumn_wknd",
")",
",",
"(",
"17",
",",
"29",
",",
"hourly_summer_wkdy",
",",
"hourly_summer_wknd",
")",
",",
"(",
"30",
",",
"42",
",",
"hourly_spring_autumn_wkdy",
",",
"hourly_spring_autumn_wknd",
")",
",",
"(",
"43",
",",
"51",
",",
"hourly_winter_wkdy",
",",
"hourly_winter_wknd",
")",
"]",
"for",
"start",
",",
"end",
",",
"wkdy",
",",
"wknd",
"in",
"l",
":",
"for",
"w",
"in",
"weekly",
"[",
"start",
":",
"end",
"+",
"1",
"]",
":",
"for",
"d",
"in",
"daily",
"[",
":",
"5",
"]",
":",
"for",
"h",
"in",
"wkdy",
":",
"fullyear",
"[",
"c",
"]",
"=",
"w",
"*",
"(",
"d",
"/",
"100.0",
")",
"*",
"(",
"h",
"/",
"100.0",
")",
"c",
"+=",
"1",
"for",
"d",
"in",
"daily",
"[",
"5",
":",
"]",
":",
"for",
"h",
"in",
"wknd",
":",
"fullyear",
"[",
"c",
"]",
"=",
"w",
"*",
"(",
"d",
"/",
"100.0",
")",
"*",
"(",
"h",
"/",
"100.0",
")",
"c",
"+=",
"1",
"return",
"fullyear"
] | Returns percentages of peak load for all hours of the year.
@return:
Numpy array of doubles with length 8736. | [
"Returns",
"percentages",
"of",
"peak",
"load",
"for",
"all",
"hours",
"of",
"the",
"year",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L426-L457 | train |
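Each hour in get_full_year composes three levels of the load model: the weekly figure is already a percentage of annual peak, while the daily and hourly figures are percentages of the level above them, hence the two /100.0 scalings. A worked instance:

w = 90.0   # week at 90% of annual peak
d = 95.0   # weekday at 95% of that week's peak
h = 80.0   # hour at 80% of that day's peak
print(w * (d / 100.0) * (h / 100.0))  # 68.4, i.e. 68.4% of annual peak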
rwl/pylon | examples/pyreto/thesis/common.py | get_all_days | def get_all_days():
""" Returns percentages of peak load for all days of the year.
Data from the IEEE RTS.
"""
weekly = get_weekly()
daily = get_daily()
return [w * (d / 100.0) for w in weekly for d in daily] | python | def get_all_days():
""" Returns percentages of peak load for all days of the year.
Data from the IEEE RTS.
"""
weekly = get_weekly()
daily = get_daily()
return [w * (d / 100.0) for w in weekly for d in daily] | [
"def",
"get_all_days",
"(",
")",
":",
"weekly",
"=",
"get_weekly",
"(",
")",
"daily",
"=",
"get_daily",
"(",
")",
"return",
"[",
"w",
"*",
"(",
"d",
"/",
"100.0",
")",
"for",
"w",
"in",
"weekly",
"for",
"d",
"in",
"daily",
"]"
] | Returns percentages of peak load for all days of the year.
Data from the IEEE RTS. | [
"Returns",
"percentages",
"of",
"peak",
"load",
"for",
"all",
"days",
"of",
"the",
"year",
".",
"Data",
"from",
"the",
"IEEE",
"RTS",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L460-L467 | train |
rwl/pylon | examples/pyreto/thesis/ex5_1.py | get_q_experiment | def get_q_experiment(case, minor=1):
""" Returns an experiment that uses Q-learning.
"""
gen = case.generators
profile = array([1.0])
maxSteps = len(profile)
if minor == 1:
alpha = 0.3 # Learning rate.
gamma = 0.99 # Discount factor
# The closer epsilon gets to 0, the more greedy and less explorative.
epsilon = 0.9
decay = 0.97
tau = 150.0 # Boltzmann temperature.
qlambda = 0.9
elif minor == 2:
alpha = 0.1 # Learning rate.
gamma = 0.99 # Discount factor
# The closer epsilon gets to 0, the more greedy and less explorative.
epsilon = 0.9
decay = 0.99
tau = 150.0 # Boltzmann temperature.
qlambda = 0.9
else:
raise ValueError
market = pyreto.SmartMarket(case, priceCap=cap, decommit=decommit,
auctionType=auctionType)
experiment = pyreto.continuous.MarketExperiment([], [], market, profile)
for g in gen[0:2]:
learner = Q(alpha, gamma)
# learner = QLambda(alpha, gamma, qlambda)
# learner = SARSA(alpha, gamma)
learner.explorer.epsilon = epsilon
learner.explorer.decay = decay
# learner.explorer = BoltzmannExplorer(tau, decay)
task, agent = get_discrete_task_agent([g], market, nStates, nOffer,
markups, withholds, maxSteps, learner)
experiment.tasks.append(task)
experiment.agents.append(agent)
# Passive agent.
task, agent = get_zero_task_agent(gen[2:3], market, nOffer, maxSteps)
experiment.tasks.append(task)
experiment.agents.append(agent)
return experiment | python | def get_q_experiment(case, minor=1):
""" Returns an experiment that uses Q-learning.
"""
gen = case.generators
profile = array([1.0])
maxSteps = len(profile)
if minor == 1:
alpha = 0.3 # Learning rate.
gamma = 0.99 # Discount factor
# The closer epsilon gets to 0, the more greedy and less explorative.
epsilon = 0.9
decay = 0.97
tau = 150.0 # Boltzmann temperature.
qlambda = 0.9
elif minor == 2:
alpha = 0.1 # Learning rate.
gamma = 0.99 # Discount factor
# The closer epsilon gets to 0, the more greedy and less explorative.
epsilon = 0.9
decay = 0.99
tau = 150.0 # Boltzmann temperature.
qlambda = 0.9
else:
raise ValueError
market = pyreto.SmartMarket(case, priceCap=cap, decommit=decommit,
auctionType=auctionType)
experiment = pyreto.continuous.MarketExperiment([], [], market, profile)
for g in gen[0:2]:
learner = Q(alpha, gamma)
# learner = QLambda(alpha, gamma, qlambda)
# learner = SARSA(alpha, gamma)
learner.explorer.epsilon = epsilon
learner.explorer.decay = decay
# learner.explorer = BoltzmannExplorer(tau, decay)
task, agent = get_discrete_task_agent([g], market, nStates, nOffer,
markups, withholds, maxSteps, learner)
experiment.tasks.append(task)
experiment.agents.append(agent)
# Passive agent.
task, agent = get_zero_task_agent(gen[2:3], market, nOffer, maxSteps)
experiment.tasks.append(task)
experiment.agents.append(agent)
return experiment | [
"def",
"get_q_experiment",
"(",
"case",
",",
"minor",
"=",
"1",
")",
":",
"gen",
"=",
"case",
".",
"generators",
"profile",
"=",
"array",
"(",
"[",
"1.0",
"]",
")",
"maxSteps",
"=",
"len",
"(",
"profile",
")",
"if",
"minor",
"==",
"1",
":",
"alpha",
"=",
"0.3",
"# Learning rate.",
"gamma",
"=",
"0.99",
"# Discount factor",
"# The closer epsilon gets to 0, the more greedy and less explorative.",
"epsilon",
"=",
"0.9",
"decay",
"=",
"0.97",
"tau",
"=",
"150.0",
"# Boltzmann temperature.",
"qlambda",
"=",
"0.9",
"elif",
"minor",
"==",
"2",
":",
"alpha",
"=",
"0.1",
"# Learning rate.",
"gamma",
"=",
"0.99",
"# Discount factor",
"# The closer epsilon gets to 0, the more greedy and less explorative.",
"epsilon",
"=",
"0.9",
"decay",
"=",
"0.99",
"tau",
"=",
"150.0",
"# Boltzmann temperature.",
"qlambda",
"=",
"0.9",
"else",
":",
"raise",
"ValueError",
"market",
"=",
"pyreto",
".",
"SmartMarket",
"(",
"case",
",",
"priceCap",
"=",
"cap",
",",
"decommit",
"=",
"decommit",
",",
"auctionType",
"=",
"auctionType",
")",
"experiment",
"=",
"pyreto",
".",
"continuous",
".",
"MarketExperiment",
"(",
"[",
"]",
",",
"[",
"]",
",",
"market",
",",
"profile",
")",
"for",
"g",
"in",
"gen",
"[",
"0",
":",
"2",
"]",
":",
"learner",
"=",
"Q",
"(",
"alpha",
",",
"gamma",
")",
"# learner = QLambda(alpha, gamma, qlambda)",
"# learner = SARSA(alpha, gamma)",
"learner",
".",
"explorer",
".",
"epsilon",
"=",
"epsilon",
"learner",
".",
"explorer",
".",
"decay",
"=",
"decay",
"# learner.explorer = BoltzmannExplorer(tau, decay)",
"task",
",",
"agent",
"=",
"get_discrete_task_agent",
"(",
"[",
"g",
"]",
",",
"market",
",",
"nStates",
",",
"nOffer",
",",
"markups",
",",
"withholds",
",",
"maxSteps",
",",
"learner",
")",
"experiment",
".",
"tasks",
".",
"append",
"(",
"task",
")",
"experiment",
".",
"agents",
".",
"append",
"(",
"agent",
")",
"# Passive agent.",
"task",
",",
"agent",
"=",
"get_zero_task_agent",
"(",
"gen",
"[",
"2",
":",
"3",
"]",
",",
"market",
",",
"nOffer",
",",
"maxSteps",
")",
"experiment",
".",
"tasks",
".",
"append",
"(",
"task",
")",
"experiment",
".",
"agents",
".",
"append",
"(",
"agent",
")",
"return",
"experiment"
] | Returns an experiment that uses Q-learning. | [
"Returns",
"an",
"experiment",
"that",
"uses",
"Q",
"-",
"learning",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/ex5_1.py#L79-L133 | train |
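With epsilon-greedy exploration, PyBrain's EpsilonGreedyExplorer multiplies epsilon by decay on each step, so the minor == 1 settings above cool exploration much faster than minor == 2. The two schedules after 100 steps:

for decay in (0.97, 0.99):  # minor 1 vs minor 2
    epsilon = 0.9
    for _ in range(100):
        epsilon *= decay
    print(decay, round(epsilon, 4))  # ~0.0428 vs ~0.3294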
rwl/pylon | pylon/generator.py | Generator.q_limited | def q_limited(self):
""" Is the machine at it's limit of reactive power?
"""
if (self.q >= self.q_max) or (self.q <= self.q_min):
return True
else:
return False | python | def q_limited(self):
""" Is the machine at it's limit of reactive power?
"""
if (self.q >= self.q_max) or (self.q <= self.q_min):
return True
else:
return False | [
"def",
"q_limited",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"q",
">=",
"self",
".",
"q_max",
")",
"or",
"(",
"self",
".",
"q",
"<=",
"self",
".",
"q_min",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | Is the machine at its limit of reactive power? | [
"Is",
"the",
"machine",
"at",
"it",
"s",
"limit",
"of",
"reactive",
"power?"
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L156-L162 | train |
rwl/pylon | pylon/generator.py | Generator.total_cost | def total_cost(self, p=None, p_cost=None, pcost_model=None):
""" Computes total cost for the generator at the given output level.
"""
p = self.p if p is None else p
p_cost = self.p_cost if p_cost is None else p_cost
pcost_model = self.pcost_model if pcost_model is None else pcost_model
p = 0.0 if not self.online else p
if pcost_model == PW_LINEAR:
n_segments = len(p_cost) - 1
# Iterate over the piece-wise linear segments.
for i in range(n_segments):
x1, y1 = p_cost[i]
x2, y2 = p_cost[i + 1]
m = (y2 - y1) / (x2 - x1)
c = y1 - m * x1
if x1 <= p <= x2:
result = m*p + c
break
else:
# print "TOTC:", self.name, p, self.p_max, p_cost
# raise ValueError, "Value [%f] outwith pwl cost curve." % p
# Use the last segment for values outwith the cost curve.
logger.error("Value [%f] outside pwl cost curve [%s]." %
(p, p_cost[-1][0]))
result = m*p + c
elif pcost_model == POLYNOMIAL:
# result = p_cost[-1]
# for i in range(1, len(p_cost)):
# result += p_cost[-(i + 1)] * p**i
result = polyval(p_cost, p)
else:
raise ValueError
if self.is_load:
return -result
else:
return result | python | def total_cost(self, p=None, p_cost=None, pcost_model=None):
""" Computes total cost for the generator at the given output level.
"""
p = self.p if p is None else p
p_cost = self.p_cost if p_cost is None else p_cost
pcost_model = self.pcost_model if pcost_model is None else pcost_model
p = 0.0 if not self.online else p
if pcost_model == PW_LINEAR:
n_segments = len(p_cost) - 1
# Iterate over the piece-wise linear segments.
for i in range(n_segments):
x1, y1 = p_cost[i]
x2, y2 = p_cost[i + 1]
m = (y2 - y1) / (x2 - x1)
c = y1 - m * x1
if x1 <= p <= x2:
result = m*p + c
break
else:
# print "TOTC:", self.name, p, self.p_max, p_cost
# raise ValueError, "Value [%f] outwith pwl cost curve." % p
# Use the last segment for values outwith the cost curve.
logger.error("Value [%f] outside pwl cost curve [%s]." %
(p, p_cost[-1][0]))
result = m*p + c
elif pcost_model == POLYNOMIAL:
# result = p_cost[-1]
# for i in range(1, len(p_cost)):
# result += p_cost[-(i + 1)] * p**i
result = polyval(p_cost, p)
else:
raise ValueError
if self.is_load:
return -result
else:
return result | [
"def",
"total_cost",
"(",
"self",
",",
"p",
"=",
"None",
",",
"p_cost",
"=",
"None",
",",
"pcost_model",
"=",
"None",
")",
":",
"p",
"=",
"self",
".",
"p",
"if",
"p",
"is",
"None",
"else",
"p",
"p_cost",
"=",
"self",
".",
"p_cost",
"if",
"p_cost",
"is",
"None",
"else",
"p_cost",
"pcost_model",
"=",
"self",
".",
"pcost_model",
"if",
"pcost_model",
"is",
"None",
"else",
"pcost_model",
"p",
"=",
"0.0",
"if",
"not",
"self",
".",
"online",
"else",
"p",
"if",
"pcost_model",
"==",
"PW_LINEAR",
":",
"n_segments",
"=",
"len",
"(",
"p_cost",
")",
"-",
"1",
"# Iterate over the piece-wise linear segments.",
"for",
"i",
"in",
"range",
"(",
"n_segments",
")",
":",
"x1",
",",
"y1",
"=",
"p_cost",
"[",
"i",
"]",
"x2",
",",
"y2",
"=",
"p_cost",
"[",
"i",
"+",
"1",
"]",
"m",
"=",
"(",
"y2",
"-",
"y1",
")",
"/",
"(",
"x2",
"-",
"x1",
")",
"c",
"=",
"y1",
"-",
"m",
"*",
"x1",
"if",
"x1",
"<=",
"p",
"<=",
"x2",
":",
"result",
"=",
"m",
"*",
"p",
"+",
"c",
"break",
"else",
":",
"# print \"TOTC:\", self.name, p, self.p_max, p_cost",
"# raise ValueError, \"Value [%f] outwith pwl cost curve.\" % p",
"# Use the last segment for values outwith the cost curve.",
"logger",
".",
"error",
"(",
"\"Value [%f] outside pwl cost curve [%s].\"",
"%",
"(",
"p",
",",
"p_cost",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
")",
")",
"result",
"=",
"m",
"*",
"p",
"+",
"c",
"elif",
"pcost_model",
"==",
"POLYNOMIAL",
":",
"# result = p_cost[-1]",
"# for i in range(1, len(p_cost)):",
"# result += p_cost[-(i + 1)] * p**i",
"result",
"=",
"polyval",
"(",
"p_cost",
",",
"p",
")",
"else",
":",
"raise",
"ValueError",
"if",
"self",
".",
"is_load",
":",
"return",
"-",
"result",
"else",
":",
"return",
"result"
] | Computes total cost for the generator at the given output level. | [
"Computes",
"total",
"cost",
"for",
"the",
"generator",
"at",
"the",
"given",
"output",
"level",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L180-L220 | train |
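The PW_LINEAR branch of total_cost interpolates on the segment containing p, and for values outside the curve it logs an error and extrapolates along the last segment. The same evaluation as a small standalone function:

def pwl_cost(points, p):
    # points is a list of (output, cost) breakpoints, as in p_cost
    for (x1, y1), (x2, y2) in zip(points, points[1:]):
        m = (y2 - y1) / (x2 - x1)
        c = y1 - m * x1
        if x1 <= p <= x2:
            return m * p + c
    return m * p + c  # outside the curve: extrapolate the last segment

points = [(0.0, 0.0), (50.0, 120.0), (100.0, 300.0)]
print(pwl_cost(points, 75.0))   # 210.0
print(pwl_cost(points, 120.0))  # 372.0 (extrapolated)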
rwl/pylon | pylon/generator.py | Generator.poly_to_pwl | def poly_to_pwl(self, n_points=4):
""" Sets the piece-wise linear cost attribute, converting the
polynomial cost variable by evaluating at zero and then at n_points
evenly spaced points between p_min and p_max.
"""
assert self.pcost_model == POLYNOMIAL
p_min = self.p_min
p_max = self.p_max
p_cost = []
if p_min > 0.0:
# Make the first segment go from the origin to p_min.
step = (p_max - p_min) / (n_points - 2)
y0 = self.total_cost(0.0)
p_cost.append((0.0, y0))
x = p_min
n_points -= 1
else:
step = (p_max - p_min) / (n_points - 1)
x = 0.0
for _ in range(n_points):
y = self.total_cost(x)
p_cost.append((x, y))
x += step
# Change the cost model and set the new cost.
self.pcost_model = PW_LINEAR
self.p_cost = p_cost | python | def poly_to_pwl(self, n_points=4):
""" Sets the piece-wise linear cost attribute, converting the
polynomial cost variable by evaluating at zero and then at n_points
evenly spaced points between p_min and p_max.
"""
assert self.pcost_model == POLYNOMIAL
p_min = self.p_min
p_max = self.p_max
p_cost = []
if p_min > 0.0:
# Make the first segment go from the origin to p_min.
step = (p_max - p_min) / (n_points - 2)
y0 = self.total_cost(0.0)
p_cost.append((0.0, y0))
x = p_min
n_points -= 1
else:
step = (p_max - p_min) / (n_points - 1)
x = 0.0
for _ in range(n_points):
y = self.total_cost(x)
p_cost.append((x, y))
x += step
# Change the cost model and set the new cost.
self.pcost_model = PW_LINEAR
self.p_cost = p_cost | [
"def",
"poly_to_pwl",
"(",
"self",
",",
"n_points",
"=",
"4",
")",
":",
"assert",
"self",
".",
"pcost_model",
"==",
"POLYNOMIAL",
"p_min",
"=",
"self",
".",
"p_min",
"p_max",
"=",
"self",
".",
"p_max",
"p_cost",
"=",
"[",
"]",
"if",
"p_min",
">",
"0.0",
":",
"# Make the first segment go from the origin to p_min.",
"step",
"=",
"(",
"p_max",
"-",
"p_min",
")",
"/",
"(",
"n_points",
"-",
"2",
")",
"y0",
"=",
"self",
".",
"total_cost",
"(",
"0.0",
")",
"p_cost",
".",
"append",
"(",
"(",
"0.0",
",",
"y0",
")",
")",
"x",
"=",
"p_min",
"n_points",
"-=",
"1",
"else",
":",
"step",
"=",
"(",
"p_max",
"-",
"p_min",
")",
"/",
"(",
"n_points",
"-",
"1",
")",
"x",
"=",
"0.0",
"for",
"_",
"in",
"range",
"(",
"n_points",
")",
":",
"y",
"=",
"self",
".",
"total_cost",
"(",
"x",
")",
"p_cost",
".",
"append",
"(",
"(",
"x",
",",
"y",
")",
")",
"x",
"+=",
"step",
"# Change the cost model and set the new cost.",
"self",
".",
"pcost_model",
"=",
"PW_LINEAR",
"self",
".",
"p_cost",
"=",
"p_cost"
] | Sets the piece-wise linear cost attribute, converting the
polynomial cost variable by evaluating at zero and then at n_points
evenly spaced points between p_min and p_max. | [
"Sets",
"the",
"piece",
"-",
"wise",
"linear",
"cost",
"attribute",
"converting",
"the",
"polynomial",
"cost",
"variable",
"by",
"evaluating",
"at",
"zero",
"and",
"then",
"at",
"n_points",
"evenly",
"spaced",
"points",
"between",
"p_min",
"and",
"p_max",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L278-L308 | train |
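poly_to_pwl simply samples the polynomial at evenly spaced output levels and stores (output, cost) pairs. The core of the conversion, using numpy.polyval with highest-order coefficients first as in total_cost (the quadratic term here is hypothetical; get_case6ww above uses purely linear-plus-constant costs):

from numpy import linspace, polyval

coeffs = (0.01, 4.0, 200.0)   # 0.01*p**2 + 4.0*p + 200.0
xs = linspace(0.0, 110.0, 4)  # 4 breakpoints -> 3 linear segments
p_cost = [(x, polyval(coeffs, x)) for x in xs]
# [(0.0, 200.0), (36.67, 360.11), (73.33, 547.11), (110.0, 761.0)]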
rwl/pylon | pylon/generator.py | Generator.get_offers | def get_offers(self, n_points=6):
""" Returns quantity and price offers created from the cost function.
"""
from pyreto.smart_market import Offer
qtyprc = self._get_qtyprc(n_points)
return [Offer(self, qty, prc) for qty, prc in qtyprc] | python | def get_offers(self, n_points=6):
""" Returns quantity and price offers created from the cost function.
"""
from pyreto.smart_market import Offer
qtyprc = self._get_qtyprc(n_points)
return [Offer(self, qty, prc) for qty, prc in qtyprc] | [
"def",
"get_offers",
"(",
"self",
",",
"n_points",
"=",
"6",
")",
":",
"from",
"pyreto",
".",
"smart_market",
"import",
"Offer",
"qtyprc",
"=",
"self",
".",
"_get_qtyprc",
"(",
"n_points",
")",
"return",
"[",
"Offer",
"(",
"self",
",",
"qty",
",",
"prc",
")",
"for",
"qty",
",",
"prc",
"in",
"qtyprc",
"]"
] | Returns quantity and price offers created from the cost function. | [
"Returns",
"quantity",
"and",
"price",
"offers",
"created",
"from",
"the",
"cost",
"function",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L311-L317 | train |
rwl/pylon | pylon/generator.py | Generator.get_bids | def get_bids(self, n_points=6):
""" Returns quantity and price bids created from the cost function.
"""
from pyreto.smart_market import Bid
qtyprc = self._get_qtyprc(n_points)
return [Bid(self, qty, prc) for qty, prc in qtyprc] | python | def get_bids(self, n_points=6):
""" Returns quantity and price bids created from the cost function.
"""
from pyreto.smart_market import Bid
qtyprc = self._get_qtyprc(n_points)
return [Bid(self, qty, prc) for qty, prc in qtyprc] | [
"def",
"get_bids",
"(",
"self",
",",
"n_points",
"=",
"6",
")",
":",
"from",
"pyreto",
".",
"smart_market",
"import",
"Bid",
"qtyprc",
"=",
"self",
".",
"_get_qtyprc",
"(",
"n_points",
")",
"return",
"[",
"Bid",
"(",
"self",
",",
"qty",
",",
"prc",
")",
"for",
"qty",
",",
"prc",
"in",
"qtyprc",
"]"
] | Returns quantity and price bids created from the cost function. | [
"Returns",
"quantity",
"and",
"price",
"bids",
"created",
"from",
"the",
"cost",
"function",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L320-L326 | train |
rwl/pylon | pylon/generator.py | Generator.offers_to_pwl | def offers_to_pwl(self, offers):
""" Updates the piece-wise linear total cost function using the given
offer blocks.
Based on off2case.m from MATPOWER by Ray Zimmerman, developed at PSERC
Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more info.
"""
assert not self.is_load
# Only apply offers associated with this generator.
g_offers = [offer for offer in offers if offer.generator == self]
# Fliter out zero quantity offers.
gt_zero = [offr for offr in g_offers if round(offr.quantity, 4) > 0.0]
# Ignore withheld offers.
valid = [offer for offer in gt_zero if not offer.withheld]
p_offers = [v for v in valid if not v.reactive]
q_offers = [v for v in valid if v.reactive]
if p_offers:
self.p_cost = self._offbids_to_points(p_offers)
self.pcost_model = PW_LINEAR
self.online = True
else:
self.p_cost = [(0.0, 0.0), (self.p_max, 0.0)]
self.pcost_model = PW_LINEAR
if q_offers:
# Dispatch at zero real power without shutting down
# if capacity offered for reactive power.
self.p_min = 0.0
self.p_max = 0.0
self.online = True
else:
self.online = False
if q_offers:
self.q_cost = self._offbids_to_points(q_offers)
self.qcost_model = PW_LINEAR
else:
self.q_cost = None#[(0.0, 0.0), (self.q_max, 0.0)]
self.qcost_model = PW_LINEAR
if not len(p_offers) and not len(q_offers):
logger.info("No valid offers for generator [%s], shutting down." %
self.name)
self.online = False
self._adjust_limits() | python | def offers_to_pwl(self, offers):
""" Updates the piece-wise linear total cost function using the given
offer blocks.
Based on off2case.m from MATPOWER by Ray Zimmerman, developed at PSERC
Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more info.
"""
assert not self.is_load
# Only apply offers associated with this generator.
g_offers = [offer for offer in offers if offer.generator == self]
# Filter out zero quantity offers.
gt_zero = [offr for offr in g_offers if round(offr.quantity, 4) > 0.0]
# Ignore withheld offers.
valid = [offer for offer in gt_zero if not offer.withheld]
p_offers = [v for v in valid if not v.reactive]
q_offers = [v for v in valid if v.reactive]
if p_offers:
self.p_cost = self._offbids_to_points(p_offers)
self.pcost_model = PW_LINEAR
self.online = True
else:
self.p_cost = [(0.0, 0.0), (self.p_max, 0.0)]
self.pcost_model = PW_LINEAR
if q_offers:
# Dispatch at zero real power without shutting down
# if capacity offered for reactive power.
self.p_min = 0.0
self.p_max = 0.0
self.online = True
else:
self.online = False
if q_offers:
self.q_cost = self._offbids_to_points(q_offers)
self.qcost_model = PW_LINEAR
else:
self.q_cost = None#[(0.0, 0.0), (self.q_max, 0.0)]
self.qcost_model = PW_LINEAR
if not len(p_offers) and not len(q_offers):
logger.info("No valid offers for generator [%s], shutting down." %
self.name)
self.online = False
self._adjust_limits() | [
"def",
"offers_to_pwl",
"(",
"self",
",",
"offers",
")",
":",
"assert",
"not",
"self",
".",
"is_load",
"# Only apply offers associated with this generator.",
"g_offers",
"=",
"[",
"offer",
"for",
"offer",
"in",
"offers",
"if",
"offer",
".",
"generator",
"==",
"self",
"]",
"# Fliter out zero quantity offers.",
"gt_zero",
"=",
"[",
"offr",
"for",
"offr",
"in",
"g_offers",
"if",
"round",
"(",
"offr",
".",
"quantity",
",",
"4",
")",
">",
"0.0",
"]",
"# Ignore withheld offers.",
"valid",
"=",
"[",
"offer",
"for",
"offer",
"in",
"gt_zero",
"if",
"not",
"offer",
".",
"withheld",
"]",
"p_offers",
"=",
"[",
"v",
"for",
"v",
"in",
"valid",
"if",
"not",
"v",
".",
"reactive",
"]",
"q_offers",
"=",
"[",
"v",
"for",
"v",
"in",
"valid",
"if",
"v",
".",
"reactive",
"]",
"if",
"p_offers",
":",
"self",
".",
"p_cost",
"=",
"self",
".",
"_offbids_to_points",
"(",
"p_offers",
")",
"self",
".",
"pcost_model",
"=",
"PW_LINEAR",
"self",
".",
"online",
"=",
"True",
"else",
":",
"self",
".",
"p_cost",
"=",
"[",
"(",
"0.0",
",",
"0.0",
")",
",",
"(",
"self",
".",
"p_max",
",",
"0.0",
")",
"]",
"self",
".",
"pcost_model",
"=",
"PW_LINEAR",
"if",
"q_offers",
":",
"# Dispatch at zero real power without shutting down",
"# if capacity offered for reactive power.",
"self",
".",
"p_min",
"=",
"0.0",
"self",
".",
"p_max",
"=",
"0.0",
"self",
".",
"online",
"=",
"True",
"else",
":",
"self",
".",
"online",
"=",
"False",
"if",
"q_offers",
":",
"self",
".",
"q_cost",
"=",
"self",
".",
"_offbids_to_points",
"(",
"q_offers",
")",
"self",
".",
"qcost_model",
"=",
"PW_LINEAR",
"else",
":",
"self",
".",
"q_cost",
"=",
"None",
"#[(0.0, 0.0), (self.q_max, 0.0)]",
"self",
".",
"qcost_model",
"=",
"PW_LINEAR",
"if",
"not",
"len",
"(",
"p_offers",
")",
"and",
"not",
"len",
"(",
"q_offers",
")",
":",
"logger",
".",
"info",
"(",
"\"No valid offers for generator [%s], shutting down.\"",
"%",
"self",
".",
"name",
")",
"self",
".",
"online",
"=",
"False",
"self",
".",
"_adjust_limits",
"(",
")"
] | Updates the piece-wise linear total cost function using the given
offer blocks.
Based on off2case.m from MATPOWER by Ray Zimmerman, developed at PSERC
Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more info. | [
"Updates",
"the",
"piece",
"-",
"wise",
"linear",
"total",
"cost",
"function",
"using",
"the",
"given",
"offer",
"blocks",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L420-L466 | train |
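`offers_to_pwl` delegates the point construction to `_offbids_to_points`, which is not shown in this record. A hedged sketch of what such a helper could do — accumulate price-sorted blocks into (MW, $) breakpoints of a piecewise-linear total cost; the sign convention for bids is an assumption:

```python
# Sketch only: the real _offbids_to_points is not shown in this record.
def offbids_to_points(offbids, is_bid=False):
    sign = -1.0 if is_bid else 1.0  # assumption: dispatchable loads use negative P
    points = [(0.0, 0.0)]
    q_total = c_total = 0.0
    for ob in sorted(offbids, key=lambda ob: ob.price):
        q_total += ob.quantity             # cumulative quantity (MW)
        c_total += ob.quantity * ob.price  # cumulative cost ($)
        points.append((sign * q_total, sign * c_total))
    return points

class Block(object):
    def __init__(self, quantity, price):
        self.quantity, self.price = quantity, price

print(offbids_to_points([Block(10.0, 12.5), Block(10.0, 14.0)]))
# [(0.0, 0.0), (10.0, 125.0), (20.0, 265.0)]
```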
rwl/pylon | pylon/generator.py | Generator.bids_to_pwl | def bids_to_pwl(self, bids):
""" Updates the piece-wise linear total cost function using the given
bid blocks.
Based on off2case.m from MATPOWER by Ray Zimmerman, developed at PSERC
Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more info.
"""
assert self.is_load
# Apply only those bids associated with this dispatchable load.
vl_bids = [bid for bid in bids if bid.vLoad == self]
# Filter out zero quantity bids.
gt_zero = [bid for bid in vl_bids if round(bid.quantity, 4) > 0.0]
# Ignore withheld bids.
valid_bids = [bid for bid in gt_zero if not bid.withheld]
p_bids = [v for v in valid_bids if not v.reactive]
q_bids = [v for v in valid_bids if v.reactive]
if p_bids:
self.p_cost = self._offbids_to_points(p_bids, True)
self.pcost_model = PW_LINEAR
self.online = True
else:
self.p_cost = [(0.0, 0.0), (self.p_max, 0.0)]
self.pcost_model = PW_LINEAR
logger.info("No valid active power bids for dispatchable load "
"[%s], shutting down." % self.name)
self.online = False
if q_bids:
self.q_cost = self._offbids_to_points(q_bids, True)
self.qcost_model = PW_LINEAR
self.online = True
else:
self.q_cost = [(self.q_min, 0.0), (0.0, 0.0), (self.q_max, 0.0)]
self.qcost_model = PW_LINEAR
# logger.info("No valid bids for dispatchable load, shutting down.")
# self.online = False
self._adjust_limits() | python | def bids_to_pwl(self, bids):
""" Updates the piece-wise linear total cost function using the given
bid blocks.
Based on off2case.m from MATPOWER by Ray Zimmerman, developed at PSERC
Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more info.
"""
assert self.is_load
# Apply only those bids associated with this dispatchable load.
vl_bids = [bid for bid in bids if bid.vLoad == self]
# Filter out zero quantity bids.
gt_zero = [bid for bid in vl_bids if round(bid.quantity, 4) > 0.0]
# Ignore withheld bids.
valid_bids = [bid for bid in gt_zero if not bid.withheld]
p_bids = [v for v in valid_bids if not v.reactive]
q_bids = [v for v in valid_bids if v.reactive]
if p_bids:
self.p_cost = self._offbids_to_points(p_bids, True)
self.pcost_model = PW_LINEAR
self.online = True
else:
self.p_cost = [(0.0, 0.0), (self.p_max, 0.0)]
self.pcost_model = PW_LINEAR
logger.info("No valid active power bids for dispatchable load "
"[%s], shutting down." % self.name)
self.online = False
if q_bids:
self.q_cost = self._offbids_to_points(q_bids, True)
self.qcost_model = PW_LINEAR
self.online = True
else:
self.q_cost = [(self.q_min, 0.0), (0.0, 0.0), (self.q_max, 0.0)]
self.qcost_model = PW_LINEAR
# logger.info("No valid bids for dispatchable load, shutting down.")
# self.online = False
self._adjust_limits() | [
"def",
"bids_to_pwl",
"(",
"self",
",",
"bids",
")",
":",
"assert",
"self",
".",
"is_load",
"# Apply only those bids associated with this dispatchable load.",
"vl_bids",
"=",
"[",
"bid",
"for",
"bid",
"in",
"bids",
"if",
"bid",
".",
"vLoad",
"==",
"self",
"]",
"# Filter out zero quantity bids.",
"gt_zero",
"=",
"[",
"bid",
"for",
"bid",
"in",
"vl_bids",
"if",
"round",
"(",
"bid",
".",
"quantity",
",",
"4",
")",
">",
"0.0",
"]",
"# Ignore withheld offers.",
"valid_bids",
"=",
"[",
"bid",
"for",
"bid",
"in",
"gt_zero",
"if",
"not",
"bid",
".",
"withheld",
"]",
"p_bids",
"=",
"[",
"v",
"for",
"v",
"in",
"valid_bids",
"if",
"not",
"v",
".",
"reactive",
"]",
"q_bids",
"=",
"[",
"v",
"for",
"v",
"in",
"valid_bids",
"if",
"v",
".",
"reactive",
"]",
"if",
"p_bids",
":",
"self",
".",
"p_cost",
"=",
"self",
".",
"_offbids_to_points",
"(",
"p_bids",
",",
"True",
")",
"self",
".",
"pcost_model",
"=",
"PW_LINEAR",
"self",
".",
"online",
"=",
"True",
"else",
":",
"self",
".",
"p_cost",
"=",
"[",
"(",
"0.0",
",",
"0.0",
")",
",",
"(",
"self",
".",
"p_max",
",",
"0.0",
")",
"]",
"self",
".",
"pcost_model",
"=",
"PW_LINEAR",
"logger",
".",
"info",
"(",
"\"No valid active power bids for dispatchable load \"",
"\"[%s], shutting down.\"",
"%",
"self",
".",
"name",
")",
"self",
".",
"online",
"=",
"False",
"if",
"q_bids",
":",
"self",
".",
"q_cost",
"=",
"self",
".",
"_offbids_to_points",
"(",
"q_bids",
",",
"True",
")",
"self",
".",
"qcost_model",
"=",
"PW_LINEAR",
"self",
".",
"online",
"=",
"True",
"else",
":",
"self",
".",
"q_cost",
"=",
"[",
"(",
"self",
".",
"q_min",
",",
"0.0",
")",
",",
"(",
"0.0",
",",
"0.0",
")",
",",
"(",
"self",
".",
"q_max",
",",
"0.0",
")",
"]",
"self",
".",
"qcost_model",
"=",
"PW_LINEAR",
"# logger.info(\"No valid bids for dispatchable load, shutting down.\")",
"# self.online = False",
"self",
".",
"_adjust_limits",
"(",
")"
] | Updates the piece-wise linear total cost function using the given
bid blocks.
Based on off2case.m from MATPOWER by Ray Zimmerman, developed at PSERC
Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more info. | [
"Updates",
"the",
"piece",
"-",
"wise",
"linear",
"total",
"cost",
"function",
"using",
"the",
"given",
"bid",
"blocks",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L469-L508 | train |
rwl/pylon | pylon/generator.py | Generator._adjust_limits | def _adjust_limits(self):
""" Sets the active power limits, 'p_max' and 'p_min', according to
the pwl cost function points.
"""
if not self.is_load:
# self.p_min = min([point[0] for point in self.p_cost])
self.p_max = max([point[0] for point in self.p_cost])
else:
p_min = min([point[0] for point in self.p_cost])
self.p_max = 0.0
self.q_min = self.q_min * p_min / self.p_min
self.q_max = self.q_max * p_min / self.p_min
self.p_min = p_min | python | def _adjust_limits(self):
""" Sets the active power limits, 'p_max' and 'p_min', according to
the pwl cost function points.
"""
if not self.is_load:
# self.p_min = min([point[0] for point in self.p_cost])
self.p_max = max([point[0] for point in self.p_cost])
else:
p_min = min([point[0] for point in self.p_cost])
self.p_max = 0.0
self.q_min = self.q_min * p_min / self.p_min
self.q_max = self.q_max * p_min / self.p_min
self.p_min = p_min | [
"def",
"_adjust_limits",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_load",
":",
"# self.p_min = min([point[0] for point in self.p_cost])",
"self",
".",
"p_max",
"=",
"max",
"(",
"[",
"point",
"[",
"0",
"]",
"for",
"point",
"in",
"self",
".",
"p_cost",
"]",
")",
"else",
":",
"p_min",
"=",
"min",
"(",
"[",
"point",
"[",
"0",
"]",
"for",
"point",
"in",
"self",
".",
"p_cost",
"]",
")",
"self",
".",
"p_max",
"=",
"0.0",
"self",
".",
"q_min",
"=",
"self",
".",
"q_min",
"*",
"p_min",
"/",
"self",
".",
"p_min",
"self",
".",
"q_max",
"=",
"self",
".",
"q_max",
"*",
"p_min",
"/",
"self",
".",
"p_min",
"self",
".",
"p_min",
"=",
"p_min"
] | Sets the active power limits, 'p_max' and 'p_min', according to
the pwl cost function points. | [
"Sets",
"the",
"active",
"power",
"limits",
"p_max",
"and",
"p_min",
"according",
"to",
"the",
"pwl",
"cost",
"function",
"points",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/generator.py#L537-L549 | train |
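A worked instance of the dispatchable-load branch above, with made-up numbers; note that the reactive limits are scaled before `p_min` is overwritten:

```python
p_cost = [(-30.0, -450.0), (0.0, 0.0)]       # pwl points: (MW, $)
p_min_old, q_min, q_max = -10.0, -5.0, 5.0   # limits before adjustment
p_min = min(point[0] for point in p_cost)    # -30.0
q_min = q_min * p_min / p_min_old            # -5 * 3 = -15.0
q_max = q_max * p_min / p_min_old            #  5 * 3 =  15.0
print(p_min, q_min, q_max)                   # -30.0 -15.0 15.0
```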
rwl/pylon | pyreto/continuous/environment.py | MarketEnvironment.indim | def indim(self):
""" The number of action values that the environment accepts.
"""
indim = self.numOffbids * len(self.generators)
if self.maxWithhold is not None:
return indim * 2
else:
return indim | python | def indim(self):
""" The number of action values that the environment accepts.
"""
indim = self.numOffbids * len(self.generators)
if self.maxWithhold is not None:
return indim * 2
else:
return indim | [
"def",
"indim",
"(",
"self",
")",
":",
"indim",
"=",
"self",
".",
"numOffbids",
"*",
"len",
"(",
"self",
".",
"generators",
")",
"if",
"self",
".",
"maxWithhold",
"is",
"not",
"None",
":",
"return",
"indim",
"*",
"2",
"else",
":",
"return",
"indim"
] | The number of action values that the environment accepts. | [
"The",
"number",
"of",
"action",
"values",
"that",
"the",
"environment",
"accepts",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pyreto/continuous/environment.py#L103-L111 | train |
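A quick check of the action-space arithmetic above, with made-up numbers:

```python
numOffbids, n_generators = 3, 2
print(numOffbids * n_generators)      # 6 action values when maxWithhold is None
print(numOffbids * n_generators * 2)  # 12 when withholding is also an action
```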
rwl/pylon | pyreto/continuous/environment.py | MarketEnvironment._getBusVoltageLambdaSensor | def _getBusVoltageLambdaSensor(self):
""" Returns an array of length nb where each value is the sum of the
Lagrangian multipliers on the upper and the negative of the Lagrangian
multipliers on the lower voltage limits. """
muVmin = array([b.mu_vmin for b in self.market.case.connected_buses])
muVmax = array([b.mu_vmax for b in self.market.case.connected_buses])
muVmin = -1.0 * muVmin
diff = muVmin + muVmax
return diff | python | def _getBusVoltageLambdaSensor(self):
""" Returns an array of length nb where each value is the sum of the
Lagrangian multipliers on the upper and the negative of the Lagrangian
multipliers on the lower voltage limits. """
muVmin = array([b.mu_vmin for b in self.market.case.connected_buses])
muVmax = array([b.mu_vmax for b in self.market.case.connected_buses])
muVmin = -1.0 * muVmin
diff = muVmin + muVmax
return diff | [
"def",
"_getBusVoltageLambdaSensor",
"(",
"self",
")",
":",
"muVmin",
"=",
"array",
"(",
"[",
"b",
".",
"mu_vmin",
"for",
"b",
"in",
"self",
".",
"market",
".",
"case",
".",
"connected_buses",
"]",
")",
"muVmax",
"=",
"array",
"(",
"[",
"b",
".",
"mu_vmax",
"for",
"b",
"in",
"self",
".",
"market",
".",
"case",
".",
"connected_buses",
"]",
")",
"muVmin",
"=",
"-",
"1.0",
"*",
"muVmin",
"diff",
"=",
"muVmin",
"+",
"muVmax",
"return",
"diff"
] | Returns an array of length nb where each value is the sum of the
Lagrangian multipliers on the upper and the negative of the Lagrangian
multipliers on the lower voltage limits. | [
"Returns",
"an",
"array",
"of",
"length",
"nb",
"where",
"each",
"value",
"is",
"the",
"sum",
"of",
"the",
"Lagrangian",
"multipliers",
"on",
"the",
"upper",
"and",
"the",
"negative",
"of",
"the",
"Lagrangian",
"multipliers",
"on",
"the",
"lower",
"voltage",
"limits",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pyreto/continuous/environment.py#L177-L185 | train |
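The same sum-of-multipliers computation on made-up data; a positive entry signals a binding upper limit, a negative one a binding lower limit:

```python
from numpy import array

mu_vmin = array([0.0, 2.5, 0.0])  # multipliers on lower voltage limits
mu_vmax = array([1.0, 0.0, 0.0])  # multipliers on upper voltage limits
print(-1.0 * mu_vmin + mu_vmax)   # [ 1.  -2.5  0. ]
```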
melizalab/libtfr | site_scons/site_tools/doxygen.py | DoxyfileParse | def DoxyfileParse(file_contents):
"""
Parse a Doxygen source file and return a dictionary of all the values.
Values will be strings and lists of strings.
"""
data = {}
import shlex
lex = shlex.shlex(instream = file_contents, posix = True)
lex.wordchars += "*+./-:"
lex.whitespace = lex.whitespace.replace("\n", "")
lex.escape = ""
lineno = lex.lineno
token = lex.get_token()
key = token # the first token should be a key
last_token = ""
key_token = False
next_key = False
new_data = True
def append_data(data, key, new_data, token):
if new_data or len(data[key]) == 0:
data[key].append(token)
else:
data[key][-1] += token
while token:
if token in ['\n']:
if last_token not in ['\\']:
key_token = True
elif token in ['\\']:
pass
elif key_token:
key = token
key_token = False
else:
if token == "+=":
if not data.has_key(key):
data[key] = list()
elif token == "=":
if key == "TAGFILES" and data.has_key(key):
append_data( data, key, False, "=" )
new_data=False
else:
data[key] = list()
else:
append_data( data, key, new_data, token )
new_data = True
last_token = token
token = lex.get_token()
if last_token == '\\' and token != '\n':
new_data = False
append_data( data, key, new_data, '\\' )
# compress lists of len 1 into single strings
for (k, v) in data.items():
if len(v) == 0:
data.pop(k)
# items in the following list will be kept as lists and not converted to strings
if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS", "TAGFILES"]:
continue
if len(v) == 1:
data[k] = v[0]
return data | python | def DoxyfileParse(file_contents):
"""
Parse a Doxygen source file and return a dictionary of all the values.
Values will be strings and lists of strings.
"""
data = {}
import shlex
lex = shlex.shlex(instream = file_contents, posix = True)
lex.wordchars += "*+./-:"
lex.whitespace = lex.whitespace.replace("\n", "")
lex.escape = ""
lineno = lex.lineno
token = lex.get_token()
key = token # the first token should be a key
last_token = ""
key_token = False
next_key = False
new_data = True
def append_data(data, key, new_data, token):
if new_data or len(data[key]) == 0:
data[key].append(token)
else:
data[key][-1] += token
while token:
if token in ['\n']:
if last_token not in ['\\']:
key_token = True
elif token in ['\\']:
pass
elif key_token:
key = token
key_token = False
else:
if token == "+=":
if not data.has_key(key):
data[key] = list()
elif token == "=":
if key == "TAGFILES" and data.has_key(key):
append_data( data, key, False, "=" )
new_data=False
else:
data[key] = list()
else:
append_data( data, key, new_data, token )
new_data = True
last_token = token
token = lex.get_token()
if last_token == '\\' and token != '\n':
new_data = False
append_data( data, key, new_data, '\\' )
# compress lists of len 1 into single strings
for (k, v) in data.items():
if len(v) == 0:
data.pop(k)
# items in the following list will be kept as lists and not converted to strings
if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS", "TAGFILES"]:
continue
if len(v) == 1:
data[k] = v[0]
return data | [
"def",
"DoxyfileParse",
"(",
"file_contents",
")",
":",
"data",
"=",
"{",
"}",
"import",
"shlex",
"lex",
"=",
"shlex",
".",
"shlex",
"(",
"instream",
"=",
"file_contents",
",",
"posix",
"=",
"True",
")",
"lex",
".",
"wordchars",
"+=",
"\"*+./-:\"",
"lex",
".",
"whitespace",
"=",
"lex",
".",
"whitespace",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
"lex",
".",
"escape",
"=",
"\"\"",
"lineno",
"=",
"lex",
".",
"lineno",
"token",
"=",
"lex",
".",
"get_token",
"(",
")",
"key",
"=",
"token",
"# the first token should be a key",
"last_token",
"=",
"\"\"",
"key_token",
"=",
"False",
"next_key",
"=",
"False",
"new_data",
"=",
"True",
"def",
"append_data",
"(",
"data",
",",
"key",
",",
"new_data",
",",
"token",
")",
":",
"if",
"new_data",
"or",
"len",
"(",
"data",
"[",
"key",
"]",
")",
"==",
"0",
":",
"data",
"[",
"key",
"]",
".",
"append",
"(",
"token",
")",
"else",
":",
"data",
"[",
"key",
"]",
"[",
"-",
"1",
"]",
"+=",
"token",
"while",
"token",
":",
"if",
"token",
"in",
"[",
"'\\n'",
"]",
":",
"if",
"last_token",
"not",
"in",
"[",
"'\\\\'",
"]",
":",
"key_token",
"=",
"True",
"elif",
"token",
"in",
"[",
"'\\\\'",
"]",
":",
"pass",
"elif",
"key_token",
":",
"key",
"=",
"token",
"key_token",
"=",
"False",
"else",
":",
"if",
"token",
"==",
"\"+=\"",
":",
"if",
"not",
"data",
".",
"has_key",
"(",
"key",
")",
":",
"data",
"[",
"key",
"]",
"=",
"list",
"(",
")",
"elif",
"token",
"==",
"\"=\"",
":",
"if",
"key",
"==",
"\"TAGFILES\"",
"and",
"data",
".",
"has_key",
"(",
"key",
")",
":",
"append_data",
"(",
"data",
",",
"key",
",",
"False",
",",
"\"=\"",
")",
"new_data",
"=",
"False",
"else",
":",
"data",
"[",
"key",
"]",
"=",
"list",
"(",
")",
"else",
":",
"append_data",
"(",
"data",
",",
"key",
",",
"new_data",
",",
"token",
")",
"new_data",
"=",
"True",
"last_token",
"=",
"token",
"token",
"=",
"lex",
".",
"get_token",
"(",
")",
"if",
"last_token",
"==",
"'\\\\'",
"and",
"token",
"!=",
"'\\n'",
":",
"new_data",
"=",
"False",
"append_data",
"(",
"data",
",",
"key",
",",
"new_data",
",",
"'\\\\'",
")",
"# compress lists of len 1 into single strings",
"for",
"(",
"k",
",",
"v",
")",
"in",
"data",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"v",
")",
"==",
"0",
":",
"data",
".",
"pop",
"(",
"k",
")",
"# items in the following list will be kept as lists and not converted to strings",
"if",
"k",
"in",
"[",
"\"INPUT\"",
",",
"\"FILE_PATTERNS\"",
",",
"\"EXCLUDE_PATTERNS\"",
",",
"\"TAGFILES\"",
"]",
":",
"continue",
"if",
"len",
"(",
"v",
")",
"==",
"1",
":",
"data",
"[",
"k",
"]",
"=",
"v",
"[",
"0",
"]",
"return",
"data"
] | Parse a Doxygen source file and return a dictionary of all the values.
Values will be strings and lists of strings. | [
"Parse",
"a",
"Doxygen",
"source",
"file",
"and",
"return",
"a",
"dictionary",
"of",
"all",
"the",
"values",
".",
"Values",
"will",
"be",
"strings",
"and",
"lists",
"of",
"strings",
"."
] | 9f7e7705793d258a0b205f185b20e3bbcda473da | https://github.com/melizalab/libtfr/blob/9f7e7705793d258a0b205f185b20e3bbcda473da/site_scons/site_tools/doxygen.py#L28-L97 | train |
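The parser above is Python 2 era (`dict.has_key`, builtin `reduce`). A minimal Python 3 illustration of the tokenisation step it relies on — newlines are removed from the `shlex` whitespace set so they come back as key-separator tokens:

```python
import shlex

contents = "INPUT = src include\nRECURSIVE = YES\n"
lex = shlex.shlex(contents, posix=True)
lex.wordchars += "*+./-:"                          # keep paths and globs whole
lex.whitespace = lex.whitespace.replace("\n", "")  # newlines become tokens
print(list(iter(lex.get_token, None)))
# ['INPUT', '=', 'src', 'include', '\n', 'RECURSIVE', '=', 'YES', '\n']
```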
melizalab/libtfr | site_scons/site_tools/doxygen.py | DoxySourceScan | def DoxySourceScan(node, env, path):
"""
Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
any files used to generate docs to the list of source files.
"""
default_file_patterns = [
'*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
'*.ipp', '*.i++', '*.inl', '*.h', '*.hh', '*.hxx', '*.hpp', '*.h++',
'*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
'*.py',
]
default_exclude_patterns = [
'*~',
]
sources = []
data = DoxyfileParse(node.get_contents())
if data.get("RECURSIVE", "NO") == "YES":
recursive = True
else:
recursive = False
file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
# We're running in the top-level directory, but the doxygen
# configuration file is in the same directory as node; this means
# that relative pathnames in node must be adjusted before they can
# go onto the sources list
conf_dir = os.path.dirname(str(node))
for node in data.get("INPUT", []):
if not os.path.isabs(node):
node = os.path.join(conf_dir, node)
if os.path.isfile(node):
sources.append(node)
elif os.path.isdir(node):
if recursive:
for root, dirs, files in os.walk(node):
for f in files:
filename = os.path.join(root, f)
pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
exclude_check = reduce(lambda x, y: x or fnmatch(filename, y), exclude_patterns, False)
if pattern_check and not exclude_check:
sources.append(filename)
else:
for pattern in file_patterns:
sources.extend(glob.glob("/".join([node, pattern])))
# Add tagfiles to the list of source files:
for node in data.get("TAGFILES", []):
file = node.split("=")[0]
if not os.path.isabs(file):
file = os.path.join(conf_dir, file)
sources.append(file)
# Add additional files to the list of source files:
def append_additional_source(option):
file = data.get(option, "")
if file != "":
if not os.path.isabs(file):
file = os.path.join(conf_dir, file)
if os.path.isfile(file):
sources.append(file)
append_additional_source("HTML_STYLESHEET")
append_additional_source("HTML_HEADER")
append_additional_source("HTML_FOOTER")
sources = map( lambda path: env.File(path), sources )
return sources | python | def DoxySourceScan(node, env, path):
"""
Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
any files used to generate docs to the list of source files.
"""
default_file_patterns = [
'*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
'*.ipp', '*.i++', '*.inl', '*.h', '*.hh', '*.hxx', '*.hpp', '*.h++',
'*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
'*.py',
]
default_exclude_patterns = [
'*~',
]
sources = []
data = DoxyfileParse(node.get_contents())
if data.get("RECURSIVE", "NO") == "YES":
recursive = True
else:
recursive = False
file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)
# We're running in the top-level directory, but the doxygen
# configuration file is in the same directory as node; this means
# that relative pathnames in node must be adjusted before they can
# go onto the sources list
conf_dir = os.path.dirname(str(node))
for node in data.get("INPUT", []):
if not os.path.isabs(node):
node = os.path.join(conf_dir, node)
if os.path.isfile(node):
sources.append(node)
elif os.path.isdir(node):
if recursive:
for root, dirs, files in os.walk(node):
for f in files:
filename = os.path.join(root, f)
pattern_check = reduce(lambda x, y: x or bool(fnmatch(filename, y)), file_patterns, False)
exclude_check = reduce(lambda x, y: x or fnmatch(filename, y), exclude_patterns, False)
if pattern_check and not exclude_check:
sources.append(filename)
else:
for pattern in file_patterns:
sources.extend(glob.glob("/".join([node, pattern])))
# Add tagfiles to the list of source files:
for node in data.get("TAGFILES", []):
file = node.split("=")[0]
if not os.path.isabs(file):
file = os.path.join(conf_dir, file)
sources.append(file)
# Add additional files to the list of source files:
def append_additional_source(option):
file = data.get(option, "")
if file != "":
if not os.path.isabs(file):
file = os.path.join(conf_dir, file)
if os.path.isfile(file):
sources.append(file)
append_additional_source("HTML_STYLESHEET")
append_additional_source("HTML_HEADER")
append_additional_source("HTML_FOOTER")
sources = map( lambda path: env.File(path), sources )
return sources | [
"def",
"DoxySourceScan",
"(",
"node",
",",
"env",
",",
"path",
")",
":",
"default_file_patterns",
"=",
"[",
"'*.c'",
",",
"'*.cc'",
",",
"'*.cxx'",
",",
"'*.cpp'",
",",
"'*.c++'",
",",
"'*.java'",
",",
"'*.ii'",
",",
"'*.ixx'",
",",
"'*.ipp'",
",",
"'*.i++'",
",",
"'*.inl'",
",",
"'*.h'",
",",
"'*.hh '",
",",
"'*.hxx'",
",",
"'*.hpp'",
",",
"'*.h++'",
",",
"'*.idl'",
",",
"'*.odl'",
",",
"'*.cs'",
",",
"'*.php'",
",",
"'*.php3'",
",",
"'*.inc'",
",",
"'*.m'",
",",
"'*.mm'",
",",
"'*.py'",
",",
"]",
"default_exclude_patterns",
"=",
"[",
"'*~'",
",",
"]",
"sources",
"=",
"[",
"]",
"data",
"=",
"DoxyfileParse",
"(",
"node",
".",
"get_contents",
"(",
")",
")",
"if",
"data",
".",
"get",
"(",
"\"RECURSIVE\"",
",",
"\"NO\"",
")",
"==",
"\"YES\"",
":",
"recursive",
"=",
"True",
"else",
":",
"recursive",
"=",
"False",
"file_patterns",
"=",
"data",
".",
"get",
"(",
"\"FILE_PATTERNS\"",
",",
"default_file_patterns",
")",
"exclude_patterns",
"=",
"data",
".",
"get",
"(",
"\"EXCLUDE_PATTERNS\"",
",",
"default_exclude_patterns",
")",
"# We're running in the top-level directory, but the doxygen",
"# configuration file is in the same directory as node; this means",
"# that relative pathnames in node must be adjusted before they can",
"# go onto the sources list",
"conf_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"str",
"(",
"node",
")",
")",
"for",
"node",
"in",
"data",
".",
"get",
"(",
"\"INPUT\"",
",",
"[",
"]",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"node",
")",
":",
"node",
"=",
"os",
".",
"path",
".",
"join",
"(",
"conf_dir",
",",
"node",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"node",
")",
":",
"sources",
".",
"append",
"(",
"node",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"node",
")",
":",
"if",
"recursive",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"node",
")",
":",
"for",
"f",
"in",
"files",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"f",
")",
"pattern_check",
"=",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"or",
"bool",
"(",
"fnmatch",
"(",
"filename",
",",
"y",
")",
")",
",",
"file_patterns",
",",
"False",
")",
"exclude_check",
"=",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"and",
"fnmatch",
"(",
"filename",
",",
"y",
")",
",",
"exclude_patterns",
",",
"True",
")",
"if",
"pattern_check",
"and",
"not",
"exclude_check",
":",
"sources",
".",
"append",
"(",
"filename",
")",
"else",
":",
"for",
"pattern",
"in",
"file_patterns",
":",
"sources",
".",
"extend",
"(",
"glob",
".",
"glob",
"(",
"\"/\"",
".",
"join",
"(",
"[",
"node",
",",
"pattern",
"]",
")",
")",
")",
"# Add tagfiles to the list of source files:",
"for",
"node",
"in",
"data",
".",
"get",
"(",
"\"TAGFILES\"",
",",
"[",
"]",
")",
":",
"file",
"=",
"node",
".",
"split",
"(",
"\"=\"",
")",
"[",
"0",
"]",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"file",
")",
":",
"file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"conf_dir",
",",
"file",
")",
"sources",
".",
"append",
"(",
"file",
")",
"# Add additional files to the list of source files:",
"def",
"append_additional_source",
"(",
"option",
")",
":",
"file",
"=",
"data",
".",
"get",
"(",
"option",
",",
"\"\"",
")",
"if",
"file",
"!=",
"\"\"",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"file",
")",
":",
"file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"conf_dir",
",",
"file",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file",
")",
":",
"sources",
".",
"append",
"(",
"file",
")",
"append_additional_source",
"(",
"\"HTML_STYLESHEET\"",
")",
"append_additional_source",
"(",
"\"HTML_HEADER\"",
")",
"append_additional_source",
"(",
"\"HTML_FOOTER\"",
")",
"sources",
"=",
"map",
"(",
"lambda",
"path",
":",
"env",
".",
"File",
"(",
"path",
")",
",",
"sources",
")",
"return",
"sources"
] | Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
any files used to generate docs to the list of source files. | [
"Doxygen",
"Doxyfile",
"source",
"scanner",
".",
"This",
"should",
"scan",
"the",
"Doxygen",
"file",
"and",
"add",
"any",
"files",
"used",
"to",
"generate",
"docs",
"to",
"the",
"list",
"of",
"source",
"files",
"."
] | 9f7e7705793d258a0b205f185b20e3bbcda473da | https://github.com/melizalab/libtfr/blob/9f7e7705793d258a0b205f185b20e3bbcda473da/site_scons/site_tools/doxygen.py#L99-L174 | train |
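The include/exclude test above, isolated on plain data; in Python 3, `any()` expresses the same reduce-over-`fnmatch` idea:

```python
from fnmatch import fnmatch

file_patterns = ["*.c", "*.h"]
exclude_patterns = ["*~", "test_*"]

def wanted(filename):
    pattern_check = any(fnmatch(filename, p) for p in file_patterns)
    exclude_check = any(fnmatch(filename, p) for p in exclude_patterns)
    return pattern_check and not exclude_check

print([f for f in ["a.c", "test_a.c", "notes.txt"] if wanted(f)])  # ['a.c']
```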
melizalab/libtfr | site_scons/site_tools/doxygen.py | DoxyEmitter | def DoxyEmitter(source, target, env):
"""Doxygen Doxyfile emitter"""
# possible output formats and their default values and output locations
output_formats = {
"HTML": ("YES", "html"),
"LATEX": ("YES", "latex"),
"RTF": ("NO", "rtf"),
"MAN": ("YES", "man"),
"XML": ("NO", "xml"),
}
data = DoxyfileParse(source[0].get_contents())
targets = []
out_dir = data.get("OUTPUT_DIRECTORY", ".")
if not os.path.isabs(out_dir):
conf_dir = os.path.dirname(str(source[0]))
out_dir = os.path.join(conf_dir, out_dir)
# add our output locations
for (k, v) in output_formats.items():
if data.get("GENERATE_" + k, v[0]) == "YES":
targets.append(env.Dir( os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))) )
# add the tag file if necessary:
tagfile = data.get("GENERATE_TAGFILE", "")
if tagfile != "":
if not os.path.isabs(tagfile):
conf_dir = os.path.dirname(str(source[0]))
tagfile = os.path.join(conf_dir, tagfile)
targets.append(env.File(tagfile))
# don't clobber targets
for node in targets:
env.Precious(node)
# set up cleaning stuff
for node in targets:
env.Clean(node, node)
return (targets, source) | python | def DoxyEmitter(source, target, env):
"""Doxygen Doxyfile emitter"""
# possible output formats and their default values and output locations
output_formats = {
"HTML": ("YES", "html"),
"LATEX": ("YES", "latex"),
"RTF": ("NO", "rtf"),
"MAN": ("YES", "man"),
"XML": ("NO", "xml"),
}
data = DoxyfileParse(source[0].get_contents())
targets = []
out_dir = data.get("OUTPUT_DIRECTORY", ".")
if not os.path.isabs(out_dir):
conf_dir = os.path.dirname(str(source[0]))
out_dir = os.path.join(conf_dir, out_dir)
# add our output locations
for (k, v) in output_formats.items():
if data.get("GENERATE_" + k, v[0]) == "YES":
targets.append(env.Dir( os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))) )
# add the tag file if necessary:
tagfile = data.get("GENERATE_TAGFILE", "")
if tagfile != "":
if not os.path.isabs(tagfile):
conf_dir = os.path.dirname(str(source[0]))
tagfile = os.path.join(conf_dir, tagfile)
targets.append(env.File(tagfile))
# don't clobber targets
for node in targets:
env.Precious(node)
# set up cleaning stuff
for node in targets:
env.Clean(node, node)
return (targets, source) | [
"def",
"DoxyEmitter",
"(",
"source",
",",
"target",
",",
"env",
")",
":",
"# possible output formats and their default values and output locations",
"output_formats",
"=",
"{",
"\"HTML\"",
":",
"(",
"\"YES\"",
",",
"\"html\"",
")",
",",
"\"LATEX\"",
":",
"(",
"\"YES\"",
",",
"\"latex\"",
")",
",",
"\"RTF\"",
":",
"(",
"\"NO\"",
",",
"\"rtf\"",
")",
",",
"\"MAN\"",
":",
"(",
"\"YES\"",
",",
"\"man\"",
")",
",",
"\"XML\"",
":",
"(",
"\"NO\"",
",",
"\"xml\"",
")",
",",
"}",
"data",
"=",
"DoxyfileParse",
"(",
"source",
"[",
"0",
"]",
".",
"get_contents",
"(",
")",
")",
"targets",
"=",
"[",
"]",
"out_dir",
"=",
"data",
".",
"get",
"(",
"\"OUTPUT_DIRECTORY\"",
",",
"\".\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"out_dir",
")",
":",
"conf_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"str",
"(",
"source",
"[",
"0",
"]",
")",
")",
"out_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"conf_dir",
",",
"out_dir",
")",
"# add our output locations",
"for",
"(",
"k",
",",
"v",
")",
"in",
"output_formats",
".",
"items",
"(",
")",
":",
"if",
"data",
".",
"get",
"(",
"\"GENERATE_\"",
"+",
"k",
",",
"v",
"[",
"0",
"]",
")",
"==",
"\"YES\"",
":",
"targets",
".",
"append",
"(",
"env",
".",
"Dir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"data",
".",
"get",
"(",
"k",
"+",
"\"_OUTPUT\"",
",",
"v",
"[",
"1",
"]",
")",
")",
")",
")",
"# add the tag file if neccessary:",
"tagfile",
"=",
"data",
".",
"get",
"(",
"\"GENERATE_TAGFILE\"",
",",
"\"\"",
")",
"if",
"tagfile",
"!=",
"\"\"",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"tagfile",
")",
":",
"conf_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"str",
"(",
"source",
"[",
"0",
"]",
")",
")",
"tagfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"conf_dir",
",",
"tagfile",
")",
"targets",
".",
"append",
"(",
"env",
".",
"File",
"(",
"tagfile",
")",
")",
"# don't clobber targets",
"for",
"node",
"in",
"targets",
":",
"env",
".",
"Precious",
"(",
"node",
")",
"# set up cleaning stuff",
"for",
"node",
"in",
"targets",
":",
"env",
".",
"Clean",
"(",
"node",
",",
"node",
")",
"return",
"(",
"targets",
",",
"source",
")"
] | Doxygen Doxyfile emitter | [
"Doxygen",
"Doxyfile",
"emitter"
] | 9f7e7705793d258a0b205f185b20e3bbcda473da | https://github.com/melizalab/libtfr/blob/9f7e7705793d258a0b205f185b20e3bbcda473da/site_scons/site_tools/doxygen.py#L181-L221 | train |
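The `GENERATE_*` / `*_OUTPUT` lookup above, run on plain dictionaries:

```python
output_formats = {"HTML": ("YES", "html"), "XML": ("NO", "xml")}
data = {"GENERATE_XML": "YES", "HTML_OUTPUT": "docs/html"}

targets = [data.get(k + "_OUTPUT", v[1])
           for (k, v) in output_formats.items()
           if data.get("GENERATE_" + k, v[0]) == "YES"]
print(sorted(targets))  # ['docs/html', 'xml']
```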
melizalab/libtfr | site_scons/site_tools/doxygen.py | generate | def generate(env):
"""
Add builders and construction variables for the
Doxygen tool. This is currently for Doxygen 1.4.6.
"""
doxyfile_scanner = env.Scanner(
DoxySourceScan,
"DoxySourceScan",
scan_check = DoxySourceScanCheck,
)
import SCons.Builder
doxyfile_builder = SCons.Builder.Builder(
action = "cd ${SOURCE.dir} && ${DOXYGEN} ${SOURCE.file}",
emitter = DoxyEmitter,
target_factory = env.fs.Entry,
single_source = True,
source_scanner = doxyfile_scanner,
)
env.Append(BUILDERS = {
'Doxygen': doxyfile_builder,
})
env.AppendUnique(
DOXYGEN = 'doxygen',
) | python | def generate(env):
"""
Add builders and construction variables for the
Doxygen tool. This is currently for Doxygen 1.4.6.
"""
doxyfile_scanner = env.Scanner(
DoxySourceScan,
"DoxySourceScan",
scan_check = DoxySourceScanCheck,
)
import SCons.Builder
doxyfile_builder = SCons.Builder.Builder(
action = "cd ${SOURCE.dir} && ${DOXYGEN} ${SOURCE.file}",
emitter = DoxyEmitter,
target_factory = env.fs.Entry,
single_source = True,
source_scanner = doxyfile_scanner,
)
env.Append(BUILDERS = {
'Doxygen': doxyfile_builder,
})
env.AppendUnique(
DOXYGEN = 'doxygen',
) | [
"def",
"generate",
"(",
"env",
")",
":",
"doxyfile_scanner",
"=",
"env",
".",
"Scanner",
"(",
"DoxySourceScan",
",",
"\"DoxySourceScan\"",
",",
"scan_check",
"=",
"DoxySourceScanCheck",
",",
")",
"import",
"SCons",
".",
"Builder",
"doxyfile_builder",
"=",
"SCons",
".",
"Builder",
".",
"Builder",
"(",
"action",
"=",
"\"cd ${SOURCE.dir} && ${DOXYGEN} ${SOURCE.file}\"",
",",
"emitter",
"=",
"DoxyEmitter",
",",
"target_factory",
"=",
"env",
".",
"fs",
".",
"Entry",
",",
"single_source",
"=",
"True",
",",
"source_scanner",
"=",
"doxyfile_scanner",
",",
")",
"env",
".",
"Append",
"(",
"BUILDERS",
"=",
"{",
"'Doxygen'",
":",
"doxyfile_builder",
",",
"}",
")",
"env",
".",
"AppendUnique",
"(",
"DOXYGEN",
"=",
"'doxygen'",
",",
")"
] | Add builders and construction variables for the
Doxygen tool. This is currently for Doxygen 1.4.6. | [
"Add",
"builders",
"and",
"construction",
"variables",
"for",
"the",
"Doxygen",
"tool",
".",
"This",
"is",
"currently",
"for",
"Doxygen",
"1",
".",
"4",
".",
"6",
"."
] | 9f7e7705793d258a0b205f185b20e3bbcda473da | https://github.com/melizalab/libtfr/blob/9f7e7705793d258a0b205f185b20e3bbcda473da/site_scons/site_tools/doxygen.py#L223-L249 | train |
finklabs/metrics | metrics/position.py | PosMetric.reset | def reset(self):
"""Reset metric counter."""
self._positions = []
self._line = 1
self._curr = None # current scope we are analyzing
self._scope = 0
self.language = None | python | def reset(self):
"""Reset metric counter."""
self._positions = []
self._line = 1
self._curr = None # current scope we are analyzing
self._scope = 0
self.language = None | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"_positions",
"=",
"[",
"]",
"self",
".",
"_line",
"=",
"1",
"self",
".",
"_curr",
"=",
"None",
"# current scope we are analyzing",
"self",
".",
"_scope",
"=",
"0",
"self",
".",
"language",
"=",
"None"
] | Reset metric counter. | [
"Reset",
"metric",
"counter",
"."
] | fd9974af498831664b9ae8e8f3834e1ec2e8a699 | https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/position.py#L87-L93 | train |
finklabs/metrics | metrics/position.py | PosMetric.add_scope | def add_scope(self, scope_type, scope_name, scope_start, is_method=False):
"""we identified a scope and add it to positions."""
if self._curr is not None:
self._curr['end'] = scope_start - 1 # close last scope
self._curr = {
'type': scope_type, 'name': scope_name,
'start': scope_start, 'end': scope_start
}
if is_method and self._positions:
last = self._positions[-1]
if not 'methods' in last:
last['methods'] = []
last['methods'].append(self._curr)
else:
self._positions.append(self._curr) | python | def add_scope(self, scope_type, scope_name, scope_start, is_method=False):
"""we identified a scope and add it to positions."""
if self._curr is not None:
self._curr['end'] = scope_start - 1 # close last scope
self._curr = {
'type': scope_type, 'name': scope_name,
'start': scope_start, 'end': scope_start
}
if is_method and self._positions:
last = self._positions[-1]
if not 'methods' in last:
last['methods'] = []
last['methods'].append(self._curr)
else:
self._positions.append(self._curr) | [
"def",
"add_scope",
"(",
"self",
",",
"scope_type",
",",
"scope_name",
",",
"scope_start",
",",
"is_method",
"=",
"False",
")",
":",
"if",
"self",
".",
"_curr",
"is",
"not",
"None",
":",
"self",
".",
"_curr",
"[",
"'end'",
"]",
"=",
"scope_start",
"-",
"1",
"# close last scope",
"self",
".",
"_curr",
"=",
"{",
"'type'",
":",
"scope_type",
",",
"'name'",
":",
"scope_name",
",",
"'start'",
":",
"scope_start",
",",
"'end'",
":",
"scope_start",
"}",
"if",
"is_method",
"and",
"self",
".",
"_positions",
":",
"last",
"=",
"self",
".",
"_positions",
"[",
"-",
"1",
"]",
"if",
"not",
"'methods'",
"in",
"last",
":",
"last",
"[",
"'methods'",
"]",
"=",
"[",
"]",
"last",
"[",
"'methods'",
"]",
".",
"append",
"(",
"self",
".",
"_curr",
")",
"else",
":",
"self",
".",
"_positions",
".",
"append",
"(",
"self",
".",
"_curr",
")"
] | Add an identified scope to positions. | [
"we",
"identified",
"a",
"scope",
"and",
"add",
"it",
"to",
"positions",
"."
] | fd9974af498831664b9ae8e8f3834e1ec2e8a699 | https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/position.py#L95-L110 | train |
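How the positions structure above evolves, on made-up input — a class opened at line 1 gains a nested 'methods' list when a method scope starts at line 3:

```python
positions = []
positions.append({'type': 'Class', 'name': 'Foo', 'start': 1, 'end': 1})
method = {'type': 'Function', 'name': 'bar', 'start': 3, 'end': 3}
positions[-1].setdefault('methods', []).append(method)
print(positions)
# [{'type': 'Class', 'name': 'Foo', 'start': 1, 'end': 1,
#   'methods': [{'type': 'Function', 'name': 'bar', 'start': 3, 'end': 3}]}]
```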
finklabs/metrics | metrics/position.py | PosMetric.process_token | def process_token(self, tok):
"""count lines and track position of classes and functions"""
if tok[0] == Token.Text:
count = tok[1].count('\n')
if count:
self._line += count # adjust linecount
if self._detector.process(tok):
pass # works been completed in the detector
elif tok[0] == Token.Punctuation:
if tok[0] == Token.Punctuation and tok[1] == '{':
self._scope += 1
if tok[0] == Token.Punctuation and tok[1] == '}':
self._scope += -1
if self._scope == 0 and self._curr is not None:
self._curr['end'] = self._line # close last scope
self._curr = None
elif tok[0] == Token.Name.Class and self._scope == 0:
self.add_scope('Class', tok[1], self._line)
elif tok[0] == Token.Name.Function and self._scope in [0, 1]:
self.add_scope('Function', tok[1], self._line, self._scope == 1) | python | def process_token(self, tok):
"""count lines and track position of classes and functions"""
if tok[0] == Token.Text:
count = tok[1].count('\n')
if count:
self._line += count # adjust linecount
if self._detector.process(tok):
pass # works been completed in the detector
elif tok[0] == Token.Punctuation:
if tok[0] == Token.Punctuation and tok[1] == '{':
self._scope += 1
if tok[0] == Token.Punctuation and tok[1] == '}':
self._scope += -1
if self._scope == 0 and self._curr is not None:
self._curr['end'] = self._line # close last scope
self._curr = None
elif tok[0] == Token.Name.Class and self._scope == 0:
self.add_scope('Class', tok[1], self._line)
elif tok[0] == Token.Name.Function and self._scope in [0, 1]:
self.add_scope('Function', tok[1], self._line, self._scope == 1) | [
"def",
"process_token",
"(",
"self",
",",
"tok",
")",
":",
"if",
"tok",
"[",
"0",
"]",
"==",
"Token",
".",
"Text",
":",
"count",
"=",
"tok",
"[",
"1",
"]",
".",
"count",
"(",
"'\\n'",
")",
"if",
"count",
":",
"self",
".",
"_line",
"+=",
"count",
"# adjust linecount",
"if",
"self",
".",
"_detector",
".",
"process",
"(",
"tok",
")",
":",
"pass",
"# works been completed in the detector",
"elif",
"tok",
"[",
"0",
"]",
"==",
"Token",
".",
"Punctuation",
":",
"if",
"tok",
"[",
"0",
"]",
"==",
"Token",
".",
"Punctuation",
"and",
"tok",
"[",
"1",
"]",
"==",
"'{'",
":",
"self",
".",
"_scope",
"+=",
"1",
"if",
"tok",
"[",
"0",
"]",
"==",
"Token",
".",
"Punctuation",
"and",
"tok",
"[",
"1",
"]",
"==",
"'}'",
":",
"self",
".",
"_scope",
"+=",
"-",
"1",
"if",
"self",
".",
"_scope",
"==",
"0",
"and",
"self",
".",
"_curr",
"is",
"not",
"None",
":",
"self",
".",
"_curr",
"[",
"'end'",
"]",
"=",
"self",
".",
"_line",
"# close last scope",
"self",
".",
"_curr",
"=",
"None",
"elif",
"tok",
"[",
"0",
"]",
"==",
"Token",
".",
"Name",
".",
"Class",
"and",
"self",
".",
"_scope",
"==",
"0",
":",
"self",
".",
"add_scope",
"(",
"'Class'",
",",
"tok",
"[",
"1",
"]",
",",
"self",
".",
"_line",
")",
"elif",
"tok",
"[",
"0",
"]",
"==",
"Token",
".",
"Name",
".",
"Function",
"and",
"self",
".",
"_scope",
"in",
"[",
"0",
",",
"1",
"]",
":",
"self",
".",
"add_scope",
"(",
"'Function'",
",",
"tok",
"[",
"1",
"]",
",",
"self",
".",
"_line",
",",
"self",
".",
"_scope",
"==",
"1",
")"
] | Count lines and track positions of classes and functions. | [
"count",
"lines",
"and",
"track",
"position",
"of",
"classes",
"and",
"functions"
] | fd9974af498831664b9ae8e8f3834e1ec2e8a699 | https://github.com/finklabs/metrics/blob/fd9974af498831664b9ae8e8f3834e1ec2e8a699/metrics/position.py#L112-L132 | train |
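`process_token` consumes Pygments `(token_type, text)` pairs. A small driver showing where such tokens come from, assuming Pygments is installed:

```python
from pygments.lexers import get_lexer_by_name
from pygments.token import Token

code = "class Foo {\n  void bar() {}\n}\n"
for tok in get_lexer_by_name("java").get_tokens(code):
    if tok[0] in (Token.Name.Class, Token.Name.Function):
        print(tok)  # (Token.Name.Class, 'Foo') then (Token.Name.Function, 'bar')
```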
rwl/pylon | pylon/solver.py | _Solver._unpack_model | def _unpack_model(self, om):
""" Returns data from the OPF model.
"""
buses = om.case.connected_buses
branches = om.case.online_branches
gens = om.case.online_generators
cp = om.get_cost_params()
# Bf = om._Bf
# Pfinj = om._Pfinj
return buses, branches, gens, cp | python | def _unpack_model(self, om):
""" Returns data from the OPF model.
"""
buses = om.case.connected_buses
branches = om.case.online_branches
gens = om.case.online_generators
cp = om.get_cost_params()
# Bf = om._Bf
# Pfinj = om._Pfinj
return buses, branches, gens, cp | [
"def",
"_unpack_model",
"(",
"self",
",",
"om",
")",
":",
"buses",
"=",
"om",
".",
"case",
".",
"connected_buses",
"branches",
"=",
"om",
".",
"case",
".",
"online_branches",
"gens",
"=",
"om",
".",
"case",
".",
"online_generators",
"cp",
"=",
"om",
".",
"get_cost_params",
"(",
")",
"# Bf = om._Bf",
"# Pfinj = om._Pfinj",
"return",
"buses",
",",
"branches",
",",
"gens",
",",
"cp"
] | Returns data from the OPF model. | [
"Returns",
"data",
"from",
"the",
"OPF",
"model",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/solver.py#L78-L90 | train |
rwl/pylon | pylon/solver.py | _Solver._dimension_data | def _dimension_data(self, buses, branches, generators):
""" Returns the problem dimensions.
"""
ipol = [i for i, g in enumerate(generators)
if g.pcost_model == POLYNOMIAL]
ipwl = [i for i, g in enumerate(generators)
if g.pcost_model == PW_LINEAR]
nb = len(buses)
nl = len(branches)
# Number of general cost vars, w.
nw = self.om.cost_N
# Number of piece-wise linear costs.
if "y" in [v.name for v in self.om.vars]:
ny = self.om.get_var_N("y")
else:
ny = 0
# Total number of control variables of all types.
nxyz = self.om.var_N
return ipol, ipwl, nb, nl, nw, ny, nxyz | python | def _dimension_data(self, buses, branches, generators):
""" Returns the problem dimensions.
"""
ipol = [i for i, g in enumerate(generators)
if g.pcost_model == POLYNOMIAL]
ipwl = [i for i, g in enumerate(generators)
if g.pcost_model == PW_LINEAR]
nb = len(buses)
nl = len(branches)
# Number of general cost vars, w.
nw = self.om.cost_N
# Number of piece-wise linear costs.
if "y" in [v.name for v in self.om.vars]:
ny = self.om.get_var_N("y")
else:
ny = 0
# Total number of control variables of all types.
nxyz = self.om.var_N
return ipol, ipwl, nb, nl, nw, ny, nxyz | [
"def",
"_dimension_data",
"(",
"self",
",",
"buses",
",",
"branches",
",",
"generators",
")",
":",
"ipol",
"=",
"[",
"i",
"for",
"i",
",",
"g",
"in",
"enumerate",
"(",
"generators",
")",
"if",
"g",
".",
"pcost_model",
"==",
"POLYNOMIAL",
"]",
"ipwl",
"=",
"[",
"i",
"for",
"i",
",",
"g",
"in",
"enumerate",
"(",
"generators",
")",
"if",
"g",
".",
"pcost_model",
"==",
"PW_LINEAR",
"]",
"nb",
"=",
"len",
"(",
"buses",
")",
"nl",
"=",
"len",
"(",
"branches",
")",
"# Number of general cost vars, w.",
"nw",
"=",
"self",
".",
"om",
".",
"cost_N",
"# Number of piece-wise linear costs.",
"if",
"\"y\"",
"in",
"[",
"v",
".",
"name",
"for",
"v",
"in",
"self",
".",
"om",
".",
"vars",
"]",
":",
"ny",
"=",
"self",
".",
"om",
".",
"get_var_N",
"(",
"\"y\"",
")",
"else",
":",
"ny",
"=",
"0",
"# Total number of control variables of all types.",
"nxyz",
"=",
"self",
".",
"om",
".",
"var_N",
"return",
"ipol",
",",
"ipwl",
",",
"nb",
",",
"nl",
",",
"nw",
",",
"ny",
",",
"nxyz"
] | Returns the problem dimensions. | [
"Returns",
"the",
"problem",
"dimensions",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/solver.py#L93-L112 | train |
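The polynomial/piecewise partition above on a toy generator list; the cost-model constants here are stand-ins for pylon's `POLYNOMIAL` and `PW_LINEAR`:

```python
POLYNOMIAL, PW_LINEAR = "poly", "pwl"  # stand-in constants, for illustration

class G(object):
    def __init__(self, model):
        self.pcost_model = model

gens = [G(POLYNOMIAL), G(PW_LINEAR), G(POLYNOMIAL)]
ipol = [i for i, g in enumerate(gens) if g.pcost_model == POLYNOMIAL]
ipwl = [i for i, g in enumerate(gens) if g.pcost_model == PW_LINEAR]
print(ipol, ipwl)  # [0, 2] [1]
```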
rwl/pylon | pylon/solver.py | _Solver._linear_constraints | def _linear_constraints(self, om):
""" Returns the linear problem constraints.
"""
A, l, u = om.linear_constraints() # l <= A*x <= u
# Indexes for equality, greater than (unbounded above), less than
# (unbounded below) and doubly-bounded box constraints.
# ieq = flatnonzero( abs(u - l) <= EPS )
# igt = flatnonzero( (u >= 1e10) & (l > -1e10) )
# ilt = flatnonzero( (l <= -1e10) & (u < 1e10) )
# ibx = flatnonzero( (abs(u - l) > EPS) & (u < 1e10) & (l > -1e10) )
# Zero-sized sparse matrices not supported. Assume equality
# constraints exist.
## AA = A[ieq, :]
## if len(ilt) > 0:
## AA = vstack([AA, A[ilt, :]], "csr")
## if len(igt) > 0:
## AA = vstack([AA, -A[igt, :]], "csr")
## if len(ibx) > 0:
## AA = vstack([AA, A[ibx, :], -A[ibx, :]], "csr")
#
# if len(ieq) or len(igt) or len(ilt) or len(ibx):
# sig_idx = [(1, ieq), (1, ilt), (-1, igt), (1, ibx), (-1, ibx)]
# AA = vstack([sig * A[idx, :] for sig, idx in sig_idx if len(idx)])
# else:
# AA = None
#
# bb = r_[u[ieq, :], u[ilt], -l[igt], u[ibx], -l[ibx]]
#
# self._nieq = ieq.shape[0]
#
# return AA, bb
return A, l, u | python | def _linear_constraints(self, om):
""" Returns the linear problem constraints.
"""
A, l, u = om.linear_constraints() # l <= A*x <= u
# Indexes for equality, greater than (unbounded above), less than
# (unbounded below) and doubly-bounded box constraints.
# ieq = flatnonzero( abs(u - l) <= EPS )
# igt = flatnonzero( (u >= 1e10) & (l > -1e10) )
# ilt = flatnonzero( (l <= -1e10) & (u < 1e10) )
# ibx = flatnonzero( (abs(u - l) > EPS) & (u < 1e10) & (l > -1e10) )
# Zero-sized sparse matrices not supported. Assume equality
# constraints exist.
## AA = A[ieq, :]
## if len(ilt) > 0:
## AA = vstack([AA, A[ilt, :]], "csr")
## if len(igt) > 0:
## AA = vstack([AA, -A[igt, :]], "csr")
## if len(ibx) > 0:
## AA = vstack([AA, A[ibx, :], -A[ibx, :]], "csr")
#
# if len(ieq) or len(igt) or len(ilt) or len(ibx):
# sig_idx = [(1, ieq), (1, ilt), (-1, igt), (1, ibx), (-1, ibx)]
# AA = vstack([sig * A[idx, :] for sig, idx in sig_idx if len(idx)])
# else:
# AA = None
#
# bb = r_[u[ieq, :], u[ilt], -l[igt], u[ibx], -l[ibx]]
#
# self._nieq = ieq.shape[0]
#
# return AA, bb
return A, l, u | [
"def",
"_linear_constraints",
"(",
"self",
",",
"om",
")",
":",
"A",
",",
"l",
",",
"u",
"=",
"om",
".",
"linear_constraints",
"(",
")",
"# l <= A*x <= u",
"# Indexes for equality, greater than (unbounded above), less than",
"# (unbounded below) and doubly-bounded box constraints.",
"# ieq = flatnonzero( abs(u - l) <= EPS )",
"# igt = flatnonzero( (u >= 1e10) & (l > -1e10) )",
"# ilt = flatnonzero( (l <= -1e10) & (u < 1e10) )",
"# ibx = flatnonzero( (abs(u - l) > EPS) & (u < 1e10) & (l > -1e10) )",
"# Zero-sized sparse matrices not supported. Assume equality",
"# constraints exist.",
"## AA = A[ieq, :]",
"## if len(ilt) > 0:",
"## AA = vstack([AA, A[ilt, :]], \"csr\")",
"## if len(igt) > 0:",
"## AA = vstack([AA, -A[igt, :]], \"csr\")",
"## if len(ibx) > 0:",
"## AA = vstack([AA, A[ibx, :], -A[ibx, :]], \"csr\")",
"#",
"# if len(ieq) or len(igt) or len(ilt) or len(ibx):",
"# sig_idx = [(1, ieq), (1, ilt), (-1, igt), (1, ibx), (-1, ibx)]",
"# AA = vstack([sig * A[idx, :] for sig, idx in sig_idx if len(idx)])",
"# else:",
"# AA = None",
"#",
"# bb = r_[u[ieq, :], u[ilt], -l[igt], u[ibx], -l[ibx]]",
"#",
"# self._nieq = ieq.shape[0]",
"#",
"# return AA, bb",
"return",
"A",
",",
"l",
",",
"u"
] | Returns the linear problem constraints. | [
"Returns",
"the",
"linear",
"problem",
"constraints",
"."
] | 916514255db1ae1661406f0283df756baf960d14 | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/solver.py#L115-L149 | train |
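The commented-out classification in the record above, exercised on a toy `l <= A*x <= u` system (`EPS` here is just a small tolerance, an assumption):

```python
from numpy import abs, array, flatnonzero

EPS = 1e-8
l = array([1.0, -1e12, 0.0, -1e12])
u = array([1.0, 5.0, 1e12, 1e12])

ieq = flatnonzero(abs(u - l) <= EPS)                              # equalities
igt = flatnonzero((u >= 1e10) & (l > -1e10))                      # bounded below only
ilt = flatnonzero((l <= -1e10) & (u < 1e10))                      # bounded above only
ibx = flatnonzero((abs(u - l) > EPS) & (u < 1e10) & (l > -1e10))  # boxes
print(ieq, igt, ilt, ibx)  # [0] [2] [1] []
```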