repo (stringlengths 7-55) | path (stringlengths 4-127) | func_name (stringlengths 1-88) | original_string (stringlengths 75-19.8k) | language (stringclasses 1) | code (stringlengths 75-19.8k) | code_tokens (sequence) | docstring (stringlengths 3-17.3k) | docstring_tokens (sequence) | sha (stringlengths 40) | url (stringlengths 87-242) | partition (stringclasses 1)
---|---|---|---|---|---|---|---|---|---|---|---
mitodl/django-server-status | server_status/views.py | get_redis_info | def get_redis_info():
"""Check Redis connection."""
from kombu.utils.url import _parse_url as parse_redis_url
from redis import (
StrictRedis,
ConnectionError as RedisConnectionError,
ResponseError as RedisResponseError,
)
for conf_name in ('REDIS_URL', 'BROKER_URL', 'CELERY_BROKER_URL'):
if hasattr(settings, conf_name):
url = getattr(settings, conf_name)
if url.startswith('redis://'):
break
else:
log.error("No redis connection info found in settings.")
return {"status": NO_CONFIG}
_, host, port, _, password, database, _ = parse_redis_url(url)
start = datetime.now()
try:
rdb = StrictRedis(
host=host, port=port, db=database,
password=password, socket_timeout=TIMEOUT_SECONDS,
)
info = rdb.info()
except (RedisConnectionError, TypeError) as ex:
log.error("Error making Redis connection: %s", ex.args)
return {"status": DOWN}
except RedisResponseError as ex:
log.error("Bad Redis response: %s", ex.args)
return {"status": DOWN, "message": "auth error"}
micro = (datetime.now() - start).microseconds
del rdb # the redis package does not support Redis's QUIT.
ret = {
"status": UP, "response_microseconds": micro,
}
fields = ("uptime_in_seconds", "used_memory", "used_memory_peak")
ret.update({x: info[x] for x in fields})
return ret | python | def get_redis_info():
"""Check Redis connection."""
from kombu.utils.url import _parse_url as parse_redis_url
from redis import (
StrictRedis,
ConnectionError as RedisConnectionError,
ResponseError as RedisResponseError,
)
for conf_name in ('REDIS_URL', 'BROKER_URL', 'CELERY_BROKER_URL'):
if hasattr(settings, conf_name):
url = getattr(settings, conf_name)
if url.startswith('redis://'):
break
else:
log.error("No redis connection info found in settings.")
return {"status": NO_CONFIG}
_, host, port, _, password, database, _ = parse_redis_url(url)
start = datetime.now()
try:
rdb = StrictRedis(
host=host, port=port, db=database,
password=password, socket_timeout=TIMEOUT_SECONDS,
)
info = rdb.info()
except (RedisConnectionError, TypeError) as ex:
log.error("Error making Redis connection: %s", ex.args)
return {"status": DOWN}
except RedisResponseError as ex:
log.error("Bad Redis response: %s", ex.args)
return {"status": DOWN, "message": "auth error"}
micro = (datetime.now() - start).microseconds
del rdb # the redis package does not support Redis's QUIT.
ret = {
"status": UP, "response_microseconds": micro,
}
fields = ("uptime_in_seconds", "used_memory", "used_memory_peak")
ret.update({x: info[x] for x in fields})
return ret | [
"def",
"get_redis_info",
"(",
")",
":",
"from",
"kombu",
".",
"utils",
".",
"url",
"import",
"_parse_url",
"as",
"parse_redis_url",
"from",
"redis",
"import",
"(",
"StrictRedis",
",",
"ConnectionError",
"as",
"RedisConnectionError",
",",
"ResponseError",
"as",
"RedisResponseError",
",",
")",
"for",
"conf_name",
"in",
"(",
"'REDIS_URL'",
",",
"'BROKER_URL'",
",",
"'CELERY_BROKER_URL'",
")",
":",
"if",
"hasattr",
"(",
"settings",
",",
"conf_name",
")",
":",
"url",
"=",
"getattr",
"(",
"settings",
",",
"conf_name",
")",
"if",
"url",
".",
"startswith",
"(",
"'redis://'",
")",
":",
"break",
"else",
":",
"log",
".",
"error",
"(",
"\"No redis connection info found in settings.\"",
")",
"return",
"{",
"\"status\"",
":",
"NO_CONFIG",
"}",
"_",
",",
"host",
",",
"port",
",",
"_",
",",
"password",
",",
"database",
",",
"_",
"=",
"parse_redis_url",
"(",
"url",
")",
"start",
"=",
"datetime",
".",
"now",
"(",
")",
"try",
":",
"rdb",
"=",
"StrictRedis",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"db",
"=",
"database",
",",
"password",
"=",
"password",
",",
"socket_timeout",
"=",
"TIMEOUT_SECONDS",
",",
")",
"info",
"=",
"rdb",
".",
"info",
"(",
")",
"except",
"(",
"RedisConnectionError",
",",
"TypeError",
")",
"as",
"ex",
":",
"log",
".",
"error",
"(",
"\"Error making Redis connection: %s\"",
",",
"ex",
".",
"args",
")",
"return",
"{",
"\"status\"",
":",
"DOWN",
"}",
"except",
"RedisResponseError",
"as",
"ex",
":",
"log",
".",
"error",
"(",
"\"Bad Redis response: %s\"",
",",
"ex",
".",
"args",
")",
"return",
"{",
"\"status\"",
":",
"DOWN",
",",
"\"message\"",
":",
"\"auth error\"",
"}",
"micro",
"=",
"(",
"datetime",
".",
"now",
"(",
")",
"-",
"start",
")",
".",
"microseconds",
"del",
"rdb",
"# the redis package does not support Redis's QUIT.",
"ret",
"=",
"{",
"\"status\"",
":",
"UP",
",",
"\"response_microseconds\"",
":",
"micro",
",",
"}",
"fields",
"=",
"(",
"\"uptime_in_seconds\"",
",",
"\"used_memory\"",
",",
"\"used_memory_peak\"",
")",
"ret",
".",
"update",
"(",
"{",
"x",
":",
"info",
"[",
"x",
"]",
"for",
"x",
"in",
"fields",
"}",
")",
"return",
"ret"
] | Check Redis connection. | [
"Check",
"Redis",
"connection",
"."
] | 99bd29343138f94a08718fdbd9285e551751777b | https://github.com/mitodl/django-server-status/blob/99bd29343138f94a08718fdbd9285e551751777b/server_status/views.py#L64-L102 | train |
mitodl/django-server-status | server_status/views.py | get_elasticsearch_info | def get_elasticsearch_info():
"""Check Elasticsearch connection."""
from elasticsearch import (
Elasticsearch,
ConnectionError as ESConnectionError
)
if hasattr(settings, 'ELASTICSEARCH_URL'):
url = settings.ELASTICSEARCH_URL
else:
return {"status": NO_CONFIG}
start = datetime.now()
try:
search = Elasticsearch(url, request_timeout=TIMEOUT_SECONDS)
search.info()
except ESConnectionError:
return {"status": DOWN}
del search # The elasticsearch library has no "close" or "disconnect."
micro = (datetime.now() - start).microseconds
return {
"status": UP, "response_microseconds": micro,
} | python | def get_elasticsearch_info():
"""Check Elasticsearch connection."""
from elasticsearch import (
Elasticsearch,
ConnectionError as ESConnectionError
)
if hasattr(settings, 'ELASTICSEARCH_URL'):
url = settings.ELASTICSEARCH_URL
else:
return {"status": NO_CONFIG}
start = datetime.now()
try:
search = Elasticsearch(url, request_timeout=TIMEOUT_SECONDS)
search.info()
except ESConnectionError:
return {"status": DOWN}
del search # The elasticsearch library has no "close" or "disconnect."
micro = (datetime.now() - start).microseconds
return {
"status": UP, "response_microseconds": micro,
} | [
"def",
"get_elasticsearch_info",
"(",
")",
":",
"from",
"elasticsearch",
"import",
"(",
"Elasticsearch",
",",
"ConnectionError",
"as",
"ESConnectionError",
")",
"if",
"hasattr",
"(",
"settings",
",",
"'ELASTICSEARCH_URL'",
")",
":",
"url",
"=",
"settings",
".",
"ELASTICSEARCH_URL",
"else",
":",
"return",
"{",
"\"status\"",
":",
"NO_CONFIG",
"}",
"start",
"=",
"datetime",
".",
"now",
"(",
")",
"try",
":",
"search",
"=",
"Elasticsearch",
"(",
"url",
",",
"request_timeout",
"=",
"TIMEOUT_SECONDS",
")",
"search",
".",
"info",
"(",
")",
"except",
"ESConnectionError",
":",
"return",
"{",
"\"status\"",
":",
"DOWN",
"}",
"del",
"search",
"# The elasticsearch library has no \"close\" or \"disconnect.\"",
"micro",
"=",
"(",
"datetime",
".",
"now",
"(",
")",
"-",
"start",
")",
".",
"microseconds",
"return",
"{",
"\"status\"",
":",
"UP",
",",
"\"response_microseconds\"",
":",
"micro",
",",
"}"
] | Check Elasticsearch connection. | [
"Check",
"Elasticsearch",
"connection",
"."
] | 99bd29343138f94a08718fdbd9285e551751777b | https://github.com/mitodl/django-server-status/blob/99bd29343138f94a08718fdbd9285e551751777b/server_status/views.py#L105-L125 | train |
mitodl/django-server-status | server_status/views.py | get_celery_info | def get_celery_info():
"""
Check celery availability
"""
import celery
if not getattr(settings, 'USE_CELERY', False):
log.error("No celery config found. Set USE_CELERY in settings to enable.")
return {"status": NO_CONFIG}
start = datetime.now()
try:
# pylint: disable=no-member
app = celery.Celery('tasks')
app.config_from_object('django.conf:settings', namespace='CELERY')
# Make sure celery is connected with max_retries=1
# and not the default of max_retries=None if the connection
# is made lazily
app.connection().ensure_connection(max_retries=1)
celery_stats = celery.task.control.inspect().stats()
if not celery_stats:
log.error("No running Celery workers were found.")
return {"status": DOWN, "message": "No running Celery workers"}
except Exception as exp: # pylint: disable=broad-except
log.error("Error connecting to the backend: %s", exp)
return {"status": DOWN, "message": "Error connecting to the backend"}
return {"status": UP, "response_microseconds": (datetime.now() - start).microseconds} | python | def get_celery_info():
"""
Check celery availability
"""
import celery
if not getattr(settings, 'USE_CELERY', False):
log.error("No celery config found. Set USE_CELERY in settings to enable.")
return {"status": NO_CONFIG}
start = datetime.now()
try:
# pylint: disable=no-member
app = celery.Celery('tasks')
app.config_from_object('django.conf:settings', namespace='CELERY')
# Make sure celery is connected with max_retries=1
# and not the default of max_retries=None if the connection
# is made lazily
app.connection().ensure_connection(max_retries=1)
celery_stats = celery.task.control.inspect().stats()
if not celery_stats:
log.error("No running Celery workers were found.")
return {"status": DOWN, "message": "No running Celery workers"}
except Exception as exp: # pylint: disable=broad-except
log.error("Error connecting to the backend: %s", exp)
return {"status": DOWN, "message": "Error connecting to the backend"}
return {"status": UP, "response_microseconds": (datetime.now() - start).microseconds} | [
"def",
"get_celery_info",
"(",
")",
":",
"import",
"celery",
"if",
"not",
"getattr",
"(",
"settings",
",",
"'USE_CELERY'",
",",
"False",
")",
":",
"log",
".",
"error",
"(",
"\"No celery config found. Set USE_CELERY in settings to enable.\"",
")",
"return",
"{",
"\"status\"",
":",
"NO_CONFIG",
"}",
"start",
"=",
"datetime",
".",
"now",
"(",
")",
"try",
":",
"# pylint: disable=no-member",
"app",
"=",
"celery",
".",
"Celery",
"(",
"'tasks'",
")",
"app",
".",
"config_from_object",
"(",
"'django.conf:settings'",
",",
"namespace",
"=",
"'CELERY'",
")",
"# Make sure celery is connected with max_retries=1",
"# and not the default of max_retries=None if the connection",
"# is made lazily",
"app",
".",
"connection",
"(",
")",
".",
"ensure_connection",
"(",
"max_retries",
"=",
"1",
")",
"celery_stats",
"=",
"celery",
".",
"task",
".",
"control",
".",
"inspect",
"(",
")",
".",
"stats",
"(",
")",
"if",
"not",
"celery_stats",
":",
"log",
".",
"error",
"(",
"\"No running Celery workers were found.\"",
")",
"return",
"{",
"\"status\"",
":",
"DOWN",
",",
"\"message\"",
":",
"\"No running Celery workers\"",
"}",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"log",
".",
"error",
"(",
"\"Error connecting to the backend: %s\"",
",",
"exp",
")",
"return",
"{",
"\"status\"",
":",
"DOWN",
",",
"\"message\"",
":",
"\"Error connecting to the backend\"",
"}",
"return",
"{",
"\"status\"",
":",
"UP",
",",
"\"response_microseconds\"",
":",
"(",
"datetime",
".",
"now",
"(",
")",
"-",
"start",
")",
".",
"microseconds",
"}"
] | Check celery availability | [
"Check",
"celery",
"availability"
] | 99bd29343138f94a08718fdbd9285e551751777b | https://github.com/mitodl/django-server-status/blob/99bd29343138f94a08718fdbd9285e551751777b/server_status/views.py#L128-L153 | train |
mitodl/django-server-status | server_status/views.py | get_certificate_info | def get_certificate_info():
"""
checks app certificate expiry status
"""
if hasattr(settings, 'MIT_WS_CERTIFICATE') and settings.MIT_WS_CERTIFICATE:
mit_ws_certificate = settings.MIT_WS_CERTIFICATE
else:
return {"status": NO_CONFIG}
app_cert = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, (
mit_ws_certificate if not isinstance(mit_ws_certificate, str)
else mit_ws_certificate.encode().decode('unicode_escape').encode()
)
)
app_cert_expiration = datetime.strptime(
app_cert.get_notAfter().decode('ascii'),
'%Y%m%d%H%M%SZ'
)
date_delta = app_cert_expiration - datetime.now()
    # if more than 30 days left in expiry of certificate then app is safe
return {
'app_cert_expires': app_cert_expiration.strftime('%Y-%m-%dT%H:%M:%S'),
'status': UP if date_delta.days > 30 else DOWN
} | python | def get_certificate_info():
"""
checks app certificate expiry status
"""
if hasattr(settings, 'MIT_WS_CERTIFICATE') and settings.MIT_WS_CERTIFICATE:
mit_ws_certificate = settings.MIT_WS_CERTIFICATE
else:
return {"status": NO_CONFIG}
app_cert = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, (
mit_ws_certificate if not isinstance(mit_ws_certificate, str)
else mit_ws_certificate.encode().decode('unicode_escape').encode()
)
)
app_cert_expiration = datetime.strptime(
app_cert.get_notAfter().decode('ascii'),
'%Y%m%d%H%M%SZ'
)
date_delta = app_cert_expiration - datetime.now()
    # if more than 30 days left in expiry of certificate then app is safe
return {
'app_cert_expires': app_cert_expiration.strftime('%Y-%m-%dT%H:%M:%S'),
'status': UP if date_delta.days > 30 else DOWN
} | [
"def",
"get_certificate_info",
"(",
")",
":",
"if",
"hasattr",
"(",
"settings",
",",
"'MIT_WS_CERTIFICATE'",
")",
"and",
"settings",
".",
"MIT_WS_CERTIFICATE",
":",
"mit_ws_certificate",
"=",
"settings",
".",
"MIT_WS_CERTIFICATE",
"else",
":",
"return",
"{",
"\"status\"",
":",
"NO_CONFIG",
"}",
"app_cert",
"=",
"OpenSSL",
".",
"crypto",
".",
"load_certificate",
"(",
"OpenSSL",
".",
"crypto",
".",
"FILETYPE_PEM",
",",
"(",
"mit_ws_certificate",
"if",
"not",
"isinstance",
"(",
"mit_ws_certificate",
",",
"str",
")",
"else",
"mit_ws_certificate",
".",
"encode",
"(",
")",
".",
"decode",
"(",
"'unicode_escape'",
")",
".",
"encode",
"(",
")",
")",
")",
"app_cert_expiration",
"=",
"datetime",
".",
"strptime",
"(",
"app_cert",
".",
"get_notAfter",
"(",
")",
".",
"decode",
"(",
"'ascii'",
")",
",",
"'%Y%m%d%H%M%SZ'",
")",
"date_delta",
"=",
"app_cert_expiration",
"-",
"datetime",
".",
"now",
"(",
")",
"# if more then 30 days left in expiry of certificate then app is safe",
"return",
"{",
"'app_cert_expires'",
":",
"app_cert_expiration",
".",
"strftime",
"(",
"'%Y-%m-%dT%H:%M:%S'",
")",
",",
"'status'",
":",
"UP",
"if",
"date_delta",
".",
"days",
">",
"30",
"else",
"DOWN",
"}"
] | checks app certificate expiry status | [
"checks",
"app",
"certificate",
"expiry",
"status"
] | 99bd29343138f94a08718fdbd9285e551751777b | https://github.com/mitodl/django-server-status/blob/99bd29343138f94a08718fdbd9285e551751777b/server_status/views.py#L156-L182 | train |
bitcaster-io/bitcaster | src/telebot/__init__.py | TeleBot._start | def _start(self):
'''Requests bot information based on current api_key, and sets
self.whoami to dictionary with username, first_name, and id of the
configured bot.
'''
if self.whoami is None:
me = self.get_me()
if me.get('ok', False):
self.whoami = me['result']
else:
raise ValueError('Bot Cannot request information, check '
'api_key') | python | def _start(self):
'''Requests bot information based on current api_key, and sets
self.whoami to dictionary with username, first_name, and id of the
configured bot.
'''
if self.whoami is None:
me = self.get_me()
if me.get('ok', False):
self.whoami = me['result']
else:
raise ValueError('Bot Cannot request information, check '
'api_key') | [
"def",
"_start",
"(",
"self",
")",
":",
"if",
"self",
".",
"whoami",
"is",
"None",
":",
"me",
"=",
"self",
".",
"get_me",
"(",
")",
"if",
"me",
".",
"get",
"(",
"'ok'",
",",
"False",
")",
":",
"self",
".",
"whoami",
"=",
"me",
"[",
"'result'",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Bot Cannot request information, check '",
"'api_key'",
")"
] | Requests bot information based on current api_key, and sets
self.whoami to dictionary with username, first_name, and id of the
configured bot. | [
"Requests",
"bot",
"information",
"based",
"on",
"current",
"api_key",
"and",
"sets",
"self",
".",
"whoami",
"to",
"dictionary",
"with",
"username",
"first_name",
"and",
"id",
"of",
"the",
"configured",
"bot",
"."
] | 04625a4b67c1ad01e5d38faa3093828b360d4a98 | https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/telebot/__init__.py#L74-L86 | train |
bitcaster-io/bitcaster | src/telebot/__init__.py | TeleBot.poll | def poll(self, offset=None, poll_timeout=600, cooldown=60, debug=False):
'''These should also be in the config section, but some here for
overrides
'''
if self.config['api_key'] is None:
raise ValueError('config api_key is undefined')
if offset or self.config.get('offset', None):
self.offset = offset or self.config.get('offset', None)
self._start()
while True:
try:
response = self.get_updates(poll_timeout, self.offset)
if response.get('ok', False) is False:
raise ValueError(response['error'])
else:
self.process_updates(response)
except Exception as e:
print('Error: Unknown Exception')
print(e)
if debug:
raise e
else:
time.sleep(cooldown) | python | def poll(self, offset=None, poll_timeout=600, cooldown=60, debug=False):
'''These should also be in the config section, but some here for
overrides
'''
if self.config['api_key'] is None:
raise ValueError('config api_key is undefined')
if offset or self.config.get('offset', None):
self.offset = offset or self.config.get('offset', None)
self._start()
while True:
try:
response = self.get_updates(poll_timeout, self.offset)
if response.get('ok', False) is False:
raise ValueError(response['error'])
else:
self.process_updates(response)
except Exception as e:
print('Error: Unknown Exception')
print(e)
if debug:
raise e
else:
time.sleep(cooldown) | [
"def",
"poll",
"(",
"self",
",",
"offset",
"=",
"None",
",",
"poll_timeout",
"=",
"600",
",",
"cooldown",
"=",
"60",
",",
"debug",
"=",
"False",
")",
":",
"if",
"self",
".",
"config",
"[",
"'api_key'",
"]",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'config api_key is undefined'",
")",
"if",
"offset",
"or",
"self",
".",
"config",
".",
"get",
"(",
"'offset'",
",",
"None",
")",
":",
"self",
".",
"offset",
"=",
"offset",
"or",
"self",
".",
"config",
".",
"get",
"(",
"'offset'",
",",
"None",
")",
"self",
".",
"_start",
"(",
")",
"while",
"True",
":",
"try",
":",
"response",
"=",
"self",
".",
"get_updates",
"(",
"poll_timeout",
",",
"self",
".",
"offset",
")",
"if",
"response",
".",
"get",
"(",
"'ok'",
",",
"False",
")",
"is",
"False",
":",
"raise",
"ValueError",
"(",
"response",
"[",
"'error'",
"]",
")",
"else",
":",
"self",
".",
"process_updates",
"(",
"response",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Error: Unknown Exception'",
")",
"print",
"(",
"e",
")",
"if",
"debug",
":",
"raise",
"e",
"else",
":",
"time",
".",
"sleep",
"(",
"cooldown",
")"
] | These should also be in the config section, but some here for
overrides | [
"These",
"should",
"also",
"be",
"in",
"the",
"config",
"section",
"but",
"some",
"here",
"for",
"overrides"
] | 04625a4b67c1ad01e5d38faa3093828b360d4a98 | https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/telebot/__init__.py#L88-L114 | train |
bitcaster-io/bitcaster | src/bitcaster/utils/language.py | get_attr | def get_attr(obj, attr, default=None):
"""Recursive get object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> get_attr(a, 'b.c')
4
>>> get_attr(a, 'b.c.y', None)
>>> get_attr(a, 'b.c.y', 1)
1
"""
if '.' not in attr:
return getattr(obj, attr, default)
else:
L = attr.split('.')
return get_attr(getattr(obj, L[0], default), '.'.join(L[1:]), default) | python | def get_attr(obj, attr, default=None):
"""Recursive get object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> get_attr(a, 'b.c')
4
>>> get_attr(a, 'b.c.y', None)
>>> get_attr(a, 'b.c.y', 1)
1
"""
if '.' not in attr:
return getattr(obj, attr, default)
else:
L = attr.split('.')
return get_attr(getattr(obj, L[0], default), '.'.join(L[1:]), default) | [
"def",
"get_attr",
"(",
"obj",
",",
"attr",
",",
"default",
"=",
"None",
")",
":",
"if",
"'.'",
"not",
"in",
"attr",
":",
"return",
"getattr",
"(",
"obj",
",",
"attr",
",",
"default",
")",
"else",
":",
"L",
"=",
"attr",
".",
"split",
"(",
"'.'",
")",
"return",
"get_attr",
"(",
"getattr",
"(",
"obj",
",",
"L",
"[",
"0",
"]",
",",
"default",
")",
",",
"'.'",
".",
"join",
"(",
"L",
"[",
"1",
":",
"]",
")",
",",
"default",
")"
] | Recursive get object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> get_attr(a, 'b.c')
4
>>> get_attr(a, 'b.c.y', None)
>>> get_attr(a, 'b.c.y', 1)
1 | [
"Recursive",
"get",
"object",
"s",
"attribute",
".",
"May",
"use",
"dot",
"notation",
"."
] | 04625a4b67c1ad01e5d38faa3093828b360d4a98 | https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/utils/language.py#L32-L51 | train |
bitcaster-io/bitcaster | src/bitcaster/web/templatetags/bc_assets.py | asset | def asset(path):
"""
Join the given path with the STATIC_URL setting.
Usage::
{% static path [as varname] %}
Examples::
{% static "myapp/css/base.css" %}
{% static variable_with_path %}
{% static "myapp/css/base.css" as admin_base_css %}
{% static variable_with_path as varname %}
"""
commit = bitcaster.get_full_version()
return mark_safe('{0}?{1}'.format(_static(path), commit)) | python | def asset(path):
"""
Join the given path with the STATIC_URL setting.
Usage::
{% static path [as varname] %}
Examples::
{% static "myapp/css/base.css" %}
{% static variable_with_path %}
{% static "myapp/css/base.css" as admin_base_css %}
{% static variable_with_path as varname %}
"""
commit = bitcaster.get_full_version()
return mark_safe('{0}?{1}'.format(_static(path), commit)) | [
"def",
"asset",
"(",
"path",
")",
":",
"commit",
"=",
"bitcaster",
".",
"get_full_version",
"(",
")",
"return",
"mark_safe",
"(",
"'{0}?{1}'",
".",
"format",
"(",
"_static",
"(",
"path",
")",
",",
"commit",
")",
")"
] | Join the given path with the STATIC_URL setting.
Usage::
{% static path [as varname] %}
Examples::
{% static "myapp/css/base.css" %}
{% static variable_with_path %}
{% static "myapp/css/base.css" as admin_base_css %}
{% static variable_with_path as varname %} | [
"Join",
"the",
"given",
"path",
"with",
"the",
"STATIC_URL",
"setting",
"."
] | 04625a4b67c1ad01e5d38faa3093828b360d4a98 | https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/web/templatetags/bc_assets.py#L19-L35 | train |
bitcaster-io/bitcaster | src/bitcaster/utils/wsgi.py | get_client_ip | def get_client_ip(request):
"""
Naively yank the first IP address in an X-Forwarded-For header
and assume this is correct.
Note: Don't use this in security sensitive situations since this
value may be forged from a client.
"""
try:
return request.META['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
except (KeyError, IndexError):
return request.META.get('REMOTE_ADDR') | python | def get_client_ip(request):
"""
Naively yank the first IP address in an X-Forwarded-For header
and assume this is correct.
Note: Don't use this in security sensitive situations since this
value may be forged from a client.
"""
try:
return request.META['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
except (KeyError, IndexError):
return request.META.get('REMOTE_ADDR') | [
"def",
"get_client_ip",
"(",
"request",
")",
":",
"try",
":",
"return",
"request",
".",
"META",
"[",
"'HTTP_X_FORWARDED_FOR'",
"]",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"except",
"(",
"KeyError",
",",
"IndexError",
")",
":",
"return",
"request",
".",
"META",
".",
"get",
"(",
"'REMOTE_ADDR'",
")"
] | Naively yank the first IP address in an X-Forwarded-For header
and assume this is correct.
Note: Don't use this in security sensitive situations since this
value may be forged from a client. | [
"Naively",
"yank",
"the",
"first",
"IP",
"address",
"in",
"an",
"X",
"-",
"Forwarded",
"-",
"For",
"header",
"and",
"assume",
"this",
"is",
"correct",
"."
] | 04625a4b67c1ad01e5d38faa3093828b360d4a98 | https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/utils/wsgi.py#L6-L17 | train |
bitcaster-io/bitcaster | src/tweepy/api.py | API._pack_image | def _pack_image(filename, max_size, form_field='image', f=None):
"""Pack image from file into multipart-formdata post body"""
# image must be less than 700kb in size
if f is None:
try:
if os.path.getsize(filename) > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
except os.error as e:
raise TweepError('Unable to access file: %s' % e.strerror)
        # build the multipart-formdata body
fp = open(filename, 'rb')
else:
f.seek(0, 2) # Seek to end of file
if f.tell() > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
f.seek(0) # Reset to beginning of file
fp = f
# image must be gif, jpeg, or png
file_type = mimetypes.guess_type(filename)
if file_type is None:
raise TweepError('Could not determine file type')
file_type = file_type[0]
if file_type not in ['image/gif', 'image/jpeg', 'image/png']:
raise TweepError('Invalid file type for image: %s' % file_type)
if isinstance(filename, six.text_type):
filename = filename.encode('utf-8')
BOUNDARY = b'Tw3ePy'
body = []
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="{0}";'
' filename="{1}"'.format(form_field, filename)
.encode('utf-8'))
body.append('Content-Type: {0}'.format(file_type).encode('utf-8'))
body.append(b'')
body.append(fp.read())
body.append(b'--' + BOUNDARY + b'--')
body.append(b'')
fp.close()
body = b'\r\n'.join(body)
# build headers
headers = {
'Content-Type': 'multipart/form-data; boundary=Tw3ePy',
'Content-Length': str(len(body))
}
return headers, body | python | def _pack_image(filename, max_size, form_field='image', f=None):
"""Pack image from file into multipart-formdata post body"""
# image must be less than 700kb in size
if f is None:
try:
if os.path.getsize(filename) > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
except os.error as e:
raise TweepError('Unable to access file: %s' % e.strerror)
        # build the multipart-formdata body
fp = open(filename, 'rb')
else:
f.seek(0, 2) # Seek to end of file
if f.tell() > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
f.seek(0) # Reset to beginning of file
fp = f
# image must be gif, jpeg, or png
file_type = mimetypes.guess_type(filename)
if file_type is None:
raise TweepError('Could not determine file type')
file_type = file_type[0]
if file_type not in ['image/gif', 'image/jpeg', 'image/png']:
raise TweepError('Invalid file type for image: %s' % file_type)
if isinstance(filename, six.text_type):
filename = filename.encode('utf-8')
BOUNDARY = b'Tw3ePy'
body = []
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="{0}";'
' filename="{1}"'.format(form_field, filename)
.encode('utf-8'))
body.append('Content-Type: {0}'.format(file_type).encode('utf-8'))
body.append(b'')
body.append(fp.read())
body.append(b'--' + BOUNDARY + b'--')
body.append(b'')
fp.close()
body = b'\r\n'.join(body)
# build headers
headers = {
'Content-Type': 'multipart/form-data; boundary=Tw3ePy',
'Content-Length': str(len(body))
}
return headers, body | [
"def",
"_pack_image",
"(",
"filename",
",",
"max_size",
",",
"form_field",
"=",
"'image'",
",",
"f",
"=",
"None",
")",
":",
"# image must be less than 700kb in size",
"if",
"f",
"is",
"None",
":",
"try",
":",
"if",
"os",
".",
"path",
".",
"getsize",
"(",
"filename",
")",
">",
"(",
"max_size",
"*",
"1024",
")",
":",
"raise",
"TweepError",
"(",
"'File is too big, must be less than %skb.'",
"%",
"max_size",
")",
"except",
"os",
".",
"error",
"as",
"e",
":",
"raise",
"TweepError",
"(",
"'Unable to access file: %s'",
"%",
"e",
".",
"strerror",
")",
"# build the mulitpart-formdata body",
"fp",
"=",
"open",
"(",
"filename",
",",
"'rb'",
")",
"else",
":",
"f",
".",
"seek",
"(",
"0",
",",
"2",
")",
"# Seek to end of file",
"if",
"f",
".",
"tell",
"(",
")",
">",
"(",
"max_size",
"*",
"1024",
")",
":",
"raise",
"TweepError",
"(",
"'File is too big, must be less than %skb.'",
"%",
"max_size",
")",
"f",
".",
"seek",
"(",
"0",
")",
"# Reset to beginning of file",
"fp",
"=",
"f",
"# image must be gif, jpeg, or png",
"file_type",
"=",
"mimetypes",
".",
"guess_type",
"(",
"filename",
")",
"if",
"file_type",
"is",
"None",
":",
"raise",
"TweepError",
"(",
"'Could not determine file type'",
")",
"file_type",
"=",
"file_type",
"[",
"0",
"]",
"if",
"file_type",
"not",
"in",
"[",
"'image/gif'",
",",
"'image/jpeg'",
",",
"'image/png'",
"]",
":",
"raise",
"TweepError",
"(",
"'Invalid file type for image: %s'",
"%",
"file_type",
")",
"if",
"isinstance",
"(",
"filename",
",",
"six",
".",
"text_type",
")",
":",
"filename",
"=",
"filename",
".",
"encode",
"(",
"'utf-8'",
")",
"BOUNDARY",
"=",
"b'Tw3ePy'",
"body",
"=",
"[",
"]",
"body",
".",
"append",
"(",
"b'--'",
"+",
"BOUNDARY",
")",
"body",
".",
"append",
"(",
"'Content-Disposition: form-data; name=\"{0}\";'",
"' filename=\"{1}\"'",
".",
"format",
"(",
"form_field",
",",
"filename",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"body",
".",
"append",
"(",
"'Content-Type: {0}'",
".",
"format",
"(",
"file_type",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"body",
".",
"append",
"(",
"b''",
")",
"body",
".",
"append",
"(",
"fp",
".",
"read",
"(",
")",
")",
"body",
".",
"append",
"(",
"b'--'",
"+",
"BOUNDARY",
"+",
"b'--'",
")",
"body",
".",
"append",
"(",
"b''",
")",
"fp",
".",
"close",
"(",
")",
"body",
"=",
"b'\\r\\n'",
".",
"join",
"(",
"body",
")",
"# build headers",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'multipart/form-data; boundary=Tw3ePy'",
",",
"'Content-Length'",
":",
"str",
"(",
"len",
"(",
"body",
")",
")",
"}",
"return",
"headers",
",",
"body"
] | Pack image from file into multipart-formdata post body | [
"Pack",
"image",
"from",
"file",
"into",
"multipart",
"-",
"formdata",
"post",
"body"
] | 04625a4b67c1ad01e5d38faa3093828b360d4a98 | https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/tweepy/api.py#L1344-L1394 | train |
bitcaster-io/bitcaster | src/bitcaster/web/templatetags/bitcaster.py | channel_submit_row | def channel_submit_row(context):
"""
Display the row of buttons for delete and save.
"""
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
show_save = context.get('show_save', True)
show_save_and_continue = context.get('show_save_and_continue', True)
can_delete = context['has_delete_permission']
can_add = context['has_add_permission']
can_change = context['has_change_permission']
ctx = Context(context)
ctx.update({
'show_delete_link': (not is_popup and
can_delete and
change and
context.get('show_delete', True)
),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': (can_add and
not is_popup and
(not save_as or context['add'])
),
'show_save_and_continue': (not is_popup and can_change and show_save_and_continue),
'show_save': show_save,
})
return ctx | python | def channel_submit_row(context):
"""
Display the row of buttons for delete and save.
"""
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
show_save = context.get('show_save', True)
show_save_and_continue = context.get('show_save_and_continue', True)
can_delete = context['has_delete_permission']
can_add = context['has_add_permission']
can_change = context['has_change_permission']
ctx = Context(context)
ctx.update({
'show_delete_link': (not is_popup and
can_delete and
change and
context.get('show_delete', True)
),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': (can_add and
not is_popup and
(not save_as or context['add'])
),
'show_save_and_continue': (not is_popup and can_change and show_save_and_continue),
'show_save': show_save,
})
return ctx | [
"def",
"channel_submit_row",
"(",
"context",
")",
":",
"change",
"=",
"context",
"[",
"'change'",
"]",
"is_popup",
"=",
"context",
"[",
"'is_popup'",
"]",
"save_as",
"=",
"context",
"[",
"'save_as'",
"]",
"show_save",
"=",
"context",
".",
"get",
"(",
"'show_save'",
",",
"True",
")",
"show_save_and_continue",
"=",
"context",
".",
"get",
"(",
"'show_save_and_continue'",
",",
"True",
")",
"can_delete",
"=",
"context",
"[",
"'has_delete_permission'",
"]",
"can_add",
"=",
"context",
"[",
"'has_add_permission'",
"]",
"can_change",
"=",
"context",
"[",
"'has_change_permission'",
"]",
"ctx",
"=",
"Context",
"(",
"context",
")",
"ctx",
".",
"update",
"(",
"{",
"'show_delete_link'",
":",
"(",
"not",
"is_popup",
"and",
"can_delete",
"and",
"change",
"and",
"context",
".",
"get",
"(",
"'show_delete'",
",",
"True",
")",
")",
",",
"'show_save_as_new'",
":",
"not",
"is_popup",
"and",
"change",
"and",
"save_as",
",",
"'show_save_and_add_another'",
":",
"(",
"can_add",
"and",
"not",
"is_popup",
"and",
"(",
"not",
"save_as",
"or",
"context",
"[",
"'add'",
"]",
")",
")",
",",
"'show_save_and_continue'",
":",
"(",
"not",
"is_popup",
"and",
"can_change",
"and",
"show_save_and_continue",
")",
",",
"'show_save'",
":",
"show_save",
",",
"}",
")",
"return",
"ctx"
] | Display the row of buttons for delete and save. | [
"Display",
"the",
"row",
"of",
"buttons",
"for",
"delete",
"and",
"save",
"."
] | 04625a4b67c1ad01e5d38faa3093828b360d4a98 | https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/web/templatetags/bitcaster.py#L77-L106 | train |
bitcaster-io/bitcaster | src/bitcaster/social_auth.py | BitcasterStrategy.get_setting | def get_setting(self, name):
notfound = object()
"get configuration from 'constance.config' first "
value = getattr(config, name, notfound)
if name.endswith('_WHITELISTED_DOMAINS'):
if value:
return value.split(',')
else:
return []
if value is notfound:
value = getattr(settings, name)
# Force text on URL named settings that are instance of Promise
if name.endswith('_URL'):
if isinstance(value, Promise):
value = force_text(value)
value = resolve_url(value)
return value | python | def get_setting(self, name):
notfound = object()
"get configuration from 'constance.config' first "
value = getattr(config, name, notfound)
if name.endswith('_WHITELISTED_DOMAINS'):
if value:
return value.split(',')
else:
return []
if value is notfound:
value = getattr(settings, name)
# Force text on URL named settings that are instance of Promise
if name.endswith('_URL'):
if isinstance(value, Promise):
value = force_text(value)
value = resolve_url(value)
return value | [
"def",
"get_setting",
"(",
"self",
",",
"name",
")",
":",
"notfound",
"=",
"object",
"(",
")",
"value",
"=",
"getattr",
"(",
"config",
",",
"name",
",",
"notfound",
")",
"if",
"name",
".",
"endswith",
"(",
"'_WHITELISTED_DOMAINS'",
")",
":",
"if",
"value",
":",
"return",
"value",
".",
"split",
"(",
"','",
")",
"else",
":",
"return",
"[",
"]",
"if",
"value",
"is",
"notfound",
":",
"value",
"=",
"getattr",
"(",
"settings",
",",
"name",
")",
"# Force text on URL named settings that are instance of Promise",
"if",
"name",
".",
"endswith",
"(",
"'_URL'",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Promise",
")",
":",
"value",
"=",
"force_text",
"(",
"value",
")",
"value",
"=",
"resolve_url",
"(",
"value",
")",
"return",
"value"
] | get configuration from 'constance.config' first | [
"get",
"configuration",
"from",
"constance",
".",
"config",
"first"
] | 04625a4b67c1ad01e5d38faa3093828b360d4a98 | https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/social_auth.py#L78-L95 | train |
bitcaster-io/bitcaster | src/bitcaster/messages.py | Wrapper.debug | def debug(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``DEBUG`` level."""
add(self.target_name, request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently) | python | def debug(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``DEBUG`` level."""
add(self.target_name, request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently) | [
"def",
"debug",
"(",
"self",
",",
"request",
",",
"message",
",",
"extra_tags",
"=",
"''",
",",
"fail_silently",
"=",
"False",
")",
":",
"add",
"(",
"self",
".",
"target_name",
",",
"request",
",",
"constants",
".",
"DEBUG",
",",
"message",
",",
"extra_tags",
"=",
"extra_tags",
",",
"fail_silently",
"=",
"fail_silently",
")"
] | Add a message with the ``DEBUG`` level. | [
"Add",
"a",
"message",
"with",
"the",
"DEBUG",
"level",
"."
] | 04625a4b67c1ad01e5d38faa3093828b360d4a98 | https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/messages.py#L54-L57 | train |
bitcaster-io/bitcaster | src/bitcaster/messages.py | Wrapper.info | def info(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``INFO`` level."""
add(self.target_name,
request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently) | python | def info(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``INFO`` level."""
add(self.target_name,
request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently) | [
"def",
"info",
"(",
"self",
",",
"request",
",",
"message",
",",
"extra_tags",
"=",
"''",
",",
"fail_silently",
"=",
"False",
")",
":",
"add",
"(",
"self",
".",
"target_name",
",",
"request",
",",
"constants",
".",
"INFO",
",",
"message",
",",
"extra_tags",
"=",
"extra_tags",
",",
"fail_silently",
"=",
"fail_silently",
")"
] | Add a message with the ``INFO`` level. | [
"Add",
"a",
"message",
"with",
"the",
"INFO",
"level",
"."
] | 04625a4b67c1ad01e5d38faa3093828b360d4a98 | https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/messages.py#L59-L63 | train |
bitcaster-io/bitcaster | src/bitcaster/messages.py | Wrapper.success | def success(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``SUCCESS`` level."""
add(self.target_name, request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently) | python | def success(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``SUCCESS`` level."""
add(self.target_name, request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently) | [
"def",
"success",
"(",
"self",
",",
"request",
",",
"message",
",",
"extra_tags",
"=",
"''",
",",
"fail_silently",
"=",
"False",
")",
":",
"add",
"(",
"self",
".",
"target_name",
",",
"request",
",",
"constants",
".",
"SUCCESS",
",",
"message",
",",
"extra_tags",
"=",
"extra_tags",
",",
"fail_silently",
"=",
"fail_silently",
")"
] | Add a message with the ``SUCCESS`` level. | [
"Add",
"a",
"message",
"with",
"the",
"SUCCESS",
"level",
"."
] | 04625a4b67c1ad01e5d38faa3093828b360d4a98 | https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/messages.py#L65-L68 | train |
bitcaster-io/bitcaster | src/bitcaster/messages.py | Wrapper.warning | def warning(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``WARNING`` level."""
add(self.target_name, request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently) | python | def warning(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``WARNING`` level."""
add(self.target_name, request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently) | [
"def",
"warning",
"(",
"self",
",",
"request",
",",
"message",
",",
"extra_tags",
"=",
"''",
",",
"fail_silently",
"=",
"False",
")",
":",
"add",
"(",
"self",
".",
"target_name",
",",
"request",
",",
"constants",
".",
"WARNING",
",",
"message",
",",
"extra_tags",
"=",
"extra_tags",
",",
"fail_silently",
"=",
"fail_silently",
")"
] | Add a message with the ``WARNING`` level. | [
"Add",
"a",
"message",
"with",
"the",
"WARNING",
"level",
"."
] | 04625a4b67c1ad01e5d38faa3093828b360d4a98 | https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/messages.py#L70-L73 | train |
bitcaster-io/bitcaster | src/bitcaster/messages.py | Wrapper.error | def error(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``ERROR`` level."""
add(self.target_name, request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently) | python | def error(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``ERROR`` level."""
add(self.target_name, request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently) | [
"def",
"error",
"(",
"self",
",",
"request",
",",
"message",
",",
"extra_tags",
"=",
"''",
",",
"fail_silently",
"=",
"False",
")",
":",
"add",
"(",
"self",
".",
"target_name",
",",
"request",
",",
"constants",
".",
"ERROR",
",",
"message",
",",
"extra_tags",
"=",
"extra_tags",
",",
"fail_silently",
"=",
"fail_silently",
")"
] | Add a message with the ``ERROR`` level. | [
"Add",
"a",
"message",
"with",
"the",
"ERROR",
"level",
"."
] | 04625a4b67c1ad01e5d38faa3093828b360d4a98 | https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/messages.py#L75-L78 | train |
bread-and-pepper/django-userena | userena/views.py | signup | def signup(request, signup_form=SignupForm,
template_name='userena/signup_form.html', success_url=None,
extra_context=None):
"""
Signup of an account.
Signup requiring a username, email and password. After signup a user gets
an email with an activation link used to activate their account. After
successful signup redirects to ``success_url``.
:param signup_form:
Form that will be used to sign a user. Defaults to userena's
:class:`SignupForm`.
:param template_name:
String containing the template name that will be used to display the
signup form. Defaults to ``userena/signup_form.html``.
:param success_url:
String containing the URI which should be redirected to after a
successful signup. If not supplied will redirect to
``userena_signup_complete`` view.
:param extra_context:
Dictionary containing variables which are added to the template
context. Defaults to a dictionary with a ``form`` key containing the
``signup_form``.
**Context**
``form``
Form supplied by ``signup_form``.
"""
# If signup is disabled, return 403
if userena_settings.USERENA_DISABLE_SIGNUP:
raise PermissionDenied
# If no usernames are wanted and the default form is used, fallback to the
# default form that doesn't display to enter the username.
if userena_settings.USERENA_WITHOUT_USERNAMES and (signup_form == SignupForm):
signup_form = SignupFormOnlyEmail
form = signup_form()
if request.method == 'POST':
form = signup_form(request.POST, request.FILES)
if form.is_valid():
user = form.save()
# Send the signup complete signal
userena_signals.signup_complete.send(sender=None,
user=user)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_signup_complete',
kwargs={'username': user.username})
# A new signed user should logout the old one.
if request.user.is_authenticated():
logout(request)
if (userena_settings.USERENA_SIGNIN_AFTER_SIGNUP and
not userena_settings.USERENA_ACTIVATION_REQUIRED):
user = authenticate(identification=user.email, check_password=False)
login(request, user)
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request) | python | def signup(request, signup_form=SignupForm,
template_name='userena/signup_form.html', success_url=None,
extra_context=None):
"""
Signup of an account.
Signup requiring a username, email and password. After signup a user gets
an email with an activation link used to activate their account. After
successful signup redirects to ``success_url``.
:param signup_form:
Form that will be used to sign a user. Defaults to userena's
:class:`SignupForm`.
:param template_name:
String containing the template name that will be used to display the
signup form. Defaults to ``userena/signup_form.html``.
:param success_url:
String containing the URI which should be redirected to after a
successful signup. If not supplied will redirect to
``userena_signup_complete`` view.
:param extra_context:
Dictionary containing variables which are added to the template
context. Defaults to a dictionary with a ``form`` key containing the
``signup_form``.
**Context**
``form``
Form supplied by ``signup_form``.
"""
# If signup is disabled, return 403
if userena_settings.USERENA_DISABLE_SIGNUP:
raise PermissionDenied
# If no usernames are wanted and the default form is used, fallback to the
# default form that doesn't display to enter the username.
if userena_settings.USERENA_WITHOUT_USERNAMES and (signup_form == SignupForm):
signup_form = SignupFormOnlyEmail
form = signup_form()
if request.method == 'POST':
form = signup_form(request.POST, request.FILES)
if form.is_valid():
user = form.save()
# Send the signup complete signal
userena_signals.signup_complete.send(sender=None,
user=user)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_signup_complete',
kwargs={'username': user.username})
# A new signed user should logout the old one.
if request.user.is_authenticated():
logout(request)
if (userena_settings.USERENA_SIGNIN_AFTER_SIGNUP and
not userena_settings.USERENA_ACTIVATION_REQUIRED):
user = authenticate(identification=user.email, check_password=False)
login(request, user)
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request) | [
"def",
"signup",
"(",
"request",
",",
"signup_form",
"=",
"SignupForm",
",",
"template_name",
"=",
"'userena/signup_form.html'",
",",
"success_url",
"=",
"None",
",",
"extra_context",
"=",
"None",
")",
":",
"# If signup is disabled, return 403",
"if",
"userena_settings",
".",
"USERENA_DISABLE_SIGNUP",
":",
"raise",
"PermissionDenied",
"# If no usernames are wanted and the default form is used, fallback to the",
"# default form that doesn't display to enter the username.",
"if",
"userena_settings",
".",
"USERENA_WITHOUT_USERNAMES",
"and",
"(",
"signup_form",
"==",
"SignupForm",
")",
":",
"signup_form",
"=",
"SignupFormOnlyEmail",
"form",
"=",
"signup_form",
"(",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"form",
"=",
"signup_form",
"(",
"request",
".",
"POST",
",",
"request",
".",
"FILES",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"user",
"=",
"form",
".",
"save",
"(",
")",
"# Send the signup complete signal",
"userena_signals",
".",
"signup_complete",
".",
"send",
"(",
"sender",
"=",
"None",
",",
"user",
"=",
"user",
")",
"if",
"success_url",
":",
"redirect_to",
"=",
"success_url",
"else",
":",
"redirect_to",
"=",
"reverse",
"(",
"'userena_signup_complete'",
",",
"kwargs",
"=",
"{",
"'username'",
":",
"user",
".",
"username",
"}",
")",
"# A new signed user should logout the old one.",
"if",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
":",
"logout",
"(",
"request",
")",
"if",
"(",
"userena_settings",
".",
"USERENA_SIGNIN_AFTER_SIGNUP",
"and",
"not",
"userena_settings",
".",
"USERENA_ACTIVATION_REQUIRED",
")",
":",
"user",
"=",
"authenticate",
"(",
"identification",
"=",
"user",
".",
"email",
",",
"check_password",
"=",
"False",
")",
"login",
"(",
"request",
",",
"user",
")",
"return",
"redirect",
"(",
"redirect_to",
")",
"if",
"not",
"extra_context",
":",
"extra_context",
"=",
"dict",
"(",
")",
"extra_context",
"[",
"'form'",
"]",
"=",
"form",
"return",
"ExtraContextTemplateView",
".",
"as_view",
"(",
"template_name",
"=",
"template_name",
",",
"extra_context",
"=",
"extra_context",
")",
"(",
"request",
")"
] | Signup of an account.
Signup requiring a username, email and password. After signup a user gets
an email with an activation link used to activate their account. After
successful signup redirects to ``success_url``.
:param signup_form:
Form that will be used to sign a user. Defaults to userena's
:class:`SignupForm`.
:param template_name:
String containing the template name that will be used to display the
signup form. Defaults to ``userena/signup_form.html``.
:param success_url:
String containing the URI which should be redirected to after a
successful signup. If not supplied will redirect to
``userena_signup_complete`` view.
:param extra_context:
Dictionary containing variables which are added to the template
context. Defaults to a dictionary with a ``form`` key containing the
``signup_form``.
**Context**
``form``
Form supplied by ``signup_form``. | [
"Signup",
"of",
"an",
"account",
"."
] | 7dfb3d5d148127e32f217a62096d507266a3a83c | https://github.com/bread-and-pepper/django-userena/blob/7dfb3d5d148127e32f217a62096d507266a3a83c/userena/views.py#L73-L146 | train |
openvax/mhcflurry | mhcflurry/hyperparameters.py | HyperparameterDefaults.extend | def extend(self, other):
"""
Return a new HyperparameterDefaults instance containing the
hyperparameters from the current instance combined with
those from other.
It is an error if self and other have any hyperparameters in
common.
"""
overlap = [key for key in other.defaults if key in self.defaults]
if overlap:
raise ValueError(
"Duplicate hyperparameter(s): %s" % " ".join(overlap))
new = dict(self.defaults)
new.update(other.defaults)
return HyperparameterDefaults(**new) | python | def extend(self, other):
"""
Return a new HyperparameterDefaults instance containing the
hyperparameters from the current instance combined with
those from other.
It is an error if self and other have any hyperparameters in
common.
"""
overlap = [key for key in other.defaults if key in self.defaults]
if overlap:
raise ValueError(
"Duplicate hyperparameter(s): %s" % " ".join(overlap))
new = dict(self.defaults)
new.update(other.defaults)
return HyperparameterDefaults(**new) | [
"def",
"extend",
"(",
"self",
",",
"other",
")",
":",
"overlap",
"=",
"[",
"key",
"for",
"key",
"in",
"other",
".",
"defaults",
"if",
"key",
"in",
"self",
".",
"defaults",
"]",
"if",
"overlap",
":",
"raise",
"ValueError",
"(",
"\"Duplicate hyperparameter(s): %s\"",
"%",
"\" \"",
".",
"join",
"(",
"overlap",
")",
")",
"new",
"=",
"dict",
"(",
"self",
".",
"defaults",
")",
"new",
".",
"update",
"(",
"other",
".",
"defaults",
")",
"return",
"HyperparameterDefaults",
"(",
"*",
"*",
"new",
")"
] | Return a new HyperparameterDefaults instance containing the
hyperparameters from the current instance combined with
those from other.
It is an error if self and other have any hyperparameters in
common. | [
"Return",
"a",
"new",
"HyperparameterDefaults",
"instance",
"containing",
"the",
"hyperparameters",
"from",
"the",
"current",
"instance",
"combined",
"with",
"those",
"from",
"other",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/hyperparameters.py#L22-L37 | train |
openvax/mhcflurry | mhcflurry/hyperparameters.py | HyperparameterDefaults.with_defaults | def with_defaults(self, obj):
"""
Given a dict of hyperparameter settings, return a dict containing
those settings augmented by the defaults for any keys missing from
the dict.
"""
self.check_valid_keys(obj)
obj = dict(obj)
for (key, value) in self.defaults.items():
if key not in obj:
obj[key] = value
return obj | python | def with_defaults(self, obj):
"""
Given a dict of hyperparameter settings, return a dict containing
those settings augmented by the defaults for any keys missing from
the dict.
"""
self.check_valid_keys(obj)
obj = dict(obj)
for (key, value) in self.defaults.items():
if key not in obj:
obj[key] = value
return obj | [
"def",
"with_defaults",
"(",
"self",
",",
"obj",
")",
":",
"self",
".",
"check_valid_keys",
"(",
"obj",
")",
"obj",
"=",
"dict",
"(",
"obj",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"self",
".",
"defaults",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"obj",
":",
"obj",
"[",
"key",
"]",
"=",
"value",
"return",
"obj"
] | Given a dict of hyperparameter settings, return a dict containing
those settings augmented by the defaults for any keys missing from
the dict. | [
"Given",
"a",
"dict",
"of",
"hyperparameter",
"settings",
"return",
"a",
"dict",
"containing",
"those",
"settings",
"augmented",
"by",
"the",
"defaults",
"for",
"any",
"keys",
"missing",
"from",
"the",
"dict",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/hyperparameters.py#L39-L50 | train |
openvax/mhcflurry | mhcflurry/hyperparameters.py | HyperparameterDefaults.subselect | def subselect(self, obj):
"""
Filter a dict of hyperparameter settings to only those keys defined
in this HyperparameterDefaults .
"""
return dict(
(key, value) for (key, value)
in obj.items()
if key in self.defaults) | python | def subselect(self, obj):
"""
Filter a dict of hyperparameter settings to only those keys defined
in this HyperparameterDefaults .
"""
return dict(
(key, value) for (key, value)
in obj.items()
if key in self.defaults) | [
"def",
"subselect",
"(",
"self",
",",
"obj",
")",
":",
"return",
"dict",
"(",
"(",
"key",
",",
"value",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"obj",
".",
"items",
"(",
")",
"if",
"key",
"in",
"self",
".",
"defaults",
")"
] | Filter a dict of hyperparameter settings to only those keys defined
in this HyperparameterDefaults . | [
"Filter",
"a",
"dict",
"of",
"hyperparameter",
"settings",
"to",
"only",
"those",
"keys",
"defined",
"in",
"this",
"HyperparameterDefaults",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/hyperparameters.py#L52-L60 | train |
openvax/mhcflurry | mhcflurry/hyperparameters.py | HyperparameterDefaults.check_valid_keys | def check_valid_keys(self, obj):
"""
Given a dict of hyperparameter settings, throw an exception if any
keys are not defined in this HyperparameterDefaults instance.
"""
invalid_keys = [
x for x in obj if x not in self.defaults
]
if invalid_keys:
raise ValueError(
"No such model parameters: %s. Valid parameters are: %s"
% (" ".join(invalid_keys),
" ".join(self.defaults))) | python | def check_valid_keys(self, obj):
"""
Given a dict of hyperparameter settings, throw an exception if any
keys are not defined in this HyperparameterDefaults instance.
"""
invalid_keys = [
x for x in obj if x not in self.defaults
]
if invalid_keys:
raise ValueError(
"No such model parameters: %s. Valid parameters are: %s"
% (" ".join(invalid_keys),
" ".join(self.defaults))) | [
"def",
"check_valid_keys",
"(",
"self",
",",
"obj",
")",
":",
"invalid_keys",
"=",
"[",
"x",
"for",
"x",
"in",
"obj",
"if",
"x",
"not",
"in",
"self",
".",
"defaults",
"]",
"if",
"invalid_keys",
":",
"raise",
"ValueError",
"(",
"\"No such model parameters: %s. Valid parameters are: %s\"",
"%",
"(",
"\" \"",
".",
"join",
"(",
"invalid_keys",
")",
",",
"\" \"",
".",
"join",
"(",
"self",
".",
"defaults",
")",
")",
")"
] | Given a dict of hyperparameter settings, throw an exception if any
keys are not defined in this HyperparameterDefaults instance. | [
"Given",
"a",
"dict",
"of",
"hyperparameter",
"settings",
"throw",
"an",
"exception",
"if",
"any",
"keys",
"are",
"not",
"defined",
"in",
"this",
"HyperparameterDefaults",
"instance",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/hyperparameters.py#L62-L74 | train |
openvax/mhcflurry | mhcflurry/hyperparameters.py | HyperparameterDefaults.models_grid | def models_grid(self, **kwargs):
'''
Make a grid of models by taking the cartesian product of all specified
model parameter lists.
Parameters
-----------
The valid kwarg parameters are the entries of this
HyperparameterDefaults instance. Each parameter must be a list
giving the values to search across.
Returns
-----------
list of dict giving the parameters for each model. The length of the
list is the product of the lengths of the input lists.
'''
# Check parameters
self.check_valid_keys(kwargs)
for (key, value) in kwargs.items():
if not isinstance(value, list):
raise ValueError(
"All parameters must be lists, but %s is %s"
% (key, str(type(value))))
# Make models, using defaults.
parameters = dict(
(key, [value]) for (key, value) in self.defaults.items())
parameters.update(kwargs)
parameter_names = list(parameters)
parameter_values = [parameters[name] for name in parameter_names]
models = [
dict(zip(parameter_names, model_values))
for model_values in itertools.product(*parameter_values)
]
return models | python | def models_grid(self, **kwargs):
'''
Make a grid of models by taking the cartesian product of all specified
model parameter lists.
Parameters
-----------
The valid kwarg parameters are the entries of this
HyperparameterDefaults instance. Each parameter must be a list
giving the values to search across.
Returns
-----------
list of dict giving the parameters for each model. The length of the
list is the product of the lengths of the input lists.
'''
# Check parameters
self.check_valid_keys(kwargs)
for (key, value) in kwargs.items():
if not isinstance(value, list):
raise ValueError(
"All parameters must be lists, but %s is %s"
% (key, str(type(value))))
# Make models, using defaults.
parameters = dict(
(key, [value]) for (key, value) in self.defaults.items())
parameters.update(kwargs)
parameter_names = list(parameters)
parameter_values = [parameters[name] for name in parameter_names]
models = [
dict(zip(parameter_names, model_values))
for model_values in itertools.product(*parameter_values)
]
return models | [
"def",
"models_grid",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# Check parameters",
"self",
".",
"check_valid_keys",
"(",
"kwargs",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"All parameters must be lists, but %s is %s\"",
"%",
"(",
"key",
",",
"str",
"(",
"type",
"(",
"value",
")",
")",
")",
")",
"# Make models, using defaults.",
"parameters",
"=",
"dict",
"(",
"(",
"key",
",",
"[",
"value",
"]",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"self",
".",
"defaults",
".",
"items",
"(",
")",
")",
"parameters",
".",
"update",
"(",
"kwargs",
")",
"parameter_names",
"=",
"list",
"(",
"parameters",
")",
"parameter_values",
"=",
"[",
"parameters",
"[",
"name",
"]",
"for",
"name",
"in",
"parameter_names",
"]",
"models",
"=",
"[",
"dict",
"(",
"zip",
"(",
"parameter_names",
",",
"model_values",
")",
")",
"for",
"model_values",
"in",
"itertools",
".",
"product",
"(",
"*",
"parameter_values",
")",
"]",
"return",
"models"
] | Make a grid of models by taking the cartesian product of all specified
model parameter lists.
Parameters
-----------
The valid kwarg parameters are the entries of this
HyperparameterDefaults instance. Each parameter must be a list
giving the values to search across.
Returns
-----------
list of dict giving the parameters for each model. The length of the
list is the product of the lengths of the input lists. | [
"Make",
"a",
"grid",
"of",
"models",
"by",
"taking",
"the",
"cartesian",
"product",
"of",
"all",
"specified",
"model",
"parameter",
"lists",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/hyperparameters.py#L76-L112 | train |
openvax/mhcflurry | mhcflurry/allele_encoding.py | AlleleEncoding.fixed_length_vector_encoded_sequences | def fixed_length_vector_encoded_sequences(self, vector_encoding_name):
"""
Encode alleles.
Parameters
----------
vector_encoding_name : string
How to represent amino acids.
One of "BLOSUM62", "one-hot", etc. Full list of supported vector
encodings is given by available_vector_encodings() in amino_acid.
Returns
-------
numpy.array with shape (num sequences, sequence length, m) where m is
vector_encoding_length(vector_encoding_name)
"""
cache_key = (
"fixed_length_vector_encoding",
vector_encoding_name)
if cache_key not in self.encoding_cache:
index_encoded_matrix = amino_acid.index_encoding(
self.fixed_length_sequences.values,
amino_acid.AMINO_ACID_INDEX)
vector_encoded = amino_acid.fixed_vectors_encoding(
index_encoded_matrix,
amino_acid.ENCODING_DATA_FRAMES[vector_encoding_name])
result = vector_encoded[self.indices]
self.encoding_cache[cache_key] = result
return self.encoding_cache[cache_key] | python | def fixed_length_vector_encoded_sequences(self, vector_encoding_name):
"""
Encode alleles.
Parameters
----------
vector_encoding_name : string
How to represent amino acids.
One of "BLOSUM62", "one-hot", etc. Full list of supported vector
encodings is given by available_vector_encodings() in amino_acid.
Returns
-------
numpy.array with shape (num sequences, sequence length, m) where m is
vector_encoding_length(vector_encoding_name)
"""
cache_key = (
"fixed_length_vector_encoding",
vector_encoding_name)
if cache_key not in self.encoding_cache:
index_encoded_matrix = amino_acid.index_encoding(
self.fixed_length_sequences.values,
amino_acid.AMINO_ACID_INDEX)
vector_encoded = amino_acid.fixed_vectors_encoding(
index_encoded_matrix,
amino_acid.ENCODING_DATA_FRAMES[vector_encoding_name])
result = vector_encoded[self.indices]
self.encoding_cache[cache_key] = result
return self.encoding_cache[cache_key] | [
"def",
"fixed_length_vector_encoded_sequences",
"(",
"self",
",",
"vector_encoding_name",
")",
":",
"cache_key",
"=",
"(",
"\"fixed_length_vector_encoding\"",
",",
"vector_encoding_name",
")",
"if",
"cache_key",
"not",
"in",
"self",
".",
"encoding_cache",
":",
"index_encoded_matrix",
"=",
"amino_acid",
".",
"index_encoding",
"(",
"self",
".",
"fixed_length_sequences",
".",
"values",
",",
"amino_acid",
".",
"AMINO_ACID_INDEX",
")",
"vector_encoded",
"=",
"amino_acid",
".",
"fixed_vectors_encoding",
"(",
"index_encoded_matrix",
",",
"amino_acid",
".",
"ENCODING_DATA_FRAMES",
"[",
"vector_encoding_name",
"]",
")",
"result",
"=",
"vector_encoded",
"[",
"self",
".",
"indices",
"]",
"self",
".",
"encoding_cache",
"[",
"cache_key",
"]",
"=",
"result",
"return",
"self",
".",
"encoding_cache",
"[",
"cache_key",
"]"
] | Encode alleles.
Parameters
----------
vector_encoding_name : string
How to represent amino acids.
One of "BLOSUM62", "one-hot", etc. Full list of supported vector
encodings is given by available_vector_encodings() in amino_acid.
Returns
-------
numpy.array with shape (num sequences, sequence length, m) where m is
vector_encoding_length(vector_encoding_name) | [
"Encode",
"alleles",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/allele_encoding.py#L40-L68 | train |
openvax/mhcflurry | mhcflurry/amino_acid.py | index_encoding | def index_encoding(sequences, letter_to_index_dict):
"""
Encode a sequence of same-length strings to a matrix of integers of the
same shape. The map from characters to integers is given by
`letter_to_index_dict`.
Given a sequence of `n` strings all of length `k`, return a `k * n` array where
the (`i`, `j`)th element is `letter_to_index_dict[sequence[i][j]]`.
Parameters
----------
sequences : list of length n of strings of length k
letter_to_index_dict : dict : string -> int
Returns
-------
numpy.array of integers with shape (`k`, `n`)
"""
df = pandas.DataFrame(iter(s) for s in sequences)
result = df.replace(letter_to_index_dict)
return result.values | python | def index_encoding(sequences, letter_to_index_dict):
"""
Encode a sequence of same-length strings to a matrix of integers of the
same shape. The map from characters to integers is given by
`letter_to_index_dict`.
Given a sequence of `n` strings all of length `k`, return a `k * n` array where
the (`i`, `j`)th element is `letter_to_index_dict[sequence[i][j]]`.
Parameters
----------
sequences : list of length n of strings of length k
letter_to_index_dict : dict : string -> int
Returns
-------
numpy.array of integers with shape (`k`, `n`)
"""
df = pandas.DataFrame(iter(s) for s in sequences)
result = df.replace(letter_to_index_dict)
return result.values | [
"def",
"index_encoding",
"(",
"sequences",
",",
"letter_to_index_dict",
")",
":",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"iter",
"(",
"s",
")",
"for",
"s",
"in",
"sequences",
")",
"result",
"=",
"df",
".",
"replace",
"(",
"letter_to_index_dict",
")",
"return",
"result",
".",
"values"
] | Encode a sequence of same-length strings to a matrix of integers of the
same shape. The map from characters to integers is given by
`letter_to_index_dict`.
Given a sequence of `n` strings all of length `k`, return a `k * n` array where
the (`i`, `j`)th element is `letter_to_index_dict[sequence[i][j]]`.
Parameters
----------
sequences : list of length n of strings of length k
letter_to_index_dict : dict : string -> int
Returns
-------
numpy.array of integers with shape (`k`, `n`) | [
"Encode",
"a",
"sequence",
"of",
"same",
"-",
"length",
"strings",
"to",
"a",
"matrix",
"of",
"integers",
"of",
"the",
"same",
"shape",
".",
"The",
"map",
"from",
"characters",
"to",
"integers",
"is",
"given",
"by",
"letter_to_index_dict",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/amino_acid.py#L110-L130 | train |
openvax/mhcflurry | mhcflurry/class1_neural_network.py | Class1NeuralNetwork.apply_hyperparameter_renames | def apply_hyperparameter_renames(cls, hyperparameters):
"""
Handle hyperparameter renames.
Parameters
----------
hyperparameters : dict
Returns
-------
dict : updated hyperparameters
"""
for (from_name, to_name) in cls.hyperparameter_renames.items():
if from_name in hyperparameters:
value = hyperparameters.pop(from_name)
if to_name:
hyperparameters[to_name] = value
return hyperparameters | python | def apply_hyperparameter_renames(cls, hyperparameters):
"""
Handle hyperparameter renames.
Parameters
----------
hyperparameters : dict
Returns
-------
dict : updated hyperparameters
"""
for (from_name, to_name) in cls.hyperparameter_renames.items():
if from_name in hyperparameters:
value = hyperparameters.pop(from_name)
if to_name:
hyperparameters[to_name] = value
return hyperparameters | [
"def",
"apply_hyperparameter_renames",
"(",
"cls",
",",
"hyperparameters",
")",
":",
"for",
"(",
"from_name",
",",
"to_name",
")",
"in",
"cls",
".",
"hyperparameter_renames",
".",
"items",
"(",
")",
":",
"if",
"from_name",
"in",
"hyperparameters",
":",
"value",
"=",
"hyperparameters",
".",
"pop",
"(",
"from_name",
")",
"if",
"to_name",
":",
"hyperparameters",
"[",
"to_name",
"]",
"=",
"value",
"return",
"hyperparameters"
] | Handle hyperparameter renames.
Parameters
----------
hyperparameters : dict
Returns
-------
dict : updated hyperparameters | [
"Handle",
"hyperparameter",
"renames",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_neural_network.py#L136-L154 | train |
openvax/mhcflurry | mhcflurry/class1_neural_network.py | Class1NeuralNetwork.borrow_cached_network | def borrow_cached_network(klass, network_json, network_weights):
"""
Return a keras Model with the specified architecture and weights.
As an optimization, when possible this will reuse architectures from a
process-wide cache.
The returned object is "borrowed" in the sense that its weights can
change later after subsequent calls to this method from other objects.
If you're using this from a parallel implementation you'll need to
hold a lock while using the returned object.
Parameters
----------
network_json : string of JSON
network_weights : list of numpy.array
Returns
-------
keras.models.Model
"""
assert network_weights is not None
key = klass.keras_network_cache_key(network_json)
if key not in klass.KERAS_MODELS_CACHE:
# Cache miss.
import keras.models
network = keras.models.model_from_json(network_json)
existing_weights = None
else:
# Cache hit.
(network, existing_weights) = klass.KERAS_MODELS_CACHE[key]
if existing_weights is not network_weights:
network.set_weights(network_weights)
klass.KERAS_MODELS_CACHE[key] = (network, network_weights)
# As an added safety check we overwrite the fit method on the returned
# model to throw an error if it is called.
def throw(*args, **kwargs):
raise NotImplementedError("Do not call fit on cached model.")
network.fit = throw
return network | python | def borrow_cached_network(klass, network_json, network_weights):
"""
Return a keras Model with the specified architecture and weights.
As an optimization, when possible this will reuse architectures from a
process-wide cache.
The returned object is "borrowed" in the sense that its weights can
change later after subsequent calls to this method from other objects.
If you're using this from a parallel implementation you'll need to
hold a lock while using the returned object.
Parameters
----------
network_json : string of JSON
network_weights : list of numpy.array
Returns
-------
keras.models.Model
"""
assert network_weights is not None
key = klass.keras_network_cache_key(network_json)
if key not in klass.KERAS_MODELS_CACHE:
# Cache miss.
import keras.models
network = keras.models.model_from_json(network_json)
existing_weights = None
else:
# Cache hit.
(network, existing_weights) = klass.KERAS_MODELS_CACHE[key]
if existing_weights is not network_weights:
network.set_weights(network_weights)
klass.KERAS_MODELS_CACHE[key] = (network, network_weights)
# As an added safety check we overwrite the fit method on the returned
# model to throw an error if it is called.
def throw(*args, **kwargs):
raise NotImplementedError("Do not call fit on cached model.")
network.fit = throw
return network | [
"def",
"borrow_cached_network",
"(",
"klass",
",",
"network_json",
",",
"network_weights",
")",
":",
"assert",
"network_weights",
"is",
"not",
"None",
"key",
"=",
"klass",
".",
"keras_network_cache_key",
"(",
"network_json",
")",
"if",
"key",
"not",
"in",
"klass",
".",
"KERAS_MODELS_CACHE",
":",
"# Cache miss.",
"import",
"keras",
".",
"models",
"network",
"=",
"keras",
".",
"models",
".",
"model_from_json",
"(",
"network_json",
")",
"existing_weights",
"=",
"None",
"else",
":",
"# Cache hit.",
"(",
"network",
",",
"existing_weights",
")",
"=",
"klass",
".",
"KERAS_MODELS_CACHE",
"[",
"key",
"]",
"if",
"existing_weights",
"is",
"not",
"network_weights",
":",
"network",
".",
"set_weights",
"(",
"network_weights",
")",
"klass",
".",
"KERAS_MODELS_CACHE",
"[",
"key",
"]",
"=",
"(",
"network",
",",
"network_weights",
")",
"# As an added safety check we overwrite the fit method on the returned",
"# model to throw an error if it is called.",
"def",
"throw",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"Do not call fit on cached model.\"",
")",
"network",
".",
"fit",
"=",
"throw",
"return",
"network"
] | Return a keras Model with the specified architecture and weights.
As an optimization, when possible this will reuse architectures from a
process-wide cache.
The returned object is "borrowed" in the sense that its weights can
change later after subsequent calls to this method from other objects.
If you're using this from a parallel implementation you'll need to
hold a lock while using the returned object.
Parameters
----------
network_json : string of JSON
network_weights : list of numpy.array
Returns
-------
keras.models.Model | [
"Return",
"a",
"keras",
"Model",
"with",
"the",
"specified",
"architecture",
"and",
"weights",
".",
"As",
"an",
"optimization",
"when",
"possible",
"this",
"will",
"reuse",
"architectures",
"from",
"a",
"process",
"-",
"wide",
"cache",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_neural_network.py#L183-L224 | train |
openvax/mhcflurry | mhcflurry/class1_neural_network.py | Class1NeuralNetwork.network | def network(self, borrow=False):
"""
Return the keras model associated with this predictor.
Parameters
----------
borrow : bool
Whether to return a cached model if possible. See
borrow_cached_network for details
Returns
-------
keras.models.Model
"""
if self._network is None and self.network_json is not None:
self.load_weights()
if borrow:
return self.borrow_cached_network(
self.network_json,
self.network_weights)
else:
import keras.models
self._network = keras.models.model_from_json(self.network_json)
if self.network_weights is not None:
self._network.set_weights(self.network_weights)
self.network_json = None
self.network_weights = None
return self._network | python | def network(self, borrow=False):
"""
Return the keras model associated with this predictor.
Parameters
----------
borrow : bool
Whether to return a cached model if possible. See
borrow_cached_network for details
Returns
-------
keras.models.Model
"""
if self._network is None and self.network_json is not None:
self.load_weights()
if borrow:
return self.borrow_cached_network(
self.network_json,
self.network_weights)
else:
import keras.models
self._network = keras.models.model_from_json(self.network_json)
if self.network_weights is not None:
self._network.set_weights(self.network_weights)
self.network_json = None
self.network_weights = None
return self._network | [
"def",
"network",
"(",
"self",
",",
"borrow",
"=",
"False",
")",
":",
"if",
"self",
".",
"_network",
"is",
"None",
"and",
"self",
".",
"network_json",
"is",
"not",
"None",
":",
"self",
".",
"load_weights",
"(",
")",
"if",
"borrow",
":",
"return",
"self",
".",
"borrow_cached_network",
"(",
"self",
".",
"network_json",
",",
"self",
".",
"network_weights",
")",
"else",
":",
"import",
"keras",
".",
"models",
"self",
".",
"_network",
"=",
"keras",
".",
"models",
".",
"model_from_json",
"(",
"self",
".",
"network_json",
")",
"if",
"self",
".",
"network_weights",
"is",
"not",
"None",
":",
"self",
".",
"_network",
".",
"set_weights",
"(",
"self",
".",
"network_weights",
")",
"self",
".",
"network_json",
"=",
"None",
"self",
".",
"network_weights",
"=",
"None",
"return",
"self",
".",
"_network"
] | Return the keras model associated with this predictor.
Parameters
----------
borrow : bool
Whether to return a cached model if possible. See
borrow_cached_network for details
Returns
-------
keras.models.Model | [
"Return",
"the",
"keras",
"model",
"associated",
"with",
"this",
"predictor",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_neural_network.py#L226-L253 | train |
openvax/mhcflurry | mhcflurry/class1_neural_network.py | Class1NeuralNetwork.load_weights | def load_weights(self):
"""
Load weights by evaluating self.network_weights_loader, if needed.
After calling this, self.network_weights_loader will be None and
self.network_weights will be the weights list, if available.
"""
if self.network_weights_loader:
self.network_weights = self.network_weights_loader()
self.network_weights_loader = None | python | def load_weights(self):
"""
Load weights by evaluating self.network_weights_loader, if needed.
After calling this, self.network_weights_loader will be None and
self.network_weights will be the weights list, if available.
"""
if self.network_weights_loader:
self.network_weights = self.network_weights_loader()
self.network_weights_loader = None | [
"def",
"load_weights",
"(",
"self",
")",
":",
"if",
"self",
".",
"network_weights_loader",
":",
"self",
".",
"network_weights",
"=",
"self",
".",
"network_weights_loader",
"(",
")",
"self",
".",
"network_weights_loader",
"=",
"None"
] | Load weights by evaluating self.network_weights_loader, if needed.
After calling this, self.network_weights_loader will be None and
self.network_weights will be the weights list, if available. | [
"Load",
"weights",
"by",
"evaluating",
"self",
".",
"network_weights_loader",
"if",
"needed",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_neural_network.py#L315-L324 | train |
openvax/mhcflurry | mhcflurry/class1_neural_network.py | Class1NeuralNetwork.predict | def predict(self, peptides, allele_encoding=None, batch_size=4096):
"""
Predict affinities.
If peptides are specified as EncodableSequences, then the predictions
will be cached for this predictor as long as the EncodableSequences object
remains in memory. The cache is keyed on the object identity of the
EncodableSequences, not the sequences themselves.
Parameters
----------
peptides : EncodableSequences or list of string
allele_encoding : AlleleEncoding, optional
Only required when this model is a pan-allele model
batch_size : int
batch_size passed to Keras
Returns
-------
numpy.array of nM affinity predictions
"""
assert self.prediction_cache is not None
use_cache = (
allele_encoding is None and
isinstance(peptides, EncodableSequences))
if use_cache and peptides in self.prediction_cache:
return self.prediction_cache[peptides].copy()
x_dict = {
'peptide': self.peptides_to_network_input(peptides)
}
if allele_encoding is not None:
allele_input = self.allele_encoding_to_network_input(allele_encoding)
x_dict['allele'] = allele_input
network = self.network(borrow=True)
raw_predictions = network.predict(x_dict, batch_size=batch_size)
predictions = numpy.array(raw_predictions, dtype = "float64")[:,0]
result = to_ic50(predictions)
if use_cache:
self.prediction_cache[peptides] = result
return result | python | def predict(self, peptides, allele_encoding=None, batch_size=4096):
"""
Predict affinities.
If peptides are specified as EncodableSequences, then the predictions
will be cached for this predictor as long as the EncodableSequences object
remains in memory. The cache is keyed on the object identity of the
EncodableSequences, not the sequences themselves.
Parameters
----------
peptides : EncodableSequences or list of string
allele_encoding : AlleleEncoding, optional
Only required when this model is a pan-allele model
batch_size : int
batch_size passed to Keras
Returns
-------
numpy.array of nM affinity predictions
"""
assert self.prediction_cache is not None
use_cache = (
allele_encoding is None and
isinstance(peptides, EncodableSequences))
if use_cache and peptides in self.prediction_cache:
return self.prediction_cache[peptides].copy()
x_dict = {
'peptide': self.peptides_to_network_input(peptides)
}
if allele_encoding is not None:
allele_input = self.allele_encoding_to_network_input(allele_encoding)
x_dict['allele'] = allele_input
network = self.network(borrow=True)
raw_predictions = network.predict(x_dict, batch_size=batch_size)
predictions = numpy.array(raw_predictions, dtype = "float64")[:,0]
result = to_ic50(predictions)
if use_cache:
self.prediction_cache[peptides] = result
return result | [
"def",
"predict",
"(",
"self",
",",
"peptides",
",",
"allele_encoding",
"=",
"None",
",",
"batch_size",
"=",
"4096",
")",
":",
"assert",
"self",
".",
"prediction_cache",
"is",
"not",
"None",
"use_cache",
"=",
"(",
"allele_encoding",
"is",
"None",
"and",
"isinstance",
"(",
"peptides",
",",
"EncodableSequences",
")",
")",
"if",
"use_cache",
"and",
"peptides",
"in",
"self",
".",
"prediction_cache",
":",
"return",
"self",
".",
"prediction_cache",
"[",
"peptides",
"]",
".",
"copy",
"(",
")",
"x_dict",
"=",
"{",
"'peptide'",
":",
"self",
".",
"peptides_to_network_input",
"(",
"peptides",
")",
"}",
"if",
"allele_encoding",
"is",
"not",
"None",
":",
"allele_input",
"=",
"self",
".",
"allele_encoding_to_network_input",
"(",
"allele_encoding",
")",
"x_dict",
"[",
"'allele'",
"]",
"=",
"allele_input",
"network",
"=",
"self",
".",
"network",
"(",
"borrow",
"=",
"True",
")",
"raw_predictions",
"=",
"network",
".",
"predict",
"(",
"x_dict",
",",
"batch_size",
"=",
"batch_size",
")",
"predictions",
"=",
"numpy",
".",
"array",
"(",
"raw_predictions",
",",
"dtype",
"=",
"\"float64\"",
")",
"[",
":",
",",
"0",
"]",
"result",
"=",
"to_ic50",
"(",
"predictions",
")",
"if",
"use_cache",
":",
"self",
".",
"prediction_cache",
"[",
"peptides",
"]",
"=",
"result",
"return",
"result"
] | Predict affinities.
If peptides are specified as EncodableSequences, then the predictions
will be cached for this predictor as long as the EncodableSequences object
remains in memory. The cache is keyed on the object identity of the
EncodableSequences, not the sequences themselves.
Parameters
----------
peptides : EncodableSequences or list of string
allele_encoding : AlleleEncoding, optional
Only required when this model is a pan-allele model
batch_size : int
batch_size passed to Keras
Returns
-------
numpy.array of nM affinity predictions | [
"Predict",
"affinities",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_neural_network.py#L739-L782 | train |
openvax/mhcflurry | mhcflurry/scoring.py | make_scores | def make_scores(
ic50_y,
ic50_y_pred,
sample_weight=None,
threshold_nm=500,
max_ic50=50000):
"""
Calculate AUC, F1, and Kendall Tau scores.
Parameters
-----------
ic50_y : float list
true IC50s (i.e. affinities)
ic50_y_pred : float list
predicted IC50s
sample_weight : float list [optional]
threshold_nm : float [optional]
max_ic50 : float [optional]
Returns
-----------
dict with entries "auc", "f1", "tau"
"""
y_pred = from_ic50(ic50_y_pred, max_ic50)
try:
auc = sklearn.metrics.roc_auc_score(
ic50_y <= threshold_nm,
y_pred,
sample_weight=sample_weight)
except ValueError as e:
logging.warning(e)
auc = numpy.nan
try:
f1 = sklearn.metrics.f1_score(
ic50_y <= threshold_nm,
ic50_y_pred <= threshold_nm,
sample_weight=sample_weight)
except ValueError as e:
logging.warning(e)
f1 = numpy.nan
try:
tau = scipy.stats.kendalltau(ic50_y_pred, ic50_y)[0]
except ValueError as e:
logging.warning(e)
tau = numpy.nan
return dict(
auc=auc,
f1=f1,
tau=tau) | python | def make_scores(
ic50_y,
ic50_y_pred,
sample_weight=None,
threshold_nm=500,
max_ic50=50000):
"""
Calculate AUC, F1, and Kendall Tau scores.
Parameters
-----------
ic50_y : float list
true IC50s (i.e. affinities)
ic50_y_pred : float list
predicted IC50s
sample_weight : float list [optional]
threshold_nm : float [optional]
max_ic50 : float [optional]
Returns
-----------
dict with entries "auc", "f1", "tau"
"""
y_pred = from_ic50(ic50_y_pred, max_ic50)
try:
auc = sklearn.metrics.roc_auc_score(
ic50_y <= threshold_nm,
y_pred,
sample_weight=sample_weight)
except ValueError as e:
logging.warning(e)
auc = numpy.nan
try:
f1 = sklearn.metrics.f1_score(
ic50_y <= threshold_nm,
ic50_y_pred <= threshold_nm,
sample_weight=sample_weight)
except ValueError as e:
logging.warning(e)
f1 = numpy.nan
try:
tau = scipy.stats.kendalltau(ic50_y_pred, ic50_y)[0]
except ValueError as e:
logging.warning(e)
tau = numpy.nan
return dict(
auc=auc,
f1=f1,
tau=tau) | [
"def",
"make_scores",
"(",
"ic50_y",
",",
"ic50_y_pred",
",",
"sample_weight",
"=",
"None",
",",
"threshold_nm",
"=",
"500",
",",
"max_ic50",
"=",
"50000",
")",
":",
"y_pred",
"=",
"from_ic50",
"(",
"ic50_y_pred",
",",
"max_ic50",
")",
"try",
":",
"auc",
"=",
"sklearn",
".",
"metrics",
".",
"roc_auc_score",
"(",
"ic50_y",
"<=",
"threshold_nm",
",",
"y_pred",
",",
"sample_weight",
"=",
"sample_weight",
")",
"except",
"ValueError",
"as",
"e",
":",
"logging",
".",
"warning",
"(",
"e",
")",
"auc",
"=",
"numpy",
".",
"nan",
"try",
":",
"f1",
"=",
"sklearn",
".",
"metrics",
".",
"f1_score",
"(",
"ic50_y",
"<=",
"threshold_nm",
",",
"ic50_y_pred",
"<=",
"threshold_nm",
",",
"sample_weight",
"=",
"sample_weight",
")",
"except",
"ValueError",
"as",
"e",
":",
"logging",
".",
"warning",
"(",
"e",
")",
"f1",
"=",
"numpy",
".",
"nan",
"try",
":",
"tau",
"=",
"scipy",
".",
"stats",
".",
"kendalltau",
"(",
"ic50_y_pred",
",",
"ic50_y",
")",
"[",
"0",
"]",
"except",
"ValueError",
"as",
"e",
":",
"logging",
".",
"warning",
"(",
"e",
")",
"tau",
"=",
"numpy",
".",
"nan",
"return",
"dict",
"(",
"auc",
"=",
"auc",
",",
"f1",
"=",
"f1",
",",
"tau",
"=",
"tau",
")"
] | Calculate AUC, F1, and Kendall Tau scores.
Parameters
-----------
ic50_y : float list
true IC50s (i.e. affinities)
ic50_y_pred : float list
predicted IC50s
sample_weight : float list [optional]
threshold_nm : float [optional]
max_ic50 : float [optional]
Returns
-----------
dict with entries "auc", "f1", "tau" | [
"Calculate",
"AUC",
"F1",
"and",
"Kendall",
"Tau",
"scores",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/scoring.py#L14-L68 | train |
openvax/mhcflurry | mhcflurry/encodable_sequences.py | EncodableSequences.variable_length_to_fixed_length_vector_encoding | def variable_length_to_fixed_length_vector_encoding(
self, vector_encoding_name, left_edge=4, right_edge=4, max_length=15):
"""
Encode variable-length sequences using a fixed-length encoding designed
for preserving the anchor positions of class I peptides.
The sequences must be of length at least left_edge + right_edge, and at
most max_length.
Parameters
----------
vector_encoding_name : string
How to represent amino acids.
One of "BLOSUM62", "one-hot", etc. Full list of supported vector
encodings is given by available_vector_encodings().
left_edge : int, size of fixed-position left side
right_edge : int, size of the fixed-position right side
max_length : sequence length of the resulting encoding
Returns
-------
numpy.array with shape (num sequences, max_length, m) where m is
vector_encoding_length(vector_encoding_name)
"""
cache_key = (
"fixed_length_vector_encoding",
vector_encoding_name,
left_edge,
right_edge,
max_length)
if cache_key not in self.encoding_cache:
fixed_length_sequences = (
self.sequences_to_fixed_length_index_encoded_array(
self.sequences,
left_edge=left_edge,
right_edge=right_edge,
max_length=max_length))
result = amino_acid.fixed_vectors_encoding(
fixed_length_sequences,
amino_acid.ENCODING_DATA_FRAMES[vector_encoding_name])
assert result.shape[0] == len(self.sequences)
self.encoding_cache[cache_key] = result
return self.encoding_cache[cache_key] | python | def variable_length_to_fixed_length_vector_encoding(
self, vector_encoding_name, left_edge=4, right_edge=4, max_length=15):
"""
Encode variable-length sequences using a fixed-length encoding designed
for preserving the anchor positions of class I peptides.
The sequences must be of length at least left_edge + right_edge, and at
most max_length.
Parameters
----------
vector_encoding_name : string
How to represent amino acids.
One of "BLOSUM62", "one-hot", etc. Full list of supported vector
encodings is given by available_vector_encodings().
left_edge : int, size of fixed-position left side
right_edge : int, size of the fixed-position right side
max_length : sequence length of the resulting encoding
Returns
-------
numpy.array with shape (num sequences, max_length, m) where m is
vector_encoding_length(vector_encoding_name)
"""
cache_key = (
"fixed_length_vector_encoding",
vector_encoding_name,
left_edge,
right_edge,
max_length)
if cache_key not in self.encoding_cache:
fixed_length_sequences = (
self.sequences_to_fixed_length_index_encoded_array(
self.sequences,
left_edge=left_edge,
right_edge=right_edge,
max_length=max_length))
result = amino_acid.fixed_vectors_encoding(
fixed_length_sequences,
amino_acid.ENCODING_DATA_FRAMES[vector_encoding_name])
assert result.shape[0] == len(self.sequences)
self.encoding_cache[cache_key] = result
return self.encoding_cache[cache_key] | [
"def",
"variable_length_to_fixed_length_vector_encoding",
"(",
"self",
",",
"vector_encoding_name",
",",
"left_edge",
"=",
"4",
",",
"right_edge",
"=",
"4",
",",
"max_length",
"=",
"15",
")",
":",
"cache_key",
"=",
"(",
"\"fixed_length_vector_encoding\"",
",",
"vector_encoding_name",
",",
"left_edge",
",",
"right_edge",
",",
"max_length",
")",
"if",
"cache_key",
"not",
"in",
"self",
".",
"encoding_cache",
":",
"fixed_length_sequences",
"=",
"(",
"self",
".",
"sequences_to_fixed_length_index_encoded_array",
"(",
"self",
".",
"sequences",
",",
"left_edge",
"=",
"left_edge",
",",
"right_edge",
"=",
"right_edge",
",",
"max_length",
"=",
"max_length",
")",
")",
"result",
"=",
"amino_acid",
".",
"fixed_vectors_encoding",
"(",
"fixed_length_sequences",
",",
"amino_acid",
".",
"ENCODING_DATA_FRAMES",
"[",
"vector_encoding_name",
"]",
")",
"assert",
"result",
".",
"shape",
"[",
"0",
"]",
"==",
"len",
"(",
"self",
".",
"sequences",
")",
"self",
".",
"encoding_cache",
"[",
"cache_key",
"]",
"=",
"result",
"return",
"self",
".",
"encoding_cache",
"[",
"cache_key",
"]"
] | Encode variable-length sequences using a fixed-length encoding designed
for preserving the anchor positions of class I peptides.
The sequences must be of length at least left_edge + right_edge, and at
most max_length.
Parameters
----------
vector_encoding_name : string
How to represent amino acids.
One of "BLOSUM62", "one-hot", etc. Full list of supported vector
encodings is given by available_vector_encodings().
left_edge : int, size of fixed-position left side
right_edge : int, size of the fixed-position right side
max_length : sequence length of the resulting encoding
Returns
-------
numpy.array with shape (num sequences, max_length, m) where m is
vector_encoding_length(vector_encoding_name) | [
"Encode",
"variable",
"-",
"length",
"sequences",
"using",
"a",
"fixed",
"-",
"length",
"encoding",
"designed",
"for",
"preserving",
"the",
"anchor",
"positions",
"of",
"class",
"I",
"peptides",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/encodable_sequences.py#L89-L131 | train |
openvax/mhcflurry | mhcflurry/encodable_sequences.py | EncodableSequences.sequences_to_fixed_length_index_encoded_array | def sequences_to_fixed_length_index_encoded_array(
klass, sequences, left_edge=4, right_edge=4, max_length=15):
"""
Transform a sequence of strings, where each string is of length at least
left_edge + right_edge and at most max_length into strings of length
max_length using a scheme designed to preserve the anchor positions of
class I peptides.
The first left_edge characters in the input always map to the first
left_edge characters in the output. Similarly for the last right_edge
characters. The middle characters are filled in based on the length,
with the X character filling in the blanks.
For example, using defaults:
AAAACDDDD -> AAAAXXXCXXXDDDD
The strings are also converted to int categorical amino acid indices.
Parameters
----------
sequence : string
left_edge : int
right_edge : int
max_length : int
Returns
-------
numpy array of shape (len(sequences), max_length) and dtype int
"""
# Result array is int32, filled with X (null amino acid) value.
result = numpy.full(
fill_value=amino_acid.AMINO_ACID_INDEX['X'],
shape=(len(sequences), max_length),
dtype="int32")
df = pandas.DataFrame({"peptide": sequences})
df["length"] = df.peptide.str.len()
middle_length = max_length - left_edge - right_edge
# For efficiency we handle each supported peptide length using bulk
# array operations.
for (length, sub_df) in df.groupby("length"):
if length < left_edge + right_edge:
raise ValueError(
"Sequence '%s' (length %d) unsupported: length must be at "
"least %d. There are %d total peptides with this length." % (
sub_df.iloc[0].peptide, length, left_edge + right_edge,
len(sub_df)))
if length > max_length:
raise ValueError(
"Sequence '%s' (length %d) unsupported: length must be at "
"most %d. There are %d total peptides with this length." % (
sub_df.iloc[0].peptide, length, max_length,
len(sub_df)))
# Array of shape (num peptides, length) giving fixed-length amino
# acid encoding each peptide of the current length.
fixed_length_sequences = numpy.stack(
sub_df.peptide.map(
lambda s: numpy.array([
amino_acid.AMINO_ACID_INDEX[char] for char in s
])).values)
num_null = max_length - length
num_null_left = int(math.ceil(num_null / 2))
num_middle_filled = middle_length - num_null
middle_start = left_edge + num_null_left
# Set left edge
result[sub_df.index, :left_edge] = fixed_length_sequences[
:, :left_edge
]
# Set middle.
result[
sub_df.index,
middle_start : middle_start + num_middle_filled
] = fixed_length_sequences[
:, left_edge : left_edge + num_middle_filled
]
# Set right edge.
result[
sub_df.index,
-right_edge:
] = fixed_length_sequences[:, -right_edge:]
return result | python | def sequences_to_fixed_length_index_encoded_array(
klass, sequences, left_edge=4, right_edge=4, max_length=15):
"""
Transform a sequence of strings, where each string is of length at least
left_edge + right_edge and at most max_length into strings of length
max_length using a scheme designed to preserve the anchor positions of
class I peptides.
The first left_edge characters in the input always map to the first
left_edge characters in the output. Similarly for the last right_edge
characters. The middle characters are filled in based on the length,
with the X character filling in the blanks.
For example, using defaults:
AAAACDDDD -> AAAAXXXCXXXDDDD
The strings are also converted to int categorical amino acid indices.
Parameters
----------
sequence : string
left_edge : int
right_edge : int
max_length : int
Returns
-------
numpy array of shape (len(sequences), max_length) and dtype int
"""
# Result array is int32, filled with X (null amino acid) value.
result = numpy.full(
fill_value=amino_acid.AMINO_ACID_INDEX['X'],
shape=(len(sequences), max_length),
dtype="int32")
df = pandas.DataFrame({"peptide": sequences})
df["length"] = df.peptide.str.len()
middle_length = max_length - left_edge - right_edge
# For efficiency we handle each supported peptide length using bulk
# array operations.
for (length, sub_df) in df.groupby("length"):
if length < left_edge + right_edge:
raise ValueError(
"Sequence '%s' (length %d) unsupported: length must be at "
"least %d. There are %d total peptides with this length." % (
sub_df.iloc[0].peptide, length, left_edge + right_edge,
len(sub_df)))
if length > max_length:
raise ValueError(
"Sequence '%s' (length %d) unsupported: length must be at "
"most %d. There are %d total peptides with this length." % (
sub_df.iloc[0].peptide, length, max_length,
len(sub_df)))
# Array of shape (num peptides, length) giving fixed-length amino
# acid encoding each peptide of the current length.
fixed_length_sequences = numpy.stack(
sub_df.peptide.map(
lambda s: numpy.array([
amino_acid.AMINO_ACID_INDEX[char] for char in s
])).values)
num_null = max_length - length
num_null_left = int(math.ceil(num_null / 2))
num_middle_filled = middle_length - num_null
middle_start = left_edge + num_null_left
# Set left edge
result[sub_df.index, :left_edge] = fixed_length_sequences[
:, :left_edge
]
# Set middle.
result[
sub_df.index,
middle_start : middle_start + num_middle_filled
] = fixed_length_sequences[
:, left_edge : left_edge + num_middle_filled
]
# Set right edge.
result[
sub_df.index,
-right_edge:
] = fixed_length_sequences[:, -right_edge:]
return result | [
"def",
"sequences_to_fixed_length_index_encoded_array",
"(",
"klass",
",",
"sequences",
",",
"left_edge",
"=",
"4",
",",
"right_edge",
"=",
"4",
",",
"max_length",
"=",
"15",
")",
":",
"# Result array is int32, filled with X (null amino acid) value.",
"result",
"=",
"numpy",
".",
"full",
"(",
"fill_value",
"=",
"amino_acid",
".",
"AMINO_ACID_INDEX",
"[",
"'X'",
"]",
",",
"shape",
"=",
"(",
"len",
"(",
"sequences",
")",
",",
"max_length",
")",
",",
"dtype",
"=",
"\"int32\"",
")",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"{",
"\"peptide\"",
":",
"sequences",
"}",
")",
"df",
"[",
"\"length\"",
"]",
"=",
"df",
".",
"peptide",
".",
"str",
".",
"len",
"(",
")",
"middle_length",
"=",
"max_length",
"-",
"left_edge",
"-",
"right_edge",
"# For efficiency we handle each supported peptide length using bulk",
"# array operations.",
"for",
"(",
"length",
",",
"sub_df",
")",
"in",
"df",
".",
"groupby",
"(",
"\"length\"",
")",
":",
"if",
"length",
"<",
"left_edge",
"+",
"right_edge",
":",
"raise",
"ValueError",
"(",
"\"Sequence '%s' (length %d) unsupported: length must be at \"",
"\"least %d. There are %d total peptides with this length.\"",
"%",
"(",
"sub_df",
".",
"iloc",
"[",
"0",
"]",
".",
"peptide",
",",
"length",
",",
"left_edge",
"+",
"right_edge",
",",
"len",
"(",
"sub_df",
")",
")",
")",
"if",
"length",
">",
"max_length",
":",
"raise",
"ValueError",
"(",
"\"Sequence '%s' (length %d) unsupported: length must be at \"",
"\"most %d. There are %d total peptides with this length.\"",
"%",
"(",
"sub_df",
".",
"iloc",
"[",
"0",
"]",
".",
"peptide",
",",
"length",
",",
"max_length",
",",
"len",
"(",
"sub_df",
")",
")",
")",
"# Array of shape (num peptides, length) giving fixed-length amino",
"# acid encoding each peptide of the current length.",
"fixed_length_sequences",
"=",
"numpy",
".",
"stack",
"(",
"sub_df",
".",
"peptide",
".",
"map",
"(",
"lambda",
"s",
":",
"numpy",
".",
"array",
"(",
"[",
"amino_acid",
".",
"AMINO_ACID_INDEX",
"[",
"char",
"]",
"for",
"char",
"in",
"s",
"]",
")",
")",
".",
"values",
")",
"num_null",
"=",
"max_length",
"-",
"length",
"num_null_left",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"num_null",
"/",
"2",
")",
")",
"num_middle_filled",
"=",
"middle_length",
"-",
"num_null",
"middle_start",
"=",
"left_edge",
"+",
"num_null_left",
"# Set left edge",
"result",
"[",
"sub_df",
".",
"index",
",",
":",
"left_edge",
"]",
"=",
"fixed_length_sequences",
"[",
":",
",",
":",
"left_edge",
"]",
"# Set middle.",
"result",
"[",
"sub_df",
".",
"index",
",",
"middle_start",
":",
"middle_start",
"+",
"num_middle_filled",
"]",
"=",
"fixed_length_sequences",
"[",
":",
",",
"left_edge",
":",
"left_edge",
"+",
"num_middle_filled",
"]",
"# Set right edge.",
"result",
"[",
"sub_df",
".",
"index",
",",
"-",
"right_edge",
":",
"]",
"=",
"fixed_length_sequences",
"[",
":",
",",
"-",
"right_edge",
":",
"]",
"return",
"result"
] | Transform a sequence of strings, where each string is of length at least
left_edge + right_edge and at most max_length into strings of length
max_length using a scheme designed to preserve the anchor positions of
class I peptides.
The first left_edge characters in the input always map to the first
left_edge characters in the output. Similarly for the last right_edge
characters. The middle characters are filled in based on the length,
with the X character filling in the blanks.
For example, using defaults:
AAAACDDDD -> AAAAXXXCXXXDDDD
The strings are also converted to int categorical amino acid indices.
Parameters
----------
sequence : string
left_edge : int
right_edge : int
max_length : int
Returns
-------
numpy array of shape (len(sequences), max_length) and dtype int | [
"Transform",
"a",
"sequence",
"of",
"strings",
"where",
"each",
"string",
"is",
"of",
"length",
"at",
"least",
"left_edge",
"+",
"right_edge",
"and",
"at",
"most",
"max_length",
"into",
"strings",
"of",
"length",
"max_length",
"using",
"a",
"scheme",
"designed",
"to",
"preserve",
"the",
"anchor",
"positions",
"of",
"class",
"I",
"peptides",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/encodable_sequences.py#L134-L223 | train |
openvax/mhcflurry | mhcflurry/ensemble_centrality.py | robust_mean | def robust_mean(log_values):
"""
Mean of values falling within the 25-75 percentiles.
Parameters
----------
log_values : 2-d numpy.array
Center is computed along the second axis (i.e. per row).
Returns
-------
center : numpy.array of length log_values.shape[1]
"""
if log_values.shape[1] <= 3:
# Too few values to use robust mean.
return numpy.nanmean(log_values, axis=1)
without_nans = numpy.nan_to_num(log_values) # replace nan with 0
mask = (
(~numpy.isnan(log_values)) &
(without_nans <= numpy.nanpercentile(log_values, 75, axis=1).reshape((-1, 1))) &
(without_nans >= numpy.nanpercentile(log_values, 25, axis=1).reshape((-1, 1))))
return (without_nans * mask.astype(float)).sum(1) / mask.sum(1) | python | def robust_mean(log_values):
"""
Mean of values falling within the 25-75 percentiles.
Parameters
----------
log_values : 2-d numpy.array
Center is computed along the second axis (i.e. per row).
Returns
-------
center : numpy.array of length log_values.shape[1]
"""
if log_values.shape[1] <= 3:
# Too few values to use robust mean.
return numpy.nanmean(log_values, axis=1)
without_nans = numpy.nan_to_num(log_values) # replace nan with 0
mask = (
(~numpy.isnan(log_values)) &
(without_nans <= numpy.nanpercentile(log_values, 75, axis=1).reshape((-1, 1))) &
(without_nans >= numpy.nanpercentile(log_values, 25, axis=1).reshape((-1, 1))))
return (without_nans * mask.astype(float)).sum(1) / mask.sum(1) | [
"def",
"robust_mean",
"(",
"log_values",
")",
":",
"if",
"log_values",
".",
"shape",
"[",
"1",
"]",
"<=",
"3",
":",
"# Too few values to use robust mean.",
"return",
"numpy",
".",
"nanmean",
"(",
"log_values",
",",
"axis",
"=",
"1",
")",
"without_nans",
"=",
"numpy",
".",
"nan_to_num",
"(",
"log_values",
")",
"# replace nan with 0",
"mask",
"=",
"(",
"(",
"~",
"numpy",
".",
"isnan",
"(",
"log_values",
")",
")",
"&",
"(",
"without_nans",
"<=",
"numpy",
".",
"nanpercentile",
"(",
"log_values",
",",
"75",
",",
"axis",
"=",
"1",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
")",
"&",
"(",
"without_nans",
">=",
"numpy",
".",
"nanpercentile",
"(",
"log_values",
",",
"25",
",",
"axis",
"=",
"1",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
")",
")",
"return",
"(",
"without_nans",
"*",
"mask",
".",
"astype",
"(",
"float",
")",
")",
".",
"sum",
"(",
"1",
")",
"/",
"mask",
".",
"sum",
"(",
"1",
")"
] | Mean of values falling within the 25-75 percentiles.
Parameters
----------
log_values : 2-d numpy.array
Center is computed along the second axis (i.e. per row).
Returns
-------
center : numpy.array of length log_values.shape[1] | [
"Mean",
"of",
"values",
"falling",
"within",
"the",
"25",
"-",
"75",
"percentiles",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/ensemble_centrality.py#L11-L33 | train |
openvax/mhcflurry | mhcflurry/class1_affinity_predictor.py | Class1AffinityPredictor.neural_networks | def neural_networks(self):
"""
List of the neural networks in the ensemble.
Returns
-------
list of `Class1NeuralNetwork`
"""
result = []
for models in self.allele_to_allele_specific_models.values():
result.extend(models)
result.extend(self.class1_pan_allele_models)
return result | python | def neural_networks(self):
"""
List of the neural networks in the ensemble.
Returns
-------
list of `Class1NeuralNetwork`
"""
result = []
for models in self.allele_to_allele_specific_models.values():
result.extend(models)
result.extend(self.class1_pan_allele_models)
return result | [
"def",
"neural_networks",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"for",
"models",
"in",
"self",
".",
"allele_to_allele_specific_models",
".",
"values",
"(",
")",
":",
"result",
".",
"extend",
"(",
"models",
")",
"result",
".",
"extend",
"(",
"self",
".",
"class1_pan_allele_models",
")",
"return",
"result"
] | List of the neural networks in the ensemble.
Returns
-------
list of `Class1NeuralNetwork` | [
"List",
"of",
"the",
"neural",
"networks",
"in",
"the",
"ensemble",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L140-L152 | train |
openvax/mhcflurry | mhcflurry/class1_affinity_predictor.py | Class1AffinityPredictor.merge | def merge(cls, predictors):
"""
Merge the ensembles of two or more `Class1AffinityPredictor` instances.
Note: the resulting merged predictor will NOT have calibrated percentile
ranks. Call `calibrate_percentile_ranks` on it if these are needed.
Parameters
----------
predictors : sequence of `Class1AffinityPredictor`
Returns
-------
`Class1AffinityPredictor` instance
"""
assert len(predictors) > 0
if len(predictors) == 1:
return predictors[0]
allele_to_allele_specific_models = collections.defaultdict(list)
class1_pan_allele_models = []
allele_to_fixed_length_sequence = predictors[0].allele_to_fixed_length_sequence
for predictor in predictors:
for (allele, networks) in (
predictor.allele_to_allele_specific_models.items()):
allele_to_allele_specific_models[allele].extend(networks)
class1_pan_allele_models.extend(
predictor.class1_pan_allele_models)
return Class1AffinityPredictor(
allele_to_allele_specific_models=allele_to_allele_specific_models,
class1_pan_allele_models=class1_pan_allele_models,
allele_to_fixed_length_sequence=allele_to_fixed_length_sequence
) | python | def merge(cls, predictors):
"""
Merge the ensembles of two or more `Class1AffinityPredictor` instances.
Note: the resulting merged predictor will NOT have calibrated percentile
ranks. Call `calibrate_percentile_ranks` on it if these are needed.
Parameters
----------
predictors : sequence of `Class1AffinityPredictor`
Returns
-------
`Class1AffinityPredictor` instance
"""
assert len(predictors) > 0
if len(predictors) == 1:
return predictors[0]
allele_to_allele_specific_models = collections.defaultdict(list)
class1_pan_allele_models = []
allele_to_fixed_length_sequence = predictors[0].allele_to_fixed_length_sequence
for predictor in predictors:
for (allele, networks) in (
predictor.allele_to_allele_specific_models.items()):
allele_to_allele_specific_models[allele].extend(networks)
class1_pan_allele_models.extend(
predictor.class1_pan_allele_models)
return Class1AffinityPredictor(
allele_to_allele_specific_models=allele_to_allele_specific_models,
class1_pan_allele_models=class1_pan_allele_models,
allele_to_fixed_length_sequence=allele_to_fixed_length_sequence
) | [
"def",
"merge",
"(",
"cls",
",",
"predictors",
")",
":",
"assert",
"len",
"(",
"predictors",
")",
">",
"0",
"if",
"len",
"(",
"predictors",
")",
"==",
"1",
":",
"return",
"predictors",
"[",
"0",
"]",
"allele_to_allele_specific_models",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"class1_pan_allele_models",
"=",
"[",
"]",
"allele_to_fixed_length_sequence",
"=",
"predictors",
"[",
"0",
"]",
".",
"allele_to_fixed_length_sequence",
"for",
"predictor",
"in",
"predictors",
":",
"for",
"(",
"allele",
",",
"networks",
")",
"in",
"(",
"predictor",
".",
"allele_to_allele_specific_models",
".",
"items",
"(",
")",
")",
":",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
".",
"extend",
"(",
"networks",
")",
"class1_pan_allele_models",
".",
"extend",
"(",
"predictor",
".",
"class1_pan_allele_models",
")",
"return",
"Class1AffinityPredictor",
"(",
"allele_to_allele_specific_models",
"=",
"allele_to_allele_specific_models",
",",
"class1_pan_allele_models",
"=",
"class1_pan_allele_models",
",",
"allele_to_fixed_length_sequence",
"=",
"allele_to_fixed_length_sequence",
")"
] | Merge the ensembles of two or more `Class1AffinityPredictor` instances.
Note: the resulting merged predictor will NOT have calibrated percentile
ranks. Call `calibrate_percentile_ranks` on it if these are needed.
Parameters
----------
predictors : sequence of `Class1AffinityPredictor`
Returns
-------
`Class1AffinityPredictor` instance | [
"Merge",
"the",
"ensembles",
"of",
"two",
"or",
"more",
"Class1AffinityPredictor",
"instances",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L155-L190 | train |
openvax/mhcflurry | mhcflurry/class1_affinity_predictor.py | Class1AffinityPredictor.merge_in_place | def merge_in_place(self, others):
"""
Add the models present in other predictors into the current predictor.
Parameters
----------
others : list of Class1AffinityPredictor
Other predictors to merge into the current predictor.
Returns
-------
list of string : names of newly added models
"""
new_model_names = []
for predictor in others:
for model in predictor.class1_pan_allele_models:
model_name = self.model_name(
"pan-class1",
len(self.class1_pan_allele_models))
self.class1_pan_allele_models.append(model)
row = pandas.Series(collections.OrderedDict([
("model_name", model_name),
("allele", "pan-class1"),
("config_json", json.dumps(model.get_config())),
("model", model),
])).to_frame().T
self._manifest_df = pandas.concat(
[self.manifest_df, row], ignore_index=True)
new_model_names.append(model_name)
for allele in predictor.allele_to_allele_specific_models:
if allele not in self.allele_to_allele_specific_models:
self.allele_to_allele_specific_models[allele] = []
current_models = self.allele_to_allele_specific_models[allele]
for model in predictor.allele_to_allele_specific_models[allele]:
model_name = self.model_name(allele, len(current_models))
row = pandas.Series(collections.OrderedDict([
("model_name", model_name),
("allele", allele),
("config_json", json.dumps(model.get_config())),
("model", model),
])).to_frame().T
self._manifest_df = pandas.concat(
[self.manifest_df, row], ignore_index=True)
current_models.append(model)
new_model_names.append(model_name)
self.clear_cache()
return new_model_names | python | def merge_in_place(self, others):
"""
Add the models present in other predictors into the current predictor.
Parameters
----------
others : list of Class1AffinityPredictor
Other predictors to merge into the current predictor.
Returns
-------
list of string : names of newly added models
"""
new_model_names = []
for predictor in others:
for model in predictor.class1_pan_allele_models:
model_name = self.model_name(
"pan-class1",
len(self.class1_pan_allele_models))
self.class1_pan_allele_models.append(model)
row = pandas.Series(collections.OrderedDict([
("model_name", model_name),
("allele", "pan-class1"),
("config_json", json.dumps(model.get_config())),
("model", model),
])).to_frame().T
self._manifest_df = pandas.concat(
[self.manifest_df, row], ignore_index=True)
new_model_names.append(model_name)
for allele in predictor.allele_to_allele_specific_models:
if allele not in self.allele_to_allele_specific_models:
self.allele_to_allele_specific_models[allele] = []
current_models = self.allele_to_allele_specific_models[allele]
for model in predictor.allele_to_allele_specific_models[allele]:
model_name = self.model_name(allele, len(current_models))
row = pandas.Series(collections.OrderedDict([
("model_name", model_name),
("allele", allele),
("config_json", json.dumps(model.get_config())),
("model", model),
])).to_frame().T
self._manifest_df = pandas.concat(
[self.manifest_df, row], ignore_index=True)
current_models.append(model)
new_model_names.append(model_name)
self.clear_cache()
return new_model_names | [
"def",
"merge_in_place",
"(",
"self",
",",
"others",
")",
":",
"new_model_names",
"=",
"[",
"]",
"for",
"predictor",
"in",
"others",
":",
"for",
"model",
"in",
"predictor",
".",
"class1_pan_allele_models",
":",
"model_name",
"=",
"self",
".",
"model_name",
"(",
"\"pan-class1\"",
",",
"len",
"(",
"self",
".",
"class1_pan_allele_models",
")",
")",
"self",
".",
"class1_pan_allele_models",
".",
"append",
"(",
"model",
")",
"row",
"=",
"pandas",
".",
"Series",
"(",
"collections",
".",
"OrderedDict",
"(",
"[",
"(",
"\"model_name\"",
",",
"model_name",
")",
",",
"(",
"\"allele\"",
",",
"\"pan-class1\"",
")",
",",
"(",
"\"config_json\"",
",",
"json",
".",
"dumps",
"(",
"model",
".",
"get_config",
"(",
")",
")",
")",
",",
"(",
"\"model\"",
",",
"model",
")",
",",
"]",
")",
")",
".",
"to_frame",
"(",
")",
".",
"T",
"self",
".",
"_manifest_df",
"=",
"pandas",
".",
"concat",
"(",
"[",
"self",
".",
"manifest_df",
",",
"row",
"]",
",",
"ignore_index",
"=",
"True",
")",
"new_model_names",
".",
"append",
"(",
"model_name",
")",
"for",
"allele",
"in",
"predictor",
".",
"allele_to_allele_specific_models",
":",
"if",
"allele",
"not",
"in",
"self",
".",
"allele_to_allele_specific_models",
":",
"self",
".",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
"=",
"[",
"]",
"current_models",
"=",
"self",
".",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
"for",
"model",
"in",
"predictor",
".",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
":",
"model_name",
"=",
"self",
".",
"model_name",
"(",
"allele",
",",
"len",
"(",
"current_models",
")",
")",
"row",
"=",
"pandas",
".",
"Series",
"(",
"collections",
".",
"OrderedDict",
"(",
"[",
"(",
"\"model_name\"",
",",
"model_name",
")",
",",
"(",
"\"allele\"",
",",
"allele",
")",
",",
"(",
"\"config_json\"",
",",
"json",
".",
"dumps",
"(",
"model",
".",
"get_config",
"(",
")",
")",
")",
",",
"(",
"\"model\"",
",",
"model",
")",
",",
"]",
")",
")",
".",
"to_frame",
"(",
")",
".",
"T",
"self",
".",
"_manifest_df",
"=",
"pandas",
".",
"concat",
"(",
"[",
"self",
".",
"manifest_df",
",",
"row",
"]",
",",
"ignore_index",
"=",
"True",
")",
"current_models",
".",
"append",
"(",
"model",
")",
"new_model_names",
".",
"append",
"(",
"model_name",
")",
"self",
".",
"clear_cache",
"(",
")",
"return",
"new_model_names"
] | Add the models present in other predictors into the current predictor.
Parameters
----------
others : list of Class1AffinityPredictor
Other predictors to merge into the current predictor.
Returns
-------
list of string : names of newly added models | [
"Add",
"the",
"models",
"present",
"other",
"predictors",
"into",
"the",
"current",
"predictor",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L192-L241 | train |
openvax/mhcflurry | mhcflurry/class1_affinity_predictor.py | Class1AffinityPredictor.percentile_ranks | def percentile_ranks(self, affinities, allele=None, alleles=None, throw=True):
"""
Return percentile ranks for the given ic50 affinities and alleles.
The 'allele' and 'alleles' arguments are as in the `predict` method.
Specify one of these.
Parameters
----------
affinities : sequence of float
nM affinities
allele : string
alleles : sequence of string
throw : boolean
If True, a ValueError will be raised in the case of unsupported
alleles. If False, a warning will be logged and NaN will be returned
for those percentile ranks.
Returns
-------
numpy.array of float
"""
if allele is not None:
try:
transform = self.allele_to_percent_rank_transform[allele]
return transform.transform(affinities)
except KeyError:
msg = "Allele %s has no percentile rank information" % allele
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
# Return NaNs
return numpy.ones(len(affinities)) * numpy.nan
if alleles is None:
raise ValueError("Specify allele or alleles")
df = pandas.DataFrame({"affinity": affinities})
df["allele"] = alleles
df["result"] = numpy.nan
for (allele, sub_df) in df.groupby("allele"):
df.loc[sub_df.index, "result"] = self.percentile_ranks(
sub_df.affinity, allele=allele, throw=throw)
return df.result.values | python | def percentile_ranks(self, affinities, allele=None, alleles=None, throw=True):
"""
Return percentile ranks for the given ic50 affinities and alleles.
The 'allele' and 'alleles' arguments are as in the `predict` method.
Specify one of these.
Parameters
----------
affinities : sequence of float
nM affinities
allele : string
alleles : sequence of string
throw : boolean
If True, a ValueError will be raised in the case of unsupported
alleles. If False, a warning will be logged and NaN will be returned
for those percentile ranks.
Returns
-------
numpy.array of float
"""
if allele is not None:
try:
transform = self.allele_to_percent_rank_transform[allele]
return transform.transform(affinities)
except KeyError:
msg = "Allele %s has no percentile rank information" % allele
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
# Return NaNs
return numpy.ones(len(affinities)) * numpy.nan
if alleles is None:
raise ValueError("Specify allele or alleles")
df = pandas.DataFrame({"affinity": affinities})
df["allele"] = alleles
df["result"] = numpy.nan
for (allele, sub_df) in df.groupby("allele"):
df.loc[sub_df.index, "result"] = self.percentile_ranks(
sub_df.affinity, allele=allele, throw=throw)
return df.result.values | [
"def",
"percentile_ranks",
"(",
"self",
",",
"affinities",
",",
"allele",
"=",
"None",
",",
"alleles",
"=",
"None",
",",
"throw",
"=",
"True",
")",
":",
"if",
"allele",
"is",
"not",
"None",
":",
"try",
":",
"transform",
"=",
"self",
".",
"allele_to_percent_rank_transform",
"[",
"allele",
"]",
"return",
"transform",
".",
"transform",
"(",
"affinities",
")",
"except",
"KeyError",
":",
"msg",
"=",
"\"Allele %s has no percentile rank information\"",
"%",
"allele",
"if",
"throw",
":",
"raise",
"ValueError",
"(",
"msg",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"msg",
")",
"# Return NaNs",
"return",
"numpy",
".",
"ones",
"(",
"len",
"(",
"affinities",
")",
")",
"*",
"numpy",
".",
"nan",
"if",
"alleles",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Specify allele or alleles\"",
")",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"{",
"\"affinity\"",
":",
"affinities",
"}",
")",
"df",
"[",
"\"allele\"",
"]",
"=",
"alleles",
"df",
"[",
"\"result\"",
"]",
"=",
"numpy",
".",
"nan",
"for",
"(",
"allele",
",",
"sub_df",
")",
"in",
"df",
".",
"groupby",
"(",
"\"allele\"",
")",
":",
"df",
".",
"loc",
"[",
"sub_df",
".",
"index",
",",
"\"result\"",
"]",
"=",
"self",
".",
"percentile_ranks",
"(",
"sub_df",
".",
"affinity",
",",
"allele",
"=",
"allele",
",",
"throw",
"=",
"throw",
")",
"return",
"df",
".",
"result",
".",
"values"
] | Return percentile ranks for the given ic50 affinities and alleles.
The 'allele' and 'alleles' arguments are as in the `predict` method.
Specify one of these.
Parameters
----------
affinities : sequence of float
nM affinities
allele : string
alleles : sequence of string
throw : boolean
If True, a ValueError will be raised in the case of unsupported
alleles. If False, a warning will be logged and NaN will be returned
for those percentile ranks.
Returns
-------
numpy.array of float | [
"Return",
"percentile",
"ranks",
"for",
"the",
"given",
"ic50",
"affinities",
"and",
"alleles",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L722-L766 | train |
openvax/mhcflurry | mhcflurry/class1_affinity_predictor.py | Class1AffinityPredictor.calibrate_percentile_ranks | def calibrate_percentile_ranks(
self,
peptides=None,
num_peptides_per_length=int(1e5),
alleles=None,
bins=None):
"""
Compute the cumulative distribution of ic50 values for a set of alleles
over a large universe of random peptides, to enable computing quantiles in
this distribution later.
Parameters
----------
peptides : sequence of string or EncodableSequences, optional
Peptides to use
num_peptides_per_length : int, optional
If the peptides argument is not specified, then num_peptides_per_length
peptides are randomly sampled from a uniform distribution for each
supported length
alleles : sequence of string, optional
Alleles to perform calibration for. If not specified all supported
alleles will be calibrated.
bins : object
Anything that can be passed to numpy.histogram's "bins" argument
can be used here, i.e. either an integer or a sequence giving bin
edges. This is in ic50 space.
Returns
----------
EncodableSequences : peptides used for calibration
"""
if bins is None:
bins = to_ic50(numpy.linspace(1, 0, 1000))
if alleles is None:
alleles = self.supported_alleles
if peptides is None:
peptides = []
lengths = range(
self.supported_peptide_lengths[0],
self.supported_peptide_lengths[1] + 1)
for length in lengths:
peptides.extend(
random_peptides(num_peptides_per_length, length))
encoded_peptides = EncodableSequences.create(peptides)
for (i, allele) in enumerate(alleles):
predictions = self.predict(encoded_peptides, allele=allele)
transform = PercentRankTransform()
transform.fit(predictions, bins=bins)
self.allele_to_percent_rank_transform[allele] = transform
return encoded_peptides | python | def calibrate_percentile_ranks(
self,
peptides=None,
num_peptides_per_length=int(1e5),
alleles=None,
bins=None):
"""
Compute the cumulative distribution of ic50 values for a set of alleles
over a large universe of random peptides, to enable computing quantiles in
this distribution later.
Parameters
----------
peptides : sequence of string or EncodableSequences, optional
Peptides to use
num_peptides_per_length : int, optional
If the peptides argument is not specified, then num_peptides_per_length
peptides are randomly sampled from a uniform distribution for each
supported length
alleles : sequence of string, optional
Alleles to perform calibration for. If not specified all supported
alleles will be calibrated.
bins : object
Anything that can be passed to numpy.histogram's "bins" argument
can be used here, i.e. either an integer or a sequence giving bin
edges. This is in ic50 space.
Returns
----------
EncodableSequences : peptides used for calibration
"""
if bins is None:
bins = to_ic50(numpy.linspace(1, 0, 1000))
if alleles is None:
alleles = self.supported_alleles
if peptides is None:
peptides = []
lengths = range(
self.supported_peptide_lengths[0],
self.supported_peptide_lengths[1] + 1)
for length in lengths:
peptides.extend(
random_peptides(num_peptides_per_length, length))
encoded_peptides = EncodableSequences.create(peptides)
for (i, allele) in enumerate(alleles):
predictions = self.predict(encoded_peptides, allele=allele)
transform = PercentRankTransform()
transform.fit(predictions, bins=bins)
self.allele_to_percent_rank_transform[allele] = transform
return encoded_peptides | [
"def",
"calibrate_percentile_ranks",
"(",
"self",
",",
"peptides",
"=",
"None",
",",
"num_peptides_per_length",
"=",
"int",
"(",
"1e5",
")",
",",
"alleles",
"=",
"None",
",",
"bins",
"=",
"None",
")",
":",
"if",
"bins",
"is",
"None",
":",
"bins",
"=",
"to_ic50",
"(",
"numpy",
".",
"linspace",
"(",
"1",
",",
"0",
",",
"1000",
")",
")",
"if",
"alleles",
"is",
"None",
":",
"alleles",
"=",
"self",
".",
"supported_alleles",
"if",
"peptides",
"is",
"None",
":",
"peptides",
"=",
"[",
"]",
"lengths",
"=",
"range",
"(",
"self",
".",
"supported_peptide_lengths",
"[",
"0",
"]",
",",
"self",
".",
"supported_peptide_lengths",
"[",
"1",
"]",
"+",
"1",
")",
"for",
"length",
"in",
"lengths",
":",
"peptides",
".",
"extend",
"(",
"random_peptides",
"(",
"num_peptides_per_length",
",",
"length",
")",
")",
"encoded_peptides",
"=",
"EncodableSequences",
".",
"create",
"(",
"peptides",
")",
"for",
"(",
"i",
",",
"allele",
")",
"in",
"enumerate",
"(",
"alleles",
")",
":",
"predictions",
"=",
"self",
".",
"predict",
"(",
"encoded_peptides",
",",
"allele",
"=",
"allele",
")",
"transform",
"=",
"PercentRankTransform",
"(",
")",
"transform",
".",
"fit",
"(",
"predictions",
",",
"bins",
"=",
"bins",
")",
"self",
".",
"allele_to_percent_rank_transform",
"[",
"allele",
"]",
"=",
"transform",
"return",
"encoded_peptides"
] | Compute the cumulative distribution of ic50 values for a set of alleles
over a large universe of random peptides, to enable computing quantiles in
this distribution later.
Parameters
----------
peptides : sequence of string or EncodableSequences, optional
Peptides to use
num_peptides_per_length : int, optional
If the peptides argument is not specified, then num_peptides_per_length
peptides are randomly sampled from a uniform distribution for each
supported length
alleles : sequence of string, optional
Alleles to perform calibration for. If not specified all supported
alleles will be calibrated.
bins : object
Anything that can be passed to numpy.histogram's "bins" argument
can be used here, i.e. either an integer or a sequence giving bin
edges. This is in ic50 space.
Returns
----------
EncodableSequences : peptides used for calibration | [
"Compute",
"the",
"cumulative",
"distribution",
"of",
"ic50",
"values",
"for",
"a",
"set",
"of",
"alleles",
"over",
"a",
"large",
"universe",
"of",
"random",
"peptides",
"to",
"enable",
"computing",
"quantiles",
"in",
"this",
"distribution",
"later",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L1074-L1128 | train |
openvax/mhcflurry | mhcflurry/class1_affinity_predictor.py | Class1AffinityPredictor.filter_networks | def filter_networks(self, predicate):
"""
Return a new Class1AffinityPredictor containing a subset of this
predictor's neural networks.
Parameters
----------
predicate : Class1NeuralNetwork -> boolean
Function specifying which neural networks to include
Returns
-------
Class1AffinityPredictor
"""
allele_to_allele_specific_models = {}
for (allele, models) in self.allele_to_allele_specific_models.items():
allele_to_allele_specific_models[allele] = [
m for m in models if predicate(m)
]
class1_pan_allele_models = [
m for m in self.class1_pan_allele_models if predicate(m)
]
return Class1AffinityPredictor(
allele_to_allele_specific_models=allele_to_allele_specific_models,
class1_pan_allele_models=class1_pan_allele_models,
allele_to_fixed_length_sequence=self.allele_to_fixed_length_sequence,
) | python | def filter_networks(self, predicate):
"""
Return a new Class1AffinityPredictor containing a subset of this
predictor's neural networks.
Parameters
----------
predicate : Class1NeuralNetwork -> boolean
Function specifying which neural networks to include
Returns
-------
Class1AffinityPredictor
"""
allele_to_allele_specific_models = {}
for (allele, models) in self.allele_to_allele_specific_models.items():
allele_to_allele_specific_models[allele] = [
m for m in models if predicate(m)
]
class1_pan_allele_models = [
m for m in self.class1_pan_allele_models if predicate(m)
]
return Class1AffinityPredictor(
allele_to_allele_specific_models=allele_to_allele_specific_models,
class1_pan_allele_models=class1_pan_allele_models,
allele_to_fixed_length_sequence=self.allele_to_fixed_length_sequence,
) | [
"def",
"filter_networks",
"(",
"self",
",",
"predicate",
")",
":",
"allele_to_allele_specific_models",
"=",
"{",
"}",
"for",
"(",
"allele",
",",
"models",
")",
"in",
"self",
".",
"allele_to_allele_specific_models",
".",
"items",
"(",
")",
":",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
"=",
"[",
"m",
"for",
"m",
"in",
"models",
"if",
"predicate",
"(",
"m",
")",
"]",
"class1_pan_allele_models",
"=",
"[",
"m",
"for",
"m",
"in",
"self",
".",
"class1_pan_allele_models",
"if",
"predicate",
"(",
"m",
")",
"]",
"return",
"Class1AffinityPredictor",
"(",
"allele_to_allele_specific_models",
"=",
"allele_to_allele_specific_models",
",",
"class1_pan_allele_models",
"=",
"class1_pan_allele_models",
",",
"allele_to_fixed_length_sequence",
"=",
"self",
".",
"allele_to_fixed_length_sequence",
",",
")"
] | Return a new Class1AffinityPredictor containing a subset of this
predictor's neural networks.
Parameters
----------
predicate : Class1NeuralNetwork -> boolean
Function specifying which neural networks to include
Returns
-------
Class1AffinityPredictor | [
"Return",
"a",
"new",
"Class1AffinityPredictor",
"containing",
"a",
"subset",
"of",
"this",
"predictor",
"s",
"neural",
"networks",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L1130-L1157 | train |
openvax/mhcflurry | mhcflurry/class1_affinity_predictor.py | Class1AffinityPredictor.model_select | def model_select(
self,
score_function,
alleles=None,
min_models=1,
max_models=10000):
"""
Perform model selection using a user-specified scoring function.
Model selection is done using a "step up" variable selection procedure,
in which models are repeatedly added to an ensemble until the score
stops improving.
Parameters
----------
score_function : Class1AffinityPredictor -> float function
Scoring function
alleles : list of string, optional
If not specified, model selection is performed for all alleles.
min_models : int, optional
Min models to select per allele
max_models : int, optional
Max models to select per allele
Returns
-------
Class1AffinityPredictor : predictor containing the selected models
"""
if alleles is None:
alleles = self.supported_alleles
dfs = []
allele_to_allele_specific_models = {}
for allele in alleles:
df = pandas.DataFrame({
'model': self.allele_to_allele_specific_models[allele]
})
df["model_num"] = df.index
df["allele"] = allele
df["selected"] = False
round_num = 1
while not df.selected.all() and sum(df.selected) < max_models:
score_col = "score_%2d" % round_num
prev_score_col = "score_%2d" % (round_num - 1)
existing_selected = list(df[df.selected].model)
df[score_col] = [
numpy.nan if row.selected else
score_function(
Class1AffinityPredictor(
allele_to_allele_specific_models={
allele: [row.model] + existing_selected
}
)
)
for (_, row) in df.iterrows()
]
if round_num > min_models and (
df[score_col].max() < df[prev_score_col].max()):
break
# In case of a tie, pick a model at random.
(best_model_index,) = df.loc[
(df[score_col] == df[score_col].max())
].sample(1).index
df.loc[best_model_index, "selected"] = True
round_num += 1
dfs.append(df)
allele_to_allele_specific_models[allele] = list(
df.loc[df.selected].model)
df = pandas.concat(dfs, ignore_index=True)
new_predictor = Class1AffinityPredictor(
allele_to_allele_specific_models,
metadata_dataframes={
"model_selection": df,
})
return new_predictor | python | def model_select(
self,
score_function,
alleles=None,
min_models=1,
max_models=10000):
"""
Perform model selection using a user-specified scoring function.
Model selection is done using a "step up" variable selection procedure,
in which models are repeatedly added to an ensemble until the score
stops improving.
Parameters
----------
score_function : Class1AffinityPredictor -> float function
Scoring function
alleles : list of string, optional
If not specified, model selection is performed for all alleles.
min_models : int, optional
Min models to select per allele
max_models : int, optional
Max models to select per allele
Returns
-------
Class1AffinityPredictor : predictor containing the selected models
"""
if alleles is None:
alleles = self.supported_alleles
dfs = []
allele_to_allele_specific_models = {}
for allele in alleles:
df = pandas.DataFrame({
'model': self.allele_to_allele_specific_models[allele]
})
df["model_num"] = df.index
df["allele"] = allele
df["selected"] = False
round_num = 1
while not df.selected.all() and sum(df.selected) < max_models:
score_col = "score_%2d" % round_num
prev_score_col = "score_%2d" % (round_num - 1)
existing_selected = list(df[df.selected].model)
df[score_col] = [
numpy.nan if row.selected else
score_function(
Class1AffinityPredictor(
allele_to_allele_specific_models={
allele: [row.model] + existing_selected
}
)
)
for (_, row) in df.iterrows()
]
if round_num > min_models and (
df[score_col].max() < df[prev_score_col].max()):
break
# In case of a tie, pick a model at random.
(best_model_index,) = df.loc[
(df[score_col] == df[score_col].max())
].sample(1).index
df.loc[best_model_index, "selected"] = True
round_num += 1
dfs.append(df)
allele_to_allele_specific_models[allele] = list(
df.loc[df.selected].model)
df = pandas.concat(dfs, ignore_index=True)
new_predictor = Class1AffinityPredictor(
allele_to_allele_specific_models,
metadata_dataframes={
"model_selection": df,
})
return new_predictor | [
"def",
"model_select",
"(",
"self",
",",
"score_function",
",",
"alleles",
"=",
"None",
",",
"min_models",
"=",
"1",
",",
"max_models",
"=",
"10000",
")",
":",
"if",
"alleles",
"is",
"None",
":",
"alleles",
"=",
"self",
".",
"supported_alleles",
"dfs",
"=",
"[",
"]",
"allele_to_allele_specific_models",
"=",
"{",
"}",
"for",
"allele",
"in",
"alleles",
":",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"{",
"'model'",
":",
"self",
".",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
"}",
")",
"df",
"[",
"\"model_num\"",
"]",
"=",
"df",
".",
"index",
"df",
"[",
"\"allele\"",
"]",
"=",
"allele",
"df",
"[",
"\"selected\"",
"]",
"=",
"False",
"round_num",
"=",
"1",
"while",
"not",
"df",
".",
"selected",
".",
"all",
"(",
")",
"and",
"sum",
"(",
"df",
".",
"selected",
")",
"<",
"max_models",
":",
"score_col",
"=",
"\"score_%2d\"",
"%",
"round_num",
"prev_score_col",
"=",
"\"score_%2d\"",
"%",
"(",
"round_num",
"-",
"1",
")",
"existing_selected",
"=",
"list",
"(",
"df",
"[",
"df",
".",
"selected",
"]",
".",
"model",
")",
"df",
"[",
"score_col",
"]",
"=",
"[",
"numpy",
".",
"nan",
"if",
"row",
".",
"selected",
"else",
"score_function",
"(",
"Class1AffinityPredictor",
"(",
"allele_to_allele_specific_models",
"=",
"{",
"allele",
":",
"[",
"row",
".",
"model",
"]",
"+",
"existing_selected",
"}",
")",
")",
"for",
"(",
"_",
",",
"row",
")",
"in",
"df",
".",
"iterrows",
"(",
")",
"]",
"if",
"round_num",
">",
"min_models",
"and",
"(",
"df",
"[",
"score_col",
"]",
".",
"max",
"(",
")",
"<",
"df",
"[",
"prev_score_col",
"]",
".",
"max",
"(",
")",
")",
":",
"break",
"# In case of a tie, pick a model at random.",
"(",
"best_model_index",
",",
")",
"=",
"df",
".",
"loc",
"[",
"(",
"df",
"[",
"score_col",
"]",
"==",
"df",
"[",
"score_col",
"]",
".",
"max",
"(",
")",
")",
"]",
".",
"sample",
"(",
"1",
")",
".",
"index",
"df",
".",
"loc",
"[",
"best_model_index",
",",
"\"selected\"",
"]",
"=",
"True",
"round_num",
"+=",
"1",
"dfs",
".",
"append",
"(",
"df",
")",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
"=",
"list",
"(",
"df",
".",
"loc",
"[",
"df",
".",
"selected",
"]",
".",
"model",
")",
"df",
"=",
"pandas",
".",
"concat",
"(",
"dfs",
",",
"ignore_index",
"=",
"True",
")",
"new_predictor",
"=",
"Class1AffinityPredictor",
"(",
"allele_to_allele_specific_models",
",",
"metadata_dataframes",
"=",
"{",
"\"model_selection\"",
":",
"df",
",",
"}",
")",
"return",
"new_predictor"
] | Perform model selection using a user-specified scoring function.
Model selection is done using a "step up" variable selection procedure,
in which models are repeatedly added to an ensemble until the score
stops improving.
Parameters
----------
score_function : Class1AffinityPredictor -> float function
Scoring function
alleles : list of string, optional
If not specified, model selection is performed for all alleles.
min_models : int, optional
Min models to select per allele
max_models : int, optional
Max models to select per allele
Returns
-------
Class1AffinityPredictor : predictor containing the selected models | [
"Perform",
"model",
"selection",
"using",
"a",
"user",
"-",
"specified",
"scoring",
"function",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L1159-L1245 | train |
openvax/mhcflurry | mhcflurry/percent_rank_transform.py | PercentRankTransform.to_series | def to_series(self):
"""
Serialize the fit to a pandas.Series.
The index on the series gives the bin edges and the values give the CDF.
Returns
-------
pandas.Series
"""
return pandas.Series(
self.cdf, index=[numpy.nan] + list(self.bin_edges) + [numpy.nan]) | python | def to_series(self):
"""
Serialize the fit to a pandas.Series.
The index on the series gives the bin edges and the values give the CDF.
Returns
-------
pandas.Series
"""
return pandas.Series(
self.cdf, index=[numpy.nan] + list(self.bin_edges) + [numpy.nan]) | [
"def",
"to_series",
"(",
"self",
")",
":",
"return",
"pandas",
".",
"Series",
"(",
"self",
".",
"cdf",
",",
"index",
"=",
"[",
"numpy",
".",
"nan",
"]",
"+",
"list",
"(",
"self",
".",
"bin_edges",
")",
"+",
"[",
"numpy",
".",
"nan",
"]",
")"
] | Serialize the fit to a pandas.Series.
The index on the series gives the bin edges and the values give the CDF.
Returns
-------
pandas.Series | [
"Serialize",
"the",
"fit",
"to",
"a",
"pandas",
".",
"Series",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/percent_rank_transform.py#L46-L58 | train |
openvax/mhcflurry | mhcflurry/downloads.py | get_default_class1_models_dir | def get_default_class1_models_dir(test_exists=True):
"""
Return the absolute path to the default class1 models dir.
If environment variable MHCFLURRY_DEFAULT_CLASS1_MODELS is set to an
absolute path, return that path. If it's set to a relative path (i.e. does
not start with /) then return that path taken to be relative to the mhcflurry
downloads dir.
If environment variable MHCFLURRY_DEFAULT_CLASS1_MODELS is NOT set,
then return the path to downloaded models in the "models_class1" download.
Parameters
----------
test_exists : boolean, optional
Whether to raise an exception if the path does not exist
Returns
-------
string : absolute path
"""
if _MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR:
result = join(get_downloads_dir(), _MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR)
if test_exists and not exists(result):
raise IOError("No such directory: %s" % result)
return result
else:
return get_path("models_class1", "models", test_exists=test_exists) | python | def get_default_class1_models_dir(test_exists=True):
"""
Return the absolute path to the default class1 models dir.
If environment variable MHCFLURRY_DEFAULT_CLASS1_MODELS is set to an
absolute path, return that path. If it's set to a relative path (i.e. does
not start with /) then return that path taken to be relative to the mhcflurry
downloads dir.
If environment variable MHCFLURRY_DEFAULT_CLASS1_MODELS is NOT set,
then return the path to downloaded models in the "models_class1" download.
Parameters
----------
test_exists : boolean, optional
Whether to raise an exception if the path does not exist
Returns
-------
string : absolute path
"""
if _MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR:
result = join(get_downloads_dir(), _MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR)
if test_exists and not exists(result):
raise IOError("No such directory: %s" % result)
return result
else:
return get_path("models_class1", "models", test_exists=test_exists) | [
"def",
"get_default_class1_models_dir",
"(",
"test_exists",
"=",
"True",
")",
":",
"if",
"_MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR",
":",
"result",
"=",
"join",
"(",
"get_downloads_dir",
"(",
")",
",",
"_MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR",
")",
"if",
"test_exists",
"and",
"not",
"exists",
"(",
"result",
")",
":",
"raise",
"IOError",
"(",
"\"No such directory: %s\"",
"%",
"result",
")",
"return",
"result",
"else",
":",
"return",
"get_path",
"(",
"\"models_class1\"",
",",
"\"models\"",
",",
"test_exists",
"=",
"test_exists",
")"
] | Return the absolute path to the default class1 models dir.
If environment variable MHCFLURRY_DEFAULT_CLASS1_MODELS is set to an
absolute path, return that path. If it's set to a relative path (i.e. does
not start with /) then return that path taken to be relative to the mhcflurry
downloads dir.
If environment variable MHCFLURRY_DEFAULT_CLASS1_MODELS is NOT set,
then return the path to downloaded models in the "models_class1" download.
Parameters
----------
test_exists : boolean, optional
Whether to raise an exception if the path does not exist
Returns
-------
string : absolute path | [
"Return",
"the",
"absolute",
"path",
"to",
"the",
"default",
"class1",
"models",
"dir",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/downloads.py#L57-L85 | train |
openvax/mhcflurry | mhcflurry/downloads.py | get_current_release_downloads | def get_current_release_downloads():
"""
Return a dict of all available downloads in the current release.
The dict keys are the names of the downloads. The values are a dict
with two entries:
downloaded : bool
Whether the download is currently available locally
metadata : dict
Info about the download from downloads.yml such as URL
"""
downloads = (
get_downloads_metadata()
['releases']
[get_current_release()]
['downloads'])
return OrderedDict(
(download["name"], {
'downloaded': exists(join(get_downloads_dir(), download["name"])),
'metadata': download,
}) for download in downloads
) | python | def get_current_release_downloads():
"""
Return a dict of all available downloads in the current release.
The dict keys are the names of the downloads. The values are a dict
with two entries:
downloaded : bool
Whether the download is currently available locally
metadata : dict
Info about the download from downloads.yml such as URL
"""
downloads = (
get_downloads_metadata()
['releases']
[get_current_release()]
['downloads'])
return OrderedDict(
(download["name"], {
'downloaded': exists(join(get_downloads_dir(), download["name"])),
'metadata': download,
}) for download in downloads
) | [
"def",
"get_current_release_downloads",
"(",
")",
":",
"downloads",
"=",
"(",
"get_downloads_metadata",
"(",
")",
"[",
"'releases'",
"]",
"[",
"get_current_release",
"(",
")",
"]",
"[",
"'downloads'",
"]",
")",
"return",
"OrderedDict",
"(",
"(",
"download",
"[",
"\"name\"",
"]",
",",
"{",
"'downloaded'",
":",
"exists",
"(",
"join",
"(",
"get_downloads_dir",
"(",
")",
",",
"download",
"[",
"\"name\"",
"]",
")",
")",
",",
"'metadata'",
":",
"download",
",",
"}",
")",
"for",
"download",
"in",
"downloads",
")"
] | Return a dict of all available downloads in the current release.
The dict keys are the names of the downloads. The values are a dict
with two entries:
downloaded : bool
Whether the download is currently available locally
metadata : dict
Info about the download from downloads.yml such as URL | [
"Return",
"a",
"dict",
"of",
"all",
"available",
"downloads",
"in",
"the",
"current",
"release",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/downloads.py#L88-L111 | train |
openvax/mhcflurry | mhcflurry/downloads.py | get_path | def get_path(download_name, filename='', test_exists=True):
"""
Get the local path to a file in a MHCflurry download
Parameters
-----------
download_name : string
filename : string
Relative path within the download to the file of interest
test_exists : boolean
If True (default) throw an error telling the user how to download the
data if the file does not exist
Returns
-----------
string giving local absolute path
"""
assert '/' not in download_name, "Invalid download: %s" % download_name
path = join(get_downloads_dir(), download_name, filename)
if test_exists and not exists(path):
raise RuntimeError(
"Missing MHCflurry downloadable file: %s. "
"To download this data, run:\n\tmhcflurry-downloads fetch %s\n"
"in a shell."
% (quote(path), download_name))
return path | python | def get_path(download_name, filename='', test_exists=True):
"""
Get the local path to a file in a MHCflurry download
Parameters
-----------
download_name : string
filename : string
Relative path within the download to the file of interest
test_exists : boolean
If True (default) throw an error telling the user how to download the
data if the file does not exist
Returns
-----------
string giving local absolute path
"""
assert '/' not in download_name, "Invalid download: %s" % download_name
path = join(get_downloads_dir(), download_name, filename)
if test_exists and not exists(path):
raise RuntimeError(
"Missing MHCflurry downloadable file: %s. "
"To download this data, run:\n\tmhcflurry-downloads fetch %s\n"
"in a shell."
% (quote(path), download_name))
return path | [
"def",
"get_path",
"(",
"download_name",
",",
"filename",
"=",
"''",
",",
"test_exists",
"=",
"True",
")",
":",
"assert",
"'/'",
"not",
"in",
"download_name",
",",
"\"Invalid download: %s\"",
"%",
"download_name",
"path",
"=",
"join",
"(",
"get_downloads_dir",
"(",
")",
",",
"download_name",
",",
"filename",
")",
"if",
"test_exists",
"and",
"not",
"exists",
"(",
"path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Missing MHCflurry downloadable file: %s. \"",
"\"To download this data, run:\\n\\tmhcflurry-downloads fetch %s\\n\"",
"\"in a shell.\"",
"%",
"(",
"quote",
"(",
"path",
")",
",",
"download_name",
")",
")",
"return",
"path"
] | Get the local path to a file in a MHCflurry download
Parameters
-----------
download_name : string
filename : string
Relative path within the download to the file of interest
test_exists : boolean
If True (default) throw an error telling the user how to download the
data if the file does not exist
Returns
-----------
string giving local absolute path | [
"Get",
"the",
"local",
"path",
"to",
"a",
"file",
"in",
"a",
"MHCflurry",
"download"
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/downloads.py#L114-L141 | train |
openvax/mhcflurry | mhcflurry/downloads.py | configure | def configure():
"""
Setup various global variables based on environment variables.
"""
global _DOWNLOADS_DIR
global _CURRENT_RELEASE
_CURRENT_RELEASE = None
_DOWNLOADS_DIR = environ.get("MHCFLURRY_DOWNLOADS_DIR")
if not _DOWNLOADS_DIR:
metadata = get_downloads_metadata()
_CURRENT_RELEASE = environ.get("MHCFLURRY_DOWNLOADS_CURRENT_RELEASE")
if not _CURRENT_RELEASE:
_CURRENT_RELEASE = metadata['current-release']
current_release_compatability = (
metadata["releases"][_CURRENT_RELEASE]["compatibility-version"])
current_compatability = metadata["current-compatibility-version"]
if current_release_compatability != current_compatability:
logging.warn(
"The specified downloads are not compatible with this version "
"of the MHCflurry codebase. Downloads: release %s, "
"compatability version: %d. Code compatability version: %d" % (
_CURRENT_RELEASE,
current_release_compatability,
current_compatability))
data_dir = environ.get("MHCFLURRY_DATA_DIR")
if not data_dir:
# increase the version every time we make a breaking change in
# how the data is organized. For changes to e.g. just model
# serialization, the downloads release numbers should be used.
data_dir = user_data_dir("mhcflurry", version="4")
_DOWNLOADS_DIR = join(data_dir, _CURRENT_RELEASE)
logging.debug("Configured MHCFLURRY_DOWNLOADS_DIR: %s" % _DOWNLOADS_DIR) | python | def configure():
"""
Setup various global variables based on environment variables.
"""
global _DOWNLOADS_DIR
global _CURRENT_RELEASE
_CURRENT_RELEASE = None
_DOWNLOADS_DIR = environ.get("MHCFLURRY_DOWNLOADS_DIR")
if not _DOWNLOADS_DIR:
metadata = get_downloads_metadata()
_CURRENT_RELEASE = environ.get("MHCFLURRY_DOWNLOADS_CURRENT_RELEASE")
if not _CURRENT_RELEASE:
_CURRENT_RELEASE = metadata['current-release']
current_release_compatability = (
metadata["releases"][_CURRENT_RELEASE]["compatibility-version"])
current_compatability = metadata["current-compatibility-version"]
if current_release_compatability != current_compatability:
logging.warn(
"The specified downloads are not compatible with this version "
"of the MHCflurry codebase. Downloads: release %s, "
"compatability version: %d. Code compatability version: %d" % (
_CURRENT_RELEASE,
current_release_compatability,
current_compatability))
data_dir = environ.get("MHCFLURRY_DATA_DIR")
if not data_dir:
# increase the version every time we make a breaking change in
# how the data is organized. For changes to e.g. just model
# serialization, the downloads release numbers should be used.
data_dir = user_data_dir("mhcflurry", version="4")
_DOWNLOADS_DIR = join(data_dir, _CURRENT_RELEASE)
logging.debug("Configured MHCFLURRY_DOWNLOADS_DIR: %s" % _DOWNLOADS_DIR) | [
"def",
"configure",
"(",
")",
":",
"global",
"_DOWNLOADS_DIR",
"global",
"_CURRENT_RELEASE",
"_CURRENT_RELEASE",
"=",
"None",
"_DOWNLOADS_DIR",
"=",
"environ",
".",
"get",
"(",
"\"MHCFLURRY_DOWNLOADS_DIR\"",
")",
"if",
"not",
"_DOWNLOADS_DIR",
":",
"metadata",
"=",
"get_downloads_metadata",
"(",
")",
"_CURRENT_RELEASE",
"=",
"environ",
".",
"get",
"(",
"\"MHCFLURRY_DOWNLOADS_CURRENT_RELEASE\"",
")",
"if",
"not",
"_CURRENT_RELEASE",
":",
"_CURRENT_RELEASE",
"=",
"metadata",
"[",
"'current-release'",
"]",
"current_release_compatability",
"=",
"(",
"metadata",
"[",
"\"releases\"",
"]",
"[",
"_CURRENT_RELEASE",
"]",
"[",
"\"compatibility-version\"",
"]",
")",
"current_compatability",
"=",
"metadata",
"[",
"\"current-compatibility-version\"",
"]",
"if",
"current_release_compatability",
"!=",
"current_compatability",
":",
"logging",
".",
"warn",
"(",
"\"The specified downloads are not compatible with this version \"",
"\"of the MHCflurry codebase. Downloads: release %s, \"",
"\"compatability version: %d. Code compatability version: %d\"",
"%",
"(",
"_CURRENT_RELEASE",
",",
"current_release_compatability",
",",
"current_compatability",
")",
")",
"data_dir",
"=",
"environ",
".",
"get",
"(",
"\"MHCFLURRY_DATA_DIR\"",
")",
"if",
"not",
"data_dir",
":",
"# increase the version every time we make a breaking change in",
"# how the data is organized. For changes to e.g. just model",
"# serialization, the downloads release numbers should be used.",
"data_dir",
"=",
"user_data_dir",
"(",
"\"mhcflurry\"",
",",
"version",
"=",
"\"4\"",
")",
"_DOWNLOADS_DIR",
"=",
"join",
"(",
"data_dir",
",",
"_CURRENT_RELEASE",
")",
"logging",
".",
"debug",
"(",
"\"Configured MHCFLURRY_DOWNLOADS_DIR: %s\"",
"%",
"_DOWNLOADS_DIR",
")"
] | Setup various global variables based on environment variables. | [
"Setup",
"various",
"global",
"variables",
"based",
"on",
"environment",
"variables",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/downloads.py#L144-L179 | train |
openvax/mhcflurry | mhcflurry/parallelism.py | make_worker_pool | def make_worker_pool(
processes=None,
initializer=None,
initializer_kwargs_per_process=None,
max_tasks_per_worker=None):
"""
Convenience wrapper to create a multiprocessing.Pool.
This function adds support for per-worker initializer arguments, which are
not natively supported by the multiprocessing module. The motivation for
this feature is to support allocating each worker to a (different) GPU.
IMPLEMENTATION NOTE:
The per-worker initializer arguments are implemented using a Queue. Each
worker reads its arguments from this queue when it starts. When it
terminates, it adds its initializer arguments back to the queue, so a
future process can initialize itself using these arguments.
There is one issue with this approach, however. If a worker crashes, it
never repopulates the queue of initializer arguments. This will prevent
any future worker from re-using those arguments. To deal with this
issue we add a second 'backup queue'. This queue always contains the
full set of initializer arguments: whenever a worker reads from it, it
always pushes the pop'd args back to the end of the queue immediately.
If the primary arg queue is ever empty, then workers will read
from this backup queue.
Parameters
----------
processes : int
Number of workers. Default: num CPUs.
initializer : function, optional
Init function to call in each worker
initializer_kwargs_per_process : list of dict, optional
Arguments to pass to initializer function for each worker. Length of
list must equal the number of workers.
max_tasks_per_worker : int, optional
Restart workers after this many tasks. Requires Python >=3.2.
Returns
-------
multiprocessing.Pool
"""
if not processes:
processes = cpu_count()
pool_kwargs = {
'processes': processes,
}
if max_tasks_per_worker:
pool_kwargs["maxtasksperchild"] = max_tasks_per_worker
if initializer:
if initializer_kwargs_per_process:
assert len(initializer_kwargs_per_process) == processes
kwargs_queue = Queue()
kwargs_queue_backup = Queue()
for kwargs in initializer_kwargs_per_process:
kwargs_queue.put(kwargs)
kwargs_queue_backup.put(kwargs)
pool_kwargs["initializer"] = worker_init_entry_point
pool_kwargs["initargs"] = (
initializer, kwargs_queue, kwargs_queue_backup)
else:
pool_kwargs["initializer"] = initializer
worker_pool = Pool(**pool_kwargs)
print("Started pool: %s" % str(worker_pool))
pprint(pool_kwargs)
return worker_pool | python | def make_worker_pool(
processes=None,
initializer=None,
initializer_kwargs_per_process=None,
max_tasks_per_worker=None):
"""
Convenience wrapper to create a multiprocessing.Pool.
This function adds support for per-worker initializer arguments, which are
not natively supported by the multiprocessing module. The motivation for
this feature is to support allocating each worker to a (different) GPU.
IMPLEMENTATION NOTE:
The per-worker initializer arguments are implemented using a Queue. Each
worker reads its arguments from this queue when it starts. When it
terminates, it adds its initializer arguments back to the queue, so a
future process can initialize itself using these arguments.
There is one issue with this approach, however. If a worker crashes, it
never repopulates the queue of initializer arguments. This will prevent
any future worker from re-using those arguments. To deal with this
issue we add a second 'backup queue'. This queue always contains the
full set of initializer arguments: whenever a worker reads from it, it
always pushes the pop'd args back to the end of the queue immediately.
If the primary arg queue is ever empty, then workers will read
from this backup queue.
Parameters
----------
processes : int
Number of workers. Default: num CPUs.
initializer : function, optional
Init function to call in each worker
initializer_kwargs_per_process : list of dict, optional
Arguments to pass to initializer function for each worker. Length of
list must equal the number of workers.
max_tasks_per_worker : int, optional
Restart workers after this many tasks. Requires Python >=3.2.
Returns
-------
multiprocessing.Pool
"""
if not processes:
processes = cpu_count()
pool_kwargs = {
'processes': processes,
}
if max_tasks_per_worker:
pool_kwargs["maxtasksperchild"] = max_tasks_per_worker
if initializer:
if initializer_kwargs_per_process:
assert len(initializer_kwargs_per_process) == processes
kwargs_queue = Queue()
kwargs_queue_backup = Queue()
for kwargs in initializer_kwargs_per_process:
kwargs_queue.put(kwargs)
kwargs_queue_backup.put(kwargs)
pool_kwargs["initializer"] = worker_init_entry_point
pool_kwargs["initargs"] = (
initializer, kwargs_queue, kwargs_queue_backup)
else:
pool_kwargs["initializer"] = initializer
worker_pool = Pool(**pool_kwargs)
print("Started pool: %s" % str(worker_pool))
pprint(pool_kwargs)
return worker_pool | [
"def",
"make_worker_pool",
"(",
"processes",
"=",
"None",
",",
"initializer",
"=",
"None",
",",
"initializer_kwargs_per_process",
"=",
"None",
",",
"max_tasks_per_worker",
"=",
"None",
")",
":",
"if",
"not",
"processes",
":",
"processes",
"=",
"cpu_count",
"(",
")",
"pool_kwargs",
"=",
"{",
"'processes'",
":",
"processes",
",",
"}",
"if",
"max_tasks_per_worker",
":",
"pool_kwargs",
"[",
"\"maxtasksperchild\"",
"]",
"=",
"max_tasks_per_worker",
"if",
"initializer",
":",
"if",
"initializer_kwargs_per_process",
":",
"assert",
"len",
"(",
"initializer_kwargs_per_process",
")",
"==",
"processes",
"kwargs_queue",
"=",
"Queue",
"(",
")",
"kwargs_queue_backup",
"=",
"Queue",
"(",
")",
"for",
"kwargs",
"in",
"initializer_kwargs_per_process",
":",
"kwargs_queue",
".",
"put",
"(",
"kwargs",
")",
"kwargs_queue_backup",
".",
"put",
"(",
"kwargs",
")",
"pool_kwargs",
"[",
"\"initializer\"",
"]",
"=",
"worker_init_entry_point",
"pool_kwargs",
"[",
"\"initargs\"",
"]",
"=",
"(",
"initializer",
",",
"kwargs_queue",
",",
"kwargs_queue_backup",
")",
"else",
":",
"pool_kwargs",
"[",
"\"initializer\"",
"]",
"=",
"initializer",
"worker_pool",
"=",
"Pool",
"(",
"*",
"*",
"pool_kwargs",
")",
"print",
"(",
"\"Started pool: %s\"",
"%",
"str",
"(",
"worker_pool",
")",
")",
"pprint",
"(",
"pool_kwargs",
")",
"return",
"worker_pool"
] | Convenience wrapper to create a multiprocessing.Pool.
This function adds support for per-worker initializer arguments, which are
not natively supported by the multiprocessing module. The motivation for
this feature is to support allocating each worker to a (different) GPU.
IMPLEMENTATION NOTE:
The per-worker initializer arguments are implemented using a Queue. Each
worker reads its arguments from this queue when it starts. When it
terminates, it adds its initializer arguments back to the queue, so a
future process can initialize itself using these arguments.
There is one issue with this approach, however. If a worker crashes, it
never repopulates the queue of initializer arguments. This will prevent
any future worker from re-using those arguments. To deal with this
issue we add a second 'backup queue'. This queue always contains the
full set of initializer arguments: whenever a worker reads from it, it
always pushes the pop'd args back to the end of the queue immediately.
If the primary arg queue is ever empty, then workers will read
from this backup queue.
Parameters
----------
processes : int
Number of workers. Default: num CPUs.
initializer : function, optional
Init function to call in each worker
initializer_kwargs_per_process : list of dict, optional
Arguments to pass to initializer function for each worker. Length of
list must equal the number of workers.
max_tasks_per_worker : int, optional
Restart workers after this many tasks. Requires Python >=3.2.
Returns
-------
multiprocessing.Pool | [
"Convenience",
"wrapper",
"to",
"create",
"a",
"multiprocessing",
".",
"Pool",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/parallelism.py#L115-L188 | train |
openvax/mhcflurry | mhcflurry/calibrate_percentile_ranks_command.py | calibrate_percentile_ranks | def calibrate_percentile_ranks(allele, predictor, peptides=None):
"""
Private helper function.
"""
global GLOBAL_DATA
if peptides is None:
peptides = GLOBAL_DATA["calibration_peptides"]
predictor.calibrate_percentile_ranks(
peptides=peptides,
alleles=[allele])
return {
allele: predictor.allele_to_percent_rank_transform[allele],
} | python | def calibrate_percentile_ranks(allele, predictor, peptides=None):
"""
Private helper function.
"""
global GLOBAL_DATA
if peptides is None:
peptides = GLOBAL_DATA["calibration_peptides"]
predictor.calibrate_percentile_ranks(
peptides=peptides,
alleles=[allele])
return {
allele: predictor.allele_to_percent_rank_transform[allele],
} | [
"def",
"calibrate_percentile_ranks",
"(",
"allele",
",",
"predictor",
",",
"peptides",
"=",
"None",
")",
":",
"global",
"GLOBAL_DATA",
"if",
"peptides",
"is",
"None",
":",
"peptides",
"=",
"GLOBAL_DATA",
"[",
"\"calibration_peptides\"",
"]",
"predictor",
".",
"calibrate_percentile_ranks",
"(",
"peptides",
"=",
"peptides",
",",
"alleles",
"=",
"[",
"allele",
"]",
")",
"return",
"{",
"allele",
":",
"predictor",
".",
"allele_to_percent_rank_transform",
"[",
"allele",
"]",
",",
"}"
] | Private helper function. | [
"Private",
"helper",
"function",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/calibrate_percentile_ranks_command.py#L140-L152 | train |
openvax/mhcflurry | mhcflurry/common.py | set_keras_backend | def set_keras_backend(backend=None, gpu_device_nums=None, num_threads=None):
"""
Configure Keras backend to use GPU or CPU. Only tensorflow is supported.
Parameters
----------
backend : string, optional
one of 'tensorflow-default', 'tensorflow-cpu', 'tensorflow-gpu'
gpu_device_nums : list of int, optional
GPU devices to potentially use
num_threads : int, optional
Tensorflow threads to use
"""
os.environ["KERAS_BACKEND"] = "tensorflow"
original_backend = backend
if not backend:
backend = "tensorflow-default"
if gpu_device_nums is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in gpu_device_nums])
if backend == "tensorflow-cpu" or gpu_device_nums == []:
print("Forcing tensorflow/CPU backend.")
os.environ["CUDA_VISIBLE_DEVICES"] = ""
device_count = {'CPU': 1, 'GPU': 0}
elif backend == "tensorflow-gpu":
print("Forcing tensorflow/GPU backend.")
device_count = {'CPU': 0, 'GPU': 1}
elif backend == "tensorflow-default":
print("Forcing tensorflow backend.")
device_count = None
else:
raise ValueError("Unsupported backend: %s" % backend)
import tensorflow
from keras import backend as K
if K.backend() == 'tensorflow':
config = tensorflow.ConfigProto(device_count=device_count)
config.gpu_options.allow_growth = True
if num_threads:
config.inter_op_parallelism_threads = num_threads
config.intra_op_parallelism_threads = num_threads
session = tensorflow.Session(config=config)
K.set_session(session)
else:
if original_backend or gpu_device_nums or num_threads:
warnings.warn(
"Only tensorflow backend can be customized. Ignoring "
" customization. Backend: %s" % K.backend()) | python | def set_keras_backend(backend=None, gpu_device_nums=None, num_threads=None):
"""
Configure Keras backend to use GPU or CPU. Only tensorflow is supported.
Parameters
----------
backend : string, optional
one of 'tensorflow-default', 'tensorflow-cpu', 'tensorflow-gpu'
gpu_device_nums : list of int, optional
GPU devices to potentially use
num_threads : int, optional
Tensorflow threads to use
"""
os.environ["KERAS_BACKEND"] = "tensorflow"
original_backend = backend
if not backend:
backend = "tensorflow-default"
if gpu_device_nums is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in gpu_device_nums])
if backend == "tensorflow-cpu" or gpu_device_nums == []:
print("Forcing tensorflow/CPU backend.")
os.environ["CUDA_VISIBLE_DEVICES"] = ""
device_count = {'CPU': 1, 'GPU': 0}
elif backend == "tensorflow-gpu":
print("Forcing tensorflow/GPU backend.")
device_count = {'CPU': 0, 'GPU': 1}
elif backend == "tensorflow-default":
print("Forcing tensorflow backend.")
device_count = None
else:
raise ValueError("Unsupported backend: %s" % backend)
import tensorflow
from keras import backend as K
if K.backend() == 'tensorflow':
config = tensorflow.ConfigProto(device_count=device_count)
config.gpu_options.allow_growth = True
if num_threads:
config.inter_op_parallelism_threads = num_threads
config.intra_op_parallelism_threads = num_threads
session = tensorflow.Session(config=config)
K.set_session(session)
else:
if original_backend or gpu_device_nums or num_threads:
warnings.warn(
"Only tensorflow backend can be customized. Ignoring "
" customization. Backend: %s" % K.backend()) | [
"def",
"set_keras_backend",
"(",
"backend",
"=",
"None",
",",
"gpu_device_nums",
"=",
"None",
",",
"num_threads",
"=",
"None",
")",
":",
"os",
".",
"environ",
"[",
"\"KERAS_BACKEND\"",
"]",
"=",
"\"tensorflow\"",
"original_backend",
"=",
"backend",
"if",
"not",
"backend",
":",
"backend",
"=",
"\"tensorflow-default\"",
"if",
"gpu_device_nums",
"is",
"not",
"None",
":",
"os",
".",
"environ",
"[",
"\"CUDA_VISIBLE_DEVICES\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"gpu_device_nums",
"]",
")",
"if",
"backend",
"==",
"\"tensorflow-cpu\"",
"or",
"gpu_device_nums",
"==",
"[",
"]",
":",
"print",
"(",
"\"Forcing tensorflow/CPU backend.\"",
")",
"os",
".",
"environ",
"[",
"\"CUDA_VISIBLE_DEVICES\"",
"]",
"=",
"\"\"",
"device_count",
"=",
"{",
"'CPU'",
":",
"1",
",",
"'GPU'",
":",
"0",
"}",
"elif",
"backend",
"==",
"\"tensorflow-gpu\"",
":",
"print",
"(",
"\"Forcing tensorflow/GPU backend.\"",
")",
"device_count",
"=",
"{",
"'CPU'",
":",
"0",
",",
"'GPU'",
":",
"1",
"}",
"elif",
"backend",
"==",
"\"tensorflow-default\"",
":",
"print",
"(",
"\"Forcing tensorflow backend.\"",
")",
"device_count",
"=",
"None",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unsupported backend: %s\"",
"%",
"backend",
")",
"import",
"tensorflow",
"from",
"keras",
"import",
"backend",
"as",
"K",
"if",
"K",
".",
"backend",
"(",
")",
"==",
"'tensorflow'",
":",
"config",
"=",
"tensorflow",
".",
"ConfigProto",
"(",
"device_count",
"=",
"device_count",
")",
"config",
".",
"gpu_options",
".",
"allow_growth",
"=",
"True",
"if",
"num_threads",
":",
"config",
".",
"inter_op_parallelism_threads",
"=",
"num_threads",
"config",
".",
"intra_op_parallelism_threads",
"=",
"num_threads",
"session",
"=",
"tensorflow",
".",
"Session",
"(",
"config",
"=",
"config",
")",
"K",
".",
"set_session",
"(",
"session",
")",
"else",
":",
"if",
"original_backend",
"or",
"gpu_device_nums",
"or",
"num_threads",
":",
"warnings",
".",
"warn",
"(",
"\"Only tensorflow backend can be customized. Ignoring \"",
"\" customization. Backend: %s\"",
"%",
"K",
".",
"backend",
"(",
")",
")"
] | Configure Keras backend to use GPU or CPU. Only tensorflow is supported.
Parameters
----------
backend : string, optional
one of 'tensorflow-default', 'tensorflow-cpu', 'tensorflow-gpu'
gpu_device_nums : list of int, optional
GPU devices to potentially use
num_threads : int, optional
Tensorflow threads to use | [
"Configure",
"Keras",
"backend",
"to",
"use",
"GPU",
"or",
"CPU",
".",
"Only",
"tensorflow",
"is",
"supported",
"."
] | deb7c1629111254b484a2711619eb2347db36524 | https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/common.py#L14-L68 | train |
JonathanRaiman/pytreebank | pytreebank/labeled_trees.py | LabeledTree.uproot | def uproot(tree):
"""
Take a subbranch of a tree and deep-copy the children
of this subbranch into a new LabeledTree
"""
uprooted = tree.copy()
uprooted.parent = None
for child in tree.all_children():
uprooted.add_general_child(child)
return uprooted | python | def uproot(tree):
"""
Take a subbranch of a tree and deep-copy the children
of this subbranch into a new LabeledTree
"""
uprooted = tree.copy()
uprooted.parent = None
for child in tree.all_children():
uprooted.add_general_child(child)
return uprooted | [
"def",
"uproot",
"(",
"tree",
")",
":",
"uprooted",
"=",
"tree",
".",
"copy",
"(",
")",
"uprooted",
".",
"parent",
"=",
"None",
"for",
"child",
"in",
"tree",
".",
"all_children",
"(",
")",
":",
"uprooted",
".",
"add_general_child",
"(",
"child",
")",
"return",
"uprooted"
] | Take a subbranch of a tree and deep-copy the children
of this subbranch into a new LabeledTree | [
"Take",
"a",
"subranch",
"of",
"a",
"tree",
"and",
"deep",
"-",
"copy",
"the",
"children",
"of",
"this",
"subbranch",
"into",
"a",
"new",
"LabeledTree"
] | 7b4c671d3dff661cc3677e54db817e50c5a1c666 | https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L35-L44 | train |
JonathanRaiman/pytreebank | pytreebank/labeled_trees.py | LabeledTree.copy | def copy(self):
"""
Deep Copy of a LabeledTree
"""
return LabeledTree(
udepth = self.udepth,
depth = self.depth,
text = self.text,
label = self.label,
children = self.children.copy() if self.children != None else [],
parent = self.parent) | python | def copy(self):
"""
Deep Copy of a LabeledTree
"""
return LabeledTree(
udepth = self.udepth,
depth = self.depth,
text = self.text,
label = self.label,
children = self.children.copy() if self.children != None else [],
parent = self.parent) | [
"def",
"copy",
"(",
"self",
")",
":",
"return",
"LabeledTree",
"(",
"udepth",
"=",
"self",
".",
"udepth",
",",
"depth",
"=",
"self",
".",
"depth",
",",
"text",
"=",
"self",
".",
"text",
",",
"label",
"=",
"self",
".",
"label",
",",
"children",
"=",
"self",
".",
"children",
".",
"copy",
"(",
")",
"if",
"self",
".",
"children",
"!=",
"None",
"else",
"[",
"]",
",",
"parent",
"=",
"self",
".",
"parent",
")"
] | Deep Copy of a LabeledTree | [
"Deep",
"Copy",
"of",
"a",
"LabeledTree"
] | 7b4c671d3dff661cc3677e54db817e50c5a1c666 | https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L60-L70 | train |
JonathanRaiman/pytreebank | pytreebank/labeled_trees.py | LabeledTree.add_child | def add_child(self, child):
"""
Adds a branch to the current tree.
"""
self.children.append(child)
child.parent = self
self.udepth = max([child.udepth for child in self.children]) + 1 | python | def add_child(self, child):
"""
Adds a branch to the current tree.
"""
self.children.append(child)
child.parent = self
self.udepth = max([child.udepth for child in self.children]) + 1 | [
"def",
"add_child",
"(",
"self",
",",
"child",
")",
":",
"self",
".",
"children",
".",
"append",
"(",
"child",
")",
"child",
".",
"parent",
"=",
"self",
"self",
".",
"udepth",
"=",
"max",
"(",
"[",
"child",
".",
"udepth",
"for",
"child",
"in",
"self",
".",
"children",
"]",
")",
"+",
"1"
] | Adds a branch to the current tree. | [
"Adds",
"a",
"branch",
"to",
"the",
"current",
"tree",
"."
] | 7b4c671d3dff661cc3677e54db817e50c5a1c666 | https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L72-L78 | train |
JonathanRaiman/pytreebank | pytreebank/labeled_trees.py | LabeledTree.lowercase | def lowercase(self):
"""
Lowercase all strings in this tree.
Works recursively and in-place.
"""
if len(self.children) > 0:
for child in self.children:
child.lowercase()
else:
self.text = self.text.lower() | python | def lowercase(self):
"""
Lowercase all strings in this tree.
Works recursively and in-place.
"""
if len(self.children) > 0:
for child in self.children:
child.lowercase()
else:
self.text = self.text.lower() | [
"def",
"lowercase",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"children",
")",
">",
"0",
":",
"for",
"child",
"in",
"self",
".",
"children",
":",
"child",
".",
"lowercase",
"(",
")",
"else",
":",
"self",
".",
"text",
"=",
"self",
".",
"text",
".",
"lower",
"(",
")"
] | Lowercase all strings in this tree.
Works recursively and in-place. | [
"Lowercase",
"all",
"strings",
"in",
"this",
"tree",
".",
"Works",
"recursively",
"and",
"in",
"-",
"place",
"."
] | 7b4c671d3dff661cc3677e54db817e50c5a1c666 | https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L92-L101 | train |
JonathanRaiman/pytreebank | pytreebank/labeled_trees.py | LabeledTree.inject_visualization_javascript | def inject_visualization_javascript(tree_width=1200, tree_height=400, tree_node_radius=10):
"""
In an Ipython notebook, show SST trees using the same Javascript
code as used by Jason Chuang's visualisations.
"""
from .javascript import insert_sentiment_markup
insert_sentiment_markup(tree_width=tree_width, tree_height=tree_height, tree_node_radius=tree_node_radius) | python | def inject_visualization_javascript(tree_width=1200, tree_height=400, tree_node_radius=10):
"""
In an Ipython notebook, show SST trees using the same Javascript
code as used by Jason Chuang's visualisations.
"""
from .javascript import insert_sentiment_markup
insert_sentiment_markup(tree_width=tree_width, tree_height=tree_height, tree_node_radius=tree_node_radius) | [
"def",
"inject_visualization_javascript",
"(",
"tree_width",
"=",
"1200",
",",
"tree_height",
"=",
"400",
",",
"tree_node_radius",
"=",
"10",
")",
":",
"from",
".",
"javascript",
"import",
"insert_sentiment_markup",
"insert_sentiment_markup",
"(",
"tree_width",
"=",
"tree_width",
",",
"tree_height",
"=",
"tree_height",
",",
"tree_node_radius",
"=",
"tree_node_radius",
")"
] | In an Ipython notebook, show SST trees using the same Javascript
code as used by Jason Chuang's visualisations. | [
"In",
"an",
"Ipython",
"notebook",
"show",
"SST",
"trees",
"using",
"the",
"same",
"Javascript",
"code",
"as",
"used",
"by",
"Jason",
"Chuang",
"s",
"visualisations",
"."
] | 7b4c671d3dff661cc3677e54db817e50c5a1c666 | https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L195-L201 | train |
JonathanRaiman/pytreebank | pytreebank/parse.py | create_tree_from_string | def create_tree_from_string(line):
"""
Parse and convert a string representation
of an example into a LabeledTree datastructure.
Arguments:
----------
line : str, string version of the tree.
Returns:
--------
LabeledTree : parsed tree.
"""
depth = 0
current_word = ""
root = None
current_node = root
for char in line:
if char == '(':
if current_node is not None and len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
depth += 1
if depth > 1:
# replace current head node by this node:
child = LabeledTree(depth=depth)
current_node.add_child(child)
current_node = child
root.add_general_child(child)
else:
root = LabeledTree(depth=depth)
root.add_general_child(root)
current_node = root
elif char == ')':
# assign current word:
if len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
# go up a level:
depth -= 1
if current_node.parent != None:
current_node.parent.udepth = max(current_node.udepth+1, current_node.parent.udepth)
current_node = current_node.parent
else:
# add to current read word
current_word += char
if depth != 0:
raise ParseError("Not an equal amount of closing and opening parentheses")
return root | python | def create_tree_from_string(line):
"""
Parse and convert a string representation
of an example into a LabeledTree datastructure.
Arguments:
----------
line : str, string version of the tree.
Returns:
--------
LabeledTree : parsed tree.
"""
depth = 0
current_word = ""
root = None
current_node = root
for char in line:
if char == '(':
if current_node is not None and len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
depth += 1
if depth > 1:
# replace current head node by this node:
child = LabeledTree(depth=depth)
current_node.add_child(child)
current_node = child
root.add_general_child(child)
else:
root = LabeledTree(depth=depth)
root.add_general_child(root)
current_node = root
elif char == ')':
# assign current word:
if len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
# go up a level:
depth -= 1
if current_node.parent != None:
current_node.parent.udepth = max(current_node.udepth+1, current_node.parent.udepth)
current_node = current_node.parent
else:
# add to current read word
current_word += char
if depth != 0:
raise ParseError("Not an equal amount of closing and opening parentheses")
return root | [
"def",
"create_tree_from_string",
"(",
"line",
")",
":",
"depth",
"=",
"0",
"current_word",
"=",
"\"\"",
"root",
"=",
"None",
"current_node",
"=",
"root",
"for",
"char",
"in",
"line",
":",
"if",
"char",
"==",
"'('",
":",
"if",
"current_node",
"is",
"not",
"None",
"and",
"len",
"(",
"current_word",
")",
">",
"0",
":",
"attribute_text_label",
"(",
"current_node",
",",
"current_word",
")",
"current_word",
"=",
"\"\"",
"depth",
"+=",
"1",
"if",
"depth",
">",
"1",
":",
"# replace current head node by this node:",
"child",
"=",
"LabeledTree",
"(",
"depth",
"=",
"depth",
")",
"current_node",
".",
"add_child",
"(",
"child",
")",
"current_node",
"=",
"child",
"root",
".",
"add_general_child",
"(",
"child",
")",
"else",
":",
"root",
"=",
"LabeledTree",
"(",
"depth",
"=",
"depth",
")",
"root",
".",
"add_general_child",
"(",
"root",
")",
"current_node",
"=",
"root",
"elif",
"char",
"==",
"')'",
":",
"# assign current word:",
"if",
"len",
"(",
"current_word",
")",
">",
"0",
":",
"attribute_text_label",
"(",
"current_node",
",",
"current_word",
")",
"current_word",
"=",
"\"\"",
"# go up a level:",
"depth",
"-=",
"1",
"if",
"current_node",
".",
"parent",
"!=",
"None",
":",
"current_node",
".",
"parent",
".",
"udepth",
"=",
"max",
"(",
"current_node",
".",
"udepth",
"+",
"1",
",",
"current_node",
".",
"parent",
".",
"udepth",
")",
"current_node",
"=",
"current_node",
".",
"parent",
"else",
":",
"# add to current read word",
"current_word",
"+=",
"char",
"if",
"depth",
"!=",
"0",
":",
"raise",
"ParseError",
"(",
"\"Not an equal amount of closing and opening parentheses\"",
")",
"return",
"root"
] | Parse and convert a string representation
of an example into a LabeledTree datastructure.
Arguments:
----------
line : str, string version of the tree.
Returns:
--------
LabeledTree : parsed tree. | [
"Parse",
"and",
"convert",
"a",
"string",
"representation",
"of",
"an",
"example",
"into",
"a",
"LabeledTree",
"datastructure",
"."
] | 7b4c671d3dff661cc3677e54db817e50c5a1c666 | https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L49-L101 | train |
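A minimal usage sketch for the bracket parser documented in the record above. The import path follows the pytreebank/parse.py path field, and the bracketed sample string is an invented SST-style fragment, not taken from the corpus.

from pytreebank.parse import create_tree_from_string

# Each node is written "(label children-or-text)"; SST labels run 0-4.
tree = create_tree_from_string("(3 (2 It) (3 (2 's) (3 good)))")
print(tree.label)           # sentiment label attached to the root node
print(len(tree.children))   # two direct children in this example
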
JonathanRaiman/pytreebank | pytreebank/parse.py | import_tree_corpus | def import_tree_corpus(path):
"""
Import a text file of treebank trees.
Arguments:
----------
path : str, filename for tree corpus.
Returns:
--------
list<LabeledTree> : loaded examples.
"""
tree_list = LabeledTreeCorpus()
with codecs.open(path, "r", "UTF-8") as f:
for line in f:
tree_list.append(create_tree_from_string(line))
return tree_list | python | def import_tree_corpus(path):
"""
Import a text file of treebank trees.
Arguments:
----------
path : str, filename for tree corpus.
Returns:
--------
list<LabeledTree> : loaded examples.
"""
tree_list = LabeledTreeCorpus()
with codecs.open(path, "r", "UTF-8") as f:
for line in f:
tree_list.append(create_tree_from_string(line))
return tree_list | [
"def",
"import_tree_corpus",
"(",
"path",
")",
":",
"tree_list",
"=",
"LabeledTreeCorpus",
"(",
")",
"with",
"codecs",
".",
"open",
"(",
"path",
",",
"\"r\"",
",",
"\"UTF-8\"",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"tree_list",
".",
"append",
"(",
"create_tree_from_string",
"(",
"line",
")",
")",
"return",
"tree_list"
] | Import a text file of treebank trees.
Arguments:
----------
path : str, filename for tree corpus.
Returns:
--------
list<LabeledTree> : loaded examples. | [
"Import",
"a",
"text",
"file",
"of",
"treebank",
"trees",
"."
] | 7b4c671d3dff661cc3677e54db817e50c5a1c666 | https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L144-L160 | train |
JonathanRaiman/pytreebank | pytreebank/parse.py | load_sst | def load_sst(path=None,
url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'):
"""
Download and read in the Stanford Sentiment Treebank dataset
    into a dictionary with 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset
"""
if path is None:
# find a good temporary path
path = os.path.expanduser("~/stanford_sentiment_treebank/")
makedirs(path, exist_ok=True)
fnames = download_sst(path, url)
return {key: import_tree_corpus(value) for key, value in fnames.items()} | python | def load_sst(path=None,
url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'):
"""
Download and read in the Stanford Sentiment Treebank dataset
    into a dictionary with 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset
"""
if path is None:
# find a good temporary path
path = os.path.expanduser("~/stanford_sentiment_treebank/")
makedirs(path, exist_ok=True)
fnames = download_sst(path, url)
return {key: import_tree_corpus(value) for key, value in fnames.items()} | [
"def",
"load_sst",
"(",
"path",
"=",
"None",
",",
"url",
"=",
"'http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'",
")",
":",
"if",
"path",
"is",
"None",
":",
"# find a good temporary path",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/stanford_sentiment_treebank/\"",
")",
"makedirs",
"(",
"path",
",",
"exist_ok",
"=",
"True",
")",
"fnames",
"=",
"download_sst",
"(",
"path",
",",
"url",
")",
"return",
"{",
"key",
":",
"import_tree_corpus",
"(",
"value",
")",
"for",
"key",
",",
"value",
"in",
"fnames",
".",
"items",
"(",
")",
"}"
] | Download and read in the Stanford Sentiment Treebank dataset
into a dictionary with 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset | [
"Download",
"and",
"read",
"in",
"the",
"Stanford",
"Sentiment",
"Treebank",
"dataset",
"into",
"a",
"dictionary",
"with",
"a",
"train",
"dev",
"and",
"test",
"keys",
".",
"The",
"dictionary",
"keys",
"point",
"to",
"lists",
"of",
"LabeledTrees",
"."
] | 7b4c671d3dff661cc3677e54db817e50c5a1c666 | https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L163-L187 | train |
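A short usage sketch for load_sst. It assumes the function is exposed at the package level (import pytreebank) and that to_labeled_lines() behaves as referenced by LabeledTreeCorpus above; the slice size is arbitrary.

import pytreebank

dataset = pytreebank.load_sst()      # downloads to ~/stanford_sentiment_treebank on first use
train_trees = dataset["train"]       # a LabeledTreeCorpus (list of LabeledTree objects)
example = train_trees[0]
for label, text in example.to_labeled_lines()[:3]:
    print(label, text)
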
JonathanRaiman/pytreebank | pytreebank/parse.py | LabeledTreeCorpus.labels | def labels(self):
"""
Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs.
"""
labelings = OrderedDict()
for tree in self:
for label, line in tree.to_labeled_lines():
labelings[line] = label
return labelings | python | def labels(self):
"""
Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs.
"""
labelings = OrderedDict()
for tree in self:
for label, line in tree.to_labeled_lines():
labelings[line] = label
return labelings | [
"def",
"labels",
"(",
"self",
")",
":",
"labelings",
"=",
"OrderedDict",
"(",
")",
"for",
"tree",
"in",
"self",
":",
"for",
"label",
",",
"line",
"in",
"tree",
".",
"to_labeled_lines",
"(",
")",
":",
"labelings",
"[",
"line",
"]",
"=",
"label",
"return",
"labelings"
] | Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs. | [
"Construct",
"a",
"dictionary",
"of",
"string",
"-",
">",
"labels"
] | 7b4c671d3dff661cc3677e54db817e50c5a1c666 | https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L112-L124 | train |
JonathanRaiman/pytreebank | pytreebank/parse.py | LabeledTreeCorpus.to_file | def to_file(self, path, mode="w"):
"""
Save the corpus to a text file in the
original format.
Arguments:
----------
path : str, where to save the corpus.
mode : str, how to open the file.
"""
with open(path, mode=mode) as f:
for tree in self:
for label, line in tree.to_labeled_lines():
f.write(line + "\n") | python | def to_file(self, path, mode="w"):
"""
Save the corpus to a text file in the
original format.
Arguments:
----------
path : str, where to save the corpus.
mode : str, how to open the file.
"""
with open(path, mode=mode) as f:
for tree in self:
for label, line in tree.to_labeled_lines():
f.write(line + "\n") | [
"def",
"to_file",
"(",
"self",
",",
"path",
",",
"mode",
"=",
"\"w\"",
")",
":",
"with",
"open",
"(",
"path",
",",
"mode",
"=",
"mode",
")",
"as",
"f",
":",
"for",
"tree",
"in",
"self",
":",
"for",
"label",
",",
"line",
"in",
"tree",
".",
"to_labeled_lines",
"(",
")",
":",
"f",
".",
"write",
"(",
"line",
"+",
"\"\\n\"",
")"
] | Save the corpus to a text file in the
original format.
Arguments:
----------
path : str, where to save the corpus.
mode : str, how to open the file. | [
"Save",
"the",
"corpus",
"to",
"a",
"text",
"file",
"in",
"the",
"original",
"format",
"."
] | 7b4c671d3dff661cc3677e54db817e50c5a1c666 | https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L127-L140 | train |
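A sketch of the two LabeledTreeCorpus helpers documented above, assuming a corpus obtained from load_sst; the output file name is hypothetical.

import pytreebank

corpus = pytreebank.load_sst()["dev"]        # each split is a LabeledTreeCorpus
phrase_to_label = corpus.labels()            # OrderedDict: subtree string -> sentiment label
print(len(phrase_to_label))
corpus.to_file("dev_phrases.txt")            # re-serialise the corpus in its original format
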
JonathanRaiman/pytreebank | pytreebank/treelstm.py | import_tree_corpus | def import_tree_corpus(labels_path, parents_path, texts_path):
"""
    Import dataset from the TreeLSTM data generation scripts.
Arguments:
----------
    labels_path : str, where the labels are stored (should be in
data/sst/labels.txt).
parents_path : str, where the parent relationships are stored
(should be in data/sst/parents.txt).
    texts_path : str, where the strings for each tree are stored
(should be in data/sst/sents.txt).
Returns:
--------
list<LabeledTree> : loaded example trees.
"""
with codecs.open(labels_path, "r", "UTF-8") as f:
label_lines = f.readlines()
with codecs.open(parents_path, "r", "UTF-8") as f:
parent_lines = f.readlines()
with codecs.open(texts_path, "r", "UTF-8") as f:
word_lines = f.readlines()
assert len(label_lines) == len(parent_lines)
assert len(label_lines) == len(word_lines)
trees = []
for labels, parents, words in zip(label_lines, parent_lines, word_lines):
labels = [int(l) + 2 for l in labels.strip().split(" ")]
parents = [int(l) for l in parents.strip().split(" ")]
words = words.strip().split(" ")
assert len(labels) == len(parents)
trees.append(read_tree(parents, labels, words))
return trees | python | def import_tree_corpus(labels_path, parents_path, texts_path):
"""
    Import dataset from the TreeLSTM data generation scripts.
Arguments:
----------
    labels_path : str, where the labels are stored (should be in
data/sst/labels.txt).
parents_path : str, where the parent relationships are stored
(should be in data/sst/parents.txt).
    texts_path : str, where the strings for each tree are stored
(should be in data/sst/sents.txt).
Returns:
--------
list<LabeledTree> : loaded example trees.
"""
with codecs.open(labels_path, "r", "UTF-8") as f:
label_lines = f.readlines()
with codecs.open(parents_path, "r", "UTF-8") as f:
parent_lines = f.readlines()
with codecs.open(texts_path, "r", "UTF-8") as f:
word_lines = f.readlines()
assert len(label_lines) == len(parent_lines)
assert len(label_lines) == len(word_lines)
trees = []
for labels, parents, words in zip(label_lines, parent_lines, word_lines):
labels = [int(l) + 2 for l in labels.strip().split(" ")]
parents = [int(l) for l in parents.strip().split(" ")]
words = words.strip().split(" ")
assert len(labels) == len(parents)
trees.append(read_tree(parents, labels, words))
return trees | [
"def",
"import_tree_corpus",
"(",
"labels_path",
",",
"parents_path",
",",
"texts_path",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"labels_path",
",",
"\"r\"",
",",
"\"UTF-8\"",
")",
"as",
"f",
":",
"label_lines",
"=",
"f",
".",
"readlines",
"(",
")",
"with",
"codecs",
".",
"open",
"(",
"parents_path",
",",
"\"r\"",
",",
"\"UTF-8\"",
")",
"as",
"f",
":",
"parent_lines",
"=",
"f",
".",
"readlines",
"(",
")",
"with",
"codecs",
".",
"open",
"(",
"texts_path",
",",
"\"r\"",
",",
"\"UTF-8\"",
")",
"as",
"f",
":",
"word_lines",
"=",
"f",
".",
"readlines",
"(",
")",
"assert",
"len",
"(",
"label_lines",
")",
"==",
"len",
"(",
"parent_lines",
")",
"assert",
"len",
"(",
"label_lines",
")",
"==",
"len",
"(",
"word_lines",
")",
"trees",
"=",
"[",
"]",
"for",
"labels",
",",
"parents",
",",
"words",
"in",
"zip",
"(",
"label_lines",
",",
"parent_lines",
",",
"word_lines",
")",
":",
"labels",
"=",
"[",
"int",
"(",
"l",
")",
"+",
"2",
"for",
"l",
"in",
"labels",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\" \"",
")",
"]",
"parents",
"=",
"[",
"int",
"(",
"l",
")",
"for",
"l",
"in",
"parents",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\" \"",
")",
"]",
"words",
"=",
"words",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\" \"",
")",
"assert",
"len",
"(",
"labels",
")",
"==",
"len",
"(",
"parents",
")",
"trees",
".",
"append",
"(",
"read_tree",
"(",
"parents",
",",
"labels",
",",
"words",
")",
")",
"return",
"trees"
] | Import dataset from the TreeLSTM data generation scripts.
Arguments:
----------
labels_path : str, where the labels are stored (should be in
data/sst/labels.txt).
parents_path : str, where the parent relationships are stored
(should be in data/sst/parents.txt).
texts_path : str, where the strings for each tree are stored
(should be in data/sst/sents.txt).
Returns:
--------
list<LabeledTree> : loaded example trees. | [
"Import",
"dataset",
"from",
"the",
"TreeLSTM",
"data",
"generation",
"scrips",
"."
] | 7b4c671d3dff661cc3677e54db817e50c5a1c666 | https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/treelstm.py#L8-L42 | train |
JonathanRaiman/pytreebank | pytreebank/treelstm.py | assign_texts | def assign_texts(node, words, next_idx=0):
"""
Recursively assign the words to nodes by finding and
assigning strings to the leaves of a tree in left
to right order.
"""
if len(node.children) == 0:
node.text = words[next_idx]
return next_idx + 1
else:
for child in node.children:
next_idx = assign_texts(child, words, next_idx)
return next_idx | python | def assign_texts(node, words, next_idx=0):
"""
Recursively assign the words to nodes by finding and
assigning strings to the leaves of a tree in left
to right order.
"""
if len(node.children) == 0:
node.text = words[next_idx]
return next_idx + 1
else:
for child in node.children:
next_idx = assign_texts(child, words, next_idx)
return next_idx | [
"def",
"assign_texts",
"(",
"node",
",",
"words",
",",
"next_idx",
"=",
"0",
")",
":",
"if",
"len",
"(",
"node",
".",
"children",
")",
"==",
"0",
":",
"node",
".",
"text",
"=",
"words",
"[",
"next_idx",
"]",
"return",
"next_idx",
"+",
"1",
"else",
":",
"for",
"child",
"in",
"node",
".",
"children",
":",
"next_idx",
"=",
"assign_texts",
"(",
"child",
",",
"words",
",",
"next_idx",
")",
"return",
"next_idx"
] | Recursively assign the words to nodes by finding and
assigning strings to the leaves of a tree in left
to right order. | [
"Recursively",
"assign",
"the",
"words",
"to",
"nodes",
"by",
"finding",
"and",
"assigning",
"strings",
"to",
"the",
"leaves",
"of",
"a",
"tree",
"in",
"left",
"to",
"right",
"order",
"."
] | 7b4c671d3dff661cc3677e54db817e50c5a1c666 | https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/treelstm.py#L44-L56 | train |
JonathanRaiman/pytreebank | pytreebank/treelstm.py | read_tree | def read_tree(parents, labels, words):
"""
Take as input a list of integers for parents
and labels, along with a list of words, and
reconstruct a LabeledTree.
"""
trees = {}
root = None
for i in range(1, len(parents) + 1):
if not i in trees and parents[i - 1] != - 1:
idx = i
prev = None
while True:
parent = parents[idx - 1]
if parent == -1:
break
tree = LabeledTree()
if prev is not None:
tree.add_child(prev)
trees[idx] = tree
tree.label = labels[idx - 1]
if trees.get(parent) is not None:
trees[parent].add_child(tree)
break
elif parent == 0:
root = tree
break
else:
prev = tree
idx = parent
assert assign_texts(root, words) == len(words)
return root | python | def read_tree(parents, labels, words):
"""
Take as input a list of integers for parents
and labels, along with a list of words, and
reconstruct a LabeledTree.
"""
trees = {}
root = None
for i in range(1, len(parents) + 1):
if not i in trees and parents[i - 1] != - 1:
idx = i
prev = None
while True:
parent = parents[idx - 1]
if parent == -1:
break
tree = LabeledTree()
if prev is not None:
tree.add_child(prev)
trees[idx] = tree
tree.label = labels[idx - 1]
if trees.get(parent) is not None:
trees[parent].add_child(tree)
break
elif parent == 0:
root = tree
break
else:
prev = tree
idx = parent
assert assign_texts(root, words) == len(words)
return root | [
"def",
"read_tree",
"(",
"parents",
",",
"labels",
",",
"words",
")",
":",
"trees",
"=",
"{",
"}",
"root",
"=",
"None",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"parents",
")",
"+",
"1",
")",
":",
"if",
"not",
"i",
"in",
"trees",
"and",
"parents",
"[",
"i",
"-",
"1",
"]",
"!=",
"-",
"1",
":",
"idx",
"=",
"i",
"prev",
"=",
"None",
"while",
"True",
":",
"parent",
"=",
"parents",
"[",
"idx",
"-",
"1",
"]",
"if",
"parent",
"==",
"-",
"1",
":",
"break",
"tree",
"=",
"LabeledTree",
"(",
")",
"if",
"prev",
"is",
"not",
"None",
":",
"tree",
".",
"add_child",
"(",
"prev",
")",
"trees",
"[",
"idx",
"]",
"=",
"tree",
"tree",
".",
"label",
"=",
"labels",
"[",
"idx",
"-",
"1",
"]",
"if",
"trees",
".",
"get",
"(",
"parent",
")",
"is",
"not",
"None",
":",
"trees",
"[",
"parent",
"]",
".",
"add_child",
"(",
"tree",
")",
"break",
"elif",
"parent",
"==",
"0",
":",
"root",
"=",
"tree",
"break",
"else",
":",
"prev",
"=",
"tree",
"idx",
"=",
"parent",
"assert",
"assign_texts",
"(",
"root",
",",
"words",
")",
"==",
"len",
"(",
"words",
")",
"return",
"root"
] | Take as input a list of integers for parents
and labels, along with a list of words, and
reconstruct a LabeledTree. | [
"Take",
"as",
"input",
"a",
"list",
"of",
"integers",
"for",
"parents",
"and",
"labels",
"along",
"with",
"a",
"list",
"of",
"words",
"and",
"reconstruct",
"a",
"LabeledTree",
"."
] | 7b4c671d3dff661cc3677e54db817e50c5a1c666 | https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/treelstm.py#L58-L89 | train |
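A tiny worked example of the parents/labels/words encoding consumed by read_tree, assuming the function can be imported from the pytreebank.treelstm module named in the path field; the three-node tree is made up.

from pytreebank.treelstm import read_tree

# Node ids are 1-based and parents[i-1] == 0 marks the root:
# nodes 1 and 2 are leaves, node 3 is their parent and the root.
parents = [3, 3, 0]
labels = [2, 2, 3]
words = ["fairly", "good"]
root = read_tree(parents, labels, words)
print(root.label)                               # 3
print([child.text for child in root.children])  # ['fairly', 'good']
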
GiulioRossetti/ndlib | ndlib/models/opinions/CognitiveOpDynModel.py | CognitiveOpDynModel.set_initial_status | def set_initial_status(self, configuration=None):
"""
Override behaviour of methods in class DiffusionModel.
Overwrites initial status using random real values.
Generates random node profiles.
"""
super(CognitiveOpDynModel, self).set_initial_status(configuration)
# set node status
for node in self.status:
self.status[node] = np.random.random_sample()
self.initial_status = self.status.copy()
# set new node parameters
self.params['nodes']['cognitive'] = {}
    # first correct the input model parameters and retrieve T_range, B_range and R_distribution
T_range = (self.params['model']['T_range_min'], self.params['model']['T_range_max'])
if self.params['model']['T_range_min'] > self.params['model']['T_range_max']:
T_range = (self.params['model']['T_range_max'], self.params['model']['T_range_min'])
B_range = (self.params['model']['B_range_min'], self.params['model']['B_range_max'])
if self.params['model']['B_range_min'] > self.params['model']['B_range_max']:
B_range = (self.params['model']['B_range_max'], self.params['model']['B_range_min'])
s = float(self.params['model']['R_fraction_negative'] + self.params['model']['R_fraction_neutral'] +
self.params['model']['R_fraction_positive'])
R_distribution = (self.params['model']['R_fraction_negative']/s, self.params['model']['R_fraction_neutral']/s,
self.params['model']['R_fraction_positive']/s)
# then sample parameters from the ranges and distribution
for node in self.graph.nodes():
R_prob = np.random.random_sample()
if R_prob < R_distribution[0]:
R = -1
elif R_prob < (R_distribution[0] + R_distribution[1]):
R = 0
else:
R = 1
# R, B and T parameters in a tuple
self.params['nodes']['cognitive'][node] = (R,
B_range[0] + (B_range[1] - B_range[0])*np.random.random_sample(),
T_range[0] + (T_range[1] - T_range[0])*np.random.random_sample()) | python | def set_initial_status(self, configuration=None):
"""
Override behaviour of methods in class DiffusionModel.
Overwrites initial status using random real values.
Generates random node profiles.
"""
super(CognitiveOpDynModel, self).set_initial_status(configuration)
# set node status
for node in self.status:
self.status[node] = np.random.random_sample()
self.initial_status = self.status.copy()
# set new node parameters
self.params['nodes']['cognitive'] = {}
    # first correct the input model parameters and retrieve T_range, B_range and R_distribution
T_range = (self.params['model']['T_range_min'], self.params['model']['T_range_max'])
if self.params['model']['T_range_min'] > self.params['model']['T_range_max']:
T_range = (self.params['model']['T_range_max'], self.params['model']['T_range_min'])
B_range = (self.params['model']['B_range_min'], self.params['model']['B_range_max'])
if self.params['model']['B_range_min'] > self.params['model']['B_range_max']:
B_range = (self.params['model']['B_range_max'], self.params['model']['B_range_min'])
s = float(self.params['model']['R_fraction_negative'] + self.params['model']['R_fraction_neutral'] +
self.params['model']['R_fraction_positive'])
R_distribution = (self.params['model']['R_fraction_negative']/s, self.params['model']['R_fraction_neutral']/s,
self.params['model']['R_fraction_positive']/s)
# then sample parameters from the ranges and distribution
for node in self.graph.nodes():
R_prob = np.random.random_sample()
if R_prob < R_distribution[0]:
R = -1
elif R_prob < (R_distribution[0] + R_distribution[1]):
R = 0
else:
R = 1
# R, B and T parameters in a tuple
self.params['nodes']['cognitive'][node] = (R,
B_range[0] + (B_range[1] - B_range[0])*np.random.random_sample(),
T_range[0] + (T_range[1] - T_range[0])*np.random.random_sample()) | [
"def",
"set_initial_status",
"(",
"self",
",",
"configuration",
"=",
"None",
")",
":",
"super",
"(",
"CognitiveOpDynModel",
",",
"self",
")",
".",
"set_initial_status",
"(",
"configuration",
")",
"# set node status",
"for",
"node",
"in",
"self",
".",
"status",
":",
"self",
".",
"status",
"[",
"node",
"]",
"=",
"np",
".",
"random",
".",
"random_sample",
"(",
")",
"self",
".",
"initial_status",
"=",
"self",
".",
"status",
".",
"copy",
"(",
")",
"# set new node parameters",
"self",
".",
"params",
"[",
"'nodes'",
"]",
"[",
"'cognitive'",
"]",
"=",
"{",
"}",
"# first correct the input model parameters and retreive T_range, B_range and R_distribution",
"T_range",
"=",
"(",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'T_range_min'",
"]",
",",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'T_range_max'",
"]",
")",
"if",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'T_range_min'",
"]",
">",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'T_range_max'",
"]",
":",
"T_range",
"=",
"(",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'T_range_max'",
"]",
",",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'T_range_min'",
"]",
")",
"B_range",
"=",
"(",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'B_range_min'",
"]",
",",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'B_range_max'",
"]",
")",
"if",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'B_range_min'",
"]",
">",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'B_range_max'",
"]",
":",
"B_range",
"=",
"(",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'B_range_max'",
"]",
",",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'B_range_min'",
"]",
")",
"s",
"=",
"float",
"(",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'R_fraction_negative'",
"]",
"+",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'R_fraction_neutral'",
"]",
"+",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'R_fraction_positive'",
"]",
")",
"R_distribution",
"=",
"(",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'R_fraction_negative'",
"]",
"/",
"s",
",",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'R_fraction_neutral'",
"]",
"/",
"s",
",",
"self",
".",
"params",
"[",
"'model'",
"]",
"[",
"'R_fraction_positive'",
"]",
"/",
"s",
")",
"# then sample parameters from the ranges and distribution",
"for",
"node",
"in",
"self",
".",
"graph",
".",
"nodes",
"(",
")",
":",
"R_prob",
"=",
"np",
".",
"random",
".",
"random_sample",
"(",
")",
"if",
"R_prob",
"<",
"R_distribution",
"[",
"0",
"]",
":",
"R",
"=",
"-",
"1",
"elif",
"R_prob",
"<",
"(",
"R_distribution",
"[",
"0",
"]",
"+",
"R_distribution",
"[",
"1",
"]",
")",
":",
"R",
"=",
"0",
"else",
":",
"R",
"=",
"1",
"# R, B and T parameters in a tuple",
"self",
".",
"params",
"[",
"'nodes'",
"]",
"[",
"'cognitive'",
"]",
"[",
"node",
"]",
"=",
"(",
"R",
",",
"B_range",
"[",
"0",
"]",
"+",
"(",
"B_range",
"[",
"1",
"]",
"-",
"B_range",
"[",
"0",
"]",
")",
"*",
"np",
".",
"random",
".",
"random_sample",
"(",
")",
",",
"T_range",
"[",
"0",
"]",
"+",
"(",
"T_range",
"[",
"1",
"]",
"-",
"T_range",
"[",
"0",
"]",
")",
"*",
"np",
".",
"random",
".",
"random_sample",
"(",
")",
")"
] | Override behaviour of methods in class DiffusionModel.
Overwrites initial status using random real values.
Generates random node profiles. | [
"Override",
"behaviour",
"of",
"methods",
"in",
"class",
"DiffusionModel",
".",
"Overwrites",
"initial",
"status",
"using",
"random",
"real",
"values",
".",
"Generates",
"random",
"node",
"profiles",
"."
] | 23ecf50c0f76ff2714471071ab9ecb600f4a9832 | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/opinions/CognitiveOpDynModel.py#L92-L133 | train |
GiulioRossetti/ndlib | ndlib/models/ModelConfig.py | Configuration.add_node_configuration | def add_node_configuration(self, param_name, node_id, param_value):
"""
Set a parameter for a given node
:param param_name: parameter identifier (as specified by the chosen model)
:param node_id: node identifier
:param param_value: parameter value
"""
if param_name not in self.config['nodes']:
self.config['nodes'][param_name] = {node_id: param_value}
else:
self.config['nodes'][param_name][node_id] = param_value | python | def add_node_configuration(self, param_name, node_id, param_value):
"""
Set a parameter for a given node
:param param_name: parameter identifier (as specified by the chosen model)
:param node_id: node identifier
:param param_value: parameter value
"""
if param_name not in self.config['nodes']:
self.config['nodes'][param_name] = {node_id: param_value}
else:
self.config['nodes'][param_name][node_id] = param_value | [
"def",
"add_node_configuration",
"(",
"self",
",",
"param_name",
",",
"node_id",
",",
"param_value",
")",
":",
"if",
"param_name",
"not",
"in",
"self",
".",
"config",
"[",
"'nodes'",
"]",
":",
"self",
".",
"config",
"[",
"'nodes'",
"]",
"[",
"param_name",
"]",
"=",
"{",
"node_id",
":",
"param_value",
"}",
"else",
":",
"self",
".",
"config",
"[",
"'nodes'",
"]",
"[",
"param_name",
"]",
"[",
"node_id",
"]",
"=",
"param_value"
] | Set a parameter for a given node
:param param_name: parameter identifier (as specified by the chosen model)
:param node_id: node identifier
:param param_value: parameter value | [
"Set",
"a",
"parameter",
"for",
"a",
"given",
"node"
] | 23ecf50c0f76ff2714471071ab9ecb600f4a9832 | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/ModelConfig.py#L72-L83 | train |
GiulioRossetti/ndlib | ndlib/models/ModelConfig.py | Configuration.add_node_set_configuration | def add_node_set_configuration(self, param_name, node_to_value):
"""
Set Nodes parameter
:param param_name: parameter identifier (as specified by the chosen model)
    :param node_to_value: dictionary mapping each node to a parameter value
"""
for nid, val in future.utils.iteritems(node_to_value):
self.add_node_configuration(param_name, nid, val) | python | def add_node_set_configuration(self, param_name, node_to_value):
"""
Set Nodes parameter
:param param_name: parameter identifier (as specified by the chosen model)
    :param node_to_value: dictionary mapping each node to a parameter value
"""
for nid, val in future.utils.iteritems(node_to_value):
self.add_node_configuration(param_name, nid, val) | [
"def",
"add_node_set_configuration",
"(",
"self",
",",
"param_name",
",",
"node_to_value",
")",
":",
"for",
"nid",
",",
"val",
"in",
"future",
".",
"utils",
".",
"iteritems",
"(",
"node_to_value",
")",
":",
"self",
".",
"add_node_configuration",
"(",
"param_name",
",",
"nid",
",",
"val",
")"
] | Set Nodes parameter
:param param_name: parameter identifier (as specified by the chosen model)
:param node_to_value: dictionary mapping each node a parameter value | [
"Set",
"Nodes",
"parameter"
] | 23ecf50c0f76ff2714471071ab9ecb600f4a9832 | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/ModelConfig.py#L85-L93 | train |
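A short sketch of the per-node configuration helpers above. The graph, the parameter name "threshold" and its values are only illustrative; the name must match whatever the chosen ndlib diffusion model expects.

import networkx as nx
from ndlib.models.ModelConfig import Configuration

g = nx.erdos_renyi_graph(1000, 0.1)
config = Configuration()
# one value for every node in a single call ...
config.add_node_set_configuration("threshold", {n: 0.25 for n in g.nodes()})
# ... or node by node
config.add_node_configuration("threshold", 0, 0.30)
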
GiulioRossetti/ndlib | ndlib/models/ModelConfig.py | Configuration.add_edge_configuration | def add_edge_configuration(self, param_name, edge, param_value):
"""
Set a parameter for a given edge
:param param_name: parameter identifier (as specified by the chosen model)
:param edge: edge identifier
:param param_value: parameter value
"""
if param_name not in self.config['edges']:
self.config['edges'][param_name] = {edge: param_value}
else:
self.config['edges'][param_name][edge] = param_value | python | def add_edge_configuration(self, param_name, edge, param_value):
"""
Set a parameter for a given edge
:param param_name: parameter identifier (as specified by the chosen model)
:param edge: edge identifier
:param param_value: parameter value
"""
if param_name not in self.config['edges']:
self.config['edges'][param_name] = {edge: param_value}
else:
self.config['edges'][param_name][edge] = param_value | [
"def",
"add_edge_configuration",
"(",
"self",
",",
"param_name",
",",
"edge",
",",
"param_value",
")",
":",
"if",
"param_name",
"not",
"in",
"self",
".",
"config",
"[",
"'edges'",
"]",
":",
"self",
".",
"config",
"[",
"'edges'",
"]",
"[",
"param_name",
"]",
"=",
"{",
"edge",
":",
"param_value",
"}",
"else",
":",
"self",
".",
"config",
"[",
"'edges'",
"]",
"[",
"param_name",
"]",
"[",
"edge",
"]",
"=",
"param_value"
] | Set a parameter for a given edge
:param param_name: parameter identifier (as specified by the chosen model)
:param edge: edge identifier
:param param_value: parameter value | [
"Set",
"a",
"parameter",
"for",
"a",
"given",
"edge"
] | 23ecf50c0f76ff2714471071ab9ecb600f4a9832 | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/ModelConfig.py#L95-L106 | train |
GiulioRossetti/ndlib | ndlib/models/ModelConfig.py | Configuration.add_edge_set_configuration | def add_edge_set_configuration(self, param_name, edge_to_value):
"""
Set Edges parameter
:param param_name: parameter identifier (as specified by the chosen model)
    :param edge_to_value: dictionary mapping each edge to a parameter value
"""
for edge, val in future.utils.iteritems(edge_to_value):
self.add_edge_configuration(param_name, edge, val) | python | def add_edge_set_configuration(self, param_name, edge_to_value):
"""
Set Edges parameter
:param param_name: parameter identifier (as specified by the chosen model)
    :param edge_to_value: dictionary mapping each edge to a parameter value
"""
for edge, val in future.utils.iteritems(edge_to_value):
self.add_edge_configuration(param_name, edge, val) | [
"def",
"add_edge_set_configuration",
"(",
"self",
",",
"param_name",
",",
"edge_to_value",
")",
":",
"for",
"edge",
",",
"val",
"in",
"future",
".",
"utils",
".",
"iteritems",
"(",
"edge_to_value",
")",
":",
"self",
".",
"add_edge_configuration",
"(",
"param_name",
",",
"edge",
",",
"val",
")"
] | Set Edges parameter
:param param_name: parameter identifier (as specified by the chosen model)
:param edge_to_value: dictionary mapping each edge a parameter value | [
"Set",
"Edges",
"parameter"
] | 23ecf50c0f76ff2714471071ab9ecb600f4a9832 | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/ModelConfig.py#L108-L116 | train |
GiulioRossetti/ndlib | ndlib/utils.py | multi_runs | def multi_runs(model, execution_number=1, iteration_number=50, infection_sets=None,
nprocesses=multiprocessing.cpu_count()):
"""
Multiple executions of a given model varying the initial set of infected nodes
:param model: a configured diffusion model
:param execution_number: number of instantiations
:param iteration_number: number of iterations per execution
    :param infection_sets: predefined sets of infected nodes
    :param nprocesses: number of processes. Defaults to the cpu count.
:return: resulting trends for all the executions
"""
if nprocesses > multiprocessing.cpu_count():
nprocesses = multiprocessing.cpu_count()
executions = []
if infection_sets is not None:
if len(infection_sets) != execution_number:
raise InitializationException(
{"message": "Number of infection sets provided does not match the number of executions required"})
for x in past.builtins.xrange(0, execution_number, nprocesses):
with closing(multiprocessing.Pool(processes=nprocesses, maxtasksperchild=10)) as pool:
tasks = [copy.copy(model).reset(infection_sets[i]) for i in
past.builtins.xrange(x, min(x + nprocesses, execution_number))]
results = [pool.apply_async(__execute, (t, iteration_number)) for t in tasks]
for result in results:
executions.append(result.get())
else:
for x in past.builtins.xrange(0, execution_number, nprocesses):
with closing(multiprocessing.Pool(processes=nprocesses, maxtasksperchild=10)) as pool:
tasks = [copy.deepcopy(model).reset() for _ in
past.builtins.xrange(x, min(x + nprocesses, execution_number))]
results = [pool.apply_async(__execute, (t, iteration_number)) for t in tasks]
for result in results:
executions.append(result.get())
return executions | python | def multi_runs(model, execution_number=1, iteration_number=50, infection_sets=None,
nprocesses=multiprocessing.cpu_count()):
"""
Multiple executions of a given model varying the initial set of infected nodes
:param model: a configured diffusion model
:param execution_number: number of instantiations
:param iteration_number: number of iterations per execution
    :param infection_sets: predefined sets of infected nodes
    :param nprocesses: number of processes. Defaults to the cpu count.
:return: resulting trends for all the executions
"""
if nprocesses > multiprocessing.cpu_count():
nprocesses = multiprocessing.cpu_count()
executions = []
if infection_sets is not None:
if len(infection_sets) != execution_number:
raise InitializationException(
{"message": "Number of infection sets provided does not match the number of executions required"})
for x in past.builtins.xrange(0, execution_number, nprocesses):
with closing(multiprocessing.Pool(processes=nprocesses, maxtasksperchild=10)) as pool:
tasks = [copy.copy(model).reset(infection_sets[i]) for i in
past.builtins.xrange(x, min(x + nprocesses, execution_number))]
results = [pool.apply_async(__execute, (t, iteration_number)) for t in tasks]
for result in results:
executions.append(result.get())
else:
for x in past.builtins.xrange(0, execution_number, nprocesses):
with closing(multiprocessing.Pool(processes=nprocesses, maxtasksperchild=10)) as pool:
tasks = [copy.deepcopy(model).reset() for _ in
past.builtins.xrange(x, min(x + nprocesses, execution_number))]
results = [pool.apply_async(__execute, (t, iteration_number)) for t in tasks]
for result in results:
executions.append(result.get())
return executions | [
"def",
"multi_runs",
"(",
"model",
",",
"execution_number",
"=",
"1",
",",
"iteration_number",
"=",
"50",
",",
"infection_sets",
"=",
"None",
",",
"nprocesses",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
")",
":",
"if",
"nprocesses",
">",
"multiprocessing",
".",
"cpu_count",
"(",
")",
":",
"nprocesses",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"executions",
"=",
"[",
"]",
"if",
"infection_sets",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"infection_sets",
")",
"!=",
"execution_number",
":",
"raise",
"InitializationException",
"(",
"{",
"\"message\"",
":",
"\"Number of infection sets provided does not match the number of executions required\"",
"}",
")",
"for",
"x",
"in",
"past",
".",
"builtins",
".",
"xrange",
"(",
"0",
",",
"execution_number",
",",
"nprocesses",
")",
":",
"with",
"closing",
"(",
"multiprocessing",
".",
"Pool",
"(",
"processes",
"=",
"nprocesses",
",",
"maxtasksperchild",
"=",
"10",
")",
")",
"as",
"pool",
":",
"tasks",
"=",
"[",
"copy",
".",
"copy",
"(",
"model",
")",
".",
"reset",
"(",
"infection_sets",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"past",
".",
"builtins",
".",
"xrange",
"(",
"x",
",",
"min",
"(",
"x",
"+",
"nprocesses",
",",
"execution_number",
")",
")",
"]",
"results",
"=",
"[",
"pool",
".",
"apply_async",
"(",
"__execute",
",",
"(",
"t",
",",
"iteration_number",
")",
")",
"for",
"t",
"in",
"tasks",
"]",
"for",
"result",
"in",
"results",
":",
"executions",
".",
"append",
"(",
"result",
".",
"get",
"(",
")",
")",
"else",
":",
"for",
"x",
"in",
"past",
".",
"builtins",
".",
"xrange",
"(",
"0",
",",
"execution_number",
",",
"nprocesses",
")",
":",
"with",
"closing",
"(",
"multiprocessing",
".",
"Pool",
"(",
"processes",
"=",
"nprocesses",
",",
"maxtasksperchild",
"=",
"10",
")",
")",
"as",
"pool",
":",
"tasks",
"=",
"[",
"copy",
".",
"deepcopy",
"(",
"model",
")",
".",
"reset",
"(",
")",
"for",
"_",
"in",
"past",
".",
"builtins",
".",
"xrange",
"(",
"x",
",",
"min",
"(",
"x",
"+",
"nprocesses",
",",
"execution_number",
")",
")",
"]",
"results",
"=",
"[",
"pool",
".",
"apply_async",
"(",
"__execute",
",",
"(",
"t",
",",
"iteration_number",
")",
")",
"for",
"t",
"in",
"tasks",
"]",
"for",
"result",
"in",
"results",
":",
"executions",
".",
"append",
"(",
"result",
".",
"get",
"(",
")",
")",
"return",
"executions"
] | Multiple executions of a given model varying the initial set of infected nodes
:param model: a configured diffusion model
:param execution_number: number of instantiations
:param iteration_number: number of iterations per execution
:param infection_sets: predefined sets of infected nodes
:param nprocesses: number of processes. Defaults to the cpu count.
:return: resulting trends for all the executions | [
"Multiple",
"executions",
"of",
"a",
"given",
"model",
"varying",
"the",
"initial",
"set",
"of",
"infected",
"nodes"
] | 23ecf50c0f76ff2714471071ab9ecb600f4a9832 | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/utils.py#L15-L58 | train |
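An illustrative driver for multi_runs. The SIR model, Configuration.add_model_parameter and the parameter names are assumed from ndlib's usual API and are not shown in this record; the values are arbitrary.

import networkx as nx
import ndlib.models.epidemics as ep
import ndlib.models.ModelConfig as mc
from ndlib.utils import multi_runs

g = nx.erdos_renyi_graph(1000, 0.1)
model = ep.SIRModel(g)
config = mc.Configuration()
config.add_model_parameter("beta", 0.001)
config.add_model_parameter("gamma", 0.01)
config.add_model_parameter("fraction_infected", 0.05)
model.set_initial_status(config)

# four independent executions of 100 iterations each, on two processes
trends = multi_runs(model, execution_number=4, iteration_number=100, nprocesses=2)
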
GiulioRossetti/ndlib | ndlib/utils.py | __execute | def __execute(model, iteration_number):
"""
Execute a simulation model
:param model: a configured diffusion model
:param iteration_number: number of iterations
:return: computed trends
"""
iterations = model.iteration_bunch(iteration_number, False)
trends = model.build_trends(iterations)[0]
del iterations
del model
return trends | python | def __execute(model, iteration_number):
"""
Execute a simulation model
:param model: a configured diffusion model
:param iteration_number: number of iterations
:return: computed trends
"""
iterations = model.iteration_bunch(iteration_number, False)
trends = model.build_trends(iterations)[0]
del iterations
del model
return trends | [
"def",
"__execute",
"(",
"model",
",",
"iteration_number",
")",
":",
"iterations",
"=",
"model",
".",
"iteration_bunch",
"(",
"iteration_number",
",",
"False",
")",
"trends",
"=",
"model",
".",
"build_trends",
"(",
"iterations",
")",
"[",
"0",
"]",
"del",
"iterations",
"del",
"model",
"return",
"trends"
] | Execute a simulation model
:param model: a configured diffusion model
:param iteration_number: number of iterations
:return: computed trends | [
"Execute",
"a",
"simulation",
"model"
] | 23ecf50c0f76ff2714471071ab9ecb600f4a9832 | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/utils.py#L61-L73 | train |
GiulioRossetti/ndlib | ndlib/models/opinions/AlgorithmicBiasModel.py | AlgorithmicBiasModel.set_initial_status | def set_initial_status(self, configuration=None):
"""
Override behaviour of methods in class DiffusionModel.
Overwrites initial status using random real values.
"""
super(AlgorithmicBiasModel, self).set_initial_status(configuration)
# set node status
for node in self.status:
self.status[node] = np.random.random_sample()
self.initial_status = self.status.copy() | python | def set_initial_status(self, configuration=None):
"""
Override behaviour of methods in class DiffusionModel.
Overwrites initial status using random real values.
"""
super(AlgorithmicBiasModel, self).set_initial_status(configuration)
# set node status
for node in self.status:
self.status[node] = np.random.random_sample()
self.initial_status = self.status.copy() | [
"def",
"set_initial_status",
"(",
"self",
",",
"configuration",
"=",
"None",
")",
":",
"super",
"(",
"AlgorithmicBiasModel",
",",
"self",
")",
".",
"set_initial_status",
"(",
"configuration",
")",
"# set node status",
"for",
"node",
"in",
"self",
".",
"status",
":",
"self",
".",
"status",
"[",
"node",
"]",
"=",
"np",
".",
"random",
".",
"random_sample",
"(",
")",
"self",
".",
"initial_status",
"=",
"self",
".",
"status",
".",
"copy",
"(",
")"
] | Override behaviour of methods in class DiffusionModel.
Overwrites initial status using random real values. | [
"Override",
"behaviour",
"of",
"methods",
"in",
"class",
"DiffusionModel",
".",
"Overwrites",
"initial",
"status",
"using",
"random",
"real",
"values",
"."
] | 23ecf50c0f76ff2714471071ab9ecb600f4a9832 | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/opinions/AlgorithmicBiasModel.py#L54-L64 | train |
HearthSim/python-hearthstone | hearthstone/entities.py | Player.names | def names(self):
"""
Returns the player's name and real name.
Returns two empty strings if the player is unknown.
AI real name is always an empty string.
"""
if self.name == self.UNKNOWN_HUMAN_PLAYER:
return "", ""
if not self.is_ai and " " in self.name:
return "", self.name
return self.name, "" | python | def names(self):
"""
Returns the player's name and real name.
Returns two empty strings if the player is unknown.
AI real name is always an empty string.
"""
if self.name == self.UNKNOWN_HUMAN_PLAYER:
return "", ""
if not self.is_ai and " " in self.name:
return "", self.name
return self.name, "" | [
"def",
"names",
"(",
"self",
")",
":",
"if",
"self",
".",
"name",
"==",
"self",
".",
"UNKNOWN_HUMAN_PLAYER",
":",
"return",
"\"\"",
",",
"\"\"",
"if",
"not",
"self",
".",
"is_ai",
"and",
"\" \"",
"in",
"self",
".",
"name",
":",
"return",
"\"\"",
",",
"self",
".",
"name",
"return",
"self",
".",
"name",
",",
"\"\""
] | Returns the player's name and real name.
Returns two empty strings if the player is unknown.
AI real name is always an empty string. | [
"Returns",
"the",
"player",
"s",
"name",
"and",
"real",
"name",
".",
"Returns",
"two",
"empty",
"strings",
"if",
"the",
"player",
"is",
"unknown",
".",
"AI",
"real",
"name",
"is",
"always",
"an",
"empty",
"string",
"."
] | 3690b714248b578dcbba8a492bf228ff09a6aeaf | https://github.com/HearthSim/python-hearthstone/blob/3690b714248b578dcbba8a492bf228ff09a6aeaf/hearthstone/entities.py#L147-L159 | train |
scikit-hep/root_pandas | root_pandas/readwrite.py | _getgroup | def _getgroup(string, depth):
"""
Get a group from the string, where group is a list of all the comma
separated substrings up to the next '}' char or the brace enclosed substring
if there is no comma
"""
out, comma = [], False
while string:
items, string = _getitem(string, depth)
if not string:
break
out += items
if string[0] == '}':
if comma:
return out, string[1:]
return ['{' + a + '}' for a in out], string[1:]
if string[0] == ',':
comma, string = True, string[1:]
return None | python | def _getgroup(string, depth):
"""
Get a group from the string, where group is a list of all the comma
separated substrings up to the next '}' char or the brace enclosed substring
if there is no comma
"""
out, comma = [], False
while string:
items, string = _getitem(string, depth)
if not string:
break
out += items
if string[0] == '}':
if comma:
return out, string[1:]
return ['{' + a + '}' for a in out], string[1:]
if string[0] == ',':
comma, string = True, string[1:]
return None | [
"def",
"_getgroup",
"(",
"string",
",",
"depth",
")",
":",
"out",
",",
"comma",
"=",
"[",
"]",
",",
"False",
"while",
"string",
":",
"items",
",",
"string",
"=",
"_getitem",
"(",
"string",
",",
"depth",
")",
"if",
"not",
"string",
":",
"break",
"out",
"+=",
"items",
"if",
"string",
"[",
"0",
"]",
"==",
"'}'",
":",
"if",
"comma",
":",
"return",
"out",
",",
"string",
"[",
"1",
":",
"]",
"return",
"[",
"'{'",
"+",
"a",
"+",
"'}'",
"for",
"a",
"in",
"out",
"]",
",",
"string",
"[",
"1",
":",
"]",
"if",
"string",
"[",
"0",
"]",
"==",
"','",
":",
"comma",
",",
"string",
"=",
"True",
",",
"string",
"[",
"1",
":",
"]",
"return",
"None"
] | Get a group from the string, where group is a list of all the comma
separated substrings up to the next '}' char or the brace enclosed substring
if there is no comma | [
"Get",
"a",
"group",
"from",
"the",
"string",
"where",
"group",
"is",
"a",
"list",
"of",
"all",
"the",
"comma",
"separated",
"substrings",
"up",
"to",
"the",
"next",
"}",
"char",
"or",
"the",
"brace",
"enclosed",
"substring",
"if",
"there",
"is",
"no",
"comma"
] | 57991a4feaeb9213575cfba7a369fc05cc0d846b | https://github.com/scikit-hep/root_pandas/blob/57991a4feaeb9213575cfba7a369fc05cc0d846b/root_pandas/readwrite.py#L55-L77 | train |
scikit-hep/root_pandas | root_pandas/readwrite.py | filter_noexpand_columns | def filter_noexpand_columns(columns):
"""Return columns not containing and containing the noexpand prefix.
Parameters
----------
columns: sequence of str
A sequence of strings to be split
Returns
-------
Two lists, the first containing strings without the noexpand prefix, the
second containing those that do with the prefix filtered out.
"""
prefix_len = len(NOEXPAND_PREFIX)
noexpand = [c[prefix_len:] for c in columns if c.startswith(NOEXPAND_PREFIX)]
other = [c for c in columns if not c.startswith(NOEXPAND_PREFIX)]
return other, noexpand | python | def filter_noexpand_columns(columns):
"""Return columns not containing and containing the noexpand prefix.
Parameters
----------
columns: sequence of str
A sequence of strings to be split
Returns
-------
Two lists, the first containing strings without the noexpand prefix, the
second containing those that do with the prefix filtered out.
"""
prefix_len = len(NOEXPAND_PREFIX)
noexpand = [c[prefix_len:] for c in columns if c.startswith(NOEXPAND_PREFIX)]
other = [c for c in columns if not c.startswith(NOEXPAND_PREFIX)]
return other, noexpand | [
"def",
"filter_noexpand_columns",
"(",
"columns",
")",
":",
"prefix_len",
"=",
"len",
"(",
"NOEXPAND_PREFIX",
")",
"noexpand",
"=",
"[",
"c",
"[",
"prefix_len",
":",
"]",
"for",
"c",
"in",
"columns",
"if",
"c",
".",
"startswith",
"(",
"NOEXPAND_PREFIX",
")",
"]",
"other",
"=",
"[",
"c",
"for",
"c",
"in",
"columns",
"if",
"not",
"c",
".",
"startswith",
"(",
"NOEXPAND_PREFIX",
")",
"]",
"return",
"other",
",",
"noexpand"
] | Return columns not containing and containing the noexpand prefix.
Parameters
----------
columns: sequence of str
A sequence of strings to be split
Returns
-------
Two lists, the first containing strings without the noexpand prefix, the
second containing those that do with the prefix filtered out. | [
"Return",
"columns",
"not",
"containing",
"and",
"containing",
"the",
"noexpand",
"prefix",
"."
] | 57991a4feaeb9213575cfba7a369fc05cc0d846b | https://github.com/scikit-hep/root_pandas/blob/57991a4feaeb9213575cfba7a369fc05cc0d846b/root_pandas/readwrite.py#L117-L133 | train |
scikit-hep/root_pandas | root_pandas/readwrite.py | to_root | def to_root(df, path, key='my_ttree', mode='w', store_index=True, *args, **kwargs):
"""
Write DataFrame to a ROOT file.
Parameters
----------
path: string
File path to new ROOT file (will be overwritten)
key: string
Name of tree that the DataFrame will be saved as
mode: string, {'w', 'a'}
Mode that the file should be opened in (default: 'w')
store_index: bool (optional, default: True)
Whether the index of the DataFrame should be stored as
an __index__* branch in the tree
Notes
-----
    Further *args and **kwargs are passed to root_numpy's array2root.
>>> df = DataFrame({'x': [1,2,3], 'y': [4,5,6]})
>>> df.to_root('test.root')
The DataFrame index will be saved as a branch called '__index__*',
where * is the name of the index in the original DataFrame
"""
if mode == 'a':
mode = 'update'
elif mode == 'w':
mode = 'recreate'
else:
raise ValueError('Unknown mode: {}. Must be "a" or "w".'.format(mode))
from root_numpy import array2tree
# We don't want to modify the user's DataFrame here, so we make a shallow copy
df_ = df.copy(deep=False)
if store_index:
name = df_.index.name
if name is None:
# Handle the case where the index has no name
name = ''
df_['__index__' + name] = df_.index
# Convert categorical columns into something root_numpy can serialise
for col in df_.select_dtypes(['category']).columns:
name_components = ['__rpCaT', col, str(df_[col].cat.ordered)]
name_components.extend(df_[col].cat.categories)
if ['*' not in c for c in name_components]:
sep = '*'
else:
raise ValueError('Unable to find suitable separator for columns')
df_[col] = df_[col].cat.codes
df_.rename(index=str, columns={col: sep.join(name_components)}, inplace=True)
arr = df_.to_records(index=False)
root_file = ROOT.TFile.Open(path, mode)
if not root_file:
raise IOError("cannot open file {0}".format(path))
if not root_file.IsWritable():
raise IOError("file {0} is not writable".format(path))
# Navigate to the requested directory
open_dirs = [root_file]
for dir_name in key.split('/')[:-1]:
current_dir = open_dirs[-1].Get(dir_name)
if not current_dir:
current_dir = open_dirs[-1].mkdir(dir_name)
current_dir.cd()
open_dirs.append(current_dir)
# The key is now just the top component
key = key.split('/')[-1]
# If a tree with that name exists, we want to update it
tree = open_dirs[-1].Get(key)
if not tree:
tree = None
tree = array2tree(arr, name=key, tree=tree)
tree.Write(key, ROOT.TFile.kOverwrite)
root_file.Close() | python | def to_root(df, path, key='my_ttree', mode='w', store_index=True, *args, **kwargs):
"""
Write DataFrame to a ROOT file.
Parameters
----------
path: string
File path to new ROOT file (will be overwritten)
key: string
Name of tree that the DataFrame will be saved as
mode: string, {'w', 'a'}
Mode that the file should be opened in (default: 'w')
store_index: bool (optional, default: True)
Whether the index of the DataFrame should be stored as
an __index__* branch in the tree
Notes
-----
    Further *args and **kwargs are passed to root_numpy's array2root.
>>> df = DataFrame({'x': [1,2,3], 'y': [4,5,6]})
>>> df.to_root('test.root')
The DataFrame index will be saved as a branch called '__index__*',
where * is the name of the index in the original DataFrame
"""
if mode == 'a':
mode = 'update'
elif mode == 'w':
mode = 'recreate'
else:
raise ValueError('Unknown mode: {}. Must be "a" or "w".'.format(mode))
from root_numpy import array2tree
# We don't want to modify the user's DataFrame here, so we make a shallow copy
df_ = df.copy(deep=False)
if store_index:
name = df_.index.name
if name is None:
# Handle the case where the index has no name
name = ''
df_['__index__' + name] = df_.index
# Convert categorical columns into something root_numpy can serialise
for col in df_.select_dtypes(['category']).columns:
name_components = ['__rpCaT', col, str(df_[col].cat.ordered)]
name_components.extend(df_[col].cat.categories)
if ['*' not in c for c in name_components]:
sep = '*'
else:
raise ValueError('Unable to find suitable separator for columns')
df_[col] = df_[col].cat.codes
df_.rename(index=str, columns={col: sep.join(name_components)}, inplace=True)
arr = df_.to_records(index=False)
root_file = ROOT.TFile.Open(path, mode)
if not root_file:
raise IOError("cannot open file {0}".format(path))
if not root_file.IsWritable():
raise IOError("file {0} is not writable".format(path))
# Navigate to the requested directory
open_dirs = [root_file]
for dir_name in key.split('/')[:-1]:
current_dir = open_dirs[-1].Get(dir_name)
if not current_dir:
current_dir = open_dirs[-1].mkdir(dir_name)
current_dir.cd()
open_dirs.append(current_dir)
# The key is now just the top component
key = key.split('/')[-1]
# If a tree with that name exists, we want to update it
tree = open_dirs[-1].Get(key)
if not tree:
tree = None
tree = array2tree(arr, name=key, tree=tree)
tree.Write(key, ROOT.TFile.kOverwrite)
root_file.Close() | [
"def",
"to_root",
"(",
"df",
",",
"path",
",",
"key",
"=",
"'my_ttree'",
",",
"mode",
"=",
"'w'",
",",
"store_index",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"mode",
"==",
"'a'",
":",
"mode",
"=",
"'update'",
"elif",
"mode",
"==",
"'w'",
":",
"mode",
"=",
"'recreate'",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown mode: {}. Must be \"a\" or \"w\".'",
".",
"format",
"(",
"mode",
")",
")",
"from",
"root_numpy",
"import",
"array2tree",
"# We don't want to modify the user's DataFrame here, so we make a shallow copy",
"df_",
"=",
"df",
".",
"copy",
"(",
"deep",
"=",
"False",
")",
"if",
"store_index",
":",
"name",
"=",
"df_",
".",
"index",
".",
"name",
"if",
"name",
"is",
"None",
":",
"# Handle the case where the index has no name",
"name",
"=",
"''",
"df_",
"[",
"'__index__'",
"+",
"name",
"]",
"=",
"df_",
".",
"index",
"# Convert categorical columns into something root_numpy can serialise",
"for",
"col",
"in",
"df_",
".",
"select_dtypes",
"(",
"[",
"'category'",
"]",
")",
".",
"columns",
":",
"name_components",
"=",
"[",
"'__rpCaT'",
",",
"col",
",",
"str",
"(",
"df_",
"[",
"col",
"]",
".",
"cat",
".",
"ordered",
")",
"]",
"name_components",
".",
"extend",
"(",
"df_",
"[",
"col",
"]",
".",
"cat",
".",
"categories",
")",
"if",
"[",
"'*'",
"not",
"in",
"c",
"for",
"c",
"in",
"name_components",
"]",
":",
"sep",
"=",
"'*'",
"else",
":",
"raise",
"ValueError",
"(",
"'Unable to find suitable separator for columns'",
")",
"df_",
"[",
"col",
"]",
"=",
"df_",
"[",
"col",
"]",
".",
"cat",
".",
"codes",
"df_",
".",
"rename",
"(",
"index",
"=",
"str",
",",
"columns",
"=",
"{",
"col",
":",
"sep",
".",
"join",
"(",
"name_components",
")",
"}",
",",
"inplace",
"=",
"True",
")",
"arr",
"=",
"df_",
".",
"to_records",
"(",
"index",
"=",
"False",
")",
"root_file",
"=",
"ROOT",
".",
"TFile",
".",
"Open",
"(",
"path",
",",
"mode",
")",
"if",
"not",
"root_file",
":",
"raise",
"IOError",
"(",
"\"cannot open file {0}\"",
".",
"format",
"(",
"path",
")",
")",
"if",
"not",
"root_file",
".",
"IsWritable",
"(",
")",
":",
"raise",
"IOError",
"(",
"\"file {0} is not writable\"",
".",
"format",
"(",
"path",
")",
")",
"# Navigate to the requested directory",
"open_dirs",
"=",
"[",
"root_file",
"]",
"for",
"dir_name",
"in",
"key",
".",
"split",
"(",
"'/'",
")",
"[",
":",
"-",
"1",
"]",
":",
"current_dir",
"=",
"open_dirs",
"[",
"-",
"1",
"]",
".",
"Get",
"(",
"dir_name",
")",
"if",
"not",
"current_dir",
":",
"current_dir",
"=",
"open_dirs",
"[",
"-",
"1",
"]",
".",
"mkdir",
"(",
"dir_name",
")",
"current_dir",
".",
"cd",
"(",
")",
"open_dirs",
".",
"append",
"(",
"current_dir",
")",
"# The key is now just the top component",
"key",
"=",
"key",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"# If a tree with that name exists, we want to update it",
"tree",
"=",
"open_dirs",
"[",
"-",
"1",
"]",
".",
"Get",
"(",
"key",
")",
"if",
"not",
"tree",
":",
"tree",
"=",
"None",
"tree",
"=",
"array2tree",
"(",
"arr",
",",
"name",
"=",
"key",
",",
"tree",
"=",
"tree",
")",
"tree",
".",
"Write",
"(",
"key",
",",
"ROOT",
".",
"TFile",
".",
"kOverwrite",
")",
"root_file",
".",
"Close",
"(",
")"
] | Write DataFrame to a ROOT file.
Parameters
----------
path: string
File path to new ROOT file (will be overwritten)
key: string
Name of tree that the DataFrame will be saved as
mode: string, {'w', 'a'}
Mode that the file should be opened in (default: 'w')
store_index: bool (optional, default: True)
Whether the index of the DataFrame should be stored as
an __index__* branch in the tree
Notes
-----
Further *args and *kwargs are passed to root_numpy's array2root.
>>> df = DataFrame({'x': [1,2,3], 'y': [4,5,6]})
>>> df.to_root('test.root')
The DataFrame index will be saved as a branch called '__index__*',
where * is the name of the index in the original DataFrame | [
"Write",
"DataFrame",
"to",
"a",
"ROOT",
"file",
"."
] | 57991a4feaeb9213575cfba7a369fc05cc0d846b | https://github.com/scikit-hep/root_pandas/blob/57991a4feaeb9213575cfba7a369fc05cc0d846b/root_pandas/readwrite.py#L334-L417 | train |
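The categorical-column handling in to_root above folds the column's categories into the branch name so they can be restored on read. A pandas-only sketch of that renaming step with an illustrative frame (the column names and data are made up; no ROOT is needed to run it):

import pandas as pd

df = pd.DataFrame({'x': [1, 2, 3],
                   'flag': pd.Categorical(['a', 'b', 'a'], ordered=False)})

for col in df.select_dtypes(['category']).columns:
    # Same composite name as above: marker, column name, ordered flag, then the categories.
    name_components = ['__rpCaT', col, str(df[col].cat.ordered)]
    name_components.extend(df[col].cat.categories)
    sep = '*'  # usable here because no component contains '*'
    df[col] = df[col].cat.codes               # the tree branch stores integer codes
    df.rename(columns={col: sep.join(name_components)}, inplace=True)

print(df.columns.tolist())        # ['x', '__rpCaT*flag*False*a*b']
print(df.to_records(index=False))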
MisterY/gnucash-portfolio | gnucash_portfolio/reports/security_info.py | SecurityInfoReport.run | def run(self, symbol: str) -> SecurityDetailsViewModel:
""" Loads the model for security details """
from pydatum import Datum
svc = self._svc
sec_agg = svc.securities.get_aggregate_for_symbol(symbol)
model = SecurityDetailsViewModel()
model.symbol = sec_agg.security.namespace + ":" + sec_agg.security.mnemonic
model.security = sec_agg.security
# Quantity
model.quantity = sec_agg.get_quantity()
model.value = sec_agg.get_value()
currency = sec_agg.get_currency()
if currency:
assert isinstance(currency, str)
model.currency = currency
model.price = sec_agg.get_last_available_price()
model.average_price = sec_agg.get_avg_price()
# Here we take only the amount paid for the remaining stock.
model.total_paid = sec_agg.get_total_paid_for_remaining_stock()
# Profit/loss
model.profit_loss = model.value - model.total_paid
if model.total_paid:
model.profit_loss_perc = abs(model.profit_loss) * 100 / model.total_paid
else:
model.profit_loss_perc = 0
if abs(model.value) < abs(model.total_paid):
model.profit_loss_perc *= -1
# Income
model.income = sec_agg.get_income_total()
if model.total_paid:
model.income_perc = model.income * 100 / model.total_paid
else:
model.income_perc = 0
# income in the last 12 months
start = Datum()
start.subtract_months(12)
end = Datum()
model.income_last_12m = sec_agg.get_income_in_period(start, end)
if model.total_paid == 0:
model.income_perc_last_12m = 0
else:
model.income_perc_last_12m = model.income_last_12m * 100 / model.total_paid
# Return of Capital
roc = sec_agg.get_return_of_capital()
model.return_of_capital = roc
# total return
model.total_return = model.profit_loss + model.income
if model.total_paid:
model.total_return_perc = model.total_return * 100 / model.total_paid
else:
model.total_return_perc = 0
# load all holding accounts
model.accounts = sec_agg.accounts
# Income accounts
model.income_accounts = sec_agg.get_income_accounts()
# Load asset classes to which this security belongs.
# todo load asset allocation, find the parents for this symbol
# svc.asset_allocation.load_config_only(svc.currencies.default_currency)
# stocks = svc.asset_allocation.get_stock(model.symbol)
#
# for stock in stocks:
# model.asset_classes.append(stock.asset_class)
from asset_allocation import AppAggregate
aa = AppAggregate()
aa.open_session()
aa.get_asset_classes_for_security(None, model.symbol)
return model | python | def run(self, symbol: str) -> SecurityDetailsViewModel:
""" Loads the model for security details """
from pydatum import Datum
svc = self._svc
sec_agg = svc.securities.get_aggregate_for_symbol(symbol)
model = SecurityDetailsViewModel()
model.symbol = sec_agg.security.namespace + ":" + sec_agg.security.mnemonic
model.security = sec_agg.security
# Quantity
model.quantity = sec_agg.get_quantity()
model.value = sec_agg.get_value()
currency = sec_agg.get_currency()
if currency:
assert isinstance(currency, str)
model.currency = currency
model.price = sec_agg.get_last_available_price()
model.average_price = sec_agg.get_avg_price()
# Here we take only the amount paid for the remaining stock.
model.total_paid = sec_agg.get_total_paid_for_remaining_stock()
# Profit/loss
model.profit_loss = model.value - model.total_paid
if model.total_paid:
model.profit_loss_perc = abs(model.profit_loss) * 100 / model.total_paid
else:
model.profit_loss_perc = 0
if abs(model.value) < abs(model.total_paid):
model.profit_loss_perc *= -1
# Income
model.income = sec_agg.get_income_total()
if model.total_paid:
model.income_perc = model.income * 100 / model.total_paid
else:
model.income_perc = 0
# income in the last 12 months
start = Datum()
start.subtract_months(12)
end = Datum()
model.income_last_12m = sec_agg.get_income_in_period(start, end)
if model.total_paid == 0:
model.income_perc_last_12m = 0
else:
model.income_perc_last_12m = model.income_last_12m * 100 / model.total_paid
# Return of Capital
roc = sec_agg.get_return_of_capital()
model.return_of_capital = roc
# total return
model.total_return = model.profit_loss + model.income
if model.total_paid:
model.total_return_perc = model.total_return * 100 / model.total_paid
else:
model.total_return_perc = 0
# load all holding accounts
model.accounts = sec_agg.accounts
# Income accounts
model.income_accounts = sec_agg.get_income_accounts()
# Load asset classes to which this security belongs.
# todo load asset allocation, find the parents for this symbol
# svc.asset_allocation.load_config_only(svc.currencies.default_currency)
# stocks = svc.asset_allocation.get_stock(model.symbol)
#
# for stock in stocks:
# model.asset_classes.append(stock.asset_class)
from asset_allocation import AppAggregate
aa = AppAggregate()
aa.open_session()
aa.get_asset_classes_for_security(None, model.symbol)
return model | [
"def",
"run",
"(",
"self",
",",
"symbol",
":",
"str",
")",
"->",
"SecurityDetailsViewModel",
":",
"from",
"pydatum",
"import",
"Datum",
"svc",
"=",
"self",
".",
"_svc",
"sec_agg",
"=",
"svc",
".",
"securities",
".",
"get_aggregate_for_symbol",
"(",
"symbol",
")",
"model",
"=",
"SecurityDetailsViewModel",
"(",
")",
"model",
".",
"symbol",
"=",
"sec_agg",
".",
"security",
".",
"namespace",
"+",
"\":\"",
"+",
"sec_agg",
".",
"security",
".",
"mnemonic",
"model",
".",
"security",
"=",
"sec_agg",
".",
"security",
"# Quantity",
"model",
".",
"quantity",
"=",
"sec_agg",
".",
"get_quantity",
"(",
")",
"model",
".",
"value",
"=",
"sec_agg",
".",
"get_value",
"(",
")",
"currency",
"=",
"sec_agg",
".",
"get_currency",
"(",
")",
"if",
"currency",
":",
"assert",
"isinstance",
"(",
"currency",
",",
"str",
")",
"model",
".",
"currency",
"=",
"currency",
"model",
".",
"price",
"=",
"sec_agg",
".",
"get_last_available_price",
"(",
")",
"model",
".",
"average_price",
"=",
"sec_agg",
".",
"get_avg_price",
"(",
")",
"# Here we take only the amount paid for the remaining stock.",
"model",
".",
"total_paid",
"=",
"sec_agg",
".",
"get_total_paid_for_remaining_stock",
"(",
")",
"# Profit/loss",
"model",
".",
"profit_loss",
"=",
"model",
".",
"value",
"-",
"model",
".",
"total_paid",
"if",
"model",
".",
"total_paid",
":",
"model",
".",
"profit_loss_perc",
"=",
"abs",
"(",
"model",
".",
"profit_loss",
")",
"*",
"100",
"/",
"model",
".",
"total_paid",
"else",
":",
"model",
".",
"profit_loss_perc",
"=",
"0",
"if",
"abs",
"(",
"model",
".",
"value",
")",
"<",
"abs",
"(",
"model",
".",
"total_paid",
")",
":",
"model",
".",
"profit_loss_perc",
"*=",
"-",
"1",
"# Income",
"model",
".",
"income",
"=",
"sec_agg",
".",
"get_income_total",
"(",
")",
"if",
"model",
".",
"total_paid",
":",
"model",
".",
"income_perc",
"=",
"model",
".",
"income",
"*",
"100",
"/",
"model",
".",
"total_paid",
"else",
":",
"model",
".",
"income_perc",
"=",
"0",
"# income in the last 12 months",
"start",
"=",
"Datum",
"(",
")",
"start",
".",
"subtract_months",
"(",
"12",
")",
"end",
"=",
"Datum",
"(",
")",
"model",
".",
"income_last_12m",
"=",
"sec_agg",
".",
"get_income_in_period",
"(",
"start",
",",
"end",
")",
"if",
"model",
".",
"total_paid",
"==",
"0",
":",
"model",
".",
"income_perc_last_12m",
"=",
"0",
"else",
":",
"model",
".",
"income_perc_last_12m",
"=",
"model",
".",
"income_last_12m",
"*",
"100",
"/",
"model",
".",
"total_paid",
"# Return of Capital",
"roc",
"=",
"sec_agg",
".",
"get_return_of_capital",
"(",
")",
"model",
".",
"return_of_capital",
"=",
"roc",
"# total return",
"model",
".",
"total_return",
"=",
"model",
".",
"profit_loss",
"+",
"model",
".",
"income",
"if",
"model",
".",
"total_paid",
":",
"model",
".",
"total_return_perc",
"=",
"model",
".",
"total_return",
"*",
"100",
"/",
"model",
".",
"total_paid",
"else",
":",
"model",
".",
"total_return_perc",
"=",
"0",
"# load all holding accounts",
"model",
".",
"accounts",
"=",
"sec_agg",
".",
"accounts",
"# Income accounts",
"model",
".",
"income_accounts",
"=",
"sec_agg",
".",
"get_income_accounts",
"(",
")",
"# Load asset classes to which this security belongs.",
"# todo load asset allocation, find the parents for this symbol",
"# svc.asset_allocation.load_config_only(svc.currencies.default_currency)",
"# stocks = svc.asset_allocation.get_stock(model.symbol)",
"#",
"# for stock in stocks:",
"# model.asset_classes.append(stock.asset_class)",
"from",
"asset_allocation",
"import",
"AppAggregate",
"aa",
"=",
"AppAggregate",
"(",
")",
"aa",
".",
"open_session",
"(",
")",
"aa",
".",
"get_asset_classes_for_security",
"(",
"None",
",",
"model",
".",
"symbol",
")",
"return",
"model"
] | Loads the model for security details | [
"Loads",
"the",
"model",
"for",
"security",
"details"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/reports/security_info.py#L15-L93 | train |
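The percentage bookkeeping in run() can be sanity-checked in isolation. A standalone sketch with made-up figures (none of these numbers come from a real book):

from decimal import Decimal

value = Decimal('1200')        # current value of the holding
total_paid = Decimal('1000')   # paid for the remaining stock
income = Decimal('50')         # dividends and other income

profit_loss = value - total_paid                        # 200
profit_loss_perc = abs(profit_loss) * 100 / total_paid  # 20
if abs(value) < abs(total_paid):                        # a loss flips the sign
    profit_loss_perc *= -1

income_perc = income * 100 / total_paid                 # 5
total_return = profit_loss + income                     # 250
total_return_perc = total_return * 100 / total_paid     # 25

print(profit_loss_perc, income_perc, total_return_perc)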
MisterY/gnucash-portfolio | gnucash_portfolio/scheduledtxaggregate.py | handle_friday | def handle_friday(next_date: Datum, period: str, mult: int, start_date: Datum):
""" Extracted the calculation for when the next_day is Friday """
assert isinstance(next_date, Datum)
assert isinstance(start_date, Datum)
# Starting from line 220.
tmp_sat = next_date.clone()
tmp_sat.add_days(1)
tmp_sun = next_date.clone()
tmp_sun.add_days(2)
if period == RecurrencePeriod.END_OF_MONTH.value:
if (next_date.is_end_of_month() or tmp_sat.is_end_of_month() or
tmp_sun.is_end_of_month()):
next_date.add_months(1)
else:
next_date.add_months(mult - 1)
else:
if tmp_sat.get_day_name() == start_date.get_day_name():
next_date.add_days(1)
next_date.add_months(mult)
elif tmp_sun.get_day_name() == start_date.get_day_name():
next_date.add_days(2)
next_date.add_months(mult)
elif next_date.get_day() >= start_date.get_day():
next_date.add_months(mult)
elif next_date.is_end_of_month():
next_date.add_months(mult)
elif tmp_sat.is_end_of_month():
next_date.add_days(1)
next_date.add_months(mult)
elif tmp_sun.is_end_of_month():
next_date.add_days(2)
next_date.add_months(mult)
else:
# /* one fewer month fwd because of the occurrence in this month */
next_date.subtract_months(1)
return next_date | python | def handle_friday(next_date: Datum, period: str, mult: int, start_date: Datum):
""" Extracted the calculation for when the next_day is Friday """
assert isinstance(next_date, Datum)
assert isinstance(start_date, Datum)
# Starting from line 220.
tmp_sat = next_date.clone()
tmp_sat.add_days(1)
tmp_sun = next_date.clone()
tmp_sun.add_days(2)
if period == RecurrencePeriod.END_OF_MONTH.value:
if (next_date.is_end_of_month() or tmp_sat.is_end_of_month() or
tmp_sun.is_end_of_month()):
next_date.add_months(1)
else:
next_date.add_months(mult - 1)
else:
if tmp_sat.get_day_name() == start_date.get_day_name():
next_date.add_days(1)
next_date.add_months(mult)
elif tmp_sun.get_day_name() == start_date.get_day_name():
next_date.add_days(2)
next_date.add_months(mult)
elif next_date.get_day() >= start_date.get_day():
next_date.add_months(mult)
elif next_date.is_end_of_month():
next_date.add_months(mult)
elif tmp_sat.is_end_of_month():
next_date.add_days(1)
next_date.add_months(mult)
elif tmp_sun.is_end_of_month():
next_date.add_days(2)
next_date.add_months(mult)
else:
# /* one fewer month fwd because of the occurrence in this month */
next_date.subtract_months(1)
return next_date | [
"def",
"handle_friday",
"(",
"next_date",
":",
"Datum",
",",
"period",
":",
"str",
",",
"mult",
":",
"int",
",",
"start_date",
":",
"Datum",
")",
":",
"assert",
"isinstance",
"(",
"next_date",
",",
"Datum",
")",
"assert",
"isinstance",
"(",
"start_date",
",",
"Datum",
")",
"# Starting from line 220.",
"tmp_sat",
"=",
"next_date",
".",
"clone",
"(",
")",
"tmp_sat",
".",
"add_days",
"(",
"1",
")",
"tmp_sun",
"=",
"next_date",
".",
"clone",
"(",
")",
"tmp_sun",
".",
"add_days",
"(",
"2",
")",
"if",
"period",
"==",
"RecurrencePeriod",
".",
"END_OF_MONTH",
".",
"value",
":",
"if",
"(",
"next_date",
".",
"is_end_of_month",
"(",
")",
"or",
"tmp_sat",
".",
"is_end_of_month",
"(",
")",
"or",
"tmp_sun",
".",
"is_end_of_month",
"(",
")",
")",
":",
"next_date",
".",
"add_months",
"(",
"1",
")",
"else",
":",
"next_date",
".",
"add_months",
"(",
"mult",
"-",
"1",
")",
"else",
":",
"if",
"tmp_sat",
".",
"get_day_name",
"(",
")",
"==",
"start_date",
".",
"get_day_name",
"(",
")",
":",
"next_date",
".",
"add_days",
"(",
"1",
")",
"next_date",
".",
"add_months",
"(",
"mult",
")",
"elif",
"tmp_sun",
".",
"get_day_name",
"(",
")",
"==",
"start_date",
".",
"get_day_name",
"(",
")",
":",
"next_date",
".",
"add_days",
"(",
"2",
")",
"next_date",
".",
"add_months",
"(",
"mult",
")",
"elif",
"next_date",
".",
"get_day",
"(",
")",
">=",
"start_date",
".",
"get_day",
"(",
")",
":",
"next_date",
".",
"add_months",
"(",
"mult",
")",
"elif",
"next_date",
".",
"is_end_of_month",
"(",
")",
":",
"next_date",
".",
"add_months",
"(",
"mult",
")",
"elif",
"tmp_sat",
".",
"is_end_of_month",
"(",
")",
":",
"next_date",
".",
"add_days",
"(",
"1",
")",
"next_date",
".",
"add_months",
"(",
"mult",
")",
"elif",
"tmp_sun",
".",
"is_end_of_month",
"(",
")",
":",
"next_date",
".",
"add_days",
"(",
"2",
")",
"next_date",
".",
"add_months",
"(",
"mult",
")",
"else",
":",
"# /* one fewer month fwd because of the occurrence in this month */",
"next_date",
".",
"subtract_months",
"(",
"1",
")",
"return",
"next_date"
] | Extracted the calculation for when the next_day is Friday | [
"Extracted",
"the",
"calculation",
"for",
"when",
"the",
"next_day",
"is",
"Friday"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/scheduledtxaggregate.py#L173-L212 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/scheduledtxaggregate.py | ScheduledTxAggregate.get_next_occurrence | def get_next_occurrence(self) -> date:
""" Returns the next occurrence date for transaction """
result = get_next_occurrence(self.transaction)
assert isinstance(result, date)
return result | python | def get_next_occurrence(self) -> date:
""" Returns the next occurrence date for transaction """
result = get_next_occurrence(self.transaction)
assert isinstance(result, date)
return result | [
"def",
"get_next_occurrence",
"(",
"self",
")",
"->",
"date",
":",
"result",
"=",
"get_next_occurrence",
"(",
"self",
".",
"transaction",
")",
"assert",
"isinstance",
"(",
"result",
",",
"date",
")",
"return",
"result"
] | Returns the next occurrence date for transaction | [
"Returns",
"the",
"next",
"occurrence",
"date",
"for",
"transaction"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/scheduledtxaggregate.py#L222-L226 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/scheduledtxaggregate.py | ScheduledTxsAggregate.get_enabled | def get_enabled(self) -> List[ScheduledTransaction]:
""" Returns only enabled scheduled transactions """
query = (
self.query
.filter(ScheduledTransaction.enabled == True)
)
return query.all() | python | def get_enabled(self) -> List[ScheduledTransaction]:
""" Returns only enabled scheduled transactions """
query = (
self.query
.filter(ScheduledTransaction.enabled == True)
)
return query.all() | [
"def",
"get_enabled",
"(",
"self",
")",
"->",
"List",
"[",
"ScheduledTransaction",
"]",
":",
"query",
"=",
"(",
"self",
".",
"query",
".",
"filter",
"(",
"ScheduledTransaction",
".",
"enabled",
"==",
"True",
")",
")",
"return",
"query",
".",
"all",
"(",
")"
] | Returns only enabled scheduled transactions | [
"Returns",
"only",
"enabled",
"scheduled",
"transactions"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/scheduledtxaggregate.py#L254-L260 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/scheduledtxaggregate.py | ScheduledTxsAggregate.get_by_id | def get_by_id(self, tx_id: str) -> ScheduledTransaction:
""" Fetches a tx by id """
return self.query.filter(ScheduledTransaction.guid == tx_id).first() | python | def get_by_id(self, tx_id: str) -> ScheduledTransaction:
""" Fetches a tx by id """
return self.query.filter(ScheduledTransaction.guid == tx_id).first() | [
"def",
"get_by_id",
"(",
"self",
",",
"tx_id",
":",
"str",
")",
"->",
"ScheduledTransaction",
":",
"return",
"self",
".",
"query",
".",
"filter",
"(",
"ScheduledTransaction",
".",
"guid",
"==",
"tx_id",
")",
".",
"first",
"(",
")"
] | Fetches a tx by id | [
"Fetches",
"a",
"tx",
"by",
"id"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/scheduledtxaggregate.py#L262-L264 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/scheduledtxaggregate.py | ScheduledTxsAggregate.get_aggregate_by_id | def get_aggregate_by_id(self, tx_id: str) -> ScheduledTxAggregate:
""" Creates an aggregate for single entity """
tran = self.get_by_id(tx_id)
return self.get_aggregate_for(tran) | python | def get_aggregate_by_id(self, tx_id: str) -> ScheduledTxAggregate:
""" Creates an aggregate for single entity """
tran = self.get_by_id(tx_id)
return self.get_aggregate_for(tran) | [
"def",
"get_aggregate_by_id",
"(",
"self",
",",
"tx_id",
":",
"str",
")",
"->",
"ScheduledTxAggregate",
":",
"tran",
"=",
"self",
".",
"get_by_id",
"(",
"tx_id",
")",
"return",
"self",
".",
"get_aggregate_for",
"(",
"tran",
")"
] | Creates an aggregate for single entity | [
"Creates",
"an",
"aggregate",
"for",
"single",
"entity"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/scheduledtxaggregate.py#L270-L273 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_avg_price_stat | def get_avg_price_stat(self) -> Decimal:
"""
Calculates the statistical average price for the security,
by averaging only the prices paid. Very simple first implementation.
"""
avg_price = Decimal(0)
price_total = Decimal(0)
price_count = 0
for account in self.security.accounts:
# Ignore trading accounts.
if account.type == AccountType.TRADING.name:
continue
for split in account.splits:
# Don't count the non-transactions.
if split.quantity == 0:
continue
price = split.value / split.quantity
price_count += 1
price_total += price
if price_count:
avg_price = price_total / price_count
return avg_price | python | def get_avg_price_stat(self) -> Decimal:
"""
Calculates the statistical average price for the security,
by averaging only the prices paid. Very simple first implementation.
"""
avg_price = Decimal(0)
price_total = Decimal(0)
price_count = 0
for account in self.security.accounts:
# Ignore trading accounts.
if account.type == AccountType.TRADING.name:
continue
for split in account.splits:
# Don't count the non-transactions.
if split.quantity == 0:
continue
price = split.value / split.quantity
price_count += 1
price_total += price
if price_count:
avg_price = price_total / price_count
return avg_price | [
"def",
"get_avg_price_stat",
"(",
"self",
")",
"->",
"Decimal",
":",
"avg_price",
"=",
"Decimal",
"(",
"0",
")",
"price_total",
"=",
"Decimal",
"(",
"0",
")",
"price_count",
"=",
"0",
"for",
"account",
"in",
"self",
".",
"security",
".",
"accounts",
":",
"# Ignore trading accounts.",
"if",
"account",
".",
"type",
"==",
"AccountType",
".",
"TRADING",
".",
"name",
":",
"continue",
"for",
"split",
"in",
"account",
".",
"splits",
":",
"# Don't count the non-transactions.",
"if",
"split",
".",
"quantity",
"==",
"0",
":",
"continue",
"price",
"=",
"split",
".",
"value",
"/",
"split",
".",
"quantity",
"price_count",
"+=",
"1",
"price_total",
"+=",
"price",
"if",
"price_count",
":",
"avg_price",
"=",
"price_total",
"/",
"price_count",
"return",
"avg_price"
] | Calculates the statistical average price for the security,
by averaging only the prices paid. Very simple first implementation. | [
"Calculates",
"the",
"statistical",
"average",
"price",
"for",
"the",
"security",
"by",
"averaging",
"only",
"the",
"prices",
"paid",
".",
"Very",
"simple",
"first",
"implementation",
"."
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L41-L67 | train |
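The statistical average above is simply the mean of per-split prices (value divided by quantity), so every split counts once regardless of its size. A quick illustration with two hypothetical buy splits:

from decimal import Decimal

# (value, quantity) of two hypothetical buy splits
splits = [(Decimal('100'), Decimal('10')),   # price 10
          (Decimal('360'), Decimal('30'))]   # price 12

prices = [value / quantity for value, quantity in splits if quantity != 0]
avg_price = sum(prices) / len(prices) if prices else Decimal(0)
print(avg_price)   # 11 -- the larger lot gets no extra weight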
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_avg_price_fifo | def get_avg_price_fifo(self) -> Decimal:
"""
Calculates the average price paid for the security.
security = Commodity
Returns Decimal value.
"""
balance = self.get_quantity()
if not balance:
return Decimal(0)
paid = Decimal(0)
accounts = self.get_holding_accounts()
# get unused splits (quantity and total paid) per account.
for account in accounts:
splits = self.get_available_splits_for_account(account)
for split in splits:
paid += split.value
avg_price = paid / balance
return avg_price | python | def get_avg_price_fifo(self) -> Decimal:
"""
Calculates the average price paid for the security.
security = Commodity
Returns Decimal value.
"""
balance = self.get_quantity()
if not balance:
return Decimal(0)
paid = Decimal(0)
accounts = self.get_holding_accounts()
# get unused splits (quantity and total paid) per account.
for account in accounts:
splits = self.get_available_splits_for_account(account)
for split in splits:
paid += split.value
avg_price = paid / balance
return avg_price | [
"def",
"get_avg_price_fifo",
"(",
"self",
")",
"->",
"Decimal",
":",
"balance",
"=",
"self",
".",
"get_quantity",
"(",
")",
"if",
"not",
"balance",
":",
"return",
"Decimal",
"(",
"0",
")",
"paid",
"=",
"Decimal",
"(",
"0",
")",
"accounts",
"=",
"self",
".",
"get_holding_accounts",
"(",
")",
"# get unused splits (quantity and total paid) per account.",
"for",
"account",
"in",
"accounts",
":",
"splits",
"=",
"self",
".",
"get_available_splits_for_account",
"(",
"account",
")",
"for",
"split",
"in",
"splits",
":",
"paid",
"+=",
"split",
".",
"value",
"avg_price",
"=",
"paid",
"/",
"balance",
"return",
"avg_price"
] | Calculates the average price paid for the security.
security = Commodity
Returns Decimal value. | [
"Calculates",
"the",
"average",
"price",
"paid",
"for",
"the",
"security",
".",
"security",
"=",
"Commodity",
"Returns",
"Decimal",
"value",
"."
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L69-L89 | train |
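get_avg_price_fifo, by contrast, divides the total amount paid by the total quantity held, so larger lots weigh more. With the same two hypothetical splits as above:

from decimal import Decimal

splits = [(Decimal('100'), Decimal('10')),
          (Decimal('360'), Decimal('30'))]

paid = sum(value for value, _ in splits)            # 460
balance = sum(quantity for _, quantity in splits)   # 40
avg_price = paid / balance if balance else Decimal(0)
print(avg_price)   # 11.5, versus 11 from the unweighted average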
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_available_splits_for_account | def get_available_splits_for_account(self, account: Account) -> List[Split]:
""" Returns all unused splits in the account. Used for the calculation of avg.price.
The split that has been partially used will have its quantity reduced to available
quantity only. """
available_splits = []
# get all purchase splits in the account
query = (
self.get_splits_query()
.filter(Split.account == account)
)
buy_splits = (
query.filter(Split.quantity > 0)
.join(Transaction)
.order_by(desc(Transaction.post_date))
).all()
buy_q = sum(split.quantity for split in buy_splits)
sell_splits = query.filter(Split.quantity < 0).all()
sell_q = sum(split.quantity for split in sell_splits)
balance = buy_q + sell_q
if balance == 0:
return available_splits
for real_split in buy_splits:
split = splitmapper.map_split(real_split, SplitModel())
if split.quantity < balance:
# take this split and reduce the balance.
balance -= split.quantity
else:
# This is the last split.
price = split.value / split.quantity
# Take only the remaining quantity.
split.quantity -= balance
# Also adjust the value for easier calculation elsewhere.
split.value = balance * price
# The remaining balance is now distributed into splits.
balance = 0
# add to the collection.
available_splits.append(split)
if balance == 0:
break
return available_splits | python | def get_available_splits_for_account(self, account: Account) -> List[Split]:
""" Returns all unused splits in the account. Used for the calculation of avg.price.
The split that has been partially used will have its quantity reduced to available
quantity only. """
available_splits = []
# get all purchase splits in the account
query = (
self.get_splits_query()
.filter(Split.account == account)
)
buy_splits = (
query.filter(Split.quantity > 0)
.join(Transaction)
.order_by(desc(Transaction.post_date))
).all()
buy_q = sum(split.quantity for split in buy_splits)
sell_splits = query.filter(Split.quantity < 0).all()
sell_q = sum(split.quantity for split in sell_splits)
balance = buy_q + sell_q
if balance == 0:
return available_splits
for real_split in buy_splits:
split = splitmapper.map_split(real_split, SplitModel())
if split.quantity < balance:
# take this split and reduce the balance.
balance -= split.quantity
else:
# This is the last split.
price = split.value / split.quantity
# Take only the remaining quantity.
split.quantity -= balance
# Also adjust the value for easier calculation elsewhere.
split.value = balance * price
# The remaining balance is now distributed into splits.
balance = 0
# add to the collection.
available_splits.append(split)
if balance == 0:
break
return available_splits | [
"def",
"get_available_splits_for_account",
"(",
"self",
",",
"account",
":",
"Account",
")",
"->",
"List",
"[",
"Split",
"]",
":",
"available_splits",
"=",
"[",
"]",
"# get all purchase splits in the account",
"query",
"=",
"(",
"self",
".",
"get_splits_query",
"(",
")",
".",
"filter",
"(",
"Split",
".",
"account",
"==",
"account",
")",
")",
"buy_splits",
"=",
"(",
"query",
".",
"filter",
"(",
"Split",
".",
"quantity",
">",
"0",
")",
".",
"join",
"(",
"Transaction",
")",
".",
"order_by",
"(",
"desc",
"(",
"Transaction",
".",
"post_date",
")",
")",
")",
".",
"all",
"(",
")",
"buy_q",
"=",
"sum",
"(",
"split",
".",
"quantity",
"for",
"split",
"in",
"buy_splits",
")",
"sell_splits",
"=",
"query",
".",
"filter",
"(",
"Split",
".",
"quantity",
"<",
"0",
")",
".",
"all",
"(",
")",
"sell_q",
"=",
"sum",
"(",
"split",
".",
"quantity",
"for",
"split",
"in",
"sell_splits",
")",
"balance",
"=",
"buy_q",
"+",
"sell_q",
"if",
"balance",
"==",
"0",
":",
"return",
"available_splits",
"for",
"real_split",
"in",
"buy_splits",
":",
"split",
"=",
"splitmapper",
".",
"map_split",
"(",
"real_split",
",",
"SplitModel",
"(",
")",
")",
"if",
"split",
".",
"quantity",
"<",
"balance",
":",
"# take this split and reduce the balance.",
"balance",
"-=",
"split",
".",
"quantity",
"else",
":",
"# This is the last split.",
"price",
"=",
"split",
".",
"value",
"/",
"split",
".",
"quantity",
"# Take only the remaining quantity.",
"split",
".",
"quantity",
"-=",
"balance",
"# Also adjust the value for easier calculation elsewhere.",
"split",
".",
"value",
"=",
"balance",
"*",
"price",
"# The remaining balance is now distributed into splits.",
"balance",
"=",
"0",
"# add to the collection.",
"available_splits",
".",
"append",
"(",
"split",
")",
"if",
"balance",
"==",
"0",
":",
"break",
"return",
"available_splits"
] | Returns all unused splits in the account. Used for the calculation of avg.price.
The split that has been partially used will have its quantity reduced to available
quantity only. | [
"Returns",
"all",
"unused",
"splits",
"in",
"the",
"account",
".",
"Used",
"for",
"the",
"calculation",
"of",
"avg",
".",
"price",
".",
"The",
"split",
"that",
"has",
"been",
"partially",
"used",
"will",
"have",
"its",
"quantity",
"reduced",
"to",
"available",
"quantity",
"only",
"."
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L91-L133 | train |
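The loop in get_available_splits_for_account walks the buy splits newest-first and keeps only as much of each lot as is still held, which is what get_avg_price_fifo and get_total_paid_for_remaining_stock later sum. A self-contained sketch of that walk with hypothetical lots and a hypothetical sold quantity, tracking just the paid amounts:

from decimal import Decimal

# Buy lots as (quantity, value), newest first, plus the quantity already sold.
buys = [(Decimal('10'), Decimal('120')),
        (Decimal('20'), Decimal('200')),
        (Decimal('10'), Decimal('90'))]
sold = Decimal('15')

balance = sum(q for q, _ in buys) - sold   # 25 shares still held
still_paid = Decimal(0)
for quantity, value in buys:
    if quantity < balance:
        still_paid += value                # the whole lot is still held
        balance -= quantity
    else:
        price = value / quantity
        still_paid += balance * price      # only part of this lot is still held
        balance = Decimal(0)
        break

print(still_paid)   # 270: amount paid for the 25 shares still on hand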
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_num_shares | def get_num_shares(self) -> Decimal:
""" Returns the number of shares at this time """
from pydatum import Datum
today = Datum().today()
return self.get_num_shares_on(today) | python | def get_num_shares(self) -> Decimal:
""" Returns the number of shares at this time """
from pydatum import Datum
today = Datum().today()
return self.get_num_shares_on(today) | [
"def",
"get_num_shares",
"(",
"self",
")",
"->",
"Decimal",
":",
"from",
"pydatum",
"import",
"Datum",
"today",
"=",
"Datum",
"(",
")",
".",
"today",
"(",
")",
"return",
"self",
".",
"get_num_shares_on",
"(",
"today",
")"
] | Returns the number of shares at this time | [
"Returns",
"the",
"number",
"of",
"shares",
"at",
"this",
"time"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L135-L139 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_last_available_price | def get_last_available_price(self) -> PriceModel:
""" Finds the last available price for security. Uses PriceDb. """
price_db = PriceDbApplication()
symbol = SecuritySymbol(self.security.namespace, self.security.mnemonic)
result = price_db.get_latest_price(symbol)
return result | python | def get_last_available_price(self) -> PriceModel:
""" Finds the last available price for security. Uses PriceDb. """
price_db = PriceDbApplication()
symbol = SecuritySymbol(self.security.namespace, self.security.mnemonic)
result = price_db.get_latest_price(symbol)
return result | [
"def",
"get_last_available_price",
"(",
"self",
")",
"->",
"PriceModel",
":",
"price_db",
"=",
"PriceDbApplication",
"(",
")",
"symbol",
"=",
"SecuritySymbol",
"(",
"self",
".",
"security",
".",
"namespace",
",",
"self",
".",
"security",
".",
"mnemonic",
")",
"result",
"=",
"price_db",
".",
"get_latest_price",
"(",
"symbol",
")",
"return",
"result"
] | Finds the last available price for security. Uses PriceDb. | [
"Finds",
"the",
"last",
"available",
"price",
"for",
"security",
".",
"Uses",
"PriceDb",
"."
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L154-L159 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.__get_holding_accounts_query | def __get_holding_accounts_query(self):
""" Returns all holding accounts, except Trading accounts. """
query = (
self.book.session.query(Account)
.filter(Account.commodity == self.security)
.filter(Account.type != AccountType.trading.value)
)
# generic.print_sql(query)
return query | python | def __get_holding_accounts_query(self):
""" Returns all holding accounts, except Trading accounts. """
query = (
self.book.session.query(Account)
.filter(Account.commodity == self.security)
.filter(Account.type != AccountType.trading.value)
)
# generic.print_sql(query)
return query | [
"def",
"__get_holding_accounts_query",
"(",
"self",
")",
":",
"query",
"=",
"(",
"self",
".",
"book",
".",
"session",
".",
"query",
"(",
"Account",
")",
".",
"filter",
"(",
"Account",
".",
"commodity",
"==",
"self",
".",
"security",
")",
".",
"filter",
"(",
"Account",
".",
"type",
"!=",
"AccountType",
".",
"trading",
".",
"value",
")",
")",
"# generic.print_sql(query)",
"return",
"query"
] | Returns all holding accounts, except Trading accounts. | [
"Returns",
"all",
"holding",
"accounts",
"except",
"Trading",
"accounts",
"."
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L180-L188 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_income_accounts | def get_income_accounts(self) -> List[Account]:
"""
Returns all income accounts for this security.
Income accounts are accounts not under Trading, expressed in currency, and
having the same name as the mnemonic.
They should be under Assets but this requires a recursive SQL query.
"""
# trading = self.book.trading_account(self.security)
# log(DEBUG, "trading account = %s, %s", trading.fullname, trading.guid)
# Example on how to self-link, i.e. parent account, using alias.
# parent_alias = aliased(Account)
# .join(parent_alias, Account.parent)
# parent_alias.parent_guid != trading.guid
query = (
self.book.session.query(Account)
.join(Commodity)
.filter(Account.name == self.security.mnemonic)
.filter(Commodity.namespace == "CURRENCY")
# .filter(Account.type != "TRADING")
.filter(Account.type == AccountType.income.value)
)
# generic.print_sql(query)
return query.all() | python | def get_income_accounts(self) -> List[Account]:
"""
Returns all income accounts for this security.
Income accounts are accounts not under Trading, expressed in currency, and
having the same name as the mnemonic.
They should be under Assets but this requires a recursive SQL query.
"""
# trading = self.book.trading_account(self.security)
# log(DEBUG, "trading account = %s, %s", trading.fullname, trading.guid)
# Example on how to self-link, i.e. parent account, using alias.
# parent_alias = aliased(Account)
# .join(parent_alias, Account.parent)
# parent_alias.parent_guid != trading.guid
query = (
self.book.session.query(Account)
.join(Commodity)
.filter(Account.name == self.security.mnemonic)
.filter(Commodity.namespace == "CURRENCY")
# .filter(Account.type != "TRADING")
.filter(Account.type == AccountType.income.value)
)
# generic.print_sql(query)
return query.all() | [
"def",
"get_income_accounts",
"(",
"self",
")",
"->",
"List",
"[",
"Account",
"]",
":",
"# trading = self.book.trading_account(self.security)",
"# log(DEBUG, \"trading account = %s, %s\", trading.fullname, trading.guid)",
"# Example on how to self-link, i.e. parent account, using alias.",
"# parent_alias = aliased(Account)",
"# .join(parent_alias, Account.parent)",
"# parent_alias.parent_guid != trading.guid",
"query",
"=",
"(",
"self",
".",
"book",
".",
"session",
".",
"query",
"(",
"Account",
")",
".",
"join",
"(",
"Commodity",
")",
".",
"filter",
"(",
"Account",
".",
"name",
"==",
"self",
".",
"security",
".",
"mnemonic",
")",
".",
"filter",
"(",
"Commodity",
".",
"namespace",
"==",
"\"CURRENCY\"",
")",
"# .filter(Account.type != \"TRADING\")",
".",
"filter",
"(",
"Account",
".",
"type",
"==",
"AccountType",
".",
"income",
".",
"value",
")",
")",
"# generic.print_sql(query)",
"return",
"query",
".",
"all",
"(",
")"
] | Returns all income accounts for this security.
Income accounts are accounts not under Trading, expressed in currency, and
having the same name as the mnemonic.
They should be under Assets but this requires a recursive SQL query. | [
"Returns",
"all",
"income",
"accounts",
"for",
"this",
"security",
".",
"Income",
"accounts",
"are",
"accounts",
"not",
"under",
"Trading",
"expressed",
"in",
"currency",
"and",
"having",
"the",
"same",
"name",
"as",
"the",
"mnemonic",
".",
"They",
"should",
"be",
"under",
"Assets",
"but",
"this",
"requires",
"a",
"recursive",
"SQL",
"query",
"."
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L190-L214 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_income_total | def get_income_total(self) -> Decimal:
""" Sum of all income = sum of balances of all income accounts. """
accounts = self.get_income_accounts()
# log(DEBUG, "income accounts: %s", accounts)
income = Decimal(0)
for acct in accounts:
income += acct.get_balance()
return income | python | def get_income_total(self) -> Decimal:
""" Sum of all income = sum of balances of all income accounts. """
accounts = self.get_income_accounts()
# log(DEBUG, "income accounts: %s", accounts)
income = Decimal(0)
for acct in accounts:
income += acct.get_balance()
return income | [
"def",
"get_income_total",
"(",
"self",
")",
"->",
"Decimal",
":",
"accounts",
"=",
"self",
".",
"get_income_accounts",
"(",
")",
"# log(DEBUG, \"income accounts: %s\", accounts)",
"income",
"=",
"Decimal",
"(",
"0",
")",
"for",
"acct",
"in",
"accounts",
":",
"income",
"+=",
"acct",
".",
"get_balance",
"(",
")",
"return",
"income"
] | Sum of all income = sum of balances of all income accounts. | [
"Sum",
"of",
"all",
"income",
"=",
"sum",
"of",
"balances",
"of",
"all",
"income",
"accounts",
"."
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L216-L223 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_income_in_period | def get_income_in_period(self, start: datetime, end: datetime) -> Decimal:
""" Returns all income in the given period """
accounts = self.get_income_accounts()
income = Decimal(0)
for acct in accounts:
acc_agg = AccountAggregate(self.book, acct)
acc_bal = acc_agg.get_balance_in_period(start, end)
income += acc_bal
return income | python | def get_income_in_period(self, start: datetime, end: datetime) -> Decimal:
""" Returns all income in the given period """
accounts = self.get_income_accounts()
income = Decimal(0)
for acct in accounts:
acc_agg = AccountAggregate(self.book, acct)
acc_bal = acc_agg.get_balance_in_period(start, end)
income += acc_bal
return income | [
"def",
"get_income_in_period",
"(",
"self",
",",
"start",
":",
"datetime",
",",
"end",
":",
"datetime",
")",
"->",
"Decimal",
":",
"accounts",
"=",
"self",
".",
"get_income_accounts",
"(",
")",
"income",
"=",
"Decimal",
"(",
"0",
")",
"for",
"acct",
"in",
"accounts",
":",
"acc_agg",
"=",
"AccountAggregate",
"(",
"self",
".",
"book",
",",
"acct",
")",
"acc_bal",
"=",
"acc_agg",
".",
"get_balance_in_period",
"(",
"start",
",",
"end",
")",
"income",
"+=",
"acc_bal",
"return",
"income"
] | Returns all income in the given period | [
"Returns",
"all",
"income",
"in",
"the",
"given",
"period"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L225-L234 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_prices | def get_prices(self) -> List[PriceModel]:
""" Returns all available prices for security """
# return self.security.prices.order_by(Price.date)
from pricedb.dal import Price
pricedb = PriceDbApplication()
repo = pricedb.get_price_repository()
query = (repo.query(Price)
.filter(Price.namespace == self.security.namespace)
.filter(Price.symbol == self.security.mnemonic)
.orderby_desc(Price.date)
)
return query.all() | python | def get_prices(self) -> List[PriceModel]:
""" Returns all available prices for security """
# return self.security.prices.order_by(Price.date)
from pricedb.dal import Price
pricedb = PriceDbApplication()
repo = pricedb.get_price_repository()
query = (repo.query(Price)
.filter(Price.namespace == self.security.namespace)
.filter(Price.symbol == self.security.mnemonic)
.orderby_desc(Price.date)
)
return query.all() | [
"def",
"get_prices",
"(",
"self",
")",
"->",
"List",
"[",
"PriceModel",
"]",
":",
"# return self.security.prices.order_by(Price.date)",
"from",
"pricedb",
".",
"dal",
"import",
"Price",
"pricedb",
"=",
"PriceDbApplication",
"(",
")",
"repo",
"=",
"pricedb",
".",
"get_price_repository",
"(",
")",
"query",
"=",
"(",
"repo",
".",
"query",
"(",
"Price",
")",
".",
"filter",
"(",
"Price",
".",
"namespace",
"==",
"self",
".",
"security",
".",
"namespace",
")",
".",
"filter",
"(",
"Price",
".",
"symbol",
"==",
"self",
".",
"security",
".",
"mnemonic",
")",
".",
"orderby_desc",
"(",
"Price",
".",
"date",
")",
")",
"return",
"query",
".",
"all",
"(",
")"
] | Returns all available prices for security | [
"Returns",
"all",
"available",
"prices",
"for",
"security"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L236-L248 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_quantity | def get_quantity(self) -> Decimal:
"""
Returns the number of shares for the given security.
It gets the number from all the accounts in the book.
"""
from pydatum import Datum
# Use today's date but reset hour and lower.
today = Datum()
today.today()
today.end_of_day()
return self.get_num_shares_on(today.value) | python | def get_quantity(self) -> Decimal:
"""
Returns the number of shares for the given security.
It gets the number from all the accounts in the book.
"""
from pydatum import Datum
# Use today's date but reset hour and lower.
today = Datum()
today.today()
today.end_of_day()
return self.get_num_shares_on(today.value) | [
"def",
"get_quantity",
"(",
"self",
")",
"->",
"Decimal",
":",
"from",
"pydatum",
"import",
"Datum",
"# Use today's date but reset hour and lower.",
"today",
"=",
"Datum",
"(",
")",
"today",
".",
"today",
"(",
")",
"today",
".",
"end_of_day",
"(",
")",
"return",
"self",
".",
"get_num_shares_on",
"(",
"today",
".",
"value",
")"
] | Returns the number of shares for the given security.
It gets the number from all the accounts in the book. | [
"Returns",
"the",
"number",
"of",
"shares",
"for",
"the",
"given",
"security",
".",
"It",
"gets",
"the",
"number",
"from",
"all",
"the",
"accounts",
"in",
"the",
"book",
"."
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L250-L260 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_splits_query | def get_splits_query(self):
""" Returns the query for all splits for this security """
query = (
self.book.session.query(Split)
.join(Account)
.filter(Account.type != AccountType.trading.value)
.filter(Account.commodity_guid == self.security.guid)
)
return query | python | def get_splits_query(self):
""" Returns the query for all splits for this security """
query = (
self.book.session.query(Split)
.join(Account)
.filter(Account.type != AccountType.trading.value)
.filter(Account.commodity_guid == self.security.guid)
)
return query | [
"def",
"get_splits_query",
"(",
"self",
")",
":",
"query",
"=",
"(",
"self",
".",
"book",
".",
"session",
".",
"query",
"(",
"Split",
")",
".",
"join",
"(",
"Account",
")",
".",
"filter",
"(",
"Account",
".",
"type",
"!=",
"AccountType",
".",
"trading",
".",
"value",
")",
".",
"filter",
"(",
"Account",
".",
"commodity_guid",
"==",
"self",
".",
"security",
".",
"guid",
")",
")",
"return",
"query"
] | Returns the query for all splits for this security | [
"Returns",
"the",
"query",
"for",
"all",
"splits",
"for",
"this",
"security"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L262-L270 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_total_paid | def get_total_paid(self) -> Decimal:
""" Returns the total amount paid, in currency, for the stocks owned """
query = (
self.get_splits_query()
)
splits = query.all()
total = Decimal(0)
for split in splits:
total += split.value
return total | python | def get_total_paid(self) -> Decimal:
""" Returns the total amount paid, in currency, for the stocks owned """
query = (
self.get_splits_query()
)
splits = query.all()
total = Decimal(0)
for split in splits:
total += split.value
return total | [
"def",
"get_total_paid",
"(",
"self",
")",
"->",
"Decimal",
":",
"query",
"=",
"(",
"self",
".",
"get_splits_query",
"(",
")",
")",
"splits",
"=",
"query",
".",
"all",
"(",
")",
"total",
"=",
"Decimal",
"(",
"0",
")",
"for",
"split",
"in",
"splits",
":",
"total",
"+=",
"split",
".",
"value",
"return",
"total"
] | Returns the total amount paid, in currency, for the stocks owned | [
"Returns",
"the",
"total",
"amount",
"paid",
"in",
"currency",
"for",
"the",
"stocks",
"owned"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L272-L283 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_total_paid_for_remaining_stock | def get_total_paid_for_remaining_stock(self) -> Decimal:
""" Returns the amount paid only for the remaining stock """
paid = Decimal(0)
accounts = self.get_holding_accounts()
for acc in accounts:
splits = self.get_available_splits_for_account(acc)
paid += sum(split.value for split in splits)
return paid | python | def get_total_paid_for_remaining_stock(self) -> Decimal:
""" Returns the amount paid only for the remaining stock """
paid = Decimal(0)
accounts = self.get_holding_accounts()
for acc in accounts:
splits = self.get_available_splits_for_account(acc)
paid += sum(split.value for split in splits)
return paid | [
"def",
"get_total_paid_for_remaining_stock",
"(",
"self",
")",
"->",
"Decimal",
":",
"paid",
"=",
"Decimal",
"(",
"0",
")",
"accounts",
"=",
"self",
".",
"get_holding_accounts",
"(",
")",
"for",
"acc",
"in",
"accounts",
":",
"splits",
"=",
"self",
".",
"get_available_splits_for_account",
"(",
"acc",
")",
"paid",
"+=",
"sum",
"(",
"split",
".",
"value",
"for",
"split",
"in",
"splits",
")",
"return",
"paid"
] | Returns the amount paid only for the remaining stock | [
"Returns",
"the",
"amount",
"paid",
"only",
"for",
"the",
"remaining",
"stock"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L285-L293 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_value | def get_value(self) -> Decimal:
""" Returns the current value of stocks """
quantity = self.get_quantity()
price = self.get_last_available_price()
if not price:
# raise ValueError("no price found for", self.full_symbol)
return Decimal(0)
value = quantity * price.value
return value | python | def get_value(self) -> Decimal:
""" Returns the current value of stocks """
quantity = self.get_quantity()
price = self.get_last_available_price()
if not price:
# raise ValueError("no price found for", self.full_symbol)
return Decimal(0)
value = quantity * price.value
return value | [
"def",
"get_value",
"(",
"self",
")",
"->",
"Decimal",
":",
"quantity",
"=",
"self",
".",
"get_quantity",
"(",
")",
"price",
"=",
"self",
".",
"get_last_available_price",
"(",
")",
"if",
"not",
"price",
":",
"# raise ValueError(\"no price found for\", self.full_symbol)",
"return",
"Decimal",
"(",
"0",
")",
"value",
"=",
"quantity",
"*",
"price",
".",
"value",
"return",
"value"
] | Returns the current value of stocks | [
"Returns",
"the",
"current",
"value",
"of",
"stocks"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L295-L304 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.get_value_in_base_currency | def get_value_in_base_currency(self) -> Decimal:
""" Calculates the value of security holdings in base currency """
# check if the currency is the base currency.
amt_orig = self.get_value()
# Security currency
sec_cur = self.get_currency()
#base_cur = self.book.default_currency
cur_svc = CurrenciesAggregate(self.book)
base_cur = cur_svc.get_default_currency()
if sec_cur == base_cur:
return amt_orig
# otherwise recalculate
single_svc = cur_svc.get_currency_aggregate(sec_cur)
rate = single_svc.get_latest_rate(base_cur)
result = amt_orig * rate.value
return result | python | def get_value_in_base_currency(self) -> Decimal:
""" Calculates the value of security holdings in base currency """
# check if the currency is the base currency.
amt_orig = self.get_value()
# Security currency
sec_cur = self.get_currency()
#base_cur = self.book.default_currency
cur_svc = CurrenciesAggregate(self.book)
base_cur = cur_svc.get_default_currency()
if sec_cur == base_cur:
return amt_orig
# otherwise recalculate
single_svc = cur_svc.get_currency_aggregate(sec_cur)
rate = single_svc.get_latest_rate(base_cur)
result = amt_orig * rate.value
return result | [
"def",
"get_value_in_base_currency",
"(",
"self",
")",
"->",
"Decimal",
":",
"# check if the currency is the base currency.",
"amt_orig",
"=",
"self",
".",
"get_value",
"(",
")",
"# Security currency",
"sec_cur",
"=",
"self",
".",
"get_currency",
"(",
")",
"#base_cur = self.book.default_currency",
"cur_svc",
"=",
"CurrenciesAggregate",
"(",
"self",
".",
"book",
")",
"base_cur",
"=",
"cur_svc",
".",
"get_default_currency",
"(",
")",
"if",
"sec_cur",
"==",
"base_cur",
":",
"return",
"amt_orig",
"# otherwise recalculate",
"single_svc",
"=",
"cur_svc",
".",
"get_currency_aggregate",
"(",
"sec_cur",
")",
"rate",
"=",
"single_svc",
".",
"get_latest_rate",
"(",
"base_cur",
")",
"result",
"=",
"amt_orig",
"*",
"rate",
".",
"value",
"return",
"result"
] | Calculates the value of security holdings in base currency | [
"Calculates",
"the",
"value",
"of",
"security",
"holdings",
"in",
"base",
"currency"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L306-L324 | train |
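Taken together, get_value and get_value_in_base_currency reduce to a quantity times the latest price, then times the latest exchange rate. Illustrative numbers only (the currencies and the rate are made up):

from decimal import Decimal

quantity = Decimal('100')        # shares held
last_price = Decimal('15.00')    # latest price in the security currency, e.g. AUD
rate_to_base = Decimal('0.60')   # hypothetical AUD -> EUR rate

value = quantity * last_price            # get_value(): 1500.00
value_base = value * rate_to_base        # get_value_in_base_currency(): 900.0000
print(value, value_base)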
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecurityAggregate.accounts | def accounts(self) -> List[Account]:
""" Returns the asset accounts in which the security is held """
# use only Assets sub-accounts
result = (
[acct for acct in self.security.accounts if acct.fullname.startswith('Assets')]
)
return result | python | def accounts(self) -> List[Account]:
""" Returns the asset accounts in which the security is held """
# use only Assets sub-accounts
result = (
[acct for acct in self.security.accounts if acct.fullname.startswith('Assets')]
)
return result | [
"def",
"accounts",
"(",
"self",
")",
"->",
"List",
"[",
"Account",
"]",
":",
"# use only Assets sub-accounts",
"result",
"=",
"(",
"[",
"acct",
"for",
"acct",
"in",
"self",
".",
"security",
".",
"accounts",
"if",
"acct",
".",
"fullname",
".",
"startswith",
"(",
"'Assets'",
")",
"]",
")",
"return",
"result"
] | Returns the asset accounts in which the security is held | [
"Returns",
"the",
"asset",
"accounts",
"in",
"which",
"the",
"security",
"is",
"held"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L348-L354 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecuritiesAggregate.find | def find(self, search_term: str) -> List[Commodity]:
""" Searches for security by part of the name """
query = (
self.query
.filter(Commodity.mnemonic.like('%' + search_term + '%') |
Commodity.fullname.like('%' + search_term + '%'))
)
return query.all() | python | def find(self, search_term: str) -> List[Commodity]:
""" Searches for security by part of the name """
query = (
self.query
.filter(Commodity.mnemonic.like('%' + search_term + '%') |
Commodity.fullname.like('%' + search_term + '%'))
)
return query.all() | [
"def",
"find",
"(",
"self",
",",
"search_term",
":",
"str",
")",
"->",
"List",
"[",
"Commodity",
"]",
":",
"query",
"=",
"(",
"self",
".",
"query",
".",
"filter",
"(",
"Commodity",
".",
"mnemonic",
".",
"like",
"(",
"'%'",
"+",
"search_term",
"+",
"'%'",
")",
"|",
"Commodity",
".",
"fullname",
".",
"like",
"(",
"'%'",
"+",
"search_term",
"+",
"'%'",
")",
")",
")",
"return",
"query",
".",
"all",
"(",
")"
] | Searches for security by part of the name | [
"Searches",
"for",
"security",
"by",
"part",
"of",
"the",
"name"
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L370-L377 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecuritiesAggregate.get_all | def get_all(self) -> List[Commodity]:
""" Loads all non-currency commodities, assuming they are stocks. """
query = (
self.query
.order_by(Commodity.namespace, Commodity.mnemonic)
)
return query.all() | python | def get_all(self) -> List[Commodity]:
""" Loads all non-currency commodities, assuming they are stocks. """
query = (
self.query
.order_by(Commodity.namespace, Commodity.mnemonic)
)
return query.all() | [
"def",
"get_all",
"(",
"self",
")",
"->",
"List",
"[",
"Commodity",
"]",
":",
"query",
"=",
"(",
"self",
".",
"query",
".",
"order_by",
"(",
"Commodity",
".",
"namespace",
",",
"Commodity",
".",
"mnemonic",
")",
")",
"return",
"query",
".",
"all",
"(",
")"
] | Loads all non-currency commodities, assuming they are stocks. | [
"Loads",
"all",
"non",
"-",
"currency",
"commodities",
"assuming",
"they",
"are",
"stocks",
"."
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L379-L385 | train |
MisterY/gnucash-portfolio | gnucash_portfolio/securitiesaggregate.py | SecuritiesAggregate.get_by_symbol | def get_by_symbol(self, symbol: str) -> Commodity:
"""
Returns the commodity with the given symbol.
If more are found, an exception will be thrown.
"""
# handle namespace. Accept GnuCash and Yahoo-style symbols.
full_symbol = self.__parse_gc_symbol(symbol)
query = (
self.query
.filter(Commodity.mnemonic == full_symbol["mnemonic"])
)
if full_symbol["namespace"]:
query = query.filter(Commodity.namespace == full_symbol["namespace"])
return query.first() | python | def get_by_symbol(self, symbol: str) -> Commodity:
"""
Returns the commodity with the given symbol.
If more are found, an exception will be thrown.
"""
# handle namespace. Accept GnuCash and Yahoo-style symbols.
full_symbol = self.__parse_gc_symbol(symbol)
query = (
self.query
.filter(Commodity.mnemonic == full_symbol["mnemonic"])
)
if full_symbol["namespace"]:
query = query.filter(Commodity.namespace == full_symbol["namespace"])
return query.first() | [
"def",
"get_by_symbol",
"(",
"self",
",",
"symbol",
":",
"str",
")",
"->",
"Commodity",
":",
"# handle namespace. Accept GnuCash and Yahoo-style symbols.",
"full_symbol",
"=",
"self",
".",
"__parse_gc_symbol",
"(",
"symbol",
")",
"query",
"=",
"(",
"self",
".",
"query",
".",
"filter",
"(",
"Commodity",
".",
"mnemonic",
"==",
"full_symbol",
"[",
"\"mnemonic\"",
"]",
")",
")",
"if",
"full_symbol",
"[",
"\"namespace\"",
"]",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"Commodity",
".",
"namespace",
"==",
"full_symbol",
"[",
"\"namespace\"",
"]",
")",
"return",
"query",
".",
"first",
"(",
")"
] | Returns the commodity with the given symbol.
If more are found, an exception will be thrown. | [
"Returns",
"the",
"commodity",
"with",
"the",
"given",
"symbol",
".",
"If",
"more",
"are",
"found",
"an",
"exception",
"will",
"be",
"thrown",
"."
] | bfaad8345a5479d1cd111acee1939e25c2a638c2 | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/securitiesaggregate.py#L387-L402 | train |
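get_by_symbol relies on the private __parse_gc_symbol helper, which is not shown in this entry. A hypothetical stand-in that returns the same 'namespace'/'mnemonic' dictionary the query expects, purely to illustrate how a GnuCash-style 'NAMESPACE:SYMBOL' string drives the two filters (the parsing rules are an assumption, not the library's actual implementation):

def parse_symbol(symbol: str) -> dict:
    """Illustrative parser: split 'NAMESPACE:MNEMONIC'; a bare symbol has no namespace."""
    if ':' in symbol:
        namespace, mnemonic = symbol.split(':', maxsplit=1)
        return {'namespace': namespace.upper(), 'mnemonic': mnemonic}
    return {'namespace': None, 'mnemonic': symbol}

print(parse_symbol('ASX:VAS'))   # {'namespace': 'ASX', 'mnemonic': 'VAS'}
print(parse_symbol('VAS'))       # {'namespace': None, 'mnemonic': 'VAS'}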