lca_index (int64, 0–223) | idx (string, lengths 7–11) | line_type (string, 6 classes) | ground_truth (string, lengths 2–35) | completions (sequence, lengths 3–1.16k) | prefix (string, lengths 298–32.8k) | postfix (string, lengths 0–28.6k) | repo (string, 34 classes) |
---|---|---|---|---|---|---|---|
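Each row below pairs a single completion gap with its context: `prefix` and `postfix` are the source text before and after the gap (rendered in the rows as ` | ` at the split point), `ground_truth` is the token that belongs in the gap, and `completions` lists the candidate identifiers. A minimal sketch of consuming rows with this schema follows; it assumes the rows have been exported as JSON Lines, and the filename `rows.jsonl` is hypothetical — only the field names come from the schema header above.

```python
# Sketch: iterate over exported rows and splice the ground truth back into the gap.
# Assumes a JSON Lines export with the fields shown in the schema header.
import json

def reconstruct_source(row: dict) -> str:
    """Rebuild the full source by filling the gap between prefix and postfix."""
    return row["prefix"] + row["ground_truth"] + row["postfix"]

with open("rows.jsonl", encoding="utf8") as f:
    for line in f:
        row = json.loads(line)
        # The ground-truth token is expected to appear among the candidates.
        assert row["ground_truth"] in row["completions"]
        source = reconstruct_source(row)
        print(row["idx"], row["line_type"], row["repo"], len(source))
```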
63 | 63-243-31 | random | authenticator | [
"active_server_limit",
"add_header",
"admin_users",
"allow_named_servers",
"app",
"append_query_parameters",
"application",
"auth_to_user",
"authenticate",
"authenticate_prometheus",
"authenticator",
"base_url",
"check_etag_header",
"check_xsrf_cookie",
"clear",
"clear_all_cookies",
"clear_cookie",
"clear_header",
"clear_login_cookie",
"compute_etag",
"concurrent_spawn_limit",
"config",
"content_security_policy",
"cookie_max_age_days",
"cookies",
"create_signed_value",
"create_template_loader",
"csp_report_uri",
"current_user",
"data_received",
"db",
"decode_argument",
"default_handle_logout",
"default_url",
"delete",
"detach",
"domain",
"eventlog",
"expanded_scopes",
"extra_error_html",
"find_user",
"finish",
"flush",
"get",
"get_accessible_services",
"get_argument",
"get_arguments",
"get_auth_token",
"get_body_argument",
"get_body_arguments",
"get_browser_locale",
"get_content_type",
"get_cookie",
"get_current_user",
"get_current_user_cookie",
"get_current_user_named_server_limit",
"get_current_user_token",
"get_login_url",
"get_next_url",
"get_query_argument",
"get_query_arguments",
"get_scope_filter",
"get_secure_cookie",
"get_secure_cookie_key_version",
"get_session_cookie",
"get_signed_cookie",
"get_signed_cookie_key_version",
"get_status",
"get_template",
"get_template_namespace",
"get_template_path",
"get_token",
"get_user_locale",
"handle_logout",
"has_scope",
"head",
"hub",
"initialize",
"locale",
"log",
"log_exception",
"login_user",
"named_server_limit_per_user",
"oauth_provider",
"on_connection_close",
"on_finish",
"options",
"parsed_scopes",
"patch",
"path_args",
"path_kwargs",
"post",
"prepare",
"proxy",
"public_url",
"put",
"redirect",
"redirect_to_server",
"refresh_auth",
"render",
"render_embed_css",
"render_embed_js",
"render_linked_css",
"render_linked_js",
"render_logout_page",
"render_string",
"render_template",
"request",
"require_setting",
"reverse_url",
"send_error",
"services",
"set_cookie",
"set_default_headers",
"set_etag_header",
"set_header",
"set_hub_cookie",
"set_login_cookie",
"set_secure_cookie",
"set_service_cookie",
"set_session_cookie",
"set_signed_cookie",
"set_status",
"settings",
"shutdown_on_logout",
"slow_spawn_timeout",
"slow_stop_timeout",
"spawn_home_error",
"spawn_single_user",
"spawner_class",
"static_url",
"statsd",
"stop_single_user",
"subdomain_host",
"SUPPORTED_METHODS",
"template_namespace",
"ui",
"user_from_username",
"user_stopped",
"users",
"version_hash",
"write",
"write_error",
"xsrf_form_html",
"xsrf_token",
"_accept_cookie_auth",
"_accept_token_auth",
"_active_modules",
"_auto_finish",
"_backend_logout_cleanup",
"_break_cycles",
"_clear_representation_headers",
"_convert_header_value",
"_current_user",
"_decode_xsrf_token",
"_execute",
"_finished",
"_get_argument",
"_get_arguments",
"_get_raw_xsrf_token",
"_handle_request_exception",
"_headers",
"_headers_written",
"_initialize",
"_INVALID_HEADER_CHAR_RE",
"_jupyterhub_user",
"_locale",
"_log",
"_new_cookie",
"_prepared_future",
"_raw_xsrf_token",
"_reason",
"_record_activity",
"_refreshed_users",
"_remove_control_chars_regex",
"_request_summary",
"_resolve_roles_and_scopes",
"_session_id",
"_set_cookie",
"_set_user_cookie",
"_shutdown_servers",
"_status_code",
"_stream_request_body",
"_template_loader_lock",
"_template_loaders",
"_token_authenticated",
"_transforms",
"_ui_method",
"_ui_module",
"_unimplemented_method",
"_user_for_cookie",
"_user_from_orm",
"_validate_next_url",
"_write_buffer",
"_xsrf_safe_methods",
"_xsrf_token",
"_xsrf_token_id",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_07 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self. | .logout_redirect_url)
return
return await super().render_logout_page()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self.http_client.fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self._deprecated_oauth_aliases)
)
super().__init__(**kwargs)
| coffeateam__coffea-casa |
63 | 63-246-29 | infile | render_logout_page | [
"active_server_limit",
"add_header",
"admin_users",
"allow_named_servers",
"app",
"append_query_parameters",
"application",
"auth_to_user",
"authenticate",
"authenticate_prometheus",
"authenticator",
"base_url",
"check_etag_header",
"check_xsrf_cookie",
"clear",
"clear_all_cookies",
"clear_cookie",
"clear_header",
"clear_login_cookie",
"compute_etag",
"concurrent_spawn_limit",
"config",
"content_security_policy",
"cookie_max_age_days",
"cookies",
"create_signed_value",
"create_template_loader",
"csp_report_uri",
"current_user",
"data_received",
"db",
"decode_argument",
"default_handle_logout",
"default_url",
"delete",
"detach",
"domain",
"eventlog",
"expanded_scopes",
"extra_error_html",
"find_user",
"finish",
"flush",
"get",
"get_accessible_services",
"get_argument",
"get_arguments",
"get_auth_token",
"get_body_argument",
"get_body_arguments",
"get_browser_locale",
"get_content_type",
"get_cookie",
"get_current_user",
"get_current_user_cookie",
"get_current_user_named_server_limit",
"get_current_user_token",
"get_login_url",
"get_next_url",
"get_query_argument",
"get_query_arguments",
"get_scope_filter",
"get_secure_cookie",
"get_secure_cookie_key_version",
"get_session_cookie",
"get_signed_cookie",
"get_signed_cookie_key_version",
"get_status",
"get_template",
"get_template_namespace",
"get_template_path",
"get_token",
"get_user_locale",
"handle_logout",
"has_scope",
"head",
"hub",
"initialize",
"locale",
"log",
"log_exception",
"login_user",
"named_server_limit_per_user",
"oauth_provider",
"on_connection_close",
"on_finish",
"options",
"parsed_scopes",
"patch",
"path_args",
"path_kwargs",
"post",
"prepare",
"proxy",
"public_url",
"put",
"redirect",
"redirect_to_server",
"refresh_auth",
"render",
"render_embed_css",
"render_embed_js",
"render_linked_css",
"render_linked_js",
"render_logout_page",
"render_string",
"render_template",
"request",
"require_setting",
"reverse_url",
"send_error",
"services",
"set_cookie",
"set_default_headers",
"set_etag_header",
"set_header",
"set_hub_cookie",
"set_login_cookie",
"set_secure_cookie",
"set_service_cookie",
"set_session_cookie",
"set_signed_cookie",
"set_status",
"settings",
"shutdown_on_logout",
"slow_spawn_timeout",
"slow_stop_timeout",
"spawn_home_error",
"spawn_single_user",
"spawner_class",
"static_url",
"statsd",
"stop_single_user",
"subdomain_host",
"SUPPORTED_METHODS",
"template_namespace",
"ui",
"user_from_username",
"user_stopped",
"users",
"version_hash",
"write",
"write_error",
"xsrf_form_html",
"xsrf_token",
"_accept_cookie_auth",
"_accept_token_auth",
"_active_modules",
"_auto_finish",
"_backend_logout_cleanup",
"_break_cycles",
"_clear_representation_headers",
"_convert_header_value",
"_current_user",
"_decode_xsrf_token",
"_execute",
"_finished",
"_get_argument",
"_get_arguments",
"_get_raw_xsrf_token",
"_handle_request_exception",
"_headers",
"_headers_written",
"_initialize",
"_INVALID_HEADER_CHAR_RE",
"_jupyterhub_user",
"_locale",
"_log",
"_new_cookie",
"_prepared_future",
"_raw_xsrf_token",
"_reason",
"_record_activity",
"_refreshed_users",
"_remove_control_chars_regex",
"_request_summary",
"_resolve_roles_and_scopes",
"_session_id",
"_set_cookie",
"_set_user_cookie",
"_shutdown_servers",
"_status_code",
"_stream_request_body",
"_template_loader_lock",
"_template_loaders",
"_token_authenticated",
"_transforms",
"_ui_method",
"_ui_module",
"_unimplemented_method",
"_user_for_cookie",
"_user_from_orm",
"_validate_next_url",
"_write_buffer",
"_xsrf_safe_methods",
"_xsrf_token",
"_xsrf_token_id",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_07 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
return
return await super(). | ()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self.http_client.fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self._deprecated_oauth_aliases)
)
super().__init__(**kwargs)
| coffeateam__coffea-casa |
63 | 63-268-18 | commited | environ | [
"abc",
"abort",
"access",
"add_dll_directory",
"altsep",
"chdir",
"chflags",
"chmod",
"chown",
"chroot",
"CLD_CONTINUED",
"CLD_DUMPED",
"CLD_EXITED",
"CLD_TRAPPED",
"close",
"closerange",
"confstr",
"confstr_names",
"cpu_count",
"ctermid",
"curdir",
"defpath",
"device_encoding",
"devnull",
"DirEntry",
"dup",
"dup2",
"environ",
"environb",
"error",
"EX_CANTCREAT",
"EX_CONFIG",
"EX_DATAERR",
"EX_IOERR",
"EX_NOHOST",
"EX_NOINPUT",
"EX_NOPERM",
"EX_NOTFOUND",
"EX_NOUSER",
"EX_OK",
"EX_OSERR",
"EX_OSFILE",
"EX_PROTOCOL",
"EX_SOFTWARE",
"EX_TEMPFAIL",
"EX_UNAVAILABLE",
"EX_USAGE",
"execl",
"execle",
"execlp",
"execlpe",
"execv",
"execve",
"execvp",
"execvpe",
"extsep",
"F_LOCK",
"F_OK",
"F_TEST",
"F_TLOCK",
"F_ULOCK",
"fchdir",
"fchmod",
"fchown",
"fdatasync",
"fdopen",
"fork",
"forkpty",
"fpathconf",
"fsdecode",
"fsencode",
"fspath",
"fstat",
"fstatvfs",
"fsync",
"ftruncate",
"fwalk",
"GenericAlias",
"get_blocking",
"get_exec_path",
"get_inheritable",
"get_terminal_size",
"getcwd",
"getcwdb",
"getegid",
"getenv",
"getenvb",
"geteuid",
"getgid",
"getgrouplist",
"getgroups",
"getloadavg",
"getlogin",
"getpgid",
"getpgrp",
"getpid",
"getppid",
"getpriority",
"getrandom",
"getresgid",
"getresuid",
"getsid",
"getuid",
"getxattr",
"GRND_NONBLOCK",
"GRND_RANDOM",
"initgroups",
"isatty",
"kill",
"killpg",
"lchflags",
"lchmod",
"lchown",
"linesep",
"link",
"listdir",
"listxattr",
"lockf",
"lseek",
"lstat",
"major",
"makedev",
"makedirs",
"Mapping",
"memfd_create",
"MFD_ALLOW_SEALING",
"MFD_CLOEXEC",
"MFD_HUGE_16GB",
"MFD_HUGE_16MB",
"MFD_HUGE_1GB",
"MFD_HUGE_1MB",
"MFD_HUGE_256MB",
"MFD_HUGE_2GB",
"MFD_HUGE_2MB",
"MFD_HUGE_32MB",
"MFD_HUGE_512KB",
"MFD_HUGE_512MB",
"MFD_HUGE_64KB",
"MFD_HUGE_8MB",
"MFD_HUGE_MASK",
"MFD_HUGE_SHIFT",
"MFD_HUGETLB",
"minor",
"mkdir",
"mkfifo",
"mknod",
"MutableMapping",
"name",
"NGROUPS_MAX",
"nice",
"O_ACCMODE",
"O_APPEND",
"O_ASYNC",
"O_BINARY",
"O_CLOEXEC",
"O_CREAT",
"O_DIRECT",
"O_DIRECTORY",
"O_DSYNC",
"O_EXCL",
"O_EXLOCK",
"O_LARGEFILE",
"O_NDELAY",
"O_NOATIME",
"O_NOCTTY",
"O_NOFOLLOW",
"O_NOINHERIT",
"O_NONBLOCK",
"O_PATH",
"O_RANDOM",
"O_RDONLY",
"O_RDWR",
"O_RSYNC",
"O_SEQUENTIAL",
"O_SHLOCK",
"O_SHORT_LIVED",
"O_SYNC",
"O_TEMPORARY",
"O_TEXT",
"O_TMPFILE",
"O_TRUNC",
"O_WRONLY",
"open",
"openpty",
"P_ALL",
"P_DETACH",
"P_NOWAIT",
"P_NOWAITO",
"P_OVERLAY",
"P_PGID",
"P_PID",
"P_WAIT",
"pardir",
"path",
"pathconf",
"pathconf_names",
"PathLike",
"pathsep",
"pipe",
"pipe2",
"plock",
"popen",
"POSIX_FADV_DONTNEED",
"POSIX_FADV_NOREUSE",
"POSIX_FADV_NORMAL",
"POSIX_FADV_RANDOM",
"POSIX_FADV_SEQUENTIAL",
"POSIX_FADV_WILLNEED",
"posix_fadvise",
"posix_fallocate",
"pread",
"PRIO_PGRP",
"PRIO_PROCESS",
"PRIO_USER",
"putenv",
"pwrite",
"R_OK",
"read",
"readlink",
"readv",
"register_at_fork",
"remove",
"removedirs",
"removexattr",
"rename",
"renames",
"replace",
"rmdir",
"RTLD_DEEPBIND",
"RTLD_GLOBAL",
"RTLD_LAZY",
"RTLD_LOCAL",
"RTLD_NODELETE",
"RTLD_NOLOAD",
"RTLD_NOW",
"scandir",
"SCHED_BATCH",
"SCHED_FIFO",
"sched_get_priority_max",
"sched_get_priority_min",
"sched_getaffinity",
"sched_getparam",
"sched_getscheduler",
"SCHED_IDLE",
"SCHED_OTHER",
"sched_param",
"SCHED_RESET_ON_FORK",
"SCHED_RR",
"sched_rr_get_interval",
"sched_setaffinity",
"sched_setparam",
"sched_setscheduler",
"SCHED_SPORADIC",
"sched_yield",
"SEEK_CUR",
"SEEK_DATA",
"SEEK_END",
"SEEK_HOLE",
"SEEK_SET",
"sendfile",
"sep",
"set_blocking",
"set_inheritable",
"setegid",
"seteuid",
"setgid",
"setgroups",
"setpgid",
"setpgrp",
"setpriority",
"setregid",
"setresgid",
"setresuid",
"setreuid",
"setsid",
"setuid",
"setxattr",
"SF_MNOWAIT",
"SF_NODISKIO",
"SF_SYNC",
"spawnl",
"spawnle",
"spawnlp",
"spawnlpe",
"spawnv",
"spawnve",
"spawnvp",
"spawnvpe",
"st",
"ST_APPEND",
"ST_MANDLOCK",
"ST_NOATIME",
"ST_NODEV",
"ST_NODIRATIME",
"ST_NOEXEC",
"ST_NOSUID",
"ST_RDONLY",
"ST_RELATIME",
"ST_SYNCHRONOUS",
"ST_WRITE",
"startfile",
"stat",
"stat_result",
"statvfs",
"statvfs_result",
"strerror",
"supports_bytes_environ",
"supports_dir_fd",
"supports_effective_ids",
"supports_fd",
"supports_follow_symlinks",
"symlink",
"sync",
"sys",
"sysconf",
"sysconf_names",
"system",
"tcgetpgrp",
"tcsetpgrp",
"terminal_size",
"times",
"times_result",
"TMP_MAX",
"truncate",
"ttyname",
"umask",
"uname",
"uname_result",
"unlink",
"unsetenv",
"urandom",
"utime",
"W_OK",
"wait",
"wait3",
"wait4",
"waitid",
"waitid_result",
"waitpid",
"walk",
"WCONTINUED",
"WCOREDUMP",
"WEXITED",
"WEXITSTATUS",
"WIFCONTINUED",
"WIFEXITED",
"WIFSIGNALED",
"WIFSTOPPED",
"WNOHANG",
"WNOWAIT",
"write",
"writev",
"WSTOPPED",
"WSTOPSIG",
"WTERMSIG",
"WUNTRACED",
"X_OK",
"XATTR_CREATE",
"XATTR_REPLACE",
"XATTR_SIZE_MAX",
"_AddedDllDirectory",
"_check_methods",
"_Environ",
"_execvpe",
"_exists",
"_exit",
"_fspath",
"_fwalk",
"_get_exports_list",
"_spawnvef",
"_walk",
"_wrap_close",
"__all__",
"__doc__",
"__file__",
"__name__",
"__package__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_07 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
return
return await super().render_logout_page()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os. | .get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self.http_client.fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self._deprecated_oauth_aliases)
)
super().__init__(**kwargs)
| coffeateam__coffea-casa |
63 | 63-268-26 | commited | get | [
"clear",
"copy",
"decodekey",
"decodevalue",
"encodekey",
"encodevalue",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
"_data",
"__abc_tpflags__",
"__annotations__",
"__class__",
"__class_getitem__",
"__contains__",
"__delattr__",
"__delitem__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init__",
"__init_subclass__",
"__ior__",
"__iter__",
"__len__",
"__module__",
"__ne__",
"__new__",
"__or__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__ror__",
"__setattr__",
"__setitem__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_07 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
return
return await super().render_logout_page()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ. | ("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self.http_client.fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self._deprecated_oauth_aliases)
)
super().__init__(**kwargs)
| coffeateam__coffea-casa |
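For reference, the `_serialize_state` / `_deserialize_state` helpers in the code above are just a JSON-plus-urlsafe-base64 round trip. A minimal standalone sketch of the same logic follows; the payload values are made up for illustration, and the handler version additionally logs and returns an empty dict on malformed input.

import base64
import json

def serialize_state(state):
    # JSON-encode the dict, then URL-safe base64-encode it (mirrors _serialize_state)
    return base64.urlsafe_b64encode(json.dumps(state).encode('utf8')).decode('ascii')

def deserialize_state(b64_state):
    # Inverse of serialize_state (mirrors _deserialize_state, minus the error handling)
    return json.loads(base64.urlsafe_b64decode(b64_state.encode('ascii')).decode('utf8'))

token = serialize_state({'state_id': 'abc123', 'next_url': '/hub/home'})
assert deserialize_state(token) == {'state_id': 'abc123', 'next_url': '/hub/home'}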
63 | 63-277-18 | commited | environ | [
"abc",
"abort",
"access",
"add_dll_directory",
"altsep",
"chdir",
"chflags",
"chmod",
"chown",
"chroot",
"CLD_CONTINUED",
"CLD_DUMPED",
"CLD_EXITED",
"CLD_TRAPPED",
"close",
"closerange",
"confstr",
"confstr_names",
"cpu_count",
"ctermid",
"curdir",
"defpath",
"device_encoding",
"devnull",
"DirEntry",
"dup",
"dup2",
"environ",
"environb",
"error",
"EX_CANTCREAT",
"EX_CONFIG",
"EX_DATAERR",
"EX_IOERR",
"EX_NOHOST",
"EX_NOINPUT",
"EX_NOPERM",
"EX_NOTFOUND",
"EX_NOUSER",
"EX_OK",
"EX_OSERR",
"EX_OSFILE",
"EX_PROTOCOL",
"EX_SOFTWARE",
"EX_TEMPFAIL",
"EX_UNAVAILABLE",
"EX_USAGE",
"execl",
"execle",
"execlp",
"execlpe",
"execv",
"execve",
"execvp",
"execvpe",
"extsep",
"F_LOCK",
"F_OK",
"F_TEST",
"F_TLOCK",
"F_ULOCK",
"fchdir",
"fchmod",
"fchown",
"fdatasync",
"fdopen",
"fork",
"forkpty",
"fpathconf",
"fsdecode",
"fsencode",
"fspath",
"fstat",
"fstatvfs",
"fsync",
"ftruncate",
"fwalk",
"GenericAlias",
"get_blocking",
"get_exec_path",
"get_inheritable",
"get_terminal_size",
"getcwd",
"getcwdb",
"getegid",
"getenv",
"getenvb",
"geteuid",
"getgid",
"getgrouplist",
"getgroups",
"getloadavg",
"getlogin",
"getpgid",
"getpgrp",
"getpid",
"getppid",
"getpriority",
"getrandom",
"getresgid",
"getresuid",
"getsid",
"getuid",
"getxattr",
"GRND_NONBLOCK",
"GRND_RANDOM",
"initgroups",
"isatty",
"kill",
"killpg",
"lchflags",
"lchmod",
"lchown",
"linesep",
"link",
"listdir",
"listxattr",
"lockf",
"lseek",
"lstat",
"major",
"makedev",
"makedirs",
"Mapping",
"memfd_create",
"MFD_ALLOW_SEALING",
"MFD_CLOEXEC",
"MFD_HUGE_16GB",
"MFD_HUGE_16MB",
"MFD_HUGE_1GB",
"MFD_HUGE_1MB",
"MFD_HUGE_256MB",
"MFD_HUGE_2GB",
"MFD_HUGE_2MB",
"MFD_HUGE_32MB",
"MFD_HUGE_512KB",
"MFD_HUGE_512MB",
"MFD_HUGE_64KB",
"MFD_HUGE_8MB",
"MFD_HUGE_MASK",
"MFD_HUGE_SHIFT",
"MFD_HUGETLB",
"minor",
"mkdir",
"mkfifo",
"mknod",
"MutableMapping",
"name",
"NGROUPS_MAX",
"nice",
"O_ACCMODE",
"O_APPEND",
"O_ASYNC",
"O_BINARY",
"O_CLOEXEC",
"O_CREAT",
"O_DIRECT",
"O_DIRECTORY",
"O_DSYNC",
"O_EXCL",
"O_EXLOCK",
"O_LARGEFILE",
"O_NDELAY",
"O_NOATIME",
"O_NOCTTY",
"O_NOFOLLOW",
"O_NOINHERIT",
"O_NONBLOCK",
"O_PATH",
"O_RANDOM",
"O_RDONLY",
"O_RDWR",
"O_RSYNC",
"O_SEQUENTIAL",
"O_SHLOCK",
"O_SHORT_LIVED",
"O_SYNC",
"O_TEMPORARY",
"O_TEXT",
"O_TMPFILE",
"O_TRUNC",
"O_WRONLY",
"open",
"openpty",
"P_ALL",
"P_DETACH",
"P_NOWAIT",
"P_NOWAITO",
"P_OVERLAY",
"P_PGID",
"P_PID",
"P_WAIT",
"pardir",
"path",
"pathconf",
"pathconf_names",
"PathLike",
"pathsep",
"pipe",
"pipe2",
"plock",
"popen",
"POSIX_FADV_DONTNEED",
"POSIX_FADV_NOREUSE",
"POSIX_FADV_NORMAL",
"POSIX_FADV_RANDOM",
"POSIX_FADV_SEQUENTIAL",
"POSIX_FADV_WILLNEED",
"posix_fadvise",
"posix_fallocate",
"pread",
"PRIO_PGRP",
"PRIO_PROCESS",
"PRIO_USER",
"putenv",
"pwrite",
"R_OK",
"read",
"readlink",
"readv",
"register_at_fork",
"remove",
"removedirs",
"removexattr",
"rename",
"renames",
"replace",
"rmdir",
"RTLD_DEEPBIND",
"RTLD_GLOBAL",
"RTLD_LAZY",
"RTLD_LOCAL",
"RTLD_NODELETE",
"RTLD_NOLOAD",
"RTLD_NOW",
"scandir",
"SCHED_BATCH",
"SCHED_FIFO",
"sched_get_priority_max",
"sched_get_priority_min",
"sched_getaffinity",
"sched_getparam",
"sched_getscheduler",
"SCHED_IDLE",
"SCHED_OTHER",
"sched_param",
"SCHED_RESET_ON_FORK",
"SCHED_RR",
"sched_rr_get_interval",
"sched_setaffinity",
"sched_setparam",
"sched_setscheduler",
"SCHED_SPORADIC",
"sched_yield",
"SEEK_CUR",
"SEEK_DATA",
"SEEK_END",
"SEEK_HOLE",
"SEEK_SET",
"sendfile",
"sep",
"set_blocking",
"set_inheritable",
"setegid",
"seteuid",
"setgid",
"setgroups",
"setpgid",
"setpgrp",
"setpriority",
"setregid",
"setresgid",
"setresuid",
"setreuid",
"setsid",
"setuid",
"setxattr",
"SF_MNOWAIT",
"SF_NODISKIO",
"SF_SYNC",
"spawnl",
"spawnle",
"spawnlp",
"spawnlpe",
"spawnv",
"spawnve",
"spawnvp",
"spawnvpe",
"st",
"ST_APPEND",
"ST_MANDLOCK",
"ST_NOATIME",
"ST_NODEV",
"ST_NODIRATIME",
"ST_NOEXEC",
"ST_NOSUID",
"ST_RDONLY",
"ST_RELATIME",
"ST_SYNCHRONOUS",
"ST_WRITE",
"startfile",
"stat",
"stat_result",
"statvfs",
"statvfs_result",
"strerror",
"supports_bytes_environ",
"supports_dir_fd",
"supports_effective_ids",
"supports_fd",
"supports_follow_symlinks",
"symlink",
"sync",
"sys",
"sysconf",
"sysconf_names",
"system",
"tcgetpgrp",
"tcsetpgrp",
"terminal_size",
"times",
"times_result",
"TMP_MAX",
"truncate",
"ttyname",
"umask",
"uname",
"uname_result",
"unlink",
"unsetenv",
"urandom",
"utime",
"W_OK",
"wait",
"wait3",
"wait4",
"waitid",
"waitid_result",
"waitpid",
"walk",
"WCONTINUED",
"WCOREDUMP",
"WEXITED",
"WEXITSTATUS",
"WIFCONTINUED",
"WIFEXITED",
"WIFSIGNALED",
"WIFSTOPPED",
"WNOHANG",
"WNOWAIT",
"write",
"writev",
"WSTOPPED",
"WSTOPSIG",
"WTERMSIG",
"WUNTRACED",
"X_OK",
"XATTR_CREATE",
"XATTR_REPLACE",
"XATTR_SIZE_MAX",
"_AddedDllDirectory",
"_check_methods",
"_Environ",
"_execvpe",
"_exists",
"_exit",
"_fspath",
"_fwalk",
"_get_exports_list",
"_spawnvef",
"_walk",
"_wrap_close",
"__all__",
"__doc__",
"__file__",
"__name__",
"__package__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_pre_08 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
return
return await super().render_logout_page()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os. | .get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self.http_client.fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self._deprecated_oauth_aliases)
)
super().__init__(**kwargs)
| coffeateam__coffea-casa |
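The OAuthenticator base class above leaves `login_service` and `authenticate` to subclasses. Below is a hypothetical minimal subclass sketch: it relies only on attributes defined in the base class (`client_id`, `client_secret`, `token_url`, `userdata_url`, `get_callback_url`, `fetch`), while the token-exchange parameters and the `preferred_username` field are provider-dependent placeholders, so treat it as an outline rather than a drop-in implementation.

from urllib.parse import urlencode
from tornado.httpclient import HTTPRequest
from oauthenticator.oauth2 import OAuthenticator

class ExampleOAuthenticator(OAuthenticator):
    login_service = "Example Provider"  # label shown on the JupyterHub login button

    async def authenticate(self, handler, data=None):
        code = handler.get_argument("code")
        # Exchange the authorization code for an access token
        params = dict(
            grant_type="authorization_code",
            code=code,
            client_id=self.client_id,
            client_secret=self.client_secret,
            redirect_uri=self.get_callback_url(handler),
        )
        req = HTTPRequest(
            self.token_url,
            method="POST",
            headers={
                "Accept": "application/json",
                "Content-Type": "application/x-www-form-urlencoded",
            },
            body=urlencode(params),
        )
        token_info = await self.fetch(req, label="exchanging oauth code")
        access_token = token_info["access_token"]
        # Ask the provider who the token belongs to
        req = HTTPRequest(
            self.userdata_url,
            headers={"Authorization": f"Bearer {access_token}"},
        )
        userinfo = await self.fetch(req, label="fetching user data")
        # 'preferred_username' is a placeholder; real providers name this field differently
        return {
            "name": userinfo["preferred_username"],
            "auth_state": {"access_token": access_token, "oauth_user": userinfo},
        }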
63 | 63-277-26 | commited | get | [
"clear",
"copy",
"decodekey",
"decodevalue",
"encodekey",
"encodevalue",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
"_data",
"__abc_tpflags__",
"__annotations__",
"__class__",
"__class_getitem__",
"__contains__",
"__delattr__",
"__delitem__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init__",
"__init_subclass__",
"__ior__",
"__iter__",
"__len__",
"__module__",
"__ne__",
"__new__",
"__or__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__ror__",
"__setattr__",
"__setitem__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_pre_08 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
return
return await super().render_logout_page()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ. | ("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self.http_client.fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self._deprecated_oauth_aliases)
)
super().__init__(**kwargs)
| coffeateam__coffea-casa |
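When `oauth_callback_url` is left empty, `get_callback_url` falls back to the module-level `guess_callback_uri` helper defined at the top of the file, which joins the request protocol and host with the hub base URL. A quick illustration, assuming the `oauthenticator` package that ships this module is installed and using a made-up host and base URL:

from oauthenticator.oauth2 import guess_callback_uri

print(guess_callback_uri("https", "hub.example.org", "/hub/"))
# -> https://hub.example.org/hub/oauth_callback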
63 | 63-286-18 | commited | environ | [
"abc",
"abort",
"access",
"add_dll_directory",
"altsep",
"chdir",
"chflags",
"chmod",
"chown",
"chroot",
"CLD_CONTINUED",
"CLD_DUMPED",
"CLD_EXITED",
"CLD_TRAPPED",
"close",
"closerange",
"confstr",
"confstr_names",
"cpu_count",
"ctermid",
"curdir",
"defpath",
"device_encoding",
"devnull",
"DirEntry",
"dup",
"dup2",
"environ",
"environb",
"error",
"EX_CANTCREAT",
"EX_CONFIG",
"EX_DATAERR",
"EX_IOERR",
"EX_NOHOST",
"EX_NOINPUT",
"EX_NOPERM",
"EX_NOTFOUND",
"EX_NOUSER",
"EX_OK",
"EX_OSERR",
"EX_OSFILE",
"EX_PROTOCOL",
"EX_SOFTWARE",
"EX_TEMPFAIL",
"EX_UNAVAILABLE",
"EX_USAGE",
"execl",
"execle",
"execlp",
"execlpe",
"execv",
"execve",
"execvp",
"execvpe",
"extsep",
"F_LOCK",
"F_OK",
"F_TEST",
"F_TLOCK",
"F_ULOCK",
"fchdir",
"fchmod",
"fchown",
"fdatasync",
"fdopen",
"fork",
"forkpty",
"fpathconf",
"fsdecode",
"fsencode",
"fspath",
"fstat",
"fstatvfs",
"fsync",
"ftruncate",
"fwalk",
"GenericAlias",
"get_blocking",
"get_exec_path",
"get_inheritable",
"get_terminal_size",
"getcwd",
"getcwdb",
"getegid",
"getenv",
"getenvb",
"geteuid",
"getgid",
"getgrouplist",
"getgroups",
"getloadavg",
"getlogin",
"getpgid",
"getpgrp",
"getpid",
"getppid",
"getpriority",
"getrandom",
"getresgid",
"getresuid",
"getsid",
"getuid",
"getxattr",
"GRND_NONBLOCK",
"GRND_RANDOM",
"initgroups",
"isatty",
"kill",
"killpg",
"lchflags",
"lchmod",
"lchown",
"linesep",
"link",
"listdir",
"listxattr",
"lockf",
"lseek",
"lstat",
"major",
"makedev",
"makedirs",
"Mapping",
"memfd_create",
"MFD_ALLOW_SEALING",
"MFD_CLOEXEC",
"MFD_HUGE_16GB",
"MFD_HUGE_16MB",
"MFD_HUGE_1GB",
"MFD_HUGE_1MB",
"MFD_HUGE_256MB",
"MFD_HUGE_2GB",
"MFD_HUGE_2MB",
"MFD_HUGE_32MB",
"MFD_HUGE_512KB",
"MFD_HUGE_512MB",
"MFD_HUGE_64KB",
"MFD_HUGE_8MB",
"MFD_HUGE_MASK",
"MFD_HUGE_SHIFT",
"MFD_HUGETLB",
"minor",
"mkdir",
"mkfifo",
"mknod",
"MutableMapping",
"name",
"NGROUPS_MAX",
"nice",
"O_ACCMODE",
"O_APPEND",
"O_ASYNC",
"O_BINARY",
"O_CLOEXEC",
"O_CREAT",
"O_DIRECT",
"O_DIRECTORY",
"O_DSYNC",
"O_EXCL",
"O_EXLOCK",
"O_LARGEFILE",
"O_NDELAY",
"O_NOATIME",
"O_NOCTTY",
"O_NOFOLLOW",
"O_NOINHERIT",
"O_NONBLOCK",
"O_PATH",
"O_RANDOM",
"O_RDONLY",
"O_RDWR",
"O_RSYNC",
"O_SEQUENTIAL",
"O_SHLOCK",
"O_SHORT_LIVED",
"O_SYNC",
"O_TEMPORARY",
"O_TEXT",
"O_TMPFILE",
"O_TRUNC",
"O_WRONLY",
"open",
"openpty",
"P_ALL",
"P_DETACH",
"P_NOWAIT",
"P_NOWAITO",
"P_OVERLAY",
"P_PGID",
"P_PID",
"P_WAIT",
"pardir",
"path",
"pathconf",
"pathconf_names",
"PathLike",
"pathsep",
"pipe",
"pipe2",
"plock",
"popen",
"POSIX_FADV_DONTNEED",
"POSIX_FADV_NOREUSE",
"POSIX_FADV_NORMAL",
"POSIX_FADV_RANDOM",
"POSIX_FADV_SEQUENTIAL",
"POSIX_FADV_WILLNEED",
"posix_fadvise",
"posix_fallocate",
"pread",
"PRIO_PGRP",
"PRIO_PROCESS",
"PRIO_USER",
"putenv",
"pwrite",
"R_OK",
"read",
"readlink",
"readv",
"register_at_fork",
"remove",
"removedirs",
"removexattr",
"rename",
"renames",
"replace",
"rmdir",
"RTLD_DEEPBIND",
"RTLD_GLOBAL",
"RTLD_LAZY",
"RTLD_LOCAL",
"RTLD_NODELETE",
"RTLD_NOLOAD",
"RTLD_NOW",
"scandir",
"SCHED_BATCH",
"SCHED_FIFO",
"sched_get_priority_max",
"sched_get_priority_min",
"sched_getaffinity",
"sched_getparam",
"sched_getscheduler",
"SCHED_IDLE",
"SCHED_OTHER",
"sched_param",
"SCHED_RESET_ON_FORK",
"SCHED_RR",
"sched_rr_get_interval",
"sched_setaffinity",
"sched_setparam",
"sched_setscheduler",
"SCHED_SPORADIC",
"sched_yield",
"SEEK_CUR",
"SEEK_DATA",
"SEEK_END",
"SEEK_HOLE",
"SEEK_SET",
"sendfile",
"sep",
"set_blocking",
"set_inheritable",
"setegid",
"seteuid",
"setgid",
"setgroups",
"setpgid",
"setpgrp",
"setpriority",
"setregid",
"setresgid",
"setresuid",
"setreuid",
"setsid",
"setuid",
"setxattr",
"SF_MNOWAIT",
"SF_NODISKIO",
"SF_SYNC",
"spawnl",
"spawnle",
"spawnlp",
"spawnlpe",
"spawnv",
"spawnve",
"spawnvp",
"spawnvpe",
"st",
"ST_APPEND",
"ST_MANDLOCK",
"ST_NOATIME",
"ST_NODEV",
"ST_NODIRATIME",
"ST_NOEXEC",
"ST_NOSUID",
"ST_RDONLY",
"ST_RELATIME",
"ST_SYNCHRONOUS",
"ST_WRITE",
"startfile",
"stat",
"stat_result",
"statvfs",
"statvfs_result",
"strerror",
"supports_bytes_environ",
"supports_dir_fd",
"supports_effective_ids",
"supports_fd",
"supports_follow_symlinks",
"symlink",
"sync",
"sys",
"sysconf",
"sysconf_names",
"system",
"tcgetpgrp",
"tcsetpgrp",
"terminal_size",
"times",
"times_result",
"TMP_MAX",
"truncate",
"ttyname",
"umask",
"uname",
"uname_result",
"unlink",
"unsetenv",
"urandom",
"utime",
"W_OK",
"wait",
"wait3",
"wait4",
"waitid",
"waitid_result",
"waitpid",
"walk",
"WCONTINUED",
"WCOREDUMP",
"WEXITED",
"WEXITSTATUS",
"WIFCONTINUED",
"WIFEXITED",
"WIFSIGNALED",
"WIFSTOPPED",
"WNOHANG",
"WNOWAIT",
"write",
"writev",
"WSTOPPED",
"WSTOPSIG",
"WTERMSIG",
"WUNTRACED",
"X_OK",
"XATTR_CREATE",
"XATTR_REPLACE",
"XATTR_SIZE_MAX",
"_AddedDllDirectory",
"_check_methods",
"_Environ",
"_execvpe",
"_exists",
"_exit",
"_fspath",
"_fwalk",
"_get_exports_list",
"_spawnvef",
"_walk",
"_wrap_close",
"__all__",
"__doc__",
"__file__",
"__name__",
"__package__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_pre_08 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
return
return await super().render_logout_page()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os. | .get("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self.http_client.fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self._deprecated_oauth_aliases)
)
super().__init__(**kwargs)
| coffeateam__coffea-casa |
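The `_deprecated_oauth_aliases` / `_deprecated_oauth_trait` machinery near the end of the class lets a subclass keep accepting a renamed config option: assigning the old trait logs a deprecation warning and forwards the value to the new trait. A hypothetical sketch, where the trait names and the version string are invented for illustration:

from traitlets import Unicode
from oauthenticator.oauth2 import OAuthenticator

class RenamedConfigAuthenticator(OAuthenticator):
    # old_name -> (new_name, version in which the rename happened)
    _deprecated_oauth_aliases = {"team_whitelist": ("allowed_teams", "0.12.0")}

    team_whitelist = Unicode(config=True, help="Deprecated alias for allowed_teams")
    allowed_teams = Unicode(config=True)

auth = RenamedConfigAuthenticator(team_whitelist="analysis-team")
# A warning is logged and the value is copied to the new name:
assert auth.allowed_teams == "analysis-team"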
63 | 63-286-26 | commited | get | [
"clear",
"copy",
"decodekey",
"decodevalue",
"encodekey",
"encodevalue",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
"_data",
"__abc_tpflags__",
"__annotations__",
"__class__",
"__class_getitem__",
"__contains__",
"__delattr__",
"__delitem__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init__",
"__init_subclass__",
"__ior__",
"__iter__",
"__len__",
"__module__",
"__ne__",
"__new__",
"__or__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__ror__",
"__setattr__",
"__setitem__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
        _login_user_pre_08 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
return
return await super().render_logout_page()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ. | ("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
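    # Illustrative configuration sketch (hypothetical values) for jupyterhub_config.py;
    # the same values can instead come from the environment variables referenced above:
    #   c.OAuthenticator.client_id = 'my-client-id'          # or OAUTH_CLIENT_ID
    #   c.OAuthenticator.client_secret = 'my-client-secret'  # or OAUTH_CLIENT_SECRET
    #   c.OAuthenticator.oauth_callback_url = 'https://hub.example.com/hub/oauth_callback'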
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self.http_client.fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
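    # Illustrative usage sketch (hypothetical endpoint): callers typically build a
    # tornado HTTPRequest (from tornado.httpclient) and pass it to this wrapper:
    #   req = HTTPRequest('https://provider.example.com/api/user',
    #                     headers={'Authorization': 'Bearer %s' % access_token})
    #   user_json = await self.fetch(req, label='fetching user data')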
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
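    # Minimal subclass sketch (hypothetical provider), per the class docstring:
    # override login_service and authenticate(), returning either a username or a
    # dict with at least a 'name' key:
    #   class MyOAuthenticator(OAuthenticator):
    #       login_service = 'My Provider'
    #       async def authenticate(self, handler, data=None):
    #           code = handler.get_argument('code')
    #           # ...exchange `code` at self.token_url, then query self.userdata_url...
    #           return {'name': username, 'auth_state': {'access_token': access_token}}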
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
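    # Illustrative alias sketch (hypothetical trait names): subclasses populate this
    # mapping as {old_name: (new_name, version)} so deprecated config still works,
    # emitting the warning above when the old name is set:
    #   _deprecated_oauth_aliases = {'old_trait_name': ('new_trait_name', '1.0.0')}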
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self._deprecated_oauth_aliases)
)
super().__init__(**kwargs)
| coffeateam__coffea-casa |
63 | 63-368-30 | infile | http_client | [
"add_traits",
"add_user",
"admin_users",
"allow_all",
"allow_existing_users",
"allowed_users",
"any_allow_config",
"auth_refresh_age",
"authenticate",
"authorize_url",
"auto_login",
"auto_login_oauth2_authorize",
"blocked_users",
"callback_handler",
"check_allow_config",
"check_allowed",
"check_blocked_users",
"class_config_rst_doc",
"class_config_section",
"class_get_help",
"class_get_trait_help",
"class_own_trait_events",
"class_own_traits",
"class_print_help",
"class_trait_names",
"class_traits",
"client_id",
"client_id_env",
"client_secret",
"client_secret_env",
"config",
"cross_validation_lock",
"custom_html",
"db",
"delete_invalid_users",
"delete_user",
"enable_auth_state",
"extra_authorize_params",
"fetch",
"get_authenticated_user",
"get_callback_url",
"get_custom_html",
"get_handlers",
"has_trait",
"hold_trait_notifications",
"http_client",
"is_admin",
"load_managed_roles",
"log",
"login_handler",
"login_service",
"login_url",
"logout_handler",
"logout_redirect_url",
"logout_url",
"manage_groups",
"manage_roles",
"normalize_username",
"notify_change",
"oauth_callback_url",
"observe",
"on_trait_change",
"otp_prompt",
"parent",
"post_auth_hook",
"post_spawn_stop",
"pre_spawn_start",
"refresh_pre_spawn",
"refresh_user",
"request_otp",
"reset_managed_roles_on_startup",
"run_post_auth_hook",
"scope",
"section_names",
"set_trait",
"setup_instance",
"token_url",
"trait_defaults",
"trait_events",
"trait_has_value",
"trait_metadata",
"trait_names",
"trait_values",
"traits",
"unobserve",
"unobserve_all",
"update_config",
"userdata_url",
"username_map",
"username_pattern",
"username_regex",
"validate_server_cert",
"validate_server_cert_env",
"validate_username",
"whitelist",
"_add_notifiers",
"_all_trait_default_generators",
"_allow_existing_users_default",
"_authorize_url_default",
"_check_allowed_users",
"_client_id_default",
"_client_secret_default",
"_config_changed",
"_cross_validation_lock",
"_default_any_allowed",
"_default_http_client",
"_defining_class",
"_deprecated_aliases",
"_deprecated_db",
"_deprecated_db_session",
"_deprecated_oauth_aliases",
"_deprecated_oauth_trait",
"_deprecated_trait",
"_find_my_config",
"_get_log_handler",
"_get_trait_default_generator",
"_init_deprecated_methods",
"_load_config",
"_log_default",
"_logout_redirect_url_default",
"_notify_observers",
"_notify_trait",
"_register_validator",
"_remove_notifiers",
"_static_immutable_initial_values",
"_token_url_default",
"_trait_notifiers",
"_trait_validators",
"_trait_values",
"_traits",
"_userdata_url_default",
"_username_pattern_changed",
"_validate_log",
"_validate_server_cert_default",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getstate__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__setstate__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_07 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
return
return await super().render_logout_page()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self. | .fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self._deprecated_oauth_aliases)
)
super().__init__(**kwargs)
| coffeateam__coffea-casa |
63 | 63-439-33 | commited | _deprecated_oauth_aliases | [
"add_traits",
"add_user",
"admin_users",
"allow_all",
"allow_existing_users",
"allowed_users",
"any_allow_config",
"auth_refresh_age",
"authenticate",
"authorize_url",
"auto_login",
"auto_login_oauth2_authorize",
"blocked_users",
"callback_handler",
"check_allow_config",
"check_allowed",
"check_blocked_users",
"class_config_rst_doc",
"class_config_section",
"class_get_help",
"class_get_trait_help",
"class_own_trait_events",
"class_own_traits",
"class_print_help",
"class_trait_names",
"class_traits",
"client_id",
"client_id_env",
"client_secret",
"client_secret_env",
"config",
"cross_validation_lock",
"custom_html",
"db",
"delete_invalid_users",
"delete_user",
"enable_auth_state",
"extra_authorize_params",
"fetch",
"get_authenticated_user",
"get_callback_url",
"get_custom_html",
"get_handlers",
"has_trait",
"hold_trait_notifications",
"http_client",
"is_admin",
"load_managed_roles",
"log",
"login_handler",
"login_service",
"login_url",
"logout_handler",
"logout_redirect_url",
"logout_url",
"manage_groups",
"manage_roles",
"normalize_username",
"notify_change",
"oauth_callback_url",
"observe",
"on_trait_change",
"otp_prompt",
"parent",
"post_auth_hook",
"post_spawn_stop",
"pre_spawn_start",
"refresh_pre_spawn",
"refresh_user",
"request_otp",
"reset_managed_roles_on_startup",
"run_post_auth_hook",
"scope",
"section_names",
"set_trait",
"setup_instance",
"token_url",
"trait_defaults",
"trait_events",
"trait_has_value",
"trait_metadata",
"trait_names",
"trait_values",
"traits",
"unobserve",
"unobserve_all",
"update_config",
"userdata_url",
"username_map",
"username_pattern",
"username_regex",
"validate_server_cert",
"validate_server_cert_env",
"validate_username",
"whitelist",
"_add_notifiers",
"_all_trait_default_generators",
"_allow_existing_users_default",
"_authorize_url_default",
"_check_allowed_users",
"_client_id_default",
"_client_secret_default",
"_config_changed",
"_cross_validation_lock",
"_default_any_allowed",
"_default_http_client",
"_defining_class",
"_deprecated_aliases",
"_deprecated_db",
"_deprecated_db_session",
"_deprecated_oauth_aliases",
"_deprecated_oauth_trait",
"_deprecated_trait",
"_find_my_config",
"_get_log_handler",
"_get_trait_default_generator",
"_init_deprecated_methods",
"_load_config",
"_log_default",
"_logout_redirect_url_default",
"_notify_observers",
"_notify_trait",
"_register_validator",
"_remove_notifiers",
"_static_immutable_initial_values",
"_token_url_default",
"_trait_notifiers",
"_trait_validators",
"_trait_values",
"_traits",
"_userdata_url_default",
"_username_pattern_changed",
"_validate_log",
"_validate_server_cert_default",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getstate__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__setstate__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_07 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
return
return await super().render_logout_page()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self.http_client.fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self. | .get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self._deprecated_oauth_aliases)
)
super().__init__(**kwargs)
| coffeateam__coffea-casa |
63 | 63-439-59 | commited | get | [
"clear",
"copy",
"fromkeys",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
"__annotations__",
"__class__",
"__class_getitem__",
"__contains__",
"__delattr__",
"__delitem__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init__",
"__init_subclass__",
"__ior__",
"__iter__",
"__len__",
"__module__",
"__ne__",
"__new__",
"__or__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__setattr__",
"__setitem__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_07 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
return
return await super().render_logout_page()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self.http_client.fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases. | (old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self._deprecated_oauth_aliases)
)
super().__init__(**kwargs)
| coffeateam__coffea-casa |
63 | 63-459-21 | infile | _deprecated_oauth_trait | [
"add_traits",
"add_user",
"admin_users",
"allow_all",
"allow_existing_users",
"allowed_users",
"any_allow_config",
"auth_refresh_age",
"authenticate",
"authorize_url",
"auto_login",
"auto_login_oauth2_authorize",
"blocked_users",
"callback_handler",
"check_allow_config",
"check_allowed",
"check_blocked_users",
"class_config_rst_doc",
"class_config_section",
"class_get_help",
"class_get_trait_help",
"class_own_trait_events",
"class_own_traits",
"class_print_help",
"class_trait_names",
"class_traits",
"client_id",
"client_id_env",
"client_secret",
"client_secret_env",
"config",
"cross_validation_lock",
"custom_html",
"db",
"delete_invalid_users",
"delete_user",
"enable_auth_state",
"extra_authorize_params",
"fetch",
"get_authenticated_user",
"get_callback_url",
"get_custom_html",
"get_handlers",
"has_trait",
"hold_trait_notifications",
"http_client",
"is_admin",
"load_managed_roles",
"log",
"login_handler",
"login_service",
"login_url",
"logout_handler",
"logout_redirect_url",
"logout_url",
"manage_groups",
"manage_roles",
"normalize_username",
"notify_change",
"oauth_callback_url",
"observe",
"on_trait_change",
"otp_prompt",
"parent",
"post_auth_hook",
"post_spawn_stop",
"pre_spawn_start",
"refresh_pre_spawn",
"refresh_user",
"request_otp",
"reset_managed_roles_on_startup",
"run_post_auth_hook",
"scope",
"section_names",
"set_trait",
"setup_instance",
"token_url",
"trait_defaults",
"trait_events",
"trait_has_value",
"trait_metadata",
"trait_names",
"trait_values",
"traits",
"unobserve",
"unobserve_all",
"update_config",
"userdata_url",
"username_map",
"username_pattern",
"username_regex",
"validate_server_cert",
"validate_server_cert_env",
"validate_username",
"whitelist",
"_add_notifiers",
"_all_trait_default_generators",
"_allow_existing_users_default",
"_authorize_url_default",
"_check_allowed_users",
"_client_id_default",
"_client_secret_default",
"_config_changed",
"_cross_validation_lock",
"_default_any_allowed",
"_default_http_client",
"_defining_class",
"_deprecated_aliases",
"_deprecated_db",
"_deprecated_db_session",
"_deprecated_oauth_aliases",
"_deprecated_oauth_trait",
"_deprecated_trait",
"_find_my_config",
"_get_log_handler",
"_get_trait_default_generator",
"_init_deprecated_methods",
"_load_config",
"_log_default",
"_logout_redirect_url_default",
"_notify_observers",
"_notify_trait",
"_register_validator",
"_remove_notifiers",
"_static_immutable_initial_values",
"_token_url_default",
"_trait_notifiers",
"_trait_validators",
"_trait_values",
"_traits",
"_userdata_url_default",
"_username_pattern_changed",
"_validate_log",
"_validate_server_cert_default",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getstate__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__setstate__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_07 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
return
return await super().render_logout_page()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self.http_client.fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self. | , names=list(self._deprecated_oauth_aliases)
)
super().__init__(**kwargs)
| coffeateam__coffea-casa |
63 | 63-459-62 | infile | _deprecated_oauth_aliases | [
"add_traits",
"add_user",
"admin_users",
"allow_all",
"allow_existing_users",
"allowed_users",
"any_allow_config",
"auth_refresh_age",
"authenticate",
"authorize_url",
"auto_login",
"auto_login_oauth2_authorize",
"blocked_users",
"callback_handler",
"check_allow_config",
"check_allowed",
"check_blocked_users",
"class_config_rst_doc",
"class_config_section",
"class_get_help",
"class_get_trait_help",
"class_own_trait_events",
"class_own_traits",
"class_print_help",
"class_trait_names",
"class_traits",
"client_id",
"client_id_env",
"client_secret",
"client_secret_env",
"config",
"cross_validation_lock",
"custom_html",
"db",
"delete_invalid_users",
"delete_user",
"enable_auth_state",
"extra_authorize_params",
"fetch",
"get_authenticated_user",
"get_callback_url",
"get_custom_html",
"get_handlers",
"has_trait",
"hold_trait_notifications",
"http_client",
"is_admin",
"load_managed_roles",
"log",
"login_handler",
"login_service",
"login_url",
"logout_handler",
"logout_redirect_url",
"logout_url",
"manage_groups",
"manage_roles",
"normalize_username",
"notify_change",
"oauth_callback_url",
"observe",
"on_trait_change",
"otp_prompt",
"parent",
"post_auth_hook",
"post_spawn_stop",
"pre_spawn_start",
"refresh_pre_spawn",
"refresh_user",
"request_otp",
"reset_managed_roles_on_startup",
"run_post_auth_hook",
"scope",
"section_names",
"set_trait",
"setup_instance",
"token_url",
"trait_defaults",
"trait_events",
"trait_has_value",
"trait_metadata",
"trait_names",
"trait_values",
"traits",
"unobserve",
"unobserve_all",
"update_config",
"userdata_url",
"username_map",
"username_pattern",
"username_regex",
"validate_server_cert",
"validate_server_cert_env",
"validate_username",
"whitelist",
"_add_notifiers",
"_all_trait_default_generators",
"_allow_existing_users_default",
"_authorize_url_default",
"_check_allowed_users",
"_client_id_default",
"_client_secret_default",
"_config_changed",
"_cross_validation_lock",
"_default_any_allowed",
"_default_http_client",
"_defining_class",
"_deprecated_aliases",
"_deprecated_db",
"_deprecated_db_session",
"_deprecated_oauth_aliases",
"_deprecated_oauth_trait",
"_deprecated_trait",
"_find_my_config",
"_get_log_handler",
"_get_trait_default_generator",
"_init_deprecated_methods",
"_load_config",
"_log_default",
"_logout_redirect_url_default",
"_notify_observers",
"_notify_trait",
"_register_validator",
"_remove_notifiers",
"_static_immutable_initial_values",
"_token_url_default",
"_trait_notifiers",
"_trait_validators",
"_trait_values",
"_traits",
"_userdata_url_default",
"_username_pattern_changed",
"_validate_log",
"_validate_server_cert_default",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getstate__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__setstate__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_07 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
return
return await super().render_logout_page()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self.http_client.fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self. | )
)
super().__init__(**kwargs)
| coffeateam__coffea-casa |
63 | 63-461-16 | commited | __init__ | [
"add_traits",
"add_user",
"admin_users",
"allow_all",
"allow_existing_users",
"allowed_users",
"any_allow_config",
"auth_refresh_age",
"authenticate",
"auto_login",
"auto_login_oauth2_authorize",
"blocked_users",
"check_allow_config",
"check_allowed",
"check_blocked_users",
"class_config_rst_doc",
"class_config_section",
"class_get_help",
"class_get_trait_help",
"class_own_trait_events",
"class_own_traits",
"class_print_help",
"class_trait_names",
"class_traits",
"config",
"cross_validation_lock",
"custom_html",
"db",
"delete_invalid_users",
"delete_user",
"enable_auth_state",
"get_authenticated_user",
"get_custom_html",
"get_handlers",
"has_trait",
"hold_trait_notifications",
"is_admin",
"load_managed_roles",
"log",
"login_service",
"login_url",
"logout_url",
"manage_groups",
"manage_roles",
"normalize_username",
"notify_change",
"observe",
"on_trait_change",
"otp_prompt",
"parent",
"post_auth_hook",
"post_spawn_stop",
"pre_spawn_start",
"refresh_pre_spawn",
"refresh_user",
"request_otp",
"reset_managed_roles_on_startup",
"run_post_auth_hook",
"section_names",
"set_trait",
"setup_instance",
"trait_defaults",
"trait_events",
"trait_has_value",
"trait_metadata",
"trait_names",
"trait_values",
"traits",
"unobserve",
"unobserve_all",
"update_config",
"username_map",
"username_pattern",
"username_regex",
"validate_username",
"whitelist",
"_add_notifiers",
"_all_trait_default_generators",
"_allow_existing_users_default",
"_check_allowed_users",
"_config_changed",
"_cross_validation_lock",
"_default_any_allowed",
"_defining_class",
"_deprecated_aliases",
"_deprecated_db",
"_deprecated_db_session",
"_deprecated_trait",
"_find_my_config",
"_get_log_handler",
"_get_trait_default_generator",
"_init_deprecated_methods",
"_load_config",
"_log_default",
"_notify_observers",
"_notify_trait",
"_register_validator",
"_remove_notifiers",
"_static_immutable_initial_values",
"_trait_notifiers",
"_trait_validators",
"_trait_values",
"_traits",
"_username_pattern_changed",
"_validate_log",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__getstate__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__setstate__",
"__sizeof__",
"__slots__",
"__str__"
] | """
Base classes for Custom Authenticator to use OAuth with JupyterHub
Most of the code c/o Kyle Kelley (@rgbkrk)
"""
import base64
import json
import os
import uuid
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from jupyterhub.auth import Authenticator
from jupyterhub.handlers import BaseHandler
from jupyterhub.handlers import LogoutHandler
from jupyterhub.utils import url_path_join
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClientError
from tornado.log import app_log
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
def guess_callback_uri(protocol, host, hub_server_url):
return '{proto}://{host}{path}'.format(
proto=protocol, host=host, path=url_path_join(hub_server_url, 'oauth_callback')
)
STATE_COOKIE_NAME = 'oauthenticator-state'
def _serialize_state(state):
"""Serialize OAuth state to a base64 string after passing through JSON"""
json_state = json.dumps(state)
return base64.urlsafe_b64encode(json_state.encode('utf8')).decode('ascii')
def _deserialize_state(b64_state):
"""Deserialize OAuth state as serialized in _serialize_state"""
if isinstance(b64_state, str):
b64_state = b64_state.encode('ascii')
try:
json_state = base64.urlsafe_b64decode(b64_state).decode('utf8')
except ValueError:
app_log.error("Failed to b64-decode state: %r", b64_state)
return {}
try:
return json.loads(json_state)
except ValueError:
app_log.error("Failed to json-decode state: %r", json_state)
return {}
class OAuthLoginHandler(OAuth2Mixin, BaseHandler):
"""Base class for OAuth login handler
Typically subclasses will need
"""
# these URLs are part of the OAuth2Mixin API
# get them from the Authenticator object
@property
def _OAUTH_AUTHORIZE_URL(self):
return self.authenticator.authorize_url
@property
def _OAUTH_ACCESS_TOKEN_URL(self):
return self.authenticator.token_url
@property
def _OAUTH_USERINFO_URL(self):
return self.authenticator.userdata_url
def set_state_cookie(self, state):
self._set_cookie(STATE_COOKIE_NAME, state, expires_days=1, httponly=True)
_state = None
def get_state(self):
next_url = original_next_url = self.get_argument('next', None)
if next_url:
# avoid browsers treating \ as /
next_url = next_url.replace('\\', quote('\\'))
# disallow hostname-having urls,
# force absolute path redirect
urlinfo = urlparse(next_url)
next_url = urlinfo._replace(
scheme='', netloc='', path='/' + urlinfo.path.lstrip('/')
).geturl()
if next_url != original_next_url:
self.log.warning(
"Ignoring next_url %r, using %r", original_next_url, next_url
)
if self._state is None:
self._state = _serialize_state(
{'state_id': uuid.uuid4().hex, 'next_url': next_url}
)
return self._state
def get(self):
redirect_uri = self.authenticator.get_callback_url(self)
extra_params = self.authenticator.extra_authorize_params.copy()
self.log.info('OAuth redirect: %r', redirect_uri)
state = self.get_state()
self.set_state_cookie(state)
extra_params['state'] = state
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=self.authenticator.scope,
extra_params=extra_params,
response_type='code',
)
class OAuthCallbackHandler(BaseHandler):
"""Basic handler for OAuth callback. Calls authenticator to verify username."""
_state_cookie = None
def get_state_cookie(self):
"""Get OAuth state from cookies
To be compared with the value in redirect URL
"""
if self._state_cookie is None:
self._state_cookie = (
self.get_secure_cookie(STATE_COOKIE_NAME) or b''
).decode('utf8', 'replace')
self.clear_cookie(STATE_COOKIE_NAME)
return self._state_cookie
def get_state_url(self):
"""Get OAuth state from URL parameters
to be compared with the value in cookies
"""
return self.get_argument("state")
def check_state(self):
"""Verify OAuth state
compare value in cookie with redirect url param
"""
cookie_state = self.get_state_cookie()
url_state = self.get_state_url()
if not cookie_state:
raise web.HTTPError(400, "OAuth state missing from cookies")
if not url_state:
raise web.HTTPError(400, "OAuth state missing from URL")
if cookie_state != url_state:
self.log.warning("OAuth state mismatch: %s != %s", cookie_state, url_state)
raise web.HTTPError(400, "OAuth state mismatch")
def check_error(self):
"""Check the OAuth code"""
error = self.get_argument("error", False)
if error:
message = self.get_argument("error_description", error)
raise web.HTTPError(400, "OAuth error: %s" % message)
def check_code(self):
"""Check the OAuth code"""
if not self.get_argument("code", False):
raise web.HTTPError(400, "OAuth callback made without a code")
def check_arguments(self):
"""Validate the arguments of the redirect
Default:
- check for oauth-standard error, error_description arguments
- check that there's a code
- check that state matches
"""
self.check_error()
self.check_code()
self.check_state()
def append_query_parameters(self, url, exclude=None):
"""JupyterHub 1.2 appends query parameters by default in get_next_url
This is not appropriate for oauth callback handlers, where params are oauth state, code, etc.
Override the method used to append parameters to next_url to not preserve any parameters
"""
return url
def get_next_url(self, user=None):
"""Get the redirect target from the state field"""
state = self.get_state_url()
if state:
next_url = _deserialize_state(state).get('next_url')
if next_url:
return next_url
# JupyterHub 0.8 adds default .get_next_url for a fallback
if hasattr(BaseHandler, 'get_next_url'):
return super().get_next_url(user)
return url_path_join(self.hub.server.base_url, 'home')
async def _login_user_pre_08(self):
"""login_user simplifies the login+cookie+auth_state process in JupyterHub 0.8
_login_user_07 is for backward-compatibility with JupyterHub 0.7
"""
user_info = await self.authenticator.get_authenticated_user(self, None)
if user_info is None:
return
if isinstance(user_info, dict):
username = user_info['name']
else:
username = user_info
user = self.user_from_username(username)
self.set_login_cookie(user)
return user
if not hasattr(BaseHandler, 'login_user'):
# JupyterHub 0.7 doesn't have .login_user
login_user = _login_user_pre_08
async def get(self):
self.check_arguments()
user = await self.login_user()
if user is None:
# todo: custom error page?
raise web.HTTPError(403)
self.redirect(self.get_next_url(user))
class OAuthLogoutHandler(LogoutHandler):
async def handle_logout(self):
self.clear_cookie(STATE_COOKIE_NAME)
async def render_logout_page(self):
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
return
return await super().render_logout_page()
class OAuthenticator(Authenticator):
"""Base class for OAuthenticators
Subclasses must override:
login_service (string identifying the service provider)
authenticate (method takes one arg - the request handler handling the oauth callback)
"""
login_handler = OAuthLoginHandler
callback_handler = OAuthCallbackHandler
logout_handler = OAuthLogoutHandler
authorize_url = Unicode(
config=True, help="""The authenticate url for initiating oauth"""
)
@default("authorize_url")
def _authorize_url_default(self):
return os.environ.get("OAUTH2_AUTHORIZE_URL", "")
token_url = Unicode(
config=True,
help="""The url retrieving an access token at the completion of oauth""",
)
@default("token_url")
def _token_url_default(self):
return os.environ.get("OAUTH2_TOKEN_URL", "")
userdata_url = Unicode(
config=True,
help="""The url for retrieving user data with a completed access token""",
)
@default("userdata_url")
def _userdata_url_default(self):
return os.environ.get("OAUTH2_USERDATA_URL", "")
logout_redirect_url = Unicode(config=True, help="""URL for logging out of Auth0""")
@default("logout_redirect_url")
def _logout_redirect_url_default(self):
return os.getenv("OAUTH_LOGOUT_REDIRECT_URL", "")
scope = List(
Unicode(),
config=True,
help="""The OAuth scopes to request.
See the OAuth documentation of your OAuth provider for options.
For GitHub in particular, you can see github_scopes.md in this repo.
""",
)
extra_authorize_params = Dict(
config=True,
help="""Extra GET params to send along with the initial OAuth request
to the OAuth provider.""",
)
login_service = 'override in subclass'
oauth_callback_url = Unicode(
os.getenv('OAUTH_CALLBACK_URL', ''),
config=True,
help="""Callback URL to use.
Typically `https://{host}/hub/oauth_callback`""",
)
client_id_env = ''
client_id = Unicode(config=True)
def _client_id_default(self):
if self.client_id_env:
client_id = os.getenv(self.client_id_env, '')
if client_id:
return client_id
return os.getenv('OAUTH_CLIENT_ID', '')
client_secret_env = ''
client_secret = Unicode(config=True)
def _client_secret_default(self):
if self.client_secret_env:
client_secret = os.getenv(self.client_secret_env, '')
if client_secret:
return client_secret
return os.getenv('OAUTH_CLIENT_SECRET', '')
validate_server_cert_env = 'OAUTH_TLS_VERIFY'
validate_server_cert = Bool(config=True)
def _validate_server_cert_default(self):
env_value = os.getenv(self.validate_server_cert_env, '')
if env_value == '0':
return False
else:
return True
http_client = Any()
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient()
async def fetch(self, req, label="fetching", parse_json=True, **kwargs):
"""Wrapper for http requests
logs error responses, parses successful JSON responses
Args:
req: tornado HTTPRequest
label (str): label describing what is happening,
used in log message when the request fails.
**kwargs: remaining keyword args
passed to underlying `client.fetch(req, **kwargs)`
Returns:
r: parsed JSON response
"""
try:
resp = await self.http_client.fetch(req, **kwargs)
except HTTPClientError as e:
if e.response:
# Log failed response message for debugging purposes
message = e.response.body.decode("utf8", "replace")
try:
# guess json, reformat for readability
json_message = json.loads(message)
except ValueError:
# not json
pass
else:
# reformat json log message for readability
message = json.dumps(json_message, sort_keys=True, indent=1)
else:
# didn't get a response, e.g. connection error
message = str(e)
# log url without query params
url = urlunparse(urlparse(req.url)._replace(query=""))
app_log.error(f"Error {label} {e.code} {req.method} {url}: {message}")
raise e
else:
if parse_json:
if resp.body:
return json.loads(resp.body.decode('utf8', 'replace'))
else:
# empty body is None
return None
else:
return resp
def login_url(self, base_url):
return url_path_join(base_url, 'oauth_login')
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_callback_url(self, handler=None):
"""Get my OAuth redirect URL
Either from config or guess based on the current request.
"""
if self.oauth_callback_url:
return self.oauth_callback_url
elif handler:
return guess_callback_uri(
handler.request.protocol,
handler.request.host,
handler.hub.server.base_url,
)
else:
raise ValueError(
"Specify callback oauth_callback_url or give me a handler to guess with"
)
def get_handlers(self, app):
return [
(r'/oauth_login', self.login_handler),
(r'/oauth_callback', self.callback_handler),
(r'/logout', self.logout_handler),
]
async def authenticate(self, handler, data=None):
raise NotImplementedError()
_deprecated_oauth_aliases = {}
def _deprecated_oauth_trait(self, change):
"""observer for deprecated traits"""
old_attr = change.name
new_attr, version = self._deprecated_oauth_aliases.get(old_attr)
new_value = getattr(self, new_attr)
if new_value != change.new:
# only warn if different
# protects backward-compatible config from warnings
# if they set the same value under both names
self.log.warning(
"{cls}.{old} is deprecated in {cls} {version}, use {cls}.{new} instead".format(
cls=self.__class__.__name__,
old=old_attr,
new=new_attr,
version=version,
)
)
setattr(self, new_attr, change.new)
def __init__(self, **kwargs):
# observe deprecated config names in oauthenticator
if self._deprecated_oauth_aliases:
self.observe(
self._deprecated_oauth_trait, names=list(self._deprecated_oauth_aliases)
)
super(). | (**kwargs)
| coffeateam__coffea-casa |
70 | 70-107-10 | random | set_text | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"text_splitter",
"_add_document_to_index",
"_docstore",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Test recursive queries."""
from typing import Any, Dict, List, Tuple
from unittest.mock import patch
import pytest
from gpt_index.indices.data_structs import IndexStructType
from gpt_index.indices.keyword_table.simple_base import GPTSimpleKeywordTableIndex
from gpt_index.indices.list.base import GPTListIndex
from gpt_index.indices.query.schema import QueryConfig, QueryMode
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.schema import Document
from tests.mock_utils.mock_predict import mock_openai_llm_predict
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_PROMPT,
MOCK_REFINE_PROMPT,
MOCK_SUMMARY_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
from tests.mock_utils.mock_text_splitter import mock_token_splitter_newline
@pytest.fixture
def struct_kwargs() -> Tuple[Dict, List]:
"""Index kwargs."""
index_kwargs = {
"tree": {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
},
"list": {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
},
"table": {
"keyword_extract_template": MOCK_KEYWORD_EXTRACT_PROMPT,
},
}
query_configs = [
QueryConfig(
index_struct_type=IndexStructType.TREE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_template": MOCK_QUERY_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.LIST,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.KEYWORD_TABLE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_keyword_extract_template": MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
]
return index_kwargs, query_configs
@pytest.fixture
def documents() -> List[Document]:
"""Get documents."""
docs = [
Document("This is a test v2."),
Document("This is another test."),
Document("This is a test."),
Document("Hello world."),
Document("Hello world."),
Document("This is a test."),
Document("This is another test."),
Document("This is a test v2."),
]
return docs
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_tree(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1. | ("summary1")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("summary2")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("summary3")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("summary4")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
tree = GPTTreeIndex(
[
list1,
list2,
list3,
list4,
],
**tree_kwargs
)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = tree.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("What is?:This is a test v2.")
# Also test a non-recursive query. This should not go down into the list
tree_query_kwargs = query_configs[0].query_kwargs
response = tree.query(query_str, mode="default", **tree_query_kwargs)
assert response == ("What is?:summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_tree_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
tree1 = GPTTreeIndex(documents[2:6], **tree_kwargs)
tree2 = GPTTreeIndex(documents[:2] + documents[6:], **tree_kwargs)
tree1.set_text("tree_summary1")
tree2.set_text("tree_summary2")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
list_index = GPTListIndex([tree1, tree2], **list_kwargs)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("What is?:This is a test.")
# Also test a non-recursive query. This should not go down into the list
list_query_kwargs = query_configs[1].query_kwargs
response = list_index.query(query_str, mode="default", **list_query_kwargs)
assert response == ("What is?:tree_summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_table_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
table1 = GPTSimpleKeywordTableIndex(documents[4:6], **table_kwargs)
table2 = GPTSimpleKeywordTableIndex(documents[2:3], **table_kwargs)
table1.set_text("table_summary1")
table2.set_text("table_summary2")
list_index = GPTListIndex([table1, table2], **list_kwargs)
query_str = "World?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("World?:Hello world.")
query_str = "Test?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("Test?:This is a test.")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_table(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("foo bar")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("apple orange")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("toronto london")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("cat dog")
table = GPTSimpleKeywordTableIndex([list1, list2, list3, list4], **table_kwargs)
query_str = "Foo?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Foo?:This is a test v2.")
query_str = "Orange?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Orange?:This is a test.")
query_str = "Cat?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Cat?:This is another test.")
| jerryjliu__llama_index |
70 | 70-129-20 | inproject | query | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"insert_prompt",
"load_from_disk",
"num_children",
"query",
"save_to_disk",
"set_text",
"summary_template",
"_docstore",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Test recursive queries."""
from typing import Any, Dict, List, Tuple
from unittest.mock import patch
import pytest
from gpt_index.indices.data_structs import IndexStructType
from gpt_index.indices.keyword_table.simple_base import GPTSimpleKeywordTableIndex
from gpt_index.indices.list.base import GPTListIndex
from gpt_index.indices.query.schema import QueryConfig, QueryMode
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.schema import Document
from tests.mock_utils.mock_predict import mock_openai_llm_predict
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_PROMPT,
MOCK_REFINE_PROMPT,
MOCK_SUMMARY_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
from tests.mock_utils.mock_text_splitter import mock_token_splitter_newline
@pytest.fixture
def struct_kwargs() -> Tuple[Dict, List]:
"""Index kwargs."""
index_kwargs = {
"tree": {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
},
"list": {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
},
"table": {
"keyword_extract_template": MOCK_KEYWORD_EXTRACT_PROMPT,
},
}
query_configs = [
QueryConfig(
index_struct_type=IndexStructType.TREE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_template": MOCK_QUERY_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.LIST,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.KEYWORD_TABLE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_keyword_extract_template": MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
]
return index_kwargs, query_configs
@pytest.fixture
def documents() -> List[Document]:
"""Get documents."""
docs = [
Document("This is a test v2."),
Document("This is another test."),
Document("This is a test."),
Document("Hello world."),
Document("Hello world."),
Document("This is a test."),
Document("This is another test."),
Document("This is a test v2."),
]
return docs
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_tree(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("summary1")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("summary2")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("summary3")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("summary4")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
tree = GPTTreeIndex(
[
list1,
list2,
list3,
list4,
],
**tree_kwargs
)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = tree. | (query_str, mode="recursive", query_configs=query_configs)
assert response == ("What is?:This is a test v2.")
# Also test a non-recursive query. This should not go down into the list
tree_query_kwargs = query_configs[0].query_kwargs
response = tree.query(query_str, mode="default", **tree_query_kwargs)
assert response == ("What is?:summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_tree_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
tree1 = GPTTreeIndex(documents[2:6], **tree_kwargs)
tree2 = GPTTreeIndex(documents[:2] + documents[6:], **tree_kwargs)
tree1.set_text("tree_summary1")
tree2.set_text("tree_summary2")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
list_index = GPTListIndex([tree1, tree2], **list_kwargs)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("What is?:This is a test.")
# Also test a non-recursive query. This should not go down into the list
list_query_kwargs = query_configs[1].query_kwargs
response = list_index.query(query_str, mode="default", **list_query_kwargs)
assert response == ("What is?:tree_summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_table_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
table1 = GPTSimpleKeywordTableIndex(documents[4:6], **table_kwargs)
table2 = GPTSimpleKeywordTableIndex(documents[2:3], **table_kwargs)
table1.set_text("table_summary1")
table2.set_text("table_summary2")
list_index = GPTListIndex([table1, table2], **list_kwargs)
query_str = "World?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("World?:Hello world.")
query_str = "Test?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("Test?:This is a test.")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_table(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("foo bar")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("apple orange")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("toronto london")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("cat dog")
table = GPTSimpleKeywordTableIndex([list1, list2, list3, list4], **table_kwargs)
query_str = "Foo?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Foo?:This is a test v2.")
query_str = "Orange?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Orange?:This is a test.")
query_str = "Cat?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Cat?:This is another test.")
| jerryjliu__llama_index |
70 | 70-134-20 | inproject | query | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"insert_prompt",
"load_from_disk",
"num_children",
"query",
"save_to_disk",
"set_text",
"summary_template",
"_docstore",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Test recursive queries."""
from typing import Any, Dict, List, Tuple
from unittest.mock import patch
import pytest
from gpt_index.indices.data_structs import IndexStructType
from gpt_index.indices.keyword_table.simple_base import GPTSimpleKeywordTableIndex
from gpt_index.indices.list.base import GPTListIndex
from gpt_index.indices.query.schema import QueryConfig, QueryMode
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.schema import Document
from tests.mock_utils.mock_predict import mock_openai_llm_predict
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_PROMPT,
MOCK_REFINE_PROMPT,
MOCK_SUMMARY_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
from tests.mock_utils.mock_text_splitter import mock_token_splitter_newline
@pytest.fixture
def struct_kwargs() -> Tuple[Dict, List]:
"""Index kwargs."""
index_kwargs = {
"tree": {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
},
"list": {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
},
"table": {
"keyword_extract_template": MOCK_KEYWORD_EXTRACT_PROMPT,
},
}
query_configs = [
QueryConfig(
index_struct_type=IndexStructType.TREE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_template": MOCK_QUERY_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.LIST,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.KEYWORD_TABLE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_keyword_extract_template": MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
]
return index_kwargs, query_configs
@pytest.fixture
def documents() -> List[Document]:
"""Get documents."""
docs = [
Document("This is a test v2."),
Document("This is another test."),
Document("This is a test."),
Document("Hello world."),
Document("Hello world."),
Document("This is a test."),
Document("This is another test."),
Document("This is a test v2."),
]
return docs
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_tree(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("summary1")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("summary2")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("summary3")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("summary4")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
tree = GPTTreeIndex(
[
list1,
list2,
list3,
list4,
],
**tree_kwargs
)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = tree.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("What is?:This is a test v2.")
# Also test a non-recursive query. This should not go down into the list
tree_query_kwargs = query_configs[0].query_kwargs
response = tree. | (query_str, mode="default", **tree_query_kwargs)
assert response == ("What is?:summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_tree_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
tree1 = GPTTreeIndex(documents[2:6], **tree_kwargs)
tree2 = GPTTreeIndex(documents[:2] + documents[6:], **tree_kwargs)
tree1.set_text("tree_summary1")
tree2.set_text("tree_summary2")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
list_index = GPTListIndex([tree1, tree2], **list_kwargs)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("What is?:This is a test.")
# Also test a non-recursive query. This should not go down into the list
list_query_kwargs = query_configs[1].query_kwargs
response = list_index.query(query_str, mode="default", **list_query_kwargs)
assert response == ("What is?:tree_summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_table_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
table1 = GPTSimpleKeywordTableIndex(documents[4:6], **table_kwargs)
table2 = GPTSimpleKeywordTableIndex(documents[2:3], **table_kwargs)
table1.set_text("table_summary1")
table2.set_text("table_summary2")
list_index = GPTListIndex([table1, table2], **list_kwargs)
query_str = "World?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("World?:Hello world.")
query_str = "Test?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("Test?:This is a test.")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_table(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("foo bar")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("apple orange")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("toronto london")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("cat dog")
table = GPTSimpleKeywordTableIndex([list1, list2, list3, list4], **table_kwargs)
query_str = "Foo?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Foo?:This is a test v2.")
query_str = "Orange?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Orange?:This is a test.")
query_str = "Cat?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Cat?:This is another test.")
| jerryjliu__llama_index |
70 | 70-139-7 | inproject | object | [
"dict",
"multiple",
"object",
"stopall",
"TEST_PREFIX",
"__annotations__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Test recursive queries."""
from typing import Any, Dict, List, Tuple
from unittest.mock import patch
import pytest
from gpt_index.indices.data_structs import IndexStructType
from gpt_index.indices.keyword_table.simple_base import GPTSimpleKeywordTableIndex
from gpt_index.indices.list.base import GPTListIndex
from gpt_index.indices.query.schema import QueryConfig, QueryMode
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.schema import Document
from tests.mock_utils.mock_predict import mock_openai_llm_predict
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_PROMPT,
MOCK_REFINE_PROMPT,
MOCK_SUMMARY_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
from tests.mock_utils.mock_text_splitter import mock_token_splitter_newline
@pytest.fixture
def struct_kwargs() -> Tuple[Dict, List]:
"""Index kwargs."""
index_kwargs = {
"tree": {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
},
"list": {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
},
"table": {
"keyword_extract_template": MOCK_KEYWORD_EXTRACT_PROMPT,
},
}
query_configs = [
QueryConfig(
index_struct_type=IndexStructType.TREE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_template": MOCK_QUERY_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.LIST,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.KEYWORD_TABLE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_keyword_extract_template": MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
]
return index_kwargs, query_configs
@pytest.fixture
def documents() -> List[Document]:
"""Get documents."""
docs = [
Document("This is a test v2."),
Document("This is another test."),
Document("This is a test."),
Document("Hello world."),
Document("Hello world."),
Document("This is a test."),
Document("This is another test."),
Document("This is a test v2."),
]
return docs
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_tree(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("summary1")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("summary2")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("summary3")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("summary4")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
tree = GPTTreeIndex(
[
list1,
list2,
list3,
list4,
],
**tree_kwargs
)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = tree.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("What is?:This is a test v2.")
# Also test a non-recursive query. This should not go down into the list
tree_query_kwargs = query_configs[0].query_kwargs
response = tree.query(query_str, mode="default", **tree_query_kwargs)
assert response == ("What is?:summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch. | (LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_tree_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
tree1 = GPTTreeIndex(documents[2:6], **tree_kwargs)
tree2 = GPTTreeIndex(documents[:2] + documents[6:], **tree_kwargs)
tree1.set_text("tree_summary1")
tree2.set_text("tree_summary2")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
list_index = GPTListIndex([tree1, tree2], **list_kwargs)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("What is?:This is a test.")
# Also test a non-recursive query. This should not go down into the list
list_query_kwargs = query_configs[1].query_kwargs
response = list_index.query(query_str, mode="default", **list_query_kwargs)
assert response == ("What is?:tree_summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_table_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
table1 = GPTSimpleKeywordTableIndex(documents[4:6], **table_kwargs)
table2 = GPTSimpleKeywordTableIndex(documents[2:3], **table_kwargs)
table1.set_text("table_summary1")
table2.set_text("table_summary2")
list_index = GPTListIndex([table1, table2], **list_kwargs)
query_str = "World?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("World?:Hello world.")
query_str = "Test?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("Test?:This is a test.")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_table(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("foo bar")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("apple orange")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("toronto london")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("cat dog")
table = GPTSimpleKeywordTableIndex([list1, list2, list3, list4], **table_kwargs)
query_str = "Foo?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Foo?:This is a test v2.")
query_str = "Orange?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Orange?:This is a test.")
query_str = "Cat?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Cat?:This is another test.")
| jerryjliu__llama_index |
70 | 70-165-26 | inproject | query | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"text_splitter",
"_add_document_to_index",
"_docstore",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Test recursive queries."""
from typing import Any, Dict, List, Tuple
from unittest.mock import patch
import pytest
from gpt_index.indices.data_structs import IndexStructType
from gpt_index.indices.keyword_table.simple_base import GPTSimpleKeywordTableIndex
from gpt_index.indices.list.base import GPTListIndex
from gpt_index.indices.query.schema import QueryConfig, QueryMode
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.schema import Document
from tests.mock_utils.mock_predict import mock_openai_llm_predict
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_PROMPT,
MOCK_REFINE_PROMPT,
MOCK_SUMMARY_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
from tests.mock_utils.mock_text_splitter import mock_token_splitter_newline
@pytest.fixture
def struct_kwargs() -> Tuple[Dict, List]:
"""Index kwargs."""
index_kwargs = {
"tree": {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
},
"list": {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
},
"table": {
"keyword_extract_template": MOCK_KEYWORD_EXTRACT_PROMPT,
},
}
query_configs = [
QueryConfig(
index_struct_type=IndexStructType.TREE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_template": MOCK_QUERY_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.LIST,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.KEYWORD_TABLE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_keyword_extract_template": MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
]
return index_kwargs, query_configs
@pytest.fixture
def documents() -> List[Document]:
"""Get documents."""
docs = [
Document("This is a test v2."),
Document("This is another test."),
Document("This is a test."),
Document("Hello world."),
Document("Hello world."),
Document("This is a test."),
Document("This is another test."),
Document("This is a test v2."),
]
return docs
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_tree(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("summary1")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("summary2")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("summary3")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("summary4")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
tree = GPTTreeIndex(
[
list1,
list2,
list3,
list4,
],
**tree_kwargs
)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = tree.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("What is?:This is a test v2.")
# Also test a non-recursive query. This should not go down into the list
tree_query_kwargs = query_configs[0].query_kwargs
response = tree.query(query_str, mode="default", **tree_query_kwargs)
assert response == ("What is?:summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_tree_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
tree1 = GPTTreeIndex(documents[2:6], **tree_kwargs)
tree2 = GPTTreeIndex(documents[:2] + documents[6:], **tree_kwargs)
tree1.set_text("tree_summary1")
tree2.set_text("tree_summary2")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
list_index = GPTListIndex([tree1, tree2], **list_kwargs)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = list_index. | (
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("What is?:This is a test.")
# Also test a non-recursive query. This should not go down into the list
list_query_kwargs = query_configs[1].query_kwargs
response = list_index.query(query_str, mode="default", **list_query_kwargs)
assert response == ("What is?:tree_summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_table_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
table1 = GPTSimpleKeywordTableIndex(documents[4:6], **table_kwargs)
table2 = GPTSimpleKeywordTableIndex(documents[2:3], **table_kwargs)
table1.set_text("table_summary1")
table2.set_text("table_summary2")
list_index = GPTListIndex([table1, table2], **list_kwargs)
query_str = "World?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("World?:Hello world.")
query_str = "Test?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("Test?:This is a test.")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_table(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("foo bar")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("apple orange")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("toronto london")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("cat dog")
table = GPTSimpleKeywordTableIndex([list1, list2, list3, list4], **table_kwargs)
query_str = "Foo?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Foo?:This is a test v2.")
query_str = "Orange?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Orange?:This is a test.")
query_str = "Cat?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Cat?:This is another test.")
| jerryjliu__llama_index |
70 | 70-172-26 | inproject | query | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"text_splitter",
"_add_document_to_index",
"_docstore",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Test recursive queries."""
from typing import Any, Dict, List, Tuple
from unittest.mock import patch
import pytest
from gpt_index.indices.data_structs import IndexStructType
from gpt_index.indices.keyword_table.simple_base import GPTSimpleKeywordTableIndex
from gpt_index.indices.list.base import GPTListIndex
from gpt_index.indices.query.schema import QueryConfig, QueryMode
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.schema import Document
from tests.mock_utils.mock_predict import mock_openai_llm_predict
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_PROMPT,
MOCK_REFINE_PROMPT,
MOCK_SUMMARY_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
from tests.mock_utils.mock_text_splitter import mock_token_splitter_newline
@pytest.fixture
def struct_kwargs() -> Tuple[Dict, List]:
"""Index kwargs."""
index_kwargs = {
"tree": {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
},
"list": {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
},
"table": {
"keyword_extract_template": MOCK_KEYWORD_EXTRACT_PROMPT,
},
}
query_configs = [
QueryConfig(
index_struct_type=IndexStructType.TREE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_template": MOCK_QUERY_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.LIST,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.KEYWORD_TABLE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_keyword_extract_template": MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
]
return index_kwargs, query_configs
@pytest.fixture
def documents() -> List[Document]:
"""Get documents."""
docs = [
Document("This is a test v2."),
Document("This is another test."),
Document("This is a test."),
Document("Hello world."),
Document("Hello world."),
Document("This is a test."),
Document("This is another test."),
Document("This is a test v2."),
]
return docs
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_tree(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("summary1")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("summary2")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("summary3")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("summary4")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
tree = GPTTreeIndex(
[
list1,
list2,
list3,
list4,
],
**tree_kwargs
)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = tree.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("What is?:This is a test v2.")
# Also test a non-recursive query. This should not go down into the list
tree_query_kwargs = query_configs[0].query_kwargs
response = tree.query(query_str, mode="default", **tree_query_kwargs)
assert response == ("What is?:summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_tree_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
tree1 = GPTTreeIndex(documents[2:6], **tree_kwargs)
tree2 = GPTTreeIndex(documents[:2] + documents[6:], **tree_kwargs)
tree1.set_text("tree_summary1")
tree2.set_text("tree_summary2")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
list_index = GPTListIndex([tree1, tree2], **list_kwargs)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("What is?:This is a test.")
# Also test a non-recursive query. This should not go down into the list
list_query_kwargs = query_configs[1].query_kwargs
response = list_index. | (query_str, mode="default", **list_query_kwargs)
assert response == ("What is?:tree_summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_table_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
table1 = GPTSimpleKeywordTableIndex(documents[4:6], **table_kwargs)
table2 = GPTSimpleKeywordTableIndex(documents[2:3], **table_kwargs)
table1.set_text("table_summary1")
table2.set_text("table_summary2")
list_index = GPTListIndex([table1, table2], **list_kwargs)
query_str = "World?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("World?:Hello world.")
query_str = "Test?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("Test?:This is a test.")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_table(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("foo bar")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("apple orange")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("toronto london")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("cat dog")
table = GPTSimpleKeywordTableIndex([list1, list2, list3, list4], **table_kwargs)
query_str = "Foo?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Foo?:This is a test v2.")
query_str = "Orange?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Orange?:This is a test.")
query_str = "Cat?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Cat?:This is another test.")
| jerryjliu__llama_index |
70 | 70-176-7 | inproject | object | [
"dict",
"multiple",
"object",
"stopall",
"TEST_PREFIX",
"__annotations__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Test recursive queries."""
from typing import Any, Dict, List, Tuple
from unittest.mock import patch
import pytest
from gpt_index.indices.data_structs import IndexStructType
from gpt_index.indices.keyword_table.simple_base import GPTSimpleKeywordTableIndex
from gpt_index.indices.list.base import GPTListIndex
from gpt_index.indices.query.schema import QueryConfig, QueryMode
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.schema import Document
from tests.mock_utils.mock_predict import mock_openai_llm_predict
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_PROMPT,
MOCK_REFINE_PROMPT,
MOCK_SUMMARY_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
from tests.mock_utils.mock_text_splitter import mock_token_splitter_newline
@pytest.fixture
def struct_kwargs() -> Tuple[Dict, List]:
"""Index kwargs."""
index_kwargs = {
"tree": {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
},
"list": {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
},
"table": {
"keyword_extract_template": MOCK_KEYWORD_EXTRACT_PROMPT,
},
}
query_configs = [
QueryConfig(
index_struct_type=IndexStructType.TREE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_template": MOCK_QUERY_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.LIST,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.KEYWORD_TABLE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_keyword_extract_template": MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
]
return index_kwargs, query_configs
@pytest.fixture
def documents() -> List[Document]:
"""Get documents."""
docs = [
Document("This is a test v2."),
Document("This is another test."),
Document("This is a test."),
Document("Hello world."),
Document("Hello world."),
Document("This is a test."),
Document("This is another test."),
Document("This is a test v2."),
]
return docs
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_tree(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("summary1")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("summary2")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("summary3")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("summary4")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
tree = GPTTreeIndex(
[
list1,
list2,
list3,
list4,
],
**tree_kwargs
)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = tree.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("What is?:This is a test v2.")
# Also test a non-recursive query. This should not go down into the list
tree_query_kwargs = query_configs[0].query_kwargs
response = tree.query(query_str, mode="default", **tree_query_kwargs)
assert response == ("What is?:summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_tree_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
tree1 = GPTTreeIndex(documents[2:6], **tree_kwargs)
tree2 = GPTTreeIndex(documents[:2] + documents[6:], **tree_kwargs)
tree1.set_text("tree_summary1")
tree2.set_text("tree_summary2")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
list_index = GPTListIndex([tree1, tree2], **list_kwargs)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("What is?:This is a test.")
# Also test a non-recursive query. This should not go down into the list
list_query_kwargs = query_configs[1].query_kwargs
response = list_index.query(query_str, mode="default", **list_query_kwargs)
assert response == ("What is?:tree_summary1")
@patch. | (TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_table_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
table1 = GPTSimpleKeywordTableIndex(documents[4:6], **table_kwargs)
table2 = GPTSimpleKeywordTableIndex(documents[2:3], **table_kwargs)
table1.set_text("table_summary1")
table2.set_text("table_summary2")
list_index = GPTListIndex([table1, table2], **list_kwargs)
query_str = "World?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("World?:Hello world.")
query_str = "Test?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("Test?:This is a test.")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_table(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("foo bar")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("apple orange")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("toronto london")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("cat dog")
table = GPTSimpleKeywordTableIndex([list1, list2, list3, list4], **table_kwargs)
query_str = "Foo?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Foo?:This is a test v2.")
query_str = "Orange?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Orange?:This is a test.")
query_str = "Cat?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Cat?:This is another test.")
| jerryjliu__llama_index |
70 | 70-213-7 | inproject | object | [
"dict",
"multiple",
"object",
"stopall",
"TEST_PREFIX",
"__annotations__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Test recursive queries."""
from typing import Any, Dict, List, Tuple
from unittest.mock import patch
import pytest
from gpt_index.indices.data_structs import IndexStructType
from gpt_index.indices.keyword_table.simple_base import GPTSimpleKeywordTableIndex
from gpt_index.indices.list.base import GPTListIndex
from gpt_index.indices.query.schema import QueryConfig, QueryMode
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.schema import Document
from tests.mock_utils.mock_predict import mock_openai_llm_predict
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_PROMPT,
MOCK_REFINE_PROMPT,
MOCK_SUMMARY_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
from tests.mock_utils.mock_text_splitter import mock_token_splitter_newline
@pytest.fixture
def struct_kwargs() -> Tuple[Dict, List]:
"""Index kwargs."""
index_kwargs = {
"tree": {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
},
"list": {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
},
"table": {
"keyword_extract_template": MOCK_KEYWORD_EXTRACT_PROMPT,
},
}
query_configs = [
QueryConfig(
index_struct_type=IndexStructType.TREE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_template": MOCK_QUERY_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.LIST,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.KEYWORD_TABLE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_keyword_extract_template": MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
]
return index_kwargs, query_configs
@pytest.fixture
def documents() -> List[Document]:
"""Get documents."""
docs = [
Document("This is a test v2."),
Document("This is another test."),
Document("This is a test."),
Document("Hello world."),
Document("Hello world."),
Document("This is a test."),
Document("This is another test."),
Document("This is a test v2."),
]
return docs
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_tree(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("summary1")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("summary2")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("summary3")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("summary4")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
tree = GPTTreeIndex(
[
list1,
list2,
list3,
list4,
],
**tree_kwargs
)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = tree.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("What is?:This is a test v2.")
# Also test a non-recursive query. This should not go down into the list
tree_query_kwargs = query_configs[0].query_kwargs
response = tree.query(query_str, mode="default", **tree_query_kwargs)
assert response == ("What is?:summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_tree_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
tree1 = GPTTreeIndex(documents[2:6], **tree_kwargs)
tree2 = GPTTreeIndex(documents[:2] + documents[6:], **tree_kwargs)
tree1.set_text("tree_summary1")
tree2.set_text("tree_summary2")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
list_index = GPTListIndex([tree1, tree2], **list_kwargs)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("What is?:This is a test.")
# Also test a non-recursive query. This should not go down into the list
list_query_kwargs = query_configs[1].query_kwargs
response = list_index.query(query_str, mode="default", **list_query_kwargs)
assert response == ("What is?:tree_summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_table_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
table1 = GPTSimpleKeywordTableIndex(documents[4:6], **table_kwargs)
table2 = GPTSimpleKeywordTableIndex(documents[2:3], **table_kwargs)
table1.set_text("table_summary1")
table2.set_text("table_summary2")
list_index = GPTListIndex([table1, table2], **list_kwargs)
query_str = "World?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("World?:Hello world.")
query_str = "Test?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("Test?:This is a test.")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch. | (LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_table(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("foo bar")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("apple orange")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("toronto london")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("cat dog")
table = GPTSimpleKeywordTableIndex([list1, list2, list3, list4], **table_kwargs)
query_str = "Foo?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Foo?:This is a test v2.")
query_str = "Orange?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Orange?:This is a test.")
query_str = "Cat?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Cat?:This is another test.")
| jerryjliu__llama_index |
70 | 70-239-21 | inproject | query | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"keyword_extract_template",
"load_from_disk",
"max_keywords_per_chunk",
"max_keywords_per_query",
"query",
"save_to_disk",
"set_text",
"text_splitter",
"_add_document_to_index",
"_docstore",
"_extract_keywords",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Test recursive queries."""
from typing import Any, Dict, List, Tuple
from unittest.mock import patch
import pytest
from gpt_index.indices.data_structs import IndexStructType
from gpt_index.indices.keyword_table.simple_base import GPTSimpleKeywordTableIndex
from gpt_index.indices.list.base import GPTListIndex
from gpt_index.indices.query.schema import QueryConfig, QueryMode
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.schema import Document
from tests.mock_utils.mock_predict import mock_openai_llm_predict
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
MOCK_QUERY_PROMPT,
MOCK_REFINE_PROMPT,
MOCK_SUMMARY_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
from tests.mock_utils.mock_text_splitter import mock_token_splitter_newline
@pytest.fixture
def struct_kwargs() -> Tuple[Dict, List]:
"""Index kwargs."""
index_kwargs = {
"tree": {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
},
"list": {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
},
"table": {
"keyword_extract_template": MOCK_KEYWORD_EXTRACT_PROMPT,
},
}
query_configs = [
QueryConfig(
index_struct_type=IndexStructType.TREE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_template": MOCK_QUERY_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.LIST,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
QueryConfig(
index_struct_type=IndexStructType.KEYWORD_TABLE,
query_mode=QueryMode.DEFAULT,
query_kwargs={
"query_keyword_extract_template": MOCK_QUERY_KEYWORD_EXTRACT_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
},
),
]
return index_kwargs, query_configs
@pytest.fixture
def documents() -> List[Document]:
"""Get documents."""
docs = [
Document("This is a test v2."),
Document("This is another test."),
Document("This is a test."),
Document("Hello world."),
Document("Hello world."),
Document("This is a test."),
Document("This is another test."),
Document("This is a test v2."),
]
return docs
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_tree(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a list for every two, then a tree
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("summary1")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("summary2")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("summary3")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("summary4")
# there are two root nodes in this tree: one containing [list1, list2]
# and the other containing [list3, list4]
tree = GPTTreeIndex(
[
list1,
list2,
list3,
list4,
],
**tree_kwargs
)
query_str = "What is?"
# query should first pick the left root node, then pick list1
# within list1, it should go through the first document and second document
response = tree.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("What is?:This is a test v2.")
# Also test a non-recursive query. This should not go down into the list
tree_query_kwargs = query_configs[0].query_kwargs
response = tree.query(query_str, mode="default", **tree_query_kwargs)
assert response == ("What is?:summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_tree_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
tree_kwargs = index_kwargs["tree"]
# try building a tree for a group of 4, then a list
# use a diff set of documents
tree1 = GPTTreeIndex(documents[2:6], **tree_kwargs)
tree2 = GPTTreeIndex(documents[:2] + documents[6:], **tree_kwargs)
tree1.set_text("tree_summary1")
tree2.set_text("tree_summary2")
    # build a list index on top of the two tree indices
list_index = GPTListIndex([tree1, tree2], **list_kwargs)
query_str = "What is?"
    # the recursive query should go through both trees and descend into their documents
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("What is?:This is a test.")
    # Also test a non-recursive query. This should not go down into the trees
list_query_kwargs = query_configs[1].query_kwargs
response = list_index.query(query_str, mode="default", **list_query_kwargs)
assert response == ("What is?:tree_summary1")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_table_list(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
    # build two keyword tables over subsets of the documents, then a list over them
table1 = GPTSimpleKeywordTableIndex(documents[4:6], **table_kwargs)
table2 = GPTSimpleKeywordTableIndex(documents[2:3], **table_kwargs)
table1.set_text("table_summary1")
table2.set_text("table_summary2")
list_index = GPTListIndex([table1, table2], **list_kwargs)
query_str = "World?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("World?:Hello world.")
query_str = "Test?"
response = list_index.query(
query_str, mode="recursive", query_configs=query_configs
)
assert response == ("Test?:This is a test.")
@patch.object(TokenTextSplitter, "split_text", side_effect=mock_token_splitter_newline)
@patch.object(LLMPredictor, "predict", side_effect=mock_openai_llm_predict)
@patch.object(LLMPredictor, "__init__", return_value=None)
def test_recursive_query_list_table(
_mock_init: Any,
_mock_predict: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test query."""
index_kwargs, query_configs = struct_kwargs
list_kwargs = index_kwargs["list"]
table_kwargs = index_kwargs["table"]
    # build a list for every two documents, then a keyword table over the lists
list1 = GPTListIndex(documents[0:2], **list_kwargs)
list1.set_text("foo bar")
list2 = GPTListIndex(documents[2:4], **list_kwargs)
list2.set_text("apple orange")
list3 = GPTListIndex(documents[4:6], **list_kwargs)
list3.set_text("toronto london")
list4 = GPTListIndex(documents[6:8], **list_kwargs)
list4.set_text("cat dog")
table = GPTSimpleKeywordTableIndex([list1, list2, list3, list4], **table_kwargs)
query_str = "Foo?"
response = table. | (query_str, mode="recursive", query_configs=query_configs)
assert response == ("Foo?:This is a test v2.")
query_str = "Orange?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Orange?:This is a test.")
query_str = "Cat?"
response = table.query(query_str, mode="recursive", query_configs=query_configs)
assert response == ("Cat?:This is another test.")
| jerryjliu__llama_index |
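The tests above all follow one compose-and-query pattern: build small sub-indices over slices of the documents, give each a summary via set_text, wrap them in a parent index, and query with mode="recursive" plus one QueryConfig per index type. Below is a minimal editorial sketch of that pattern outside the test harness; it assumes a real (non-mocked) LLM predictor with default prompts, and the import path of IndexStructType is an assumption since it is not visible in the excerpt above.
# Hedged sketch of the compose-and-query pattern exercised by the tests above.
# Assumes real predictors and default prompts; IndexStructType's import path is assumed.
from gpt_index.indices.data_structs import IndexStructType  # path assumed
from gpt_index.indices.list.base import GPTListIndex
from gpt_index.indices.query.schema import QueryConfig, QueryMode
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.schema import Document

docs = [Document("This is a test."), Document("Hello world.")]
list_a = GPTListIndex(docs[0:1])
list_a.set_text("summary of the first document")   # summary text the parent index sees
list_b = GPTListIndex(docs[1:2])
list_b.set_text("summary of the second document")
tree = GPTTreeIndex([list_a, list_b])               # compose a tree over the two lists
query_configs = [
    QueryConfig(index_struct_type=IndexStructType.TREE, query_mode=QueryMode.DEFAULT, query_kwargs={}),
    QueryConfig(index_struct_type=IndexStructType.LIST, query_mode=QueryMode.DEFAULT, query_kwargs={}),
]
response = tree.query("What is?", mode="recursive", query_configs=query_configs)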
71 | 71-72-13 | inproject | _embed_model | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"_add_document_to_index",
"_docstore",
"_embed_model",
"_faiss_index",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_text_splitter",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self. | = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
        In GPTFaissIndex, we allow the user to specify an additional
        `faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
                will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
        In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
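For orientation, here is a minimal usage sketch of the GPTFaissIndex class shown in the row above. It is an editorial illustration, not part of the dataset: the GPTFaissIndex import path and the embedding dimensionality of 1536 are assumptions, and it presumes faiss is installed and OpenAI credentials are configured for the default OpenAIEmbedding.
# Hedged usage sketch for the class above; import path and dim 1536 are assumptions.
import faiss
from gpt_index.indices.vector_store.faiss import GPTFaissIndex  # path assumed
from gpt_index.schema import Document

faiss_index = faiss.IndexFlatL2(1536)        # flat L2 index over raw embeddings
docs = [Document("Hello world."), Document("This is a test.")]
index = GPTFaissIndex(docs, faiss_index=faiss_index)
print(index.query("What is?"))               # default (non-recursive) query mode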
71 | 71-73-16 | commited | __init__ | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"_docstore",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super(). | (
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
        In GPTFaissIndex, we allow the user to specify an additional
        `faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
                will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
        In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
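The _add_document_to_index body in the row above does three pieces of Faiss bookkeeping: reshape the embedding to a 2D array, read ntotal to obtain the id the next vector will receive, then add the vector. The self-contained sketch below isolates just that bookkeeping; the random float32 vector and the dimensionality of 1536 are stand-ins, not what the index actually embeds.
# Standalone sketch of the Faiss bookkeeping used in _add_document_to_index above.
# A random float32 vector stands in for a real text embedding (assumption).
import faiss
import numpy as np

dim = 1536                                   # assumed embedding dimensionality
faiss_index = faiss.IndexFlatL2(dim)
embedding = np.random.rand(dim).astype("float32")
embedding_np = embedding[np.newaxis, :]      # Faiss expects shape (n_vectors, dim)
new_id = faiss_index.ntotal                  # id assigned to the next added vector
faiss_index.add(embedding_np)
assert faiss_index.ntotal == new_id + 1      # the vector is now stored under new_id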
71 | 71-79-13 | inproject | _text_splitter | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"_add_document_to_index",
"_docstore",
"_embed_model",
"_faiss_index",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_text_splitter",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self. | = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
        In GPTFaissIndex, we allow the user to specify an additional
        `faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
                will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
        In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
71 | 71-79-35 | inproject | _prompt_helper | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"_add_document_to_index",
"_docstore",
"_embed_model",
"_faiss_index",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_text_splitter",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self. | .get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
        In GPTFaissIndex, we allow the user to specify an additional
        `faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
                will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
        In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
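The save_to_disk and load_from_disk methods shown in these rows persist two separate artifacts: the JSON index structure and, when faiss_index_save_path is given, the Faiss vectors themselves. A hedged round-trip sketch follows; the GPTFaissIndex import path, the file names, and the dimensionality are assumptions.
# Hedged persistence sketch: JSON structure plus the Faiss vectors side by side.
import faiss
from gpt_index.indices.vector_store.faiss import GPTFaissIndex  # path assumed
from gpt_index.schema import Document

index = GPTFaissIndex(
    [Document("Hello world.")], faiss_index=faiss.IndexFlatL2(1536)  # dim assumed
)
index.save_to_disk("index.json", faiss_index_save_path="index.faiss")
# Both files are needed to restore the full index: the JSON holds node text and ids,
# while the .faiss file holds the vectors (they are not embedded in the JSON).
restored = GPTFaissIndex.load_from_disk(
    "index.json", faiss_index_save_path="index.faiss"
)
print(restored.query("What is?"))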
71 | 71-79-50 | inproject | get_text_splitter_given_prompt | [
"embedding_limit",
"get_chunk_size_given_prompt",
"get_numbered_text_from_nodes",
"get_text_from_nodes",
"get_text_splitter_given_prompt",
"max_chunk_overlap",
"max_input_size",
"num_output",
"_tokenizer",
"__annotations__",
"__bool__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper. | (
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
        In GPTFaissIndex, we allow the user to specify an additional
        `faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
                will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
        In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
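The GPTFaissIndex constructor in these rows falls back to OpenAIEmbedding() when no embed_model is passed; supplying one explicitly is a one-line change, sketched below with the same assumed import path and dimensionality as the earlier sketches (the OpenAIEmbedding import is the one visible in the prefix code).
# Hedged sketch: pass the embedding model explicitly instead of relying on the
# default fallback in __init__ (import path and dim are assumptions, as before).
import faiss
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.vector_store.faiss import GPTFaissIndex  # path assumed
from gpt_index.schema import Document

index = GPTFaissIndex(
    [Document("Hello world.")],
    faiss_index=faiss.IndexFlatL2(1536),
    embed_model=OpenAIEmbedding(),           # same object the fallback would create
)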
71 | 71-90-56 | inproject | get_text | [
"doc_id",
"get_doc_id",
"get_text",
"is_doc_id_none",
"text",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document. | ())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
        In GPTFaissIndex, we allow the user to specify an additional
        `faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
                will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
        In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
71 | 71-97-34 | inproject | _embed_model | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"_add_document_to_index",
"_docstore",
"_embed_model",
"_faiss_index",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_text_splitter",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self. | .get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
        In GPTFaissIndex, we allow the user to specify an additional
        `faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
                will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
        In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
71 | 71-97-47 | inproject | get_text_embedding | [
"get_query_embedding",
"get_text_embedding",
"mode",
"model",
"similarity",
"__annotations__",
"__bool__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model. | (text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the Faiss index from a file - that
way, the user does not have to recreate the Faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the Faiss index to a file - that
way, the user can pass in the same argument to
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
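The completion point in the record above (`get_text_embedding`) sits on the embed-and-add step of `_add_document_to_index`: embed a chunk, reshape it to a (1, d) array, remember `ntotal` as the new id, and add it to Faiss. A standalone sketch of that step, assuming a 1536-dimensional embedding and a plain `IndexFlatL2` (both assumptions):

import faiss
import numpy as np

dim = 1536                                  # assumed embedding dimension
faiss_index = faiss.IndexFlatL2(dim)        # assumed index type
embedding = np.random.rand(1, dim).astype("float32")  # stand-in for the real embedding
new_id = faiss_index.ntotal                 # ids are assigned sequentially, as above
faiss_index.add(embedding)                  # Faiss expects float32 rows of shape (n, d)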
71 | 71-103-25 | inproject | add_text | [
"add_text",
"dataclass_json_config",
"doc_id",
"from_dict",
"from_json",
"get_doc_id",
"get_node",
"get_nodes",
"get_text",
"id_map",
"is_doc_id_none",
"nodes_dict",
"schema",
"text",
"to_dict",
"to_json",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct. | (text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the Faiss index from a file - that
way, the user does not have to recreate the Faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the Faiss index to a file - that
way, the user can pass in the same argument to
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
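The class docstring repeated in these records states that `faiss_index` is a required constructor argument. A hedged construction sketch follows; the import path, the embedding dimension (1536), and the `documents` list are assumptions.

import faiss
from gpt_index import GPTFaissIndex  # import path assumed, not shown in the record

faiss_index = faiss.IndexFlatL2(1536)  # dimension assumed to match the embedding model
index = GPTFaissIndex(documents=documents, faiss_index=faiss_index)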
71 | 71-103-55 | inproject | get_doc_id | [
"doc_id",
"get_doc_id",
"get_text",
"is_doc_id_none",
"text",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document. | (), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the Faiss index from a file - that
way, the user does not have to recreate the Faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the Faiss index to a file - that
way, the user can pass in the same argument to
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
71 | 71-109-29 | inproject | _prompt_helper | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"_add_document_to_index",
"_docstore",
"_embed_model",
"_faiss_index",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_text_splitter",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self. | .get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the Faiss index from a file - that
way, the user does not have to recreate the Faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the Faiss index to a file - that
way, the user can pass in the same argument to
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
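In the record above, `_mode_to_query` wires the default mode to `GPTFaissIndexQuery`, and the base class exposes a `query` entry point (visible in the attribute list). A hedged usage sketch; the question string is invented and the exact `query` signature is an assumption inferred from the default-mode routing shown here.

# Hedged query sketch; `index` is the GPTFaissIndex built earlier.
response = index.query("What does the document say about X?")
print(response)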
71 | 71-109-44 | inproject | get_text_splitter_given_prompt | [
"embedding_limit",
"get_chunk_size_given_prompt",
"get_numbered_text_from_nodes",
"get_text_from_nodes",
"get_text_splitter_given_prompt",
"max_chunk_overlap",
"max_input_size",
"num_output",
"_tokenizer",
"__annotations__",
"__bool__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper. | (
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the Faiss index from a file - that
way, the user does not have to recreate the Faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the Faiss index to a file - that
way, the user can pass in the same argument to
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
71 | 71-114-17 | inproject | _add_document_to_index | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"_add_document_to_index",
"_docstore",
"_embed_model",
"_faiss_index",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_text_splitter",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self. | (index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the Faiss index from a file - that
way, the user does not have to recreate the Faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the Faiss index to a file - that
way, the user can pass in the same argument to
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
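These records also show `_insert` delegating to `_add_document_to_index` while `delete` raises NotImplementedError, so only additions are supported incrementally. A short sketch; `new_doc` is an assumed BaseDocument instance and `index` is an existing GPTFaissIndex.

index.insert(new_doc)    # re-chunks, embeds, and adds the document to the Faiss index
# index.delete(new_doc)  # would raise NotImplementedError per the source above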
71 | 71-122-56 | random | text_qa_template | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"_add_document_to_index",
"_docstore",
"_embed_model",
"_faiss_index",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_text_splitter",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self. |
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the Faiss index from a file - that
way, the user does not have to recreate the Faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the Faiss index to a file - that
way, the user can pass in the same argument to
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
71 | 71-124-21 | inproject | index_struct | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"_add_document_to_index",
"_docstore",
"_embed_model",
"_faiss_index",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_text_splitter",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self. | , faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the Faiss index from a file - that
way, the user does not have to recreate the Faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the Faiss index to a file - that
way, the user can pass in the same argument to
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
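For the vector part, save_to_disk and load_from_disk in these records call faiss.write_index / faiss.read_index directly, and the same calls work standalone. A tiny sketch with an assumed file name and an existing `faiss_index` object:

import faiss

faiss.write_index(faiss_index, "vectors.faiss")   # persist the raw Faiss index
faiss_index = faiss.read_index("vectors.faiss")   # reload it later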
71 | 71-124-52 | inproject | _faiss_index | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"_add_document_to_index",
"_docstore",
"_embed_model",
"_faiss_index",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_text_splitter",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self. | , **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
71 | 71-132-13 | infile | _add_document_to_index | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"_add_document_to_index",
"_docstore",
"_embed_model",
"_faiss_index",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_text_splitter",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self. | (self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
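The record above (and several that follow) centers on `_add_document_to_index`, which appends each chunk embedding to the Faiss index and reuses the pre-insertion `ntotal` as that chunk's id in the `IndexDict`. A standalone sketch of that id-assignment pattern, using a toy dimension and a random vector rather than real embeddings:
import faiss
import numpy as np

d = 8  # toy dimension, for illustration only
index = faiss.IndexFlatL2(d)

embedding = np.random.rand(d).astype("float32")
vector = embedding[np.newaxis, :]  # Faiss expects a 2-D (n, d) array

new_id = index.ntotal      # id the next vector will receive (0 for an empty index)
index.add(vector)
assert index.ntotal == new_id + 1  # ids are assigned sequentially by insertion order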
71 | 71-132-41 | infile | _index_struct | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"_add_document_to_index",
"_docstore",
"_embed_model",
"_faiss_index",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_text_splitter",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self. | , document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
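This record fills in `_insert`, which routes a new document through the same `_add_document_to_index` path using the stored `_index_struct` and `_text_splitter`. A hedged usage sketch, continuing the construction sketch earlier: `new_doc` is a placeholder for any gpt_index document, and it is assumed that the public `insert` listed in the attribute dump delegates to `_insert` as its name suggests.
new_doc = ...  # placeholder: any BaseDocument-compatible gpt_index document
index.insert(new_doc)  # assumed to delegate to _insert shown in the record

# Deletion, by contrast, is explicitly unsupported for this index:
try:
    index.delete(new_doc)
except NotImplementedError:
    pass  # "Delete not implemented for Faiss index."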
71 | 71-132-71 | infile | _text_splitter | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"text_qa_template",
"_add_document_to_index",
"_docstore",
"_embed_model",
"_faiss_index",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_text_splitter",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self. | )
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
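The record above completes the `_text_splitter` reference; inside the class the splitter is sized via `_prompt_helper.get_text_splitter_given_prompt(...)` so chunks fit the QA prompt. A simpler standalone sketch using the same `TokenTextSplitter` import, with its default constructor assumed (only `split_text` is shown in the records):
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter

splitter = TokenTextSplitter()  # default sizing assumed; the index derives its own
long_document_text = "..."      # placeholder for a real document's text
chunks = splitter.split_text(long_document_text)
print(f"split into {len(chunks)} chunks")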
71 | 71-168-27 | infile | load_from_disk | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"_docstore",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super(). | (save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
71 | 71-170-27 | infile | load_from_disk | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"_docstore",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super(). | (save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super().save_to_disk(save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
71 | 71-194-16 | infile | save_to_disk | [
"build_index_from_documents",
"delete",
"docstore",
"index_struct",
"index_struct_cls",
"index_struct_with_text",
"insert",
"load_from_disk",
"query",
"save_to_disk",
"set_text",
"_docstore",
"_index_struct",
"_insert",
"_llm_predictor",
"_mode_to_query",
"_process_documents",
"_prompt_helper",
"_validate_documents",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Vector store index.
An index that is built on top of an existing vector store.
"""
from typing import Any, Optional, Sequence, cast
import numpy as np
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import DEFAULT_MODE, DOCUMENTS_INPUT, BaseGPTIndex
from gpt_index.indices.data_structs import IndexDict
from gpt_index.indices.query.base import BaseGPTIndexQuery
from gpt_index.indices.query.vector_store.faiss import GPTFaissIndexQuery
from gpt_index.indices.utils import truncate_text
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT
from gpt_index.schema import BaseDocument
class GPTFaissIndex(BaseGPTIndex[IndexDict]):
"""GPT Faiss Index.
The GPTFaissIndex is a data structure where nodes are keyed by
embeddings, and those embeddings are stored within a Faiss index.
During index construction, the document texts are chunked up,
converted to nodes with text; they are then encoded in
document embeddings stored within Faiss.
During query time, the index uses Faiss to query for the top
k most similar nodes, and synthesizes an answer from the
retrieved nodes.
Args:
text_qa_template (Optional[Prompt]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
faiss_index (faiss.Index): A Faiss Index object (required)
embed_model (Optional[OpenAIEmbedding]): Embedding model to use for
embedding similarity.
"""
index_struct_cls = IndexDict
def __init__(
self,
documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
index_struct: Optional[IndexDict] = None,
text_qa_template: Optional[Prompt] = None,
llm_predictor: Optional[LLMPredictor] = None,
faiss_index: Optional[Any] = None,
embed_model: Optional[OpenAIEmbedding] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
import faiss # noqa: F401
except ImportError:
raise ValueError(import_err_msg)
self.text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
if faiss_index is None:
raise ValueError("faiss_index cannot be None.")
# NOTE: cast to Any for now
self._faiss_index = cast(Any, faiss_index)
self._embed_model = embed_model or OpenAIEmbedding()
super().__init__(
documents=documents,
index_struct=index_struct,
llm_predictor=llm_predictor,
**kwargs,
)
self._text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
def _add_document_to_index(
self,
index_struct: IndexDict,
document: BaseDocument,
text_splitter: TokenTextSplitter,
) -> None:
"""Add document to index."""
text_chunks = text_splitter.split_text(document.get_text())
for _, text_chunk in enumerate(text_chunks):
fmt_text_chunk = truncate_text(text_chunk, 50)
print(f"> Adding chunk: {fmt_text_chunk}")
# add to FAISS
# NOTE: embeddings won't be stored in Node but rather in underlying
# Faiss store
text_embedding = self._embed_model.get_text_embedding(text_chunk)
text_embedding_np = np.array(text_embedding)[np.newaxis, :]
new_id = self._faiss_index.ntotal
self._faiss_index.add(text_embedding_np)
# add to index
index_struct.add_text(text_chunk, document.get_doc_id(), text_id=new_id)
def build_index_from_documents(
self, documents: Sequence[BaseDocument]
) -> IndexDict:
"""Build index from documents."""
text_splitter = self._prompt_helper.get_text_splitter_given_prompt(
self.text_qa_template, 1
)
index_struct = IndexDict()
for d in documents:
self._add_document_to_index(index_struct, d, text_splitter)
return index_struct
def _mode_to_query(
self, mode: str, *query_args: Any, **query_kwargs: Any
) -> BaseGPTIndexQuery:
if mode == DEFAULT_MODE:
if "text_qa_template" not in query_kwargs:
query_kwargs["text_qa_template"] = self.text_qa_template
query: GPTFaissIndexQuery = GPTFaissIndexQuery(
self.index_struct, faiss_index=self._faiss_index, **query_kwargs
)
else:
raise ValueError(f"Invalid query mode: {mode}.")
return query
def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
"""Insert a document."""
self._add_document_to_index(self._index_struct, document, self._text_splitter)
def delete(self, document: BaseDocument) -> None:
"""Delete a document."""
raise NotImplementedError("Delete not implemented for Faiss index.")
@classmethod
def load_from_disk(
cls, save_path: str, faiss_index_save_path: Optional[str] = None, **kwargs: Any
) -> "BaseGPTIndex":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to load the faiss index from a file - that
way, the user does not have to recreate the faiss index outside
of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be loaded from disk.
**kwargs: Additional kwargs to pass to the index constructor.
Returns:
BaseGPTIndex: The loaded index.
"""
if faiss_index_save_path is not None:
import faiss
faiss_index = faiss.read_index(faiss_index_save_path)
return super().load_from_disk(save_path, faiss_index=faiss_index, **kwargs)
else:
return super().load_from_disk(save_path, **kwargs)
def save_to_disk(
self,
save_path: str,
faiss_index_save_path: Optional[str] = None,
**save_kwargs: Any,
) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
In GPTFaissIndex, we allow the user to specify an additional
`faiss_index_save_path` to save the faiss index to a file - that
way, the user can pass in the same argument in
`GPTFaissIndex.load_from_disk` without having to recreate
the Faiss index outside of this class.
Args:
save_path (str): The save_path of the file.
faiss_index_save_path (Optional[str]): The save_path of the
Faiss index file. If not specified, the Faiss index
will not be saved to disk.
"""
super(). | (save_path, **save_kwargs)
if faiss_index_save_path is not None:
import faiss
faiss.write_index(self._faiss_index, faiss_index_save_path)
| jerryjliu__llama_index |
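The last few GPTFaissIndex records exercise the persistence pair `save_to_disk` / `load_from_disk`. Both signatures appear verbatim above, so the round trip below only invents the file names; the Faiss index itself is written with `faiss.write_index` and read back with `faiss.read_index` exactly as the methods show.
# Continuing the earlier sketch: `index` is an existing GPTFaissIndex.
index.save_to_disk("index.json", faiss_index_save_path="index.faiss")

# Later, or in another process: both files are needed to rebuild the index.
index2 = GPTFaissIndex.load_from_disk(
    "index.json", faiss_index_save_path="index.faiss"
)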
74 | 74-188-28 | inproject | image | [
"account_id",
"dkr_client",
"dkr_repo",
"full_image_tagged",
"image",
"image_full",
"image_tagged",
"latch_commit_api_url",
"latch_image_api_url",
"latch_register_api_url",
"pkg_root",
"remote",
"serialize_dir",
"token",
"version",
"_construct_dkr_client",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package.
From a high level, the user will be redirected to a browser and prompted to
log in. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
.. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
- Uploading the container with a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to a `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx. | }
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retrieve an ECR login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
# Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if not (ctx.remote is None):
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
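The `_login` helper in the code above trades short-lived AWS credentials for an ECR authorization token and hands the decoded result to the Docker client. A minimal sketch of that exchange, assuming the temporary credentials and the target registry URL are already in hand (the function name and all parameters here are placeholders, not part of the latch SDK):

import base64

import boto3
import docker


def ecr_docker_login(access_key, secret_key, session_token, registry):
    """Sketch only: exchange temporary AWS credentials for a Docker login."""
    ecr = boto3.session.Session(
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        aws_session_token=session_token,
        region_name="us-west-2",
    ).client("ecr")
    token = ecr.get_authorization_token()["authorizationData"][0][
        "authorizationToken"
    ]
    # The token is base64("<user>:<password>"); for ECR the user is "AWS".
    user, password = base64.b64decode(token).decode("utf-8").split(":", 1)
    docker.APIClient().login(username=user, password=password, registry=registry)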
74 | 74-189-24 | common | post | [
"adapters",
"api",
"auth",
"certs",
"chardet_version",
"charset_normalizer_version",
"check_compatibility",
"codes",
"compat",
"ConnectionError",
"ConnectTimeout",
"cookies",
"cryptography_version",
"delete",
"DependencyWarning",
"exceptions",
"FileModeWarning",
"get",
"head",
"help",
"hooks",
"HTTPError",
"JSONDecodeError",
"logging",
"models",
"NullHandler",
"options",
"packages",
"patch",
"post",
"PreparedRequest",
"put",
"pyopenssl",
"ReadTimeout",
"request",
"Request",
"RequestException",
"RequestsDependencyWarning",
"Response",
"Session",
"session",
"sessions",
"ssl",
"status_codes",
"structures",
"Timeout",
"TooManyRedirects",
"urllib3",
"URLRequired",
"utils",
"warnings",
"_check_cryptography",
"_internal_utils",
"__author__",
"__author_email__",
"__build__",
"__cake__",
"__copyright__",
"__description__",
"__doc__",
"__file__",
"__license__",
"__name__",
"__package__",
"__title__",
"__url__",
"__version__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package
From a high-level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
    .. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
    - Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
        pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests. | (ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
    # Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if not (ctx.remote is None):
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
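The custom-context branch of `_build_image` above never writes the generated Dockerfile to disk: it assembles a tarball in a temporary file, injects the Dockerfile straight from memory, and streams the archive to the Docker daemon. A minimal sketch of that pattern using the docker SDK's low-level client; the base image, package path and tag are placeholder values:

import tarfile
import tempfile
from io import BytesIO

import docker


def build_from_memory(pkg_dir, tag):
    """Sketch only: build an image from a tar context assembled in memory."""
    dockerfile = BytesIO(b"FROM python:3.10-slim\nCOPY . /root\nWORKDIR /root\n")
    client = docker.APIClient()
    with tempfile.NamedTemporaryFile() as f:
        with tarfile.open(mode="w", fileobj=f) as t:
            # Package files are added as-is; the Dockerfile never touches disk.
            t.add(pkg_dir, arcname=".")
            dfinfo = tarfile.TarInfo("Dockerfile")
            dfinfo.size = len(dockerfile.getvalue())
            dockerfile.seek(0)
            t.addfile(dfinfo, dockerfile)
        f.seek(0)
        # custom_context=True tells the daemon the fileobj is a complete tar context.
        return list(
            client.build(fileobj=f, custom_context=True, tag=tag, decode=True)
        )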
74 | 74-189-33 | common | latch_image_api_url | [
"account_id",
"dkr_client",
"dkr_repo",
"full_image_tagged",
"image",
"image_full",
"image_tagged",
"latch_commit_api_url",
"latch_image_api_url",
"latch_register_api_url",
"pkg_root",
"remote",
"serialize_dir",
"token",
"version",
"_construct_dkr_client",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package
From a high-level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
    .. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
    - Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
        pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx. | , headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
    # Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if not (ctx.remote is None):
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
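The `_pp_prog_map` closure in `_print_upload_logs` above leans on the ANSI escape sequence ESC[<n>A, which moves the cursor up n lines so each refresh overwrites the previous progress block instead of appending to it. A small self-contained illustration of that redraw trick (the layer names, percentages and sleep are made up for the demo):

import time


def redraw(progress):
    """Sketch only: reprint a block of status lines in place."""
    if not progress:
        return
    block = "".join(f"{layer} ~ {status}\n" for layer, status in progress.items())
    # \x1B[<n>A moves the cursor up n lines; the next call overwrites the block.
    print(block, end=f"\x1B[{len(progress)}A")


if __name__ == "__main__":
    for pct in range(0, 101, 20):
        redraw({"layer-1": f"{pct}%", "layer-2": f"{max(0, pct - 10)}%"})
        time.sleep(0.2)
    print("\n" * 2, end="")  # step past the two status lines when finished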
74 | 74-214-28 | inproject | b64decode | [
"a85decode",
"a85encode",
"b16decode",
"b16encode",
"b32decode",
"b32encode",
"b32hexdecode",
"b32hexencode",
"b64decode",
"b64encode",
"b85decode",
"b85encode",
"binascii",
"bytes_types",
"decode",
"decodebytes",
"encode",
"encodebytes",
"main",
"MAXBINSIZE",
"MAXLINESIZE",
"re",
"standard_b64decode",
"standard_b64encode",
"struct",
"test",
"urlsafe_b64decode",
"urlsafe_b64encode",
"_85encode",
"_a85chars",
"_a85chars2",
"_A85END",
"_A85START",
"_B32_DECODE_DOCSTRING",
"_B32_DECODE_MAP01_DOCSTRING",
"_B32_ENCODE_DOCSTRING",
"_b32alphabet",
"_b32decode",
"_b32encode",
"_b32hexalphabet",
"_b32rev",
"_b32tab2",
"_b85alphabet",
"_b85chars",
"_b85chars2",
"_b85dec",
"_bytes_from_decode_data",
"_input_type_check",
"_urlsafe_decode_translation",
"_urlsafe_encode_translation",
"__all__",
"__doc__",
"__file__",
"__name__",
"__package__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package
From a high-level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
    .. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
    - Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
        pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64. | (token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
    # Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if not (ctx.remote is None):
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
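`_register_serialized_pkg` above skips everything under the package's `data` directory with the membership test `ctx.pkg_root.joinpath("data") in file.parents`; since `Path.parents` is the sequence of all ancestor directories, that one check excludes an entire subtree. A tiny illustration of the same idea (the helper name and directory arguments are made up):

from pathlib import Path


def files_outside(root, excluded_subdir):
    """Sketch only: walk a tree while skipping one subtree via Path.parents."""
    root = Path(root).resolve()
    excluded = root / excluded_subdir
    for path in root.rglob("*"):
        # `excluded in path.parents` is True for every path below that folder.
        if path.is_file() and excluded not in path.parents:
            yield path.relative_to(root)

Called as list(files_outside("./foo", "data")), this yields roughly the same set of relative paths that the commit upload uses as field names.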
74 | 74-214-45 | inproject | decode | [
"capitalize",
"center",
"count",
"decode",
"endswith",
"expandtabs",
"find",
"fromhex",
"hex",
"index",
"isalnum",
"isalpha",
"isascii",
"isdigit",
"islower",
"isspace",
"istitle",
"isupper",
"join",
"ljust",
"lower",
"lstrip",
"maketrans",
"partition",
"removeprefix",
"removesuffix",
"replace",
"rfind",
"rindex",
"rjust",
"rpartition",
"rsplit",
"rstrip",
"split",
"splitlines",
"startswith",
"strip",
"swapcase",
"title",
"translate",
"upper",
"zfill",
"__add__",
"__annotations__",
"__class__",
"__contains__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__float__",
"__format__",
"__ge__",
"__getattribute__",
"__getitem__",
"__getnewargs__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__int__",
"__iter__",
"__le__",
"__len__",
"__lt__",
"__mod__",
"__module__",
"__mul__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__rmul__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package
From a high-level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
    .. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
    - Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
        pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token). | ("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
    # Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if not (ctx.remote is None):
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
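The registration and commit calls above pass a plain dict to requests.post(..., files=...), mixing raw bytes fields (the version and workflow name) with open file handles; requests encodes the whole mapping as a single multipart/form-data body. A minimal sketch of that upload, with the URL and token as placeholders and with the handles closed afterwards (the original code leaves that to the interpreter):

from pathlib import Path

import requests


def upload_dir(url, token, version, directory):
    """Sketch only: POST a directory of artifacts as one multipart request."""
    files = {"version": version.encode("utf-8")}
    for path in Path(directory).resolve().rglob("*"):
        if path.is_file():
            files[path.name] = open(path, "rb")
    try:
        # requests accepts bytes and file objects interchangeably as field values.
        resp = requests.post(
            url, headers={"Authorization": f"Bearer {token}"}, files=files
        )
    finally:
        for value in files.values():
            if hasattr(value, "close"):
                value.close()
    return resp.json()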
74 | 74-214-61 | inproject | split | [
"capitalize",
"casefold",
"center",
"count",
"encode",
"endswith",
"expandtabs",
"find",
"format",
"format_map",
"index",
"isalnum",
"isalpha",
"isascii",
"isdecimal",
"isdigit",
"isidentifier",
"islower",
"isnumeric",
"isprintable",
"isspace",
"istitle",
"isupper",
"join",
"ljust",
"lower",
"lstrip",
"maketrans",
"partition",
"removeprefix",
"removesuffix",
"replace",
"rfind",
"rindex",
"rjust",
"rpartition",
"rsplit",
"rstrip",
"split",
"splitlines",
"startswith",
"strip",
"swapcase",
"title",
"translate",
"upper",
"zfill",
"__add__",
"__annotations__",
"__class__",
"__contains__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__getitem__",
"__getnewargs__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__iter__",
"__le__",
"__len__",
"__lt__",
"__mod__",
"__module__",
"__mul__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__rmul__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package
From a high-level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
    .. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
    - Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
        pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8"). | (":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
# Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if not (ctx.remote is None):
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
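The `_login` helper in the row above decodes the ECR authorization token, which is a base64-encoded "user:password" pair, before handing it to the Docker client. A minimal sketch of that decode step, using a fabricated token rather than a real credential (for ECR the username is typically the literal string "AWS", but every value here is illustrative):
# Sketch only: decode a fabricated ECR-style authorization token.
# A real token comes from client.get_authorization_token(), as in the code above.
import base64

fake_token = base64.b64encode(b"AWS:not-a-real-password").decode("utf-8")

user, password = base64.b64decode(fake_token).decode("utf-8").split(":")
assert user == "AWS"
assert password == "not-a-real-password"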
74 | 74-215-8 | inproject | dkr_client | [
"account_id",
"dkr_client",
"dkr_repo",
"full_image_tagged",
"image",
"image_full",
"image_tagged",
"latch_commit_api_url",
"latch_image_api_url",
"latch_register_api_url",
"pkg_root",
"remote",
"serialize_dir",
"token",
"version",
"_construct_dkr_client",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package
From a high-level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
.. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
- Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx. | .login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
# Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if not (ctx.remote is None):
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
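When no Dockerfile is supplied, `_build_image` in the row above streams a custom build context to the Docker daemon: package files plus an in-memory `flytekit.config` and a generated Dockerfile, all packed into a tarball. A minimal sketch of the in-memory-member pattern it relies on (the file name and contents here are illustrative):
# Sketch only: add an in-memory file to a tar-based build context, as
# _build_image does for flytekit.config and the generated Dockerfile.
import tarfile
import tempfile
from io import BytesIO

payload = BytesIO(b"FROM python:3.10-slim\n")  # illustrative contents

with tempfile.NamedTemporaryFile() as f:
    with tarfile.open(mode="w", fileobj=f) as t:
        info = tarfile.TarInfo("Dockerfile")  # name inside the archive
        info.size = len(payload.getvalue())   # size must be set by hand
        payload.seek(0)
        t.addfile(info, payload)              # member body read from the BytesIO
    f.seek(0)  # rewind before passing fileobj=f to the Docker client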
74 | 74-215-19 | inproject | login | [
"adapters",
"api_version",
"attach",
"attach_socket",
"auth",
"base_url",
"build",
"cert",
"close",
"commit",
"configs",
"configure_plugin",
"connect_container_to_network",
"containers",
"cookies",
"create_config",
"create_container",
"create_container_config",
"create_container_from_config",
"create_endpoint_config",
"create_host_config",
"create_network",
"create_networking_config",
"create_plugin",
"create_secret",
"create_service",
"create_swarm_spec",
"create_volume",
"credstore_env",
"delete",
"df",
"diff",
"disable_plugin",
"disconnect_container_from_network",
"enable_plugin",
"events",
"exec_create",
"exec_inspect",
"exec_resize",
"exec_start",
"export",
"get",
"get_adapter",
"get_archive",
"get_image",
"get_unlock_key",
"head",
"headers",
"history",
"hooks",
"images",
"import_image",
"import_image_from_data",
"import_image_from_file",
"import_image_from_image",
"import_image_from_stream",
"import_image_from_url",
"info",
"init_swarm",
"inspect_config",
"inspect_container",
"inspect_distribution",
"inspect_image",
"inspect_network",
"inspect_node",
"inspect_plugin",
"inspect_secret",
"inspect_service",
"inspect_swarm",
"inspect_task",
"inspect_volume",
"join_swarm",
"kill",
"leave_swarm",
"load_image",
"login",
"logs",
"max_redirects",
"merge_environment_settings",
"mount",
"networks",
"nodes",
"options",
"params",
"patch",
"pause",
"ping",
"plugin_privileges",
"plugins",
"port",
"post",
"prepare_request",
"proxies",
"prune_builds",
"prune_containers",
"prune_images",
"prune_networks",
"prune_volumes",
"pull",
"pull_plugin",
"push",
"push_plugin",
"put",
"put_archive",
"rebuild_auth",
"rebuild_proxies",
"redirect_cache",
"reload_config",
"remove_config",
"remove_container",
"remove_image",
"remove_network",
"remove_node",
"remove_plugin",
"remove_secret",
"remove_service",
"remove_volume",
"rename",
"request",
"resize",
"resolve_redirects",
"restart",
"search",
"secrets",
"send",
"service_logs",
"services",
"start",
"stats",
"stop",
"stream",
"tag",
"tasks",
"timeout",
"top",
"trust_env",
"unlock_swarm",
"unpause",
"update_container",
"update_node",
"update_service",
"update_swarm",
"upgrade_plugin",
"verify",
"version",
"volumes",
"wait",
"_attach_params",
"_attach_websocket",
"_auth_configs",
"_check_is_tty",
"_create_websocket_connection",
"_custom_adapter",
"_delete",
"_disable_socket_timeout",
"_general_configs",
"_get",
"_get_raw_response_socket",
"_get_result",
"_get_result_tty",
"_multiplexed_buffer_helper",
"_multiplexed_response_stream_helper",
"_post",
"_post_json",
"_proxy_configs",
"_put",
"_raise_for_status",
"_read_from_socket",
"_result",
"_retrieve_server_version",
"_set_auth_headers",
"_set_request_timeout",
"_stream_helper",
"_stream_raw_result",
"_unmount",
"_url",
"_version",
"__annotations__",
"__attrs__",
"__bool__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__enter__",
"__eq__",
"__exit__",
"__format__",
"__ge__",
"__getattribute__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package
From a high-level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
.. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
- Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client. | (
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
# Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if not (ctx.remote is None):
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
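`_register_serialized_pkg` in the row above ships the serialized protobufs and the package version as a single multipart form through the `files=` argument of `requests.post`. A minimal sketch of that upload shape, with a placeholder URL and in-memory payloads standing in for the open file handles used above:
# Sketch only: the multipart layout used for registration; the URL, file
# names and payloads are placeholders, so the request itself stays commented out.
import requests

headers = {"Authorization": "Bearer <token>"}  # placeholder bearer token
files = {
    "version": b"0.0.1",   # plain bytes field, as ctx.version is sent above
    "wf.pb": b"\x08\x01",  # stands in for open(serialized_file, "rb")
}
# response = requests.post("https://example.com/sdk/register", headers=headers, files=files)
# response.json()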
74 | 74-254-49 | infile | pkg_root | [
"account_id",
"dkr_client",
"dkr_repo",
"full_image_tagged",
"image",
"image_full",
"image_tagged",
"latch_commit_api_url",
"latch_image_api_url",
"latch_register_api_url",
"pkg_root",
"remote",
"serialize_dir",
"token",
"version",
"_construct_dkr_client",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package
From a high-level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
.. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
- Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
# Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx. | .parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if not (ctx.remote is None):
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
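The row above masks the `pkg_root` attribute in `str(ctx.pkg_root.parent)`: the Docker build context is rooted one directory above the package, and commit keys are computed relative to the package root. A small self-contained pathlib sketch of those calls, reusing the `/root/home/foo` path from the docstring example (the package file name is illustrative):
# Sketch only: the pathlib calls the register code leans on; no files are read.
from pathlib import Path

pkg_root = Path("/root/home/foo")
context_root = pkg_root.parent              # build context is one level up
wf_file = pkg_root / "wf" / "__init__.py"   # illustrative package file

print(context_root)                         # /root/home
print(wf_file.relative_to(pkg_root))        # wf/__init__.py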
74 | 74-254-58 | infile | parent | [
"absolute",
"anchor",
"as_posix",
"as_uri",
"chmod",
"cwd",
"drive",
"exists",
"expanduser",
"glob",
"group",
"hardlink_to",
"home",
"is_absolute",
"is_block_device",
"is_char_device",
"is_dir",
"is_fifo",
"is_file",
"is_mount",
"is_relative_to",
"is_reserved",
"is_socket",
"is_symlink",
"iterdir",
"joinpath",
"lchmod",
"link_to",
"lstat",
"match",
"mkdir",
"name",
"open",
"owner",
"parent",
"parents",
"parts",
"read_bytes",
"read_text",
"readlink",
"relative_to",
"rename",
"replace",
"resolve",
"rglob",
"rmdir",
"root",
"samefile",
"stat",
"stem",
"suffix",
"suffixes",
"symlink_to",
"touch",
"unlink",
"with_name",
"with_stem",
"with_suffix",
"write_bytes",
"write_text",
"_accessor",
"_cached_cparts",
"_cparts",
"_format_parsed_parts",
"_from_parsed_parts",
"_from_parts",
"_hash",
"_make_child",
"_make_child_relpath",
"_parse_args",
"_pparts",
"_str",
"__annotations__",
"__bytes__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__enter__",
"__eq__",
"__exit__",
"__format__",
"__fspath__",
"__ge__",
"__getattribute__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__rtruediv__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__truediv__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package
From a high-level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
.. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
- Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
# Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root. | )):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if not (ctx.remote is None):
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
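The generated Dockerfile in the rows above threads the image tag through a build argument: `buildargs={"tag": ctx.full_image_tagged}` feeds `ARG tag`, which is then pinned as `FLYTE_INTERNAL_IMAGE`. A minimal sketch of assembling that kind of template with `textwrap.dedent`; the base image and tag values are placeholders and nothing is actually built here:
# Sketch only: a dedent-ed Dockerfile template plus the matching buildargs dict.
import textwrap

base_image = "python:3.10-slim"                       # placeholder base image
full_image_tagged = "registry.example.com/foo:0.0.1"  # placeholder tag

dockerfile = textwrap.dedent(
    f"""
    FROM {base_image}
    ARG tag
    ENV FLYTE_INTERNAL_IMAGE $tag
    """
)
buildargs = {"tag": full_image_tagged}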
74 | 74-301-26 | common | dkr_repo | [
"account_id",
"dkr_client",
"dkr_repo",
"full_image_tagged",
"image",
"image_full",
"image_tagged",
"latch_commit_api_url",
"latch_image_api_url",
"latch_register_api_url",
"pkg_root",
"remote",
"serialize_dir",
"token",
"version",
"_construct_dkr_client",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package
From a high-level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
    .. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
    - Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
    modifies many components to play nicely with Latch, e.g. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
        pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
    # Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx. | }/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
    if ctx.remote is not None:
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
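The record that ends here splits the register.py source at a single cursor: the code before the cursor stops at `FROM {ctx.` inside the generated Dockerfile, the held-out token is `dkr_repo`, and the code after the cursor resumes with `}/wf-base:fbe8-main`. A short sketch of stitching those pieces back together follows; the string literals are copied from the record above, while the exact field layout of these pipe-separated rows is inferred rather than documented here.

# Reassemble the completed Dockerfile line for the record above.
prefix_tail = "            FROM {ctx."
ground_truth = "dkr_repo"
postfix_head = "}/wf-base:fbe8-main"

completed_line = prefix_tail + ground_truth + postfix_head
assert completed_line.strip() == "FROM {ctx.dkr_repo}/wf-base:fbe8-main"
print(completed_line)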
74 | 74-325-38 | inproject | full_image_tagged | [
"account_id",
"dkr_client",
"dkr_repo",
"full_image_tagged",
"image",
"image_full",
"image_tagged",
"latch_commit_api_url",
"latch_image_api_url",
"latch_register_api_url",
"pkg_root",
"remote",
"serialize_dir",
"token",
"version",
"_construct_dkr_client",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
    `latch.auth` package.
    From a high level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
    .. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
    - Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
    modifies many components to play nicely with Latch, e.g. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
        pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
    # Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx. | },
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
    if ctx.remote is not None:
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
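The `_build_image` function in the record above packs the package files plus an in-memory Dockerfile into a tarball and hands it to the Docker client as a custom build context. Below is a minimal standalone sketch of that pattern, assuming `dkr_client` is docker-py's low-level `APIClient` (which the method names used above suggest); the tag value is a placeholder and error handling is omitted.

import io
import tarfile

import docker  # docker-py; APIClient is assumed to match dkr_client above


def build_from_memory(dockerfile_text: str, tag: str) -> list:
    """Pack a one-file build context into an in-memory tar and build it."""
    buf = io.BytesIO()
    with tarfile.open(mode="w", fileobj=buf) as tar:
        data = dockerfile_text.encode("utf-8")
        info = tarfile.TarInfo("Dockerfile")
        info.size = len(data)
        tar.addfile(info, io.BytesIO(data))
    buf.seek(0)

    client = docker.APIClient()  # talks to the local Docker daemon
    return list(
        client.build(
            fileobj=buf,
            custom_context=True,
            tag=tag,
            buildargs={"tag": tag},
            decode=True,
        )
    )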
74 | 74-326-24 | inproject | full_image_tagged | [
"account_id",
"dkr_client",
"dkr_repo",
"full_image_tagged",
"image",
"image_full",
"image_tagged",
"latch_commit_api_url",
"latch_image_api_url",
"latch_register_api_url",
"pkg_root",
"remote",
"serialize_dir",
"token",
"version",
"_construct_dkr_client",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
    `latch.auth` package.
    From a high level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
    .. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
    - Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
    modifies many components to play nicely with Latch, e.g. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
        pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
    # Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx. | ,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
    if ctx.remote is not None:
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
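The `_login` helper in the record above exchanges a Latch token for temporary AWS credentials, asks ECR for a registry authorization token, and logs the Docker client into that registry. A compact sketch of the ECR half of that flow is below; the registry URL and region are placeholders, the credentials are assumed to be available in the environment, and the boto3/docker-py calls mirror the ones in the code above.

import base64

import boto3
import docker


def ecr_docker_login(registry: str, region: str = "us-west-2") -> docker.APIClient:
    """Fetch an ECR authorization token and log a docker-py client into the registry."""
    ecr = boto3.client("ecr", region_name=region)
    token = ecr.get_authorization_token()["authorizationData"][0]["authorizationToken"]
    # The token is base64("AWS:<password>"), so split it into the login pair.
    user, password = base64.b64decode(token).decode("utf-8").split(":")

    client = docker.APIClient()
    client.login(username=user, password=password, registry=registry)
    return client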
74 | 74-335-12 | inproject | full_image_tagged | [
"account_id",
"dkr_client",
"dkr_repo",
"full_image_tagged",
"image",
"image_full",
"image_tagged",
"latch_commit_api_url",
"latch_image_api_url",
"latch_register_api_url",
"pkg_root",
"remote",
"serialize_dir",
"token",
"version",
"_construct_dkr_client",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
    `latch.auth` package.
    From a high level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
    .. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
    - Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
    modifies many components to play nicely with Latch, e.g. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
        pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
    # Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx. | ,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
    if ctx.remote is not None:
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
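The serialization step in the record above runs `make serialize` inside the freshly built image, bind-mounting a host directory over `/tmp/output` and collecting the container logs. Below is a stripped-down sketch of that create/start/logs pattern with the low-level docker-py client; the image name, command, and host path are placeholders, and cleanup of the finished container is omitted.

import docker


def run_and_capture(image: str, command: list, host_dir: str) -> list:
    """Run a one-off command in `image` with `host_dir` mounted at /tmp/output."""
    client = docker.APIClient()
    container = client.create_container(
        image,
        command=command,
        volumes=["/tmp/output"],
        host_config=client.create_host_config(
            binds={host_dir: {"bind": "/tmp/output", "mode": "rw"}}
        ),
    )
    container_id = container.get("Id")
    client.start(container_id)
    # logs(stream=True) yields bytes until the container exits.
    return [line.decode("utf-8") for line in client.logs(container_id, stream=True)]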
74 | 74-348-8 | common | dkr_client | [
"account_id",
"dkr_client",
"dkr_repo",
"full_image_tagged",
"image",
"image_full",
"image_tagged",
"latch_commit_api_url",
"latch_image_api_url",
"latch_register_api_url",
"pkg_root",
"remote",
"serialize_dir",
"token",
"version",
"_construct_dkr_client",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
    `latch.auth` package.
    From a high level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
    .. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
    - Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
    modifies many components to play nicely with Latch, e.g. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
        pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
    # Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx. | .start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
    if ctx.remote is not None:
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
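`_upload_pkg_image` in the record above pushes the tagged image and streams decoded status dictionaries back, which `_print_upload_logs` renders as a live per-layer progress readout. A minimal sketch of consuming that stream follows; the repository name is a placeholder and only the keys the code above actually reads ("error", "id", "progress") are used.

import docker


def push_with_progress(repository: str) -> None:
    """Push `repository` and print per-layer progress from the decoded stream."""
    client = docker.APIClient()
    for event in client.push(repository, stream=True, decode=True):
        if event.get("error") is not None:
            raise OSError(f"push failed: {event['error']}")
        layer, progress = event.get("id"), event.get("progress")
        if layer and progress:
            print(f"{layer} ~ {progress}")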
74 | 74-348-19 | common | start | [
"adapters",
"api_version",
"attach",
"attach_socket",
"auth",
"base_url",
"build",
"cert",
"close",
"commit",
"configs",
"configure_plugin",
"connect_container_to_network",
"containers",
"cookies",
"create_config",
"create_container",
"create_container_config",
"create_container_from_config",
"create_endpoint_config",
"create_host_config",
"create_network",
"create_networking_config",
"create_plugin",
"create_secret",
"create_service",
"create_swarm_spec",
"create_volume",
"credstore_env",
"delete",
"df",
"diff",
"disable_plugin",
"disconnect_container_from_network",
"enable_plugin",
"events",
"exec_create",
"exec_inspect",
"exec_resize",
"exec_start",
"export",
"get",
"get_adapter",
"get_archive",
"get_image",
"get_unlock_key",
"head",
"headers",
"history",
"hooks",
"images",
"import_image",
"import_image_from_data",
"import_image_from_file",
"import_image_from_image",
"import_image_from_stream",
"import_image_from_url",
"info",
"init_swarm",
"inspect_config",
"inspect_container",
"inspect_distribution",
"inspect_image",
"inspect_network",
"inspect_node",
"inspect_plugin",
"inspect_secret",
"inspect_service",
"inspect_swarm",
"inspect_task",
"inspect_volume",
"join_swarm",
"kill",
"leave_swarm",
"load_image",
"login",
"logs",
"max_redirects",
"merge_environment_settings",
"mount",
"networks",
"nodes",
"options",
"params",
"patch",
"pause",
"ping",
"plugin_privileges",
"plugins",
"port",
"post",
"prepare_request",
"proxies",
"prune_builds",
"prune_containers",
"prune_images",
"prune_networks",
"prune_volumes",
"pull",
"pull_plugin",
"push",
"push_plugin",
"put",
"put_archive",
"rebuild_auth",
"rebuild_proxies",
"redirect_cache",
"reload_config",
"remove_config",
"remove_container",
"remove_image",
"remove_network",
"remove_node",
"remove_plugin",
"remove_secret",
"remove_service",
"remove_volume",
"rename",
"request",
"resize",
"resolve_redirects",
"restart",
"search",
"secrets",
"send",
"service_logs",
"services",
"start",
"stats",
"stop",
"stream",
"tag",
"tasks",
"timeout",
"top",
"trust_env",
"unlock_swarm",
"unpause",
"update_container",
"update_node",
"update_service",
"update_swarm",
"upgrade_plugin",
"verify",
"version",
"volumes",
"wait",
"_attach_params",
"_attach_websocket",
"_auth_configs",
"_check_is_tty",
"_create_websocket_connection",
"_custom_adapter",
"_delete",
"_disable_socket_timeout",
"_general_configs",
"_get",
"_get_raw_response_socket",
"_get_result",
"_get_result_tty",
"_multiplexed_buffer_helper",
"_multiplexed_response_stream_helper",
"_post",
"_post_json",
"_proxy_configs",
"_put",
"_raise_for_status",
"_read_from_socket",
"_result",
"_retrieve_server_version",
"_set_auth_headers",
"_set_request_timeout",
"_stream_helper",
"_stream_raw_result",
"_unmount",
"_url",
"_version",
"__annotations__",
"__attrs__",
"__bool__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__enter__",
"__eq__",
"__exit__",
"__format__",
"__ge__",
"__getattribute__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package.
At a high level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
.. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
- Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
# Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client. | (container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if ctx.remote is not None:
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
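The row above is completed by the `start` call that `_serialize_pkg` issues through docker-py's low-level `APIClient`. Purely for comparison, here is a minimal sketch of the same create/start/stream-logs step written against docker-py's high-level client; the image tag and host path are illustrative assumptions, not values taken from the SDK.

import docker

client = docker.from_env()
container = client.containers.run(
    "example/wf-image:latest",   # assumed image tag, standing in for ctx.full_image_tagged
    command=["make", "serialize"],
    volumes={"/tmp/serialize-out": {"bind": "/tmp/output", "mode": "rw"}},  # assumed host path
    detach=True,                 # return a Container handle so logs can be streamed
)
for chunk in container.logs(stream=True, follow=True):
    print(chunk.decode("utf-8"), end="")
container.wait()                 # block until the one-off container exits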
74 | 74-357-23 | inproject | full_image_tagged | [
"account_id",
"dkr_client",
"dkr_repo",
"full_image_tagged",
"image",
"image_full",
"image_tagged",
"latch_commit_api_url",
"latch_image_api_url",
"latch_register_api_url",
"pkg_root",
"remote",
"serialize_dir",
"token",
"version",
"_construct_dkr_client",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package.
At a high level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
.. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
- Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
# Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx. | ,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if ctx.remote is not None:
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
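The `_build_image` path shown in these rows hands an in-memory tarball to `APIClient.build` with `custom_context=True`. Below is a self-contained sketch of that technique, reduced to a single Dockerfile injected from a `BytesIO` object; the base image, tag, and daemon socket are placeholders chosen for illustration.

import io
import tarfile
import docker

dockerfile = b"FROM python:3.10-slim\nRUN pip install --upgrade pip\n"  # placeholder Dockerfile

buf = io.BytesIO()
with tarfile.open(mode="w", fileobj=buf) as tar:
    info = tarfile.TarInfo("Dockerfile")       # inject the Dockerfile straight from memory
    info.size = len(dockerfile)
    tar.addfile(info, io.BytesIO(dockerfile))
buf.seek(0)

api = docker.APIClient(base_url="unix://var/run/docker.sock")  # assumed local daemon socket
for line in api.build(fileobj=buf, custom_context=True, tag="sketch:latest", decode=True):
    print(line.get("stream", ""), end="")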
74 | 74-364-46 | inproject | token | [
"account_id",
"dkr_client",
"dkr_repo",
"full_image_tagged",
"image",
"image_full",
"image_tagged",
"latch_commit_api_url",
"latch_image_api_url",
"latch_register_api_url",
"pkg_root",
"remote",
"serialize_dir",
"token",
"version",
"_construct_dkr_client",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package.
At a high level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
.. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
- Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
# Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx. | }"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if ctx.remote is not None:
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
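`_register_serialized_pkg`, whose Bearer token completes the row above, uploads the serialized artifacts by passing a mix of raw bytes and file-like objects to `requests.post(files=...)`, which requests turns into one multipart/form-data part per entry. A minimal sketch of that pattern follows, with a placeholder URL, token, and payload rather than the real Latch endpoint.

import io
import requests

files = {
    "version": b"0.0.1",                            # raw bytes become one form part
    "wf.pb": io.BytesIO(b"<serialized workflow>"),  # any file-like object works,
                                                    # e.g. open("wf.pb", "rb") as in the SDK
}
resp = requests.post(
    "https://example.com/api/register",             # placeholder endpoint, not Latch's
    headers={"Authorization": "Bearer <token>"},    # placeholder bearer token
    files=files,
)
print(resp.status_code)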
74 | 74-372-24 | common | post | [
"adapters",
"api",
"auth",
"certs",
"chardet_version",
"charset_normalizer_version",
"check_compatibility",
"codes",
"compat",
"ConnectionError",
"ConnectTimeout",
"cookies",
"cryptography_version",
"delete",
"DependencyWarning",
"exceptions",
"FileModeWarning",
"get",
"head",
"help",
"hooks",
"HTTPError",
"JSONDecodeError",
"logging",
"models",
"NullHandler",
"options",
"packages",
"patch",
"post",
"PreparedRequest",
"put",
"pyopenssl",
"ReadTimeout",
"request",
"Request",
"RequestException",
"RequestsDependencyWarning",
"Response",
"Session",
"session",
"sessions",
"ssl",
"status_codes",
"structures",
"Timeout",
"TooManyRedirects",
"urllib3",
"URLRequired",
"utils",
"warnings",
"_check_cryptography",
"_internal_utils",
"__author__",
"__author_email__",
"__build__",
"__cake__",
"__copyright__",
"__description__",
"__doc__",
"__file__",
"__license__",
"__name__",
"__package__",
"__title__",
"__url__",
"__version__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package.
At a high level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
.. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
- Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
# Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests. | (
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if ctx.remote is not None:
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
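The in-place progress display in `_print_upload_logs` leans on the ANSI escape `\x1B[{i}A`, which moves the cursor up `i` lines so the next print overwrites the previous block. A tiny standalone sketch of that trick, with made-up layer ids, is below.

import time

layers = ["abc123", "def456"]                 # made-up layer ids
for pct in range(0, 101, 10):
    block = "".join(f"\t{layer} ~ Pushing {pct}%\n" for layer in layers)
    print(block, end=f"\x1B[{len(layers)}A")  # ESC[nA: move the cursor up n lines
    time.sleep(0.1)
print("\n" * len(layers), end="")             # step past the redrawn block when done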
74 | 74-392-31 | common | post | [
"adapters",
"api",
"auth",
"certs",
"chardet_version",
"charset_normalizer_version",
"check_compatibility",
"codes",
"compat",
"ConnectionError",
"ConnectTimeout",
"cookies",
"cryptography_version",
"delete",
"DependencyWarning",
"exceptions",
"FileModeWarning",
"get",
"head",
"help",
"hooks",
"HTTPError",
"JSONDecodeError",
"logging",
"models",
"NullHandler",
"options",
"packages",
"patch",
"post",
"PreparedRequest",
"put",
"pyopenssl",
"ReadTimeout",
"request",
"Request",
"RequestException",
"RequestsDependencyWarning",
"Response",
"Session",
"session",
"sessions",
"ssl",
"status_codes",
"structures",
"Timeout",
"TooManyRedirects",
"urllib3",
"URLRequired",
"utils",
"warnings",
"_check_cryptography",
"_internal_utils",
"__author__",
"__author_email__",
"__build__",
"__cake__",
"__copyright__",
"__description__",
"__doc__",
"__file__",
"__license__",
"__name__",
"__package__",
"__title__",
"__url__",
"__version__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package.
At a high level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
.. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
- Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, eg. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
# Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if not (ctx.remote is None):
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests. | (
url=ctx.latch_commit_api_url,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
74 | 74-393-16 | inproject | latch_commit_api_url | [
"account_id",
"dkr_client",
"dkr_repo",
"full_image_tagged",
"image",
"image_full",
"image_tagged",
"latch_commit_api_url",
"latch_image_api_url",
"latch_register_api_url",
"pkg_root",
"remote",
"serialize_dir",
"token",
"version",
"_construct_dkr_client",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | """Service to register workflows."""
import base64
import os
import tarfile
import tempfile
import textwrap
from io import BytesIO
from pathlib import Path
from typing import List, Union
import boto3
import requests
from latch.services.register import RegisterCtx, RegisterOutput
from latch.utils import retrieve_or_login
def _print_build_logs(build_logs, image):
print(f"\tBuilding Docker image for {image}")
for x in build_logs:
line = x.get("stream")
error = x.get("error")
if error is not None:
print(f"\t\t{x}")
raise OSError(f"Error when building image ~ {x}")
elif line is not None:
print(f"\t\t{line}", end="")
def _print_serialize_logs(serialize_logs, image):
print(f"\tSerializing workflow in {image}:")
for x in serialize_logs:
print(f"\t\t{x}", end="")
def _print_upload_logs(upload_image_logs, image):
print(f"\tUploading Docker image for {image}")
prog_map = {}
def _pp_prog_map(m):
prog_chunk = ""
i = 0
for id, prog in m.items():
if prog is None:
continue
prog_chunk += f"\t\t{id} ~ {prog}\n"
i += 1
if prog_chunk == "":
return
print(prog_chunk, end=f"\x1B[{i}A")
for x in upload_image_logs:
if (
x.get("error") is not None
and "denied: Your authorization token has expired." in x["error"]
):
raise OSError(f"Docker authorization for {image} is expired.")
prog_map[x.get("id")] = x.get("progress")
_pp_prog_map(prog_map)
def _print_reg_resp(resp, image):
print(f"\tRegistering {image} with LatchBio.")
print("\tstdout:")
for x in resp["stdout"].split("\n"):
print(f"\t\t{x}")
print("\tstderr:")
for x in resp["stderr"].split("\n"):
print(f"\t\t{x}")
def register(
pkg_root: str,
dockerfile: Union[str, None] = None,
requirements: Union[str, None] = None,
remote: Union[str, None] = None,
) -> RegisterOutput:
"""Registers a workflow, defined as python code, with Latch.
Kicks off a three-legged OAuth2.0 flow outlined in `this RFC`_. Logic
scaffolding this flow and detailed documentation can be found in the
`latch.auth` package
From a high-level, the user will be redirected to a browser and prompted to
login. The SDK meanwhile spins up a callback server on a separate thread
that will be hit when the browser login is successful with an access token.
.. _this RFC:
https://datatracker.ietf.org/doc/html/rfc6749
The major constituent steps are:
- Constructing a Docker image
- Serializing flyte objects within an instantiated container
- Uploading the container to a latch-owned registry
- Registering serialized objects + the container with latch.
The Docker image is constructed by inferring relevant files + dependencies
from the workflow package code itself. If a Dockerfile is provided
explicitly, it will be used for image construction instead.
The registration flow makes heavy use of `Flyte`_, and while the Latch SDK
modifies many components to play nicely with Latch, e.g. platform API,
user-specific auth, the underlying concepts are nicely summarized in the
`flytekit documentation`_.
Args:
pkg_root: A valid path pointing to the workflow code a user wishes to
register. The path can be absolute or relative. The path is always
a directory, with its structure exactly as constructed and
described in the `latch.services.init` function.
dockerfile: An optional valid path pointing to `Dockerfile`_ to define
a custom container. If passed, the resulting container will be used
as the environment to execute the registered workflow, allowing
arbitrary binaries and libraries to be called from workflow code.
However, be warned, this Dockerfile will be used *as is* - files
must be copied correctly and shell variables must be set to ensure
correct execution. See examples (TODO) for guidance.
requirements: An optional valid path pointing to `requirements.txt`
file containing a list of python libraries in the format produced
by `pip freeze` to install within the container that the workflow
will execute.
Example: ::
register("./foo")
register("/root/home/foo")
register("/root/home/foo", dockerfile="./Dockerfile")
register("/root/home/foo", requirements="./requirements.txt")
.. _Flyte:
https://docs.flyte.org
.. _Dockerfile:
https://docs.docker.com/engine/reference/builder/
.. _flytekit documentation:
https://docs.flyte.org/en/latest/concepts/registration.html
"""
ctx = RegisterCtx(pkg_root)
ctx.remote = remote
print(f"Initializing registration for {pkg_root}")
if dockerfile is not None:
dockerfile = Path(dockerfile).resolve()
if not dockerfile.exists():
raise OSError(f"Provided Dockerfile {dockerfile} does not exist.")
if requirements is not None:
if dockerfile is not None:
raise ValueError(
"Cannot provide both a dockerfile -"
f" {str(dockerfile)} and requirements file {requirements}"
)
requirements = Path(requirements).resolve()
if not requirements.exists():
raise OSError(f"Provided requirements file {requirements} does not exist.")
# TODO: kenny, retire logic for automatic container construction
if dockerfile is None:
dockerfile = ctx.pkg_root.joinpath("Dockerfile")
build_logs = _build_image(ctx, dockerfile, requirements)
_print_build_logs(build_logs, ctx.image_tagged)
with tempfile.TemporaryDirectory() as td:
td_path = Path(td).resolve()
serialize_logs = _serialize_pkg(ctx, td_path)
_print_serialize_logs(serialize_logs, ctx.image_tagged)
upload_image_logs = _upload_pkg_image(ctx)
_print_upload_logs(upload_image_logs, ctx.image_tagged)
reg_resp = _register_serialized_pkg(ctx, td_path)
_print_reg_resp(reg_resp, ctx.image_tagged)
return RegisterOutput(
build_logs=build_logs,
serialize_logs=serialize_logs,
registration_response=reg_resp,
)
def _login(ctx: RegisterCtx):
headers = {"Authorization": f"Bearer {ctx.token}"}
data = {"pkg_name": ctx.image}
response = requests.post(ctx.latch_image_api_url, headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
# TODO: cache
try:
client = boto3.session.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session_token,
region_name="us-west-2",
).client("ecr")
token = client.get_authorization_token()["authorizationData"][0][
"authorizationToken"
]
except Exception as err:
raise ValueError(
f"unable to retreive an ecr login token for user {ctx.account_id}"
) from err
user, password = base64.b64decode(token).decode("utf-8").split(":")
ctx.dkr_client.login(
username=user,
password=password,
registry=ctx.dkr_repo,
)
def _build_image(
ctx: RegisterCtx,
dockerfile: Union[None, Path] = None,
requirements: Union[None, Path] = None,
) -> List[str]:
if dockerfile is not None:
_login(ctx)
build_logs = ctx.dkr_client.build(
path=str(dockerfile.parent),
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
return build_logs
# Construct tarball holding docker build context
# We want to construct a custom context that only has package files + our
# dockerfile object injected directly from memory.
def _build_file_list(root: str):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
with tempfile.NamedTemporaryFile() as f:
with tarfile.open(mode="w", fileobj=f) as t:
# TODO: docker build context is from the perspective of one
# directory up.
for path in _build_file_list(str(ctx.pkg_root.parent)):
full_path = Path(ctx.pkg_root.parent).resolve().joinpath(path)
i = t.gettarinfo(full_path, arcname=path)
if i.isfile():
try:
with open(full_path, "rb") as fp:
t.addfile(i, fp)
except OSError as e:
raise OSError(
f"Can not read file in context: {full_path}"
) from e
else:
# Directories, FIFOs, symlinks don't need to be read.
t.addfile(i, None)
fk_config_file = textwrap.dedent(
f"""
[sdk]
workflow_packages={ctx.pkg_root.name}
python_venv=flytekit_venv
"""
)
fk_config_file = BytesIO(fk_config_file.encode("utf-8"))
fcfinfo = tarfile.TarInfo("flytekit.config")
fcfinfo.size = len(fk_config_file.getvalue())
fk_config_file.seek(0)
t.addfile(fcfinfo, fk_config_file)
if requirements is not None:
requirements_cmds = textwrap.dedent(
"""
COPY requirements.txt /root
RUN python3 -m pip install -r requirements.txt
"""
)
with open(requirements) as r:
requirements = BytesIO(r.read().encode("utf-8"))
rinfo = tarfile.TarInfo("requirements.txt")
rinfo.size = len(requirements.getvalue())
requirements.seek(0)
t.addfile(rinfo, requirements)
else:
requirements_cmds = ""
dockerfile = textwrap.dedent(
f"""
FROM {ctx.dkr_repo}/wf-base:fbe8-main
COPY flytekit.config /root
COPY {ctx.pkg_root.name} /root/{ctx.pkg_root.name}
WORKDIR /root
RUN python3 -m pip install --upgrade latch
{requirements_cmds}
ARG tag
ENV FLYTE_INTERNAL_IMAGE $tag
"""
)
dockerfile = BytesIO(dockerfile.encode("utf-8"))
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
t.addfile(dfinfo, dockerfile)
f.seek(0)
_login(ctx)
return ctx.dkr_client.build(
fileobj=f,
custom_context=True,
buildargs={"tag": ctx.full_image_tagged},
tag=ctx.full_image_tagged,
decode=True,
)
def _serialize_pkg(ctx: RegisterCtx, serialize_dir: Path) -> List[str]:
_serialize_cmd = ["make", "serialize"]
container = ctx.dkr_client.create_container(
ctx.full_image_tagged,
command=_serialize_cmd,
volumes=[str(serialize_dir)],
host_config=ctx.dkr_client.create_host_config(
binds={
str(serialize_dir): {
"bind": "/tmp/output",
"mode": "rw",
},
}
),
)
container_id = container.get("Id")
ctx.dkr_client.start(container_id)
logs = ctx.dkr_client.logs(container_id, stream=True)
return [x.decode("utf-8") for x in logs]
def _upload_pkg_image(ctx: RegisterCtx) -> List[str]:
return ctx.dkr_client.push(
repository=ctx.full_image_tagged,
stream=True,
decode=True,
)
def _register_serialized_pkg(ctx: RegisterCtx, serialize_dir: Path) -> dict:
headers = {"Authorization": f"Bearer {ctx.token}"}
serialize_files = {"version": ctx.version.encode("utf-8")}
for dirname, dirnames, fnames in os.walk(serialize_dir):
for filename in fnames + dirnames:
file = Path(dirname).resolve().joinpath(filename)
serialize_files[file.name] = open(file, "rb")
response = requests.post(
ctx.latch_register_api_url,
headers=headers,
files=serialize_files,
)
commit_files = {".workflow_name": ctx.pkg_root.name.encode("utf-8")}
if not (ctx.remote is None):
commit_files[".remote_name"] = ctx.remote.encode("utf-8")
for dirname, dirnames, fnames in os.walk(ctx.pkg_root):
for filename in fnames:
file = Path(dirname).resolve().joinpath(filename)
# ignore data folder
if ctx.pkg_root.joinpath("data") in file.parents:
continue
key = str(file.relative_to(ctx.pkg_root))
commit_files[key] = open(file, "rb")
commit_response = requests.post(
url=ctx. | ,
headers=headers,
files=commit_files,
)
if not commit_response.json()["success"]:
raise ValueError(
"Issue committing: please make sure the specified remote exists, and that Latch can push to it."
)
return response.json()
| latchbio__latch |
75 | 75-82-24 | inproject | post | [
"adapters",
"api",
"auth",
"certs",
"chardet_version",
"charset_normalizer_version",
"check_compatibility",
"codes",
"compat",
"ConnectionError",
"ConnectTimeout",
"cookies",
"cryptography_version",
"delete",
"DependencyWarning",
"exceptions",
"FileModeWarning",
"get",
"head",
"help",
"hooks",
"HTTPError",
"JSONDecodeError",
"logging",
"models",
"NullHandler",
"options",
"packages",
"patch",
"post",
"PreparedRequest",
"put",
"pyopenssl",
"ReadTimeout",
"request",
"Request",
"RequestException",
"RequestsDependencyWarning",
"Response",
"Session",
"session",
"sessions",
"ssl",
"status_codes",
"structures",
"Timeout",
"TooManyRedirects",
"urllib3",
"URLRequired",
"utils",
"warnings",
"_check_cryptography",
"_internal_utils",
"__author__",
"__author_email__",
"__build__",
"__cake__",
"__copyright__",
"__description__",
"__doc__",
"__file__",
"__license__",
"__name__",
"__package__",
"__title__",
"__url__",
"__version__"
] | import json
import os
import select
import sys
import textwrap
from pathlib import Path
import kubernetes
import requests
import websocket
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.stream import stream
from latch_cli.config.latch import LatchConfig
from latch_cli.utils import account_id_from_token, retrieve_or_login
config = LatchConfig()
endpoints = config.sdk_endpoints
def _construct_kubeconfig(
cert_auth_data: str,
cluster_endpoint: str,
account_id: str,
access_key: str,
secret_key: str,
session_token: str,
) -> str:
open_brack = "{"
close_brack = "}"
region_code = "us-west-2"
cluster_name = "prion-prod"
cluster_endpoint = (
"https://C629A20F0E69D7DA6849ED877A3048EC.gr7.us-west-2.eks.amazonaws.com"
)
cert_auth_data = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1EVXpNREEwTURJME5sb1hEVE15TURVeU56QTBNREkwTmxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHptCkdkaGhBNDNzTHBrUnl3TlpJOUNtMWpXd1B2QTloRTNyWjVYWTRha2Ewb05uOFBlMSt4TVptZDg5ZHR3SjVVdFUKQ1AwR055cWd6NEtzbUxDOTMrVW8vcSs2eWxnMXZNRUV5bHFzNGt5WVN2dWhpaHVsbmZtODBBeE5xT0RQOEtDZgplSmRtTnM1cXo4MXpDamg1Y1NnNldFakJYVnhQSDQyQWdpcnZjYVUxbEJUd0VZV3gwQTdQRWlBd0I2NUtjUXB6CkRqOUFiZ0lYTjR0QjV6alhCZzNLemtJOFRDMWJvaElTbUxIa0NTTy9NUEd3RFpEdXN3RlFpYnpDSTdkVDlCTkEKaXBzZDNOTVcrd1kxV00zV2J6ZGNaSldib3NWUE5SUHNDTHlFSzFLZkpVbXFESGZGOWRMMUNjWFQ1QnFlY3B2cQpOVGdHRGZzZklJeVdJL2ZoMWpNQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFKzJpT3RFRVhIS3VzaU5oUGoweE44ZElKUExNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFBQVZlK28ybVN0M01CT04rdmRiNzIxWmRsY3IyZG5kNzBScjFyTlI4ME1DUTMwRVlFagpSSTdidUgvTUZhTzBaVjFZbjdYSGdUbVhNMWNPTk9DcTdLdXF6NURyVG5IN1c3VGQzbi80NjBPeFpOcWo4MUR1CnZZRlF6eHltbHZQMEx3dkVIQlliK1RWOUNsc2pCU1Vod1N3aXUrQWQrTHp6Wks0NWh0R2ZvdlJyeDYvR1pEVnEKYUFDQUZVTGgrVHRnMzFZdXdUQ0RZYmZZOC9QOUhma3psSTgraGY3UGxjZmp4Wmg5MTJUUk1VUTdkS1ZJMHF3TQo4NnFLK3ZmQktWOG5IQW1JMEEzVmp6cWQ4OWlHMkhQTHlhNDJXTkZmM0t3SCsxZC9IVHBYUEVBTk80WHpST1BQClJ6UHJHc21ZRmlZTGN2alA3RG5IZi9GYkViSFdYTXRWVjRSZgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
return textwrap.dedent(
f"""apiVersion: v1
clusters:
- cluster:
certificate-authority-data: {cert_auth_data}
server: {cluster_endpoint}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
contexts:
- context:
cluster: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
current-context: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
kind: Config
preferences: {open_brack}{close_brack}
users:
- name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
command: aws
args:
- --region
- {region_code}
- eks
- get-token
- --cluster-name
- {cluster_name}
env:
- name: 'AWS_ACCESS_KEY_ID'
value: '{access_key}'
- name: 'AWS_SECRET_ACCESS_KEY'
value: '{secret_key}'
- name: 'AWS_SESSION_TOKEN'
value: '{session_token}'"""
)
def _fetch_pod_info(token: str, task_name: str) -> (str, str, str):
headers = {"Authorization": f"Bearer {token}"}
data = {"task_name": task_name}
response = requests. | (endpoints["pod-exec-info"], headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
cert_auth_data = response["cert_auth_data"]
cluster_endpoint = response["cluster_endpoint"]
namespace = response["namespace"]
aws_account_id = response["aws_account_id"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
return (
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
)
def execute(task_name: str):
token = retrieve_or_login()
(
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
) = _fetch_pod_info(token, task_name)
account_id = account_id_from_token(token)
if int(account_id) < 10:
account_id = f"x{account_id}"
config_data = _construct_kubeconfig(
cert_auth_data,
cluster_endpoint,
aws_account_id,
access_key,
secret_key,
session_token,
)
config_file = Path("config").resolve()
with open(config_file, "w") as c:
c.write(config_data)
kubernetes.config.load_kube_config("config")
core_v1 = core_v1_api.CoreV1Api()
# TODO
pod_name = task_name
wssock = stream(
core_v1.connect_get_namespaced_pod_exec,
pod_name,
namespace,
command=["/bin/sh"],
stderr=True,
stdin=True,
stdout=True,
tty=True,
_preload_content=False,
).sock
stdin_channel = bytes([kubernetes.stream.ws_client.STDIN_CHANNEL])
stdout_channel = kubernetes.stream.ws_client.STDOUT_CHANNEL
stderr_channel = kubernetes.stream.ws_client.STDERR_CHANNEL
stdin = sys.stdin.fileno()
stdout = sys.stdout.fileno()
stderr = sys.stderr.fileno()
rlist = [wssock.sock, stdin]
while True:
rs, _ws, _xs = select.select(rlist, [], [])
if stdin in rs:
data = os.read(stdin, 32 * 1024)
if len(data) > 0:
wssock.send(stdin_channel + data, websocket.ABNF.OPCODE_BINARY)
if wssock.sock in rs:
opcode, frame = wssock.recv_data_frame(True)
if opcode == websocket.ABNF.OPCODE_CLOSE:
rlist.remove(wssock.sock)
elif opcode == websocket.ABNF.OPCODE_BINARY:
channel = frame.data[0]
data = frame.data[1:]
if channel in (stdout_channel, stderr_channel):
if len(data):
if channel == stdout_channel:
os.write(stdout, data)
else:
os.write(stderr, data)
elif channel == kubernetes.stream.ws_client.ERROR_CHANNEL:
wssock.close()
error = json.loads(data)
if error["status"] == "Success":
return 0
if error["reason"] == "NonZeroExitCode":
for cause in error["details"]["causes"]:
if cause["reason"] == "ExitCode":
return int(cause["message"])
print(file=sys.stderr)
print(f"Failure running: {' '.join(command)}", file=sys.stderr)
print(
f"Status: {error['status']} - Message: {error['message']}",
file=sys.stderr,
)
print(file=sys.stderr, flush=True)
sys.exit(1)
else:
print(file=sys.stderr)
print(f"Unexpected channel: {channel}", file=sys.stderr)
print(f"Data: {data}", file=sys.stderr)
print(file=sys.stderr, flush=True)
sys.exit(1)
else:
print(file=sys.stderr)
print(f"Unexpected websocket opcode: {opcode}", file=sys.stderr)
print(file=sys.stderr, flush=True)
sys.exit(1)
| latchbio__latch |
75 | 75-85-28 | inproject | json | [
"apparent_encoding",
"close",
"content",
"cookies",
"elapsed",
"encoding",
"headers",
"history",
"is_permanent_redirect",
"is_redirect",
"iter_content",
"iter_lines",
"json",
"links",
"next",
"ok",
"raise_for_status",
"raw",
"reason",
"request",
"status_code",
"text",
"url",
"_content",
"_content_consumed",
"_next",
"__annotations__",
"__attrs__",
"__bool__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__enter__",
"__eq__",
"__exit__",
"__format__",
"__getattribute__",
"__getstate__",
"__hash__",
"__init__",
"__init_subclass__",
"__iter__",
"__module__",
"__ne__",
"__new__",
"__nonzero__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__setstate__",
"__sizeof__",
"__slots__",
"__str__"
] | import json
import os
import select
import sys
import textwrap
from pathlib import Path
import kubernetes
import requests
import websocket
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.stream import stream
from latch_cli.config.latch import LatchConfig
from latch_cli.utils import account_id_from_token, retrieve_or_login
config = LatchConfig()
endpoints = config.sdk_endpoints
def _construct_kubeconfig(
cert_auth_data: str,
cluster_endpoint: str,
account_id: str,
access_key: str,
secret_key: str,
session_token: str,
) -> str:
open_brack = "{"
close_brack = "}"
region_code = "us-west-2"
cluster_name = "prion-prod"
cluster_endpoint = (
"https://C629A20F0E69D7DA6849ED877A3048EC.gr7.us-west-2.eks.amazonaws.com"
)
cert_auth_data = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1EVXpNREEwTURJME5sb1hEVE15TURVeU56QTBNREkwTmxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHptCkdkaGhBNDNzTHBrUnl3TlpJOUNtMWpXd1B2QTloRTNyWjVYWTRha2Ewb05uOFBlMSt4TVptZDg5ZHR3SjVVdFUKQ1AwR055cWd6NEtzbUxDOTMrVW8vcSs2eWxnMXZNRUV5bHFzNGt5WVN2dWhpaHVsbmZtODBBeE5xT0RQOEtDZgplSmRtTnM1cXo4MXpDamg1Y1NnNldFakJYVnhQSDQyQWdpcnZjYVUxbEJUd0VZV3gwQTdQRWlBd0I2NUtjUXB6CkRqOUFiZ0lYTjR0QjV6alhCZzNLemtJOFRDMWJvaElTbUxIa0NTTy9NUEd3RFpEdXN3RlFpYnpDSTdkVDlCTkEKaXBzZDNOTVcrd1kxV00zV2J6ZGNaSldib3NWUE5SUHNDTHlFSzFLZkpVbXFESGZGOWRMMUNjWFQ1QnFlY3B2cQpOVGdHRGZzZklJeVdJL2ZoMWpNQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFKzJpT3RFRVhIS3VzaU5oUGoweE44ZElKUExNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFBQVZlK28ybVN0M01CT04rdmRiNzIxWmRsY3IyZG5kNzBScjFyTlI4ME1DUTMwRVlFagpSSTdidUgvTUZhTzBaVjFZbjdYSGdUbVhNMWNPTk9DcTdLdXF6NURyVG5IN1c3VGQzbi80NjBPeFpOcWo4MUR1CnZZRlF6eHltbHZQMEx3dkVIQlliK1RWOUNsc2pCU1Vod1N3aXUrQWQrTHp6Wks0NWh0R2ZvdlJyeDYvR1pEVnEKYUFDQUZVTGgrVHRnMzFZdXdUQ0RZYmZZOC9QOUhma3psSTgraGY3UGxjZmp4Wmg5MTJUUk1VUTdkS1ZJMHF3TQo4NnFLK3ZmQktWOG5IQW1JMEEzVmp6cWQ4OWlHMkhQTHlhNDJXTkZmM0t3SCsxZC9IVHBYUEVBTk80WHpST1BQClJ6UHJHc21ZRmlZTGN2alA3RG5IZi9GYkViSFdYTXRWVjRSZgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
return textwrap.dedent(
f"""apiVersion: v1
clusters:
- cluster:
certificate-authority-data: {cert_auth_data}
server: {cluster_endpoint}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
contexts:
- context:
cluster: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
current-context: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
kind: Config
preferences: {open_brack}{close_brack}
users:
- name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
command: aws
args:
- --region
- {region_code}
- eks
- get-token
- --cluster-name
- {cluster_name}
env:
- name: 'AWS_ACCESS_KEY_ID'
value: '{access_key}'
- name: 'AWS_SECRET_ACCESS_KEY'
value: '{secret_key}'
- name: 'AWS_SESSION_TOKEN'
value: '{session_token}'"""
)
def _fetch_pod_info(token: str, task_name: str) -> (str, str, str):
headers = {"Authorization": f"Bearer {token}"}
data = {"task_name": task_name}
response = requests.post(endpoints["pod-exec-info"], headers=headers, json=data)
try:
response = response. | ()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
cert_auth_data = response["cert_auth_data"]
cluster_endpoint = response["cluster_endpoint"]
namespace = response["namespace"]
aws_account_id = response["aws_account_id"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
return (
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
)
def execute(task_name: str):
token = retrieve_or_login()
(
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
) = _fetch_pod_info(token, task_name)
account_id = account_id_from_token(token)
if int(account_id) < 10:
account_id = f"x{account_id}"
config_data = _construct_kubeconfig(
cert_auth_data,
cluster_endpoint,
aws_account_id,
access_key,
secret_key,
session_token,
)
config_file = Path("config").resolve()
with open(config_file, "w") as c:
c.write(config_data)
kubernetes.config.load_kube_config("config")
core_v1 = core_v1_api.CoreV1Api()
# TODO
pod_name = task_name
wssock = stream(
core_v1.connect_get_namespaced_pod_exec,
pod_name,
namespace,
command=["/bin/sh"],
stderr=True,
stdin=True,
stdout=True,
tty=True,
_preload_content=False,
).sock
stdin_channel = bytes([kubernetes.stream.ws_client.STDIN_CHANNEL])
stdout_channel = kubernetes.stream.ws_client.STDOUT_CHANNEL
stderr_channel = kubernetes.stream.ws_client.STDERR_CHANNEL
stdin = sys.stdin.fileno()
stdout = sys.stdout.fileno()
stderr = sys.stderr.fileno()
rlist = [wssock.sock, stdin]
while True:
rs, _ws, _xs = select.select(rlist, [], [])
if stdin in rs:
data = os.read(stdin, 32 * 1024)
if len(data) > 0:
wssock.send(stdin_channel + data, websocket.ABNF.OPCODE_BINARY)
if wssock.sock in rs:
opcode, frame = wssock.recv_data_frame(True)
if opcode == websocket.ABNF.OPCODE_CLOSE:
rlist.remove(wssock.sock)
elif opcode == websocket.ABNF.OPCODE_BINARY:
channel = frame.data[0]
data = frame.data[1:]
if channel in (stdout_channel, stderr_channel):
if len(data):
if channel == stdout_channel:
os.write(stdout, data)
else:
os.write(stderr, data)
elif channel == kubernetes.stream.ws_client.ERROR_CHANNEL:
wssock.close()
error = json.loads(data)
if error["status"] == "Success":
return 0
if error["reason"] == "NonZeroExitCode":
for cause in error["details"]["causes"]:
if cause["reason"] == "ExitCode":
return int(cause["message"])
print(file=sys.stderr)
print(f"Failure running: {' '.join(command)}", file=sys.stderr)
print(
f"Status: {error['status']} - Message: {error['message']}",
file=sys.stderr,
)
print(file=sys.stderr, flush=True)
sys.exit(1)
else:
print(file=sys.stderr)
print(f"Unexpected channel: {channel}", file=sys.stderr)
print(f"Data: {data}", file=sys.stderr)
print(file=sys.stderr, flush=True)
sys.exit(1)
else:
print(file=sys.stderr)
print(f"Unexpected websocket opcode: {opcode}", file=sys.stderr)
print(file=sys.stderr, flush=True)
sys.exit(1)
| latchbio__latch |
75 | 75-173-60 | random | ABNF | [
"ABNF",
"array",
"Callable",
"connect",
"continuous_frame",
"create_connection",
"debug",
"DEFAULT_SOCKET_OPTION",
"dump",
"enableTrace",
"errno",
"error",
"extract_err_message",
"extract_error_code",
"frame_buffer",
"getdefaulttimeout",
"handshake",
"info",
"isEnabledForDebug",
"isEnabledForError",
"isEnabledForTrace",
"Lock",
"logging",
"native_byteorder",
"NoLock",
"NullHandler",
"Optional",
"os",
"proxy_info",
"recv",
"recv_line",
"selectors",
"send",
"setdefaulttimeout",
"setReconnect",
"sock_opt",
"socket",
"ssl",
"SSLError",
"SSLWantReadError",
"SSLWantWriteError",
"STATUS_ABNORMAL_CLOSED",
"STATUS_BAD_GATEWAY",
"STATUS_GOING_AWAY",
"STATUS_INVALID_EXTENSION",
"STATUS_INVALID_PAYLOAD",
"STATUS_MESSAGE_TOO_BIG",
"STATUS_NORMAL",
"STATUS_POLICY_VIOLATION",
"STATUS_PROTOCOL_ERROR",
"STATUS_SERVICE_RESTART",
"STATUS_STATUS_NOT_AVAILABLE",
"STATUS_TLS_HANDSHAKE_ERROR",
"STATUS_TRY_AGAIN_LATER",
"STATUS_UNEXPECTED_CONDITION",
"STATUS_UNSUPPORTED_DATA_TYPE",
"struct",
"SUPPORTED_REDIRECT_STATUSES",
"sys",
"tests",
"threading",
"time",
"trace",
"Union",
"VALID_CLOSE_STATUS",
"validate_utf8",
"warning",
"WebSocket",
"WebSocketAddressException",
"WebSocketApp",
"WebSocketBadStatusException",
"WebSocketConnectionClosedException",
"WebSocketException",
"WebSocketPayloadException",
"WebSocketProtocolException",
"WebSocketProxyException",
"WebSocketTimeoutException",
"XorMaskerSimple",
"_abnf",
"_app",
"_cookiejar",
"_core",
"_default_timeout",
"_exceptions",
"_handshake",
"_http",
"_logger",
"_logging",
"_mask",
"_socket",
"_ssl_compat",
"_traceEnabled",
"_url",
"_utils",
"_wsdump",
"__all__",
"__doc__",
"__file__",
"__name__",
"__package__",
"__version__"
] | import json
import os
import select
import sys
import textwrap
from pathlib import Path
import kubernetes
import requests
import websocket
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.stream import stream
from latch_cli.config.latch import LatchConfig
from latch_cli.utils import account_id_from_token, retrieve_or_login
config = LatchConfig()
endpoints = config.sdk_endpoints
def _construct_kubeconfig(
cert_auth_data: str,
cluster_endpoint: str,
account_id: str,
access_key: str,
secret_key: str,
session_token: str,
) -> str:
open_brack = "{"
close_brack = "}"
region_code = "us-west-2"
cluster_name = "prion-prod"
cluster_endpoint = (
"https://C629A20F0E69D7DA6849ED877A3048EC.gr7.us-west-2.eks.amazonaws.com"
)
cert_auth_data = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1EVXpNREEwTURJME5sb1hEVE15TURVeU56QTBNREkwTmxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHptCkdkaGhBNDNzTHBrUnl3TlpJOUNtMWpXd1B2QTloRTNyWjVYWTRha2Ewb05uOFBlMSt4TVptZDg5ZHR3SjVVdFUKQ1AwR055cWd6NEtzbUxDOTMrVW8vcSs2eWxnMXZNRUV5bHFzNGt5WVN2dWhpaHVsbmZtODBBeE5xT0RQOEtDZgplSmRtTnM1cXo4MXpDamg1Y1NnNldFakJYVnhQSDQyQWdpcnZjYVUxbEJUd0VZV3gwQTdQRWlBd0I2NUtjUXB6CkRqOUFiZ0lYTjR0QjV6alhCZzNLemtJOFRDMWJvaElTbUxIa0NTTy9NUEd3RFpEdXN3RlFpYnpDSTdkVDlCTkEKaXBzZDNOTVcrd1kxV00zV2J6ZGNaSldib3NWUE5SUHNDTHlFSzFLZkpVbXFESGZGOWRMMUNjWFQ1QnFlY3B2cQpOVGdHRGZzZklJeVdJL2ZoMWpNQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFKzJpT3RFRVhIS3VzaU5oUGoweE44ZElKUExNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFBQVZlK28ybVN0M01CT04rdmRiNzIxWmRsY3IyZG5kNzBScjFyTlI4ME1DUTMwRVlFagpSSTdidUgvTUZhTzBaVjFZbjdYSGdUbVhNMWNPTk9DcTdLdXF6NURyVG5IN1c3VGQzbi80NjBPeFpOcWo4MUR1CnZZRlF6eHltbHZQMEx3dkVIQlliK1RWOUNsc2pCU1Vod1N3aXUrQWQrTHp6Wks0NWh0R2ZvdlJyeDYvR1pEVnEKYUFDQUZVTGgrVHRnMzFZdXdUQ0RZYmZZOC9QOUhma3psSTgraGY3UGxjZmp4Wmg5MTJUUk1VUTdkS1ZJMHF3TQo4NnFLK3ZmQktWOG5IQW1JMEEzVmp6cWQ4OWlHMkhQTHlhNDJXTkZmM0t3SCsxZC9IVHBYUEVBTk80WHpST1BQClJ6UHJHc21ZRmlZTGN2alA3RG5IZi9GYkViSFdYTXRWVjRSZgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
return textwrap.dedent(
f"""apiVersion: v1
clusters:
- cluster:
certificate-authority-data: {cert_auth_data}
server: {cluster_endpoint}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
contexts:
- context:
cluster: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
current-context: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
kind: Config
preferences: {open_brack}{close_brack}
users:
- name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
command: aws
args:
- --region
- {region_code}
- eks
- get-token
- --cluster-name
- {cluster_name}
env:
- name: 'AWS_ACCESS_KEY_ID'
value: '{access_key}'
- name: 'AWS_SECRET_ACCESS_KEY'
value: '{secret_key}'
- name: 'AWS_SESSION_TOKEN'
value: '{session_token}'"""
)
def _fetch_pod_info(token: str, task_name: str) -> (str, str, str):
headers = {"Authorization": f"Bearer {token}"}
data = {"task_name": task_name}
response = requests.post(endpoints["pod-exec-info"], headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
cert_auth_data = response["cert_auth_data"]
cluster_endpoint = response["cluster_endpoint"]
namespace = response["namespace"]
aws_account_id = response["aws_account_id"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
return (
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
)
def execute(task_name: str):
token = retrieve_or_login()
(
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
) = _fetch_pod_info(token, task_name)
account_id = account_id_from_token(token)
if int(account_id) < 10:
account_id = f"x{account_id}"
config_data = _construct_kubeconfig(
cert_auth_data,
cluster_endpoint,
aws_account_id,
access_key,
secret_key,
session_token,
)
config_file = Path("config").resolve()
with open(config_file, "w") as c:
c.write(config_data)
kubernetes.config.load_kube_config("config")
core_v1 = core_v1_api.CoreV1Api()
# TODO
pod_name = task_name
wssock = stream(
core_v1.connect_get_namespaced_pod_exec,
pod_name,
namespace,
command=["/bin/sh"],
stderr=True,
stdin=True,
stdout=True,
tty=True,
_preload_content=False,
).sock
stdin_channel = bytes([kubernetes.stream.ws_client.STDIN_CHANNEL])
stdout_channel = kubernetes.stream.ws_client.STDOUT_CHANNEL
stderr_channel = kubernetes.stream.ws_client.STDERR_CHANNEL
stdin = sys.stdin.fileno()
stdout = sys.stdout.fileno()
stderr = sys.stderr.fileno()
rlist = [wssock.sock, stdin]
while True:
rs, _ws, _xs = select.select(rlist, [], [])
if stdin in rs:
data = os.read(stdin, 32 * 1024)
if len(data) > 0:
wssock.send(stdin_channel + data, websocket. | .OPCODE_BINARY)
if wssock.sock in rs:
opcode, frame = wssock.recv_data_frame(True)
if opcode == websocket.ABNF.OPCODE_CLOSE:
rlist.remove(wssock.sock)
elif opcode == websocket.ABNF.OPCODE_BINARY:
channel = frame.data[0]
data = frame.data[1:]
if channel in (stdout_channel, stderr_channel):
if len(data):
if channel == stdout_channel:
os.write(stdout, data)
else:
os.write(stderr, data)
elif channel == kubernetes.stream.ws_client.ERROR_CHANNEL:
wssock.close()
error = json.loads(data)
if error["status"] == "Success":
return 0
if error["reason"] == "NonZeroExitCode":
for cause in error["details"]["causes"]:
if cause["reason"] == "ExitCode":
return int(cause["message"])
print(file=sys.stderr)
print(f"Failure running: {' '.join(command)}", file=sys.stderr)
print(
f"Status: {error['status']} - Message: {error['message']}",
file=sys.stderr,
)
print(file=sys.stderr, flush=True)
sys.exit(1)
else:
print(file=sys.stderr)
print(f"Unexpected channel: {channel}", file=sys.stderr)
print(f"Data: {data}", file=sys.stderr)
print(file=sys.stderr, flush=True)
sys.exit(1)
else:
print(file=sys.stderr)
print(f"Unexpected websocket opcode: {opcode}", file=sys.stderr)
print(file=sys.stderr, flush=True)
sys.exit(1)
| latchbio__latch |
75 | 75-173-65 | random | OPCODE_BINARY | [
"create_frame",
"format",
"LENGTH_16",
"LENGTH_63",
"LENGTH_7",
"mask",
"mro",
"OPCODE_BINARY",
"OPCODE_CLOSE",
"OPCODE_CONT",
"OPCODE_MAP",
"OPCODE_PING",
"OPCODE_PONG",
"OPCODE_TEXT",
"OPCODES",
"validate",
"_get_masked",
"_is_valid_close_status",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__doc__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__module__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__text_signature__",
"__weakrefoffset__"
] | import json
import os
import select
import sys
import textwrap
from pathlib import Path
import kubernetes
import requests
import websocket
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.stream import stream
from latch_cli.config.latch import LatchConfig
from latch_cli.utils import account_id_from_token, retrieve_or_login
config = LatchConfig()
endpoints = config.sdk_endpoints
def _construct_kubeconfig(
cert_auth_data: str,
cluster_endpoint: str,
account_id: str,
access_key: str,
secret_key: str,
session_token: str,
) -> str:
open_brack = "{"
close_brack = "}"
region_code = "us-west-2"
cluster_name = "prion-prod"
cluster_endpoint = (
"https://C629A20F0E69D7DA6849ED877A3048EC.gr7.us-west-2.eks.amazonaws.com"
)
cert_auth_data = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1EVXpNREEwTURJME5sb1hEVE15TURVeU56QTBNREkwTmxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHptCkdkaGhBNDNzTHBrUnl3TlpJOUNtMWpXd1B2QTloRTNyWjVYWTRha2Ewb05uOFBlMSt4TVptZDg5ZHR3SjVVdFUKQ1AwR055cWd6NEtzbUxDOTMrVW8vcSs2eWxnMXZNRUV5bHFzNGt5WVN2dWhpaHVsbmZtODBBeE5xT0RQOEtDZgplSmRtTnM1cXo4MXpDamg1Y1NnNldFakJYVnhQSDQyQWdpcnZjYVUxbEJUd0VZV3gwQTdQRWlBd0I2NUtjUXB6CkRqOUFiZ0lYTjR0QjV6alhCZzNLemtJOFRDMWJvaElTbUxIa0NTTy9NUEd3RFpEdXN3RlFpYnpDSTdkVDlCTkEKaXBzZDNOTVcrd1kxV00zV2J6ZGNaSldib3NWUE5SUHNDTHlFSzFLZkpVbXFESGZGOWRMMUNjWFQ1QnFlY3B2cQpOVGdHRGZzZklJeVdJL2ZoMWpNQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFKzJpT3RFRVhIS3VzaU5oUGoweE44ZElKUExNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFBQVZlK28ybVN0M01CT04rdmRiNzIxWmRsY3IyZG5kNzBScjFyTlI4ME1DUTMwRVlFagpSSTdidUgvTUZhTzBaVjFZbjdYSGdUbVhNMWNPTk9DcTdLdXF6NURyVG5IN1c3VGQzbi80NjBPeFpOcWo4MUR1CnZZRlF6eHltbHZQMEx3dkVIQlliK1RWOUNsc2pCU1Vod1N3aXUrQWQrTHp6Wks0NWh0R2ZvdlJyeDYvR1pEVnEKYUFDQUZVTGgrVHRnMzFZdXdUQ0RZYmZZOC9QOUhma3psSTgraGY3UGxjZmp4Wmg5MTJUUk1VUTdkS1ZJMHF3TQo4NnFLK3ZmQktWOG5IQW1JMEEzVmp6cWQ4OWlHMkhQTHlhNDJXTkZmM0t3SCsxZC9IVHBYUEVBTk80WHpST1BQClJ6UHJHc21ZRmlZTGN2alA3RG5IZi9GYkViSFdYTXRWVjRSZgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
return textwrap.dedent(
f"""apiVersion: v1
clusters:
- cluster:
certificate-authority-data: {cert_auth_data}
server: {cluster_endpoint}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
contexts:
- context:
cluster: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
current-context: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
kind: Config
preferences: {open_brack}{close_brack}
users:
- name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
command: aws
args:
- --region
- {region_code}
- eks
- get-token
- --cluster-name
- {cluster_name}
env:
- name: 'AWS_ACCESS_KEY_ID'
value: '{access_key}'
- name: 'AWS_SECRET_ACCESS_KEY'
value: '{secret_key}'
- name: 'AWS_SESSION_TOKEN'
value: '{session_token}'"""
)
def _fetch_pod_info(token: str, task_name: str) -> (str, str, str):
headers = {"Authorization": f"Bearer {token}"}
data = {"task_name": task_name}
response = requests.post(endpoints["pod-exec-info"], headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
cert_auth_data = response["cert_auth_data"]
cluster_endpoint = response["cluster_endpoint"]
namespace = response["namespace"]
aws_account_id = response["aws_account_id"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
return (
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
)
def execute(task_name: str):
token = retrieve_or_login()
(
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
) = _fetch_pod_info(token, task_name)
account_id = account_id_from_token(token)
if int(account_id) < 10:
account_id = f"x{account_id}"
config_data = _construct_kubeconfig(
cert_auth_data,
cluster_endpoint,
aws_account_id,
access_key,
secret_key,
session_token,
)
config_file = Path("config").resolve()
with open(config_file, "w") as c:
c.write(config_data)
kubernetes.config.load_kube_config("config")
core_v1 = core_v1_api.CoreV1Api()
# TODO
pod_name = task_name
wssock = stream(
core_v1.connect_get_namespaced_pod_exec,
pod_name,
namespace,
command=["/bin/sh"],
stderr=True,
stdin=True,
stdout=True,
tty=True,
_preload_content=False,
).sock
stdin_channel = bytes([kubernetes.stream.ws_client.STDIN_CHANNEL])
stdout_channel = kubernetes.stream.ws_client.STDOUT_CHANNEL
stderr_channel = kubernetes.stream.ws_client.STDERR_CHANNEL
stdin = sys.stdin.fileno()
stdout = sys.stdout.fileno()
stderr = sys.stderr.fileno()
rlist = [wssock.sock, stdin]
while True:
rs, _ws, _xs = select.select(rlist, [], [])
if stdin in rs:
data = os.read(stdin, 32 * 1024)
if len(data) > 0:
wssock.send(stdin_channel + data, websocket.ABNF. | )
if wssock.sock in rs:
opcode, frame = wssock.recv_data_frame(True)
if opcode == websocket.ABNF.OPCODE_CLOSE:
rlist.remove(wssock.sock)
elif opcode == websocket.ABNF.OPCODE_BINARY:
channel = frame.data[0]
data = frame.data[1:]
if channel in (stdout_channel, stderr_channel):
if len(data):
if channel == stdout_channel:
os.write(stdout, data)
else:
os.write(stderr, data)
elif channel == kubernetes.stream.ws_client.ERROR_CHANNEL:
wssock.close()
error = json.loads(data)
if error["status"] == "Success":
return 0
if error["reason"] == "NonZeroExitCode":
for cause in error["details"]["causes"]:
if cause["reason"] == "ExitCode":
return int(cause["message"])
print(file=sys.stderr)
print(f"Failure running: {' '.join(command)}", file=sys.stderr)
print(
f"Status: {error['status']} - Message: {error['message']}",
file=sys.stderr,
)
print(file=sys.stderr, flush=True)
sys.exit(1)
else:
print(file=sys.stderr)
print(f"Unexpected channel: {channel}", file=sys.stderr)
print(f"Data: {data}", file=sys.stderr)
print(file=sys.stderr, flush=True)
sys.exit(1)
else:
print(file=sys.stderr)
print(f"Unexpected websocket opcode: {opcode}", file=sys.stderr)
print(file=sys.stderr, flush=True)
sys.exit(1)
| latchbio__latch |
75 | 75-190-33 | inproject | loads | [
"codecs",
"decoder",
"detect_encoding",
"dump",
"dumps",
"encoder",
"JSONDecodeError",
"JSONDecoder",
"JSONEncoder",
"load",
"loads",
"scanner",
"tool",
"_default_decoder",
"_default_encoder",
"__all__",
"__author__",
"__doc__",
"__file__",
"__name__",
"__package__",
"__version__"
] | import json
import os
import select
import sys
import textwrap
from pathlib import Path
import kubernetes
import requests
import websocket
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.stream import stream
from latch_cli.config.latch import LatchConfig
from latch_cli.utils import account_id_from_token, retrieve_or_login
config = LatchConfig()
endpoints = config.sdk_endpoints
def _construct_kubeconfig(
cert_auth_data: str,
cluster_endpoint: str,
account_id: str,
access_key: str,
secret_key: str,
session_token: str,
) -> str:
open_brack = "{"
close_brack = "}"
region_code = "us-west-2"
cluster_name = "prion-prod"
cluster_endpoint = (
"https://C629A20F0E69D7DA6849ED877A3048EC.gr7.us-west-2.eks.amazonaws.com"
)
cert_auth_data = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1EVXpNREEwTURJME5sb1hEVE15TURVeU56QTBNREkwTmxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHptCkdkaGhBNDNzTHBrUnl3TlpJOUNtMWpXd1B2QTloRTNyWjVYWTRha2Ewb05uOFBlMSt4TVptZDg5ZHR3SjVVdFUKQ1AwR055cWd6NEtzbUxDOTMrVW8vcSs2eWxnMXZNRUV5bHFzNGt5WVN2dWhpaHVsbmZtODBBeE5xT0RQOEtDZgplSmRtTnM1cXo4MXpDamg1Y1NnNldFakJYVnhQSDQyQWdpcnZjYVUxbEJUd0VZV3gwQTdQRWlBd0I2NUtjUXB6CkRqOUFiZ0lYTjR0QjV6alhCZzNLemtJOFRDMWJvaElTbUxIa0NTTy9NUEd3RFpEdXN3RlFpYnpDSTdkVDlCTkEKaXBzZDNOTVcrd1kxV00zV2J6ZGNaSldib3NWUE5SUHNDTHlFSzFLZkpVbXFESGZGOWRMMUNjWFQ1QnFlY3B2cQpOVGdHRGZzZklJeVdJL2ZoMWpNQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFKzJpT3RFRVhIS3VzaU5oUGoweE44ZElKUExNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFBQVZlK28ybVN0M01CT04rdmRiNzIxWmRsY3IyZG5kNzBScjFyTlI4ME1DUTMwRVlFagpSSTdidUgvTUZhTzBaVjFZbjdYSGdUbVhNMWNPTk9DcTdLdXF6NURyVG5IN1c3VGQzbi80NjBPeFpOcWo4MUR1CnZZRlF6eHltbHZQMEx3dkVIQlliK1RWOUNsc2pCU1Vod1N3aXUrQWQrTHp6Wks0NWh0R2ZvdlJyeDYvR1pEVnEKYUFDQUZVTGgrVHRnMzFZdXdUQ0RZYmZZOC9QOUhma3psSTgraGY3UGxjZmp4Wmg5MTJUUk1VUTdkS1ZJMHF3TQo4NnFLK3ZmQktWOG5IQW1JMEEzVmp6cWQ4OWlHMkhQTHlhNDJXTkZmM0t3SCsxZC9IVHBYUEVBTk80WHpST1BQClJ6UHJHc21ZRmlZTGN2alA3RG5IZi9GYkViSFdYTXRWVjRSZgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
return textwrap.dedent(
f"""apiVersion: v1
clusters:
- cluster:
certificate-authority-data: {cert_auth_data}
server: {cluster_endpoint}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
contexts:
- context:
cluster: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
current-context: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
kind: Config
preferences: {open_brack}{close_brack}
users:
- name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
command: aws
args:
- --region
- {region_code}
- eks
- get-token
- --cluster-name
- {cluster_name}
env:
- name: 'AWS_ACCESS_KEY_ID'
value: '{access_key}'
- name: 'AWS_SECRET_ACCESS_KEY'
value: '{secret_key}'
- name: 'AWS_SESSION_TOKEN'
value: '{session_token}'"""
)
def _fetch_pod_info(token: str, task_name: str) -> (str, str, str):
headers = {"Authorization": f"Bearer {token}"}
data = {"task_name": task_name}
response = requests.post(endpoints["pod-exec-info"], headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
cert_auth_data = response["cert_auth_data"]
cluster_endpoint = response["cluster_endpoint"]
namespace = response["namespace"]
aws_account_id = response["aws_account_id"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
return (
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
)
def execute(task_name: str):
token = retrieve_or_login()
(
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
) = _fetch_pod_info(token, task_name)
account_id = account_id_from_token(token)
if int(account_id) < 10:
account_id = f"x{account_id}"
config_data = _construct_kubeconfig(
cert_auth_data,
cluster_endpoint,
aws_account_id,
access_key,
secret_key,
session_token,
)
config_file = Path("config").resolve()
with open(config_file, "w") as c:
c.write(config_data)
kubernetes.config.load_kube_config("config")
core_v1 = core_v1_api.CoreV1Api()
# TODO
pod_name = task_name
wssock = stream(
core_v1.connect_get_namespaced_pod_exec,
pod_name,
namespace,
command=["/bin/sh"],
stderr=True,
stdin=True,
stdout=True,
tty=True,
_preload_content=False,
).sock
stdin_channel = bytes([kubernetes.stream.ws_client.STDIN_CHANNEL])
stdout_channel = kubernetes.stream.ws_client.STDOUT_CHANNEL
stderr_channel = kubernetes.stream.ws_client.STDERR_CHANNEL
stdin = sys.stdin.fileno()
stdout = sys.stdout.fileno()
stderr = sys.stderr.fileno()
rlist = [wssock.sock, stdin]
while True:
rs, _ws, _xs = select.select(rlist, [], [])
if stdin in rs:
data = os.read(stdin, 32 * 1024)
if len(data) > 0:
wssock.send(stdin_channel + data, websocket.ABNF.OPCODE_BINARY)
if wssock.sock in rs:
opcode, frame = wssock.recv_data_frame(True)
if opcode == websocket.ABNF.OPCODE_CLOSE:
rlist.remove(wssock.sock)
elif opcode == websocket.ABNF.OPCODE_BINARY:
channel = frame.data[0]
data = frame.data[1:]
if channel in (stdout_channel, stderr_channel):
if len(data):
if channel == stdout_channel:
os.write(stdout, data)
else:
os.write(stderr, data)
elif channel == kubernetes.stream.ws_client.ERROR_CHANNEL:
wssock.close()
error = json. | (data)
if error["status"] == "Success":
return 0
if error["reason"] == "NonZeroExitCode":
for cause in error["details"]["causes"]:
if cause["reason"] == "ExitCode":
return int(cause["message"])
print(file=sys.stderr)
print(f"Failure running: {' '.join(command)}", file=sys.stderr)
print(
f"Status: {error['status']} - Message: {error['message']}",
file=sys.stderr,
)
print(file=sys.stderr, flush=True)
sys.exit(1)
else:
print(file=sys.stderr)
print(f"Unexpected channel: {channel}", file=sys.stderr)
print(f"Data: {data}", file=sys.stderr)
print(file=sys.stderr, flush=True)
sys.exit(1)
else:
print(file=sys.stderr)
print(f"Unexpected websocket opcode: {opcode}", file=sys.stderr)
print(file=sys.stderr, flush=True)
sys.exit(1)
| latchbio__latch |
75 | 75-204-24 | random | exit | [
"abiflags",
"addaudithook",
"api_version",
"argv",
"audit",
"base_exec_prefix",
"base_prefix",
"breakpointhook",
"builtin_module_names",
"byteorder",
"call_tracing",
"copyright",
"displayhook",
"dllhandle",
"dont_write_bytecode",
"exc_info",
"excepthook",
"exec_prefix",
"executable",
"exit",
"flags",
"float_info",
"float_repr_style",
"get_asyncgen_hooks",
"getdefaultencoding",
"getdlopenflags",
"getfilesystemencodeerrors",
"getfilesystemencoding",
"getprofile",
"getrecursionlimit",
"getrefcount",
"getsizeof",
"getswitchinterval",
"gettotalrefcount",
"gettrace",
"getwindowsversion",
"hash_info",
"hexversion",
"implementation",
"int_info",
"intern",
"is_finalizing",
"last_traceback",
"last_type",
"last_value",
"maxsize",
"maxunicode",
"meta_path",
"modules",
"path",
"path_hooks",
"path_importer_cache",
"platform",
"platlibdir",
"prefix",
"ps1",
"ps2",
"pycache_prefix",
"set_asyncgen_hooks",
"setdlopenflags",
"setprofile",
"setrecursionlimit",
"setswitchinterval",
"settrace",
"stderr",
"stdin",
"stdout",
"tracebacklimit",
"unraisablehook",
"UnraisableHookArgs",
"version",
"version_info",
"warnoptions",
"winver",
"__breakpointhook__",
"__displayhook__",
"__doc__",
"__excepthook__",
"__file__",
"__name__",
"__package__",
"__stderr__",
"__stdin__",
"__stdout__"
] | import json
import os
import select
import sys
import textwrap
from pathlib import Path
import kubernetes
import requests
import websocket
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.stream import stream
from latch_cli.config.latch import LatchConfig
from latch_cli.utils import account_id_from_token, retrieve_or_login
config = LatchConfig()
endpoints = config.sdk_endpoints
def _construct_kubeconfig(
cert_auth_data: str,
cluster_endpoint: str,
account_id: str,
access_key: str,
secret_key: str,
session_token: str,
) -> str:
open_brack = "{"
close_brack = "}"
region_code = "us-west-2"
cluster_name = "prion-prod"
cluster_endpoint = (
"https://C629A20F0E69D7DA6849ED877A3048EC.gr7.us-west-2.eks.amazonaws.com"
)
cert_auth_data = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1EVXpNREEwTURJME5sb1hEVE15TURVeU56QTBNREkwTmxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHptCkdkaGhBNDNzTHBrUnl3TlpJOUNtMWpXd1B2QTloRTNyWjVYWTRha2Ewb05uOFBlMSt4TVptZDg5ZHR3SjVVdFUKQ1AwR055cWd6NEtzbUxDOTMrVW8vcSs2eWxnMXZNRUV5bHFzNGt5WVN2dWhpaHVsbmZtODBBeE5xT0RQOEtDZgplSmRtTnM1cXo4MXpDamg1Y1NnNldFakJYVnhQSDQyQWdpcnZjYVUxbEJUd0VZV3gwQTdQRWlBd0I2NUtjUXB6CkRqOUFiZ0lYTjR0QjV6alhCZzNLemtJOFRDMWJvaElTbUxIa0NTTy9NUEd3RFpEdXN3RlFpYnpDSTdkVDlCTkEKaXBzZDNOTVcrd1kxV00zV2J6ZGNaSldib3NWUE5SUHNDTHlFSzFLZkpVbXFESGZGOWRMMUNjWFQ1QnFlY3B2cQpOVGdHRGZzZklJeVdJL2ZoMWpNQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFKzJpT3RFRVhIS3VzaU5oUGoweE44ZElKUExNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFBQVZlK28ybVN0M01CT04rdmRiNzIxWmRsY3IyZG5kNzBScjFyTlI4ME1DUTMwRVlFagpSSTdidUgvTUZhTzBaVjFZbjdYSGdUbVhNMWNPTk9DcTdLdXF6NURyVG5IN1c3VGQzbi80NjBPeFpOcWo4MUR1CnZZRlF6eHltbHZQMEx3dkVIQlliK1RWOUNsc2pCU1Vod1N3aXUrQWQrTHp6Wks0NWh0R2ZvdlJyeDYvR1pEVnEKYUFDQUZVTGgrVHRnMzFZdXdUQ0RZYmZZOC9QOUhma3psSTgraGY3UGxjZmp4Wmg5MTJUUk1VUTdkS1ZJMHF3TQo4NnFLK3ZmQktWOG5IQW1JMEEzVmp6cWQ4OWlHMkhQTHlhNDJXTkZmM0t3SCsxZC9IVHBYUEVBTk80WHpST1BQClJ6UHJHc21ZRmlZTGN2alA3RG5IZi9GYkViSFdYTXRWVjRSZgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
return textwrap.dedent(
f"""apiVersion: v1
clusters:
- cluster:
certificate-authority-data: {cert_auth_data}
server: {cluster_endpoint}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
contexts:
- context:
cluster: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
current-context: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
kind: Config
preferences: {open_brack}{close_brack}
users:
- name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
command: aws
args:
- --region
- {region_code}
- eks
- get-token
- --cluster-name
- {cluster_name}
env:
- name: 'AWS_ACCESS_KEY_ID'
value: '{access_key}'
- name: 'AWS_SECRET_ACCESS_KEY'
value: '{secret_key}'
- name: 'AWS_SESSION_TOKEN'
value: '{session_token}'"""
)
def _fetch_pod_info(token: str, task_name: str) -> (str, str, str):
headers = {"Authorization": f"Bearer {token}"}
data = {"task_name": task_name}
response = requests.post(endpoints["pod-exec-info"], headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
cert_auth_data = response["cert_auth_data"]
cluster_endpoint = response["cluster_endpoint"]
namespace = response["namespace"]
aws_account_id = response["aws_account_id"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
return (
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
)
def execute(task_name: str):
token = retrieve_or_login()
(
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
) = _fetch_pod_info(token, task_name)
account_id = account_id_from_token(token)
if int(account_id) < 10:
account_id = f"x{account_id}"
config_data = _construct_kubeconfig(
cert_auth_data,
cluster_endpoint,
aws_account_id,
access_key,
secret_key,
session_token,
)
config_file = Path("config").resolve()
with open(config_file, "w") as c:
c.write(config_data)
kubernetes.config.load_kube_config("config")
core_v1 = core_v1_api.CoreV1Api()
# TODO
pod_name = task_name
wssock = stream(
core_v1.connect_get_namespaced_pod_exec,
pod_name,
namespace,
command=["/bin/sh"],
stderr=True,
stdin=True,
stdout=True,
tty=True,
_preload_content=False,
).sock
stdin_channel = bytes([kubernetes.stream.ws_client.STDIN_CHANNEL])
stdout_channel = kubernetes.stream.ws_client.STDOUT_CHANNEL
stderr_channel = kubernetes.stream.ws_client.STDERR_CHANNEL
stdin = sys.stdin.fileno()
stdout = sys.stdout.fileno()
stderr = sys.stderr.fileno()
rlist = [wssock.sock, stdin]
while True:
rs, _ws, _xs = select.select(rlist, [], [])
if stdin in rs:
data = os.read(stdin, 32 * 1024)
if len(data) > 0:
wssock.send(stdin_channel + data, websocket.ABNF.OPCODE_BINARY)
if wssock.sock in rs:
opcode, frame = wssock.recv_data_frame(True)
if opcode == websocket.ABNF.OPCODE_CLOSE:
rlist.remove(wssock.sock)
elif opcode == websocket.ABNF.OPCODE_BINARY:
channel = frame.data[0]
data = frame.data[1:]
if channel in (stdout_channel, stderr_channel):
if len(data):
if channel == stdout_channel:
os.write(stdout, data)
else:
os.write(stderr, data)
elif channel == kubernetes.stream.ws_client.ERROR_CHANNEL:
wssock.close()
error = json.loads(data)
if error["status"] == "Success":
return 0
if error["reason"] == "NonZeroExitCode":
for cause in error["details"]["causes"]:
if cause["reason"] == "ExitCode":
return int(cause["message"])
print(file=sys.stderr)
print(f"Failure running: {' '.join(command)}", file=sys.stderr)
print(
f"Status: {error['status']} - Message: {error['message']}",
file=sys.stderr,
)
print(file=sys.stderr, flush=True)
sys. | (1)
else:
print(file=sys.stderr)
print(f"Unexpected channel: {channel}", file=sys.stderr)
print(f"Data: {data}", file=sys.stderr)
print(file=sys.stderr, flush=True)
sys.exit(1)
else:
print(file=sys.stderr)
print(f"Unexpected websocket opcode: {opcode}", file=sys.stderr)
print(file=sys.stderr, flush=True)
sys.exit(1)
| latchbio__latch |
75 | 75-213-73 | non_informative | stderr | [
"abiflags",
"addaudithook",
"api_version",
"argv",
"audit",
"base_exec_prefix",
"base_prefix",
"breakpointhook",
"builtin_module_names",
"byteorder",
"call_tracing",
"copyright",
"displayhook",
"dllhandle",
"dont_write_bytecode",
"exc_info",
"excepthook",
"exec_prefix",
"executable",
"exit",
"flags",
"float_info",
"float_repr_style",
"get_asyncgen_hooks",
"getdefaultencoding",
"getdlopenflags",
"getfilesystemencodeerrors",
"getfilesystemencoding",
"getprofile",
"getrecursionlimit",
"getrefcount",
"getsizeof",
"getswitchinterval",
"gettotalrefcount",
"gettrace",
"getwindowsversion",
"hash_info",
"hexversion",
"implementation",
"int_info",
"intern",
"is_finalizing",
"last_traceback",
"last_type",
"last_value",
"maxsize",
"maxunicode",
"meta_path",
"modules",
"path",
"path_hooks",
"path_importer_cache",
"platform",
"platlibdir",
"prefix",
"ps1",
"ps2",
"pycache_prefix",
"set_asyncgen_hooks",
"setdlopenflags",
"setprofile",
"setrecursionlimit",
"setswitchinterval",
"settrace",
"stderr",
"stdin",
"stdout",
"tracebacklimit",
"unraisablehook",
"UnraisableHookArgs",
"version",
"version_info",
"warnoptions",
"winver",
"__breakpointhook__",
"__displayhook__",
"__doc__",
"__excepthook__",
"__file__",
"__name__",
"__package__",
"__stderr__",
"__stdin__",
"__stdout__"
] | import json
import os
import select
import sys
import textwrap
from pathlib import Path
import kubernetes
import requests
import websocket
from kubernetes.client import Configuration
from kubernetes.client.api import core_v1_api
from kubernetes.stream import stream
from latch_cli.config.latch import LatchConfig
from latch_cli.utils import account_id_from_token, retrieve_or_login
config = LatchConfig()
endpoints = config.sdk_endpoints
def _construct_kubeconfig(
cert_auth_data: str,
cluster_endpoint: str,
account_id: str,
access_key: str,
secret_key: str,
session_token: str,
) -> str:
open_brack = "{"
close_brack = "}"
region_code = "us-west-2"
cluster_name = "prion-prod"
cluster_endpoint = (
"https://C629A20F0E69D7DA6849ED877A3048EC.gr7.us-west-2.eks.amazonaws.com"
)
cert_auth_data = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1EVXpNREEwTURJME5sb1hEVE15TURVeU56QTBNREkwTmxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHptCkdkaGhBNDNzTHBrUnl3TlpJOUNtMWpXd1B2QTloRTNyWjVYWTRha2Ewb05uOFBlMSt4TVptZDg5ZHR3SjVVdFUKQ1AwR055cWd6NEtzbUxDOTMrVW8vcSs2eWxnMXZNRUV5bHFzNGt5WVN2dWhpaHVsbmZtODBBeE5xT0RQOEtDZgplSmRtTnM1cXo4MXpDamg1Y1NnNldFakJYVnhQSDQyQWdpcnZjYVUxbEJUd0VZV3gwQTdQRWlBd0I2NUtjUXB6CkRqOUFiZ0lYTjR0QjV6alhCZzNLemtJOFRDMWJvaElTbUxIa0NTTy9NUEd3RFpEdXN3RlFpYnpDSTdkVDlCTkEKaXBzZDNOTVcrd1kxV00zV2J6ZGNaSldib3NWUE5SUHNDTHlFSzFLZkpVbXFESGZGOWRMMUNjWFQ1QnFlY3B2cQpOVGdHRGZzZklJeVdJL2ZoMWpNQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZFKzJpT3RFRVhIS3VzaU5oUGoweE44ZElKUExNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFBQVZlK28ybVN0M01CT04rdmRiNzIxWmRsY3IyZG5kNzBScjFyTlI4ME1DUTMwRVlFagpSSTdidUgvTUZhTzBaVjFZbjdYSGdUbVhNMWNPTk9DcTdLdXF6NURyVG5IN1c3VGQzbi80NjBPeFpOcWo4MUR1CnZZRlF6eHltbHZQMEx3dkVIQlliK1RWOUNsc2pCU1Vod1N3aXUrQWQrTHp6Wks0NWh0R2ZvdlJyeDYvR1pEVnEKYUFDQUZVTGgrVHRnMzFZdXdUQ0RZYmZZOC9QOUhma3psSTgraGY3UGxjZmp4Wmg5MTJUUk1VUTdkS1ZJMHF3TQo4NnFLK3ZmQktWOG5IQW1JMEEzVmp6cWQ4OWlHMkhQTHlhNDJXTkZmM0t3SCsxZC9IVHBYUEVBTk80WHpST1BQClJ6UHJHc21ZRmlZTGN2alA3RG5IZi9GYkViSFdYTXRWVjRSZgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="
return textwrap.dedent(
f"""apiVersion: v1
clusters:
- cluster:
certificate-authority-data: {cert_auth_data}
server: {cluster_endpoint}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
contexts:
- context:
cluster: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
current-context: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
kind: Config
preferences: {open_brack}{close_brack}
users:
- name: arn:aws:eks:{region_code}:{account_id}:cluster/{cluster_name}
user:
exec:
apiVersion: client.authentication.k8s.io/v1beta1
command: aws
args:
- --region
- {region_code}
- eks
- get-token
- --cluster-name
- {cluster_name}
env:
- name: 'AWS_ACCESS_KEY_ID'
value: '{access_key}'
- name: 'AWS_SECRET_ACCESS_KEY'
value: '{secret_key}'
- name: 'AWS_SESSION_TOKEN'
value: '{session_token}'"""
)
def _fetch_pod_info(token: str, task_name: str) -> "tuple[str, str, str, str, str, str, str]":
headers = {"Authorization": f"Bearer {token}"}
data = {"task_name": task_name}
response = requests.post(endpoints["pod-exec-info"], headers=headers, json=data)
try:
response = response.json()
access_key = response["tmp_access_key"]
secret_key = response["tmp_secret_key"]
session_token = response["tmp_session_token"]
cert_auth_data = response["cert_auth_data"]
cluster_endpoint = response["cluster_endpoint"]
namespace = response["namespace"]
aws_account_id = response["aws_account_id"]
except KeyError as err:
raise ValueError(f"malformed response on image upload: {response}") from err
return (
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
)
def execute(task_name: str):
token = retrieve_or_login()
(
access_key,
secret_key,
session_token,
cert_auth_data,
cluster_endpoint,
namespace,
aws_account_id,
) = _fetch_pod_info(token, task_name)
account_id = account_id_from_token(token)
if int(account_id) < 10:
account_id = f"x{account_id}"
config_data = _construct_kubeconfig(
cert_auth_data,
cluster_endpoint,
aws_account_id,
access_key,
secret_key,
session_token,
)
config_file = Path("config").resolve()
with open(config_file, "w") as c:
c.write(config_data)
kubernetes.config.load_kube_config("config")
core_v1 = core_v1_api.CoreV1Api()
# TODO
pod_name = task_name
wssock = stream(
core_v1.connect_get_namespaced_pod_exec,
pod_name,
namespace,
command=["/bin/sh"],
stderr=True,
stdin=True,
stdout=True,
tty=True,
_preload_content=False,
).sock
stdin_channel = bytes([kubernetes.stream.ws_client.STDIN_CHANNEL])
stdout_channel = kubernetes.stream.ws_client.STDOUT_CHANNEL
stderr_channel = kubernetes.stream.ws_client.STDERR_CHANNEL
stdin = sys.stdin.fileno()
stdout = sys.stdout.fileno()
stderr = sys.stderr.fileno()
rlist = [wssock.sock, stdin]
while True:
rs, _ws, _xs = select.select(rlist, [], [])
if stdin in rs:
data = os.read(stdin, 32 * 1024)
if len(data) > 0:
wssock.send(stdin_channel + data, websocket.ABNF.OPCODE_BINARY)
if wssock.sock in rs:
opcode, frame = wssock.recv_data_frame(True)
if opcode == websocket.ABNF.OPCODE_CLOSE:
rlist.remove(wssock.sock)
elif opcode == websocket.ABNF.OPCODE_BINARY:
channel = frame.data[0]
data = frame.data[1:]
if channel in (stdout_channel, stderr_channel):
if len(data):
if channel == stdout_channel:
os.write(stdout, data)
else:
os.write(stderr, data)
elif channel == kubernetes.stream.ws_client.ERROR_CHANNEL:
wssock.close()
error = json.loads(data)
if error["status"] == "Success":
return 0
if error["reason"] == "NonZeroExitCode":
for cause in error["details"]["causes"]:
if cause["reason"] == "ExitCode":
return int(cause["message"])
print(file=sys.stderr)
print(f"Failure running: {' '.join(command)}", file=sys.stderr)
print(
f"Status: {error['status']} - Message: {error['message']}",
file=sys.stderr,
)
print(file=sys.stderr, flush=True)
sys.exit(1)
else:
print(file=sys.stderr)
print(f"Unexpected channel: {channel}", file=sys.stderr)
print(f"Data: {data}", file=sys.stderr)
print(file=sys.stderr, flush=True)
sys.exit(1)
else:
print(file=sys.stderr)
print(f"Unexpected websocket opcode: {opcode}", file=sys. | )
print(file=sys.stderr, flush=True)
sys.exit(1)
| latchbio__latch |
76 | 76-97-8 | inproject | stdout | [
"abiflags",
"addaudithook",
"api_version",
"argv",
"audit",
"base_exec_prefix",
"base_prefix",
"breakpointhook",
"builtin_module_names",
"byteorder",
"call_tracing",
"copyright",
"displayhook",
"dllhandle",
"dont_write_bytecode",
"exc_info",
"excepthook",
"exec_prefix",
"executable",
"exit",
"flags",
"float_info",
"float_repr_style",
"get_asyncgen_hooks",
"getdefaultencoding",
"getdlopenflags",
"getfilesystemencodeerrors",
"getfilesystemencoding",
"getprofile",
"getrecursionlimit",
"getrefcount",
"getsizeof",
"getswitchinterval",
"gettotalrefcount",
"gettrace",
"getwindowsversion",
"hash_info",
"hexversion",
"implementation",
"int_info",
"intern",
"is_finalizing",
"last_traceback",
"last_type",
"last_value",
"maxsize",
"maxunicode",
"meta_path",
"modules",
"path",
"path_hooks",
"path_importer_cache",
"platform",
"platlibdir",
"prefix",
"ps1",
"ps2",
"pycache_prefix",
"set_asyncgen_hooks",
"setdlopenflags",
"setprofile",
"setrecursionlimit",
"setswitchinterval",
"settrace",
"stderr",
"stdin",
"stdout",
"tracebacklimit",
"unraisablehook",
"UnraisableHookArgs",
"version",
"version_info",
"warnoptions",
"winver",
"__breakpointhook__",
"__displayhook__",
"__doc__",
"__excepthook__",
"__file__",
"__name__",
"__package__",
"__stderr__",
"__stdin__",
"__stdout__"
] | import os
import sys
import termios
import tty
from typing import Dict, List
def _print(*args, **kwargs):
print(*args, flush=True, end="", **kwargs)
def clear(k: int):
"""
Clear `k` lines below the cursor, returning the cursor to its original position
"""
_print(f"\x1b[2K\x1b[1E" * (k) + f"\x1b[{k}F")
def read_next_byte() -> bytes:
b = sys.stdin.buffer.read(1)
if b in (
b"\x03", # CTRL C
b"\x04", # CTRL D
b"q",
b"Q",
):
raise KeyboardInterrupt
return b
def read_bytes(num_bytes: int) -> bytes:
if num_bytes < 0:
raise ValueError(f"cannot read {num_bytes} bytes")
result = b""
for _ in range(num_bytes):
result += read_next_byte()
return result
def tui_select(title: str, options: List[str], clear_terminal: bool = False):
"""
Renders a terminal UI that allows users to select one of the options listed in `options`
Args:
options: A list of names for each of the options.
"""
if len(options) == 0:
raise ValueError("No options given")
def render(
curr_selected: int,
start_index: int = 0,
max_per_page: int = 10,
indent: str = " ",
) -> int:
if curr_selected < 0 or curr_selected >= len(options):
curr_selected = 0
_print(title)
_print("\x1b[2E") # two new lines
num_lines_rendered = 4 # 4 "extra" lines for header + footer
for i in range(start_index, start_index + max_per_page):
if i >= len(options):
break
name = options[i]
if i == curr_selected:
color = "\x1b[38;5;40m"
bold = "\x1b[1m"
reset = "\x1b[0m"
_print(f"{indent}{color}{bold}[{name}]{reset}\x1b[1E")
else:
_print(f"{indent}{name}\x1b[1E")
num_lines_rendered += 1
_print("\x1b[1E")
control_str = "[ARROW-KEYS] Navigate\t[ENTER] Select\t[Q] Quit"
_print(control_str)
_print("\x1b[1E")
_print(f"\x1b[{num_lines_rendered}F")
return num_lines_rendered
old_settings = termios.tcgetattr(sys.stdin.fileno())
tty.setraw(sys.stdin.fileno())
curr_selected = 0
start_index = 0
_, term_height = os.get_terminal_size()
# Get cursor height
res = b""
sys.stdout.write("\x1b[6n")
sys. | .flush()
while not res.endswith(b"R"):
res += sys.stdin.buffer.read(1)
curs_height = int(res.strip(b"\x1b[").split(b";", 1)[0])
max_per_page = term_height - curs_height - 4
# Hide the cursor
_print("\x1b[?25l")
if clear_terminal:
# This line
# (1) Clears the terminal window
# (2) Moves the cursor to the top left corner
_print("\x1b[2J\x1b[H")
num_lines_rendered = render(
curr_selected,
start_index=start_index,
max_per_page=max_per_page,
)
try:
while True:
b = read_bytes(1)
if b == b"\r":
return options[curr_selected]
elif b == b"\x1b":
b = read_bytes(2)
if b == b"[A": # Up Arrow
curr_selected = max(curr_selected - 1, 0)
if (
curr_selected - start_index < max_per_page // 2
and start_index > 0
):
start_index -= 1
elif b == b"[B": # Down Arrow
curr_selected = min(curr_selected + 1, len(options) - 1)
if (
curr_selected - start_index > max_per_page // 2
and start_index < len(options) - max_per_page
):
start_index += 1
else:
continue
clear(num_lines_rendered)
num_lines_rendered = render(
curr_selected,
start_index=start_index,
max_per_page=max_per_page,
)
except KeyboardInterrupt:
...
finally:
clear(num_lines_rendered)
# Unhide the cursor
_print("\x1b[?25h")
termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, old_settings)
def tui_select_table(
title: str,
column_names: List[str],
options: List[Dict[str, str]],
clear_terminal: bool = False,
):
if len(options) == 0:
raise ValueError("No options given")
elif len(column_names) == 0:
raise ValueError("No column names specified")
def render(
curr_selected: int,
term_width: int,
start_index: int = 0,
max_per_page: int = 10,
indent: str = " ",
column_spacing: str = " ",
) -> int:
if curr_selected < 0 or curr_selected >= len(options):
curr_selected = 0
_print(title)
_print("\x1b[2E") # two new lines
num_lines_rendered = 5 # 5 "extra" lines for header + footer
lengths = {col: len(col) for col in column_names}
for i in range(len(options)):
values = options[i]
for col in column_names:
lengths[col] = max(lengths[col], len(values[col]))
for i in range(start_index, start_index + max_per_page):
if i >= len(options):
break
values = options[i]
row_str = indent
for col in column_names:
item = values[col]
row_str = row_str + f"{item: <{lengths[col]}}" + column_spacing
if len(row_str) > term_width - 2:
row_str = row_str[: term_width - 5] + "... "
if i == curr_selected:
color = "\x1b[38;5;40m"
bold = "\x1b[1m"
reset = "\x1b[0m"
row_str = f"{color}{bold}{row_str}{reset}"
_print(f"{row_str}\x1b[1E")
num_lines_rendered += 1
_print("\x1b[1E")
control_str = "[ARROW-KEYS] Navigate\t[ENTER] Select\t[Q] Quit"
_print(control_str)
_print("\x1b[1E")
_print(f"\x1b[{num_lines_rendered}F")
return num_lines_rendered
old_settings = termios.tcgetattr(sys.stdin.fileno())
tty.setraw(sys.stdin.fileno())
curr_selected = 0
start_index = 0
term_width, term_height = os.get_terminal_size()
# Get cursor height
res = b""
sys.stdout.write("\x1b[6n")
sys.stdout.flush()
while not res.endswith(b"R"):
res += sys.stdin.buffer.read(1)
curs_height = int(res.strip(b"\x1b[").split(b";", 1)[0])
max_per_page = term_height - curs_height - 4
# Hide the cursor
_print("\x1b[?25l")
if clear_terminal:
# This line
# (1) Clears the terminal window
# (2) Moves the cursor to the top left corner
_print("\x1b[2J\x1b[H")
num_lines_rendered = render(
curr_selected,
start_index=start_index,
max_per_page=max_per_page,
term_width=term_width,
)
try:
while True:
b = read_bytes(1)
if b == b"\r":
return options[curr_selected]
elif b == b"\x1b":
b = read_bytes(2)
if b == b"[A": # Up Arrow
curr_selected = max(curr_selected - 1, 0)
if (
curr_selected - start_index < max_per_page // 2
and start_index > 0
):
start_index -= 1
elif b == b"[B": # Down Arrow
curr_selected = min(curr_selected + 1, len(options) - 1)
if (
curr_selected - start_index > max_per_page // 2
and start_index < len(options) - max_per_page
):
start_index += 1
else:
continue
clear(num_lines_rendered)
num_lines_rendered = render(
curr_selected,
start_index=start_index,
max_per_page=max_per_page,
term_width=term_width,
)
except KeyboardInterrupt:
...
finally:
clear(num_lines_rendered)
# Unhide the cursor
_print("\x1b[?25h")
termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, old_settings)
if __name__ == "__main__":
title = "Sample Selection Screen"
options = [f"Option {i}" for i in range(100)]
selected = tui_select(title, options)
if selected:
print(selected)
| latchbio__latch |
76 | 76-97-15 | inproject | flush | [
"buffer",
"close",
"closed",
"encoding",
"errors",
"fileno",
"flush",
"isatty",
"line_buffering",
"mode",
"name",
"newlines",
"read",
"readable",
"readline",
"readlines",
"seek",
"seekable",
"tell",
"truncate",
"writable",
"write",
"writelines",
"_is_protocol",
"__annotations__",
"__class__",
"__class_getitem__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__enter__",
"__eq__",
"__exit__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__iter__",
"__module__",
"__ne__",
"__new__",
"__next__",
"__parameters__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | import os
import sys
import termios
import tty
from typing import Dict, List
def _print(*args, **kwargs):
print(*args, flush=True, end="", **kwargs)
def clear(k: int):
"""
Clear `k` lines below the cursor, returning the cursor to its original position
"""
_print(f"\x1b[2K\x1b[1E" * (k) + f"\x1b[{k}F")
def read_next_byte() -> bytes:
b = sys.stdin.buffer.read(1)
if b in (
b"\x03", # CTRL C
b"\x04", # CTRL D
b"q",
b"Q",
):
raise KeyboardInterrupt
return b
def read_bytes(num_bytes: int) -> bytes:
if num_bytes < 0:
raise ValueError(f"cannot read {num_bytes} bytes")
result = b""
for _ in range(num_bytes):
result += read_next_byte()
return result
def tui_select(title: str, options: List[str], clear_terminal: bool = False):
"""
Renders a terminal UI that allows users to select one of the options listed in `options`
Args:
options: A list of names for each of the options.
"""
if len(options) == 0:
raise ValueError("No options given")
def render(
curr_selected: int,
start_index: int = 0,
max_per_page: int = 10,
indent: str = " ",
) -> int:
if curr_selected < 0 or curr_selected >= len(options):
curr_selected = 0
_print(title)
_print("\x1b[2E") # two new lines
num_lines_rendered = 4 # 4 "extra" lines for header + footer
for i in range(start_index, start_index + max_per_page):
if i >= len(options):
break
name = options[i]
if i == curr_selected:
color = "\x1b[38;5;40m"
bold = "\x1b[1m"
reset = "\x1b[0m"
_print(f"{indent}{color}{bold}[{name}]{reset}\x1b[1E")
else:
_print(f"{indent}{name}\x1b[1E")
num_lines_rendered += 1
_print("\x1b[1E")
control_str = "[ARROW-KEYS] Navigate\t[ENTER] Select\t[Q] Quit"
_print(control_str)
_print("\x1b[1E")
_print(f"\x1b[{num_lines_rendered}F")
return num_lines_rendered
old_settings = termios.tcgetattr(sys.stdin.fileno())
tty.setraw(sys.stdin.fileno())
curr_selected = 0
start_index = 0
_, term_height = os.get_terminal_size()
# Get cursor height
res = b""
sys.stdout.write("\x1b[6n")
sys.stdout. | ()
while not res.endswith(b"R"):
res += sys.stdin.buffer.read(1)
curs_height = int(res.strip(b"\x1b[").split(b";", 1)[0])
max_per_page = term_height - curs_height - 4
# Hide the cursor
_print("\x1b[?25l")
if clear_terminal:
# This line
# (1) Clears the terminal window
# (2) Moves the cursor to the top left corner
_print("\x1b[2J\x1b[H")
num_lines_rendered = render(
curr_selected,
start_index=start_index,
max_per_page=max_per_page,
)
try:
while True:
b = read_bytes(1)
if b == b"\r":
return options[curr_selected]
elif b == b"\x1b":
b = read_bytes(2)
if b == b"[A": # Up Arrow
curr_selected = max(curr_selected - 1, 0)
if (
curr_selected - start_index < max_per_page // 2
and start_index > 0
):
start_index -= 1
elif b == b"[B": # Down Arrow
curr_selected = min(curr_selected + 1, len(options) - 1)
if (
curr_selected - start_index > max_per_page // 2
and start_index < len(options) - max_per_page
):
start_index += 1
else:
continue
clear(num_lines_rendered)
num_lines_rendered = render(
curr_selected,
start_index=start_index,
max_per_page=max_per_page,
)
except KeyboardInterrupt:
...
finally:
clear(num_lines_rendered)
# Unhide the cursor
_print("\x1b[?25h")
termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, old_settings)
def tui_select_table(
title: str,
column_names: List[str],
options: List[Dict[str, str]],
clear_terminal: bool = False,
):
if len(options) == 0:
raise ValueError("No options given")
elif len(column_names) == 0:
raise ValueError("No column names specified")
def render(
curr_selected: int,
term_width: int,
start_index: int = 0,
max_per_page: int = 10,
indent: str = " ",
column_spacing: str = " ",
) -> int:
if curr_selected < 0 or curr_selected >= len(options):
curr_selected = 0
_print(title)
_print("\x1b[2E") # two new lines
num_lines_rendered = 5 # 5 "extra" lines for header + footer
lengths = {col: len(col) for col in column_names}
for i in range(len(options)):
values = options[i]
for col in column_names:
lengths[col] = max(lengths[col], len(values[col]))
for i in range(start_index, start_index + max_per_page):
if i >= len(options):
break
values = options[i]
row_str = indent
for col in column_names:
item = values[col]
row_str = row_str + f"{item: <{lengths[col]}}" + column_spacing
if len(row_str) > term_width - 2:
row_str = row_str[: term_width - 5] + "... "
if i == curr_selected:
color = "\x1b[38;5;40m"
bold = "\x1b[1m"
reset = "\x1b[0m"
row_str = f"{color}{bold}{row_str}{reset}"
_print(f"{row_str}\x1b[1E")
num_lines_rendered += 1
_print("\x1b[1E")
control_str = "[ARROW-KEYS] Navigate\t[ENTER] Select\t[Q] Quit"
_print(control_str)
_print("\x1b[1E")
_print(f"\x1b[{num_lines_rendered}F")
return num_lines_rendered
old_settings = termios.tcgetattr(sys.stdin.fileno())
tty.setraw(sys.stdin.fileno())
curr_selected = 0
start_index = 0
term_width, term_height = os.get_terminal_size()
# Get cursor height
res = b""
sys.stdout.write("\x1b[6n")
sys.stdout.flush()
while not res.endswith(b"R"):
res += sys.stdin.buffer.read(1)
curs_height = int(res.strip(b"\x1b[").split(b";", 1)[0])
max_per_page = term_height - curs_height - 4
# Hide the cursor
_print("\x1b[?25l")
if clear_terminal:
# This line
# (1) Clears the terminal window
# (2) Moves the cursor to the top left corner
_print("\x1b[2J\x1b[H")
num_lines_rendered = render(
curr_selected,
start_index=start_index,
max_per_page=max_per_page,
term_width=term_width,
)
try:
while True:
b = read_bytes(1)
if b == b"\r":
return options[curr_selected]
elif b == b"\x1b":
b = read_bytes(2)
if b == b"[A": # Up Arrow
curr_selected = max(curr_selected - 1, 0)
if (
curr_selected - start_index < max_per_page // 2
and start_index > 0
):
start_index -= 1
elif b == b"[B": # Down Arrow
curr_selected = min(curr_selected + 1, len(options) - 1)
if (
curr_selected - start_index > max_per_page // 2
and start_index < len(options) - max_per_page
):
start_index += 1
else:
continue
clear(num_lines_rendered)
num_lines_rendered = render(
curr_selected,
start_index=start_index,
max_per_page=max_per_page,
term_width=term_width,
)
except KeyboardInterrupt:
...
finally:
clear(num_lines_rendered)
# Unhide the cursor
_print("\x1b[?25h")
termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, old_settings)
if __name__ == "__main__":
title = "Sample Selection Screen"
options = [f"Option {i}" for i in range(100)]
selected = tui_select(title, options)
if selected:
print(selected)
| latchbio__latch |
84 | 84-265-33 | inproject | LOCAL | [
"ALLREDUCE",
"CUSTOM",
"LOCAL",
"mro",
"PARAMETER_SERVER",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__doc__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__module__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__text_signature__",
"__weakrefoffset__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from itertools import chain
from dlrover.python.common.constants import DistributionStrategy
from dlrover.python.common.log_utils import default_logger as logger
def add_params(parser):
add_bool_param(
parser=parser,
name="--use_async",
default=False,
help="True for asynchronous SGD, False for synchronous SGD",
)
add_bool_param(
parser=parser,
name="--need_task_manager",
default=True,
help="If true, master creates a task manager for dynamic sharding. "
"Otherwise, no task manager is created",
)
add_bool_param(
parser=parser,
name="--need_node_manager",
default=True,
help="If true, master creates a pod manager to maintain the "
"cluster for the job. Otherwise, no pod manager is created",
)
add_bool_param(
parser=parser,
name="--enabled_auto_ps",
default=False,
help="If true, the master will auto-configure the resources "
"of PS nodes and adjust the resources at runtime",
)
add_bool_param(
parser=parser,
name="--enabled_auto_worker",
default=False,
help="If true, the master will auto-configure the resources "
"of worker nodes and adjust the resources at runtime",
)
add_bool_param(
parser=parser,
name="--task_fault_tolerance",
default=True,
help="If true, task manager supports fault tolerance, otherwise "
"no fault tolerance.",
)
add_bool_param(
parser=parser,
name="--relaunch_timeout_worker",
default=True,
help="If true, the master will detect the time of worker to "
"execute a task and relaunch the worker if timeout",
)
add_bool_param(
parser=parser,
name="--use_ddp",
default=False,
help="If true, the master calls DDPRendezvousServer,"
"or the master calls HorovodRendezvousServer",
)
parser.add_argument(
"--custom_scaling_strategy",
type=str,
default="off",
help="Set low priority gpu workers scaling out strategies when using "
"gpu elastic training. If 'off', low priority gpu workers can scale "
"out at any time as long as resources are available. If "
"'scaling_by_time', scale out at default period of time. If "
"'scaling_by_time:starttime-endtime', scale out during starttime to"
" endtime. The format of 'starttime' or 'endtime' is"
" 'hour:minute:second' of 24-hour system. Currently, only support "
"`scaling_by_time` strategy.",
)
add_bool_param(
parser=parser,
name="--need_tf_config",
default=False,
help="If true, needs to set TF_CONFIG env for ps/worker. Also "
"need to use fixed service name for workers",
)
parser.add_argument(
"--relaunch_on_worker_failure",
type=int,
help="The number of relaunch tries for a worker failure for "
"PS Strategy training",
default=3,
)
add_bool_param(
parser=parser,
name="--ps_is_critical",
default=True,
help="If true, ps pods are critical, and ps pod failure "
"results in job failure.",
)
parser.add_argument(
"--critical_worker_index",
default="default",
help="If 'default', worker0 is critical for PS strategy custom "
"training, none for others; "
"If 'none', all workers are non-critical; "
"Otherwise, a list of critical worker indices such as '1:0,3:1' "
"In each pair, the first value is the pod index and the second value "
"is the number of allowed relaunches before becoming critical",
)
parser.add_argument(
"--ps_relaunch_max_num",
type=int,
help="The max number of ps relaunches",
default=1,
)
parser.add_argument(
"--launch_worker_after_ps_running",
default="default",
help="This argument indicates if launch worker "
"pods (execpt worker0) after all ps pods are running. "
"If 'on', launch worker "
"pods (execpt worker0) after all ps pods are running. "
"If 'off', launch worker pods regardless of ps pod status "
"If 'default', when ps.core >= 16 with PS strategy, similar "
"to 'on', otherwise, similar to 'off'. ",
)
parser.add_argument(
"--num_workers", type=int, help="Number of workers", default=0
)
parser.add_argument(
"--worker_resource_request",
default="",
type=str,
help="The minimal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--worker_resource_limit",
type=str,
default="",
help="The maximal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to worker_resource_request",
)
parser.add_argument(
"--num_tf_master",
type=int,
help="Number of TensorFlow estimator master",
default=0,
)
parser.add_argument(
"--tf_master_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by TensorFlow estimator, "
" master e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--tf_master_resource_limit",
type=str,
default="",
help="The maximal resource required by TensorFlow estimator, "
"master e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to tf_master_resource_request",
)
parser.add_argument(
"--master_pod_priority",
default="",
help="The requested priority of master pod",
)
parser.add_argument(
"--tf_master_pod_priority",
default="",
help="The requested priority of tensorflow estimator master",
)
parser.add_argument(
"--worker_pod_priority",
default="",
help="The requested priority of worker pod, we support following"
"configs: high/low/0.5. The 0.5 means that half"
"worker pods have high priority, and half worker pods have"
"low priority. The default value is low",
)
parser.add_argument(
"--num_ps_pods", type=int, help="Number of PS pods", default=0
)
parser.add_argument(
"--ps_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--ps_resource_limit",
default="",
type=str,
help="The maximal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to worker_resource_request",
)
parser.add_argument(
"--ps_pod_priority",
default="",
help="The requested priority of PS pod",
)
parser.add_argument(
"--evaluator_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by evaluator, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--evaluator_resource_limit",
default="",
type=str,
help="The maximal resource required by evaluator, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to evaluator_resource_request",
)
parser.add_argument(
"--evaluator_pod_priority",
default="",
help="The requested priority of PS pod",
)
parser.add_argument(
"--num_evaluators",
type=int,
default=0,
help="The number of evaluator pods",
)
parser.add_argument(
"--namespace",
default="default",
type=str,
help="The name of the Kubernetes namespace where ElasticDL "
"pods will be created",
)
add_bool_param(
parser=parser,
name="--force_use_kube_config_file",
default=False,
help="If true, force to load the cluster config from ~/.kube/config "
"while submitting the ElasticDL job. Otherwise, if the client is in a "
"K8S environment, load the incluster config, if not, load the kube "
"config file.",
)
parser.add_argument(
"--distribution_strategy",
type=str,
choices=[
"",
DistributionStrategy. | ,
DistributionStrategy.PARAMETER_SERVER,
DistributionStrategy.ALLREDUCE,
DistributionStrategy.CUSTOM,
],
default=DistributionStrategy.PARAMETER_SERVER,
help="Master will use a distribution policy on a list of devices "
"according to the distributed strategy, "
"e.g. 'ParameterServerStrategy', 'AllreduceStrategy', "
"'CustomStrategy' or 'Local'",
)
def add_bool_param(parser, name, default, help):
parser.add_argument(
name, # should be in "--foo" format
nargs="?",
const=not default,
default=default,
type=lambda x: x.lower() in ["true", "yes", "t", "y"],
help=help,
)
def build_arguments_from_parsed_result(args, filter_args=None):
"""Reconstruct arguments from parsed result
Args:
args: result from `parser.parse_args()`
Returns:
list of string: ready for parser to parse,
such as ["--foo", "3", "--bar", False]
"""
items = vars(args).items()
if filter_args:
items = filter(lambda item: item[0] not in filter_args, items)
def _str_ignore_none(s):
if s is None:
return s
return str(s)
arguments = map(_str_ignore_none, chain(*items))
arguments = [
"--" + k if i % 2 == 0 else k for i, k in enumerate(arguments)
]
return arguments
def wrap_python_args_with_string(args):
"""Wrap argument values with string
Args:
args: list like ["--foo", "3", "--bar", False]
Returns:
list of string: like ["--foo", "'3'", "--bar", "'False'"]
"""
result = []
for value in args:
if not value.startswith("--"):
result.append("'{}'".format(value))
else:
result.append(value)
return result
def pos_int(arg):
res = int(arg)
if res <= 0:
raise ValueError("Positive integer argument required. Got %s" % res)
return res
def non_neg_int(arg):
res = int(arg)
if res < 0:
raise ValueError(
"Non-negative integer argument required. Get %s" % res
)
return res
def print_args(args, exclude_args=[], groups=None):
"""
Args:
args: parsing results returned from `parser.parse_args`
exclude_args: the arguments which won't be printed.
groups: It is a list of a list. It controls which options should be
printed together. For example, we expect all model specifications such
as `optimizer`, `loss` are better printed together.
groups = [["optimizer", "loss"]]
"""
def _get_attr(instance, attribute):
try:
return getattr(instance, attribute)
except AttributeError:
return None
dedup = set()
if groups:
for group in groups:
for element in group:
dedup.add(element)
logger.info("%s = %s", element, _get_attr(args, element))
other_options = [
(key, value)
for (key, value) in args.__dict__.items()
if key not in dedup and key not in exclude_args
]
for key, value in other_options:
logger.info("%s = %s", key, value)
def _build_master_args_parser():
parser = argparse.ArgumentParser(description="ElasticDL Master")
parser.add_argument(
"--port",
default=50001,
type=pos_int,
help="The listening port of master",
)
add_params(parser)
return parser
def parse_master_args(master_args=None):
parser = _build_master_args_parser()
args, unknown_args = parser.parse_known_args(args=master_args)
print_args(args)
if unknown_args:
logger.warning("Unknown arguments: %s", unknown_args)
return args
| intelligent-machine-learning__dlrover |
84 | 84-266-33 | inproject | PARAMETER_SERVER | [
"ALLREDUCE",
"CUSTOM",
"LOCAL",
"mro",
"PARAMETER_SERVER",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__doc__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__module__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__text_signature__",
"__weakrefoffset__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from itertools import chain
from dlrover.python.common.constants import DistributionStrategy
from dlrover.python.common.log_utils import default_logger as logger
def add_params(parser):
add_bool_param(
parser=parser,
name="--use_async",
default=False,
help="True for asynchronous SGD, False for synchronous SGD",
)
add_bool_param(
parser=parser,
name="--need_task_manager",
default=True,
help="If true, master creates a task manager for dynamic sharding. "
"Otherwise, no task manager is created",
)
add_bool_param(
parser=parser,
name="--need_node_manager",
default=True,
help="If true, master creates a pod manager to maintain the "
"cluster for the job. Otherwise, no pod manager is created",
)
add_bool_param(
parser=parser,
name="--enabled_auto_ps",
default=False,
help="If true, the master will auto-configure the resources "
"of PS nodes and adjust the resources at runtime",
)
add_bool_param(
parser=parser,
name="--enabled_auto_worker",
default=False,
help="If true, the master will auto-configure the resources "
"of worker nodes and adjust the resources at runtime",
)
add_bool_param(
parser=parser,
name="--task_fault_tolerance",
default=True,
help="If true, task manager supports fault tolerance, otherwise "
"no fault tolerance.",
)
add_bool_param(
parser=parser,
name="--relaunch_timeout_worker",
default=True,
help="If true, the master will detect the time of worker to "
"execute a task and relaunch the worker if timeout",
)
add_bool_param(
parser=parser,
name="--use_ddp",
default=False,
help="If true, the master calls DDPRendezvousServer,"
"or the master calls HorovodRendezvousServer",
)
parser.add_argument(
"--custom_scaling_strategy",
type=str,
default="off",
help="Set low priority gpu workers scaling out strategies when using "
"gpu elastic training. If 'off', low priority gpu workers can scale "
"out at any time as long as resources are available. If "
"'scaling_by_time', scale out at default period of time. If "
"'scaling_by_time:starttime-endtime', scale out during starttime to"
" endtime. The format of 'starttime' or 'endtime' is"
" 'hour:minute:second' of 24-hour system. Currently, only support "
"`scaling_by_time` strategy.",
)
add_bool_param(
parser=parser,
name="--need_tf_config",
default=False,
help="If true, needs to set TF_CONFIG env for ps/worker. Also "
"need to use fixed service name for workers",
)
parser.add_argument(
"--relaunch_on_worker_failure",
type=int,
help="The number of relaunch tries for a worker failure for "
"PS Strategy training",
default=3,
)
add_bool_param(
parser=parser,
name="--ps_is_critical",
default=True,
help="If true, ps pods are critical, and ps pod failure "
"results in job failure.",
)
parser.add_argument(
"--critical_worker_index",
default="default",
help="If 'default', worker0 is critical for PS strategy custom "
"training, none for others; "
"If 'none', all workers are non-critical; "
"Otherwise, a list of critical worker indices such as '1:0,3:1' "
"In each pair, the first value is the pod index and the second value "
"is the number of allowed relaunches before becoming critical",
)
parser.add_argument(
"--ps_relaunch_max_num",
type=int,
help="The max number of ps relaunches",
default=1,
)
parser.add_argument(
"--launch_worker_after_ps_running",
default="default",
help="This argument indicates if launch worker "
"pods (execpt worker0) after all ps pods are running. "
"If 'on', launch worker "
"pods (execpt worker0) after all ps pods are running. "
"If 'off', launch worker pods regardless of ps pod status "
"If 'default', when ps.core >= 16 with PS strategy, similar "
"to 'on', otherwise, similar to 'off'. ",
)
parser.add_argument(
"--num_workers", type=int, help="Number of workers", default=0
)
parser.add_argument(
"--worker_resource_request",
default="",
type=str,
help="The minimal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--worker_resource_limit",
type=str,
default="",
help="The maximal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to worker_resource_request",
)
parser.add_argument(
"--num_tf_master",
type=int,
help="Number of TensorFlow estimator master",
default=0,
)
parser.add_argument(
"--tf_master_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by TensorFlow estimator, "
" master e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--tf_master_resource_limit",
type=str,
default="",
help="The maximal resource required by TensorFlow estimator, "
"master e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to tf_master_resource_request",
)
parser.add_argument(
"--master_pod_priority",
default="",
help="The requested priority of master pod",
)
parser.add_argument(
"--tf_master_pod_priority",
default="",
help="The requested priority of tensorflow estimator master",
)
parser.add_argument(
"--worker_pod_priority",
default="",
help="The requested priority of worker pod, we support following"
"configs: high/low/0.5. The 0.5 means that half"
"worker pods have high priority, and half worker pods have"
"low priority. The default value is low",
)
parser.add_argument(
"--num_ps_pods", type=int, help="Number of PS pods", default=0
)
parser.add_argument(
"--ps_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--ps_resource_limit",
default="",
type=str,
help="The maximal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to worker_resource_request",
)
parser.add_argument(
"--ps_pod_priority",
default="",
help="The requested priority of PS pod",
)
parser.add_argument(
"--evaluator_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by evaluator, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--evaluator_resource_limit",
default="",
type=str,
help="The maximal resource required by evaluator, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to evaluator_resource_request",
)
parser.add_argument(
"--evaluator_pod_priority",
default="",
help="The requested priority of PS pod",
)
parser.add_argument(
"--num_evaluators",
type=int,
default=0,
help="The number of evaluator pods",
)
parser.add_argument(
"--namespace",
default="default",
type=str,
help="The name of the Kubernetes namespace where ElasticDL "
"pods will be created",
)
add_bool_param(
parser=parser,
name="--force_use_kube_config_file",
default=False,
help="If true, force to load the cluster config from ~/.kube/config "
"while submitting the ElasticDL job. Otherwise, if the client is in a "
"K8S environment, load the incluster config, if not, load the kube "
"config file.",
)
parser.add_argument(
"--distribution_strategy",
type=str,
choices=[
"",
DistributionStrategy.LOCAL,
DistributionStrategy. | ,
DistributionStrategy.ALLREDUCE,
DistributionStrategy.CUSTOM,
],
default=DistributionStrategy.PARAMETER_SERVER,
help="Master will use a distribution policy on a list of devices "
"according to the distributed strategy, "
"e.g. 'ParameterServerStrategy', 'AllreduceStrategy', "
"'CustomStrategy' or 'Local'",
)
def add_bool_param(parser, name, default, help):
parser.add_argument(
name, # should be in "--foo" format
nargs="?",
const=not default,
default=default,
type=lambda x: x.lower() in ["true", "yes", "t", "y"],
help=help,
)
def build_arguments_from_parsed_result(args, filter_args=None):
"""Reconstruct arguments from parsed result
Args:
args: result from `parser.parse_args()`
Returns:
list of string: ready for parser to parse,
such as ["--foo", "3", "--bar", False]
"""
items = vars(args).items()
if filter_args:
items = filter(lambda item: item[0] not in filter_args, items)
def _str_ignore_none(s):
if s is None:
return s
return str(s)
arguments = map(_str_ignore_none, chain(*items))
arguments = [
"--" + k if i % 2 == 0 else k for i, k in enumerate(arguments)
]
return arguments
def wrap_python_args_with_string(args):
"""Wrap argument values with string
Args:
args: list like ["--foo", "3", "--bar", False]
Returns:
list of string: like ["--foo", "'3'", "--bar", "'False'"]
"""
result = []
for value in args:
if not value.startswith("--"):
result.append("'{}'".format(value))
else:
result.append(value)
return result
def pos_int(arg):
res = int(arg)
if res <= 0:
raise ValueError("Positive integer argument required. Got %s" % res)
return res
def non_neg_int(arg):
res = int(arg)
if res < 0:
raise ValueError(
"Non-negative integer argument required. Get %s" % res
)
return res
def print_args(args, exclude_args=[], groups=None):
"""
Args:
args: parsing results returned from `parser.parse_args`
exclude_args: the arguments which won't be printed.
groups: It is a list of a list. It controls which options should be
printed together. For example, we expect all model specifications such
as `optimizer`, `loss` are better printed together.
groups = [["optimizer", "loss"]]
"""
def _get_attr(instance, attribute):
try:
return getattr(instance, attribute)
except AttributeError:
return None
dedup = set()
if groups:
for group in groups:
for element in group:
dedup.add(element)
logger.info("%s = %s", element, _get_attr(args, element))
other_options = [
(key, value)
for (key, value) in args.__dict__.items()
if key not in dedup and key not in exclude_args
]
for key, value in other_options:
logger.info("%s = %s", key, value)
def _build_master_args_parser():
parser = argparse.ArgumentParser(description="ElasticDL Master")
parser.add_argument(
"--port",
default=50001,
type=pos_int,
help="The listening port of master",
)
add_params(parser)
return parser
def parse_master_args(master_args=None):
parser = _build_master_args_parser()
args, unknown_args = parser.parse_known_args(args=master_args)
print_args(args)
if unknown_args:
logger.warning("Unknown arguments: %s", unknown_args)
return args
| intelligent-machine-learning__dlrover |
84 | 84-267-33 | inproject | ALLREDUCE | [
"ALLREDUCE",
"CUSTOM",
"LOCAL",
"mro",
"PARAMETER_SERVER",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__doc__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__module__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__text_signature__",
"__weakrefoffset__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from itertools import chain
from dlrover.python.common.constants import DistributionStrategy
from dlrover.python.common.log_utils import default_logger as logger
def add_params(parser):
add_bool_param(
parser=parser,
name="--use_async",
default=False,
help="True for asynchronous SGD, False for synchronous SGD",
)
add_bool_param(
parser=parser,
name="--need_task_manager",
default=True,
help="If true, master creates a task manager for dynamic sharding. "
"Otherwise, no task manager is created",
)
add_bool_param(
parser=parser,
name="--need_node_manager",
default=True,
help="If true, master creates a pod manager to maintain the "
"cluster for the job. Otherwise, no pod manager is created",
)
add_bool_param(
parser=parser,
name="--enabled_auto_ps",
default=False,
help="If true, the master will auto-configure the resources "
"of PS nodes and adjust the resources at runtime",
)
add_bool_param(
parser=parser,
name="--enabled_auto_worker",
default=False,
help="If true, the master will auto-configure the resources "
"of worker nodes and adjust the resources at runtime",
)
add_bool_param(
parser=parser,
name="--task_fault_tolerance",
default=True,
help="If true, task manager supports fault tolerance, otherwise "
"no fault tolerance.",
)
add_bool_param(
parser=parser,
name="--relaunch_timeout_worker",
default=True,
help="If true, the master will detect the time of worker to "
"execute a task and relaunch the worker if timeout",
)
add_bool_param(
parser=parser,
name="--use_ddp",
default=False,
help="If true, the master calls DDPRendezvousServer,"
"or the master calls HorovodRendezvousServer",
)
parser.add_argument(
"--custom_scaling_strategy",
type=str,
default="off",
help="Set low priority gpu workers scaling out strategies when using "
"gpu elastic training. If 'off', low priority gpu workers can scale "
"out at any time as long as resources are available. If "
"'scaling_by_time', scale out at default period of time. If "
"'scaling_by_time:starttime-endtime', scale out during starttime to"
" endtime. The format of 'starttime' or 'endtime' is"
" 'hour:minute:second' of 24-hour system. Currently, only support "
"`scaling_by_time` strategy.",
)
add_bool_param(
parser=parser,
name="--need_tf_config",
default=False,
help="If true, needs to set TF_CONFIG env for ps/worker. Also "
"need to use fixed service name for workers",
)
parser.add_argument(
"--relaunch_on_worker_failure",
type=int,
help="The number of relaunch tries for a worker failure for "
"PS Strategy training",
default=3,
)
add_bool_param(
parser=parser,
name="--ps_is_critical",
default=True,
help="If true, ps pods are critical, and ps pod failure "
"results in job failure.",
)
parser.add_argument(
"--critical_worker_index",
default="default",
help="If 'default', worker0 is critical for PS strategy custom "
"training, none for others; "
"If 'none', all workers are non-critical; "
"Otherwise, a list of critical worker indices such as '1:0,3:1' "
"In each pair, the first value is the pod index and the second value "
"is the number of allowed relaunches before becoming critical",
)
parser.add_argument(
"--ps_relaunch_max_num",
type=int,
help="The max number of ps relaunches",
default=1,
)
parser.add_argument(
"--launch_worker_after_ps_running",
default="default",
help="This argument indicates if launch worker "
"pods (execpt worker0) after all ps pods are running. "
"If 'on', launch worker "
"pods (execpt worker0) after all ps pods are running. "
"If 'off', launch worker pods regardless of ps pod status "
"If 'default', when ps.core >= 16 with PS strategy, similar "
"to 'on', otherwise, similar to 'off'. ",
)
parser.add_argument(
"--num_workers", type=int, help="Number of workers", default=0
)
parser.add_argument(
"--worker_resource_request",
default="",
type=str,
help="The minimal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--worker_resource_limit",
type=str,
default="",
help="The maximal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to worker_resource_request",
)
parser.add_argument(
"--num_tf_master",
type=int,
help="Number of TensorFlow estimator master",
default=0,
)
parser.add_argument(
"--tf_master_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by TensorFlow estimator, "
" master e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--tf_master_resource_limit",
type=str,
default="",
help="The maximal resource required by TensorFlow estimator, "
"master e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to tf_master_resource_request",
)
parser.add_argument(
"--master_pod_priority",
default="",
help="The requested priority of master pod",
)
parser.add_argument(
"--tf_master_pod_priority",
default="",
help="The requested priority of tensorflow estimator master",
)
parser.add_argument(
"--worker_pod_priority",
default="",
help="The requested priority of worker pod, we support following"
"configs: high/low/0.5. The 0.5 means that half"
"worker pods have high priority, and half worker pods have"
"low priority. The default value is low",
)
parser.add_argument(
"--num_ps_pods", type=int, help="Number of PS pods", default=0
)
parser.add_argument(
"--ps_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--ps_resource_limit",
default="",
type=str,
help="The maximal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to worker_resource_request",
)
parser.add_argument(
"--ps_pod_priority",
default="",
help="The requested priority of PS pod",
)
parser.add_argument(
"--evaluator_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by evaluator, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--evaluator_resource_limit",
default="",
type=str,
help="The maximal resource required by evaluator, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to evaluator_resource_request",
)
parser.add_argument(
"--evaluator_pod_priority",
default="",
help="The requested priority of PS pod",
)
parser.add_argument(
"--num_evaluators",
type=int,
default=0,
help="The number of evaluator pods",
)
parser.add_argument(
"--namespace",
default="default",
type=str,
help="The name of the Kubernetes namespace where ElasticDL "
"pods will be created",
)
add_bool_param(
parser=parser,
name="--force_use_kube_config_file",
default=False,
help="If true, force to load the cluster config from ~/.kube/config "
"while submitting the ElasticDL job. Otherwise, if the client is in a "
"K8S environment, load the incluster config, if not, load the kube "
"config file.",
)
parser.add_argument(
"--distribution_strategy",
type=str,
choices=[
"",
DistributionStrategy.LOCAL,
DistributionStrategy.PARAMETER_SERVER,
DistributionStrategy. | ,
DistributionStrategy.CUSTOM,
],
default=DistributionStrategy.PARAMETER_SERVER,
help="Master will use a distribution policy on a list of devices "
"according to the distributed strategy, "
"e.g. 'ParameterServerStrategy', 'AllreduceStrategy', "
"'CustomStrategy' or 'Local'",
)
def add_bool_param(parser, name, default, help):
parser.add_argument(
name, # should be in "--foo" format
nargs="?",
const=not default,
default=default,
type=lambda x: x.lower() in ["true", "yes", "t", "y"],
help=help,
)
def build_arguments_from_parsed_result(args, filter_args=None):
"""Reconstruct arguments from parsed result
Args:
args: result from `parser.parse_args()`
Returns:
list of string: ready for parser to parse,
such as ["--foo", "3", "--bar", False]
"""
items = vars(args).items()
if filter_args:
items = filter(lambda item: item[0] not in filter_args, items)
def _str_ignore_none(s):
if s is None:
return s
return str(s)
arguments = map(_str_ignore_none, chain(*items))
arguments = [
"--" + k if i % 2 == 0 else k for i, k in enumerate(arguments)
]
return arguments
def wrap_python_args_with_string(args):
"""Wrap argument values with string
Args:
args: list like ["--foo", "3", "--bar", False]
Returns:
list of string: like ["--foo", "'3'", "--bar", "'False'"]
"""
result = []
for value in args:
if not value.startswith("--"):
result.append("'{}'".format(value))
else:
result.append(value)
return result
def pos_int(arg):
res = int(arg)
if res <= 0:
raise ValueError("Positive integer argument required. Got %s" % res)
return res
def non_neg_int(arg):
res = int(arg)
if res < 0:
raise ValueError(
"Non-negative integer argument required. Get %s" % res
)
return res
def print_args(args, exclude_args=[], groups=None):
"""
Args:
args: parsing results returned from `parser.parse_args`
exclude_args: the arguments which won't be printed.
groups: It is a list of a list. It controls which options should be
printed together. For example, we expect all model specifications such
as `optimizer`, `loss` are better printed together.
groups = [["optimizer", "loss"]]
"""
def _get_attr(instance, attribute):
try:
return getattr(instance, attribute)
except AttributeError:
return None
dedup = set()
if groups:
for group in groups:
for element in group:
dedup.add(element)
logger.info("%s = %s", element, _get_attr(args, element))
other_options = [
(key, value)
for (key, value) in args.__dict__.items()
if key not in dedup and key not in exclude_args
]
for key, value in other_options:
logger.info("%s = %s", key, value)
def _build_master_args_parser():
parser = argparse.ArgumentParser(description="ElasticDL Master")
parser.add_argument(
"--port",
default=50001,
type=pos_int,
help="The listening port of master",
)
add_params(parser)
return parser
def parse_master_args(master_args=None):
parser = _build_master_args_parser()
args, unknown_args = parser.parse_known_args(args=master_args)
print_args(args)
if unknown_args:
logger.warning("Unknown arguments: %s", unknown_args)
return args
| intelligent-machine-learning__dlrover |
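The args.py source embedded in the rows above and below defines add_bool_param and build_arguments_from_parsed_result. The following is a minimal sketch of how they behave together; the flag names and values are illustrative only, not taken from any dataset row:

import argparse
from itertools import chain

def add_bool_param(parser, name, default, help):
    # Same shape as the helper in the embedded args.py: a bare "--flag"
    # flips the default via const; an explicit value goes through the lambda.
    parser.add_argument(
        name,
        nargs="?",
        const=not default,
        default=default,
        type=lambda x: x.lower() in ["true", "yes", "t", "y"],
        help=help,
    )

parser = argparse.ArgumentParser()
add_bool_param(parser, "--use_async", default=False, help="demo flag")
parser.add_argument("--num_workers", type=int, default=0)
args = parser.parse_args(["--use_async", "--num_workers", "2"])
print(args.use_async, args.num_workers)  # True 2

# The round-trip that build_arguments_from_parsed_result performs:
# flatten the Namespace back into ["--key", "value", ...] pairs.
flat = [str(v) for v in chain(*vars(args).items())]
rebuilt = ["--" + v if i % 2 == 0 else v for i, v in enumerate(flat)]
print(rebuilt)  # ['--use_async', 'True', '--num_workers', '2']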
84 | 84-268-33 | inproject | CUSTOM | [
"ALLREDUCE",
"CUSTOM",
"LOCAL",
"mro",
"PARAMETER_SERVER",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__doc__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__module__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__text_signature__",
"__weakrefoffset__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from itertools import chain
from dlrover.python.common.constants import DistributionStrategy
from dlrover.python.common.log_utils import default_logger as logger
def add_params(parser):
add_bool_param(
parser=parser,
name="--use_async",
default=False,
help="True for asynchronous SGD, False for synchronous SGD",
)
add_bool_param(
parser=parser,
name="--need_task_manager",
default=True,
help="If true, master creates a task manager for dynamic sharding. "
"Otherwise, no task manager is created",
)
add_bool_param(
parser=parser,
name="--need_node_manager",
default=True,
help="If true, master creates a pod manager to maintain the "
"cluster for the job. Otherwise, no pod manager is created",
)
add_bool_param(
parser=parser,
name="--enabled_auto_ps",
default=False,
help="If true, the master will auto-configure the resources "
"of PS nodes and adjust the resources at runtime",
)
add_bool_param(
parser=parser,
name="--enabled_auto_worker",
default=False,
help="If true, the master will auto-configure the resources "
"of worker nodes and adjust the resources at runtime",
)
add_bool_param(
parser=parser,
name="--task_fault_tolerance",
default=True,
help="If true, task manager supports fault tolerance, otherwise "
"no fault tolerance.",
)
add_bool_param(
parser=parser,
name="--relaunch_timeout_worker",
default=True,
help="If true, the master will detect the time of worker to "
"execute a task and relaunch the worker if timeout",
)
add_bool_param(
parser=parser,
name="--use_ddp",
default=False,
help="If true, the master calls DDPRendezvousServer,"
"or the master calls HorovodRendezvousServer",
)
parser.add_argument(
"--custom_scaling_strategy",
type=str,
default="off",
help="Set low priority gpu workers scaling out strategies when using "
"gpu elastic training. If 'off', low priority gpu workers can scale "
"out at any time as long as resources are available. If "
"'scaling_by_time', scale out at default period of time. If "
"'scaling_by_time:starttime-endtime', scale out during starttime to"
" endtime. The format of 'starttime' or 'endtime' is"
" 'hour:minute:second' of 24-hour system. Currently, only support "
"`scaling_by_time` strategy.",
)
add_bool_param(
parser=parser,
name="--need_tf_config",
default=False,
help="If true, needs to set TF_CONFIG env for ps/worker. Also "
"need to use fixed service name for workers",
)
parser.add_argument(
"--relaunch_on_worker_failure",
type=int,
help="The number of relaunch tries for a worker failure for "
"PS Strategy training",
default=3,
)
add_bool_param(
parser=parser,
name="--ps_is_critical",
default=True,
help="If true, ps pods are critical, and ps pod failure "
"results in job failure.",
)
parser.add_argument(
"--critical_worker_index",
default="default",
help="If 'default', worker0 is critical for PS strategy custom "
"training, none for others; "
"If 'none', all workers are non-critical; "
"Otherwise, a list of critical worker indices such as '1:0,3:1' "
"In each pair, the first value is the pod index and the second value "
"is the number of allowed relaunches before becoming critical",
)
parser.add_argument(
"--ps_relaunch_max_num",
type=int,
help="The max number of ps relaunches",
default=1,
)
parser.add_argument(
"--launch_worker_after_ps_running",
default="default",
help="This argument indicates if launch worker "
"pods (execpt worker0) after all ps pods are running. "
"If 'on', launch worker "
"pods (execpt worker0) after all ps pods are running. "
"If 'off', launch worker pods regardless of ps pod status "
"If 'default', when ps.core >= 16 with PS strategy, similar "
"to 'on', otherwise, similar to 'off'. ",
)
parser.add_argument(
"--num_workers", type=int, help="Number of workers", default=0
)
parser.add_argument(
"--worker_resource_request",
default="",
type=str,
help="The minimal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--worker_resource_limit",
type=str,
default="",
help="The maximal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to worker_resource_request",
)
parser.add_argument(
"--num_tf_master",
type=int,
help="Number of TensorFlow estimator master",
default=0,
)
parser.add_argument(
"--tf_master_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by TensorFlow estimator, "
" master e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--tf_master_resource_limit",
type=str,
default="",
help="The maximal resource required by TensorFlow estimator, "
"master e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to tf_master_resource_request",
)
parser.add_argument(
"--master_pod_priority",
default="",
help="The requested priority of master pod",
)
parser.add_argument(
"--tf_master_pod_priority",
default="",
help="The requested priority of tensorflow estimator master",
)
parser.add_argument(
"--worker_pod_priority",
default="",
help="The requested priority of worker pod, we support following"
"configs: high/low/0.5. The 0.5 means that half"
"worker pods have high priority, and half worker pods have"
"low priority. The default value is low",
)
parser.add_argument(
"--num_ps_pods", type=int, help="Number of PS pods", default=0
)
parser.add_argument(
"--ps_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--ps_resource_limit",
default="",
type=str,
help="The maximal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to worker_resource_request",
)
parser.add_argument(
"--ps_pod_priority",
default="",
help="The requested priority of PS pod",
)
parser.add_argument(
"--evaluator_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by evaluator, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--evaluator_resource_limit",
default="",
type=str,
help="The maximal resource required by evaluator, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to evaluator_resource_request",
)
parser.add_argument(
"--evaluator_pod_priority",
default="",
help="The requested priority of PS pod",
)
parser.add_argument(
"--num_evaluators",
type=int,
default=0,
help="The number of evaluator pods",
)
parser.add_argument(
"--namespace",
default="default",
type=str,
help="The name of the Kubernetes namespace where ElasticDL "
"pods will be created",
)
add_bool_param(
parser=parser,
name="--force_use_kube_config_file",
default=False,
help="If true, force to load the cluster config from ~/.kube/config "
"while submitting the ElasticDL job. Otherwise, if the client is in a "
"K8S environment, load the incluster config, if not, load the kube "
"config file.",
)
parser.add_argument(
"--distribution_strategy",
type=str,
choices=[
"",
DistributionStrategy.LOCAL,
DistributionStrategy.PARAMETER_SERVER,
DistributionStrategy.ALLREDUCE,
DistributionStrategy. | ,
],
default=DistributionStrategy.PARAMETER_SERVER,
help="Master will use a distribution policy on a list of devices "
"according to the distributed strategy, "
"e.g. 'ParameterServerStrategy', 'AllreduceStrategy', "
"'CustomStrategy' or 'Local'",
)
def add_bool_param(parser, name, default, help):
parser.add_argument(
name, # should be in "--foo" format
nargs="?",
const=not default,
default=default,
type=lambda x: x.lower() in ["true", "yes", "t", "y"],
help=help,
)
def build_arguments_from_parsed_result(args, filter_args=None):
"""Reconstruct arguments from parsed result
Args:
args: result from `parser.parse_args()`
Returns:
list of string: ready for parser to parse,
such as ["--foo", "3", "--bar", False]
"""
items = vars(args).items()
if filter_args:
items = filter(lambda item: item[0] not in filter_args, items)
def _str_ignore_none(s):
if s is None:
return s
return str(s)
arguments = map(_str_ignore_none, chain(*items))
arguments = [
"--" + k if i % 2 == 0 else k for i, k in enumerate(arguments)
]
return arguments
def wrap_python_args_with_string(args):
"""Wrap argument values with string
Args:
args: list like ["--foo", "3", "--bar", False]
Returns:
list of string: like ["--foo", "'3'", "--bar", "'False'"]
"""
result = []
for value in args:
if not value.startswith("--"):
result.append("'{}'".format(value))
else:
result.append(value)
return result
def pos_int(arg):
res = int(arg)
if res <= 0:
raise ValueError("Positive integer argument required. Got %s" % res)
return res
def non_neg_int(arg):
res = int(arg)
if res < 0:
raise ValueError(
"Non-negative integer argument required. Get %s" % res
)
return res
def print_args(args, exclude_args=[], groups=None):
"""
Args:
args: parsing results returned from `parser.parse_args`
exclude_args: the arguments which won't be printed.
groups: It is a list of a list. It controls which options should be
printed together. For example, we expect all model specifications such
as `optimizer`, `loss` are better printed together.
groups = [["optimizer", "loss"]]
"""
def _get_attr(instance, attribute):
try:
return getattr(instance, attribute)
except AttributeError:
return None
dedup = set()
if groups:
for group in groups:
for element in group:
dedup.add(element)
logger.info("%s = %s", element, _get_attr(args, element))
other_options = [
(key, value)
for (key, value) in args.__dict__.items()
if key not in dedup and key not in exclude_args
]
for key, value in other_options:
logger.info("%s = %s", key, value)
def _build_master_args_parser():
parser = argparse.ArgumentParser(description="ElasticDL Master")
parser.add_argument(
"--port",
default=50001,
type=pos_int,
help="The listening port of master",
)
add_params(parser)
return parser
def parse_master_args(master_args=None):
parser = _build_master_args_parser()
args, unknown_args = parser.parse_known_args(args=master_args)
print_args(args)
if unknown_args:
logger.warning("Unknown arguments: %s", unknown_args)
return args
| intelligent-machine-learning__dlrover |
84 | 84-270-37 | inproject | PARAMETER_SERVER | [
"ALLREDUCE",
"CUSTOM",
"LOCAL",
"mro",
"PARAMETER_SERVER",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__doc__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__module__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__text_signature__",
"__weakrefoffset__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from itertools import chain
from dlrover.python.common.constants import DistributionStrategy
from dlrover.python.common.log_utils import default_logger as logger
def add_params(parser):
add_bool_param(
parser=parser,
name="--use_async",
default=False,
help="True for asynchronous SGD, False for synchronous SGD",
)
add_bool_param(
parser=parser,
name="--need_task_manager",
default=True,
help="If true, master creates a task manager for dynamic sharding. "
"Otherwise, no task manager is created",
)
add_bool_param(
parser=parser,
name="--need_node_manager",
default=True,
help="If true, master creates a pod manager to maintain the "
"cluster for the job. Otherwise, no pod manager is created",
)
add_bool_param(
parser=parser,
name="--enabled_auto_ps",
default=False,
help="If true, the master will auto-configure the resources "
"of PS nodes and adjust the resources at runtime",
)
add_bool_param(
parser=parser,
name="--enabled_auto_worker",
default=False,
help="If true, the master will auto-configure the resources "
"of worker nodes and adjust the resources at runtime",
)
add_bool_param(
parser=parser,
name="--task_fault_tolerance",
default=True,
help="If true, task manager supports fault tolerance, otherwise "
"no fault tolerance.",
)
add_bool_param(
parser=parser,
name="--relaunch_timeout_worker",
default=True,
help="If true, the master will detect the time of worker to "
"execute a task and relaunch the worker if timeout",
)
add_bool_param(
parser=parser,
name="--use_ddp",
default=False,
help="If true, the master calls DDPRendezvousServer,"
"or the master calls HorovodRendezvousServer",
)
parser.add_argument(
"--custom_scaling_strategy",
type=str,
default="off",
help="Set low priority gpu workers scaling out strategies when using "
"gpu elastic training. If 'off', low priority gpu workers can scale "
"out at any time as long as resources are available. If "
"'scaling_by_time', scale out at default period of time. If "
"'scaling_by_time:starttime-endtime', scale out during starttime to"
" endtime. The format of 'starttime' or 'endtime' is"
" 'hour:minute:second' of 24-hour system. Currently, only support "
"`scaling_by_time` strategy.",
)
add_bool_param(
parser=parser,
name="--need_tf_config",
default=False,
help="If true, needs to set TF_CONFIG env for ps/worker. Also "
"need to use fixed service name for workers",
)
parser.add_argument(
"--relaunch_on_worker_failure",
type=int,
help="The number of relaunch tries for a worker failure for "
"PS Strategy training",
default=3,
)
add_bool_param(
parser=parser,
name="--ps_is_critical",
default=True,
help="If true, ps pods are critical, and ps pod failure "
"results in job failure.",
)
parser.add_argument(
"--critical_worker_index",
default="default",
help="If 'default', worker0 is critical for PS strategy custom "
"training, none for others; "
"If 'none', all workers are non-critical; "
"Otherwise, a list of critical worker indices such as '1:0,3:1' "
"In each pair, the first value is the pod index and the second value "
"is the number of allowed relaunches before becoming critical",
)
parser.add_argument(
"--ps_relaunch_max_num",
type=int,
help="The max number of ps relaunches",
default=1,
)
parser.add_argument(
"--launch_worker_after_ps_running",
default="default",
help="This argument indicates if launch worker "
"pods (execpt worker0) after all ps pods are running. "
"If 'on', launch worker "
"pods (execpt worker0) after all ps pods are running. "
"If 'off', launch worker pods regardless of ps pod status "
"If 'default', when ps.core >= 16 with PS strategy, similar "
"to 'on', otherwise, similar to 'off'. ",
)
parser.add_argument(
"--num_workers", type=int, help="Number of workers", default=0
)
parser.add_argument(
"--worker_resource_request",
default="",
type=str,
help="The minimal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--worker_resource_limit",
type=str,
default="",
help="The maximal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to worker_resource_request",
)
parser.add_argument(
"--num_tf_master",
type=int,
help="Number of TensorFlow estimator master",
default=0,
)
parser.add_argument(
"--tf_master_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by TensorFlow estimator, "
" master e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--tf_master_resource_limit",
type=str,
default="",
help="The maximal resource required by TensorFlow estimator, "
"master e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to tf_master_resource_request",
)
parser.add_argument(
"--master_pod_priority",
default="",
help="The requested priority of master pod",
)
parser.add_argument(
"--tf_master_pod_priority",
default="",
help="The requested priority of tensorflow estimator master",
)
parser.add_argument(
"--worker_pod_priority",
default="",
help="The requested priority of worker pod, we support following"
"configs: high/low/0.5. The 0.5 means that half"
"worker pods have high priority, and half worker pods have"
"low priority. The default value is low",
)
parser.add_argument(
"--num_ps_pods", type=int, help="Number of PS pods", default=0
)
parser.add_argument(
"--ps_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--ps_resource_limit",
default="",
type=str,
help="The maximal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to worker_resource_request",
)
parser.add_argument(
"--ps_pod_priority",
default="",
help="The requested priority of PS pod",
)
parser.add_argument(
"--evaluator_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by evaluator, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--evaluator_resource_limit",
default="",
type=str,
help="The maximal resource required by evaluator, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to evaluator_resource_request",
)
parser.add_argument(
"--evaluator_pod_priority",
default="",
help="The requested priority of PS pod",
)
parser.add_argument(
"--num_evaluators",
type=int,
default=0,
help="The number of evaluator pods",
)
parser.add_argument(
"--namespace",
default="default",
type=str,
help="The name of the Kubernetes namespace where ElasticDL "
"pods will be created",
)
add_bool_param(
parser=parser,
name="--force_use_kube_config_file",
default=False,
help="If true, force to load the cluster config from ~/.kube/config "
"while submitting the ElasticDL job. Otherwise, if the client is in a "
"K8S environment, load the incluster config, if not, load the kube "
"config file.",
)
parser.add_argument(
"--distribution_strategy",
type=str,
choices=[
"",
DistributionStrategy.LOCAL,
DistributionStrategy.PARAMETER_SERVER,
DistributionStrategy.ALLREDUCE,
DistributionStrategy.CUSTOM,
],
default=DistributionStrategy. | ,
help="Master will use a distribution policy on a list of devices "
"according to the distributed strategy, "
"e.g. 'ParameterServerStrategy', 'AllreduceStrategy', "
"'CustomStrategy' or 'Local'",
)
def add_bool_param(parser, name, default, help):
parser.add_argument(
name, # should be in "--foo" format
nargs="?",
const=not default,
default=default,
type=lambda x: x.lower() in ["true", "yes", "t", "y"],
help=help,
)
def build_arguments_from_parsed_result(args, filter_args=None):
"""Reconstruct arguments from parsed result
Args:
args: result from `parser.parse_args()`
Returns:
list of string: ready for parser to parse,
such as ["--foo", "3", "--bar", False]
"""
items = vars(args).items()
if filter_args:
items = filter(lambda item: item[0] not in filter_args, items)
def _str_ignore_none(s):
if s is None:
return s
return str(s)
arguments = map(_str_ignore_none, chain(*items))
arguments = [
"--" + k if i % 2 == 0 else k for i, k in enumerate(arguments)
]
return arguments
def wrap_python_args_with_string(args):
"""Wrap argument values with string
Args:
args: list like ["--foo", "3", "--bar", False]
Returns:
list of string: like ["--foo", "'3'", "--bar", "'False'"]
"""
result = []
for value in args:
if not value.startswith("--"):
result.append("'{}'".format(value))
else:
result.append(value)
return result
def pos_int(arg):
res = int(arg)
if res <= 0:
raise ValueError("Positive integer argument required. Got %s" % res)
return res
def non_neg_int(arg):
res = int(arg)
if res < 0:
raise ValueError(
"Non-negative integer argument required. Get %s" % res
)
return res
def print_args(args, exclude_args=[], groups=None):
"""
Args:
args: parsing results returned from `parser.parse_args`
exclude_args: the arguments which won't be printed.
groups: It is a list of a list. It controls which options should be
printed together. For example, we expect all model specifications such
as `optimizer`, `loss` are better printed together.
groups = [["optimizer", "loss"]]
"""
def _get_attr(instance, attribute):
try:
return getattr(instance, attribute)
except AttributeError:
return None
dedup = set()
if groups:
for group in groups:
for element in group:
dedup.add(element)
logger.info("%s = %s", element, _get_attr(args, element))
other_options = [
(key, value)
for (key, value) in args.__dict__.items()
if key not in dedup and key not in exclude_args
]
for key, value in other_options:
logger.info("%s = %s", key, value)
def _build_master_args_parser():
parser = argparse.ArgumentParser(description="ElasticDL Master")
parser.add_argument(
"--port",
default=50001,
type=pos_int,
help="The listening port of master",
)
add_params(parser)
return parser
def parse_master_args(master_args=None):
parser = _build_master_args_parser()
args, unknown_args = parser.parse_known_args(args=master_args)
print_args(args)
if unknown_args:
logger.warning("Unknown arguments: %s", unknown_args)
return args
| intelligent-machine-learning__dlrover |
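The validator and quoting helpers in the args.py rows above (pos_int, non_neg_int, wrap_python_args_with_string) are small enough to illustrate directly; the values below are hypothetical:

# pos_int, as defined in the rows above, rejects zero and negative values;
# when used as an argparse type, the ValueError surfaces as an "invalid value" error.
def pos_int(arg):
    res = int(arg)
    if res <= 0:
        raise ValueError("Positive integer argument required. Got %s" % res)
    return res

print(pos_int("50001"))  # 50001
try:
    pos_int("0")
except ValueError as e:
    print(e)  # Positive integer argument required. Got 0

# wrap_python_args_with_string quotes plain values but leaves "--flags" alone,
# which is what you want when re-assembling a shell command line.
def wrap_python_args_with_string(args):
    return [v if v.startswith("--") else "'{}'".format(v) for v in args]

print(wrap_python_args_with_string(["--foo", "3", "--bar", "False"]))
# ['--foo', "'3'", '--bar', "'False'"]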
84 | 84-368-23 | infile | info | [
"addFilter",
"addHandler",
"callHandlers",
"critical",
"debug",
"disabled",
"error",
"exception",
"fatal",
"filter",
"filters",
"findCaller",
"getChild",
"getEffectiveLevel",
"handle",
"handlers",
"hasHandlers",
"info",
"isEnabledFor",
"level",
"log",
"makeRecord",
"name",
"parent",
"propagate",
"removeFilter",
"removeHandler",
"setLevel",
"warn",
"warning",
"_cache",
"_log",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from itertools import chain
from dlrover.python.common.constants import DistributionStrategy
from dlrover.python.common.log_utils import default_logger as logger
def add_params(parser):
add_bool_param(
parser=parser,
name="--use_async",
default=False,
help="True for asynchronous SGD, False for synchronous SGD",
)
add_bool_param(
parser=parser,
name="--need_task_manager",
default=True,
help="If true, master creates a task manager for dynamic sharding. "
"Otherwise, no task manager is created",
)
add_bool_param(
parser=parser,
name="--need_node_manager",
default=True,
help="If true, master creates a pod manager to maintain the "
"cluster for the job. Otherwise, no pod manager is created",
)
add_bool_param(
parser=parser,
name="--enabled_auto_ps",
default=False,
help="If true, the master will auto-configure the resources "
"of PS nodes and adjust the resources at runtime",
)
add_bool_param(
parser=parser,
name="--enabled_auto_worker",
default=False,
help="If true, the master will auto-configure the resources "
"of worker nodes and adjust the resources at runtime",
)
add_bool_param(
parser=parser,
name="--task_fault_tolerance",
default=True,
help="If true, task manager supports fault tolerance, otherwise "
"no fault tolerance.",
)
add_bool_param(
parser=parser,
name="--relaunch_timeout_worker",
default=True,
help="If true, the master will detect the time of worker to "
"execute a task and relaunch the worker if timeout",
)
add_bool_param(
parser=parser,
name="--use_ddp",
default=False,
help="If true, the master calls DDPRendezvousServer,"
"or the master calls HorovodRendezvousServer",
)
parser.add_argument(
"--custom_scaling_strategy",
type=str,
default="off",
help="Set low priority gpu workers scaling out strategies when using "
"gpu elastic training. If 'off', low priority gpu workers can scale "
"out at any time as long as resources are available. If "
"'scaling_by_time', scale out at default period of time. If "
"'scaling_by_time:starttime-endtime', scale out during starttime to"
" endtime. The format of 'starttime' or 'endtime' is"
" 'hour:minute:second' of 24-hour system. Currently, only support "
"`scaling_by_time` strategy.",
)
add_bool_param(
parser=parser,
name="--need_tf_config",
default=False,
help="If true, needs to set TF_CONFIG env for ps/worker. Also "
"need to use fixed service name for workers",
)
parser.add_argument(
"--relaunch_on_worker_failure",
type=int,
help="The number of relaunch tries for a worker failure for "
"PS Strategy training",
default=3,
)
add_bool_param(
parser=parser,
name="--ps_is_critical",
default=True,
help="If true, ps pods are critical, and ps pod failure "
"results in job failure.",
)
parser.add_argument(
"--critical_worker_index",
default="default",
help="If 'default', worker0 is critical for PS strategy custom "
"training, none for others; "
"If 'none', all workers are non-critical; "
"Otherwise, a list of critical worker indices such as '1:0,3:1' "
"In each pair, the first value is the pod index and the second value "
"is the number of allowed relaunches before becoming critical",
)
parser.add_argument(
"--ps_relaunch_max_num",
type=int,
help="The max number of ps relaunches",
default=1,
)
parser.add_argument(
"--launch_worker_after_ps_running",
default="default",
help="This argument indicates if launch worker "
"pods (execpt worker0) after all ps pods are running. "
"If 'on', launch worker "
"pods (execpt worker0) after all ps pods are running. "
"If 'off', launch worker pods regardless of ps pod status "
"If 'default', when ps.core >= 16 with PS strategy, similar "
"to 'on', otherwise, similar to 'off'. ",
)
parser.add_argument(
"--num_workers", type=int, help="Number of workers", default=0
)
parser.add_argument(
"--worker_resource_request",
default="",
type=str,
help="The minimal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--worker_resource_limit",
type=str,
default="",
help="The maximal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to worker_resource_request",
)
parser.add_argument(
"--num_tf_master",
type=int,
help="Number of TensorFlow estimator master",
default=0,
)
parser.add_argument(
"--tf_master_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by TensorFlow estimator, "
" master e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--tf_master_resource_limit",
type=str,
default="",
help="The maximal resource required by TensorFlow estimator, "
"master e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to tf_master_resource_request",
)
parser.add_argument(
"--master_pod_priority",
default="",
help="The requested priority of master pod",
)
parser.add_argument(
"--tf_master_pod_priority",
default="",
help="The requested priority of tensorflow estimator master",
)
parser.add_argument(
"--worker_pod_priority",
default="",
help="The requested priority of worker pod, we support following"
"configs: high/low/0.5. The 0.5 means that half"
"worker pods have high priority, and half worker pods have"
"low priority. The default value is low",
)
parser.add_argument(
"--num_ps_pods", type=int, help="Number of PS pods", default=0
)
parser.add_argument(
"--ps_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--ps_resource_limit",
default="",
type=str,
help="The maximal resource required by worker, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to worker_resource_request",
)
parser.add_argument(
"--ps_pod_priority",
default="",
help="The requested priority of PS pod",
)
parser.add_argument(
"--evaluator_resource_request",
default="cpu=1,memory=4096Mi",
type=str,
help="The minimal resource required by evaluator, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1",
)
parser.add_argument(
"--evaluator_resource_limit",
default="",
type=str,
help="The maximal resource required by evaluator, "
"e.g. cpu=1,memory=1024Mi,disk=1024Mi,gpu=1,"
"default to evaluator_resource_request",
)
parser.add_argument(
"--evaluator_pod_priority",
default="",
help="The requested priority of PS pod",
)
parser.add_argument(
"--num_evaluators",
type=int,
default=0,
help="The number of evaluator pods",
)
parser.add_argument(
"--namespace",
default="default",
type=str,
help="The name of the Kubernetes namespace where ElasticDL "
"pods will be created",
)
add_bool_param(
parser=parser,
name="--force_use_kube_config_file",
default=False,
help="If true, force to load the cluster config from ~/.kube/config "
"while submitting the ElasticDL job. Otherwise, if the client is in a "
"K8S environment, load the incluster config, if not, load the kube "
"config file.",
)
parser.add_argument(
"--distribution_strategy",
type=str,
choices=[
"",
DistributionStrategy.LOCAL,
DistributionStrategy.PARAMETER_SERVER,
DistributionStrategy.ALLREDUCE,
DistributionStrategy.CUSTOM,
],
default=DistributionStrategy.PARAMETER_SERVER,
help="Master will use a distribution policy on a list of devices "
"according to the distributed strategy, "
"e.g. 'ParameterServerStrategy', 'AllreduceStrategy', "
"'CustomStrategy' or 'Local'",
)
def add_bool_param(parser, name, default, help):
parser.add_argument(
name, # should be in "--foo" format
nargs="?",
const=not default,
default=default,
type=lambda x: x.lower() in ["true", "yes", "t", "y"],
help=help,
)
def build_arguments_from_parsed_result(args, filter_args=None):
"""Reconstruct arguments from parsed result
Args:
args: result from `parser.parse_args()`
Returns:
list of string: ready for parser to parse,
such as ["--foo", "3", "--bar", False]
"""
items = vars(args).items()
if filter_args:
items = filter(lambda item: item[0] not in filter_args, items)
def _str_ignore_none(s):
if s is None:
return s
return str(s)
arguments = map(_str_ignore_none, chain(*items))
arguments = [
"--" + k if i % 2 == 0 else k for i, k in enumerate(arguments)
]
return arguments
def wrap_python_args_with_string(args):
"""Wrap argument values with string
Args:
args: list like ["--foo", "3", "--bar", False]
Returns:
list of string: like ["--foo", "'3'", "--bar", "'False'"]
"""
result = []
for value in args:
if not value.startswith("--"):
result.append("'{}'".format(value))
else:
result.append(value)
return result
def pos_int(arg):
res = int(arg)
if res <= 0:
raise ValueError("Positive integer argument required. Got %s" % res)
return res
def non_neg_int(arg):
res = int(arg)
if res < 0:
raise ValueError(
"Non-negative integer argument required. Get %s" % res
)
return res
def print_args(args, exclude_args=[], groups=None):
"""
Args:
args: parsing results returned from `parser.parse_args`
exclude_args: the arguments which won't be printed.
groups: It is a list of a list. It controls which options should be
printed together. For example, we expect all model specifications such
as `optimizer`, `loss` are better printed together.
groups = [["optimizer", "loss"]]
"""
def _get_attr(instance, attribute):
try:
return getattr(instance, attribute)
except AttributeError:
return None
dedup = set()
if groups:
for group in groups:
for element in group:
dedup.add(element)
logger. | ("%s = %s", element, _get_attr(args, element))
other_options = [
(key, value)
for (key, value) in args.__dict__.items()
if key not in dedup and key not in exclude_args
]
for key, value in other_options:
logger.info("%s = %s", key, value)
def _build_master_args_parser():
parser = argparse.ArgumentParser(description="ElasticDL Master")
parser.add_argument(
"--port",
default=50001,
type=pos_int,
help="The listening port of master",
)
add_params(parser)
return parser
def parse_master_args(master_args=None):
parser = _build_master_args_parser()
args, unknown_args = parser.parse_known_args(args=master_args)
print_args(args)
if unknown_args:
logger.warning("Unknown arguments: %s", unknown_args)
return args
| intelligent-machine-learning__dlrover |
86 | 86-36-60 | inproject | ALLREDUCE | [
"ALLREDUCE",
"CUSTOM",
"LOCAL",
"mro",
"PARAMETER_SERVER",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__doc__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__module__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__text_signature__",
"__weakrefoffset__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy. | :
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Composite the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there also are "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All task completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exit with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
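The master.py row above wires optional services by branching on DistributionStrategy. The sketch below mirrors that dispatch with stand-in classes; the literal strategy strings are assumptions, not values taken from dlrover.python.common.constants:

from argparse import Namespace

class DistributionStrategy:  # stand-in; the real constants live in dlrover
    LOCAL = "Local"
    PARAMETER_SERVER = "ParameterServerStrategy"
    ALLREDUCE = "AllreduceStrategy"
    CUSTOM = "CustomStrategy"

class ElasticPsService:  # placeholder for the imported service class
    pass

def create_elastic_ps_service_if_needed(args):
    # Same shape as the helper in the row above: only the parameter-server
    # strategy gets an ElasticPsService; every other strategy gets None.
    if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
        return ElasticPsService()
    return None

ps_args = Namespace(distribution_strategy=DistributionStrategy.PARAMETER_SERVER)
local_args = Namespace(distribution_strategy=DistributionStrategy.LOCAL)
print(type(create_elastic_ps_service_if_needed(ps_args)).__name__)  # ElasticPsService
print(create_elastic_ps_service_if_needed(local_args))  # None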
86 | 86-46-58 | inproject | PARAMETER_SERVER | [
"ALLREDUCE",
"CUSTOM",
"LOCAL",
"mro",
"PARAMETER_SERVER",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__call__",
"__class__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__doc__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__module__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__text_signature__",
"__weakrefoffset__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy. | :
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Composite the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there also are "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All task completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exit with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-53-13 | inproject | speed_monitor | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self. | = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Composite the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there also are "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All task completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exit with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
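The Master.run loop in the rows above reduces to a simple polling pattern: sleep, re-check the exit conditions, and let request_stop flip a flag. A stripped-down, hypothetical version of that pair:

import time

class MiniMaster:
    """Stripped-down sketch of the run/request_stop pair shown above."""

    def __init__(self):
        self._stop_requested = False
        self._exit_code = 0

    def request_stop(self, success, msg=""):
        # The real Master also records an exit reason and logs the message.
        self._stop_requested = True
        self._exit_code = 0 if success else 1

    def run(self, poll_seconds=0.01):
        # The embedded master.py sleeps 30s per iteration; a short interval
        # keeps this demo fast.
        while not self._stop_requested:
            time.sleep(poll_seconds)
        return self._exit_code

master = MiniMaster()
master.request_stop(success=True, msg="job finished")
print(master.run())  # 0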
86 | 86-55-43 | inproject | speed_monitor | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self. | )
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Composite the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there also are "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All task completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exit with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-60-59 | inproject | speed_monitor | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self. | )
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Composite the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there also are "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All task completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exit with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-64-13 | infile | rendezvous_server | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self. | = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Composite the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there also are "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All task completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exit with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-65-13 | infile | job_metric_collector | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self. | = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Composite the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there also are "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All task completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exit with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-65-41 | infile | _create_metric_collector_if_needed | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self. | (
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Composite the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there also are "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All task completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exit with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-68-13 | infile | elastic_ps_service | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self. | = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Composite the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there also are "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All task completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exit with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-69-13 | infile | _master_server | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self. | = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Composite the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there also are "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All task completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exit with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-69-35 | infile | _create_master_grpc_service | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self. | (args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Composite the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there also are "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All task completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exit with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-72-13 | random | _exit_code | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self. | = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Composite the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there also are "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All task completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exit with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-89-24 | inproject | node_manager | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self. | .get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Composite the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there are still "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All tasks completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exits with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-89-37 | inproject | get_job_uuid | [
"add_pod_event_callback",
"all_critical_node_completed",
"all_workers_deleted",
"all_workers_exited",
"all_workers_failed",
"get_all_training_nodes",
"get_cur_cluster_ps",
"get_job_uuid",
"get_next_cluster_ps",
"get_running_workers",
"ready_for_new_ps_cluster",
"remove_training_nodes",
"remove_worker",
"start",
"start_auto_scale",
"stop",
"update_node_resource_usage",
"_chief_worker_started",
"_critical_worker_index",
"_deleted_ps_pod_ids",
"_get_pod_counter",
"_get_worker_status_counter",
"_init_job_nodes",
"_job_nodes",
"_job_resource",
"_job_uuid",
"_k8s_client",
"_last_pod_stats",
"_lock",
"_monitor_nodes",
"_node_watcher",
"_pending_relaunch_count",
"_pod_event_callbacks",
"_process_event",
"_process_list_nodes",
"_process_node_events",
"_ps_is_critical",
"_ps_relaunch_max_num",
"_relaunch_on_worker_failure",
"_relaunch_pod",
"_relaunch_typed_pod",
"_should_relaunch",
"_speed_monitor",
"_start_launch_waiting_workers_time",
"_stop_launch_worker_for_ps",
"_stop_monitor",
"_use_ddp",
"_wait_pending_relaunch",
"_workers_waiting_ps_running",
"__annotations__",
"__bool__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager. | ()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Compose the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there are still "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All tasks completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exits with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
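The record above (86-89-37) splits the source at a single cursor position, rendered as " | " inside the code, and pairs the text on either side of that cursor with the token expected there (get_job_uuid). Below is a minimal sketch of stitching such a record back into the completed line; the variable names are illustrative placeholders rather than the dataset's actual field names.
# Illustrative reconstruction of record 86-89-37; all names here are placeholders.
code_before = "job_uuid = self.node_manager."  # fragment left of the cursor marker
expected_token = "get_job_uuid"                # token recorded for this cursor
code_after = "()"                              # fragment right of the cursor marker
completed_line = code_before + expected_token + code_after
assert completed_line == "job_uuid = self.node_manager.get_job_uuid()"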
86 | 86-97-17 | inproject | task_manager | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Compose the components
if self.task_manager and self.node_manager:
self. | .set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there are still "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All tasks completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exits with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-97-30 | inproject | set_task_timeout_callback | [
"finished",
"get_dataset",
"get_dataset_checkpoint",
"get_dataset_epoch",
"get_dataset_task",
"new_dataset",
"recover_tasks",
"relaunch_timeout_worker",
"remove_running_worker",
"report_dataset_task",
"reset_worker_start_task_time",
"restore_dataset_from_checkpoint",
"set_task_timeout_callback",
"start",
"training_started",
"_check_and_reassign_timeout_tasks",
"_datasets",
"_invoke_task_timeout_callback",
"_lock",
"_should_stop",
"_speed_monitor",
"_task_timeout_callbacks",
"_worker_start_task_time",
"__annotations__",
"__bool__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Compose the components
if self.task_manager and self.node_manager:
self.task_manager. | (
self.node_manager.remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there are still "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All tasks completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exits with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-98-21 | inproject | node_manager | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Compose the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self. | .remove_worker
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there are still "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All tasks completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exits with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-98-34 | inproject | remove_worker | [
"add_pod_event_callback",
"all_critical_node_completed",
"all_workers_deleted",
"all_workers_exited",
"all_workers_failed",
"get_all_training_nodes",
"get_cur_cluster_ps",
"get_job_uuid",
"get_next_cluster_ps",
"get_running_workers",
"ready_for_new_ps_cluster",
"remove_training_nodes",
"remove_worker",
"start",
"start_auto_scale",
"stop",
"update_node_resource_usage",
"_chief_worker_started",
"_critical_worker_index",
"_deleted_ps_pod_ids",
"_get_pod_counter",
"_get_worker_status_counter",
"_init_job_nodes",
"_job_nodes",
"_job_resource",
"_job_uuid",
"_k8s_client",
"_last_pod_stats",
"_lock",
"_monitor_nodes",
"_node_watcher",
"_pending_relaunch_count",
"_pod_event_callbacks",
"_process_event",
"_process_list_nodes",
"_process_node_events",
"_ps_is_critical",
"_ps_relaunch_max_num",
"_relaunch_on_worker_failure",
"_relaunch_pod",
"_relaunch_typed_pod",
"_should_relaunch",
"_speed_monitor",
"_start_launch_waiting_workers_time",
"_stop_launch_worker_for_ps",
"_stop_monitor",
"_use_ddp",
"_wait_pending_relaunch",
"_workers_waiting_ps_running",
"__annotations__",
"__bool__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__ge__",
"__getattribute__",
"__gt__",
"__hash__",
"__init__",
"__init_subclass__",
"__le__",
"__lt__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Compose the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager. |
)
if self.node_manager:
self._add_pod_event_callback()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there are still "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All tasks completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exits with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
86 | 86-101-17 | infile | _add_pod_event_callback | [
"elastic_ps_service",
"job_metric_collector",
"node_manager",
"prepare",
"rendezvous_server",
"request_stop",
"run",
"speed_monitor",
"stop",
"task_manager",
"_add_pod_event_callback",
"_args",
"_create_master_grpc_service",
"_create_metric_collector_if_needed",
"_exit_code",
"_exit_reason",
"_master_server",
"_stop_requested",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__doc__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__module__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__"
] | # Copyright 2022 The DLRover Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from dlrover.python.common.constants import DistributionStrategy, JobExitReason
from dlrover.python.common.log_utils import default_logger as logger
from dlrover.python.elastic_training.elastic_ps import ElasticPsService
from dlrover.python.master.monitor.speed_monitor import SpeedMonitor
from dlrover.python.master.node_manager.event_callback import (
TaskRescheduleCallback,
)
from dlrover.python.master.node_manager.node_manager import create_node_manager
from dlrover.python.master.servicer import create_master_service
from dlrover.python.master.shard_manager.task_manager import TaskManager
from dlrover.python.master.stats_collector.job_collector import (
JobMetricCollector,
)
def _create_rendezvous_server_if_needed(args):
master_ip = os.getenv("MY_POD_IP", "localhost")
if args.use_ddp:
logger.info("call DDPRendezvousServer, master_ip:{}".format(master_ip))
return None
elif args.distribution_strategy != DistributionStrategy.ALLREDUCE:
return None
else:
logger.info(
"call HorovodRendezvousServer, master_ip:{}".format(master_ip)
)
return None
def _create_elastic_ps_service_if_needed(args):
if args.distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
return ElasticPsService()
return None
class Master(object):
def __init__(self, args):
self.speed_monitor = SpeedMonitor()
self.node_manager = (
create_node_manager(args, self.speed_monitor)
if args.need_node_manager
else None
)
self.task_manager = (
TaskManager(args.relaunch_timeout_worker, self.speed_monitor)
if args.need_task_manager
else None
)
self.rendezvous_server = _create_rendezvous_server_if_needed(args)
self.job_metric_collector = self._create_metric_collector_if_needed(
args
)
self.elastic_ps_service = _create_elastic_ps_service_if_needed(args)
self._master_server = self._create_master_grpc_service(args)
self._args = args
self._stop_requested = False
self._exit_code = 0
self._exit_reason = None
def _create_master_grpc_service(self, args):
return create_master_service(
args.port,
self.task_manager,
self.node_manager,
self.speed_monitor,
self.rendezvous_server,
self.job_metric_collector,
self.elastic_ps_service,
)
def _create_metric_collector_if_needed(self, args):
if not args.need_node_manager:
return None
job_uuid = self.node_manager.get_job_uuid()
return JobMetricCollector(
job_uuid, args.namespace, args.cluster, args.user
)
def prepare(self):
# Compose the components
if self.task_manager and self.node_manager:
self.task_manager.set_task_timeout_callback(
self.node_manager.remove_worker
)
if self.node_manager:
self. | ()
# Start the components one by one
if self.task_manager:
self.task_manager.start()
if self.rendezvous_server:
self.rendezvous_server.start()
if self.node_manager:
self.node_manager.start()
if self.job_metric_collector:
self.job_metric_collector.report_job_type(
self._args.distribution_strategy
)
# Start the master GRPC server
logger.info("Starting master RPC server")
self._master_server.start()
logger.info("Master RPC server started")
def _add_pod_event_callback(self):
# Add PodEventCallbacks for the listeners of Pod events.
if self.task_manager:
self.node_manager.add_pod_event_callback(
TaskRescheduleCallback(self.task_manager)
)
def run(self):
"""
The main loop of master.
Dispatch the tasks to the workers until all the tasks are completed.
"""
try:
while True:
if self._stop_requested:
break
if (
self.node_manager
and self.node_manager.all_workers_exited()
):
if self.node_manager.all_workers_failed():
logger.error("All workers failed")
self._exit_code = 1
self._exit_reason = JobExitReason.UNKNOWN_ERROR
break
if self.task_manager and not self.task_manager.finished():
logger.warning(
"All workers exited but there are still "
"unfinished tasks",
)
break
if (
self.task_manager
and self.task_manager.finished()
and (
not self.node_manager
or self.node_manager.all_critical_node_completed()
)
):
logger.info("All tasks completed")
break
time.sleep(30)
except KeyboardInterrupt:
logger.warning("Server stopping")
finally:
if self.node_manager:
self.node_manager.stop()
self.stop()
return self._exit_code
def stop(self):
"""
Stop all the components.
Make sure that the created services and components are shut down.
"""
if self._exit_code == 0 and not self._exit_reason:
self._exit_reason = JobExitReason.SUCCEEDED
logger.info("Job exits with the reason {}".format(self._exit_reason))
if self.job_metric_collector:
self.job_metric_collector.report_job_exit_reason_to_easydl(
self._exit_reason
)
logger.info("Stopping master")
logger.info("Stopping RPC server")
self._master_server.stop(grace=None)
logger.info("RPC server stopped")
logger.info("Master stopped")
def request_stop(self, success, reason, msg=""):
self._stop_requested = True
self._exit_reason = reason
if success:
self._exit_code = 0
logger.info(msg)
else:
self._exit_code = 1
logger.error(msg)
| intelligent-machine-learning__dlrover |
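The final record (86-101-17) places the cursor at "self. | ()" and records _add_pod_event_callback as the expected token; that name also appears in the attribute list attached to the record. Below is a small sketch of checking a recorded token against such a list; the candidates shown are abbreviated from the record above, and the helper name is an illustrative assumption.
# Sketch only: confirm the recorded token appears among the listed attribute names.
candidates = [
    "prepare", "run", "stop", "request_stop",          # abbreviated from the record
    "task_manager", "node_manager",
    "_add_pod_event_callback", "__init__", "__class__",
]
expected_token = "_add_pod_event_callback"
def token_in_candidates(token, names):
    # Simple membership check; the full record lists many more names.
    return token in names
assert token_in_candidates(expected_token, candidates)
# Completed call at the cursor: self._add_pod_event_callback()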