code: stringlengths 161-233k
apis: sequencelengths 1-24
extract_api: stringlengths 162-68.5k
import os
from argparse import Namespace, _SubParsersAction

from llama_index import SimpleDirectoryReader

from .configuration import load_index, save_index


def add_cli(args: Namespace) -> None:
    """Handle subcommand "add"."""
    index = load_index()
    for p in args.files:
        if not os.path.exists(p):
            raise FileNotFoundError(p)
        if os.path.isdir(p):
            documents = SimpleDirectoryReader(p).load_data()
            for document in documents:
                index.insert(document)
        else:
            documents = SimpleDirectoryReader(input_files=[p]).load_data()
            for document in documents:
                index.insert(document)
    save_index(index)


def register_add_cli(subparsers: _SubParsersAction) -> None:
    """Register subcommand "add" to ArgumentParser."""
    parser = subparsers.add_parser("add")
    parser.add_argument(
        "files",
        default=".",
        nargs="+",
        help="Files to add",
    )
    parser.set_defaults(func=add_cli)
[ "llama_index.SimpleDirectoryReader" ]
[((368, 384), 'os.path.isdir', 'os.path.isdir', (['p'], {}), '(p)\n', (381, 384), False, 'import os\n'), ((299, 316), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (313, 316), False, 'import os\n'), ((410, 434), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['p'], {}), '(p)\n', (431, 434), False, 'from llama_index import SimpleDirectoryReader\n'), ((563, 601), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[p]'}), '(input_files=[p])\n', (584, 601), False, 'from llama_index import SimpleDirectoryReader\n')]
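A short usage sketch for the sample above: wiring register_add_cli into a top-level argparse parser and dispatching to add_cli. The program name and the example file paths are illustrative assumptions, not part of the sample.

from argparse import ArgumentParser

def build_parser() -> ArgumentParser:
    # "llamaindex-cli" is a placeholder program name for this sketch
    parser = ArgumentParser(prog="llamaindex-cli")
    subparsers = parser.add_subparsers(dest="command", required=True)
    register_add_cli(subparsers)  # registers the "add" subcommand defined above
    return parser

# args = build_parser().parse_args(["add", "docs/", "notes.txt"])  # hypothetical paths
# args.func(args)  # runs add_cli: loads the index, inserts documents, saves the index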
from typing import Dict, List, Type

from llama_index.agent import OpenAIAgent, ReActAgent
from llama_index.agent.types import BaseAgent
from llama_index.llms import Anthropic, OpenAI
from llama_index.llms.llama_utils import messages_to_prompt
from llama_index.llms.llm import LLM
from llama_index.llms.replicate import Replicate

OPENAI_MODELS = [
    "text-davinci-003",
    "gpt-3.5-turbo-0613",
    "gpt-4-0613",
]
ANTHROPIC_MODELS = ["claude-instant-1", "claude-instant-1.2", "claude-2", "claude-2.0"]
LLAMA_MODELS = [
    "llama13b-v2-chat",
    "llama70b-v2-chat",
]
REPLICATE_MODELS: List[str] = []
ALL_MODELS = OPENAI_MODELS + ANTHROPIC_MODELS + LLAMA_MODELS
AGENTS: Dict[str, Type[BaseAgent]] = {
    "react": ReActAgent,
    "openai": OpenAIAgent,
}

LLAMA_13B_V2_CHAT = (
    "a16z-infra/llama13b-v2-chat:"
    "df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5"
)
LLAMA_70B_V2_CHAT = (
    "replicate/llama70b-v2-chat:"
    "e951f18578850b652510200860fc4ea62b3b16fac280f83ff32282f87bbd2e48"
)


def get_model(model: str) -> LLM:
    llm: LLM
    if model in OPENAI_MODELS:
        llm = OpenAI(model=model)
    elif model in ANTHROPIC_MODELS:
        llm = Anthropic(model=model)
    elif model in LLAMA_MODELS:
        model_dict = {
            "llama13b-v2-chat": LLAMA_13B_V2_CHAT,
            "llama70b-v2-chat": LLAMA_70B_V2_CHAT,
        }
        replicate_model = model_dict[model]
        llm = Replicate(
            model=replicate_model,
            temperature=0.01,
            context_window=4096,
            # override message representation for llama 2
            messages_to_prompt=messages_to_prompt,
        )
    else:
        raise ValueError(f"Unknown model {model}")
    return llm


def is_valid_combination(agent: str, model: str) -> bool:
    if agent == "openai" and model not in ["gpt-3.5-turbo-0613", "gpt-4-0613"]:
        print(f"{agent} does not work with {model}")
        return False
    return True
[ "llama_index.llms.Anthropic", "llama_index.llms.OpenAI", "llama_index.llms.replicate.Replicate" ]
[((1116, 1135), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': 'model'}), '(model=model)\n', (1122, 1135), False, 'from llama_index.llms import Anthropic, OpenAI\n'), ((1186, 1208), 'llama_index.llms.Anthropic', 'Anthropic', ([], {'model': 'model'}), '(model=model)\n', (1195, 1208), False, 'from llama_index.llms import Anthropic, OpenAI\n'), ((1434, 1548), 'llama_index.llms.replicate.Replicate', 'Replicate', ([], {'model': 'replicate_model', 'temperature': '(0.01)', 'context_window': '(4096)', 'messages_to_prompt': 'messages_to_prompt'}), '(model=replicate_model, temperature=0.01, context_window=4096,\n messages_to_prompt=messages_to_prompt)\n', (1443, 1548), False, 'from llama_index.llms.replicate import Replicate\n')]
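A small sketch of how the constants and helpers in the sample above might be combined to enumerate agent/model pairs; the loop itself and the commented agent construction are illustrative assumptions.

for agent_name, agent_cls in AGENTS.items():
    for model_name in ALL_MODELS:
        if not is_valid_combination(agent_name, model_name):
            continue  # e.g. the OpenAI agent is limited to the function-calling models
        llm = get_model(model_name)  # resolves to OpenAI, Anthropic, or Replicate
        # agent = agent_cls.from_tools(tools=[], llm=llm)  # tools depend on the benchmark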
import asyncio
import os
import shutil
from argparse import ArgumentParser
from glob import iglob
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Union, cast

from llama_index.core import (
    SimpleDirectoryReader,
    VectorStoreIndex,
)
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.base.response.schema import (
    RESPONSE_TYPE,
    StreamingResponse,
    Response,
)
from llama_index.core.bridge.pydantic import BaseModel, Field, validator
from llama_index.core.chat_engine import CondenseQuestionChatEngine
from llama_index.core.indices.service_context import ServiceContext
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.llms import LLM
from llama_index.core.query_engine import CustomQueryEngine
from llama_index.core.query_pipeline.components.function import FnComponent
from llama_index.core.query_pipeline.query import QueryPipeline
from llama_index.core.readers.base import BaseReader
from llama_index.core.response_synthesizers import CompactAndRefine
from llama_index.core.utils import get_cache_dir


def _try_load_openai_llm():
    try:
        from llama_index.llms.openai import OpenAI  # pants: no-infer-dep

        return OpenAI(model="gpt-3.5-turbo", streaming=True)
    except ImportError:
        raise ImportError(
            "`llama-index-llms-openai` package not found, "
            "please run `pip install llama-index-llms-openai`"
        )


RAG_HISTORY_FILE_NAME = "files_history.txt"


def default_ragcli_persist_dir() -> str:
    return str(Path(get_cache_dir()) / "rag_cli")


def query_input(query_str: Optional[str] = None) -> str:
    return query_str or ""


class QueryPipelineQueryEngine(CustomQueryEngine):
    query_pipeline: QueryPipeline = Field(
        description="Query Pipeline to use for Q&A.",
    )

    def custom_query(self, query_str: str) -> RESPONSE_TYPE:
        return self.query_pipeline.run(query_str=query_str)

    async def acustom_query(self, query_str: str) -> RESPONSE_TYPE:
        return await self.query_pipeline.arun(query_str=query_str)


class RagCLI(BaseModel):
    """
    CLI tool for chatting with output of a IngestionPipeline via a QueryPipeline.
    """

    ingestion_pipeline: IngestionPipeline = Field(
        description="Ingestion pipeline to run for RAG ingestion."
    )
    verbose: bool = Field(
        description="Whether to print out verbose information during execution.",
        default=False,
    )
    persist_dir: str = Field(
        description="Directory to persist ingestion pipeline.",
        default_factory=default_ragcli_persist_dir,
    )
    llm: LLM = Field(
        description="Language model to use for response generation.",
        default_factory=lambda: _try_load_openai_llm(),
    )
    query_pipeline: Optional[QueryPipeline] = Field(
        description="Query Pipeline to use for Q&A.",
        default=None,
    )
    chat_engine: Optional[CondenseQuestionChatEngine] = Field(
        description="Chat engine to use for chatting.",
        default_factory=None,
    )
    file_extractor: Optional[Dict[str, BaseReader]] = Field(
        description="File extractor to use for extracting text from files.",
        default=None,
    )

    class Config:
        arbitrary_types_allowed = True

    @validator("query_pipeline", always=True)
    def query_pipeline_from_ingestion_pipeline(
        cls, query_pipeline: Any, values: Dict[str, Any]
    ) -> Optional[QueryPipeline]:
        """
        If query_pipeline is not provided, create one from ingestion_pipeline.
        """
        if query_pipeline is not None:
            return query_pipeline

        ingestion_pipeline = cast(IngestionPipeline, values["ingestion_pipeline"])
        if ingestion_pipeline.vector_store is None:
            return None
        verbose = cast(bool, values["verbose"])
        query_component = FnComponent(
            fn=query_input, output_key="output", req_params={"query_str"}
        )
        llm = cast(LLM, values["llm"])

        # get embed_model from transformations if possible
        embed_model = None
        if ingestion_pipeline.transformations is not None:
            for transformation in ingestion_pipeline.transformations:
                if isinstance(transformation, BaseEmbedding):
                    embed_model = transformation
                    break

        service_context = ServiceContext.from_defaults(
            llm=llm, embed_model=embed_model or "default"
        )
        retriever = VectorStoreIndex.from_vector_store(
            ingestion_pipeline.vector_store, service_context=service_context
        ).as_retriever(similarity_top_k=8)
        response_synthesizer = CompactAndRefine(
            service_context=service_context, streaming=True, verbose=verbose
        )

        # define query pipeline
        query_pipeline = QueryPipeline(verbose=verbose)
        query_pipeline.add_modules(
            {
                "query": query_component,
                "retriever": retriever,
                "summarizer": response_synthesizer,
            }
        )
        query_pipeline.add_link("query", "retriever")
        query_pipeline.add_link("retriever", "summarizer", dest_key="nodes")
        query_pipeline.add_link("query", "summarizer", dest_key="query_str")
        return query_pipeline

    @validator("chat_engine", always=True)
    def chat_engine_from_query_pipeline(
        cls, chat_engine: Any, values: Dict[str, Any]
    ) -> Optional[CondenseQuestionChatEngine]:
        """
        If chat_engine is not provided, create one from query_pipeline.
        """
        if chat_engine is not None:
            return chat_engine

        if values.get("query_pipeline", None) is None:
            values["query_pipeline"] = cls.query_pipeline_from_ingestion_pipeline(
                query_pipeline=None, values=values
            )

        query_pipeline = cast(QueryPipeline, values["query_pipeline"])
        if query_pipeline is None:
            return None
        query_engine = QueryPipelineQueryEngine(query_pipeline=query_pipeline)  # type: ignore
        verbose = cast(bool, values["verbose"])
        llm = cast(LLM, values["llm"])
        return CondenseQuestionChatEngine.from_defaults(
            query_engine=query_engine, llm=llm, verbose=verbose
        )

    async def handle_cli(
        self,
        files: Optional[str] = None,
        question: Optional[str] = None,
        chat: bool = False,
        verbose: bool = False,
        clear: bool = False,
        create_llama: bool = False,
        **kwargs: Dict[str, Any],
    ) -> None:
        """
        Entrypoint for local document RAG CLI tool.
        """
        if clear:
            # delete self.persist_dir directory including all subdirectories and files
            if os.path.exists(self.persist_dir):
                # Ask for confirmation
                response = input(
                    f"Are you sure you want to delete data within {self.persist_dir}? "
                    "[y/N] "
                )
                if response.strip().lower() != "y":
                    print("Aborted.")
                    return
                os.system(f"rm -rf {self.persist_dir}")
            print(f"Successfully cleared {self.persist_dir}")

        self.verbose = verbose
        ingestion_pipeline = cast(IngestionPipeline, self.ingestion_pipeline)
        if self.verbose:
            print("Saving/Loading from persist_dir: ", self.persist_dir)
        if files is not None:
            documents = []
            for _file in iglob(files, recursive=True):
                _file = os.path.abspath(_file)
                if os.path.isdir(_file):
                    reader = SimpleDirectoryReader(
                        input_dir=_file,
                        filename_as_id=True,
                        file_extractor=self.file_extractor,
                    )
                else:
                    reader = SimpleDirectoryReader(
                        input_files=[_file],
                        filename_as_id=True,
                        file_extractor=self.file_extractor,
                    )

                documents.extend(reader.load_data(show_progress=verbose))

            await ingestion_pipeline.arun(show_progress=verbose, documents=documents)
            ingestion_pipeline.persist(persist_dir=self.persist_dir)

            # Append the `--files` argument to the history file
            with open(f"{self.persist_dir}/{RAG_HISTORY_FILE_NAME}", "a") as f:
                f.write(files + "\n")

        if create_llama:
            if shutil.which("npx") is None:
                print(
                    "`npx` is not installed. Please install it by calling `npm install -g npx`"
                )
            else:
                history_file_path = Path(f"{self.persist_dir}/{RAG_HISTORY_FILE_NAME}")
                if not history_file_path.exists():
                    print(
                        "No data has been ingested, "
                        "please specify `--files` to create llama dataset."
                    )
                else:
                    with open(history_file_path) as f:
                        stored_paths = {line.strip() for line in f if line.strip()}
                    if len(stored_paths) == 0:
                        print(
                            "No data has been ingested, "
                            "please specify `--files` to create llama dataset."
                        )
                    elif len(stored_paths) > 1:
                        print(
                            "Multiple files or folders were ingested, which is not supported by create-llama. "
                            "Please call `llamaindex-cli rag --clear` to clear the cache first, "
                            "then call `llamaindex-cli rag --files` again with a single folder or file"
                        )
                    else:
                        path = stored_paths.pop()
                        if "*" in path:
                            print(
                                "Glob pattern is not supported by create-llama. "
                                "Please call `llamaindex-cli rag --clear` to clear the cache first, "
                                "then call `llamaindex-cli rag --files` again with a single folder or file."
                            )
                        elif not os.path.exists(path):
                            print(
                                f"The path {path} does not exist. "
                                "Please call `llamaindex-cli rag --clear` to clear the cache first, "
                                "then call `llamaindex-cli rag --files` again with a single folder or file."
                            )
                        else:
                            print(f"Calling create-llama using data from {path} ...")
                            command_args = [
                                "npx",
                                "create-llama@latest",
                                "--frontend",
                                "--template",
                                "streaming",
                                "--framework",
                                "fastapi",
                                "--ui",
                                "shadcn",
                                "--vector-db",
                                "none",
                                "--engine",
                                "context",
                                f"--files {path}",
                            ]
                            os.system(" ".join(command_args))

        if question is not None:
            await self.handle_question(question)
        if chat:
            await self.start_chat_repl()

    async def handle_question(self, question: str) -> None:
        if self.query_pipeline is None:
            raise ValueError("query_pipeline is not defined.")
        query_pipeline = cast(QueryPipeline, self.query_pipeline)
        query_pipeline.verbose = self.verbose
        chat_engine = cast(CondenseQuestionChatEngine, self.chat_engine)
        response = chat_engine.chat(question)

        if isinstance(response, StreamingResponse):
            response.print_response_stream()
        else:
            response = cast(Response, response)
            print(response)

    async def start_chat_repl(self) -> None:
        """
        Start a REPL for chatting with the agent.
        """
        if self.query_pipeline is None:
            raise ValueError("query_pipeline is not defined.")
        chat_engine = cast(CondenseQuestionChatEngine, self.chat_engine)
        chat_engine.streaming_chat_repl()

    @classmethod
    def add_parser_args(
        cls,
        parser: Union[ArgumentParser, Any],
        instance_generator: Optional[Callable[[], "RagCLI"]],
    ) -> None:
        if instance_generator:
            parser.add_argument(
                "-q",
                "--question",
                type=str,
                help="The question you want to ask.",
                required=False,
            )
            parser.add_argument(
                "-f",
                "--files",
                type=str,
                help=(
                    "The name of the file or directory you want to ask a question about,"
                    'such as "file.pdf".'
                ),
            )
            parser.add_argument(
                "-c",
                "--chat",
                help="If flag is present, opens a chat REPL.",
                action="store_true",
            )
            parser.add_argument(
                "-v",
                "--verbose",
                help="Whether to print out verbose information during execution.",
                action="store_true",
            )
            parser.add_argument(
                "--clear",
                help="Clears out all currently embedded data.",
                action="store_true",
            )
            parser.add_argument(
                "--create-llama",
                help="Create a LlamaIndex application with your embedded data.",
                required=False,
                action="store_true",
            )
            parser.set_defaults(
                func=lambda args: asyncio.run(
                    instance_generator().handle_cli(**vars(args))
                )
            )

    def cli(self) -> None:
        """
        Entrypoint for CLI tool.
        """
        parser = ArgumentParser(description="LlamaIndex RAG Q&A tool.")
        subparsers = parser.add_subparsers(
            title="commands", dest="command", required=True
        )
        llamarag_parser = subparsers.add_parser(
            "rag", help="Ask a question to a document / a directory of documents."
        )
        self.add_parser_args(llamarag_parser, lambda: self)

        # Parse the command-line arguments
        args = parser.parse_args()

        # Call the appropriate function based on the command
        args.func(args)
[ "llama_index.llms.openai.OpenAI", "llama_index.core.bridge.pydantic.validator", "llama_index.core.VectorStoreIndex.from_vector_store", "llama_index.core.indices.service_context.ServiceContext.from_defaults", "llama_index.core.bridge.pydantic.Field", "llama_index.core.query_pipeline.components.function.FnComponent", "llama_index.core.utils.get_cache_dir", "llama_index.core.SimpleDirectoryReader", "llama_index.core.query_pipeline.query.QueryPipeline", "llama_index.core.response_synthesizers.CompactAndRefine", "llama_index.core.chat_engine.CondenseQuestionChatEngine.from_defaults" ]
[((1789, 1840), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Query Pipeline to use for Q&A."""'}), "(description='Query Pipeline to use for Q&A.')\n", (1794, 1840), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2284, 2349), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Ingestion pipeline to run for RAG ingestion."""'}), "(description='Ingestion pipeline to run for RAG ingestion.')\n", (2289, 2349), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2384, 2488), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Whether to print out verbose information during execution."""', 'default': '(False)'}), "(description=\n 'Whether to print out verbose information during execution.', default=False\n )\n", (2389, 2488), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2525, 2634), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Directory to persist ingestion pipeline."""', 'default_factory': 'default_ragcli_persist_dir'}), "(description='Directory to persist ingestion pipeline.',\n default_factory=default_ragcli_persist_dir)\n", (2530, 2634), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2854, 2919), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Query Pipeline to use for Q&A."""', 'default': 'None'}), "(description='Query Pipeline to use for Q&A.', default=None)\n", (2859, 2919), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2999, 3074), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Chat engine to use for chatting."""', 'default_factory': 'None'}), "(description='Chat engine to use for chatting.', default_factory=None)\n", (3004, 3074), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((3152, 3244), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""File extractor to use for extracting text from files."""', 'default': 'None'}), "(description='File extractor to use for extracting text from files.',\n default=None)\n", (3157, 3244), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((3328, 3368), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""query_pipeline"""'], {'always': '(True)'}), "('query_pipeline', always=True)\n", (3337, 3368), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((5385, 5422), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""chat_engine"""'], {'always': '(True)'}), "('chat_engine', always=True)\n", (5394, 5422), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((1245, 1290), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'streaming': '(True)'}), "(model='gpt-3.5-turbo', streaming=True)\n", (1251, 1290), False, 'from llama_index.llms.openai import OpenAI\n'), ((3714, 3767), 'typing.cast', 'cast', (['IngestionPipeline', "values['ingestion_pipeline']"], {}), "(IngestionPipeline, values['ingestion_pipeline'])\n", (3718, 3767), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((3862, 3891), 'typing.cast', 'cast', (['bool', "values['verbose']"], {}), "(bool, values['verbose'])\n", (3866, 3891), False, 'from typing import Any, Callable, Dict, Optional, 
Union, cast\n'), ((3918, 3992), 'llama_index.core.query_pipeline.components.function.FnComponent', 'FnComponent', ([], {'fn': 'query_input', 'output_key': '"""output"""', 'req_params': "{'query_str'}"}), "(fn=query_input, output_key='output', req_params={'query_str'})\n", (3929, 3992), False, 'from llama_index.core.query_pipeline.components.function import FnComponent\n'), ((4029, 4053), 'typing.cast', 'cast', (['LLM', "values['llm']"], {}), "(LLM, values['llm'])\n", (4033, 4053), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((4434, 4509), 'llama_index.core.indices.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': "(embed_model or 'default')"}), "(llm=llm, embed_model=embed_model or 'default')\n", (4462, 4509), False, 'from llama_index.core.indices.service_context import ServiceContext\n'), ((4739, 4826), 'llama_index.core.response_synthesizers.CompactAndRefine', 'CompactAndRefine', ([], {'service_context': 'service_context', 'streaming': '(True)', 'verbose': 'verbose'}), '(service_context=service_context, streaming=True, verbose=\n verbose)\n', (4755, 4826), False, 'from llama_index.core.response_synthesizers import CompactAndRefine\n'), ((4902, 4932), 'llama_index.core.query_pipeline.query.QueryPipeline', 'QueryPipeline', ([], {'verbose': 'verbose'}), '(verbose=verbose)\n', (4915, 4932), False, 'from llama_index.core.query_pipeline.query import QueryPipeline\n'), ((5958, 6003), 'typing.cast', 'cast', (['QueryPipeline', "values['query_pipeline']"], {}), "(QueryPipeline, values['query_pipeline'])\n", (5962, 6003), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((6176, 6205), 'typing.cast', 'cast', (['bool', "values['verbose']"], {}), "(bool, values['verbose'])\n", (6180, 6205), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((6220, 6244), 'typing.cast', 'cast', (['LLM', "values['llm']"], {}), "(LLM, values['llm'])\n", (6224, 6244), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((6260, 6357), 'llama_index.core.chat_engine.CondenseQuestionChatEngine.from_defaults', 'CondenseQuestionChatEngine.from_defaults', ([], {'query_engine': 'query_engine', 'llm': 'llm', 'verbose': 'verbose'}), '(query_engine=query_engine, llm=llm,\n verbose=verbose)\n', (6300, 6357), False, 'from llama_index.core.chat_engine import CondenseQuestionChatEngine\n'), ((7378, 7426), 'typing.cast', 'cast', (['IngestionPipeline', 'self.ingestion_pipeline'], {}), '(IngestionPipeline, self.ingestion_pipeline)\n', (7382, 7426), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((12054, 12094), 'typing.cast', 'cast', (['QueryPipeline', 'self.query_pipeline'], {}), '(QueryPipeline, self.query_pipeline)\n', (12058, 12094), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((12163, 12213), 'typing.cast', 'cast', (['CondenseQuestionChatEngine', 'self.chat_engine'], {}), '(CondenseQuestionChatEngine, self.chat_engine)\n', (12167, 12213), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((12693, 12743), 'typing.cast', 'cast', (['CondenseQuestionChatEngine', 'self.chat_engine'], {}), '(CondenseQuestionChatEngine, self.chat_engine)\n', (12697, 12743), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((14601, 14655), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""LlamaIndex RAG Q&A tool."""'}), "(description='LlamaIndex RAG Q&A tool.')\n", 
(14615, 14655), False, 'from argparse import ArgumentParser\n'), ((6863, 6895), 'os.path.exists', 'os.path.exists', (['self.persist_dir'], {}), '(self.persist_dir)\n', (6877, 6895), False, 'import os\n'), ((7607, 7635), 'glob.iglob', 'iglob', (['files'], {'recursive': '(True)'}), '(files, recursive=True)\n', (7612, 7635), False, 'from glob import iglob\n'), ((12395, 12419), 'typing.cast', 'cast', (['Response', 'response'], {}), '(Response, response)\n', (12399, 12419), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((1584, 1599), 'llama_index.core.utils.get_cache_dir', 'get_cache_dir', ([], {}), '()\n', (1597, 1599), False, 'from llama_index.core.utils import get_cache_dir\n'), ((4552, 4656), 'llama_index.core.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['ingestion_pipeline.vector_store'], {'service_context': 'service_context'}), '(ingestion_pipeline.vector_store,\n service_context=service_context)\n', (4586, 4656), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((7215, 7254), 'os.system', 'os.system', (['f"""rm -rf {self.persist_dir}"""'], {}), "(f'rm -rf {self.persist_dir}')\n", (7224, 7254), False, 'import os\n'), ((7661, 7683), 'os.path.abspath', 'os.path.abspath', (['_file'], {}), '(_file)\n', (7676, 7683), False, 'import os\n'), ((7703, 7723), 'os.path.isdir', 'os.path.isdir', (['_file'], {}), '(_file)\n', (7716, 7723), False, 'import os\n'), ((8646, 8665), 'shutil.which', 'shutil.which', (['"""npx"""'], {}), "('npx')\n", (8658, 8665), False, 'import shutil\n'), ((8866, 8917), 'pathlib.Path', 'Path', (['f"""{self.persist_dir}/{RAG_HISTORY_FILE_NAME}"""'], {}), "(f'{self.persist_dir}/{RAG_HISTORY_FILE_NAME}')\n", (8870, 8917), False, 'from pathlib import Path\n'), ((7754, 7854), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '_file', 'filename_as_id': '(True)', 'file_extractor': 'self.file_extractor'}), '(input_dir=_file, filename_as_id=True, file_extractor=\n self.file_extractor)\n', (7775, 7854), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((7996, 8099), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[_file]', 'filename_as_id': '(True)', 'file_extractor': 'self.file_extractor'}), '(input_files=[_file], filename_as_id=True,\n file_extractor=self.file_extractor)\n', (8017, 8099), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((10477, 10497), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (10491, 10497), False, 'import os\n')]
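A rough sketch of how the RagCLI class above might be instantiated; the Chroma vector store and OpenAI embedding model named in the comments are assumptions that require their own integration packages.

from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter

# from llama_index.embeddings.openai import OpenAIEmbedding       # assumed extra package
# from llama_index.vector_stores.chroma import ChromaVectorStore  # assumed extra package

def build_rag_cli(vector_store, embed_model) -> "RagCLI":
    # RagCLI reuses the pipeline's transformations: the first BaseEmbedding it
    # finds also becomes the query-time embed_model in the generated QueryPipeline.
    pipeline = IngestionPipeline(
        transformations=[SentenceSplitter(), embed_model],
        vector_store=vector_store,  # must be set, otherwise no query pipeline is built
    )
    return RagCLI(ingestion_pipeline=pipeline)

# build_rag_cli(ChromaVectorStore(...), OpenAIEmbedding()).cli()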
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Callable, List, Optional

if TYPE_CHECKING:
    from llama_index.core.service_context import ServiceContext

from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.callbacks.base import BaseCallbackHandler, CallbackManager
from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model
from llama_index.core.indices.prompt_helper import PromptHelper
from llama_index.core.llms import LLM
from llama_index.core.llms.utils import LLMType, resolve_llm
from llama_index.core.node_parser import NodeParser, SentenceSplitter
from llama_index.core.schema import TransformComponent
from llama_index.core.types import PydanticProgramMode
from llama_index.core.utils import get_tokenizer, set_global_tokenizer


@dataclass
class _Settings:
    """Settings for the Llama Index, lazily initialized."""

    # lazy initialization
    _llm: Optional[LLM] = None
    _embed_model: Optional[BaseEmbedding] = None
    _callback_manager: Optional[CallbackManager] = None
    _tokenizer: Optional[Callable[[str], List[Any]]] = None
    _node_parser: Optional[NodeParser] = None
    _prompt_helper: Optional[PromptHelper] = None
    _transformations: Optional[List[TransformComponent]] = None

    # ---- LLM ----

    @property
    def llm(self) -> LLM:
        """Get the LLM."""
        if self._llm is None:
            self._llm = resolve_llm("default")

        if self._callback_manager is not None:
            self._llm.callback_manager = self._callback_manager

        return self._llm

    @llm.setter
    def llm(self, llm: LLMType) -> None:
        """Set the LLM."""
        self._llm = resolve_llm(llm)

    @property
    def pydantic_program_mode(self) -> PydanticProgramMode:
        """Get the pydantic program mode."""
        return self.llm.pydantic_program_mode

    @pydantic_program_mode.setter
    def pydantic_program_mode(self, pydantic_program_mode: PydanticProgramMode) -> None:
        """Set the pydantic program mode."""
        self.llm.pydantic_program_mode = pydantic_program_mode

    # ---- Embedding ----

    @property
    def embed_model(self) -> BaseEmbedding:
        """Get the embedding model."""
        if self._embed_model is None:
            self._embed_model = resolve_embed_model("default")

        if self._callback_manager is not None:
            self._embed_model.callback_manager = self._callback_manager

        return self._embed_model

    @embed_model.setter
    def embed_model(self, embed_model: EmbedType) -> None:
        """Set the embedding model."""
        self._embed_model = resolve_embed_model(embed_model)

    # ---- Callbacks ----

    @property
    def global_handler(self) -> Optional[BaseCallbackHandler]:
        """Get the global handler."""
        import llama_index.core

        # TODO: deprecated?
        return llama_index.core.global_handler

    @global_handler.setter
    def global_handler(self, eval_mode: str, **eval_params: Any) -> None:
        """Set the global handler."""
        from llama_index.core import set_global_handler

        # TODO: deprecated?
        set_global_handler(eval_mode, **eval_params)

    @property
    def callback_manager(self) -> CallbackManager:
        """Get the callback manager."""
        if self._callback_manager is None:
            self._callback_manager = CallbackManager()

        return self._callback_manager

    @callback_manager.setter
    def callback_manager(self, callback_manager: CallbackManager) -> None:
        """Set the callback manager."""
        self._callback_manager = callback_manager

    # ---- Tokenizer ----

    @property
    def tokenizer(self) -> Callable[[str], List[Any]]:
        """Get the tokenizer."""
        import llama_index.core

        if llama_index.core.global_tokenizer is None:
            return get_tokenizer()

        # TODO: deprecated?
        return llama_index.core.global_tokenizer

    @tokenizer.setter
    def tokenizer(self, tokenizer: Callable[[str], List[Any]]) -> None:
        """Set the tokenizer."""
        try:
            from transformers import PreTrainedTokenizerBase  # pants: no-infer-dep

            if isinstance(tokenizer, PreTrainedTokenizerBase):
                from functools import partial

                tokenizer = partial(tokenizer.encode, add_special_tokens=False)
        except ImportError:
            pass

        # TODO: deprecated?
        set_global_tokenizer(tokenizer)

    # ---- Node parser ----

    @property
    def node_parser(self) -> NodeParser:
        """Get the node parser."""
        if self._node_parser is None:
            self._node_parser = SentenceSplitter()

        if self._callback_manager is not None:
            self._node_parser.callback_manager = self._callback_manager

        return self._node_parser

    @node_parser.setter
    def node_parser(self, node_parser: NodeParser) -> None:
        """Set the node parser."""
        self._node_parser = node_parser

    @property
    def chunk_size(self) -> int:
        """Get the chunk size."""
        if hasattr(self.node_parser, "chunk_size"):
            return self.node_parser.chunk_size
        else:
            raise ValueError("Configured node parser does not have chunk size.")

    @chunk_size.setter
    def chunk_size(self, chunk_size: int) -> None:
        """Set the chunk size."""
        if hasattr(self.node_parser, "chunk_size"):
            self.node_parser.chunk_size = chunk_size
        else:
            raise ValueError("Configured node parser does not have chunk size.")

    @property
    def chunk_overlap(self) -> int:
        """Get the chunk overlap."""
        if hasattr(self.node_parser, "chunk_overlap"):
            return self.node_parser.chunk_overlap
        else:
            raise ValueError("Configured node parser does not have chunk overlap.")

    @chunk_overlap.setter
    def chunk_overlap(self, chunk_overlap: int) -> None:
        """Set the chunk overlap."""
        if hasattr(self.node_parser, "chunk_overlap"):
            self.node_parser.chunk_overlap = chunk_overlap
        else:
            raise ValueError("Configured node parser does not have chunk overlap.")

    # ---- Node parser alias ----

    @property
    def text_splitter(self) -> NodeParser:
        """Get the text splitter."""
        return self.node_parser

    @text_splitter.setter
    def text_splitter(self, text_splitter: NodeParser) -> None:
        """Set the text splitter."""
        self.node_parser = text_splitter

    @property
    def prompt_helper(self) -> PromptHelper:
        """Get the prompt helper."""
        if self._llm is not None and self._prompt_helper is None:
            self._prompt_helper = PromptHelper.from_llm_metadata(self._llm.metadata)
        elif self._prompt_helper is None:
            self._prompt_helper = PromptHelper()

        return self._prompt_helper

    @prompt_helper.setter
    def prompt_helper(self, prompt_helper: PromptHelper) -> None:
        """Set the prompt helper."""
        self._prompt_helper = prompt_helper

    @property
    def num_output(self) -> int:
        """Get the number of outputs."""
        return self.prompt_helper.num_output

    @num_output.setter
    def num_output(self, num_output: int) -> None:
        """Set the number of outputs."""
        self.prompt_helper.num_output = num_output

    @property
    def context_window(self) -> int:
        """Get the context window."""
        return self.prompt_helper.context_window

    @context_window.setter
    def context_window(self, context_window: int) -> None:
        """Set the context window."""
        self.prompt_helper.context_window = context_window

    # ---- Transformations ----

    @property
    def transformations(self) -> List[TransformComponent]:
        """Get the transformations."""
        if self._transformations is None:
            self._transformations = [self.node_parser]
        return self._transformations

    @transformations.setter
    def transformations(self, transformations: List[TransformComponent]) -> None:
        """Set the transformations."""
        self._transformations = transformations


# Singleton
Settings = _Settings()
#
-- Helper functions for deprecation/migration -- def llm_from_settings_or_context( settings: _Settings, context: Optional["ServiceContext"] ) -> LLM: """Get settings from either settings or context.""" if context is not None: return context.llm return settings.llm def embed_model_from_settings_or_context( settings: _Settings, context: Optional["ServiceContext"] ) -> BaseEmbedding: """Get settings from either settings or context.""" if context is not None: return context.embed_model return settings.embed_model def callback_manager_from_settings_or_context( settings: _Settings, context: Optional["ServiceContext"] ) -> CallbackManager: """Get settings from either settings or context.""" if context is not None: return context.callback_manager return settings.callback_manager def node_parser_from_settings_or_context( settings: _Settings, context: Optional["ServiceContext"] ) -> NodeParser: """Get settings from either settings or context.""" if context is not None: return context.node_parser return settings.node_parser def transformations_from_settings_or_context( settings: _Settings, context: Optional["ServiceContext"] ) -> List[TransformComponent]: """Get settings from either settings or context.""" if context is not None: return context.transformations return settings.transformations
[ "llama_index.core.llms.utils.resolve_llm", "llama_index.core.utils.get_tokenizer", "llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata", "llama_index.core.embeddings.utils.resolve_embed_model", "llama_index.core.node_parser.SentenceSplitter", "llama_index.core.callbacks.base.CallbackManager", "llama_index.core.set_global_handler", "llama_index.core.indices.prompt_helper.PromptHelper", "llama_index.core.utils.set_global_tokenizer" ]
[((1701, 1717), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (1712, 1717), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((2647, 2679), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (2666, 2679), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((3164, 3208), 'llama_index.core.set_global_handler', 'set_global_handler', (['eval_mode'], {}), '(eval_mode, **eval_params)\n', (3182, 3208), False, 'from llama_index.core import set_global_handler\n'), ((4474, 4505), 'llama_index.core.utils.set_global_tokenizer', 'set_global_tokenizer', (['tokenizer'], {}), '(tokenizer)\n', (4494, 4505), False, 'from llama_index.core.utils import get_tokenizer, set_global_tokenizer\n'), ((1435, 1457), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['"""default"""'], {}), "('default')\n", (1446, 1457), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((2311, 2341), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['"""default"""'], {}), "('default')\n", (2330, 2341), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((3395, 3412), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (3410, 3412), False, 'from llama_index.core.callbacks.base import BaseCallbackHandler, CallbackManager\n'), ((3882, 3897), 'llama_index.core.utils.get_tokenizer', 'get_tokenizer', ([], {}), '()\n', (3895, 3897), False, 'from llama_index.core.utils import get_tokenizer, set_global_tokenizer\n'), ((4696, 4714), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (4712, 4714), False, 'from llama_index.core.node_parser import NodeParser, SentenceSplitter\n'), ((6766, 6816), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', (['self._llm.metadata'], {}), '(self._llm.metadata)\n', (6796, 6816), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((4340, 4391), 'functools.partial', 'partial', (['tokenizer.encode'], {'add_special_tokens': '(False)'}), '(tokenizer.encode, add_special_tokens=False)\n', (4347, 4391), False, 'from functools import partial\n'), ((6893, 6907), 'llama_index.core.indices.prompt_helper.PromptHelper', 'PromptHelper', ([], {}), '()\n', (6905, 6907), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n')]
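The Settings entry above defines a lazily initialized global configuration object plus migration helpers. A minimal usage sketch follows; it only touches attributes defined in that code, and the import path llama_index.core.settings is an assumption about where the module lives.

from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.settings import Settings, callback_manager_from_settings_or_context  # assumed module path

# Components are lazily initialized; only what is accessed gets resolved.
Settings.node_parser = SentenceSplitter(chunk_size=512, chunk_overlap=64)
print(Settings.chunk_size)       # 512, proxied through the configured node parser
print(Settings.chunk_overlap)    # 64
print(Settings.transformations)  # defaults to [Settings.node_parser]

# Migration helper: prefer an explicit ServiceContext when given, otherwise
# fall back to the global Settings object.
cm = callback_manager_from_settings_or_context(Settings, None)
assert cm is Settings.callback_manager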
import asyncio

from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex


async def main():
    # DOWNLOAD LLAMADATASET
    rag_dataset, documents = download_llama_dataset("CovidQaDataset", "./data")

    # BUILD BASIC RAG PIPELINE
    index = VectorStoreIndex.from_documents(documents=documents)
    query_engine = index.as_query_engine()

    # EVALUATE WITH PACK
    RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack")
    rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)

    ############################################################################
    # NOTE: If have a lower tier subscription for OpenAI API like Usage Tier 1 #
    # then you'll need to use different batch_size and sleep_time_in_seconds.  #
    # For Usage Tier 1, settings that seemed to work well were batch_size=5,   #
    # and sleep_time_in_seconds=15 (as of December 2023.)                      #
    ############################################################################
    benchmark_df = await rag_evaluator.arun(
        batch_size=40,  # batches the number of openai api calls to make
        sleep_time_in_seconds=1,  # number of seconds sleep before making an api call
    )
    print(benchmark_df)


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())  # pass the coroutine, not the function, to the event loop
[ "llama_index.core.VectorStoreIndex.from_documents", "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack" ]
[((265, 315), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""CovidQaDataset"""', '"""./data"""'], {}), "('CovidQaDataset', './data')\n", (287, 315), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((360, 412), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (391, 412), False, 'from llama_index.core import VectorStoreIndex\n'), ((505, 554), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack"""'], {}), "('RagEvaluatorPack', './pack')\n", (524, 554), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((1405, 1429), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1427, 1429), False, 'import asyncio\n')]
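The NOTE block in the script above suggests batch_size=5 and sleep_time_in_seconds=15 for lower OpenAI usage tiers. The hedged variant below shows that invocation with asyncio.run; rag_evaluator is assumed to have been constructed exactly as in the script.

import asyncio

async def run_tier1(rag_evaluator) -> None:
    # Values suggested in the NOTE comment for OpenAI Usage Tier 1.
    benchmark_df = await rag_evaluator.arun(
        batch_size=5,              # fewer API calls per batch
        sleep_time_in_seconds=15,  # longer pause between batches to respect rate limits
    )
    print(benchmark_df)

# asyncio.run(run_tier1(rag_evaluator))  # rag_evaluator built as in the script above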
from typing import Any, Callable, Optional, Sequence from llama_index.core.base.llms.types import ( ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata, ) from llama_index.core.callbacks import CallbackManager from llama_index.core.llms.callbacks import llm_completion_callback from llama_index.core.llms.custom import CustomLLM from llama_index.core.types import PydanticProgramMode class MockLLM(CustomLLM): max_tokens: Optional[int] def __init__( self, max_tokens: Optional[int] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, ) -> None: super().__init__( max_tokens=max_tokens, callback_manager=callback_manager, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, ) @classmethod def class_name(cls) -> str: return "MockLLM" @property def metadata(self) -> LLMMetadata: return LLMMetadata(num_output=self.max_tokens or -1) def _generate_text(self, length: int) -> str: return " ".join(["text" for _ in range(length)]) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: response_text = ( self._generate_text(self.max_tokens) if self.max_tokens else prompt ) return CompletionResponse( text=response_text, ) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: def gen_prompt() -> CompletionResponseGen: for ch in prompt: yield CompletionResponse( text=prompt, delta=ch, ) def gen_response(max_tokens: int) -> CompletionResponseGen: for i in range(max_tokens): response_text = self._generate_text(i) yield CompletionResponse( text=response_text, delta="text ", ) return gen_response(self.max_tokens) if self.max_tokens else gen_prompt()
[ "llama_index.core.llms.callbacks.llm_completion_callback", "llama_index.core.base.llms.types.LLMMetadata", "llama_index.core.base.llms.types.CompletionResponse" ]
[((1532, 1557), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1555, 1557), False, 'from llama_index.core.llms.callbacks import llm_completion_callback\n'), ((1871, 1896), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1894, 1896), False, 'from llama_index.core.llms.callbacks import llm_completion_callback\n'), ((1372, 1417), 'llama_index.core.base.llms.types.LLMMetadata', 'LLMMetadata', ([], {'num_output': '(self.max_tokens or -1)'}), '(num_output=self.max_tokens or -1)\n', (1383, 1417), False, 'from llama_index.core.base.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((1803, 1841), 'llama_index.core.base.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response_text'}), '(text=response_text)\n', (1821, 1841), False, 'from llama_index.core.base.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2123, 2164), 'llama_index.core.base.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'prompt', 'delta': 'ch'}), '(text=prompt, delta=ch)\n', (2141, 2164), False, 'from llama_index.core.base.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2410, 2463), 'llama_index.core.base.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response_text', 'delta': '"""text """'}), "(text=response_text, delta='text ')\n", (2428, 2463), False, 'from llama_index.core.base.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n')]
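A short sketch exercising the MockLLM defined above. The import path llama_index.core.llms is an assumption about where the class is re-exported; the expected outputs follow directly from _generate_text, complete, and stream_complete as written.

from llama_index.core.llms import MockLLM  # assumed export path

bounded = MockLLM(max_tokens=4)
print(bounded.complete("ignored prompt").text)   # "text text text text"

# With max_tokens left unset, complete() echoes the prompt and
# stream_complete() yields it back one character per chunk.
echo = MockLLM()
print(echo.complete("hello").text)               # "hello"
print("".join(r.delta for r in echo.stream_complete("abc")))   # "abc"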
"""Base agent type.""" import uuid from abc import abstractmethod from typing import Any, Dict, List, Optional from llama_index.core.base.base_query_engine import BaseQueryEngine from llama_index.core.base.llms.types import ChatMessage from llama_index.core.base.response.schema import RESPONSE_TYPE, Response from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.callbacks import CallbackManager, trace_method from llama_index.core.chat_engine.types import ( BaseChatEngine, StreamingAgentChatResponse, ) from llama_index.core.memory.types import BaseMemory from llama_index.core.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType from llama_index.core.schema import QueryBundle class BaseAgent(BaseChatEngine, BaseQueryEngine): """Base Agent.""" def _get_prompts(self) -> PromptDictType: """Get prompts.""" # TODO: the ReAct agent does not explicitly specify prompts, would need a # refactor to expose those prompts return {} def _get_prompt_modules(self) -> PromptMixinType: """Get prompt modules.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" # ===== Query Engine Interface ===== @trace_method("query") def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: agent_response = self.chat( query_bundle.query_str, chat_history=[], ) return Response( response=str(agent_response), source_nodes=agent_response.source_nodes ) @trace_method("query") async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: agent_response = await self.achat( query_bundle.query_str, chat_history=[], ) return Response( response=str(agent_response), source_nodes=agent_response.source_nodes ) def stream_chat( self, message: str, chat_history: Optional[List[ChatMessage]] = None ) -> StreamingAgentChatResponse: raise NotImplementedError("stream_chat not implemented") async def astream_chat( self, message: str, chat_history: Optional[List[ChatMessage]] = None ) -> StreamingAgentChatResponse: raise NotImplementedError("astream_chat not implemented") class TaskStep(BaseModel): """Agent task step. Represents a single input step within the execution run ("Task") of an agent given a user input. The output is returned as a `TaskStepOutput`. """ task_id: str = Field(..., diescription="Task ID") step_id: str = Field(..., description="Step ID") input: Optional[str] = Field(default=None, description="User input") # memory: BaseMemory = Field( # ..., type=BaseMemory, description="Conversational Memory" # ) step_state: Dict[str, Any] = Field( default_factory=dict, description="Additional state for a given step." ) # NOTE: the state below may change throughout the course of execution # this tracks the relationships to other steps next_steps: Dict[str, "TaskStep"] = Field( default_factory=dict, description="Next steps to be executed." ) prev_steps: Dict[str, "TaskStep"] = Field( default_factory=dict, description="Previous steps that were dependencies for this step.", ) is_ready: bool = Field( default=True, description="Is this step ready to be executed?" ) def get_next_step( self, step_id: str, input: Optional[str] = None, step_state: Optional[Dict[str, Any]] = None, ) -> "TaskStep": """Convenience function to get next step. Preserve task_id, memory, step_state. """ return TaskStep( task_id=self.task_id, step_id=step_id, input=input, # memory=self.memory, step_state=step_state or self.step_state, ) def link_step( self, next_step: "TaskStep", ) -> None: """Link to next step. Add link from this step to next, and from next step to current. 
""" self.next_steps[next_step.step_id] = next_step next_step.prev_steps[self.step_id] = self class TaskStepOutput(BaseModel): """Agent task step output.""" output: Any = Field(..., description="Task step output") task_step: TaskStep = Field(..., description="Task step input") next_steps: List[TaskStep] = Field(..., description="Next steps to be executed.") is_last: bool = Field(default=False, description="Is this the last step?") def __str__(self) -> str: """String representation.""" return str(self.output) class Task(BaseModel): """Agent Task. Represents a "run" of an agent given a user input. """ class Config: arbitrary_types_allowed = True task_id: str = Field( default_factory=lambda: str(uuid.uuid4()), type=str, description="Task ID" ) input: str = Field(..., type=str, description="User input") # NOTE: this is state that may be modified throughout the course of execution of the task memory: BaseMemory = Field( ..., type=BaseMemory, description=( "Conversational Memory. Maintains state before execution of this task." ), ) callback_manager: CallbackManager = Field( default_factory=CallbackManager, exclude=True, description="Callback manager for the task.", ) extra_state: Dict[str, Any] = Field( default_factory=dict, description=( "Additional user-specified state for a given task. " "Can be modified throughout the execution of a task." ), ) class BaseAgentWorker(PromptMixin): """Base agent worker.""" class Config: arbitrary_types_allowed = True def _get_prompts(self) -> PromptDictType: """Get prompts.""" # TODO: the ReAct agent does not explicitly specify prompts, would need a # refactor to expose those prompts return {} def _get_prompt_modules(self) -> PromptMixinType: """Get prompt modules.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" @abstractmethod def initialize_step(self, task: Task, **kwargs: Any) -> TaskStep: """Initialize step from task.""" @abstractmethod def run_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput: """Run step.""" @abstractmethod async def arun_step( self, step: TaskStep, task: Task, **kwargs: Any ) -> TaskStepOutput: """Run step (async).""" raise NotImplementedError @abstractmethod def stream_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput: """Run step (stream).""" # TODO: figure out if we need a different type for TaskStepOutput raise NotImplementedError @abstractmethod async def astream_step( self, step: TaskStep, task: Task, **kwargs: Any ) -> TaskStepOutput: """Run step (async stream).""" raise NotImplementedError @abstractmethod def finalize_task(self, task: Task, **kwargs: Any) -> None: """Finalize task, after all the steps are completed.""" def set_callback_manager(self, callback_manager: CallbackManager) -> None: """Set callback manager.""" # TODO: make this abstractmethod (right now will break some agent impls)
[ "llama_index.core.bridge.pydantic.Field", "llama_index.core.callbacks.trace_method" ]
[((1275, 1296), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1287, 1296), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((1598, 1619), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1610, 1619), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((2578, 2612), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'diescription': '"""Task ID"""'}), "(..., diescription='Task ID')\n", (2583, 2612), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2632, 2665), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Step ID"""'}), "(..., description='Step ID')\n", (2637, 2665), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2693, 2738), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""User input"""'}), "(default=None, description='User input')\n", (2698, 2738), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2882, 2959), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional state for a given step."""'}), "(default_factory=dict, description='Additional state for a given step.')\n", (2887, 2959), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3140, 3209), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Next steps to be executed."""'}), "(default_factory=dict, description='Next steps to be executed.')\n", (3145, 3209), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3264, 3364), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Previous steps that were dependencies for this step."""'}), "(default_factory=dict, description=\n 'Previous steps that were dependencies for this step.')\n", (3269, 3364), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3404, 3473), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Is this step ready to be executed?"""'}), "(default=True, description='Is this step ready to be executed?')\n", (3409, 3473), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4369, 4411), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step output"""'}), "(..., description='Task step output')\n", (4374, 4411), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4438, 4479), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step input"""'}), "(..., description='Task step input')\n", (4443, 4479), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4513, 4565), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Next steps to be executed."""'}), "(..., description='Next steps to be executed.')\n", (4518, 4565), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4586, 4644), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Is this the last step?"""'}), "(default=False, description='Is this the last step?')\n", (4591, 4644), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5045, 5091), 
'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'type': 'str', 'description': '"""User input"""'}), "(..., type=str, description='User input')\n", (5050, 5091), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5212, 5329), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'type': 'BaseMemory', 'description': '"""Conversational Memory. Maintains state before execution of this task."""'}), "(..., type=BaseMemory, description=\n 'Conversational Memory. Maintains state before execution of this task.')\n", (5217, 5329), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5421, 5524), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)', 'description': '"""Callback manager for the task."""'}), "(default_factory=CallbackManager, exclude=True, description=\n 'Callback manager for the task.')\n", (5426, 5524), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5586, 5740), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional user-specified state for a given task. Can be modified throughout the execution of a task."""'}), "(default_factory=dict, description=\n 'Additional user-specified state for a given task. Can be modified throughout the execution of a task.'\n )\n", (5591, 5740), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4975, 4987), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4985, 4987), False, 'import uuid\n')]
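To make the step-linking contract concrete, here is a small self-contained sketch of the TaskStep and TaskStepOutput models defined above. The import path llama_index.core.agent.types is an assumption; only fields shown in the entry are used.

from llama_index.core.agent.types import TaskStep, TaskStepOutput  # assumed module path

step1 = TaskStep(task_id="task-1", step_id="step-1", input="What is 2 + 2?")
step2 = step1.get_next_step(step_id="step-2", input="Double-check the answer")
step1.link_step(step2)   # wires next_steps / prev_steps in both directions

assert step2.task_id == "task-1"            # task_id is preserved across steps
assert step1.next_steps["step-2"] is step2
assert step2.prev_steps["step-1"] is step1

result = TaskStepOutput(output="4", task_step=step1, next_steps=[step2], is_last=True)
print(result)   # __str__ returns the stringified output: "4"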
"""Base agent type.""" import uuid from abc import abstractmethod from typing import Any, Dict, List, Optional from llama_index.core.base.base_query_engine import BaseQueryEngine from llama_index.core.base.llms.types import ChatMessage from llama_index.core.base.response.schema import RESPONSE_TYPE, Response from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.callbacks import CallbackManager, trace_method from llama_index.core.chat_engine.types import ( BaseChatEngine, StreamingAgentChatResponse, ) from llama_index.core.memory.types import BaseMemory from llama_index.core.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType from llama_index.core.schema import QueryBundle class BaseAgent(BaseChatEngine, BaseQueryEngine): """Base Agent.""" def _get_prompts(self) -> PromptDictType: """Get prompts.""" # TODO: the ReAct agent does not explicitly specify prompts, would need a # refactor to expose those prompts return {} def _get_prompt_modules(self) -> PromptMixinType: """Get prompt modules.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" # ===== Query Engine Interface ===== @trace_method("query") def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: agent_response = self.chat( query_bundle.query_str, chat_history=[], ) return Response( response=str(agent_response), source_nodes=agent_response.source_nodes ) @trace_method("query") async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: agent_response = await self.achat( query_bundle.query_str, chat_history=[], ) return Response( response=str(agent_response), source_nodes=agent_response.source_nodes ) def stream_chat( self, message: str, chat_history: Optional[List[ChatMessage]] = None ) -> StreamingAgentChatResponse: raise NotImplementedError("stream_chat not implemented") async def astream_chat( self, message: str, chat_history: Optional[List[ChatMessage]] = None ) -> StreamingAgentChatResponse: raise NotImplementedError("astream_chat not implemented") class TaskStep(BaseModel): """Agent task step. Represents a single input step within the execution run ("Task") of an agent given a user input. The output is returned as a `TaskStepOutput`. """ task_id: str = Field(..., diescription="Task ID") step_id: str = Field(..., description="Step ID") input: Optional[str] = Field(default=None, description="User input") # memory: BaseMemory = Field( # ..., type=BaseMemory, description="Conversational Memory" # ) step_state: Dict[str, Any] = Field( default_factory=dict, description="Additional state for a given step." ) # NOTE: the state below may change throughout the course of execution # this tracks the relationships to other steps next_steps: Dict[str, "TaskStep"] = Field( default_factory=dict, description="Next steps to be executed." ) prev_steps: Dict[str, "TaskStep"] = Field( default_factory=dict, description="Previous steps that were dependencies for this step.", ) is_ready: bool = Field( default=True, description="Is this step ready to be executed?" ) def get_next_step( self, step_id: str, input: Optional[str] = None, step_state: Optional[Dict[str, Any]] = None, ) -> "TaskStep": """Convenience function to get next step. Preserve task_id, memory, step_state. """ return TaskStep( task_id=self.task_id, step_id=step_id, input=input, # memory=self.memory, step_state=step_state or self.step_state, ) def link_step( self, next_step: "TaskStep", ) -> None: """Link to next step. Add link from this step to next, and from next step to current. 
""" self.next_steps[next_step.step_id] = next_step next_step.prev_steps[self.step_id] = self class TaskStepOutput(BaseModel): """Agent task step output.""" output: Any = Field(..., description="Task step output") task_step: TaskStep = Field(..., description="Task step input") next_steps: List[TaskStep] = Field(..., description="Next steps to be executed.") is_last: bool = Field(default=False, description="Is this the last step?") def __str__(self) -> str: """String representation.""" return str(self.output) class Task(BaseModel): """Agent Task. Represents a "run" of an agent given a user input. """ class Config: arbitrary_types_allowed = True task_id: str = Field( default_factory=lambda: str(uuid.uuid4()), type=str, description="Task ID" ) input: str = Field(..., type=str, description="User input") # NOTE: this is state that may be modified throughout the course of execution of the task memory: BaseMemory = Field( ..., type=BaseMemory, description=( "Conversational Memory. Maintains state before execution of this task." ), ) callback_manager: CallbackManager = Field( default_factory=CallbackManager, exclude=True, description="Callback manager for the task.", ) extra_state: Dict[str, Any] = Field( default_factory=dict, description=( "Additional user-specified state for a given task. " "Can be modified throughout the execution of a task." ), ) class BaseAgentWorker(PromptMixin): """Base agent worker.""" class Config: arbitrary_types_allowed = True def _get_prompts(self) -> PromptDictType: """Get prompts.""" # TODO: the ReAct agent does not explicitly specify prompts, would need a # refactor to expose those prompts return {} def _get_prompt_modules(self) -> PromptMixinType: """Get prompt modules.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" @abstractmethod def initialize_step(self, task: Task, **kwargs: Any) -> TaskStep: """Initialize step from task.""" @abstractmethod def run_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput: """Run step.""" @abstractmethod async def arun_step( self, step: TaskStep, task: Task, **kwargs: Any ) -> TaskStepOutput: """Run step (async).""" raise NotImplementedError @abstractmethod def stream_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput: """Run step (stream).""" # TODO: figure out if we need a different type for TaskStepOutput raise NotImplementedError @abstractmethod async def astream_step( self, step: TaskStep, task: Task, **kwargs: Any ) -> TaskStepOutput: """Run step (async stream).""" raise NotImplementedError @abstractmethod def finalize_task(self, task: Task, **kwargs: Any) -> None: """Finalize task, after all the steps are completed.""" def set_callback_manager(self, callback_manager: CallbackManager) -> None: """Set callback manager.""" # TODO: make this abstractmethod (right now will break some agent impls)
[ "llama_index.core.bridge.pydantic.Field", "llama_index.core.callbacks.trace_method" ]
[((1275, 1296), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1287, 1296), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((1598, 1619), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1610, 1619), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((2578, 2612), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'diescription': '"""Task ID"""'}), "(..., diescription='Task ID')\n", (2583, 2612), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2632, 2665), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Step ID"""'}), "(..., description='Step ID')\n", (2637, 2665), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2693, 2738), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""User input"""'}), "(default=None, description='User input')\n", (2698, 2738), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2882, 2959), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional state for a given step."""'}), "(default_factory=dict, description='Additional state for a given step.')\n", (2887, 2959), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3140, 3209), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Next steps to be executed."""'}), "(default_factory=dict, description='Next steps to be executed.')\n", (3145, 3209), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3264, 3364), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Previous steps that were dependencies for this step."""'}), "(default_factory=dict, description=\n 'Previous steps that were dependencies for this step.')\n", (3269, 3364), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3404, 3473), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Is this step ready to be executed?"""'}), "(default=True, description='Is this step ready to be executed?')\n", (3409, 3473), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4369, 4411), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step output"""'}), "(..., description='Task step output')\n", (4374, 4411), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4438, 4479), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step input"""'}), "(..., description='Task step input')\n", (4443, 4479), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4513, 4565), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Next steps to be executed."""'}), "(..., description='Next steps to be executed.')\n", (4518, 4565), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4586, 4644), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Is this the last step?"""'}), "(default=False, description='Is this the last step?')\n", (4591, 4644), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5045, 5091), 
'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'type': 'str', 'description': '"""User input"""'}), "(..., type=str, description='User input')\n", (5050, 5091), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5212, 5329), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'type': 'BaseMemory', 'description': '"""Conversational Memory. Maintains state before execution of this task."""'}), "(..., type=BaseMemory, description=\n 'Conversational Memory. Maintains state before execution of this task.')\n", (5217, 5329), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5421, 5524), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)', 'description': '"""Callback manager for the task."""'}), "(default_factory=CallbackManager, exclude=True, description=\n 'Callback manager for the task.')\n", (5426, 5524), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5586, 5740), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional user-specified state for a given task. Can be modified throughout the execution of a task."""'}), "(default_factory=dict, description=\n 'Additional user-specified state for a given task. Can be modified throughout the execution of a task.'\n )\n", (5591, 5740), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4975, 4987), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4985, 4987), False, 'import uuid\n')]
"""Base agent type.""" import uuid from abc import abstractmethod from typing import Any, Dict, List, Optional from llama_index.core.base.base_query_engine import BaseQueryEngine from llama_index.core.base.llms.types import ChatMessage from llama_index.core.base.response.schema import RESPONSE_TYPE, Response from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.callbacks import CallbackManager, trace_method from llama_index.core.chat_engine.types import ( BaseChatEngine, StreamingAgentChatResponse, ) from llama_index.core.memory.types import BaseMemory from llama_index.core.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType from llama_index.core.schema import QueryBundle class BaseAgent(BaseChatEngine, BaseQueryEngine): """Base Agent.""" def _get_prompts(self) -> PromptDictType: """Get prompts.""" # TODO: the ReAct agent does not explicitly specify prompts, would need a # refactor to expose those prompts return {} def _get_prompt_modules(self) -> PromptMixinType: """Get prompt modules.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" # ===== Query Engine Interface ===== @trace_method("query") def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: agent_response = self.chat( query_bundle.query_str, chat_history=[], ) return Response( response=str(agent_response), source_nodes=agent_response.source_nodes ) @trace_method("query") async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: agent_response = await self.achat( query_bundle.query_str, chat_history=[], ) return Response( response=str(agent_response), source_nodes=agent_response.source_nodes ) def stream_chat( self, message: str, chat_history: Optional[List[ChatMessage]] = None ) -> StreamingAgentChatResponse: raise NotImplementedError("stream_chat not implemented") async def astream_chat( self, message: str, chat_history: Optional[List[ChatMessage]] = None ) -> StreamingAgentChatResponse: raise NotImplementedError("astream_chat not implemented") class TaskStep(BaseModel): """Agent task step. Represents a single input step within the execution run ("Task") of an agent given a user input. The output is returned as a `TaskStepOutput`. """ task_id: str = Field(..., diescription="Task ID") step_id: str = Field(..., description="Step ID") input: Optional[str] = Field(default=None, description="User input") # memory: BaseMemory = Field( # ..., type=BaseMemory, description="Conversational Memory" # ) step_state: Dict[str, Any] = Field( default_factory=dict, description="Additional state for a given step." ) # NOTE: the state below may change throughout the course of execution # this tracks the relationships to other steps next_steps: Dict[str, "TaskStep"] = Field( default_factory=dict, description="Next steps to be executed." ) prev_steps: Dict[str, "TaskStep"] = Field( default_factory=dict, description="Previous steps that were dependencies for this step.", ) is_ready: bool = Field( default=True, description="Is this step ready to be executed?" ) def get_next_step( self, step_id: str, input: Optional[str] = None, step_state: Optional[Dict[str, Any]] = None, ) -> "TaskStep": """Convenience function to get next step. Preserve task_id, memory, step_state. """ return TaskStep( task_id=self.task_id, step_id=step_id, input=input, # memory=self.memory, step_state=step_state or self.step_state, ) def link_step( self, next_step: "TaskStep", ) -> None: """Link to next step. Add link from this step to next, and from next step to current. 
""" self.next_steps[next_step.step_id] = next_step next_step.prev_steps[self.step_id] = self class TaskStepOutput(BaseModel): """Agent task step output.""" output: Any = Field(..., description="Task step output") task_step: TaskStep = Field(..., description="Task step input") next_steps: List[TaskStep] = Field(..., description="Next steps to be executed.") is_last: bool = Field(default=False, description="Is this the last step?") def __str__(self) -> str: """String representation.""" return str(self.output) class Task(BaseModel): """Agent Task. Represents a "run" of an agent given a user input. """ class Config: arbitrary_types_allowed = True task_id: str = Field( default_factory=lambda: str(uuid.uuid4()), type=str, description="Task ID" ) input: str = Field(..., type=str, description="User input") # NOTE: this is state that may be modified throughout the course of execution of the task memory: BaseMemory = Field( ..., type=BaseMemory, description=( "Conversational Memory. Maintains state before execution of this task." ), ) callback_manager: CallbackManager = Field( default_factory=CallbackManager, exclude=True, description="Callback manager for the task.", ) extra_state: Dict[str, Any] = Field( default_factory=dict, description=( "Additional user-specified state for a given task. " "Can be modified throughout the execution of a task." ), ) class BaseAgentWorker(PromptMixin): """Base agent worker.""" class Config: arbitrary_types_allowed = True def _get_prompts(self) -> PromptDictType: """Get prompts.""" # TODO: the ReAct agent does not explicitly specify prompts, would need a # refactor to expose those prompts return {} def _get_prompt_modules(self) -> PromptMixinType: """Get prompt modules.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" @abstractmethod def initialize_step(self, task: Task, **kwargs: Any) -> TaskStep: """Initialize step from task.""" @abstractmethod def run_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput: """Run step.""" @abstractmethod async def arun_step( self, step: TaskStep, task: Task, **kwargs: Any ) -> TaskStepOutput: """Run step (async).""" raise NotImplementedError @abstractmethod def stream_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput: """Run step (stream).""" # TODO: figure out if we need a different type for TaskStepOutput raise NotImplementedError @abstractmethod async def astream_step( self, step: TaskStep, task: Task, **kwargs: Any ) -> TaskStepOutput: """Run step (async stream).""" raise NotImplementedError @abstractmethod def finalize_task(self, task: Task, **kwargs: Any) -> None: """Finalize task, after all the steps are completed.""" def set_callback_manager(self, callback_manager: CallbackManager) -> None: """Set callback manager.""" # TODO: make this abstractmethod (right now will break some agent impls)
[ "llama_index.core.bridge.pydantic.Field", "llama_index.core.callbacks.trace_method" ]
[((1275, 1296), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1287, 1296), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((1598, 1619), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1610, 1619), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((2578, 2612), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'diescription': '"""Task ID"""'}), "(..., diescription='Task ID')\n", (2583, 2612), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2632, 2665), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Step ID"""'}), "(..., description='Step ID')\n", (2637, 2665), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2693, 2738), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""User input"""'}), "(default=None, description='User input')\n", (2698, 2738), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2882, 2959), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional state for a given step."""'}), "(default_factory=dict, description='Additional state for a given step.')\n", (2887, 2959), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3140, 3209), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Next steps to be executed."""'}), "(default_factory=dict, description='Next steps to be executed.')\n", (3145, 3209), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3264, 3364), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Previous steps that were dependencies for this step."""'}), "(default_factory=dict, description=\n 'Previous steps that were dependencies for this step.')\n", (3269, 3364), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3404, 3473), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Is this step ready to be executed?"""'}), "(default=True, description='Is this step ready to be executed?')\n", (3409, 3473), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4369, 4411), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step output"""'}), "(..., description='Task step output')\n", (4374, 4411), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4438, 4479), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step input"""'}), "(..., description='Task step input')\n", (4443, 4479), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4513, 4565), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Next steps to be executed."""'}), "(..., description='Next steps to be executed.')\n", (4518, 4565), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4586, 4644), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Is this the last step?"""'}), "(default=False, description='Is this the last step?')\n", (4591, 4644), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5045, 5091), 
'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'type': 'str', 'description': '"""User input"""'}), "(..., type=str, description='User input')\n", (5050, 5091), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5212, 5329), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'type': 'BaseMemory', 'description': '"""Conversational Memory. Maintains state before execution of this task."""'}), "(..., type=BaseMemory, description=\n 'Conversational Memory. Maintains state before execution of this task.')\n", (5217, 5329), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5421, 5524), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)', 'description': '"""Callback manager for the task."""'}), "(default_factory=CallbackManager, exclude=True, description=\n 'Callback manager for the task.')\n", (5426, 5524), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5586, 5740), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional user-specified state for a given task. Can be modified throughout the execution of a task."""'}), "(default_factory=dict, description=\n 'Additional user-specified state for a given task. Can be modified throughout the execution of a task.'\n )\n", (5591, 5740), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4975, 4987), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4985, 4987), False, 'import uuid\n')]
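A minimal usage sketch for the task-step primitives defined above, assuming the `TaskStep` and `TaskStepOutput` classes from this file are in scope; the task and step identifiers are made-up placeholders, not values the library prescribes.

# Illustrative sketch only: exercise TaskStep chaining and TaskStepOutput.
first = TaskStep(task_id="task-1", step_id="step-1", input="What is 2 + 2?")
second = first.get_next_step(step_id="step-2", input="Explain the answer.")
first.link_step(second)

assert second.task_id == "task-1"      # get_next_step preserves the task id
assert "step-2" in first.next_steps    # forward link written by link_step
assert "step-1" in second.prev_steps   # reverse link written by link_step

done = TaskStepOutput(output="4", task_step=second, next_steps=[], is_last=True)
print(done)  # __str__ delegates to str(self.output), so this prints: 4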
import json from abc import abstractmethod from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Dict, Optional, Type if TYPE_CHECKING: from llama_index.core.bridge.langchain import StructuredTool, Tool from deprecated import deprecated from llama_index.core.bridge.pydantic import BaseModel class DefaultToolFnSchema(BaseModel): """Default tool function Schema.""" input: str @dataclass class ToolMetadata: description: str name: Optional[str] = None fn_schema: Optional[Type[BaseModel]] = DefaultToolFnSchema def get_parameters_dict(self) -> dict: if self.fn_schema is None: parameters = { "type": "object", "properties": { "input": {"title": "input query string", "type": "string"}, }, "required": ["input"], } else: parameters = self.fn_schema.schema() parameters = { k: v for k, v in parameters.items() if k in ["type", "properties", "required", "definitions"] } return parameters @property def fn_schema_str(self) -> str: """Get fn schema as string.""" if self.fn_schema is None: raise ValueError("fn_schema is None.") parameters = self.get_parameters_dict() return json.dumps(parameters) def get_name(self) -> str: """Get name.""" if self.name is None: raise ValueError("name is None.") return self.name @deprecated( "Deprecated in favor of `to_openai_tool`, which should be used instead." ) def to_openai_function(self) -> Dict[str, Any]: """Deprecated and replaced by `to_openai_tool`. The name and arguments of a function that should be called, as generated by the model. """ return { "name": self.name, "description": self.description, "parameters": self.get_parameters_dict(), } def to_openai_tool(self) -> Dict[str, Any]: """To OpenAI tool.""" return { "type": "function", "function": { "name": self.name, "description": self.description, "parameters": self.get_parameters_dict(), }, } class ToolOutput(BaseModel): """Tool output.""" content: str tool_name: str raw_input: Dict[str, Any] raw_output: Any def __str__(self) -> str: """String.""" return str(self.content) class BaseTool: @property @abstractmethod def metadata(self) -> ToolMetadata: pass @abstractmethod def __call__(self, input: Any) -> ToolOutput: pass def _process_langchain_tool_kwargs( self, langchain_tool_kwargs: Any, ) -> Dict[str, Any]: """Process langchain tool kwargs.""" if "name" not in langchain_tool_kwargs: langchain_tool_kwargs["name"] = self.metadata.name or "" if "description" not in langchain_tool_kwargs: langchain_tool_kwargs["description"] = self.metadata.description if "fn_schema" not in langchain_tool_kwargs: langchain_tool_kwargs["args_schema"] = self.metadata.fn_schema return langchain_tool_kwargs def to_langchain_tool( self, **langchain_tool_kwargs: Any, ) -> "Tool": """To langchain tool.""" from llama_index.core.bridge.langchain import Tool langchain_tool_kwargs = self._process_langchain_tool_kwargs( langchain_tool_kwargs ) return Tool.from_function( func=self.__call__, **langchain_tool_kwargs, ) def to_langchain_structured_tool( self, **langchain_tool_kwargs: Any, ) -> "StructuredTool": """To langchain structured tool.""" from llama_index.core.bridge.langchain import StructuredTool langchain_tool_kwargs = self._process_langchain_tool_kwargs( langchain_tool_kwargs ) return StructuredTool.from_function( func=self.__call__, **langchain_tool_kwargs, ) class AsyncBaseTool(BaseTool): """ Base-level tool class that is backwards compatible with the old tool spec but also supports async. 
""" def __call__(self, *args: Any, **kwargs: Any) -> ToolOutput: return self.call(*args, **kwargs) @abstractmethod def call(self, input: Any) -> ToolOutput: """ This is the method that should be implemented by the tool developer. """ @abstractmethod async def acall(self, input: Any) -> ToolOutput: """ This is the async version of the call method. Should also be implemented by the tool developer as an async-compatible implementation. """ class BaseToolAsyncAdapter(AsyncBaseTool): """ Adapter class that allows a synchronous tool to be used as an async tool. """ def __init__(self, tool: BaseTool): self.base_tool = tool @property def metadata(self) -> ToolMetadata: return self.base_tool.metadata def call(self, input: Any) -> ToolOutput: return self.base_tool(input) async def acall(self, input: Any) -> ToolOutput: return self.call(input) def adapt_to_async_tool(tool: BaseTool) -> AsyncBaseTool: """ Converts a synchronous tool to an async tool. """ if isinstance(tool, AsyncBaseTool): return tool else: return BaseToolAsyncAdapter(tool)
[ "llama_index.core.bridge.langchain.Tool.from_function", "llama_index.core.bridge.langchain.StructuredTool.from_function" ]
[((1581, 1670), 'deprecated.deprecated', 'deprecated', (['"""Deprecated in favor of `to_openai_tool`, which should be used instead."""'], {}), "(\n 'Deprecated in favor of `to_openai_tool`, which should be used instead.')\n", (1591, 1670), False, 'from deprecated import deprecated\n'), ((1395, 1417), 'json.dumps', 'json.dumps', (['parameters'], {}), '(parameters)\n', (1405, 1417), False, 'import json\n'), ((3690, 3753), 'llama_index.core.bridge.langchain.Tool.from_function', 'Tool.from_function', ([], {'func': 'self.__call__'}), '(func=self.__call__, **langchain_tool_kwargs)\n', (3708, 3753), False, 'from llama_index.core.bridge.langchain import Tool\n'), ((4149, 4222), 'llama_index.core.bridge.langchain.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'func': 'self.__call__'}), '(func=self.__call__, **langchain_tool_kwargs)\n', (4177, 4222), False, 'from llama_index.core.bridge.langchain import StructuredTool\n')]
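A small, hypothetical tool built on the abstractions above and then wrapped with `adapt_to_async_tool` so it can be awaited; `EchoTool` and the sample input are invented for illustration, and the sketch assumes the definitions above (plus `Any` from typing) are in scope.

import asyncio

# Illustrative sketch only: a synchronous BaseTool with hand-written metadata.
class EchoTool(BaseTool):
    @property
    def metadata(self) -> ToolMetadata:
        return ToolMetadata(name="echo", description="Echo the input back unchanged.")

    def __call__(self, input: Any) -> ToolOutput:
        return ToolOutput(
            content=str(input),
            tool_name="echo",
            raw_input={"input": input},
            raw_output=input,
        )

async def _demo() -> None:
    # adapt_to_async_tool wraps the sync tool in BaseToolAsyncAdapter,
    # whose acall() simply forwards to the synchronous __call__.
    tool = adapt_to_async_tool(EchoTool())
    result = await tool.acall("hello")
    print(result.tool_name, str(result))  # -> echo hello

asyncio.run(_demo())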
"""Generate SQL queries using LlamaIndex.""" import argparse import json import logging import os import re from typing import Any, cast from llama_index import LLMPredictor, SQLDatabase from llama_index.indices import SQLStructStoreIndex from llama_index.llms.openai import OpenAI from sqlalchemy import create_engine, text from tqdm import tqdm logging.getLogger("root").setLevel(logging.WARNING) _spaces = re.compile(r"\s+") _newlines = re.compile(r"\n+") def _generate_sql( llama_index: SQLStructStoreIndex, nl_query_text: str, ) -> str: """Generate SQL query for the given NL query text.""" query_engine = llama_index.as_query_engine() response = query_engine.query(nl_query_text) if ( response.metadata is None or "sql_query" not in response.metadata or response.metadata["sql_query"] is None ): raise RuntimeError("No SQL query generated.") query = response.metadata["sql_query"] # Remove newlines and extra spaces. query = _newlines.sub(" ", query) query = _spaces.sub(" ", query) return query.strip() def generate_sql(llama_indexes: dict, examples: list, output_file: str) -> None: """Generate SQL queries for the given examples and write them to the output file.""" with open(output_file, "w") as f: for example in tqdm(examples, desc=f"Generating {output_file}"): db_name = example["db_id"] nl_query_text = example["question"] try: sql_query = _generate_sql(llama_indexes[db_name], nl_query_text) except Exception as e: print( f"Failed to generate SQL query for question: " f"{example['question']} on database: {example['db_id']}." ) print(e) sql_query = "ERROR" f.write(sql_query + "\n") if __name__ == "__main__": parser = argparse.ArgumentParser( description="Generate SQL queries using LlamaIndex." ) parser.add_argument( "--input", type=str, required=True, help="Path to the spider dataset directory." ) parser.add_argument( "--output", type=str, required=True, help="Path to the output directory of generated SQL files," " one query on each line, " "to be compared with the *_gold.sql files in the input directory.", ) parser.add_argument( "--model", type=str, choices=["gpt-4", "gpt-3.5-turbo", "text-davinci-003", "code-davinci-002"], required=True, help="The model to use for generating SQL queries.", ) args = parser.parse_args() # Create the output directory if it does not exist. if not os.path.exists(args.output): os.makedirs(args.output) # Load the Spider dataset from the input directory. with open(os.path.join(args.input, "train_spider.json")) as f: train_spider = json.load(f) with open(os.path.join(args.input, "train_others.json")) as f: train_others = json.load(f) with open(os.path.join(args.input, "dev.json")) as f: dev = json.load(f) # Create all necessary SQL database objects. databases = {} for db in train_spider + train_others + dev: db_name = db["db_id"] if db_name in databases: continue db_path = os.path.join(args.input, "database", db_name, db_name + ".sqlite") engine = create_engine("sqlite:///" + db_path) databases[db_name] = (SQLDatabase(engine=engine), engine) # Create the LlamaIndexes for all databases. llm = OpenAI(model=args.model, temperature=0) llm_predictor = LLMPredictor(llm=llm) llm_indexes = {} for db_name, (db, engine) in databases.items(): # Get the name of the first table in the database. # This is a hack to get a table name for the index, which can use any # table in the database. 
with engine.connect() as connection: table_name = cast( Any, connection.execute( text("select name from sqlite_master where type = 'table'") ).fetchone(), )[0] llm_indexes[db_name] = SQLStructStoreIndex.from_documents( documents=[], llm_predictor=llm_predictor, sql_database=db, table_name=table_name, ) # Generate SQL queries. generate_sql( llama_indexes=llm_indexes, examples=train_spider + train_others, output_file=os.path.join(args.output, "train_pred.sql"), ) generate_sql( llama_indexes=llm_indexes, examples=dev, output_file=os.path.join(args.output, "dev_pred.sql"), )
[ "llama_index.llms.openai.OpenAI", "llama_index.indices.SQLStructStoreIndex.from_documents", "llama_index.SQLDatabase", "llama_index.LLMPredictor" ]
[((413, 431), 're.compile', 're.compile', (['"""\\\\s+"""'], {}), "('\\\\s+')\n", (423, 431), False, 'import re\n'), ((444, 462), 're.compile', 're.compile', (['"""\\\\n+"""'], {}), "('\\\\n+')\n", (454, 462), False, 'import re\n'), ((1926, 2003), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate SQL queries using LlamaIndex."""'}), "(description='Generate SQL queries using LlamaIndex.')\n", (1949, 2003), False, 'import argparse\n'), ((3623, 3662), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'args.model', 'temperature': '(0)'}), '(model=args.model, temperature=0)\n', (3629, 3662), False, 'from llama_index.llms.openai import OpenAI\n'), ((3683, 3704), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (3695, 3704), False, 'from llama_index import LLMPredictor, SQLDatabase\n'), ((349, 374), 'logging.getLogger', 'logging.getLogger', (['"""root"""'], {}), "('root')\n", (366, 374), False, 'import logging\n'), ((1329, 1377), 'tqdm.tqdm', 'tqdm', (['examples'], {'desc': 'f"""Generating {output_file}"""'}), "(examples, desc=f'Generating {output_file}')\n", (1333, 1377), False, 'from tqdm import tqdm\n'), ((2745, 2772), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (2759, 2772), False, 'import os\n'), ((2782, 2806), 'os.makedirs', 'os.makedirs', (['args.output'], {}), '(args.output)\n', (2793, 2806), False, 'import os\n'), ((2954, 2966), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2963, 2966), False, 'import json\n'), ((3057, 3069), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3066, 3069), False, 'import json\n'), ((3142, 3154), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3151, 3154), False, 'import json\n'), ((3375, 3441), 'os.path.join', 'os.path.join', (['args.input', '"""database"""', 'db_name', "(db_name + '.sqlite')"], {}), "(args.input, 'database', db_name, db_name + '.sqlite')\n", (3387, 3441), False, 'import os\n'), ((3459, 3496), 'sqlalchemy.create_engine', 'create_engine', (["('sqlite:///' + db_path)"], {}), "('sqlite:///' + db_path)\n", (3472, 3496), False, 'from sqlalchemy import create_engine, text\n'), ((2878, 2923), 'os.path.join', 'os.path.join', (['args.input', '"""train_spider.json"""'], {}), "(args.input, 'train_spider.json')\n", (2890, 2923), False, 'import os\n'), ((2981, 3026), 'os.path.join', 'os.path.join', (['args.input', '"""train_others.json"""'], {}), "(args.input, 'train_others.json')\n", (2993, 3026), False, 'import os\n'), ((3084, 3120), 'os.path.join', 'os.path.join', (['args.input', '"""dev.json"""'], {}), "(args.input, 'dev.json')\n", (3096, 3120), False, 'import os\n'), ((3527, 3553), 'llama_index.SQLDatabase', 'SQLDatabase', ([], {'engine': 'engine'}), '(engine=engine)\n', (3538, 3553), False, 'from llama_index import LLMPredictor, SQLDatabase\n'), ((4243, 4365), 'llama_index.indices.SQLStructStoreIndex.from_documents', 'SQLStructStoreIndex.from_documents', ([], {'documents': '[]', 'llm_predictor': 'llm_predictor', 'sql_database': 'db', 'table_name': 'table_name'}), '(documents=[], llm_predictor=\n llm_predictor, sql_database=db, table_name=table_name)\n', (4277, 4365), False, 'from llama_index.indices import SQLStructStoreIndex\n'), ((4588, 4631), 'os.path.join', 'os.path.join', (['args.output', '"""train_pred.sql"""'], {}), "(args.output, 'train_pred.sql')\n", (4600, 4631), False, 'import os\n'), ((4734, 4775), 'os.path.join', 'os.path.join', (['args.output', '"""dev_pred.sql"""'], {}), "(args.output, 'dev_pred.sql')\n", (4746, 
4775), False, 'import os\n'), ((4101, 4160), 'sqlalchemy.text', 'text', (['"""select name from sqlite_master where type = \'table\'"""'], {}), '("select name from sqlite_master where type = \'table\'")\n', (4105, 4160), False, 'from sqlalchemy import create_engine, text\n')]
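A hedged sketch of the post-processing contract of `_generate_sql` above, using invented stub objects in place of a real `SQLStructStoreIndex` so it runs without a database or an OpenAI key; the stub class names and the sample query are illustrative only. The script itself is intended to be run from the command line with the `--input`, `--output`, and `--model` flags defined in the argparse block.

# Illustrative stubs only: they mimic just the parts of the interface that
# _generate_sql touches (as_query_engine, query, response.metadata).
class _FakeResponse:
    metadata = {"sql_query": "SELECT name\n  FROM   singer\n WHERE  age > 20"}

class _FakeQueryEngine:
    def query(self, nl_query_text):
        return _FakeResponse()

class _FakeIndex:
    def as_query_engine(self):
        return _FakeQueryEngine()

print(_generate_sql(_FakeIndex(), "Which singers are older than 20?"))
# -> SELECT name FROM singer WHERE age > 20   (newlines and repeated spaces collapsed)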
"""Generate SQL queries using LlamaIndex.""" import argparse import json import logging import os import re from typing import Any, cast from llama_index import LLMPredictor, SQLDatabase from llama_index.indices import SQLStructStoreIndex from llama_index.llms.openai import OpenAI from sqlalchemy import create_engine, text from tqdm import tqdm logging.getLogger("root").setLevel(logging.WARNING) _spaces = re.compile(r"\s+") _newlines = re.compile(r"\n+") def _generate_sql( llama_index: SQLStructStoreIndex, nl_query_text: str, ) -> str: """Generate SQL query for the given NL query text.""" query_engine = llama_index.as_query_engine() response = query_engine.query(nl_query_text) if ( response.metadata is None or "sql_query" not in response.metadata or response.metadata["sql_query"] is None ): raise RuntimeError("No SQL query generated.") query = response.metadata["sql_query"] # Remove newlines and extra spaces. query = _newlines.sub(" ", query) query = _spaces.sub(" ", query) return query.strip() def generate_sql(llama_indexes: dict, examples: list, output_file: str) -> None: """Generate SQL queries for the given examples and write them to the output file.""" with open(output_file, "w") as f: for example in tqdm(examples, desc=f"Generating {output_file}"): db_name = example["db_id"] nl_query_text = example["question"] try: sql_query = _generate_sql(llama_indexes[db_name], nl_query_text) except Exception as e: print( f"Failed to generate SQL query for question: " f"{example['question']} on database: {example['db_id']}." ) print(e) sql_query = "ERROR" f.write(sql_query + "\n") if __name__ == "__main__": parser = argparse.ArgumentParser( description="Generate SQL queries using LlamaIndex." ) parser.add_argument( "--input", type=str, required=True, help="Path to the spider dataset directory." ) parser.add_argument( "--output", type=str, required=True, help="Path to the output directory of generated SQL files," " one query on each line, " "to be compared with the *_gold.sql files in the input directory.", ) parser.add_argument( "--model", type=str, choices=["gpt-4", "gpt-3.5-turbo", "text-davinci-003", "code-davinci-002"], required=True, help="The model to use for generating SQL queries.", ) args = parser.parse_args() # Create the output directory if it does not exist. if not os.path.exists(args.output): os.makedirs(args.output) # Load the Spider dataset from the input directory. with open(os.path.join(args.input, "train_spider.json")) as f: train_spider = json.load(f) with open(os.path.join(args.input, "train_others.json")) as f: train_others = json.load(f) with open(os.path.join(args.input, "dev.json")) as f: dev = json.load(f) # Create all necessary SQL database objects. databases = {} for db in train_spider + train_others + dev: db_name = db["db_id"] if db_name in databases: continue db_path = os.path.join(args.input, "database", db_name, db_name + ".sqlite") engine = create_engine("sqlite:///" + db_path) databases[db_name] = (SQLDatabase(engine=engine), engine) # Create the LlamaIndexes for all databases. llm = OpenAI(model=args.model, temperature=0) llm_predictor = LLMPredictor(llm=llm) llm_indexes = {} for db_name, (db, engine) in databases.items(): # Get the name of the first table in the database. # This is a hack to get a table name for the index, which can use any # table in the database. 
with engine.connect() as connection: table_name = cast( Any, connection.execute( text("select name from sqlite_master where type = 'table'") ).fetchone(), )[0] llm_indexes[db_name] = SQLStructStoreIndex.from_documents( documents=[], llm_predictor=llm_predictor, sql_database=db, table_name=table_name, ) # Generate SQL queries. generate_sql( llama_indexes=llm_indexes, examples=train_spider + train_others, output_file=os.path.join(args.output, "train_pred.sql"), ) generate_sql( llama_indexes=llm_indexes, examples=dev, output_file=os.path.join(args.output, "dev_pred.sql"), )
[ "llama_index.llms.openai.OpenAI", "llama_index.indices.SQLStructStoreIndex.from_documents", "llama_index.SQLDatabase", "llama_index.LLMPredictor" ]
[((413, 431), 're.compile', 're.compile', (['"""\\\\s+"""'], {}), "('\\\\s+')\n", (423, 431), False, 'import re\n'), ((444, 462), 're.compile', 're.compile', (['"""\\\\n+"""'], {}), "('\\\\n+')\n", (454, 462), False, 'import re\n'), ((1926, 2003), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate SQL queries using LlamaIndex."""'}), "(description='Generate SQL queries using LlamaIndex.')\n", (1949, 2003), False, 'import argparse\n'), ((3623, 3662), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'args.model', 'temperature': '(0)'}), '(model=args.model, temperature=0)\n', (3629, 3662), False, 'from llama_index.llms.openai import OpenAI\n'), ((3683, 3704), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (3695, 3704), False, 'from llama_index import LLMPredictor, SQLDatabase\n'), ((349, 374), 'logging.getLogger', 'logging.getLogger', (['"""root"""'], {}), "('root')\n", (366, 374), False, 'import logging\n'), ((1329, 1377), 'tqdm.tqdm', 'tqdm', (['examples'], {'desc': 'f"""Generating {output_file}"""'}), "(examples, desc=f'Generating {output_file}')\n", (1333, 1377), False, 'from tqdm import tqdm\n'), ((2745, 2772), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (2759, 2772), False, 'import os\n'), ((2782, 2806), 'os.makedirs', 'os.makedirs', (['args.output'], {}), '(args.output)\n', (2793, 2806), False, 'import os\n'), ((2954, 2966), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2963, 2966), False, 'import json\n'), ((3057, 3069), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3066, 3069), False, 'import json\n'), ((3142, 3154), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3151, 3154), False, 'import json\n'), ((3375, 3441), 'os.path.join', 'os.path.join', (['args.input', '"""database"""', 'db_name', "(db_name + '.sqlite')"], {}), "(args.input, 'database', db_name, db_name + '.sqlite')\n", (3387, 3441), False, 'import os\n'), ((3459, 3496), 'sqlalchemy.create_engine', 'create_engine', (["('sqlite:///' + db_path)"], {}), "('sqlite:///' + db_path)\n", (3472, 3496), False, 'from sqlalchemy import create_engine, text\n'), ((2878, 2923), 'os.path.join', 'os.path.join', (['args.input', '"""train_spider.json"""'], {}), "(args.input, 'train_spider.json')\n", (2890, 2923), False, 'import os\n'), ((2981, 3026), 'os.path.join', 'os.path.join', (['args.input', '"""train_others.json"""'], {}), "(args.input, 'train_others.json')\n", (2993, 3026), False, 'import os\n'), ((3084, 3120), 'os.path.join', 'os.path.join', (['args.input', '"""dev.json"""'], {}), "(args.input, 'dev.json')\n", (3096, 3120), False, 'import os\n'), ((3527, 3553), 'llama_index.SQLDatabase', 'SQLDatabase', ([], {'engine': 'engine'}), '(engine=engine)\n', (3538, 3553), False, 'from llama_index import LLMPredictor, SQLDatabase\n'), ((4243, 4365), 'llama_index.indices.SQLStructStoreIndex.from_documents', 'SQLStructStoreIndex.from_documents', ([], {'documents': '[]', 'llm_predictor': 'llm_predictor', 'sql_database': 'db', 'table_name': 'table_name'}), '(documents=[], llm_predictor=\n llm_predictor, sql_database=db, table_name=table_name)\n', (4277, 4365), False, 'from llama_index.indices import SQLStructStoreIndex\n'), ((4588, 4631), 'os.path.join', 'os.path.join', (['args.output', '"""train_pred.sql"""'], {}), "(args.output, 'train_pred.sql')\n", (4600, 4631), False, 'import os\n'), ((4734, 4775), 'os.path.join', 'os.path.join', (['args.output', '"""dev_pred.sql"""'], {}), "(args.output, 'dev_pred.sql')\n", (4746, 
4775), False, 'import os\n'), ((4101, 4160), 'sqlalchemy.text', 'text', (['"""select name from sqlite_master where type = \'table\'"""'], {}), '("select name from sqlite_master where type = \'table\'")\n', (4105, 4160), False, 'from sqlalchemy import create_engine, text\n')]
"""Utilities for Spider module.""" import json import os from typing import Dict, Tuple from llama_index import LLMPredictor, SQLDatabase from llama_index.indices import SQLStructStoreIndex from llama_index.llms.openai import OpenAI from sqlalchemy import create_engine, text def load_examples(spider_dir: str) -> Tuple[list, list]: """Load examples.""" with open(os.path.join(spider_dir, "train_spider.json")) as f: train_spider = json.load(f) with open(os.path.join(spider_dir, "train_others.json")) as f: train_others = json.load(f) with open(os.path.join(spider_dir, "dev.json")) as f: dev = json.load(f) return train_spider + train_others, dev def create_indexes(spider_dir: str, llm: OpenAI) -> Dict[str, SQLStructStoreIndex]: """Create indexes for all databases.""" # Create all necessary SQL database objects. databases = {} for db_name in os.listdir(os.path.join(spider_dir, "database")): db_path = os.path.join(spider_dir, "database", db_name, db_name + ".sqlite") if not os.path.exists(db_path): continue engine = create_engine("sqlite:///" + db_path) databases[db_name] = SQLDatabase(engine=engine) # Test connection. with engine.connect() as connection: connection.execute( text("select name from sqlite_master where type = 'table'") ).fetchone() llm_predictor = LLMPredictor(llm=llm) llm_indexes = {} for db_name, db in databases.items(): llm_indexes[db_name] = SQLStructStoreIndex( llm_predictor=llm_predictor, sql_database=db, ) return llm_indexes
[ "llama_index.indices.SQLStructStoreIndex", "llama_index.SQLDatabase", "llama_index.LLMPredictor" ]
[((1447, 1468), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (1459, 1468), False, 'from llama_index import LLMPredictor, SQLDatabase\n'), ((452, 464), 'json.load', 'json.load', (['f'], {}), '(f)\n', (461, 464), False, 'import json\n'), ((555, 567), 'json.load', 'json.load', (['f'], {}), '(f)\n', (564, 567), False, 'import json\n'), ((640, 652), 'json.load', 'json.load', (['f'], {}), '(f)\n', (649, 652), False, 'import json\n'), ((925, 961), 'os.path.join', 'os.path.join', (['spider_dir', '"""database"""'], {}), "(spider_dir, 'database')\n", (937, 961), False, 'import os\n'), ((982, 1048), 'os.path.join', 'os.path.join', (['spider_dir', '"""database"""', 'db_name', "(db_name + '.sqlite')"], {}), "(spider_dir, 'database', db_name, db_name + '.sqlite')\n", (994, 1048), False, 'import os\n'), ((1127, 1164), 'sqlalchemy.create_engine', 'create_engine', (["('sqlite:///' + db_path)"], {}), "('sqlite:///' + db_path)\n", (1140, 1164), False, 'from sqlalchemy import create_engine, text\n'), ((1194, 1220), 'llama_index.SQLDatabase', 'SQLDatabase', ([], {'engine': 'engine'}), '(engine=engine)\n', (1205, 1220), False, 'from llama_index import LLMPredictor, SQLDatabase\n'), ((1563, 1628), 'llama_index.indices.SQLStructStoreIndex', 'SQLStructStoreIndex', ([], {'llm_predictor': 'llm_predictor', 'sql_database': 'db'}), '(llm_predictor=llm_predictor, sql_database=db)\n', (1582, 1628), False, 'from llama_index.indices import SQLStructStoreIndex\n'), ((376, 421), 'os.path.join', 'os.path.join', (['spider_dir', '"""train_spider.json"""'], {}), "(spider_dir, 'train_spider.json')\n", (388, 421), False, 'import os\n'), ((479, 524), 'os.path.join', 'os.path.join', (['spider_dir', '"""train_others.json"""'], {}), "(spider_dir, 'train_others.json')\n", (491, 524), False, 'import os\n'), ((582, 618), 'os.path.join', 'os.path.join', (['spider_dir', '"""dev.json"""'], {}), "(spider_dir, 'dev.json')\n", (594, 618), False, 'import os\n'), ((1064, 1087), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (1078, 1087), False, 'import os\n'), ((1341, 1400), 'sqlalchemy.text', 'text', (['"""select name from sqlite_master where type = \'table\'"""'], {}), '("select name from sqlite_master where type = \'table\'")\n', (1345, 1400), False, 'from sqlalchemy import create_engine, text\n')]
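A brief sketch of how the two helpers above might be combined; the `./spider` path and the model name are placeholders, and a real run needs the Spider dataset unpacked at that path plus OpenAI credentials, since `create_indexes` opens every bundled SQLite database and the indexes call the API at query time.

# Illustrative sketch only: load the Spider splits and build one index per database.
train_examples, dev_examples = load_examples("./spider")
indexes = create_indexes("./spider", llm=OpenAI(model="gpt-3.5-turbo", temperature=0))

print(len(train_examples), "training examples,", len(dev_examples), "dev examples")
print("databases indexed:", len(indexes))

example = dev_examples[0]
print(example["db_id"], "->", example["question"])  # each Spider example carries db_id and question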
"""Utilities for Spider module.""" import json import os from typing import Dict, Tuple from llama_index import LLMPredictor, SQLDatabase from llama_index.indices import SQLStructStoreIndex from llama_index.llms.openai import OpenAI from sqlalchemy import create_engine, text def load_examples(spider_dir: str) -> Tuple[list, list]: """Load examples.""" with open(os.path.join(spider_dir, "train_spider.json")) as f: train_spider = json.load(f) with open(os.path.join(spider_dir, "train_others.json")) as f: train_others = json.load(f) with open(os.path.join(spider_dir, "dev.json")) as f: dev = json.load(f) return train_spider + train_others, dev def create_indexes(spider_dir: str, llm: OpenAI) -> Dict[str, SQLStructStoreIndex]: """Create indexes for all databases.""" # Create all necessary SQL database objects. databases = {} for db_name in os.listdir(os.path.join(spider_dir, "database")): db_path = os.path.join(spider_dir, "database", db_name, db_name + ".sqlite") if not os.path.exists(db_path): continue engine = create_engine("sqlite:///" + db_path) databases[db_name] = SQLDatabase(engine=engine) # Test connection. with engine.connect() as connection: connection.execute( text("select name from sqlite_master where type = 'table'") ).fetchone() llm_predictor = LLMPredictor(llm=llm) llm_indexes = {} for db_name, db in databases.items(): llm_indexes[db_name] = SQLStructStoreIndex( llm_predictor=llm_predictor, sql_database=db, ) return llm_indexes
[ "llama_index.indices.SQLStructStoreIndex", "llama_index.SQLDatabase", "llama_index.LLMPredictor" ]
[((1447, 1468), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (1459, 1468), False, 'from llama_index import LLMPredictor, SQLDatabase\n'), ((452, 464), 'json.load', 'json.load', (['f'], {}), '(f)\n', (461, 464), False, 'import json\n'), ((555, 567), 'json.load', 'json.load', (['f'], {}), '(f)\n', (564, 567), False, 'import json\n'), ((640, 652), 'json.load', 'json.load', (['f'], {}), '(f)\n', (649, 652), False, 'import json\n'), ((925, 961), 'os.path.join', 'os.path.join', (['spider_dir', '"""database"""'], {}), "(spider_dir, 'database')\n", (937, 961), False, 'import os\n'), ((982, 1048), 'os.path.join', 'os.path.join', (['spider_dir', '"""database"""', 'db_name', "(db_name + '.sqlite')"], {}), "(spider_dir, 'database', db_name, db_name + '.sqlite')\n", (994, 1048), False, 'import os\n'), ((1127, 1164), 'sqlalchemy.create_engine', 'create_engine', (["('sqlite:///' + db_path)"], {}), "('sqlite:///' + db_path)\n", (1140, 1164), False, 'from sqlalchemy import create_engine, text\n'), ((1194, 1220), 'llama_index.SQLDatabase', 'SQLDatabase', ([], {'engine': 'engine'}), '(engine=engine)\n', (1205, 1220), False, 'from llama_index import LLMPredictor, SQLDatabase\n'), ((1563, 1628), 'llama_index.indices.SQLStructStoreIndex', 'SQLStructStoreIndex', ([], {'llm_predictor': 'llm_predictor', 'sql_database': 'db'}), '(llm_predictor=llm_predictor, sql_database=db)\n', (1582, 1628), False, 'from llama_index.indices import SQLStructStoreIndex\n'), ((376, 421), 'os.path.join', 'os.path.join', (['spider_dir', '"""train_spider.json"""'], {}), "(spider_dir, 'train_spider.json')\n", (388, 421), False, 'import os\n'), ((479, 524), 'os.path.join', 'os.path.join', (['spider_dir', '"""train_others.json"""'], {}), "(spider_dir, 'train_others.json')\n", (491, 524), False, 'import os\n'), ((582, 618), 'os.path.join', 'os.path.join', (['spider_dir', '"""dev.json"""'], {}), "(spider_dir, 'dev.json')\n", (594, 618), False, 'import os\n'), ((1064, 1087), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (1078, 1087), False, 'import os\n'), ((1341, 1400), 'sqlalchemy.text', 'text', (['"""select name from sqlite_master where type = \'table\'"""'], {}), '("select name from sqlite_master where type = \'table\'")\n', (1345, 1400), False, 'from sqlalchemy import create_engine, text\n')]
from collections import ChainMap from typing import ( Any, Dict, List, Optional, Protocol, Sequence, get_args, runtime_checkable, ) from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator from llama_index.legacy.callbacks import CBEventType, EventPayload from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponseAsyncGen, ChatResponseGen, CompletionResponseAsyncGen, CompletionResponseGen, MessageRole, ) from llama_index.legacy.core.query_pipeline.query_component import ( InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable, ) from llama_index.legacy.llms.base import BaseLLM from llama_index.legacy.llms.generic_utils import ( messages_to_prompt as generic_messages_to_prompt, ) from llama_index.legacy.llms.generic_utils import ( prompt_to_messages, ) from llama_index.legacy.prompts import BasePromptTemplate, PromptTemplate from llama_index.legacy.types import ( BaseOutputParser, PydanticProgramMode, TokenAsyncGen, TokenGen, ) # NOTE: These two protocols are needed to appease mypy @runtime_checkable class MessagesToPromptType(Protocol): def __call__(self, messages: Sequence[ChatMessage]) -> str: pass @runtime_checkable class CompletionToPromptType(Protocol): def __call__(self, prompt: str) -> str: pass def stream_completion_response_to_tokens( completion_response_gen: CompletionResponseGen, ) -> TokenGen: """Convert a stream completion response to a stream of tokens.""" def gen() -> TokenGen: for response in completion_response_gen: yield response.delta or "" return gen() def stream_chat_response_to_tokens( chat_response_gen: ChatResponseGen, ) -> TokenGen: """Convert a stream completion response to a stream of tokens.""" def gen() -> TokenGen: for response in chat_response_gen: yield response.delta or "" return gen() async def astream_completion_response_to_tokens( completion_response_gen: CompletionResponseAsyncGen, ) -> TokenAsyncGen: """Convert a stream completion response to a stream of tokens.""" async def gen() -> TokenAsyncGen: async for response in completion_response_gen: yield response.delta or "" return gen() async def astream_chat_response_to_tokens( chat_response_gen: ChatResponseAsyncGen, ) -> TokenAsyncGen: """Convert a stream completion response to a stream of tokens.""" async def gen() -> TokenAsyncGen: async for response in chat_response_gen: yield response.delta or "" return gen() def default_completion_to_prompt(prompt: str) -> str: return prompt class LLM(BaseLLM): system_prompt: Optional[str] = Field( default=None, description="System prompt for LLM calls." 
) messages_to_prompt: MessagesToPromptType = Field( description="Function to convert a list of messages to an LLM prompt.", default=generic_messages_to_prompt, exclude=True, ) completion_to_prompt: CompletionToPromptType = Field( description="Function to convert a completion to an LLM prompt.", default=default_completion_to_prompt, exclude=True, ) output_parser: Optional[BaseOutputParser] = Field( description="Output parser to parse, validate, and correct errors programmatically.", default=None, exclude=True, ) pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT # deprecated query_wrapper_prompt: Optional[BasePromptTemplate] = Field( description="Query wrapper prompt for LLM calls.", default=None, exclude=True, ) @validator("messages_to_prompt", pre=True) def set_messages_to_prompt( cls, messages_to_prompt: Optional[MessagesToPromptType] ) -> MessagesToPromptType: return messages_to_prompt or generic_messages_to_prompt @validator("completion_to_prompt", pre=True) def set_completion_to_prompt( cls, completion_to_prompt: Optional[CompletionToPromptType] ) -> CompletionToPromptType: return completion_to_prompt or default_completion_to_prompt def _log_template_data( self, prompt: BasePromptTemplate, **prompt_args: Any ) -> None: template_vars = { k: v for k, v in ChainMap(prompt.kwargs, prompt_args).items() if k in prompt.template_vars } with self.callback_manager.event( CBEventType.TEMPLATING, payload={ EventPayload.TEMPLATE: prompt.get_template(llm=self), EventPayload.TEMPLATE_VARS: template_vars, EventPayload.SYSTEM_PROMPT: self.system_prompt, EventPayload.QUERY_WRAPPER_PROMPT: self.query_wrapper_prompt, }, ): pass def _get_prompt(self, prompt: BasePromptTemplate, **prompt_args: Any) -> str: formatted_prompt = prompt.format( llm=self, messages_to_prompt=self.messages_to_prompt, completion_to_prompt=self.completion_to_prompt, **prompt_args, ) if self.output_parser is not None: formatted_prompt = self.output_parser.format(formatted_prompt) return self._extend_prompt(formatted_prompt) def _get_messages( self, prompt: BasePromptTemplate, **prompt_args: Any ) -> List[ChatMessage]: messages = prompt.format_messages(llm=self, **prompt_args) if self.output_parser is not None: messages = self.output_parser.format_messages(messages) return self._extend_messages(messages) def structured_predict( self, output_cls: BaseModel, prompt: PromptTemplate, **prompt_args: Any, ) -> BaseModel: from llama_index.legacy.program.utils import get_program_for_llm program = get_program_for_llm( output_cls, prompt, self, pydantic_program_mode=self.pydantic_program_mode, ) return program(**prompt_args) async def astructured_predict( self, output_cls: BaseModel, prompt: PromptTemplate, **prompt_args: Any, ) -> BaseModel: from llama_index.legacy.program.utils import get_program_for_llm program = get_program_for_llm( output_cls, prompt, self, pydantic_program_mode=self.pydantic_program_mode, ) return await program.acall(**prompt_args) def _parse_output(self, output: str) -> str: if self.output_parser is not None: return str(self.output_parser.parse(output)) return output def predict( self, prompt: BasePromptTemplate, **prompt_args: Any, ) -> str: """Predict.""" self._log_template_data(prompt, **prompt_args) if self.metadata.is_chat_model: messages = self._get_messages(prompt, **prompt_args) chat_response = self.chat(messages) output = chat_response.message.content or "" else: formatted_prompt = self._get_prompt(prompt, **prompt_args) response = self.complete(formatted_prompt, formatted=True) output = 
response.text return self._parse_output(output) def stream( self, prompt: BasePromptTemplate, **prompt_args: Any, ) -> TokenGen: """Stream.""" self._log_template_data(prompt, **prompt_args) if self.metadata.is_chat_model: messages = self._get_messages(prompt, **prompt_args) chat_response = self.stream_chat(messages) stream_tokens = stream_chat_response_to_tokens(chat_response) else: formatted_prompt = self._get_prompt(prompt, **prompt_args) stream_response = self.stream_complete(formatted_prompt, formatted=True) stream_tokens = stream_completion_response_to_tokens(stream_response) if prompt.output_parser is not None or self.output_parser is not None: raise NotImplementedError("Output parser is not supported for streaming.") return stream_tokens async def apredict( self, prompt: BasePromptTemplate, **prompt_args: Any, ) -> str: """Async predict.""" self._log_template_data(prompt, **prompt_args) if self.metadata.is_chat_model: messages = self._get_messages(prompt, **prompt_args) chat_response = await self.achat(messages) output = chat_response.message.content or "" else: formatted_prompt = self._get_prompt(prompt, **prompt_args) response = await self.acomplete(formatted_prompt, formatted=True) output = response.text return self._parse_output(output) async def astream( self, prompt: BasePromptTemplate, **prompt_args: Any, ) -> TokenAsyncGen: """Async stream.""" self._log_template_data(prompt, **prompt_args) if self.metadata.is_chat_model: messages = self._get_messages(prompt, **prompt_args) chat_response = await self.astream_chat(messages) stream_tokens = await astream_chat_response_to_tokens(chat_response) else: formatted_prompt = self._get_prompt(prompt, **prompt_args) stream_response = await self.astream_complete( formatted_prompt, formatted=True ) stream_tokens = await astream_completion_response_to_tokens(stream_response) if prompt.output_parser is not None or self.output_parser is not None: raise NotImplementedError("Output parser is not supported for streaming.") return stream_tokens def _extend_prompt( self, formatted_prompt: str, ) -> str: """Add system and query wrapper prompts to base prompt.""" extended_prompt = formatted_prompt if self.system_prompt: extended_prompt = self.system_prompt + "\n\n" + extended_prompt if self.query_wrapper_prompt: extended_prompt = self.query_wrapper_prompt.format( query_str=extended_prompt ) return extended_prompt def _extend_messages(self, messages: List[ChatMessage]) -> List[ChatMessage]: """Add system prompt to chat message list.""" if self.system_prompt: messages = [ ChatMessage(role=MessageRole.SYSTEM, content=self.system_prompt), *messages, ] return messages def _as_query_component(self, **kwargs: Any) -> QueryComponent: """Return query component.""" if self.metadata.is_chat_model: return LLMChatComponent(llm=self, **kwargs) else: return LLMCompleteComponent(llm=self, **kwargs) class BaseLLMComponent(QueryComponent): """Base LLM component.""" llm: LLM = Field(..., description="LLM") streaming: bool = Field(default=False, description="Streaming mode") class Config: arbitrary_types_allowed = True def set_callback_manager(self, callback_manager: Any) -> None: """Set callback manager.""" self.llm.callback_manager = callback_manager class LLMCompleteComponent(BaseLLMComponent): """LLM completion component.""" def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]: """Validate component inputs during run_component.""" if "prompt" not in input: raise ValueError("Prompt must be in input dict.") # do special check to see if prompt 
is a list of chat messages if isinstance(input["prompt"], get_args(List[ChatMessage])): input["prompt"] = self.llm.messages_to_prompt(input["prompt"]) input["prompt"] = validate_and_convert_stringable(input["prompt"]) else: input["prompt"] = validate_and_convert_stringable(input["prompt"]) input["prompt"] = self.llm.completion_to_prompt(input["prompt"]) return input def _run_component(self, **kwargs: Any) -> Any: """Run component.""" # TODO: support only complete for now # non-trivial to figure how to support chat/complete/etc. prompt = kwargs["prompt"] # ignore all other kwargs for now if self.streaming: response = self.llm.stream_complete(prompt, formatted=True) else: response = self.llm.complete(prompt, formatted=True) return {"output": response} async def _arun_component(self, **kwargs: Any) -> Any: """Run component.""" # TODO: support only complete for now # non-trivial to figure how to support chat/complete/etc. prompt = kwargs["prompt"] # ignore all other kwargs for now response = await self.llm.acomplete(prompt, formatted=True) return {"output": response} @property def input_keys(self) -> InputKeys: """Input keys.""" # TODO: support only complete for now return InputKeys.from_keys({"prompt"}) @property def output_keys(self) -> OutputKeys: """Output keys.""" return OutputKeys.from_keys({"output"}) class LLMChatComponent(BaseLLMComponent): """LLM chat component.""" def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]: """Validate component inputs during run_component.""" if "messages" not in input: raise ValueError("Messages must be in input dict.") # if `messages` is a string, convert to a list of chat message if isinstance(input["messages"], get_args(StringableInput)): input["messages"] = validate_and_convert_stringable(input["messages"]) input["messages"] = prompt_to_messages(str(input["messages"])) for message in input["messages"]: if not isinstance(message, ChatMessage): raise ValueError("Messages must be a list of ChatMessage") return input def _run_component(self, **kwargs: Any) -> Any: """Run component.""" # TODO: support only complete for now # non-trivial to figure how to support chat/complete/etc. messages = kwargs["messages"] if self.streaming: response = self.llm.stream_chat(messages) else: response = self.llm.chat(messages) return {"output": response} async def _arun_component(self, **kwargs: Any) -> Any: """Run component.""" # TODO: support only complete for now # non-trivial to figure how to support chat/complete/etc. messages = kwargs["messages"] if self.streaming: response = await self.llm.astream_chat(messages) else: response = await self.llm.achat(messages) return {"output": response} @property def input_keys(self) -> InputKeys: """Input keys.""" # TODO: support only complete for now return InputKeys.from_keys({"messages"}) @property def output_keys(self) -> OutputKeys: """Output keys.""" return OutputKeys.from_keys({"output"})
[ "llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys", "llama_index.legacy.core.llms.types.ChatMessage", "llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable", "llama_index.legacy.program.utils.get_program_for_llm", "llama_index.legacy.bridge.pydantic.validator" ]
[((2828, 2891), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""System prompt for LLM calls."""'}), "(default=None, description='System prompt for LLM calls.')\n", (2833, 2891), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((2953, 3090), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Function to convert a list of messages to an LLM prompt."""', 'default': 'generic_messages_to_prompt', 'exclude': '(True)'}), "(description=\n 'Function to convert a list of messages to an LLM prompt.', default=\n generic_messages_to_prompt, exclude=True)\n", (2958, 3090), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((3163, 3290), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Function to convert a completion to an LLM prompt."""', 'default': 'default_completion_to_prompt', 'exclude': '(True)'}), "(description='Function to convert a completion to an LLM prompt.',\n default=default_completion_to_prompt, exclude=True)\n", (3168, 3290), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((3366, 3494), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Output parser to parse, validate, and correct errors programmatically."""', 'default': 'None', 'exclude': '(True)'}), "(description=\n 'Output parser to parse, validate, and correct errors programmatically.',\n default=None, exclude=True)\n", (3371, 3494), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((3669, 3757), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Query wrapper prompt for LLM calls."""', 'default': 'None', 'exclude': '(True)'}), "(description='Query wrapper prompt for LLM calls.', default=None,\n exclude=True)\n", (3674, 3757), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((3791, 3832), 'llama_index.legacy.bridge.pydantic.validator', 'validator', (['"""messages_to_prompt"""'], {'pre': '(True)'}), "('messages_to_prompt', pre=True)\n", (3800, 3832), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((4030, 4073), 'llama_index.legacy.bridge.pydantic.validator', 'validator', (['"""completion_to_prompt"""'], {'pre': '(True)'}), "('completion_to_prompt', pre=True)\n", (4039, 4073), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((11157, 11186), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""LLM"""'}), "(..., description='LLM')\n", (11162, 11186), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((11209, 11259), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Streaming mode"""'}), "(default=False, description='Streaming mode')\n", (11214, 11259), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((6014, 6114), 'llama_index.legacy.program.utils.get_program_for_llm', 'get_program_for_llm', (['output_cls', 'prompt', 'self'], {'pydantic_program_mode': 'self.pydantic_program_mode'}), '(output_cls, prompt, self, pydantic_program_mode=self.\n pydantic_program_mode)\n', (6033, 6114), False, 'from llama_index.legacy.program.utils import get_program_for_llm\n'), ((6461, 6561), 'llama_index.legacy.program.utils.get_program_for_llm', 'get_program_for_llm', 
(['output_cls', 'prompt', 'self'], {'pydantic_program_mode': 'self.pydantic_program_mode'}), '(output_cls, prompt, self, pydantic_program_mode=self.\n pydantic_program_mode)\n', (6480, 6561), False, 'from llama_index.legacy.program.utils import get_program_for_llm\n'), ((13294, 13325), 'llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys', 'InputKeys.from_keys', (["{'prompt'}"], {}), "({'prompt'})\n", (13313, 13325), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((13424, 13456), 'llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (13444, 13456), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((15250, 15283), 'llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys', 'InputKeys.from_keys', (["{'messages'}"], {}), "({'messages'})\n", (15269, 15283), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((15382, 15414), 'llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (15402, 15414), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((11912, 11939), 'typing.get_args', 'get_args', (['List[ChatMessage]'], {}), '(List[ChatMessage])\n', (11920, 11939), False, 'from typing import Any, Dict, List, Optional, Protocol, Sequence, get_args, runtime_checkable\n'), ((12047, 12095), 'llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['prompt']"], {}), "(input['prompt'])\n", (12078, 12095), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((12140, 12188), 'llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['prompt']"], {}), "(input['prompt'])\n", (12171, 12188), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((13890, 13915), 'typing.get_args', 'get_args', (['StringableInput'], {}), '(StringableInput)\n', (13898, 13915), False, 'from typing import Any, Dict, List, Optional, Protocol, Sequence, get_args, runtime_checkable\n'), ((13950, 14000), 'llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['messages']"], {}), "(input['messages'])\n", (13981, 14000), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((10661, 10725), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': 'self.system_prompt'}), '(role=MessageRole.SYSTEM, content=self.system_prompt)\n', (10672, 10725), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponseAsyncGen, 
ChatResponseGen, CompletionResponseAsyncGen, CompletionResponseGen, MessageRole\n'), ((4449, 4485), 'collections.ChainMap', 'ChainMap', (['prompt.kwargs', 'prompt_args'], {}), '(prompt.kwargs, prompt_args)\n', (4457, 4485), False, 'from collections import ChainMap\n')]
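A minimal, self-contained sketch (not part of the record above) of what the stream_completion_response_to_tokens helper defined in that record does: it collapses a stream of response objects into their text deltas, substituting "" when a delta is missing. The SimpleDelta stand-in is an assumption for illustration only; it merely mimics the .delta attribute the helper reads, and the helper body is copied from the record so the sketch runs without llama_index installed.

from dataclasses import dataclass
from typing import Iterator, Optional


@dataclass
class SimpleDelta:
    """Hypothetical stand-in exposing the .delta attribute the helper expects."""

    delta: Optional[str] = None


def stream_completion_response_to_tokens(completion_response_gen):
    """Same logic as the helper in the record: yield each delta, or "" if None."""

    def gen():
        for response in completion_response_gen:
            yield response.delta or ""

    return gen()


def fake_stream() -> Iterator[SimpleDelta]:
    yield SimpleDelta("Hello")
    yield SimpleDelta(", ")
    yield SimpleDelta(None)  # a missing delta becomes ""
    yield SimpleDelta("world")


if __name__ == "__main__":
    print("".join(stream_completion_response_to_tokens(fake_stream())))  # Hello, world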
"""Base reader class.""" from abc import ABC from typing import TYPE_CHECKING, Any, Dict, Iterable, List if TYPE_CHECKING: from llama_index.core.bridge.langchain import Document as LCDocument from llama_index.core.bridge.pydantic import Field from llama_index.core.schema import BaseComponent, Document class BaseReader(ABC): """Utilities for loading data from a directory.""" def lazy_load_data(self, *args: Any, **load_kwargs: Any) -> Iterable[Document]: """Load data from the input directory lazily.""" raise NotImplementedError( f"{self.__class__.__name__} does not provide lazy_load_data method currently" ) def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]: """Load data from the input directory.""" return list(self.lazy_load_data(*args, **load_kwargs)) def load_langchain_documents(self, **load_kwargs: Any) -> List["LCDocument"]: """Load data in LangChain document format.""" docs = self.load_data(**load_kwargs) return [d.to_langchain_format() for d in docs] class BasePydanticReader(BaseReader, BaseComponent): """Serialiable Data Loader with Pydantic.""" is_remote: bool = Field( default=False, description="Whether the data is loaded from a remote API or a local file.", ) class Config: arbitrary_types_allowed = True class ReaderConfig(BaseComponent): """Represents a reader and it's input arguments.""" reader: BasePydanticReader = Field(..., description="Reader to use.") reader_args: List[Any] = Field(default_factory=list, description="Reader args.") reader_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Reader kwargs." ) class Config: arbitrary_types_allowed = True @classmethod def class_name(cls) -> str: """Get the name identifier of the class.""" return "ReaderConfig" def to_dict(self, **kwargs: Any) -> Dict[str, Any]: """Convert the class to a dictionary.""" return { "loader": self.reader.to_dict(**kwargs), "reader_args": self.reader_args, "reader_kwargs": self.reader_kwargs, "class_name": self.class_name(), } def read(self) -> List[Document]: """Call the loader with the given arguments.""" return self.reader.load_data(*self.reader_args, **self.reader_kwargs)
[ "llama_index.core.bridge.pydantic.Field" ]
[((1219, 1321), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the data is loaded from a remote API or a local file."""'}), "(default=False, description=\n 'Whether the data is loaded from a remote API or a local file.')\n", (1224, 1321), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1525, 1565), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Reader to use."""'}), "(..., description='Reader to use.')\n", (1530, 1565), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1595, 1650), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""Reader args."""'}), "(default_factory=list, description='Reader args.')\n", (1600, 1650), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1687, 1744), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Reader kwargs."""'}), "(default_factory=dict, description='Reader kwargs.')\n", (1692, 1744), False, 'from llama_index.core.bridge.pydantic import Field\n')]
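A hedged usage sketch, not part of the record above: the simplest way to use the BaseReader interface it defines is to subclass it and implement lazy_load_data, after which load_data and load_langchain_documents come for free. The InMemoryReader name is hypothetical; the sketch assumes llama-index-core is installed and that Document accepts a text keyword, matching the schema import in the record.

from typing import Any, Iterable, List

from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document


class InMemoryReader(BaseReader):
    """Hypothetical reader that wraps a list of in-memory strings."""

    def __init__(self, texts: List[str]) -> None:
        self._texts = texts

    def lazy_load_data(self, *args: Any, **load_kwargs: Any) -> Iterable[Document]:
        # Yield one Document per string; load_data() materializes the list.
        for text in self._texts:
            yield Document(text=text)


if __name__ == "__main__":
    reader = InMemoryReader(["first chunk", "second chunk"])
    docs = reader.load_data()  # provided by BaseReader on top of lazy_load_data
    print(len(docs), docs[0].text)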
"""Base reader class.""" from abc import ABC from typing import TYPE_CHECKING, Any, Dict, Iterable, List if TYPE_CHECKING: from llama_index.core.bridge.langchain import Document as LCDocument from llama_index.core.bridge.pydantic import Field from llama_index.core.schema import BaseComponent, Document class BaseReader(ABC): """Utilities for loading data from a directory.""" def lazy_load_data(self, *args: Any, **load_kwargs: Any) -> Iterable[Document]: """Load data from the input directory lazily.""" raise NotImplementedError( f"{self.__class__.__name__} does not provide lazy_load_data method currently" ) def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]: """Load data from the input directory.""" return list(self.lazy_load_data(*args, **load_kwargs)) def load_langchain_documents(self, **load_kwargs: Any) -> List["LCDocument"]: """Load data in LangChain document format.""" docs = self.load_data(**load_kwargs) return [d.to_langchain_format() for d in docs] class BasePydanticReader(BaseReader, BaseComponent): """Serialiable Data Loader with Pydantic.""" is_remote: bool = Field( default=False, description="Whether the data is loaded from a remote API or a local file.", ) class Config: arbitrary_types_allowed = True class ReaderConfig(BaseComponent): """Represents a reader and it's input arguments.""" reader: BasePydanticReader = Field(..., description="Reader to use.") reader_args: List[Any] = Field(default_factory=list, description="Reader args.") reader_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Reader kwargs." ) class Config: arbitrary_types_allowed = True @classmethod def class_name(cls) -> str: """Get the name identifier of the class.""" return "ReaderConfig" def to_dict(self, **kwargs: Any) -> Dict[str, Any]: """Convert the class to a dictionary.""" return { "loader": self.reader.to_dict(**kwargs), "reader_args": self.reader_args, "reader_kwargs": self.reader_kwargs, "class_name": self.class_name(), } def read(self) -> List[Document]: """Call the loader with the given arguments.""" return self.reader.load_data(*self.reader_args, **self.reader_kwargs)
[ "llama_index.core.bridge.pydantic.Field" ]
[((1219, 1321), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the data is loaded from a remote API or a local file."""'}), "(default=False, description=\n 'Whether the data is loaded from a remote API or a local file.')\n", (1224, 1321), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1525, 1565), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Reader to use."""'}), "(..., description='Reader to use.')\n", (1530, 1565), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1595, 1650), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""Reader args."""'}), "(default_factory=list, description='Reader args.')\n", (1600, 1650), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1687, 1744), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Reader kwargs."""'}), "(default_factory=dict, description='Reader kwargs.')\n", (1692, 1744), False, 'from llama_index.core.bridge.pydantic import Field\n')]
"""Base object types.""" import pickle import warnings from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.base.query_pipeline.query import ( ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable, ) from llama_index.core.bridge.pydantic import Field from llama_index.core.callbacks.base import CallbackManager from llama_index.core.indices.base import BaseIndex from llama_index.core.indices.vector_store.base import VectorStoreIndex from llama_index.core.objects.base_node_mapping import ( DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping, ) from llama_index.core.schema import QueryType from llama_index.core.storage.storage_context import ( DEFAULT_PERSIST_DIR, StorageContext, ) OT = TypeVar("OT") class ObjectRetriever(ChainableMixin, Generic[OT]): """Object retriever.""" def __init__( self, retriever: BaseRetriever, object_node_mapping: BaseObjectNodeMapping[OT] ): self._retriever = retriever self._object_node_mapping = object_node_mapping @property def retriever(self) -> BaseRetriever: """Retriever.""" return self._retriever def retrieve(self, str_or_query_bundle: QueryType) -> List[OT]: nodes = self._retriever.retrieve(str_or_query_bundle) return [self._object_node_mapping.from_node(node.node) for node in nodes] async def aretrieve(self, str_or_query_bundle: QueryType) -> List[OT]: nodes = await self._retriever.aretrieve(str_or_query_bundle) return [self._object_node_mapping.from_node(node.node) for node in nodes] def _as_query_component(self, **kwargs: Any) -> QueryComponent: """As query component.""" return ObjectRetrieverComponent(retriever=self) class ObjectRetrieverComponent(QueryComponent): """Object retriever component.""" retriever: ObjectRetriever = Field(..., description="Retriever.") class Config: arbitrary_types_allowed = True def set_callback_manager(self, callback_manager: CallbackManager) -> None: """Set callback manager.""" self.retriever.retriever.callback_manager = callback_manager def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]: """Validate component inputs during run_component.""" # make sure input is a string input["input"] = validate_and_convert_stringable(input["input"]) return input def _run_component(self, **kwargs: Any) -> Any: """Run component.""" output = self.retriever.retrieve(kwargs["input"]) return {"output": output} async def _arun_component(self, **kwargs: Any) -> Any: """Run component (async).""" output = await self.retriever.aretrieve(kwargs["input"]) return {"output": output} @property def input_keys(self) -> InputKeys: """Input keys.""" return InputKeys.from_keys({"input"}) @property def output_keys(self) -> OutputKeys: """Output keys.""" return OutputKeys.from_keys({"output"}) class ObjectIndex(Generic[OT]): """Object index.""" def __init__( self, index: BaseIndex, object_node_mapping: BaseObjectNodeMapping ) -> None: self._index = index self._object_node_mapping = object_node_mapping @classmethod def from_objects( cls, objects: Sequence[OT], object_mapping: Optional[BaseObjectNodeMapping] = None, index_cls: Type[BaseIndex] = VectorStoreIndex, **index_kwargs: Any, ) -> "ObjectIndex": if object_mapping is None: object_mapping = SimpleObjectNodeMapping.from_objects(objects) nodes = object_mapping.to_nodes(objects) index = index_cls(nodes, **index_kwargs) return cls(index, object_mapping) def insert_object(self, obj: Any) -> None: 
self._object_node_mapping.add_object(obj) node = self._object_node_mapping.to_node(obj) self._index.insert_nodes([node]) def as_retriever(self, **kwargs: Any) -> ObjectRetriever: return ObjectRetriever( retriever=self._index.as_retriever(**kwargs), object_node_mapping=self._object_node_mapping, ) def as_node_retriever(self, **kwargs: Any) -> BaseRetriever: return self._index.as_retriever(**kwargs) def persist( self, persist_dir: str = DEFAULT_PERSIST_DIR, obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME, ) -> None: # try to persist object node mapping try: self._object_node_mapping.persist( persist_dir=persist_dir, obj_node_mapping_fname=obj_node_mapping_fname ) except (NotImplementedError, pickle.PickleError) as err: warnings.warn( ( "Unable to persist ObjectNodeMapping. You will need to " "reconstruct the same object node mapping to build this ObjectIndex" ), stacklevel=2, ) self._index._storage_context.persist(persist_dir=persist_dir) @classmethod def from_persist_dir( cls, persist_dir: str = DEFAULT_PERSIST_DIR, object_node_mapping: Optional[BaseObjectNodeMapping] = None, ) -> "ObjectIndex": from llama_index.core.indices import load_index_from_storage storage_context = StorageContext.from_defaults(persist_dir=persist_dir) index = load_index_from_storage(storage_context) if object_node_mapping: return cls(index=index, object_node_mapping=object_node_mapping) else: # try to load object_node_mapping # assume SimpleObjectNodeMapping for simplicity as its only subclass # that supports this method try: object_node_mapping = SimpleObjectNodeMapping.from_persist_dir( persist_dir=persist_dir ) except Exception as err: raise Exception( "Unable to load from persist dir. The object_node_mapping cannot be loaded." ) from err else: return cls(index=index, object_node_mapping=object_node_mapping)
[ "llama_index.core.indices.load_index_from_storage", "llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects", "llama_index.core.base.query_pipeline.query.InputKeys.from_keys", "llama_index.core.base.query_pipeline.query.OutputKeys.from_keys", "llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir", "llama_index.core.bridge.pydantic.Field", "llama_index.core.base.query_pipeline.query.validate_and_convert_stringable", "llama_index.core.storage.storage_context.StorageContext.from_defaults" ]
[((897, 910), 'typing.TypeVar', 'TypeVar', (['"""OT"""'], {}), "('OT')\n", (904, 910), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar\n'), ((2032, 2068), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever."""'}), "(..., description='Retriever.')\n", (2037, 2068), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2521, 2568), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['input']"], {}), "(input['input'])\n", (2552, 2568), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((3055, 3085), 'llama_index.core.base.query_pipeline.query.InputKeys.from_keys', 'InputKeys.from_keys', (["{'input'}"], {}), "({'input'})\n", (3074, 3085), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((3184, 3216), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (3204, 3216), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((5570, 5623), 'llama_index.core.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (5598, 5623), False, 'from llama_index.core.storage.storage_context import DEFAULT_PERSIST_DIR, StorageContext\n'), ((5640, 5680), 'llama_index.core.indices.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (5663, 5680), False, 'from llama_index.core.indices import load_index_from_storage\n'), ((3788, 3833), 'llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects', 'SimpleObjectNodeMapping.from_objects', (['objects'], {}), '(objects)\n', (3824, 3833), False, 'from llama_index.core.objects.base_node_mapping import DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping\n'), ((4944, 5105), 'warnings.warn', 'warnings.warn', (['"""Unable to persist ObjectNodeMapping. You will need to reconstruct the same object node mapping to build this ObjectIndex"""'], {'stacklevel': '(2)'}), "(\n 'Unable to persist ObjectNodeMapping. You will need to reconstruct the same object node mapping to build this ObjectIndex'\n , stacklevel=2)\n", (4957, 5105), False, 'import warnings\n'), ((6026, 6091), 'llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir', 'SimpleObjectNodeMapping.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (6066, 6091), False, 'from llama_index.core.objects.base_node_mapping import DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping\n')]
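A hedged end-to-end sketch, not part of the record above, of the ObjectIndex flow that record implements: arbitrary Python objects are mapped to nodes via SimpleObjectNodeMapping, indexed with the default VectorStoreIndex, and recovered through the ObjectRetriever. It assumes llama-index-core is installed and that an embedding model is configured (the default build uses OpenAI embeddings unless Settings points at a local embed model); the tool-name strings are made up for illustration.

from llama_index.core.objects import ObjectIndex, SimpleObjectNodeMapping

# Any hashable, picklable Python objects work; plain strings keep the sketch small.
tools = ["lookup_weather", "lookup_stock_price", "send_email"]

mapping = SimpleObjectNodeMapping.from_objects(tools)
obj_index = ObjectIndex.from_objects(tools, object_mapping=mapping)

# as_retriever forwards its kwargs to the underlying index retriever.
retriever = obj_index.as_retriever(similarity_top_k=1)
print(retriever.retrieve("what is the weather today?"))  # likely ['lookup_weather']

# Persisting stores the underlying index; the object-node mapping is persisted too
# when it supports it (see the warning path in the record).
obj_index.persist(persist_dir="./obj_index_storage")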
"""Base object types.""" import pickle import warnings from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.base.query_pipeline.query import ( ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable, ) from llama_index.core.bridge.pydantic import Field from llama_index.core.callbacks.base import CallbackManager from llama_index.core.indices.base import BaseIndex from llama_index.core.indices.vector_store.base import VectorStoreIndex from llama_index.core.objects.base_node_mapping import ( DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping, ) from llama_index.core.schema import QueryType from llama_index.core.storage.storage_context import ( DEFAULT_PERSIST_DIR, StorageContext, ) OT = TypeVar("OT") class ObjectRetriever(ChainableMixin, Generic[OT]): """Object retriever.""" def __init__( self, retriever: BaseRetriever, object_node_mapping: BaseObjectNodeMapping[OT] ): self._retriever = retriever self._object_node_mapping = object_node_mapping @property def retriever(self) -> BaseRetriever: """Retriever.""" return self._retriever def retrieve(self, str_or_query_bundle: QueryType) -> List[OT]: nodes = self._retriever.retrieve(str_or_query_bundle) return [self._object_node_mapping.from_node(node.node) for node in nodes] async def aretrieve(self, str_or_query_bundle: QueryType) -> List[OT]: nodes = await self._retriever.aretrieve(str_or_query_bundle) return [self._object_node_mapping.from_node(node.node) for node in nodes] def _as_query_component(self, **kwargs: Any) -> QueryComponent: """As query component.""" return ObjectRetrieverComponent(retriever=self) class ObjectRetrieverComponent(QueryComponent): """Object retriever component.""" retriever: ObjectRetriever = Field(..., description="Retriever.") class Config: arbitrary_types_allowed = True def set_callback_manager(self, callback_manager: CallbackManager) -> None: """Set callback manager.""" self.retriever.retriever.callback_manager = callback_manager def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]: """Validate component inputs during run_component.""" # make sure input is a string input["input"] = validate_and_convert_stringable(input["input"]) return input def _run_component(self, **kwargs: Any) -> Any: """Run component.""" output = self.retriever.retrieve(kwargs["input"]) return {"output": output} async def _arun_component(self, **kwargs: Any) -> Any: """Run component (async).""" output = await self.retriever.aretrieve(kwargs["input"]) return {"output": output} @property def input_keys(self) -> InputKeys: """Input keys.""" return InputKeys.from_keys({"input"}) @property def output_keys(self) -> OutputKeys: """Output keys.""" return OutputKeys.from_keys({"output"}) class ObjectIndex(Generic[OT]): """Object index.""" def __init__( self, index: BaseIndex, object_node_mapping: BaseObjectNodeMapping ) -> None: self._index = index self._object_node_mapping = object_node_mapping @classmethod def from_objects( cls, objects: Sequence[OT], object_mapping: Optional[BaseObjectNodeMapping] = None, index_cls: Type[BaseIndex] = VectorStoreIndex, **index_kwargs: Any, ) -> "ObjectIndex": if object_mapping is None: object_mapping = SimpleObjectNodeMapping.from_objects(objects) nodes = object_mapping.to_nodes(objects) index = index_cls(nodes, **index_kwargs) return cls(index, object_mapping) def insert_object(self, obj: Any) -> None: 
self._object_node_mapping.add_object(obj) node = self._object_node_mapping.to_node(obj) self._index.insert_nodes([node]) def as_retriever(self, **kwargs: Any) -> ObjectRetriever: return ObjectRetriever( retriever=self._index.as_retriever(**kwargs), object_node_mapping=self._object_node_mapping, ) def as_node_retriever(self, **kwargs: Any) -> BaseRetriever: return self._index.as_retriever(**kwargs) def persist( self, persist_dir: str = DEFAULT_PERSIST_DIR, obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME, ) -> None: # try to persist object node mapping try: self._object_node_mapping.persist( persist_dir=persist_dir, obj_node_mapping_fname=obj_node_mapping_fname ) except (NotImplementedError, pickle.PickleError) as err: warnings.warn( ( "Unable to persist ObjectNodeMapping. You will need to " "reconstruct the same object node mapping to build this ObjectIndex" ), stacklevel=2, ) self._index._storage_context.persist(persist_dir=persist_dir) @classmethod def from_persist_dir( cls, persist_dir: str = DEFAULT_PERSIST_DIR, object_node_mapping: Optional[BaseObjectNodeMapping] = None, ) -> "ObjectIndex": from llama_index.core.indices import load_index_from_storage storage_context = StorageContext.from_defaults(persist_dir=persist_dir) index = load_index_from_storage(storage_context) if object_node_mapping: return cls(index=index, object_node_mapping=object_node_mapping) else: # try to load object_node_mapping # assume SimpleObjectNodeMapping for simplicity as its only subclass # that supports this method try: object_node_mapping = SimpleObjectNodeMapping.from_persist_dir( persist_dir=persist_dir ) except Exception as err: raise Exception( "Unable to load from persist dir. The object_node_mapping cannot be loaded." ) from err else: return cls(index=index, object_node_mapping=object_node_mapping)
[ "llama_index.core.indices.load_index_from_storage", "llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects", "llama_index.core.base.query_pipeline.query.InputKeys.from_keys", "llama_index.core.base.query_pipeline.query.OutputKeys.from_keys", "llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir", "llama_index.core.bridge.pydantic.Field", "llama_index.core.base.query_pipeline.query.validate_and_convert_stringable", "llama_index.core.storage.storage_context.StorageContext.from_defaults" ]
[((897, 910), 'typing.TypeVar', 'TypeVar', (['"""OT"""'], {}), "('OT')\n", (904, 910), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar\n'), ((2032, 2068), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever."""'}), "(..., description='Retriever.')\n", (2037, 2068), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2521, 2568), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['input']"], {}), "(input['input'])\n", (2552, 2568), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((3055, 3085), 'llama_index.core.base.query_pipeline.query.InputKeys.from_keys', 'InputKeys.from_keys', (["{'input'}"], {}), "({'input'})\n", (3074, 3085), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((3184, 3216), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (3204, 3216), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((5570, 5623), 'llama_index.core.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (5598, 5623), False, 'from llama_index.core.storage.storage_context import DEFAULT_PERSIST_DIR, StorageContext\n'), ((5640, 5680), 'llama_index.core.indices.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (5663, 5680), False, 'from llama_index.core.indices import load_index_from_storage\n'), ((3788, 3833), 'llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects', 'SimpleObjectNodeMapping.from_objects', (['objects'], {}), '(objects)\n', (3824, 3833), False, 'from llama_index.core.objects.base_node_mapping import DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping\n'), ((4944, 5105), 'warnings.warn', 'warnings.warn', (['"""Unable to persist ObjectNodeMapping. You will need to reconstruct the same object node mapping to build this ObjectIndex"""'], {'stacklevel': '(2)'}), "(\n 'Unable to persist ObjectNodeMapping. You will need to reconstruct the same object node mapping to build this ObjectIndex'\n , stacklevel=2)\n", (4957, 5105), False, 'import warnings\n'), ((6026, 6091), 'llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir', 'SimpleObjectNodeMapping.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (6066, 6091), False, 'from llama_index.core.objects.base_node_mapping import DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping\n')]
"""Base object types.""" import pickle import warnings from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.base.query_pipeline.query import ( ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable, ) from llama_index.core.bridge.pydantic import Field from llama_index.core.callbacks.base import CallbackManager from llama_index.core.indices.base import BaseIndex from llama_index.core.indices.vector_store.base import VectorStoreIndex from llama_index.core.objects.base_node_mapping import ( DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping, ) from llama_index.core.schema import QueryType from llama_index.core.storage.storage_context import ( DEFAULT_PERSIST_DIR, StorageContext, ) OT = TypeVar("OT") class ObjectRetriever(ChainableMixin, Generic[OT]): """Object retriever.""" def __init__( self, retriever: BaseRetriever, object_node_mapping: BaseObjectNodeMapping[OT] ): self._retriever = retriever self._object_node_mapping = object_node_mapping @property def retriever(self) -> BaseRetriever: """Retriever.""" return self._retriever def retrieve(self, str_or_query_bundle: QueryType) -> List[OT]: nodes = self._retriever.retrieve(str_or_query_bundle) return [self._object_node_mapping.from_node(node.node) for node in nodes] async def aretrieve(self, str_or_query_bundle: QueryType) -> List[OT]: nodes = await self._retriever.aretrieve(str_or_query_bundle) return [self._object_node_mapping.from_node(node.node) for node in nodes] def _as_query_component(self, **kwargs: Any) -> QueryComponent: """As query component.""" return ObjectRetrieverComponent(retriever=self) class ObjectRetrieverComponent(QueryComponent): """Object retriever component.""" retriever: ObjectRetriever = Field(..., description="Retriever.") class Config: arbitrary_types_allowed = True def set_callback_manager(self, callback_manager: CallbackManager) -> None: """Set callback manager.""" self.retriever.retriever.callback_manager = callback_manager def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]: """Validate component inputs during run_component.""" # make sure input is a string input["input"] = validate_and_convert_stringable(input["input"]) return input def _run_component(self, **kwargs: Any) -> Any: """Run component.""" output = self.retriever.retrieve(kwargs["input"]) return {"output": output} async def _arun_component(self, **kwargs: Any) -> Any: """Run component (async).""" output = await self.retriever.aretrieve(kwargs["input"]) return {"output": output} @property def input_keys(self) -> InputKeys: """Input keys.""" return InputKeys.from_keys({"input"}) @property def output_keys(self) -> OutputKeys: """Output keys.""" return OutputKeys.from_keys({"output"}) class ObjectIndex(Generic[OT]): """Object index.""" def __init__( self, index: BaseIndex, object_node_mapping: BaseObjectNodeMapping ) -> None: self._index = index self._object_node_mapping = object_node_mapping @classmethod def from_objects( cls, objects: Sequence[OT], object_mapping: Optional[BaseObjectNodeMapping] = None, index_cls: Type[BaseIndex] = VectorStoreIndex, **index_kwargs: Any, ) -> "ObjectIndex": if object_mapping is None: object_mapping = SimpleObjectNodeMapping.from_objects(objects) nodes = object_mapping.to_nodes(objects) index = index_cls(nodes, **index_kwargs) return cls(index, object_mapping) def insert_object(self, obj: Any) -> None: 
self._object_node_mapping.add_object(obj) node = self._object_node_mapping.to_node(obj) self._index.insert_nodes([node]) def as_retriever(self, **kwargs: Any) -> ObjectRetriever: return ObjectRetriever( retriever=self._index.as_retriever(**kwargs), object_node_mapping=self._object_node_mapping, ) def as_node_retriever(self, **kwargs: Any) -> BaseRetriever: return self._index.as_retriever(**kwargs) def persist( self, persist_dir: str = DEFAULT_PERSIST_DIR, obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME, ) -> None: # try to persist object node mapping try: self._object_node_mapping.persist( persist_dir=persist_dir, obj_node_mapping_fname=obj_node_mapping_fname ) except (NotImplementedError, pickle.PickleError) as err: warnings.warn( ( "Unable to persist ObjectNodeMapping. You will need to " "reconstruct the same object node mapping to build this ObjectIndex" ), stacklevel=2, ) self._index._storage_context.persist(persist_dir=persist_dir) @classmethod def from_persist_dir( cls, persist_dir: str = DEFAULT_PERSIST_DIR, object_node_mapping: Optional[BaseObjectNodeMapping] = None, ) -> "ObjectIndex": from llama_index.core.indices import load_index_from_storage storage_context = StorageContext.from_defaults(persist_dir=persist_dir) index = load_index_from_storage(storage_context) if object_node_mapping: return cls(index=index, object_node_mapping=object_node_mapping) else: # try to load object_node_mapping # assume SimpleObjectNodeMapping for simplicity as its only subclass # that supports this method try: object_node_mapping = SimpleObjectNodeMapping.from_persist_dir( persist_dir=persist_dir ) except Exception as err: raise Exception( "Unable to load from persist dir. The object_node_mapping cannot be loaded." ) from err else: return cls(index=index, object_node_mapping=object_node_mapping)
from typing import Any, Callable, Optional, Sequence from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.llms.types import ( ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata, ) from llama_index.legacy.llms.base import llm_completion_callback from llama_index.legacy.llms.custom import CustomLLM from llama_index.legacy.types import PydanticProgramMode class MockLLM(CustomLLM): max_tokens: Optional[int] def __init__( self, max_tokens: Optional[int] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, ) -> None: super().__init__( max_tokens=max_tokens, callback_manager=callback_manager, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, ) @classmethod def class_name(cls) -> str: return "MockLLM" @property def metadata(self) -> LLMMetadata: return LLMMetadata(num_output=self.max_tokens or -1) def _generate_text(self, length: int) -> str: return " ".join(["text" for _ in range(length)]) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: response_text = ( self._generate_text(self.max_tokens) if self.max_tokens else prompt ) return CompletionResponse( text=response_text, ) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: def gen_prompt() -> CompletionResponseGen: for ch in prompt: yield CompletionResponse( text=prompt, delta=ch, ) def gen_response(max_tokens: int) -> CompletionResponseGen: for i in range(max_tokens): response_text = self._generate_text(i) yield CompletionResponse( text=response_text, delta="text ", ) return gen_response(self.max_tokens) if self.max_tokens else gen_prompt()
[ "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.core.llms.types.LLMMetadata" ]
[((1537, 1562), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1560, 1562), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((1876, 1901), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1899, 1901), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((1377, 1422), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'num_output': '(self.max_tokens or -1)'}), '(num_output=self.max_tokens or -1)\n', (1388, 1422), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((1808, 1846), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response_text'}), '(text=response_text)\n', (1826, 1846), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2128, 2169), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'prompt', 'delta': 'ch'}), '(text=prompt, delta=ch)\n', (2146, 2169), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2415, 2468), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response_text', 'delta': '"""text """'}), "(text=response_text, delta='text ')\n", (2433, 2468), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n')]
import asyncio

from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex


async def main():
    # DOWNLOAD LLAMADATASET
    rag_dataset, documents = download_llama_dataset("MiniCovidQaDataset", "./data")

    # BUILD BASIC RAG PIPELINE
    index = VectorStoreIndex.from_documents(documents=documents)
    query_engine = index.as_query_engine()

    # EVALUATE WITH PACK
    RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack")
    rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)

    ############################################################################
    # NOTE: If have a lower tier subscription for OpenAI API like Usage Tier 1 #
    # then you'll need to use different batch_size and sleep_time_in_seconds.  #
    # For Usage Tier 1, settings that seemed to work well were batch_size=5,   #
    # and sleep_time_in_seconds=15 (as of December 2023.)                      #
    ############################################################################
    benchmark_df = await rag_evaluator.arun(
        batch_size=40,  # batches the number of openai api calls to make
        sleep_time_in_seconds=1,  # number of seconds sleep before making an api call
    )
    print(benchmark_df)


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
[ "llama_index.core.VectorStoreIndex.from_documents", "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack" ]
[((265, 319), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""MiniCovidQaDataset"""', '"""./data"""'], {}), "('MiniCovidQaDataset', './data')\n", (287, 319), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((364, 416), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (395, 416), False, 'from llama_index.core import VectorStoreIndex\n'), ((509, 558), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack"""'], {}), "('RagEvaluatorPack', './pack')\n", (528, 558), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((1409, 1433), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1431, 1433), False, 'import asyncio\n')]
"""Palm API.""" import os from typing import Any, Callable, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS from llama_index.legacy.core.llms.types import ( ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata, ) from llama_index.legacy.llms.base import llm_completion_callback from llama_index.legacy.llms.custom import CustomLLM from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode DEFAULT_PALM_MODEL = "models/text-bison-001" class PaLM(CustomLLM): """PaLM LLM.""" model_name: str = Field( default=DEFAULT_PALM_MODEL, description="The PaLM model to use." ) num_output: int = Field( default=DEFAULT_NUM_OUTPUTS, description="The number of tokens to generate.", gt=0, ) generate_kwargs: dict = Field( default_factory=dict, description="Kwargs for generation." ) _model: Any = PrivateAttr() def __init__( self, api_key: Optional[str] = None, model_name: Optional[str] = DEFAULT_PALM_MODEL, num_output: Optional[int] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, **generate_kwargs: Any, ) -> None: """Initialize params.""" try: import google.generativeai as palm except ImportError: raise ValueError( "PaLM is not installed. " "Please install it with `pip install google-generativeai`." ) api_key = api_key or os.environ.get("PALM_API_KEY") palm.configure(api_key=api_key) models = palm.list_models() models_dict = {m.name: m for m in models} if model_name not in models_dict: raise ValueError( f"Model name {model_name} not found in {models_dict.keys()}" ) model_name = model_name self._model = models_dict[model_name] # get num_output num_output = num_output or self._model.output_token_limit generate_kwargs = generate_kwargs or {} super().__init__( model_name=model_name, num_output=num_output, generate_kwargs=generate_kwargs, callback_manager=callback_manager, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, ) @classmethod def class_name(cls) -> str: return "PaLM_llm" @property def metadata(self) -> LLMMetadata: """Get LLM metadata.""" # TODO: google palm actually separates input and output token limits total_tokens = self._model.input_token_limit + self.num_output return LLMMetadata( context_window=total_tokens, num_output=self.num_output, model_name=self.model_name, ) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: """Predict the answer to a query. Args: prompt (str): Prompt to use for prediction. Returns: Tuple[str, str]: Tuple of the predicted answer and the formatted prompt. """ import google.generativeai as palm completion = palm.generate_text( model=self.model_name, prompt=prompt, **kwargs, ) return CompletionResponse(text=completion.result, raw=completion.candidates[0]) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: """Stream the answer to a query. NOTE: this is a beta feature. 
Will try to build or use better abstractions about response handling. Args: prompt (str): Prompt to use for prediction. Returns: str: The predicted answer. """ raise NotImplementedError( "PaLM does not support streaming completion in LlamaIndex currently." )
[ "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.core.llms.types.CompletionResponse" ]
[((708, 779), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_PALM_MODEL', 'description': '"""The PaLM model to use."""'}), "(default=DEFAULT_PALM_MODEL, description='The PaLM model to use.')\n", (713, 779), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((816, 910), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_NUM_OUTPUTS', 'description': '"""The number of tokens to generate."""', 'gt': '(0)'}), "(default=DEFAULT_NUM_OUTPUTS, description=\n 'The number of tokens to generate.', gt=0)\n", (821, 910), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((965, 1030), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Kwargs for generation."""'}), "(default_factory=dict, description='Kwargs for generation.')\n", (970, 1030), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1064, 1077), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1075, 1077), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3465, 3490), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3488, 3490), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((4106, 4131), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (4129, 4131), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((2045, 2076), 'google.generativeai.configure', 'palm.configure', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (2059, 2076), True, 'import google.generativeai as palm\n'), ((2095, 2113), 'google.generativeai.list_models', 'palm.list_models', ([], {}), '()\n', (2111, 2113), True, 'import google.generativeai as palm\n'), ((3315, 3415), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'total_tokens', 'num_output': 'self.num_output', 'model_name': 'self.model_name'}), '(context_window=total_tokens, num_output=self.num_output,\n model_name=self.model_name)\n', (3326, 3415), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((3898, 3964), 'google.generativeai.generate_text', 'palm.generate_text', ([], {'model': 'self.model_name', 'prompt': 'prompt'}), '(model=self.model_name, prompt=prompt, **kwargs)\n', (3916, 3964), True, 'import google.generativeai as palm\n'), ((4027, 4099), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'completion.result', 'raw': 'completion.candidates[0]'}), '(text=completion.result, raw=completion.candidates[0])\n', (4045, 4099), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2006, 2036), 'os.environ.get', 'os.environ.get', (['"""PALM_API_KEY"""'], {}), "('PALM_API_KEY')\n", (2020, 2036), False, 'import os\n')]
"""Palm API.""" import os from typing import Any, Callable, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS from llama_index.legacy.core.llms.types import ( ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata, ) from llama_index.legacy.llms.base import llm_completion_callback from llama_index.legacy.llms.custom import CustomLLM from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode DEFAULT_PALM_MODEL = "models/text-bison-001" class PaLM(CustomLLM): """PaLM LLM.""" model_name: str = Field( default=DEFAULT_PALM_MODEL, description="The PaLM model to use." ) num_output: int = Field( default=DEFAULT_NUM_OUTPUTS, description="The number of tokens to generate.", gt=0, ) generate_kwargs: dict = Field( default_factory=dict, description="Kwargs for generation." ) _model: Any = PrivateAttr() def __init__( self, api_key: Optional[str] = None, model_name: Optional[str] = DEFAULT_PALM_MODEL, num_output: Optional[int] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, **generate_kwargs: Any, ) -> None: """Initialize params.""" try: import google.generativeai as palm except ImportError: raise ValueError( "PaLM is not installed. " "Please install it with `pip install google-generativeai`." ) api_key = api_key or os.environ.get("PALM_API_KEY") palm.configure(api_key=api_key) models = palm.list_models() models_dict = {m.name: m for m in models} if model_name not in models_dict: raise ValueError( f"Model name {model_name} not found in {models_dict.keys()}" ) model_name = model_name self._model = models_dict[model_name] # get num_output num_output = num_output or self._model.output_token_limit generate_kwargs = generate_kwargs or {} super().__init__( model_name=model_name, num_output=num_output, generate_kwargs=generate_kwargs, callback_manager=callback_manager, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, ) @classmethod def class_name(cls) -> str: return "PaLM_llm" @property def metadata(self) -> LLMMetadata: """Get LLM metadata.""" # TODO: google palm actually separates input and output token limits total_tokens = self._model.input_token_limit + self.num_output return LLMMetadata( context_window=total_tokens, num_output=self.num_output, model_name=self.model_name, ) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: """Predict the answer to a query. Args: prompt (str): Prompt to use for prediction. Returns: Tuple[str, str]: Tuple of the predicted answer and the formatted prompt. """ import google.generativeai as palm completion = palm.generate_text( model=self.model_name, prompt=prompt, **kwargs, ) return CompletionResponse(text=completion.result, raw=completion.candidates[0]) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: """Stream the answer to a query. NOTE: this is a beta feature. 
Will try to build or use better abstractions about response handling. Args: prompt (str): Prompt to use for prediction. Returns: str: The predicted answer. """ raise NotImplementedError( "PaLM does not support streaming completion in LlamaIndex currently." )
[ "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.core.llms.types.CompletionResponse" ]
[((708, 779), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_PALM_MODEL', 'description': '"""The PaLM model to use."""'}), "(default=DEFAULT_PALM_MODEL, description='The PaLM model to use.')\n", (713, 779), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((816, 910), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_NUM_OUTPUTS', 'description': '"""The number of tokens to generate."""', 'gt': '(0)'}), "(default=DEFAULT_NUM_OUTPUTS, description=\n 'The number of tokens to generate.', gt=0)\n", (821, 910), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((965, 1030), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Kwargs for generation."""'}), "(default_factory=dict, description='Kwargs for generation.')\n", (970, 1030), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1064, 1077), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1075, 1077), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3465, 3490), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3488, 3490), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((4106, 4131), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (4129, 4131), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((2045, 2076), 'google.generativeai.configure', 'palm.configure', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (2059, 2076), True, 'import google.generativeai as palm\n'), ((2095, 2113), 'google.generativeai.list_models', 'palm.list_models', ([], {}), '()\n', (2111, 2113), True, 'import google.generativeai as palm\n'), ((3315, 3415), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'total_tokens', 'num_output': 'self.num_output', 'model_name': 'self.model_name'}), '(context_window=total_tokens, num_output=self.num_output,\n model_name=self.model_name)\n', (3326, 3415), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((3898, 3964), 'google.generativeai.generate_text', 'palm.generate_text', ([], {'model': 'self.model_name', 'prompt': 'prompt'}), '(model=self.model_name, prompt=prompt, **kwargs)\n', (3916, 3964), True, 'import google.generativeai as palm\n'), ((4027, 4099), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'completion.result', 'raw': 'completion.candidates[0]'}), '(text=completion.result, raw=completion.candidates[0])\n', (4045, 4099), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2006, 2036), 'os.environ.get', 'os.environ.get', (['"""PALM_API_KEY"""'], {}), "('PALM_API_KEY')\n", (2020, 2036), False, 'import os\n')]
from typing import Any, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, ) from llama_index.legacy.llms.ai21_utils import ai21_model_to_context_size from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback from llama_index.legacy.llms.custom import CustomLLM from llama_index.legacy.llms.generic_utils import ( completion_to_chat_decorator, get_from_param_or_env, ) from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode class AI21(CustomLLM): """AI21 Labs LLM.""" model: str = Field(description="The AI21 model to use.") maxTokens: int = Field(description="The maximum number of tokens to generate.") temperature: float = Field(description="The temperature to use for sampling.") additional_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional kwargs for the anthropic API." ) _api_key = PrivateAttr() def __init__( self, api_key: Optional[str] = None, model: Optional[str] = "j2-mid", maxTokens: Optional[int] = 512, temperature: Optional[float] = 0.1, additional_kwargs: Optional[Dict[str, Any]] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, ) -> None: """Initialize params.""" try: import ai21 as _ # noqa except ImportError as e: raise ImportError( "You must install the `ai21` package to use AI21." 
"Please `pip install ai21`" ) from e additional_kwargs = additional_kwargs or {} callback_manager = callback_manager or CallbackManager([]) api_key = get_from_param_or_env("api_key", api_key, "AI21_API_KEY") self._api_key = api_key super().__init__( model=model, maxTokens=maxTokens, temperature=temperature, additional_kwargs=additional_kwargs, callback_manager=callback_manager, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, ) @classmethod def class_name(self) -> str: """Get Class Name.""" return "AI21_LLM" @property def metadata(self) -> LLMMetadata: return LLMMetadata( context_window=ai21_model_to_context_size(self.model), num_output=self.maxTokens, model_name=self.model, ) @property def _model_kwargs(self) -> Dict[str, Any]: base_kwargs = { "model": self.model, "maxTokens": self.maxTokens, "temperature": self.temperature, } return {**base_kwargs, **self.additional_kwargs} def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]: return { **self._model_kwargs, **kwargs, } @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: all_kwargs = self._get_all_kwargs(**kwargs) import ai21 ai21.api_key = self._api_key response = ai21.Completion.execute(**all_kwargs, prompt=prompt) return CompletionResponse( text=response["completions"][0]["data"]["text"], raw=response.__dict__ ) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: raise NotImplementedError( "AI21 does not currently support streaming completion." ) @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: all_kwargs = self._get_all_kwargs(**kwargs) chat_fn = completion_to_chat_decorator(self.complete) return chat_fn(messages, **all_kwargs) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: raise NotImplementedError("AI21 does not Currently Support Streaming Chat.")
[ "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.llms.generic_utils.get_from_param_or_env", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.callbacks.CallbackManager", "llama_index.legacy.llms.ai21_utils.ai21_model_to_context_size", "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.generic_utils.completion_to_chat_decorator" ]
[((827, 870), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The AI21 model to use."""'}), "(description='The AI21 model to use.')\n", (832, 870), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((892, 954), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""'}), "(description='The maximum number of tokens to generate.')\n", (897, 954), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((980, 1037), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (985, 1037), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1079, 1167), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the anthropic API."""'}), "(default_factory=dict, description=\n 'Additional kwargs for the anthropic API.')\n", (1084, 1167), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1193, 1206), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1204, 1206), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3625, 3650), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3648, 3650), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4083, 4108), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (4106, 4108), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4351, 4370), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (4368, 4370), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4623, 4642), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (4640, 4642), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((2296, 2353), 'llama_index.legacy.llms.generic_utils.get_from_param_or_env', 'get_from_param_or_env', (['"""api_key"""', 'api_key', '"""AI21_API_KEY"""'], {}), "('api_key', api_key, 'AI21_API_KEY')\n", (2317, 2353), False, 'from llama_index.legacy.llms.generic_utils import completion_to_chat_decorator, get_from_param_or_env\n'), ((3895, 3947), 'ai21.Completion.execute', 'ai21.Completion.execute', ([], {'prompt': 'prompt'}), '(**all_kwargs, prompt=prompt)\n', (3918, 3947), False, 'import ai21\n'), ((3964, 4059), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': "response['completions'][0]['data']['text']", 'raw': 'response.__dict__'}), "(text=response['completions'][0]['data']['text'], raw=\n response.__dict__)\n", (3982, 4059), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((4525, 4568), 'llama_index.legacy.llms.generic_utils.completion_to_chat_decorator', 'completion_to_chat_decorator', (['self.complete'], {}), '(self.complete)\n', (4553, 4568), False, 'from llama_index.legacy.llms.generic_utils import completion_to_chat_decorator, get_from_param_or_env\n'), ((2257, 2276), 
'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2272, 2276), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((3075, 3113), 'llama_index.legacy.llms.ai21_utils.ai21_model_to_context_size', 'ai21_model_to_context_size', (['self.model'], {}), '(self.model)\n', (3101, 3113), False, 'from llama_index.legacy.llms.ai21_utils import ai21_model_to_context_size\n')]
import json from typing import Any, Callable, Dict, List, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, ) from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback from llama_index.legacy.llms.generic_utils import ( completion_response_to_chat_response, stream_completion_response_to_chat_response, ) from llama_index.legacy.llms.generic_utils import ( messages_to_prompt as generic_messages_to_prompt, ) from llama_index.legacy.llms.llm import LLM from llama_index.legacy.llms.vllm_utils import get_response, post_http_request from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode class Vllm(LLM): model: Optional[str] = Field(description="The HuggingFace Model to use.") temperature: float = Field(description="The temperature to use for sampling.") tensor_parallel_size: Optional[int] = Field( default=1, description="The number of GPUs to use for distributed execution with tensor parallelism.", ) trust_remote_code: Optional[bool] = Field( default=True, description="Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer.", ) n: int = Field( default=1, description="Number of output sequences to return for the given prompt.", ) best_of: Optional[int] = Field( default=None, description="Number of output sequences that are generated from the prompt.", ) presence_penalty: float = Field( default=0.0, description="Float that penalizes new tokens based on whether they appear in the generated text so far.", ) frequency_penalty: float = Field( default=0.0, description="Float that penalizes new tokens based on their frequency in the generated text so far.", ) top_p: float = Field( default=1.0, description="Float that controls the cumulative probability of the top tokens to consider.", ) top_k: int = Field( default=-1, description="Integer that controls the number of top tokens to consider.", ) use_beam_search: bool = Field( default=False, description="Whether to use beam search instead of sampling." ) stop: Optional[List[str]] = Field( default=None, description="List of strings that stop the generation when they are generated.", ) ignore_eos: bool = Field( default=False, description="Whether to ignore the EOS token and continue generating tokens after the EOS token is generated.", ) max_new_tokens: int = Field( default=512, description="Maximum number of tokens to generate per output sequence.", ) logprobs: Optional[int] = Field( default=None, description="Number of log probabilities to return per output token.", ) dtype: str = Field( default="auto", description="The data type for the model weights and activations.", ) download_dir: Optional[str] = Field( default=None, description="Directory to download and load the weights. 
(Default to the default cache dir of huggingface)", ) vllm_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Holds any model parameters valid for `vllm.LLM` call not explicitly specified.", ) api_url: str = Field(description="The api url for vllm server") _client: Any = PrivateAttr() def __init__( self, model: str = "facebook/opt-125m", temperature: float = 1.0, tensor_parallel_size: int = 1, trust_remote_code: bool = True, n: int = 1, best_of: Optional[int] = None, presence_penalty: float = 0.0, frequency_penalty: float = 0.0, top_p: float = 1.0, top_k: int = -1, use_beam_search: bool = False, stop: Optional[List[str]] = None, ignore_eos: bool = False, max_new_tokens: int = 512, logprobs: Optional[int] = None, dtype: str = "auto", download_dir: Optional[str] = None, vllm_kwargs: Dict[str, Any] = {}, api_url: Optional[str] = "", callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, ) -> None: try: from vllm import LLM as VLLModel except ImportError: raise ImportError( "Could not import vllm python package. " "Please install it with `pip install vllm`." ) if model != "": self._client = VLLModel( model=model, tensor_parallel_size=tensor_parallel_size, trust_remote_code=trust_remote_code, dtype=dtype, download_dir=download_dir, **vllm_kwargs ) else: self._client = None callback_manager = callback_manager or CallbackManager([]) super().__init__( model=model, temperature=temperature, n=n, best_of=best_of, presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, top_p=top_p, top_k=top_k, use_beam_search=use_beam_search, stop=stop, ignore_eos=ignore_eos, max_new_tokens=max_new_tokens, logprobs=logprobs, dtype=dtype, download_dir=download_dir, vllm_kwargs=vllm_kwargs, api_url=api_url, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, ) @classmethod def class_name(cls) -> str: return "Vllm" @property def metadata(self) -> LLMMetadata: return LLMMetadata(model_name=self.model) @property def _model_kwargs(self) -> Dict[str, Any]: base_kwargs = { "temperature": self.temperature, "max_tokens": self.max_new_tokens, "n": self.n, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "use_beam_search": self.use_beam_search, "best_of": self.best_of, "ignore_eos": self.ignore_eos, "stop": self.stop, "logprobs": self.logprobs, "top_k": self.top_k, "top_p": self.top_p, "stop": self.stop, } return {**base_kwargs} def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]: return { **self._model_kwargs, **kwargs, } @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: kwargs = kwargs if kwargs else {} prompt = self.messages_to_prompt(messages) completion_response = self.complete(prompt, **kwargs) return completion_response_to_chat_response(completion_response) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: kwargs = kwargs if kwargs else {} params = {**self._model_kwargs, **kwargs} from vllm import SamplingParams # build sampling parameters sampling_params = SamplingParams(**params) outputs = 
self._client.generate([prompt], sampling_params) return CompletionResponse(text=outputs[0].outputs[0].text) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: raise (ValueError("Not Implemented")) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: raise (ValueError("Not Implemented")) @llm_chat_callback() async def achat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponse: kwargs = kwargs if kwargs else {} return self.chat(messages, **kwargs) @llm_completion_callback() async def acomplete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: raise (ValueError("Not Implemented")) @llm_chat_callback() async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: raise (ValueError("Not Implemented")) @llm_completion_callback() async def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: raise (ValueError("Not Implemented")) class VllmServer(Vllm): def __init__( self, model: str = "facebook/opt-125m", api_url: str = "http://localhost:8000", temperature: float = 1.0, tensor_parallel_size: Optional[int] = 1, trust_remote_code: Optional[bool] = True, n: int = 1, best_of: Optional[int] = None, presence_penalty: float = 0.0, frequency_penalty: float = 0.0, top_p: float = 1.0, top_k: int = -1, use_beam_search: bool = False, stop: Optional[List[str]] = None, ignore_eos: bool = False, max_new_tokens: int = 512, logprobs: Optional[int] = None, dtype: str = "auto", download_dir: Optional[str] = None, messages_to_prompt: Optional[Callable] = None, completion_to_prompt: Optional[Callable] = None, vllm_kwargs: Dict[str, Any] = {}, callback_manager: Optional[CallbackManager] = None, output_parser: Optional[BaseOutputParser] = None, ) -> None: self._client = None messages_to_prompt = messages_to_prompt or generic_messages_to_prompt completion_to_prompt = completion_to_prompt or (lambda x: x) callback_manager = callback_manager or CallbackManager([]) model = "" super().__init__( model=model, temperature=temperature, n=n, best_of=best_of, presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, top_p=top_p, top_k=top_k, use_beam_search=use_beam_search, stop=stop, ignore_eos=ignore_eos, max_new_tokens=max_new_tokens, logprobs=logprobs, dtype=dtype, download_dir=download_dir, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, vllm_kwargs=vllm_kwargs, api_url=api_url, callback_manager=callback_manager, output_parser=output_parser, ) @classmethod def class_name(cls) -> str: return "VllmServer" @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> List[CompletionResponse]: kwargs = kwargs if kwargs else {} params = {**self._model_kwargs, **kwargs} from vllm import SamplingParams # build sampling parameters sampling_params = SamplingParams(**params).__dict__ sampling_params["prompt"] = prompt response = post_http_request(self.api_url, sampling_params, stream=False) output = get_response(response) return CompletionResponse(text=output[0]) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: kwargs = kwargs if kwargs else {} params = {**self._model_kwargs, **kwargs} from vllm import SamplingParams # build sampling parameters sampling_params = 
SamplingParams(**params).__dict__ sampling_params["prompt"] = prompt response = post_http_request(self.api_url, sampling_params, stream=True) def gen() -> CompletionResponseGen: for chunk in response.iter_lines( chunk_size=8192, decode_unicode=False, delimiter=b"\0" ): if chunk: data = json.loads(chunk.decode("utf-8")) yield CompletionResponse(text=data["text"][0]) return gen() @llm_completion_callback() async def acomplete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: kwargs = kwargs if kwargs else {} return self.complete(prompt, **kwargs) @llm_completion_callback() async def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: kwargs = kwargs if kwargs else {} params = {**self._model_kwargs, **kwargs} from vllm import SamplingParams # build sampling parameters sampling_params = SamplingParams(**params).__dict__ sampling_params["prompt"] = prompt async def gen() -> CompletionResponseAsyncGen: for message in self.stream_complete(prompt, **kwargs): yield message return gen() @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: prompt = self.messages_to_prompt(messages) completion_response = self.stream_complete(prompt, **kwargs) return stream_completion_response_to_chat_response(completion_response) @llm_chat_callback() async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: return self.stream_chat(messages, **kwargs)
[ "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.llms.vllm_utils.get_response", "llama_index.legacy.llms.generic_utils.completion_response_to_chat_response", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.callbacks.CallbackManager", "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.vllm_utils.post_http_request" ]
[((1015, 1065), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The HuggingFace Model to use."""'}), "(description='The HuggingFace Model to use.')\n", (1020, 1065), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1092, 1149), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (1097, 1149), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1193, 1311), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(1)', 'description': '"""The number of GPUs to use for distributed execution with tensor parallelism."""'}), "(default=1, description=\n 'The number of GPUs to use for distributed execution with tensor parallelism.'\n )\n", (1198, 1311), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1366, 1495), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer."""'}), "(default=True, description=\n 'Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer.'\n )\n", (1371, 1495), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1523, 1618), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(1)', 'description': '"""Number of output sequences to return for the given prompt."""'}), "(default=1, description=\n 'Number of output sequences to return for the given prompt.')\n", (1528, 1618), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1667, 1769), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Number of output sequences that are generated from the prompt."""'}), "(default=None, description=\n 'Number of output sequences that are generated from the prompt.')\n", (1672, 1769), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1819, 1953), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(0.0)', 'description': '"""Float that penalizes new tokens based on whether they appear in the generated text so far."""'}), "(default=0.0, description=\n 'Float that penalizes new tokens based on whether they appear in the generated text so far.'\n )\n", (1824, 1953), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1999, 2129), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(0.0)', 'description': '"""Float that penalizes new tokens based on their frequency in the generated text so far."""'}), "(default=0.0, description=\n 'Float that penalizes new tokens based on their frequency in the generated text so far.'\n )\n", (2004, 2129), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2163, 2284), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(1.0)', 'description': '"""Float that controls the cumulative probability of the top tokens to consider."""'}), "(default=1.0, description=\n 'Float that controls the cumulative probability of the top tokens to consider.'\n )\n", (2168, 2284), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2316, 2413), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(-1)', 'description': '"""Integer that controls the number of top 
tokens to consider."""'}), "(default=-1, description=\n 'Integer that controls the number of top tokens to consider.')\n", (2321, 2413), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2461, 2549), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to use beam search instead of sampling."""'}), "(default=False, description=\n 'Whether to use beam search instead of sampling.')\n", (2466, 2549), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2592, 2697), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""List of strings that stop the generation when they are generated."""'}), "(default=None, description=\n 'List of strings that stop the generation when they are generated.')\n", (2597, 2697), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2740, 2882), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to ignore the EOS token and continue generating tokens after the EOS token is generated."""'}), "(default=False, description=\n 'Whether to ignore the EOS token and continue generating tokens after the EOS token is generated.'\n )\n", (2745, 2882), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2923, 3019), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(512)', 'description': '"""Maximum number of tokens to generate per output sequence."""'}), "(default=512, description=\n 'Maximum number of tokens to generate per output sequence.')\n", (2928, 3019), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3069, 3164), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Number of log probabilities to return per output token."""'}), "(default=None, description=\n 'Number of log probabilities to return per output token.')\n", (3074, 3164), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3201, 3295), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '"""auto"""', 'description': '"""The data type for the model weights and activations."""'}), "(default='auto', description=\n 'The data type for the model weights and activations.')\n", (3206, 3295), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3349, 3487), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Directory to download and load the weights. (Default to the default cache dir of huggingface)"""'}), "(default=None, description=\n 'Directory to download and load the weights. 
(Default to the default cache dir of huggingface)'\n )\n", (3354, 3487), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3536, 3667), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Holds any model parameters valid for `vllm.LLM` call not explicitly specified."""'}), "(default_factory=dict, description=\n 'Holds any model parameters valid for `vllm.LLM` call not explicitly specified.'\n )\n", (3541, 3667), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3701, 3749), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The api url for vllm server"""'}), "(description='The api url for vllm server')\n", (3706, 3749), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3770, 3783), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3781, 3783), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((7427, 7446), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (7444, 7446), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((7765, 7790), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7788, 7790), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8265, 8284), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (8282, 8284), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8445, 8470), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8468, 8470), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8646, 8665), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (8663, 8665), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8864, 8889), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8887, 8889), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9062, 9081), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9079, 9081), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9254, 9279), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (9277, 9279), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((11620, 11645), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (11643, 11645), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((12217, 12242), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (12240, 12242), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((13080, 13105), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (13103, 13105), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((13321, 13346), 
'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (13344, 13346), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((13936, 13955), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (13953, 13955), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((14270, 14289), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (14287, 14289), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6582, 6616), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'model_name': 'self.model'}), '(model_name=self.model)\n', (6593, 6616), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((7701, 7758), 'llama_index.legacy.llms.generic_utils.completion_response_to_chat_response', 'completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (7737, 7758), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((8100, 8124), 'vllm.SamplingParams', 'SamplingParams', ([], {}), '(**params)\n', (8114, 8124), False, 'from vllm import SamplingParams\n'), ((8207, 8258), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'outputs[0].outputs[0].text'}), '(text=outputs[0].outputs[0].text)\n', (8225, 8258), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((12057, 12119), 'llama_index.legacy.llms.vllm_utils.post_http_request', 'post_http_request', (['self.api_url', 'sampling_params'], {'stream': '(False)'}), '(self.api_url, sampling_params, stream=False)\n', (12074, 12119), False, 'from llama_index.legacy.llms.vllm_utils import get_response, post_http_request\n'), ((12137, 12159), 'llama_index.legacy.llms.vllm_utils.get_response', 'get_response', (['response'], {}), '(response)\n', (12149, 12159), False, 'from llama_index.legacy.llms.vllm_utils import get_response, post_http_request\n'), ((12176, 12210), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'output[0]'}), '(text=output[0])\n', (12194, 12210), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((12658, 12719), 'llama_index.legacy.llms.vllm_utils.post_http_request', 'post_http_request', (['self.api_url', 'sampling_params'], {'stream': '(True)'}), '(self.api_url, sampling_params, stream=True)\n', (12675, 12719), False, 'from llama_index.legacy.llms.vllm_utils import get_response, post_http_request\n'), ((14199, 14263), 'llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response', 'stream_completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (14242, 14263), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((5219, 5384), 'vllm.LLM', 'VLLModel', ([], 
{'model': 'model', 'tensor_parallel_size': 'tensor_parallel_size', 'trust_remote_code': 'trust_remote_code', 'dtype': 'dtype', 'download_dir': 'download_dir'}), '(model=model, tensor_parallel_size=tensor_parallel_size,\n trust_remote_code=trust_remote_code, dtype=dtype, download_dir=\n download_dir, **vllm_kwargs)\n', (5227, 5384), True, 'from vllm import LLM as VLLModel\n'), ((5579, 5598), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (5594, 5598), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((10705, 10724), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (10720, 10724), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((11961, 11985), 'vllm.SamplingParams', 'SamplingParams', ([], {}), '(**params)\n', (11975, 11985), False, 'from vllm import SamplingParams\n'), ((12562, 12586), 'vllm.SamplingParams', 'SamplingParams', ([], {}), '(**params)\n', (12576, 12586), False, 'from vllm import SamplingParams\n'), ((13678, 13702), 'vllm.SamplingParams', 'SamplingParams', ([], {}), '(**params)\n', (13692, 13702), False, 'from vllm import SamplingParams\n'), ((13011, 13051), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': "data['text'][0]"}), "(text=data['text'][0])\n", (13029, 13051), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n')]
import json from typing import Any, Callable, Dict, List, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, ) from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback from llama_index.legacy.llms.generic_utils import ( completion_response_to_chat_response, stream_completion_response_to_chat_response, ) from llama_index.legacy.llms.generic_utils import ( messages_to_prompt as generic_messages_to_prompt, ) from llama_index.legacy.llms.llm import LLM from llama_index.legacy.llms.vllm_utils import get_response, post_http_request from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode class Vllm(LLM): model: Optional[str] = Field(description="The HuggingFace Model to use.") temperature: float = Field(description="The temperature to use for sampling.") tensor_parallel_size: Optional[int] = Field( default=1, description="The number of GPUs to use for distributed execution with tensor parallelism.", ) trust_remote_code: Optional[bool] = Field( default=True, description="Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer.", ) n: int = Field( default=1, description="Number of output sequences to return for the given prompt.", ) best_of: Optional[int] = Field( default=None, description="Number of output sequences that are generated from the prompt.", ) presence_penalty: float = Field( default=0.0, description="Float that penalizes new tokens based on whether they appear in the generated text so far.", ) frequency_penalty: float = Field( default=0.0, description="Float that penalizes new tokens based on their frequency in the generated text so far.", ) top_p: float = Field( default=1.0, description="Float that controls the cumulative probability of the top tokens to consider.", ) top_k: int = Field( default=-1, description="Integer that controls the number of top tokens to consider.", ) use_beam_search: bool = Field( default=False, description="Whether to use beam search instead of sampling." ) stop: Optional[List[str]] = Field( default=None, description="List of strings that stop the generation when they are generated.", ) ignore_eos: bool = Field( default=False, description="Whether to ignore the EOS token and continue generating tokens after the EOS token is generated.", ) max_new_tokens: int = Field( default=512, description="Maximum number of tokens to generate per output sequence.", ) logprobs: Optional[int] = Field( default=None, description="Number of log probabilities to return per output token.", ) dtype: str = Field( default="auto", description="The data type for the model weights and activations.", ) download_dir: Optional[str] = Field( default=None, description="Directory to download and load the weights. 
(Default to the default cache dir of huggingface)", ) vllm_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Holds any model parameters valid for `vllm.LLM` call not explicitly specified.", ) api_url: str = Field(description="The api url for vllm server") _client: Any = PrivateAttr() def __init__( self, model: str = "facebook/opt-125m", temperature: float = 1.0, tensor_parallel_size: int = 1, trust_remote_code: bool = True, n: int = 1, best_of: Optional[int] = None, presence_penalty: float = 0.0, frequency_penalty: float = 0.0, top_p: float = 1.0, top_k: int = -1, use_beam_search: bool = False, stop: Optional[List[str]] = None, ignore_eos: bool = False, max_new_tokens: int = 512, logprobs: Optional[int] = None, dtype: str = "auto", download_dir: Optional[str] = None, vllm_kwargs: Dict[str, Any] = {}, api_url: Optional[str] = "", callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, ) -> None: try: from vllm import LLM as VLLModel except ImportError: raise ImportError( "Could not import vllm python package. " "Please install it with `pip install vllm`." ) if model != "": self._client = VLLModel( model=model, tensor_parallel_size=tensor_parallel_size, trust_remote_code=trust_remote_code, dtype=dtype, download_dir=download_dir, **vllm_kwargs ) else: self._client = None callback_manager = callback_manager or CallbackManager([]) super().__init__( model=model, temperature=temperature, n=n, best_of=best_of, presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, top_p=top_p, top_k=top_k, use_beam_search=use_beam_search, stop=stop, ignore_eos=ignore_eos, max_new_tokens=max_new_tokens, logprobs=logprobs, dtype=dtype, download_dir=download_dir, vllm_kwargs=vllm_kwargs, api_url=api_url, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, ) @classmethod def class_name(cls) -> str: return "Vllm" @property def metadata(self) -> LLMMetadata: return LLMMetadata(model_name=self.model) @property def _model_kwargs(self) -> Dict[str, Any]: base_kwargs = { "temperature": self.temperature, "max_tokens": self.max_new_tokens, "n": self.n, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "use_beam_search": self.use_beam_search, "best_of": self.best_of, "ignore_eos": self.ignore_eos, "stop": self.stop, "logprobs": self.logprobs, "top_k": self.top_k, "top_p": self.top_p, "stop": self.stop, } return {**base_kwargs} def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]: return { **self._model_kwargs, **kwargs, } @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: kwargs = kwargs if kwargs else {} prompt = self.messages_to_prompt(messages) completion_response = self.complete(prompt, **kwargs) return completion_response_to_chat_response(completion_response) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: kwargs = kwargs if kwargs else {} params = {**self._model_kwargs, **kwargs} from vllm import SamplingParams # build sampling parameters sampling_params = SamplingParams(**params) outputs = 
self._client.generate([prompt], sampling_params) return CompletionResponse(text=outputs[0].outputs[0].text) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: raise (ValueError("Not Implemented")) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: raise (ValueError("Not Implemented")) @llm_chat_callback() async def achat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponse: kwargs = kwargs if kwargs else {} return self.chat(messages, **kwargs) @llm_completion_callback() async def acomplete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: raise (ValueError("Not Implemented")) @llm_chat_callback() async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: raise (ValueError("Not Implemented")) @llm_completion_callback() async def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: raise (ValueError("Not Implemented")) class VllmServer(Vllm): def __init__( self, model: str = "facebook/opt-125m", api_url: str = "http://localhost:8000", temperature: float = 1.0, tensor_parallel_size: Optional[int] = 1, trust_remote_code: Optional[bool] = True, n: int = 1, best_of: Optional[int] = None, presence_penalty: float = 0.0, frequency_penalty: float = 0.0, top_p: float = 1.0, top_k: int = -1, use_beam_search: bool = False, stop: Optional[List[str]] = None, ignore_eos: bool = False, max_new_tokens: int = 512, logprobs: Optional[int] = None, dtype: str = "auto", download_dir: Optional[str] = None, messages_to_prompt: Optional[Callable] = None, completion_to_prompt: Optional[Callable] = None, vllm_kwargs: Dict[str, Any] = {}, callback_manager: Optional[CallbackManager] = None, output_parser: Optional[BaseOutputParser] = None, ) -> None: self._client = None messages_to_prompt = messages_to_prompt or generic_messages_to_prompt completion_to_prompt = completion_to_prompt or (lambda x: x) callback_manager = callback_manager or CallbackManager([]) model = "" super().__init__( model=model, temperature=temperature, n=n, best_of=best_of, presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, top_p=top_p, top_k=top_k, use_beam_search=use_beam_search, stop=stop, ignore_eos=ignore_eos, max_new_tokens=max_new_tokens, logprobs=logprobs, dtype=dtype, download_dir=download_dir, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, vllm_kwargs=vllm_kwargs, api_url=api_url, callback_manager=callback_manager, output_parser=output_parser, ) @classmethod def class_name(cls) -> str: return "VllmServer" @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> List[CompletionResponse]: kwargs = kwargs if kwargs else {} params = {**self._model_kwargs, **kwargs} from vllm import SamplingParams # build sampling parameters sampling_params = SamplingParams(**params).__dict__ sampling_params["prompt"] = prompt response = post_http_request(self.api_url, sampling_params, stream=False) output = get_response(response) return CompletionResponse(text=output[0]) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: kwargs = kwargs if kwargs else {} params = {**self._model_kwargs, **kwargs} from vllm import SamplingParams # build sampling parameters sampling_params = 
SamplingParams(**params).__dict__ sampling_params["prompt"] = prompt response = post_http_request(self.api_url, sampling_params, stream=True) def gen() -> CompletionResponseGen: for chunk in response.iter_lines( chunk_size=8192, decode_unicode=False, delimiter=b"\0" ): if chunk: data = json.loads(chunk.decode("utf-8")) yield CompletionResponse(text=data["text"][0]) return gen() @llm_completion_callback() async def acomplete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: kwargs = kwargs if kwargs else {} return self.complete(prompt, **kwargs) @llm_completion_callback() async def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: kwargs = kwargs if kwargs else {} params = {**self._model_kwargs, **kwargs} from vllm import SamplingParams # build sampling parameters sampling_params = SamplingParams(**params).__dict__ sampling_params["prompt"] = prompt async def gen() -> CompletionResponseAsyncGen: for message in self.stream_complete(prompt, **kwargs): yield message return gen() @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: prompt = self.messages_to_prompt(messages) completion_response = self.stream_complete(prompt, **kwargs) return stream_completion_response_to_chat_response(completion_response) @llm_chat_callback() async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: return self.stream_chat(messages, **kwargs)
[ "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.llms.vllm_utils.get_response", "llama_index.legacy.llms.generic_utils.completion_response_to_chat_response", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.callbacks.CallbackManager", "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.vllm_utils.post_http_request" ]
[((1015, 1065), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The HuggingFace Model to use."""'}), "(description='The HuggingFace Model to use.')\n", (1020, 1065), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1092, 1149), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (1097, 1149), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1193, 1311), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(1)', 'description': '"""The number of GPUs to use for distributed execution with tensor parallelism."""'}), "(default=1, description=\n 'The number of GPUs to use for distributed execution with tensor parallelism.'\n )\n", (1198, 1311), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1366, 1495), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer."""'}), "(default=True, description=\n 'Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer.'\n )\n", (1371, 1495), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1523, 1618), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(1)', 'description': '"""Number of output sequences to return for the given prompt."""'}), "(default=1, description=\n 'Number of output sequences to return for the given prompt.')\n", (1528, 1618), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1667, 1769), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Number of output sequences that are generated from the prompt."""'}), "(default=None, description=\n 'Number of output sequences that are generated from the prompt.')\n", (1672, 1769), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1819, 1953), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(0.0)', 'description': '"""Float that penalizes new tokens based on whether they appear in the generated text so far."""'}), "(default=0.0, description=\n 'Float that penalizes new tokens based on whether they appear in the generated text so far.'\n )\n", (1824, 1953), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1999, 2129), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(0.0)', 'description': '"""Float that penalizes new tokens based on their frequency in the generated text so far."""'}), "(default=0.0, description=\n 'Float that penalizes new tokens based on their frequency in the generated text so far.'\n )\n", (2004, 2129), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2163, 2284), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(1.0)', 'description': '"""Float that controls the cumulative probability of the top tokens to consider."""'}), "(default=1.0, description=\n 'Float that controls the cumulative probability of the top tokens to consider.'\n )\n", (2168, 2284), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2316, 2413), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(-1)', 'description': '"""Integer that controls the number of top 
tokens to consider."""'}), "(default=-1, description=\n 'Integer that controls the number of top tokens to consider.')\n", (2321, 2413), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2461, 2549), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to use beam search instead of sampling."""'}), "(default=False, description=\n 'Whether to use beam search instead of sampling.')\n", (2466, 2549), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2592, 2697), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""List of strings that stop the generation when they are generated."""'}), "(default=None, description=\n 'List of strings that stop the generation when they are generated.')\n", (2597, 2697), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2740, 2882), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to ignore the EOS token and continue generating tokens after the EOS token is generated."""'}), "(default=False, description=\n 'Whether to ignore the EOS token and continue generating tokens after the EOS token is generated.'\n )\n", (2745, 2882), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2923, 3019), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(512)', 'description': '"""Maximum number of tokens to generate per output sequence."""'}), "(default=512, description=\n 'Maximum number of tokens to generate per output sequence.')\n", (2928, 3019), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3069, 3164), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Number of log probabilities to return per output token."""'}), "(default=None, description=\n 'Number of log probabilities to return per output token.')\n", (3074, 3164), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3201, 3295), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '"""auto"""', 'description': '"""The data type for the model weights and activations."""'}), "(default='auto', description=\n 'The data type for the model weights and activations.')\n", (3206, 3295), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3349, 3487), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Directory to download and load the weights. (Default to the default cache dir of huggingface)"""'}), "(default=None, description=\n 'Directory to download and load the weights. 
(Default to the default cache dir of huggingface)'\n )\n", (3354, 3487), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3536, 3667), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Holds any model parameters valid for `vllm.LLM` call not explicitly specified."""'}), "(default_factory=dict, description=\n 'Holds any model parameters valid for `vllm.LLM` call not explicitly specified.'\n )\n", (3541, 3667), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3701, 3749), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The api url for vllm server"""'}), "(description='The api url for vllm server')\n", (3706, 3749), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3770, 3783), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3781, 3783), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((7427, 7446), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (7444, 7446), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((7765, 7790), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7788, 7790), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8265, 8284), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (8282, 8284), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8445, 8470), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8468, 8470), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8646, 8665), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (8663, 8665), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8864, 8889), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8887, 8889), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9062, 9081), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9079, 9081), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9254, 9279), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (9277, 9279), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((11620, 11645), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (11643, 11645), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((12217, 12242), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (12240, 12242), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((13080, 13105), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (13103, 13105), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((13321, 13346), 
'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (13344, 13346), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((13936, 13955), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (13953, 13955), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((14270, 14289), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (14287, 14289), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6582, 6616), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'model_name': 'self.model'}), '(model_name=self.model)\n', (6593, 6616), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((7701, 7758), 'llama_index.legacy.llms.generic_utils.completion_response_to_chat_response', 'completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (7737, 7758), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((8100, 8124), 'vllm.SamplingParams', 'SamplingParams', ([], {}), '(**params)\n', (8114, 8124), False, 'from vllm import SamplingParams\n'), ((8207, 8258), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'outputs[0].outputs[0].text'}), '(text=outputs[0].outputs[0].text)\n', (8225, 8258), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((12057, 12119), 'llama_index.legacy.llms.vllm_utils.post_http_request', 'post_http_request', (['self.api_url', 'sampling_params'], {'stream': '(False)'}), '(self.api_url, sampling_params, stream=False)\n', (12074, 12119), False, 'from llama_index.legacy.llms.vllm_utils import get_response, post_http_request\n'), ((12137, 12159), 'llama_index.legacy.llms.vllm_utils.get_response', 'get_response', (['response'], {}), '(response)\n', (12149, 12159), False, 'from llama_index.legacy.llms.vllm_utils import get_response, post_http_request\n'), ((12176, 12210), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'output[0]'}), '(text=output[0])\n', (12194, 12210), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((12658, 12719), 'llama_index.legacy.llms.vllm_utils.post_http_request', 'post_http_request', (['self.api_url', 'sampling_params'], {'stream': '(True)'}), '(self.api_url, sampling_params, stream=True)\n', (12675, 12719), False, 'from llama_index.legacy.llms.vllm_utils import get_response, post_http_request\n'), ((14199, 14263), 'llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response', 'stream_completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (14242, 14263), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((5219, 5384), 'vllm.LLM', 'VLLModel', ([], 
{'model': 'model', 'tensor_parallel_size': 'tensor_parallel_size', 'trust_remote_code': 'trust_remote_code', 'dtype': 'dtype', 'download_dir': 'download_dir'}), '(model=model, tensor_parallel_size=tensor_parallel_size,\n trust_remote_code=trust_remote_code, dtype=dtype, download_dir=\n download_dir, **vllm_kwargs)\n', (5227, 5384), True, 'from vllm import LLM as VLLModel\n'), ((5579, 5598), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (5594, 5598), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((10705, 10724), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (10720, 10724), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((11961, 11985), 'vllm.SamplingParams', 'SamplingParams', ([], {}), '(**params)\n', (11975, 11985), False, 'from vllm import SamplingParams\n'), ((12562, 12586), 'vllm.SamplingParams', 'SamplingParams', ([], {}), '(**params)\n', (12576, 12586), False, 'from vllm import SamplingParams\n'), ((13678, 13702), 'vllm.SamplingParams', 'SamplingParams', ([], {}), '(**params)\n', (13692, 13702), False, 'from vllm import SamplingParams\n'), ((13011, 13051), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': "data['text'][0]"}), "(text=data['text'][0])\n", (13029, 13051), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n')]
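Editor's note (not part of the original dataset record): a minimal usage sketch for the Vllm and VllmServer classes defined above. It assumes the module is importable as llama_index.legacy.llms.vllm, that the vllm package is installed for the in-process case, and that a vLLM API server is already reachable at whatever api_url you pass (the constructor default shown above is http://localhost:8000; the exact endpoint path depends on how the server was launched).

# Hypothetical usage sketch (editor's addition), not the library's official example.
from llama_index.legacy.llms.vllm import Vllm, VllmServer

# In-process engine: loads HuggingFace weights through the vllm package.
local_llm = Vllm(model="facebook/opt-125m", temperature=0.8, max_new_tokens=64)
print(local_llm.complete("San Francisco is a ").text)

# Remote engine: talks to an already-running vLLM API server over HTTP;
# VllmServer sets model to "" so no weights are loaded locally.
remote_llm = VllmServer(api_url="http://localhost:8000", max_new_tokens=64)
print(remote_llm.complete("San Francisco is a ").text)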
import asyncio

from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex


async def main():
    # DOWNLOAD LLAMADATASET
    rag_dataset, documents = download_llama_dataset("Llama2PaperDataset", "./data")

    # BUILD BASIC RAG PIPELINE
    index = VectorStoreIndex.from_documents(documents=documents)
    query_engine = index.as_query_engine()

    # EVALUATE WITH PACK
    RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack_stuff")
    rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)

    ############################################################################
    # NOTE: If you have a lower tier OpenAI API subscription like Usage Tier 1 #
    # then you'll need to use different batch_size and sleep_time_in_seconds.  #
    # For Usage Tier 1, settings that seemed to work well were batch_size=5,   #
    # and sleep_time_in_seconds=15 (as of December 2023).                      #
    ############################################################################
    benchmark_df = await rag_evaluator.arun(
        batch_size=20,  # batches the number of openai api calls to make
        sleep_time_in_seconds=1,  # number of seconds sleep before making an api call
    )
    print(benchmark_df)


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
[ "llama_index.core.VectorStoreIndex.from_documents", "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack" ]
[((265, 319), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""Llama2PaperDataset"""', '"""./data"""'], {}), "('Llama2PaperDataset', './data')\n", (287, 319), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((364, 416), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (395, 416), False, 'from llama_index.core import VectorStoreIndex\n'), ((509, 564), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack_stuff"""'], {}), "('RagEvaluatorPack', './pack_stuff')\n", (528, 564), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((1415, 1439), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1437, 1439), False, 'import asyncio\n')]
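Editor's note: the script above drives the coroutine through asyncio.get_event_loop(); on Python 3.7+ the equivalent entry point sketched below is simpler and avoids managing the loop by hand. It reuses main() and the asyncio import from the script above.

# Equivalent entry point (editor's sketch): asyncio.run creates the event loop,
# runs main() to completion, and closes the loop afterwards.
if __name__ == "__main__":
    asyncio.run(main())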
"""Prompts.""" from abc import ABC, abstractmethod from copy import deepcopy from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Tuple, Union, ) from llama_index.core.bridge.pydantic import Field if TYPE_CHECKING: from llama_index.core.bridge.langchain import ( BasePromptTemplate as LangchainTemplate, ) # pants: no-infer-dep from llama_index.core.bridge.langchain import ( ConditionalPromptSelector as LangchainSelector, ) from llama_index.core.base.llms.types import ChatMessage from llama_index.core.base.query_pipeline.query import ( ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable, ) from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.base.llms.base import BaseLLM from llama_index.core.base.llms.generic_utils import ( messages_to_prompt as default_messages_to_prompt, ) from llama_index.core.base.llms.generic_utils import ( prompt_to_messages, ) from llama_index.core.prompts.prompt_type import PromptType from llama_index.core.prompts.utils import get_template_vars from llama_index.core.types import BaseOutputParser class BasePromptTemplate(ChainableMixin, BaseModel, ABC): metadata: Dict[str, Any] template_vars: List[str] kwargs: Dict[str, str] output_parser: Optional[BaseOutputParser] template_var_mappings: Optional[Dict[str, Any]] = Field( default_factory=dict, description="Template variable mappings (Optional)." ) function_mappings: Optional[Dict[str, Callable]] = Field( default_factory=dict, description=( "Function mappings (Optional). This is a mapping from template " "variable names to functions that take in the current kwargs and " "return a string." ), ) def _map_template_vars(self, kwargs: Dict[str, Any]) -> Dict[str, Any]: """For keys in template_var_mappings, swap in the right keys.""" template_var_mappings = self.template_var_mappings or {} return {template_var_mappings.get(k, k): v for k, v in kwargs.items()} def _map_function_vars(self, kwargs: Dict[str, Any]) -> Dict[str, Any]: """For keys in function_mappings, compute values and combine w/ kwargs. Users can pass in functions instead of fixed values as format variables. For each function, we call the function with the current kwargs, get back the value, and then use that value in the template for the corresponding format variable. """ function_mappings = self.function_mappings or {} # first generate the values for the functions new_kwargs = {} for k, v in function_mappings.items(): # TODO: figure out what variables to pass into each function # is it the kwargs specified during query time? just the fixed kwargs? # all kwargs? new_kwargs[k] = v(**kwargs) # then, add the fixed variables only if not in new_kwargs already # (implying that function mapping will override fixed variables) for k, v in kwargs.items(): if k not in new_kwargs: new_kwargs[k] = v return new_kwargs def _map_all_vars(self, kwargs: Dict[str, Any]) -> Dict[str, Any]: """Map both template and function variables. We (1) first call function mappings to compute functions, and then (2) call the template_var_mappings. """ # map function new_kwargs = self._map_function_vars(kwargs) # map template vars (to point to existing format vars in string template) return self._map_template_vars(new_kwargs) class Config: arbitrary_types_allowed = True @abstractmethod def partial_format(self, **kwargs: Any) -> "BasePromptTemplate": ... @abstractmethod def format(self, llm: Optional[BaseLLM] = None, **kwargs: Any) -> str: ... 
@abstractmethod def format_messages( self, llm: Optional[BaseLLM] = None, **kwargs: Any ) -> List[ChatMessage]: ... @abstractmethod def get_template(self, llm: Optional[BaseLLM] = None) -> str: ... def _as_query_component( self, llm: Optional[BaseLLM] = None, **kwargs: Any ) -> QueryComponent: """As query component.""" return PromptComponent(prompt=self, format_messages=False, llm=llm) class PromptTemplate(BasePromptTemplate): template: str def __init__( self, template: str, prompt_type: str = PromptType.CUSTOM, output_parser: Optional[BaseOutputParser] = None, metadata: Optional[Dict[str, Any]] = None, template_var_mappings: Optional[Dict[str, Any]] = None, function_mappings: Optional[Dict[str, Callable]] = None, **kwargs: Any, ) -> None: if metadata is None: metadata = {} metadata["prompt_type"] = prompt_type template_vars = get_template_vars(template) super().__init__( template=template, template_vars=template_vars, kwargs=kwargs, metadata=metadata, output_parser=output_parser, template_var_mappings=template_var_mappings, function_mappings=function_mappings, ) def partial_format(self, **kwargs: Any) -> "PromptTemplate": """Partially format the prompt.""" # NOTE: this is a hack to get around deepcopy failing on output parser output_parser = self.output_parser self.output_parser = None # get function and fixed kwargs, and add that to a copy # of the current prompt object prompt = deepcopy(self) prompt.kwargs.update(kwargs) # NOTE: put the output parser back prompt.output_parser = output_parser self.output_parser = output_parser return prompt def format( self, llm: Optional[BaseLLM] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, **kwargs: Any, ) -> str: """Format the prompt into a string.""" del llm # unused all_kwargs = { **self.kwargs, **kwargs, } mapped_all_kwargs = self._map_all_vars(all_kwargs) prompt = self.template.format(**mapped_all_kwargs) if self.output_parser is not None: prompt = self.output_parser.format(prompt) if completion_to_prompt is not None: prompt = completion_to_prompt(prompt) return prompt def format_messages( self, llm: Optional[BaseLLM] = None, **kwargs: Any ) -> List[ChatMessage]: """Format the prompt into a list of chat messages.""" del llm # unused prompt = self.format(**kwargs) return prompt_to_messages(prompt) def get_template(self, llm: Optional[BaseLLM] = None) -> str: return self.template class ChatPromptTemplate(BasePromptTemplate): message_templates: List[ChatMessage] def __init__( self, message_templates: List[ChatMessage], prompt_type: str = PromptType.CUSTOM, output_parser: Optional[BaseOutputParser] = None, metadata: Optional[Dict[str, Any]] = None, template_var_mappings: Optional[Dict[str, Any]] = None, function_mappings: Optional[Dict[str, Callable]] = None, **kwargs: Any, ): if metadata is None: metadata = {} metadata["prompt_type"] = prompt_type template_vars = [] for message_template in message_templates: template_vars.extend(get_template_vars(message_template.content or "")) super().__init__( message_templates=message_templates, kwargs=kwargs, metadata=metadata, output_parser=output_parser, template_vars=template_vars, template_var_mappings=template_var_mappings, function_mappings=function_mappings, ) @classmethod def from_messages( cls, message_templates: Union[List[Tuple[str, str]], List[ChatMessage]], **kwargs: Any, ) -> "ChatPromptTemplate": """From messages.""" if isinstance(message_templates[0], tuple): message_templates = [ ChatMessage.from_str(role=role, content=content) for role, content in message_templates ] 
        return cls(message_templates=message_templates, **kwargs)

    def partial_format(self, **kwargs: Any) -> "ChatPromptTemplate":
        prompt = deepcopy(self)
        prompt.kwargs.update(kwargs)
        return prompt

    def format(
        self,
        llm: Optional[BaseLLM] = None,
        messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
        **kwargs: Any,
    ) -> str:
        del llm  # unused
        messages = self.format_messages(**kwargs)

        if messages_to_prompt is not None:
            return messages_to_prompt(messages)

        return default_messages_to_prompt(messages)

    def format_messages(
        self, llm: Optional[BaseLLM] = None, **kwargs: Any
    ) -> List[ChatMessage]:
        del llm  # unused
        """Format the prompt into a list of chat messages."""
        all_kwargs = {
            **self.kwargs,
            **kwargs,
        }
        mapped_all_kwargs = self._map_all_vars(all_kwargs)

        messages: List[ChatMessage] = []
        for message_template in self.message_templates:
            template_vars = get_template_vars(message_template.content or "")
            relevant_kwargs = {
                k: v for k, v in mapped_all_kwargs.items() if k in template_vars
            }
            content_template = message_template.content or ""

            # if there's mappings specified, make sure those are used
            content = content_template.format(**relevant_kwargs)

            message: ChatMessage = message_template.copy()
            message.content = content
            messages.append(message)

        if self.output_parser is not None:
            messages = self.output_parser.format_messages(messages)

        return messages

    def get_template(self, llm: Optional[BaseLLM] = None) -> str:
        return default_messages_to_prompt(self.message_templates)

    def _as_query_component(
        self, llm: Optional[BaseLLM] = None, **kwargs: Any
    ) -> QueryComponent:
        """As query component."""
        return PromptComponent(prompt=self, format_messages=True, llm=llm)


class SelectorPromptTemplate(BasePromptTemplate):
    default_template: BasePromptTemplate
    conditionals: Optional[
        List[Tuple[Callable[[BaseLLM], bool], BasePromptTemplate]]
    ] = None

    def __init__(
        self,
        default_template: BasePromptTemplate,
        conditionals: Optional[
            List[Tuple[Callable[[BaseLLM], bool], BasePromptTemplate]]
        ] = None,
    ):
        metadata = default_template.metadata
        kwargs = default_template.kwargs
        template_vars = default_template.template_vars
        output_parser = default_template.output_parser

        super().__init__(
            default_template=default_template,
            conditionals=conditionals,
            metadata=metadata,
            kwargs=kwargs,
            template_vars=template_vars,
            output_parser=output_parser,
        )

    def select(self, llm: Optional[BaseLLM] = None) -> BasePromptTemplate:
        # ensure output parser is up to date
        self.default_template.output_parser = self.output_parser

        if llm is None:
            return self.default_template

        if self.conditionals is not None:
            for condition, prompt in self.conditionals:
                if condition(llm):
                    # ensure output parser is up to date
                    prompt.output_parser = self.output_parser
                    return prompt

        return self.default_template

    def partial_format(self, **kwargs: Any) -> "SelectorPromptTemplate":
        default_template = self.default_template.partial_format(**kwargs)
        if self.conditionals is None:
            conditionals = None
        else:
            conditionals = [
                (condition, prompt.partial_format(**kwargs))
                for condition, prompt in self.conditionals
            ]
        return SelectorPromptTemplate(
            default_template=default_template, conditionals=conditionals
        )

    def format(self, llm: Optional[BaseLLM] = None, **kwargs: Any) -> str:
        """Format the prompt into a string."""
        prompt = self.select(llm=llm)
        return prompt.format(**kwargs)

    def format_messages(
        self, llm: Optional[BaseLLM] = None, **kwargs: Any
    ) -> List[ChatMessage]:
        """Format the prompt into a list of chat messages."""
        prompt = self.select(llm=llm)
        return prompt.format_messages(**kwargs)

    def get_template(self, llm: Optional[BaseLLM] = None) -> str:
        prompt = self.select(llm=llm)
        return prompt.get_template(llm=llm)


class LangchainPromptTemplate(BasePromptTemplate):
    selector: Any
    requires_langchain_llm: bool = False

    def __init__(
        self,
        template: Optional["LangchainTemplate"] = None,
        selector: Optional["LangchainSelector"] = None,
        output_parser: Optional[BaseOutputParser] = None,
        prompt_type: str = PromptType.CUSTOM,
        metadata: Optional[Dict[str, Any]] = None,
        template_var_mappings: Optional[Dict[str, Any]] = None,
        function_mappings: Optional[Dict[str, Callable]] = None,
        requires_langchain_llm: bool = False,
    ) -> None:
        try:
            from llama_index.core.bridge.langchain import (
                ConditionalPromptSelector as LangchainSelector,
            )
        except ImportError:
            raise ImportError(
                "Must install `llama_index[langchain]` to use LangchainPromptTemplate."
            )
        if selector is None:
            if template is None:
                raise ValueError("Must provide either template or selector.")
            selector = LangchainSelector(default_prompt=template)
        else:
            if template is not None:
                raise ValueError("Must provide either template or selector.")
            selector = selector

        kwargs = selector.default_prompt.partial_variables
        template_vars = selector.default_prompt.input_variables

        if metadata is None:
            metadata = {}
        metadata["prompt_type"] = prompt_type

        super().__init__(
            selector=selector,
            metadata=metadata,
            kwargs=kwargs,
            template_vars=template_vars,
            output_parser=output_parser,
            template_var_mappings=template_var_mappings,
            function_mappings=function_mappings,
            requires_langchain_llm=requires_langchain_llm,
        )

    def partial_format(self, **kwargs: Any) -> "BasePromptTemplate":
        """Partially format the prompt."""
        from llama_index.core.bridge.langchain import (
            ConditionalPromptSelector as LangchainSelector,
        )

        mapped_kwargs = self._map_all_vars(kwargs)
        default_prompt = self.selector.default_prompt.partial(**mapped_kwargs)
        conditionals = [
            (condition, prompt.partial(**mapped_kwargs))
            for condition, prompt in self.selector.conditionals
        ]
        lc_selector = LangchainSelector(
            default_prompt=default_prompt, conditionals=conditionals
        )

        # copy full prompt object, replace selector
        lc_prompt = deepcopy(self)
        lc_prompt.selector = lc_selector
        return lc_prompt

    def format(self, llm: Optional[BaseLLM] = None, **kwargs: Any) -> str:
        """Format the prompt into a string."""
        from llama_index.llms.langchain import LangChainLLM  # pants: no-infer-dep

        if llm is not None:
            # if llamaindex LLM is provided, and we require a langchain LLM,
            # then error. but otherwise if `requires_langchain_llm` is False,
            # then we can just use the default prompt
            if not isinstance(llm, LangChainLLM) and self.requires_langchain_llm:
                raise ValueError("Must provide a LangChainLLM.")
            elif not isinstance(llm, LangChainLLM):
                lc_template = self.selector.default_prompt
            else:
                lc_template = self.selector.get_prompt(llm=llm.llm)
        else:
            lc_template = self.selector.default_prompt

        # if there's mappings specified, make sure those are used
        mapped_kwargs = self._map_all_vars(kwargs)
        return lc_template.format(**mapped_kwargs)

    def format_messages(
        self, llm: Optional[BaseLLM] = None, **kwargs: Any
    ) -> List[ChatMessage]:
        """Format the prompt into a list of chat messages."""
        from llama_index.llms.langchain import LangChainLLM  # pants: no-infer-dep
        from llama_index.llms.langchain.utils import (
            from_lc_messages,
        )  # pants: no-infer-dep

        if llm is not None:
            # if llamaindex LLM is provided, and we require a langchain LLM,
            # then error. but otherwise if `requires_langchain_llm` is False,
            # then we can just use the default prompt
            if not isinstance(llm, LangChainLLM) and self.requires_langchain_llm:
                raise ValueError("Must provide a LangChainLLM.")
            elif not isinstance(llm, LangChainLLM):
                lc_template = self.selector.default_prompt
            else:
                lc_template = self.selector.get_prompt(llm=llm.llm)
        else:
            lc_template = self.selector.default_prompt

        # if there's mappings specified, make sure those are used
        mapped_kwargs = self._map_all_vars(kwargs)
        lc_prompt_value = lc_template.format_prompt(**mapped_kwargs)
        lc_messages = lc_prompt_value.to_messages()
        return from_lc_messages(lc_messages)

    def get_template(self, llm: Optional[BaseLLM] = None) -> str:
        from llama_index.llms.langchain import LangChainLLM  # pants: no-infer-dep

        if llm is not None:
            # if llamaindex LLM is provided, and we require a langchain LLM,
            # then error. but otherwise if `requires_langchain_llm` is False,
            # then we can just use the default prompt
            if not isinstance(llm, LangChainLLM) and self.requires_langchain_llm:
                raise ValueError("Must provide a LangChainLLM.")
            elif not isinstance(llm, LangChainLLM):
                lc_template = self.selector.default_prompt
            else:
                lc_template = self.selector.get_prompt(llm=llm.llm)
        else:
            lc_template = self.selector.default_prompt

        try:
            return str(lc_template.template)  # type: ignore
        except AttributeError:
            return str(lc_template)


# NOTE: only for backwards compatibility
Prompt = PromptTemplate


class PromptComponent(QueryComponent):
    """Prompt component."""

    prompt: BasePromptTemplate = Field(..., description="Prompt")
    llm: Optional[BaseLLM] = Field(
        default=None, description="LLM to use for formatting prompt."
    )
    format_messages: bool = Field(
        default=False,
        description="Whether to format the prompt into a list of chat messages.",
    )

    class Config:
        arbitrary_types_allowed = True

    def set_callback_manager(self, callback_manager: Any) -> None:
        """Set callback manager."""

    def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
        """Validate component inputs during run_component."""
        keys = list(input.keys())
        for k in keys:
            input[k] = validate_and_convert_stringable(input[k])
        return input

    def _run_component(self, **kwargs: Any) -> Any:
        """Run component."""
        if self.format_messages:
            output: Union[str, List[ChatMessage]] = self.prompt.format_messages(
                llm=self.llm, **kwargs
            )
        else:
            output = self.prompt.format(llm=self.llm, **kwargs)
        return {"prompt": output}

    async def _arun_component(self, **kwargs: Any) -> Any:
        """Run component."""
        # NOTE: no native async for prompt
        return self._run_component(**kwargs)

    @property
    def input_keys(self) -> InputKeys:
        """Input keys."""
        return InputKeys.from_keys(
            set(self.prompt.template_vars) - set(self.prompt.kwargs)
        )

    @property
    def output_keys(self) -> OutputKeys:
        """Output keys."""
        return OutputKeys.from_keys({"prompt"})
[ "llama_index.llms.langchain.utils.from_lc_messages", "llama_index.core.base.query_pipeline.query.OutputKeys.from_keys", "llama_index.core.bridge.pydantic.Field", "llama_index.core.bridge.langchain.ConditionalPromptSelector", "llama_index.core.base.query_pipeline.query.validate_and_convert_stringable", "llama_index.core.base.llms.generic_utils.messages_to_prompt", "llama_index.core.base.llms.types.ChatMessage.from_str", "llama_index.core.prompts.utils.get_template_vars", "llama_index.core.base.llms.generic_utils.prompt_to_messages" ]
[((1473, 1559), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Template variable mappings (Optional)."""'}), "(default_factory=dict, description=\n 'Template variable mappings (Optional).')\n", (1478, 1559), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1624, 1819), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Function mappings (Optional). This is a mapping from template variable names to functions that take in the current kwargs and return a string."""'}), "(default_factory=dict, description=\n 'Function mappings (Optional). This is a mapping from template variable names to functions that take in the current kwargs and return a string.'\n )\n", (1629, 1819), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((19377, 19409), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Prompt"""'}), "(..., description='Prompt')\n", (19382, 19409), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((19439, 19507), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""LLM to use for formatting prompt."""'}), "(default=None, description='LLM to use for formatting prompt.')\n", (19444, 19507), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((19550, 19649), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to format the prompt into a list of chat messages."""'}), "(default=False, description=\n 'Whether to format the prompt into a list of chat messages.')\n", (19555, 19649), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((5075, 5102), 'llama_index.core.prompts.utils.get_template_vars', 'get_template_vars', (['template'], {}), '(template)\n', (5092, 5102), False, 'from llama_index.core.prompts.utils import get_template_vars\n'), ((5803, 5817), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (5811, 5817), False, 'from copy import deepcopy\n'), ((6932, 6958), 'llama_index.core.base.llms.generic_utils.prompt_to_messages', 'prompt_to_messages', (['prompt'], {}), '(prompt)\n', (6950, 6958), False, 'from llama_index.core.base.llms.generic_utils import prompt_to_messages\n'), ((8719, 8733), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (8727, 8733), False, 'from copy import deepcopy\n'), ((9169, 9205), 'llama_index.core.base.llms.generic_utils.messages_to_prompt', 'default_messages_to_prompt', (['messages'], {}), '(messages)\n', (9195, 9205), True, 'from llama_index.core.base.llms.generic_utils import messages_to_prompt as default_messages_to_prompt\n'), ((10403, 10453), 'llama_index.core.base.llms.generic_utils.messages_to_prompt', 'default_messages_to_prompt', (['self.message_templates'], {}), '(self.message_templates)\n', (10429, 10453), True, 'from llama_index.core.base.llms.generic_utils import messages_to_prompt as default_messages_to_prompt\n'), ((15675, 15750), 'llama_index.core.bridge.langchain.ConditionalPromptSelector', 'LangchainSelector', ([], {'default_prompt': 'default_prompt', 'conditionals': 'conditionals'}), '(default_prompt=default_prompt, conditionals=conditionals)\n', (15692, 15750), True, 'from llama_index.core.bridge.langchain import ConditionalPromptSelector as LangchainSelector\n'), ((15846, 15860), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (15854, 15860), False, 'from copy import deepcopy\n'), ((18234, 18263), 
'llama_index.llms.langchain.utils.from_lc_messages', 'from_lc_messages', (['lc_messages'], {}), '(lc_messages)\n', (18250, 18263), False, 'from llama_index.llms.langchain.utils import from_lc_messages\n'), ((20950, 20982), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'prompt'}"], {}), "({'prompt'})\n", (20970, 20982), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((9674, 9723), 'llama_index.core.prompts.utils.get_template_vars', 'get_template_vars', (["(message_template.content or '')"], {}), "(message_template.content or '')\n", (9691, 9723), False, 'from llama_index.core.prompts.utils import get_template_vars\n'), ((14324, 14366), 'llama_index.core.bridge.langchain.ConditionalPromptSelector', 'LangchainSelector', ([], {'default_prompt': 'template'}), '(default_prompt=template)\n', (14341, 14366), True, 'from llama_index.core.bridge.langchain import ConditionalPromptSelector as LangchainSelector\n'), ((20056, 20097), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (['input[k]'], {}), '(input[k])\n', (20087, 20097), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((7750, 7799), 'llama_index.core.prompts.utils.get_template_vars', 'get_template_vars', (["(message_template.content or '')"], {}), "(message_template.content or '')\n", (7767, 7799), False, 'from llama_index.core.prompts.utils import get_template_vars\n'), ((8448, 8496), 'llama_index.core.base.llms.types.ChatMessage.from_str', 'ChatMessage.from_str', ([], {'role': 'role', 'content': 'content'}), '(role=role, content=content)\n', (8468, 8496), False, 'from llama_index.core.base.llms.types import ChatMessage\n')]
"""Prompts.""" from abc import ABC, abstractmethod from copy import deepcopy from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Tuple, Union, ) from llama_index.core.bridge.pydantic import Field if TYPE_CHECKING: from llama_index.core.bridge.langchain import ( BasePromptTemplate as LangchainTemplate, ) # pants: no-infer-dep from llama_index.core.bridge.langchain import ( ConditionalPromptSelector as LangchainSelector, ) from llama_index.core.base.llms.types import ChatMessage from llama_index.core.base.query_pipeline.query import ( ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable, ) from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.base.llms.base import BaseLLM from llama_index.core.base.llms.generic_utils import ( messages_to_prompt as default_messages_to_prompt, ) from llama_index.core.base.llms.generic_utils import ( prompt_to_messages, ) from llama_index.core.prompts.prompt_type import PromptType from llama_index.core.prompts.utils import get_template_vars from llama_index.core.types import BaseOutputParser class BasePromptTemplate(ChainableMixin, BaseModel, ABC): metadata: Dict[str, Any] template_vars: List[str] kwargs: Dict[str, str] output_parser: Optional[BaseOutputParser] template_var_mappings: Optional[Dict[str, Any]] = Field( default_factory=dict, description="Template variable mappings (Optional)." ) function_mappings: Optional[Dict[str, Callable]] = Field( default_factory=dict, description=( "Function mappings (Optional). This is a mapping from template " "variable names to functions that take in the current kwargs and " "return a string." ), ) def _map_template_vars(self, kwargs: Dict[str, Any]) -> Dict[str, Any]: """For keys in template_var_mappings, swap in the right keys.""" template_var_mappings = self.template_var_mappings or {} return {template_var_mappings.get(k, k): v for k, v in kwargs.items()} def _map_function_vars(self, kwargs: Dict[str, Any]) -> Dict[str, Any]: """For keys in function_mappings, compute values and combine w/ kwargs. Users can pass in functions instead of fixed values as format variables. For each function, we call the function with the current kwargs, get back the value, and then use that value in the template for the corresponding format variable. """ function_mappings = self.function_mappings or {} # first generate the values for the functions new_kwargs = {} for k, v in function_mappings.items(): # TODO: figure out what variables to pass into each function # is it the kwargs specified during query time? just the fixed kwargs? # all kwargs? new_kwargs[k] = v(**kwargs) # then, add the fixed variables only if not in new_kwargs already # (implying that function mapping will override fixed variables) for k, v in kwargs.items(): if k not in new_kwargs: new_kwargs[k] = v return new_kwargs def _map_all_vars(self, kwargs: Dict[str, Any]) -> Dict[str, Any]: """Map both template and function variables. We (1) first call function mappings to compute functions, and then (2) call the template_var_mappings. """ # map function new_kwargs = self._map_function_vars(kwargs) # map template vars (to point to existing format vars in string template) return self._map_template_vars(new_kwargs) class Config: arbitrary_types_allowed = True @abstractmethod def partial_format(self, **kwargs: Any) -> "BasePromptTemplate": ... @abstractmethod def format(self, llm: Optional[BaseLLM] = None, **kwargs: Any) -> str: ... 
@abstractmethod def format_messages( self, llm: Optional[BaseLLM] = None, **kwargs: Any ) -> List[ChatMessage]: ... @abstractmethod def get_template(self, llm: Optional[BaseLLM] = None) -> str: ... def _as_query_component( self, llm: Optional[BaseLLM] = None, **kwargs: Any ) -> QueryComponent: """As query component.""" return PromptComponent(prompt=self, format_messages=False, llm=llm) class PromptTemplate(BasePromptTemplate): template: str def __init__( self, template: str, prompt_type: str = PromptType.CUSTOM, output_parser: Optional[BaseOutputParser] = None, metadata: Optional[Dict[str, Any]] = None, template_var_mappings: Optional[Dict[str, Any]] = None, function_mappings: Optional[Dict[str, Callable]] = None, **kwargs: Any, ) -> None: if metadata is None: metadata = {} metadata["prompt_type"] = prompt_type template_vars = get_template_vars(template) super().__init__( template=template, template_vars=template_vars, kwargs=kwargs, metadata=metadata, output_parser=output_parser, template_var_mappings=template_var_mappings, function_mappings=function_mappings, ) def partial_format(self, **kwargs: Any) -> "PromptTemplate": """Partially format the prompt.""" # NOTE: this is a hack to get around deepcopy failing on output parser output_parser = self.output_parser self.output_parser = None # get function and fixed kwargs, and add that to a copy # of the current prompt object prompt = deepcopy(self) prompt.kwargs.update(kwargs) # NOTE: put the output parser back prompt.output_parser = output_parser self.output_parser = output_parser return prompt def format( self, llm: Optional[BaseLLM] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, **kwargs: Any, ) -> str: """Format the prompt into a string.""" del llm # unused all_kwargs = { **self.kwargs, **kwargs, } mapped_all_kwargs = self._map_all_vars(all_kwargs) prompt = self.template.format(**mapped_all_kwargs) if self.output_parser is not None: prompt = self.output_parser.format(prompt) if completion_to_prompt is not None: prompt = completion_to_prompt(prompt) return prompt def format_messages( self, llm: Optional[BaseLLM] = None, **kwargs: Any ) -> List[ChatMessage]: """Format the prompt into a list of chat messages.""" del llm # unused prompt = self.format(**kwargs) return prompt_to_messages(prompt) def get_template(self, llm: Optional[BaseLLM] = None) -> str: return self.template class ChatPromptTemplate(BasePromptTemplate): message_templates: List[ChatMessage] def __init__( self, message_templates: List[ChatMessage], prompt_type: str = PromptType.CUSTOM, output_parser: Optional[BaseOutputParser] = None, metadata: Optional[Dict[str, Any]] = None, template_var_mappings: Optional[Dict[str, Any]] = None, function_mappings: Optional[Dict[str, Callable]] = None, **kwargs: Any, ): if metadata is None: metadata = {} metadata["prompt_type"] = prompt_type template_vars = [] for message_template in message_templates: template_vars.extend(get_template_vars(message_template.content or "")) super().__init__( message_templates=message_templates, kwargs=kwargs, metadata=metadata, output_parser=output_parser, template_vars=template_vars, template_var_mappings=template_var_mappings, function_mappings=function_mappings, ) @classmethod def from_messages( cls, message_templates: Union[List[Tuple[str, str]], List[ChatMessage]], **kwargs: Any, ) -> "ChatPromptTemplate": """From messages.""" if isinstance(message_templates[0], tuple): message_templates = [ ChatMessage.from_str(role=role, content=content) for role, content in message_templates ] 
return cls(message_templates=message_templates, **kwargs) def partial_format(self, **kwargs: Any) -> "ChatPromptTemplate": prompt = deepcopy(self) prompt.kwargs.update(kwargs) return prompt def format( self, llm: Optional[BaseLLM] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, **kwargs: Any, ) -> str: del llm # unused messages = self.format_messages(**kwargs) if messages_to_prompt is not None: return messages_to_prompt(messages) return default_messages_to_prompt(messages) def format_messages( self, llm: Optional[BaseLLM] = None, **kwargs: Any ) -> List[ChatMessage]: del llm # unused """Format the prompt into a list of chat messages.""" all_kwargs = { **self.kwargs, **kwargs, } mapped_all_kwargs = self._map_all_vars(all_kwargs) messages: List[ChatMessage] = [] for message_template in self.message_templates: template_vars = get_template_vars(message_template.content or "") relevant_kwargs = { k: v for k, v in mapped_all_kwargs.items() if k in template_vars } content_template = message_template.content or "" # if there's mappings specified, make sure those are used content = content_template.format(**relevant_kwargs) message: ChatMessage = message_template.copy() message.content = content messages.append(message) if self.output_parser is not None: messages = self.output_parser.format_messages(messages) return messages def get_template(self, llm: Optional[BaseLLM] = None) -> str: return default_messages_to_prompt(self.message_templates) def _as_query_component( self, llm: Optional[BaseLLM] = None, **kwargs: Any ) -> QueryComponent: """As query component.""" return PromptComponent(prompt=self, format_messages=True, llm=llm) class SelectorPromptTemplate(BasePromptTemplate): default_template: BasePromptTemplate conditionals: Optional[ List[Tuple[Callable[[BaseLLM], bool], BasePromptTemplate]] ] = None def __init__( self, default_template: BasePromptTemplate, conditionals: Optional[ List[Tuple[Callable[[BaseLLM], bool], BasePromptTemplate]] ] = None, ): metadata = default_template.metadata kwargs = default_template.kwargs template_vars = default_template.template_vars output_parser = default_template.output_parser super().__init__( default_template=default_template, conditionals=conditionals, metadata=metadata, kwargs=kwargs, template_vars=template_vars, output_parser=output_parser, ) def select(self, llm: Optional[BaseLLM] = None) -> BasePromptTemplate: # ensure output parser is up to date self.default_template.output_parser = self.output_parser if llm is None: return self.default_template if self.conditionals is not None: for condition, prompt in self.conditionals: if condition(llm): # ensure output parser is up to date prompt.output_parser = self.output_parser return prompt return self.default_template def partial_format(self, **kwargs: Any) -> "SelectorPromptTemplate": default_template = self.default_template.partial_format(**kwargs) if self.conditionals is None: conditionals = None else: conditionals = [ (condition, prompt.partial_format(**kwargs)) for condition, prompt in self.conditionals ] return SelectorPromptTemplate( default_template=default_template, conditionals=conditionals ) def format(self, llm: Optional[BaseLLM] = None, **kwargs: Any) -> str: """Format the prompt into a string.""" prompt = self.select(llm=llm) return prompt.format(**kwargs) def format_messages( self, llm: Optional[BaseLLM] = None, **kwargs: Any ) -> List[ChatMessage]: """Format the prompt into a list of chat messages.""" prompt = self.select(llm=llm) return 
prompt.format_messages(**kwargs) def get_template(self, llm: Optional[BaseLLM] = None) -> str: prompt = self.select(llm=llm) return prompt.get_template(llm=llm) class LangchainPromptTemplate(BasePromptTemplate): selector: Any requires_langchain_llm: bool = False def __init__( self, template: Optional["LangchainTemplate"] = None, selector: Optional["LangchainSelector"] = None, output_parser: Optional[BaseOutputParser] = None, prompt_type: str = PromptType.CUSTOM, metadata: Optional[Dict[str, Any]] = None, template_var_mappings: Optional[Dict[str, Any]] = None, function_mappings: Optional[Dict[str, Callable]] = None, requires_langchain_llm: bool = False, ) -> None: try: from llama_index.core.bridge.langchain import ( ConditionalPromptSelector as LangchainSelector, ) except ImportError: raise ImportError( "Must install `llama_index[langchain]` to use LangchainPromptTemplate." ) if selector is None: if template is None: raise ValueError("Must provide either template or selector.") selector = LangchainSelector(default_prompt=template) else: if template is not None: raise ValueError("Must provide either template or selector.") selector = selector kwargs = selector.default_prompt.partial_variables template_vars = selector.default_prompt.input_variables if metadata is None: metadata = {} metadata["prompt_type"] = prompt_type super().__init__( selector=selector, metadata=metadata, kwargs=kwargs, template_vars=template_vars, output_parser=output_parser, template_var_mappings=template_var_mappings, function_mappings=function_mappings, requires_langchain_llm=requires_langchain_llm, ) def partial_format(self, **kwargs: Any) -> "BasePromptTemplate": """Partially format the prompt.""" from llama_index.core.bridge.langchain import ( ConditionalPromptSelector as LangchainSelector, ) mapped_kwargs = self._map_all_vars(kwargs) default_prompt = self.selector.default_prompt.partial(**mapped_kwargs) conditionals = [ (condition, prompt.partial(**mapped_kwargs)) for condition, prompt in self.selector.conditionals ] lc_selector = LangchainSelector( default_prompt=default_prompt, conditionals=conditionals ) # copy full prompt object, replace selector lc_prompt = deepcopy(self) lc_prompt.selector = lc_selector return lc_prompt def format(self, llm: Optional[BaseLLM] = None, **kwargs: Any) -> str: """Format the prompt into a string.""" from llama_index.llms.langchain import LangChainLLM # pants: no-infer-dep if llm is not None: # if llamaindex LLM is provided, and we require a langchain LLM, # then error. but otherwise if `requires_langchain_llm` is False, # then we can just use the default prompt if not isinstance(llm, LangChainLLM) and self.requires_langchain_llm: raise ValueError("Must provide a LangChainLLM.") elif not isinstance(llm, LangChainLLM): lc_template = self.selector.default_prompt else: lc_template = self.selector.get_prompt(llm=llm.llm) else: lc_template = self.selector.default_prompt # if there's mappings specified, make sure those are used mapped_kwargs = self._map_all_vars(kwargs) return lc_template.format(**mapped_kwargs) def format_messages( self, llm: Optional[BaseLLM] = None, **kwargs: Any ) -> List[ChatMessage]: """Format the prompt into a list of chat messages.""" from llama_index.llms.langchain import LangChainLLM # pants: no-infer-dep from llama_index.llms.langchain.utils import ( from_lc_messages, ) # pants: no-infer-dep if llm is not None: # if llamaindex LLM is provided, and we require a langchain LLM, # then error. 
but otherwise if `requires_langchain_llm` is False, # then we can just use the default prompt if not isinstance(llm, LangChainLLM) and self.requires_langchain_llm: raise ValueError("Must provide a LangChainLLM.") elif not isinstance(llm, LangChainLLM): lc_template = self.selector.default_prompt else: lc_template = self.selector.get_prompt(llm=llm.llm) else: lc_template = self.selector.default_prompt # if there's mappings specified, make sure those are used mapped_kwargs = self._map_all_vars(kwargs) lc_prompt_value = lc_template.format_prompt(**mapped_kwargs) lc_messages = lc_prompt_value.to_messages() return from_lc_messages(lc_messages) def get_template(self, llm: Optional[BaseLLM] = None) -> str: from llama_index.llms.langchain import LangChainLLM # pants: no-infer-dep if llm is not None: # if llamaindex LLM is provided, and we require a langchain LLM, # then error. but otherwise if `requires_langchain_llm` is False, # then we can just use the default prompt if not isinstance(llm, LangChainLLM) and self.requires_langchain_llm: raise ValueError("Must provide a LangChainLLM.") elif not isinstance(llm, LangChainLLM): lc_template = self.selector.default_prompt else: lc_template = self.selector.get_prompt(llm=llm.llm) else: lc_template = self.selector.default_prompt try: return str(lc_template.template) # type: ignore except AttributeError: return str(lc_template) # NOTE: only for backwards compatibility Prompt = PromptTemplate class PromptComponent(QueryComponent): """Prompt component.""" prompt: BasePromptTemplate = Field(..., description="Prompt") llm: Optional[BaseLLM] = Field( default=None, description="LLM to use for formatting prompt." ) format_messages: bool = Field( default=False, description="Whether to format the prompt into a list of chat messages.", ) class Config: arbitrary_types_allowed = True def set_callback_manager(self, callback_manager: Any) -> None: """Set callback manager.""" def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]: """Validate component inputs during run_component.""" keys = list(input.keys()) for k in keys: input[k] = validate_and_convert_stringable(input[k]) return input def _run_component(self, **kwargs: Any) -> Any: """Run component.""" if self.format_messages: output: Union[str, List[ChatMessage]] = self.prompt.format_messages( llm=self.llm, **kwargs ) else: output = self.prompt.format(llm=self.llm, **kwargs) return {"prompt": output} async def _arun_component(self, **kwargs: Any) -> Any: """Run component.""" # NOTE: no native async for prompt return self._run_component(**kwargs) @property def input_keys(self) -> InputKeys: """Input keys.""" return InputKeys.from_keys( set(self.prompt.template_vars) - set(self.prompt.kwargs) ) @property def output_keys(self) -> OutputKeys: """Output keys.""" return OutputKeys.from_keys({"prompt"})
[ "llama_index.llms.langchain.utils.from_lc_messages", "llama_index.core.base.query_pipeline.query.OutputKeys.from_keys", "llama_index.core.bridge.pydantic.Field", "llama_index.core.bridge.langchain.ConditionalPromptSelector", "llama_index.core.base.query_pipeline.query.validate_and_convert_stringable", "llama_index.core.base.llms.generic_utils.messages_to_prompt", "llama_index.core.base.llms.types.ChatMessage.from_str", "llama_index.core.prompts.utils.get_template_vars", "llama_index.core.base.llms.generic_utils.prompt_to_messages" ]
[((1473, 1559), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Template variable mappings (Optional)."""'}), "(default_factory=dict, description=\n 'Template variable mappings (Optional).')\n", (1478, 1559), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1624, 1819), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Function mappings (Optional). This is a mapping from template variable names to functions that take in the current kwargs and return a string."""'}), "(default_factory=dict, description=\n 'Function mappings (Optional). This is a mapping from template variable names to functions that take in the current kwargs and return a string.'\n )\n", (1629, 1819), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((19377, 19409), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Prompt"""'}), "(..., description='Prompt')\n", (19382, 19409), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((19439, 19507), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""LLM to use for formatting prompt."""'}), "(default=None, description='LLM to use for formatting prompt.')\n", (19444, 19507), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((19550, 19649), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to format the prompt into a list of chat messages."""'}), "(default=False, description=\n 'Whether to format the prompt into a list of chat messages.')\n", (19555, 19649), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((5075, 5102), 'llama_index.core.prompts.utils.get_template_vars', 'get_template_vars', (['template'], {}), '(template)\n', (5092, 5102), False, 'from llama_index.core.prompts.utils import get_template_vars\n'), ((5803, 5817), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (5811, 5817), False, 'from copy import deepcopy\n'), ((6932, 6958), 'llama_index.core.base.llms.generic_utils.prompt_to_messages', 'prompt_to_messages', (['prompt'], {}), '(prompt)\n', (6950, 6958), False, 'from llama_index.core.base.llms.generic_utils import prompt_to_messages\n'), ((8719, 8733), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (8727, 8733), False, 'from copy import deepcopy\n'), ((9169, 9205), 'llama_index.core.base.llms.generic_utils.messages_to_prompt', 'default_messages_to_prompt', (['messages'], {}), '(messages)\n', (9195, 9205), True, 'from llama_index.core.base.llms.generic_utils import messages_to_prompt as default_messages_to_prompt\n'), ((10403, 10453), 'llama_index.core.base.llms.generic_utils.messages_to_prompt', 'default_messages_to_prompt', (['self.message_templates'], {}), '(self.message_templates)\n', (10429, 10453), True, 'from llama_index.core.base.llms.generic_utils import messages_to_prompt as default_messages_to_prompt\n'), ((15675, 15750), 'llama_index.core.bridge.langchain.ConditionalPromptSelector', 'LangchainSelector', ([], {'default_prompt': 'default_prompt', 'conditionals': 'conditionals'}), '(default_prompt=default_prompt, conditionals=conditionals)\n', (15692, 15750), True, 'from llama_index.core.bridge.langchain import ConditionalPromptSelector as LangchainSelector\n'), ((15846, 15860), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (15854, 15860), False, 'from copy import deepcopy\n'), ((18234, 18263), 
'llama_index.llms.langchain.utils.from_lc_messages', 'from_lc_messages', (['lc_messages'], {}), '(lc_messages)\n', (18250, 18263), False, 'from llama_index.llms.langchain.utils import from_lc_messages\n'), ((20950, 20982), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'prompt'}"], {}), "({'prompt'})\n", (20970, 20982), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((9674, 9723), 'llama_index.core.prompts.utils.get_template_vars', 'get_template_vars', (["(message_template.content or '')"], {}), "(message_template.content or '')\n", (9691, 9723), False, 'from llama_index.core.prompts.utils import get_template_vars\n'), ((14324, 14366), 'llama_index.core.bridge.langchain.ConditionalPromptSelector', 'LangchainSelector', ([], {'default_prompt': 'template'}), '(default_prompt=template)\n', (14341, 14366), True, 'from llama_index.core.bridge.langchain import ConditionalPromptSelector as LangchainSelector\n'), ((20056, 20097), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (['input[k]'], {}), '(input[k])\n', (20087, 20097), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((7750, 7799), 'llama_index.core.prompts.utils.get_template_vars', 'get_template_vars', (["(message_template.content or '')"], {}), "(message_template.content or '')\n", (7767, 7799), False, 'from llama_index.core.prompts.utils import get_template_vars\n'), ((8448, 8496), 'llama_index.core.base.llms.types.ChatMessage.from_str', 'ChatMessage.from_str', ([], {'role': 'role', 'content': 'content'}), '(role=role, content=content)\n', (8468, 8496), False, 'from llama_index.core.base.llms.types import ChatMessage\n')]
import asyncio

from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
from llama_index.llms import OpenAI


async def main():
    # DOWNLOAD LLAMADATASET
    rag_dataset, documents = download_llama_dataset(
        "Uber10KDataset2021", "./uber10k_2021_dataset"
    )

    # BUILD BASIC RAG PIPELINE
    index = VectorStoreIndex.from_documents(documents=documents)
    query_engine = index.as_query_engine()

    # EVALUATE WITH PACK
    RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack_stuff")
    judge_llm = OpenAI(model="gpt-3.5-turbo")
    rag_evaluator = RagEvaluatorPack(
        query_engine=query_engine, rag_dataset=rag_dataset, judge_llm=judge_llm
    )

    ############################################################################
    # NOTE: If have a lower tier subscription for OpenAI API like Usage Tier 1 #
    # then you'll need to use different batch_size and sleep_time_in_seconds.  #
    # For Usage Tier 1, settings that seemed to work well were batch_size=5,   #
    # and sleep_time_in_seconds=15 (as of December 2023.)                      #
    ############################################################################
    benchmark_df = await rag_evaluator.arun(
        batch_size=20,  # batches the number of openai api calls to make
        sleep_time_in_seconds=1,  # number of seconds sleep before making an api call
    )
    print(benchmark_df)


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    # pass a coroutine object, not the bare function, to run_until_complete
    loop.run_until_complete(main())
[ "llama_index.core.VectorStoreIndex.from_documents", "llama_index.llms.OpenAI", "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack" ]
[((301, 371), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""Uber10KDataset2021"""', '"""./uber10k_2021_dataset"""'], {}), "('Uber10KDataset2021', './uber10k_2021_dataset')\n", (323, 371), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((430, 482), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (461, 482), False, 'from llama_index.core import VectorStoreIndex\n'), ((575, 630), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack_stuff"""'], {}), "('RagEvaluatorPack', './pack_stuff')\n", (594, 630), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((647, 676), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (653, 676), False, 'from llama_index.llms import OpenAI\n'), ((1562, 1586), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1584, 1586), False, 'import asyncio\n')]
import asyncio

from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core.evaluation import PairwiseComparisonEvaluator
from llama_index.llms import OpenAI, Gemini
from llama_index.core import ServiceContext
import pandas as pd


async def main():
    # DOWNLOAD LLAMADATASET
    pairwise_evaluator_dataset, _ = download_llama_dataset(
        "MtBenchHumanJudgementDataset", "./mt_bench_data"
    )

    # DEFINE EVALUATORS
    gpt_4_context = ServiceContext.from_defaults(
        llm=OpenAI(temperature=0, model="gpt-4"),
    )

    gpt_3p5_context = ServiceContext.from_defaults(
        llm=OpenAI(temperature=0, model="gpt-3.5-turbo"),
    )

    gemini_pro_context = ServiceContext.from_defaults(
        llm=Gemini(model="models/gemini-pro", temperature=0)
    )

    evaluators = {
        "gpt-4": PairwiseComparisonEvaluator(service_context=gpt_4_context),
        "gpt-3.5": PairwiseComparisonEvaluator(service_context=gpt_3p5_context),
        "gemini-pro": PairwiseComparisonEvaluator(service_context=gemini_pro_context),
    }

    # EVALUATE WITH PACK
    ############################################################################
    # NOTE: If have a lower tier subscription for OpenAI API like Usage Tier 1 #
    # then you'll need to use different batch_size and sleep_time_in_seconds.  #
    # For Usage Tier 1, settings that seemed to work well were batch_size=5,   #
    # and sleep_time_in_seconds=15 (as of December 2023.)                      #
    ############################################################################
    EvaluatorBenchmarkerPack = download_llama_pack("EvaluatorBenchmarkerPack", "./pack")
    evaluator_benchmarker = EvaluatorBenchmarkerPack(
        evaluator=evaluators["gpt-3.5"],
        eval_dataset=pairwise_evaluator_dataset,
        show_progress=True,
    )
    gpt_3p5_benchmark_df = await evaluator_benchmarker.arun(
        batch_size=100, sleep_time_in_seconds=0
    )

    evaluator_benchmarker = EvaluatorBenchmarkerPack(
        evaluator=evaluators["gpt-4"],
        eval_dataset=pairwise_evaluator_dataset,
        show_progress=True,
    )
    gpt_4_benchmark_df = await evaluator_benchmarker.arun(
        batch_size=100, sleep_time_in_seconds=0
    )

    evaluator_benchmarker = EvaluatorBenchmarkerPack(
        evaluator=evaluators["gemini-pro"],
        eval_dataset=pairwise_evaluator_dataset,
        show_progress=True,
    )
    gemini_pro_benchmark_df = await evaluator_benchmarker.arun(
        batch_size=5, sleep_time_in_seconds=0.5
    )

    benchmark_df = pd.concat(
        [
            gpt_3p5_benchmark_df,
            gpt_4_benchmark_df,
            gemini_pro_benchmark_df,
        ],
        axis=0,
    )
    print(benchmark_df)


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    # pass a coroutine object, not the bare function, to run_until_complete
    loop.run_until_complete(main())
[ "llama_index.core.llama_pack.download_llama_pack", "llama_index.core.evaluation.PairwiseComparisonEvaluator", "llama_index.llms.Gemini", "llama_index.llms.OpenAI", "llama_index.core.llama_dataset.download_llama_dataset" ]
[((402, 475), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""MtBenchHumanJudgementDataset"""', '"""./mt_bench_data"""'], {}), "('MtBenchHumanJudgementDataset', './mt_bench_data')\n", (424, 475), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((1675, 1732), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""EvaluatorBenchmarkerPack"""', '"""./pack"""'], {}), "('EvaluatorBenchmarkerPack', './pack')\n", (1694, 1732), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((2636, 2726), 'pandas.concat', 'pd.concat', (['[gpt_3p5_benchmark_df, gpt_4_benchmark_df, gemini_pro_benchmark_df]'], {'axis': '(0)'}), '([gpt_3p5_benchmark_df, gpt_4_benchmark_df,\n gemini_pro_benchmark_df], axis=0)\n', (2645, 2726), True, 'import pandas as pd\n'), ((2857, 2881), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2879, 2881), False, 'import asyncio\n'), ((898, 956), 'llama_index.core.evaluation.PairwiseComparisonEvaluator', 'PairwiseComparisonEvaluator', ([], {'service_context': 'gpt_4_context'}), '(service_context=gpt_4_context)\n', (925, 956), False, 'from llama_index.core.evaluation import PairwiseComparisonEvaluator\n'), ((977, 1037), 'llama_index.core.evaluation.PairwiseComparisonEvaluator', 'PairwiseComparisonEvaluator', ([], {'service_context': 'gpt_3p5_context'}), '(service_context=gpt_3p5_context)\n', (1004, 1037), False, 'from llama_index.core.evaluation import PairwiseComparisonEvaluator\n'), ((1061, 1124), 'llama_index.core.evaluation.PairwiseComparisonEvaluator', 'PairwiseComparisonEvaluator', ([], {'service_context': 'gemini_pro_context'}), '(service_context=gemini_pro_context)\n', (1088, 1124), False, 'from llama_index.core.evaluation import PairwiseComparisonEvaluator\n'), ((577, 613), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-4"""'}), "(temperature=0, model='gpt-4')\n", (583, 613), False, 'from llama_index.llms import OpenAI, Gemini\n'), ((686, 730), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-3.5-turbo"""'}), "(temperature=0, model='gpt-3.5-turbo')\n", (692, 730), False, 'from llama_index.llms import OpenAI, Gemini\n'), ((806, 854), 'llama_index.llms.Gemini', 'Gemini', ([], {'model': '"""models/gemini-pro"""', 'temperature': '(0)'}), "(model='models/gemini-pro', temperature=0)\n", (812, 854), False, 'from llama_index.llms import OpenAI, Gemini\n')]
from abc import abstractmethod
from typing import (
    Any,
    Sequence,
)

from llama_index.core.base.llms.types import (
    ChatMessage,
    ChatResponse,
    ChatResponseAsyncGen,
    ChatResponseGen,
    CompletionResponse,
    CompletionResponseAsyncGen,
    CompletionResponseGen,
    LLMMetadata,
)
from llama_index.core.base.query_pipeline.query import (
    ChainableMixin,
)
from llama_index.core.bridge.pydantic import Field, validator
from llama_index.core.callbacks import CallbackManager
from llama_index.core.schema import BaseComponent


class BaseLLM(ChainableMixin, BaseComponent):
    """LLM interface."""

    callback_manager: CallbackManager = Field(
        default_factory=CallbackManager, exclude=True
    )

    class Config:
        arbitrary_types_allowed = True

    @validator("callback_manager", pre=True)
    def _validate_callback_manager(cls, v: CallbackManager) -> CallbackManager:
        if v is None:
            return CallbackManager([])
        return v

    @property
    @abstractmethod
    def metadata(self) -> LLMMetadata:
        """LLM metadata."""

    @abstractmethod
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        """Chat endpoint for LLM."""

    @abstractmethod
    def complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        """Completion endpoint for LLM."""

    @abstractmethod
    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        """Streaming chat endpoint for LLM."""

    @abstractmethod
    def stream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        """Streaming completion endpoint for LLM."""

    # ===== Async Endpoints =====
    @abstractmethod
    async def achat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponse:
        """Async chat endpoint for LLM."""

    @abstractmethod
    async def acomplete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        """Async completion endpoint for LLM."""

    @abstractmethod
    async def astream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        """Async streaming chat endpoint for LLM."""

    @abstractmethod
    async def astream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseAsyncGen:
        """Async streaming completion endpoint for LLM."""
[ "llama_index.core.bridge.pydantic.Field", "llama_index.core.callbacks.CallbackManager", "llama_index.core.bridge.pydantic.validator" ]
[((669, 721), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)'}), '(default_factory=CallbackManager, exclude=True)\n', (674, 721), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((800, 839), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""callback_manager"""'], {'pre': '(True)'}), "('callback_manager', pre=True)\n", (809, 839), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((961, 980), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (976, 980), False, 'from llama_index.core.callbacks import CallbackManager\n')]
from typing import Any, Sequence from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, ) from llama_index.legacy.llms.base import ( llm_chat_callback, llm_completion_callback, ) from llama_index.legacy.llms.generic_utils import ( completion_response_to_chat_response, stream_completion_response_to_chat_response, ) from llama_index.legacy.llms.llm import LLM class CustomLLM(LLM): """Simple abstract base class for custom LLMs. Subclasses must implement the `__init__`, `_complete`, `_stream_complete`, and `metadata` methods. """ @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: prompt = self.messages_to_prompt(messages) completion_response = self.complete(prompt, formatted=True, **kwargs) return completion_response_to_chat_response(completion_response) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: prompt = self.messages_to_prompt(messages) completion_response_gen = self.stream_complete(prompt, formatted=True, **kwargs) return stream_completion_response_to_chat_response(completion_response_gen) @llm_chat_callback() async def achat( self, messages: Sequence[ChatMessage], **kwargs: Any, ) -> ChatResponse: return self.chat(messages, **kwargs) @llm_chat_callback() async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any, ) -> ChatResponseAsyncGen: async def gen() -> ChatResponseAsyncGen: for message in self.stream_chat(messages, **kwargs): yield message # NOTE: convert generator to async generator return gen() @llm_completion_callback() async def acomplete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: return self.complete(prompt, formatted=formatted, **kwargs) @llm_completion_callback() async def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: async def gen() -> CompletionResponseAsyncGen: for message in self.stream_complete(prompt, formatted=formatted, **kwargs): yield message # NOTE: convert generator to async generator return gen() @classmethod def class_name(cls) -> str: return "custom_llm"
[ "llama_index.legacy.llms.generic_utils.completion_response_to_chat_response", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response" ]
[((710, 729), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (727, 729), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1022, 1041), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (1039, 1041), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1380, 1399), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (1397, 1399), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1573, 1592), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (1590, 1592), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1955, 1980), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1978, 1980), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((2175, 2200), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (2198, 2200), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((958, 1015), 'llama_index.legacy.llms.generic_utils.completion_response_to_chat_response', 'completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (994, 1015), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((1305, 1373), 'llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response', 'stream_completion_response_to_chat_response', (['completion_response_gen'], {}), '(completion_response_gen)\n', (1348, 1373), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n')]
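`CustomLLM` above narrows the full interface: its class body calls the public `complete` and `stream_complete`, so in practice a subclass supplies those two methods plus `metadata` and inherits the chat and async endpoints. A sketch of that subclassing contract, assuming the legacy package is importable; `EchoLLM` and its echoing behaviour are invented for illustration:

from typing import Any

from llama_index.legacy.core.llms.types import (
    CompletionResponse,
    CompletionResponseGen,
    LLMMetadata,
)
from llama_index.legacy.llms.base import llm_completion_callback
from llama_index.legacy.llms.custom import CustomLLM


class EchoLLM(CustomLLM):
    """Hypothetical LLM that returns the prompt unchanged."""

    @property
    def metadata(self) -> LLMMetadata:
        return LLMMetadata(model_name="echo")

    @llm_completion_callback()
    def complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        return CompletionResponse(text=prompt)

    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        def gen() -> CompletionResponseGen:
            yield CompletionResponse(text=prompt, delta=prompt)

        return gen()


print(EchoLLM().complete("hello").text)  # -> "hello"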
from abc import abstractmethod from typing import Any, List, Sequence, Union from llama_index.core.base.query_pipeline.query import ( ChainableMixin, QueryComponent, ) from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType from llama_index.core.schema import QueryBundle, QueryType from llama_index.core.tools.types import ToolMetadata MetadataType = Union[str, ToolMetadata] class SingleSelection(BaseModel): """A single selection of a choice.""" index: int reason: str class MultiSelection(BaseModel): """A multi-selection of choices.""" selections: List[SingleSelection] @property def ind(self) -> int: if len(self.selections) != 1: raise ValueError( f"There are {len(self.selections)} selections, " "please use .inds." ) return self.selections[0].index @property def reason(self) -> str: if len(self.reasons) != 1: raise ValueError( f"There are {len(self.reasons)} selections, " "please use .reasons." ) return self.selections[0].reason @property def inds(self) -> List[int]: return [x.index for x in self.selections] @property def reasons(self) -> List[str]: return [x.reason for x in self.selections] # separate name for clarity and to not confuse function calling model SelectorResult = MultiSelection def _wrap_choice(choice: MetadataType) -> ToolMetadata: if isinstance(choice, ToolMetadata): return choice elif isinstance(choice, str): return ToolMetadata(description=choice) else: raise ValueError(f"Unexpected type: {type(choice)}") def _wrap_query(query: QueryType) -> QueryBundle: if isinstance(query, QueryBundle): return query elif isinstance(query, str): return QueryBundle(query_str=query) else: raise ValueError(f"Unexpected type: {type(query)}") class BaseSelector(PromptMixin, ChainableMixin): """Base selector.""" def _get_prompt_modules(self) -> PromptMixinType: """Get prompt sub-modules.""" return {} def select( self, choices: Sequence[MetadataType], query: QueryType ) -> SelectorResult: metadatas = [_wrap_choice(choice) for choice in choices] query_bundle = _wrap_query(query) return self._select(choices=metadatas, query=query_bundle) async def aselect( self, choices: Sequence[MetadataType], query: QueryType ) -> SelectorResult: metadatas = [_wrap_choice(choice) for choice in choices] query_bundle = _wrap_query(query) return await self._aselect(choices=metadatas, query=query_bundle) @abstractmethod def _select( self, choices: Sequence[ToolMetadata], query: QueryBundle ) -> SelectorResult: pass @abstractmethod async def _aselect( self, choices: Sequence[ToolMetadata], query: QueryBundle ) -> SelectorResult: pass def _as_query_component(self, **kwargs: Any) -> QueryComponent: """As query component.""" from llama_index.core.query_pipeline.components.router import ( SelectorComponent, ) return SelectorComponent(selector=self)
[ "llama_index.core.query_pipeline.components.router.SelectorComponent", "llama_index.core.tools.types.ToolMetadata", "llama_index.core.schema.QueryBundle" ]
[((3300, 3332), 'llama_index.core.query_pipeline.components.router.SelectorComponent', 'SelectorComponent', ([], {'selector': 'self'}), '(selector=self)\n', (3317, 3332), False, 'from llama_index.core.query_pipeline.components.router import SelectorComponent\n'), ((1653, 1685), 'llama_index.core.tools.types.ToolMetadata', 'ToolMetadata', ([], {'description': 'choice'}), '(description=choice)\n', (1665, 1685), False, 'from llama_index.core.tools.types import ToolMetadata\n'), ((1917, 1945), 'llama_index.core.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (1928, 1945), False, 'from llama_index.core.schema import QueryBundle, QueryType\n')]
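The `SingleSelection`/`MultiSelection` containers above are plain pydantic models, so their accessors can be exercised directly. A small sketch, assuming the two classes from the snippet are in scope; the indices and reasons are made up:

single = MultiSelection(
    selections=[SingleSelection(index=1, reason="query is about pricing")]
)
print(single.ind)     # -> 1, valid only because exactly one selection exists
print(single.reason)  # -> "query is about pricing"

multi = MultiSelection(
    selections=[
        SingleSelection(index=0, reason="covers installation"),
        SingleSelection(index=2, reason="covers deployment"),
    ]
)
print(multi.inds)     # -> [0, 2]
print(multi.reasons)  # -> ['covers installation', 'covers deployment']
# multi.ind or multi.reason would raise ValueError here; use the plural accessors instead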
"""Base agent type.""" import uuid from abc import abstractmethod from typing import Any, Dict, List, Optional from llama_index.legacy.bridge.pydantic import BaseModel, Field from llama_index.legacy.callbacks import CallbackManager, trace_method from llama_index.legacy.chat_engine.types import ( BaseChatEngine, StreamingAgentChatResponse, ) from llama_index.legacy.core.base_query_engine import BaseQueryEngine from llama_index.legacy.core.llms.types import ChatMessage from llama_index.legacy.core.response.schema import RESPONSE_TYPE, Response from llama_index.legacy.memory.types import BaseMemory from llama_index.legacy.prompts.mixin import ( PromptDictType, PromptMixin, PromptMixinType, ) from llama_index.legacy.schema import QueryBundle class BaseAgent(BaseChatEngine, BaseQueryEngine): """Base Agent.""" def _get_prompts(self) -> PromptDictType: """Get prompts.""" # TODO: the ReAct agent does not explicitly specify prompts, would need a # refactor to expose those prompts return {} def _get_prompt_modules(self) -> PromptMixinType: """Get prompt modules.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" # ===== Query Engine Interface ===== @trace_method("query") def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: agent_response = self.chat( query_bundle.query_str, chat_history=[], ) return Response( response=str(agent_response), source_nodes=agent_response.source_nodes ) @trace_method("query") async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: agent_response = await self.achat( query_bundle.query_str, chat_history=[], ) return Response( response=str(agent_response), source_nodes=agent_response.source_nodes ) def stream_chat( self, message: str, chat_history: Optional[List[ChatMessage]] = None ) -> StreamingAgentChatResponse: raise NotImplementedError("stream_chat not implemented") async def astream_chat( self, message: str, chat_history: Optional[List[ChatMessage]] = None ) -> StreamingAgentChatResponse: raise NotImplementedError("astream_chat not implemented") class TaskStep(BaseModel): """Agent task step. Represents a single input step within the execution run ("Task") of an agent given a user input. The output is returned as a `TaskStepOutput`. """ task_id: str = Field(..., diescription="Task ID") step_id: str = Field(..., description="Step ID") input: Optional[str] = Field(default=None, description="User input") # memory: BaseMemory = Field( # ..., type=BaseMemory, description="Conversational Memory" # ) step_state: Dict[str, Any] = Field( default_factory=dict, description="Additional state for a given step." ) # NOTE: the state below may change throughout the course of execution # this tracks the relationships to other steps next_steps: Dict[str, "TaskStep"] = Field( default_factory=dict, description="Next steps to be executed." ) prev_steps: Dict[str, "TaskStep"] = Field( default_factory=dict, description="Previous steps that were dependencies for this step.", ) is_ready: bool = Field( default=True, description="Is this step ready to be executed?" ) def get_next_step( self, step_id: str, input: Optional[str] = None, step_state: Optional[Dict[str, Any]] = None, ) -> "TaskStep": """Convenience function to get next step. Preserve task_id, memory, step_state. """ return TaskStep( task_id=self.task_id, step_id=step_id, input=input, # memory=self.memory, step_state=step_state or self.step_state, ) def link_step( self, next_step: "TaskStep", ) -> None: """Link to next step. 
Add link from this step to next, and from next step to current. """ self.next_steps[next_step.step_id] = next_step next_step.prev_steps[self.step_id] = self class TaskStepOutput(BaseModel): """Agent task step output.""" output: Any = Field(..., description="Task step output") task_step: TaskStep = Field(..., description="Task step input") next_steps: List[TaskStep] = Field(..., description="Next steps to be executed.") is_last: bool = Field(default=False, description="Is this the last step?") def __str__(self) -> str: """String representation.""" return str(self.output) class Task(BaseModel): """Agent Task. Represents a "run" of an agent given a user input. """ class Config: arbitrary_types_allowed = True task_id: str = Field( default_factory=lambda: str(uuid.uuid4()), type=str, description="Task ID" ) input: str = Field(..., type=str, description="User input") # NOTE: this is state that may be modified throughout the course of execution of the task memory: BaseMemory = Field( ..., type=BaseMemory, description=( "Conversational Memory. Maintains state before execution of this task." ), ) callback_manager: CallbackManager = Field( default_factory=CallbackManager, exclude=True, description="Callback manager for the task.", ) extra_state: Dict[str, Any] = Field( default_factory=dict, description=( "Additional user-specified state for a given task. " "Can be modified throughout the execution of a task." ), ) class BaseAgentWorker(PromptMixin): """Base agent worker.""" class Config: arbitrary_types_allowed = True def _get_prompts(self) -> PromptDictType: """Get prompts.""" # TODO: the ReAct agent does not explicitly specify prompts, would need a # refactor to expose those prompts return {} def _get_prompt_modules(self) -> PromptMixinType: """Get prompt modules.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" @abstractmethod def initialize_step(self, task: Task, **kwargs: Any) -> TaskStep: """Initialize step from task.""" @abstractmethod def run_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput: """Run step.""" @abstractmethod async def arun_step( self, step: TaskStep, task: Task, **kwargs: Any ) -> TaskStepOutput: """Run step (async).""" raise NotImplementedError @abstractmethod def stream_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput: """Run step (stream).""" # TODO: figure out if we need a different type for TaskStepOutput raise NotImplementedError @abstractmethod async def astream_step( self, step: TaskStep, task: Task, **kwargs: Any ) -> TaskStepOutput: """Run step (async stream).""" raise NotImplementedError @abstractmethod def finalize_task(self, task: Task, **kwargs: Any) -> None: """Finalize task, after all the steps are completed.""" def set_callback_manager(self, callback_manager: CallbackManager) -> None: """Set callback manager.""" # TODO: make this abstractmethod (right now will break some agent impls)
[ "llama_index.legacy.callbacks.trace_method", "llama_index.legacy.bridge.pydantic.Field" ]
[((1310, 1331), 'llama_index.legacy.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1322, 1331), False, 'from llama_index.legacy.callbacks import CallbackManager, trace_method\n'), ((1633, 1654), 'llama_index.legacy.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1645, 1654), False, 'from llama_index.legacy.callbacks import CallbackManager, trace_method\n'), ((2613, 2647), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'diescription': '"""Task ID"""'}), "(..., diescription='Task ID')\n", (2618, 2647), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((2667, 2700), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Step ID"""'}), "(..., description='Step ID')\n", (2672, 2700), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((2728, 2773), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""User input"""'}), "(default=None, description='User input')\n", (2733, 2773), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((2917, 2994), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional state for a given step."""'}), "(default_factory=dict, description='Additional state for a given step.')\n", (2922, 2994), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((3175, 3244), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Next steps to be executed."""'}), "(default_factory=dict, description='Next steps to be executed.')\n", (3180, 3244), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((3299, 3399), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Previous steps that were dependencies for this step."""'}), "(default_factory=dict, description=\n 'Previous steps that were dependencies for this step.')\n", (3304, 3399), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((3439, 3508), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Is this step ready to be executed?"""'}), "(default=True, description='Is this step ready to be executed?')\n", (3444, 3508), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((4404, 4446), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step output"""'}), "(..., description='Task step output')\n", (4409, 4446), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((4473, 4514), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step input"""'}), "(..., description='Task step input')\n", (4478, 4514), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((4548, 4600), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Next steps to be executed."""'}), "(..., description='Next steps to be executed.')\n", (4553, 4600), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((4621, 4679), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Is this the last step?"""'}), "(default=False, description='Is this the last step?')\n", (4626, 4679), False, 'from llama_index.legacy.bridge.pydantic 
import BaseModel, Field\n'), ((5080, 5126), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'type': 'str', 'description': '"""User input"""'}), "(..., type=str, description='User input')\n", (5085, 5126), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((5247, 5364), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'type': 'BaseMemory', 'description': '"""Conversational Memory. Maintains state before execution of this task."""'}), "(..., type=BaseMemory, description=\n 'Conversational Memory. Maintains state before execution of this task.')\n", (5252, 5364), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((5456, 5559), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)', 'description': '"""Callback manager for the task."""'}), "(default_factory=CallbackManager, exclude=True, description=\n 'Callback manager for the task.')\n", (5461, 5559), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((5621, 5775), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional user-specified state for a given task. Can be modified throughout the execution of a task."""'}), "(default_factory=dict, description=\n 'Additional user-specified state for a given task. Can be modified throughout the execution of a task.'\n )\n", (5626, 5775), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((5010, 5022), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5020, 5022), False, 'import uuid\n')]
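`TaskStep.get_next_step` and `link_step` above maintain the doubly linked step graph for an agent run. A sketch of the intended call pattern, assuming `TaskStep` from the snippet is in scope; the IDs and inputs are invented:

step_1 = TaskStep(task_id="task-1", step_id="step-1", input="summarize the report")

# get_next_step preserves task_id (and step_state); link_step wires both directions
step_2 = step_1.get_next_step(step_id="step-2", input="shorten the summary")
step_1.link_step(step_2)

print(list(step_1.next_steps))  # -> ['step-2']
print(list(step_2.prev_steps))  # -> ['step-1']
print(step_2.task_id)           # -> 'task-1'
print(step_2.is_ready)          # -> True (default)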
"""Base agent type.""" import uuid from abc import abstractmethod from typing import Any, Dict, List, Optional from llama_index.legacy.bridge.pydantic import BaseModel, Field from llama_index.legacy.callbacks import CallbackManager, trace_method from llama_index.legacy.chat_engine.types import ( BaseChatEngine, StreamingAgentChatResponse, ) from llama_index.legacy.core.base_query_engine import BaseQueryEngine from llama_index.legacy.core.llms.types import ChatMessage from llama_index.legacy.core.response.schema import RESPONSE_TYPE, Response from llama_index.legacy.memory.types import BaseMemory from llama_index.legacy.prompts.mixin import ( PromptDictType, PromptMixin, PromptMixinType, ) from llama_index.legacy.schema import QueryBundle class BaseAgent(BaseChatEngine, BaseQueryEngine): """Base Agent.""" def _get_prompts(self) -> PromptDictType: """Get prompts.""" # TODO: the ReAct agent does not explicitly specify prompts, would need a # refactor to expose those prompts return {} def _get_prompt_modules(self) -> PromptMixinType: """Get prompt modules.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" # ===== Query Engine Interface ===== @trace_method("query") def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: agent_response = self.chat( query_bundle.query_str, chat_history=[], ) return Response( response=str(agent_response), source_nodes=agent_response.source_nodes ) @trace_method("query") async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: agent_response = await self.achat( query_bundle.query_str, chat_history=[], ) return Response( response=str(agent_response), source_nodes=agent_response.source_nodes ) def stream_chat( self, message: str, chat_history: Optional[List[ChatMessage]] = None ) -> StreamingAgentChatResponse: raise NotImplementedError("stream_chat not implemented") async def astream_chat( self, message: str, chat_history: Optional[List[ChatMessage]] = None ) -> StreamingAgentChatResponse: raise NotImplementedError("astream_chat not implemented") class TaskStep(BaseModel): """Agent task step. Represents a single input step within the execution run ("Task") of an agent given a user input. The output is returned as a `TaskStepOutput`. """ task_id: str = Field(..., diescription="Task ID") step_id: str = Field(..., description="Step ID") input: Optional[str] = Field(default=None, description="User input") # memory: BaseMemory = Field( # ..., type=BaseMemory, description="Conversational Memory" # ) step_state: Dict[str, Any] = Field( default_factory=dict, description="Additional state for a given step." ) # NOTE: the state below may change throughout the course of execution # this tracks the relationships to other steps next_steps: Dict[str, "TaskStep"] = Field( default_factory=dict, description="Next steps to be executed." ) prev_steps: Dict[str, "TaskStep"] = Field( default_factory=dict, description="Previous steps that were dependencies for this step.", ) is_ready: bool = Field( default=True, description="Is this step ready to be executed?" ) def get_next_step( self, step_id: str, input: Optional[str] = None, step_state: Optional[Dict[str, Any]] = None, ) -> "TaskStep": """Convenience function to get next step. Preserve task_id, memory, step_state. """ return TaskStep( task_id=self.task_id, step_id=step_id, input=input, # memory=self.memory, step_state=step_state or self.step_state, ) def link_step( self, next_step: "TaskStep", ) -> None: """Link to next step. 
Add link from this step to next, and from next step to current. """ self.next_steps[next_step.step_id] = next_step next_step.prev_steps[self.step_id] = self class TaskStepOutput(BaseModel): """Agent task step output.""" output: Any = Field(..., description="Task step output") task_step: TaskStep = Field(..., description="Task step input") next_steps: List[TaskStep] = Field(..., description="Next steps to be executed.") is_last: bool = Field(default=False, description="Is this the last step?") def __str__(self) -> str: """String representation.""" return str(self.output) class Task(BaseModel): """Agent Task. Represents a "run" of an agent given a user input. """ class Config: arbitrary_types_allowed = True task_id: str = Field( default_factory=lambda: str(uuid.uuid4()), type=str, description="Task ID" ) input: str = Field(..., type=str, description="User input") # NOTE: this is state that may be modified throughout the course of execution of the task memory: BaseMemory = Field( ..., type=BaseMemory, description=( "Conversational Memory. Maintains state before execution of this task." ), ) callback_manager: CallbackManager = Field( default_factory=CallbackManager, exclude=True, description="Callback manager for the task.", ) extra_state: Dict[str, Any] = Field( default_factory=dict, description=( "Additional user-specified state for a given task. " "Can be modified throughout the execution of a task." ), ) class BaseAgentWorker(PromptMixin): """Base agent worker.""" class Config: arbitrary_types_allowed = True def _get_prompts(self) -> PromptDictType: """Get prompts.""" # TODO: the ReAct agent does not explicitly specify prompts, would need a # refactor to expose those prompts return {} def _get_prompt_modules(self) -> PromptMixinType: """Get prompt modules.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" @abstractmethod def initialize_step(self, task: Task, **kwargs: Any) -> TaskStep: """Initialize step from task.""" @abstractmethod def run_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput: """Run step.""" @abstractmethod async def arun_step( self, step: TaskStep, task: Task, **kwargs: Any ) -> TaskStepOutput: """Run step (async).""" raise NotImplementedError @abstractmethod def stream_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput: """Run step (stream).""" # TODO: figure out if we need a different type for TaskStepOutput raise NotImplementedError @abstractmethod async def astream_step( self, step: TaskStep, task: Task, **kwargs: Any ) -> TaskStepOutput: """Run step (async stream).""" raise NotImplementedError @abstractmethod def finalize_task(self, task: Task, **kwargs: Any) -> None: """Finalize task, after all the steps are completed.""" def set_callback_manager(self, callback_manager: CallbackManager) -> None: """Set callback manager.""" # TODO: make this abstractmethod (right now will break some agent impls)
[ "llama_index.legacy.callbacks.trace_method", "llama_index.legacy.bridge.pydantic.Field" ]
[((1310, 1331), 'llama_index.legacy.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1322, 1331), False, 'from llama_index.legacy.callbacks import CallbackManager, trace_method\n'), ((1633, 1654), 'llama_index.legacy.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1645, 1654), False, 'from llama_index.legacy.callbacks import CallbackManager, trace_method\n'), ((2613, 2647), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'diescription': '"""Task ID"""'}), "(..., diescription='Task ID')\n", (2618, 2647), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((2667, 2700), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Step ID"""'}), "(..., description='Step ID')\n", (2672, 2700), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((2728, 2773), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""User input"""'}), "(default=None, description='User input')\n", (2733, 2773), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((2917, 2994), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional state for a given step."""'}), "(default_factory=dict, description='Additional state for a given step.')\n", (2922, 2994), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((3175, 3244), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Next steps to be executed."""'}), "(default_factory=dict, description='Next steps to be executed.')\n", (3180, 3244), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((3299, 3399), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Previous steps that were dependencies for this step."""'}), "(default_factory=dict, description=\n 'Previous steps that were dependencies for this step.')\n", (3304, 3399), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((3439, 3508), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Is this step ready to be executed?"""'}), "(default=True, description='Is this step ready to be executed?')\n", (3444, 3508), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((4404, 4446), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step output"""'}), "(..., description='Task step output')\n", (4409, 4446), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((4473, 4514), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step input"""'}), "(..., description='Task step input')\n", (4478, 4514), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((4548, 4600), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Next steps to be executed."""'}), "(..., description='Next steps to be executed.')\n", (4553, 4600), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((4621, 4679), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Is this the last step?"""'}), "(default=False, description='Is this the last step?')\n", (4626, 4679), False, 'from llama_index.legacy.bridge.pydantic 
import BaseModel, Field\n'), ((5080, 5126), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'type': 'str', 'description': '"""User input"""'}), "(..., type=str, description='User input')\n", (5085, 5126), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((5247, 5364), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'type': 'BaseMemory', 'description': '"""Conversational Memory. Maintains state before execution of this task."""'}), "(..., type=BaseMemory, description=\n 'Conversational Memory. Maintains state before execution of this task.')\n", (5252, 5364), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((5456, 5559), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)', 'description': '"""Callback manager for the task."""'}), "(default_factory=CallbackManager, exclude=True, description=\n 'Callback manager for the task.')\n", (5461, 5559), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((5621, 5775), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional user-specified state for a given task. Can be modified throughout the execution of a task."""'}), "(default_factory=dict, description=\n 'Additional user-specified state for a given task. Can be modified throughout the execution of a task.'\n )\n", (5626, 5775), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((5010, 5022), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5020, 5022), False, 'import uuid\n')]
import json from typing import Any, Dict, Sequence, Tuple import httpx from httpx import Timeout from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole, ) from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback from llama_index.legacy.llms.custom import CustomLLM DEFAULT_REQUEST_TIMEOUT = 30.0 def get_addtional_kwargs( response: Dict[str, Any], exclude: Tuple[str, ...] ) -> Dict[str, Any]: return {k: v for k, v in response.items() if k not in exclude} class Ollama(CustomLLM): base_url: str = Field( default="http://localhost:11434", description="Base url the model is hosted under.", ) model: str = Field(description="The Ollama model to use.") temperature: float = Field( default=0.75, description="The temperature to use for sampling.", gte=0.0, lte=1.0, ) context_window: int = Field( default=DEFAULT_CONTEXT_WINDOW, description="The maximum number of context tokens for the model.", gt=0, ) request_timeout: float = Field( default=DEFAULT_REQUEST_TIMEOUT, description="The timeout for making http request to Ollama API server", ) prompt_key: str = Field( default="prompt", description="The key to use for the prompt in API calls." ) additional_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional model parameters for the Ollama API.", ) @classmethod def class_name(cls) -> str: return "Ollama_llm" @property def metadata(self) -> LLMMetadata: """LLM metadata.""" return LLMMetadata( context_window=self.context_window, num_output=DEFAULT_NUM_OUTPUTS, model_name=self.model, is_chat_model=True, # Ollama supports chat API for all models ) @property def _model_kwargs(self) -> Dict[str, Any]: base_kwargs = { "temperature": self.temperature, "num_ctx": self.context_window, } return { **base_kwargs, **self.additional_kwargs, } @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: payload = { "model": self.model, "messages": [ { "role": message.role.value, "content": message.content, **message.additional_kwargs, } for message in messages ], "options": self._model_kwargs, "stream": False, **kwargs, } with httpx.Client(timeout=Timeout(self.request_timeout)) as client: response = client.post( url=f"{self.base_url}/api/chat", json=payload, ) response.raise_for_status() raw = response.json() message = raw["message"] return ChatResponse( message=ChatMessage( content=message.get("content"), role=MessageRole(message.get("role")), additional_kwargs=get_addtional_kwargs( message, ("content", "role") ), ), raw=raw, additional_kwargs=get_addtional_kwargs(raw, ("message",)), ) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: payload = { "model": self.model, "messages": [ { "role": message.role.value, "content": message.content, **message.additional_kwargs, } for message in messages ], "options": self._model_kwargs, "stream": True, **kwargs, } with httpx.Client(timeout=Timeout(self.request_timeout)) as client: with client.stream( method="POST", url=f"{self.base_url}/api/chat", json=payload, ) as response: response.raise_for_status() text = "" for line in response.iter_lines(): if line: chunk = json.loads(line) if "done" in chunk and chunk["done"]: break message = chunk["message"] delta = message.get("content") text += delta 
yield ChatResponse( message=ChatMessage( content=text, role=MessageRole(message.get("role")), additional_kwargs=get_addtional_kwargs( message, ("content", "role") ), ), delta=delta, raw=chunk, additional_kwargs=get_addtional_kwargs(chunk, ("message",)), ) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: payload = { self.prompt_key: prompt, "model": self.model, "options": self._model_kwargs, "stream": False, **kwargs, } with httpx.Client(timeout=Timeout(self.request_timeout)) as client: response = client.post( url=f"{self.base_url}/api/generate", json=payload, ) response.raise_for_status() raw = response.json() text = raw.get("response") return CompletionResponse( text=text, raw=raw, additional_kwargs=get_addtional_kwargs(raw, ("response",)), ) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: payload = { self.prompt_key: prompt, "model": self.model, "options": self._model_kwargs, "stream": True, **kwargs, } with httpx.Client(timeout=Timeout(self.request_timeout)) as client: with client.stream( method="POST", url=f"{self.base_url}/api/generate", json=payload, ) as response: response.raise_for_status() text = "" for line in response.iter_lines(): if line: chunk = json.loads(line) delta = chunk.get("response") text += delta yield CompletionResponse( delta=delta, text=text, raw=chunk, additional_kwargs=get_addtional_kwargs( chunk, ("response",) ), )
[ "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.bridge.pydantic.Field" ]
[((816, 911), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '"""http://localhost:11434"""', 'description': '"""Base url the model is hosted under."""'}), "(default='http://localhost:11434', description=\n 'Base url the model is hosted under.')\n", (821, 911), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((947, 992), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Ollama model to use."""'}), "(description='The Ollama model to use.')\n", (952, 992), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1018, 1112), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(0.75)', 'description': '"""The temperature to use for sampling."""', 'gte': '(0.0)', 'lte': '(1.0)'}), "(default=0.75, description='The temperature to use for sampling.', gte\n =0.0, lte=1.0)\n", (1023, 1112), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1173, 1288), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_CONTEXT_WINDOW', 'description': '"""The maximum number of context tokens for the model."""', 'gt': '(0)'}), "(default=DEFAULT_CONTEXT_WINDOW, description=\n 'The maximum number of context tokens for the model.', gt=0)\n", (1178, 1288), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1344, 1459), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_REQUEST_TIMEOUT', 'description': '"""The timeout for making http request to Ollama API server"""'}), "(default=DEFAULT_REQUEST_TIMEOUT, description=\n 'The timeout for making http request to Ollama API server')\n", (1349, 1459), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1500, 1587), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '"""prompt"""', 'description': '"""The key to use for the prompt in API calls."""'}), "(default='prompt', description=\n 'The key to use for the prompt in API calls.')\n", (1505, 1587), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1637, 1732), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional model parameters for the Ollama API."""'}), "(default_factory=dict, description=\n 'Additional model parameters for the Ollama API.')\n", (1642, 1732), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2434, 2453), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (2451, 2453), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3730, 3749), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (3747, 3749), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5575, 5600), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5598, 5600), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6418, 6443), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (6441, 6443), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1926, 2053), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'self.context_window', 'num_output': 'DEFAULT_NUM_OUTPUTS', 'model_name': 'self.model', 'is_chat_model': '(True)'}), 
'(context_window=self.context_window, num_output=\n DEFAULT_NUM_OUTPUTS, model_name=self.model, is_chat_model=True)\n', (1937, 2053), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((2992, 3021), 'httpx.Timeout', 'Timeout', (['self.request_timeout'], {}), '(self.request_timeout)\n', (2999, 3021), False, 'from httpx import Timeout\n'), ((4311, 4340), 'httpx.Timeout', 'Timeout', (['self.request_timeout'], {}), '(self.request_timeout)\n', (4318, 4340), False, 'from httpx import Timeout\n'), ((5943, 5972), 'httpx.Timeout', 'Timeout', (['self.request_timeout'], {}), '(self.request_timeout)\n', (5950, 5972), False, 'from httpx import Timeout\n'), ((6795, 6824), 'httpx.Timeout', 'Timeout', (['self.request_timeout'], {}), '(self.request_timeout)\n', (6802, 6824), False, 'from httpx import Timeout\n'), ((4704, 4720), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (4714, 4720), False, 'import json\n'), ((7192, 7208), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (7202, 7208), False, 'import json\n')]
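The `Ollama` class above wraps the local REST endpoints with plain `httpx` calls. A usage sketch, assuming an Ollama server is reachable at the default `http://localhost:11434` and that a model named "llama2" has already been pulled; adjust the model name to whatever is available locally:

llm = Ollama(model="llama2", request_timeout=60.0)

# blocking completion via /api/generate
print(llm.complete("Name one use of a vector database.").text)

# streaming completion; each chunk carries the incremental delta
for chunk in llm.stream_complete("Count to three."):
    print(chunk.delta, end="", flush=True)
print()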
import json from typing import Any, Dict, Sequence, Tuple import httpx from httpx import Timeout from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole, ) from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback from llama_index.legacy.llms.custom import CustomLLM DEFAULT_REQUEST_TIMEOUT = 30.0 def get_addtional_kwargs( response: Dict[str, Any], exclude: Tuple[str, ...] ) -> Dict[str, Any]: return {k: v for k, v in response.items() if k not in exclude} class Ollama(CustomLLM): base_url: str = Field( default="http://localhost:11434", description="Base url the model is hosted under.", ) model: str = Field(description="The Ollama model to use.") temperature: float = Field( default=0.75, description="The temperature to use for sampling.", gte=0.0, lte=1.0, ) context_window: int = Field( default=DEFAULT_CONTEXT_WINDOW, description="The maximum number of context tokens for the model.", gt=0, ) request_timeout: float = Field( default=DEFAULT_REQUEST_TIMEOUT, description="The timeout for making http request to Ollama API server", ) prompt_key: str = Field( default="prompt", description="The key to use for the prompt in API calls." ) additional_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional model parameters for the Ollama API.", ) @classmethod def class_name(cls) -> str: return "Ollama_llm" @property def metadata(self) -> LLMMetadata: """LLM metadata.""" return LLMMetadata( context_window=self.context_window, num_output=DEFAULT_NUM_OUTPUTS, model_name=self.model, is_chat_model=True, # Ollama supports chat API for all models ) @property def _model_kwargs(self) -> Dict[str, Any]: base_kwargs = { "temperature": self.temperature, "num_ctx": self.context_window, } return { **base_kwargs, **self.additional_kwargs, } @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: payload = { "model": self.model, "messages": [ { "role": message.role.value, "content": message.content, **message.additional_kwargs, } for message in messages ], "options": self._model_kwargs, "stream": False, **kwargs, } with httpx.Client(timeout=Timeout(self.request_timeout)) as client: response = client.post( url=f"{self.base_url}/api/chat", json=payload, ) response.raise_for_status() raw = response.json() message = raw["message"] return ChatResponse( message=ChatMessage( content=message.get("content"), role=MessageRole(message.get("role")), additional_kwargs=get_addtional_kwargs( message, ("content", "role") ), ), raw=raw, additional_kwargs=get_addtional_kwargs(raw, ("message",)), ) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: payload = { "model": self.model, "messages": [ { "role": message.role.value, "content": message.content, **message.additional_kwargs, } for message in messages ], "options": self._model_kwargs, "stream": True, **kwargs, } with httpx.Client(timeout=Timeout(self.request_timeout)) as client: with client.stream( method="POST", url=f"{self.base_url}/api/chat", json=payload, ) as response: response.raise_for_status() text = "" for line in response.iter_lines(): if line: chunk = json.loads(line) if "done" in chunk and chunk["done"]: break message = chunk["message"] delta = message.get("content") text += delta 
yield ChatResponse( message=ChatMessage( content=text, role=MessageRole(message.get("role")), additional_kwargs=get_addtional_kwargs( message, ("content", "role") ), ), delta=delta, raw=chunk, additional_kwargs=get_addtional_kwargs(chunk, ("message",)), ) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: payload = { self.prompt_key: prompt, "model": self.model, "options": self._model_kwargs, "stream": False, **kwargs, } with httpx.Client(timeout=Timeout(self.request_timeout)) as client: response = client.post( url=f"{self.base_url}/api/generate", json=payload, ) response.raise_for_status() raw = response.json() text = raw.get("response") return CompletionResponse( text=text, raw=raw, additional_kwargs=get_addtional_kwargs(raw, ("response",)), ) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: payload = { self.prompt_key: prompt, "model": self.model, "options": self._model_kwargs, "stream": True, **kwargs, } with httpx.Client(timeout=Timeout(self.request_timeout)) as client: with client.stream( method="POST", url=f"{self.base_url}/api/generate", json=payload, ) as response: response.raise_for_status() text = "" for line in response.iter_lines(): if line: chunk = json.loads(line) delta = chunk.get("response") text += delta yield CompletionResponse( delta=delta, text=text, raw=chunk, additional_kwargs=get_addtional_kwargs( chunk, ("response",) ), )
[ "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.bridge.pydantic.Field" ]
[((816, 911), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '"""http://localhost:11434"""', 'description': '"""Base url the model is hosted under."""'}), "(default='http://localhost:11434', description=\n 'Base url the model is hosted under.')\n", (821, 911), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((947, 992), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Ollama model to use."""'}), "(description='The Ollama model to use.')\n", (952, 992), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1018, 1112), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(0.75)', 'description': '"""The temperature to use for sampling."""', 'gte': '(0.0)', 'lte': '(1.0)'}), "(default=0.75, description='The temperature to use for sampling.', gte\n =0.0, lte=1.0)\n", (1023, 1112), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1173, 1288), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_CONTEXT_WINDOW', 'description': '"""The maximum number of context tokens for the model."""', 'gt': '(0)'}), "(default=DEFAULT_CONTEXT_WINDOW, description=\n 'The maximum number of context tokens for the model.', gt=0)\n", (1178, 1288), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1344, 1459), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_REQUEST_TIMEOUT', 'description': '"""The timeout for making http request to Ollama API server"""'}), "(default=DEFAULT_REQUEST_TIMEOUT, description=\n 'The timeout for making http request to Ollama API server')\n", (1349, 1459), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1500, 1587), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '"""prompt"""', 'description': '"""The key to use for the prompt in API calls."""'}), "(default='prompt', description=\n 'The key to use for the prompt in API calls.')\n", (1505, 1587), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1637, 1732), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional model parameters for the Ollama API."""'}), "(default_factory=dict, description=\n 'Additional model parameters for the Ollama API.')\n", (1642, 1732), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2434, 2453), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (2451, 2453), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3730, 3749), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (3747, 3749), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5575, 5600), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5598, 5600), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6418, 6443), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (6441, 6443), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1926, 2053), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'self.context_window', 'num_output': 'DEFAULT_NUM_OUTPUTS', 'model_name': 'self.model', 'is_chat_model': '(True)'}), 
'(context_window=self.context_window, num_output=\n DEFAULT_NUM_OUTPUTS, model_name=self.model, is_chat_model=True)\n', (1937, 2053), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((2992, 3021), 'httpx.Timeout', 'Timeout', (['self.request_timeout'], {}), '(self.request_timeout)\n', (2999, 3021), False, 'from httpx import Timeout\n'), ((4311, 4340), 'httpx.Timeout', 'Timeout', (['self.request_timeout'], {}), '(self.request_timeout)\n', (4318, 4340), False, 'from httpx import Timeout\n'), ((5943, 5972), 'httpx.Timeout', 'Timeout', (['self.request_timeout'], {}), '(self.request_timeout)\n', (5950, 5972), False, 'from httpx import Timeout\n'), ((6795, 6824), 'httpx.Timeout', 'Timeout', (['self.request_timeout'], {}), '(self.request_timeout)\n', (6802, 6824), False, 'from httpx import Timeout\n'), ((4704, 4720), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (4714, 4720), False, 'import json\n'), ((7192, 7208), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (7202, 7208), False, 'import json\n')]
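A minimal usage sketch for the Ollama wrapper defined in the module above. It assumes a locally running Ollama server at the default base_url, an already-pulled model (the name "llama2" is illustrative), and that the module is importable as llama_index.legacy.llms.ollama; adjust these to your setup.

from llama_index.legacy.core.llms.types import ChatMessage, MessageRole
from llama_index.legacy.llms.ollama import Ollama  # import path assumed from the package layout

llm = Ollama(model="llama2", request_timeout=60.0)  # model name is an assumption

# Plain completion: CompletionResponse.text carries the generated reply.
print(llm.complete("Briefly explain what a context window is.").text)

# Chat-style call: role/content pairs are forwarded to the /api/chat endpoint.
messages = [
    ChatMessage(role=MessageRole.SYSTEM, content="You are a terse assistant."),
    ChatMessage(role=MessageRole.USER, content="Name one use of text embeddings."),
]
print(llm.chat(messages).message.content)

# Streaming variant: stream_chat yields incremental ChatResponse chunks.
for chunk in llm.stream_chat(messages):
    print(chunk.delta, end="", flush=True)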
import warnings from typing import Any, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole, ) from llama_index.legacy.llms.base import ( llm_chat_callback, llm_completion_callback, ) from llama_index.legacy.llms.cohere_utils import ( CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history, ) from llama_index.legacy.llms.llm import LLM from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode class Cohere(LLM): model: str = Field(description="The cohere model to use.") temperature: float = Field(description="The temperature to use for sampling.") max_retries: int = Field( default=10, description="The maximum number of API retries." ) additional_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional kwargs for the Cohere API." ) max_tokens: int = Field(description="The maximum number of tokens to generate.") _client: Any = PrivateAttr() _aclient: Any = PrivateAttr() def __init__( self, model: str = "command", temperature: float = 0.5, max_tokens: int = 512, timeout: Optional[float] = None, max_retries: int = 10, api_key: Optional[str] = None, additional_kwargs: Optional[Dict[str, Any]] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, ) -> None: try: import cohere except ImportError as e: raise ImportError( "You must install the `cohere` package to use Cohere." 
"Please `pip install cohere`" ) from e additional_kwargs = additional_kwargs or {} callback_manager = callback_manager or CallbackManager([]) self._client = cohere.Client(api_key, client_name="llama_index") self._aclient = cohere.AsyncClient(api_key, client_name="llama_index") super().__init__( temperature=temperature, additional_kwargs=additional_kwargs, timeout=timeout, max_retries=max_retries, model=model, callback_manager=callback_manager, max_tokens=max_tokens, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, ) @classmethod def class_name(cls) -> str: """Get class name.""" return "Cohere_LLM" @property def metadata(self) -> LLMMetadata: return LLMMetadata( context_window=cohere_modelname_to_contextsize(self.model), num_output=self.max_tokens, is_chat_model=True, model_name=self.model, system_role=MessageRole.CHATBOT, ) @property def _model_kwargs(self) -> Dict[str, Any]: base_kwargs = { "model": self.model, "temperature": self.temperature, } return { **base_kwargs, **self.additional_kwargs, } def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]: return { **self._model_kwargs, **kwargs, } @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: history = messages_to_cohere_history(messages[:-1]) prompt = messages[-1].content all_kwargs = self._get_all_kwargs(**kwargs) if all_kwargs["model"] not in CHAT_MODELS: raise ValueError(f"{all_kwargs['model']} not supported for chat") if "stream" in all_kwargs: warnings.warn( "Parameter `stream` is not supported by the `chat` method." "Use the `stream_chat` method instead" ) response = completion_with_retry( client=self._client, max_retries=self.max_retries, chat=True, message=prompt, chat_history=history, **all_kwargs, ) return ChatResponse( message=ChatMessage(role=MessageRole.ASSISTANT, content=response.text), raw=response.__dict__, ) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: all_kwargs = self._get_all_kwargs(**kwargs) if "stream" in all_kwargs: warnings.warn( "Parameter `stream` is not supported by the `chat` method." 
"Use the `stream_chat` method instead" ) response = completion_with_retry( client=self._client, max_retries=self.max_retries, chat=False, prompt=prompt, **all_kwargs, ) return CompletionResponse( text=response.generations[0].text, raw=response.__dict__, ) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: history = messages_to_cohere_history(messages[:-1]) prompt = messages[-1].content all_kwargs = self._get_all_kwargs(**kwargs) all_kwargs["stream"] = True if all_kwargs["model"] not in CHAT_MODELS: raise ValueError(f"{all_kwargs['model']} not supported for chat") response = completion_with_retry( client=self._client, max_retries=self.max_retries, chat=True, message=prompt, chat_history=history, **all_kwargs, ) def gen() -> ChatResponseGen: content = "" role = MessageRole.ASSISTANT for r in response: if "text" in r.__dict__: content_delta = r.text else: content_delta = "" content += content_delta yield ChatResponse( message=ChatMessage(role=role, content=content), delta=content_delta, raw=r.__dict__, ) return gen() @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: all_kwargs = self._get_all_kwargs(**kwargs) all_kwargs["stream"] = True response = completion_with_retry( client=self._client, max_retries=self.max_retries, chat=False, prompt=prompt, **all_kwargs, ) def gen() -> CompletionResponseGen: content = "" for r in response: content_delta = r.text content += content_delta yield CompletionResponse( text=content, delta=content_delta, raw=r._asdict() ) return gen() @llm_chat_callback() async def achat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponse: history = messages_to_cohere_history(messages[:-1]) prompt = messages[-1].content all_kwargs = self._get_all_kwargs(**kwargs) if all_kwargs["model"] not in CHAT_MODELS: raise ValueError(f"{all_kwargs['model']} not supported for chat") if "stream" in all_kwargs: warnings.warn( "Parameter `stream` is not supported by the `chat` method." "Use the `stream_chat` method instead" ) response = await acompletion_with_retry( aclient=self._aclient, max_retries=self.max_retries, chat=True, message=prompt, chat_history=history, **all_kwargs, ) return ChatResponse( message=ChatMessage(role=MessageRole.ASSISTANT, content=response.text), raw=response.__dict__, ) @llm_completion_callback() async def acomplete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: all_kwargs = self._get_all_kwargs(**kwargs) if "stream" in all_kwargs: warnings.warn( "Parameter `stream` is not supported by the `chat` method." 
"Use the `stream_chat` method instead" ) response = await acompletion_with_retry( aclient=self._aclient, max_retries=self.max_retries, chat=False, prompt=prompt, **all_kwargs, ) return CompletionResponse( text=response.generations[0].text, raw=response.__dict__, ) @llm_chat_callback() async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: history = messages_to_cohere_history(messages[:-1]) prompt = messages[-1].content all_kwargs = self._get_all_kwargs(**kwargs) all_kwargs["stream"] = True if all_kwargs["model"] not in CHAT_MODELS: raise ValueError(f"{all_kwargs['model']} not supported for chat") response = await acompletion_with_retry( aclient=self._aclient, max_retries=self.max_retries, chat=True, message=prompt, chat_history=history, **all_kwargs, ) async def gen() -> ChatResponseAsyncGen: content = "" role = MessageRole.ASSISTANT async for r in response: if "text" in r.__dict__: content_delta = r.text else: content_delta = "" content += content_delta yield ChatResponse( message=ChatMessage(role=role, content=content), delta=content_delta, raw=r.__dict__, ) return gen() @llm_completion_callback() async def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: all_kwargs = self._get_all_kwargs(**kwargs) all_kwargs["stream"] = True response = await acompletion_with_retry( aclient=self._aclient, max_retries=self.max_retries, chat=False, prompt=prompt, **all_kwargs, ) async def gen() -> CompletionResponseAsyncGen: content = "" async for r in response: content_delta = r.text content += content_delta yield CompletionResponse( text=content, delta=content_delta, raw=r._asdict() ) return gen()
[ "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.core.llms.types.ChatMessage", "llama_index.legacy.llms.cohere_utils.cohere_modelname_to_contextsize", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.callbacks.CallbackManager", "llama_index.legacy.llms.cohere_utils.messages_to_cohere_history", "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.cohere_utils.acompletion_with_retry", "llama_index.legacy.llms.cohere_utils.completion_with_retry" ]
[((897, 942), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The cohere model to use."""'}), "(description='The cohere model to use.')\n", (902, 942), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((968, 1025), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (973, 1025), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1049, 1116), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of API retries."""'}), "(default=10, description='The maximum number of API retries.')\n", (1054, 1116), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1171, 1256), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the Cohere API."""'}), "(default_factory=dict, description='Additional kwargs for the Cohere API.'\n )\n", (1176, 1256), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1288, 1350), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""'}), "(description='The maximum number of tokens to generate.')\n", (1293, 1350), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1371, 1384), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1382, 1384), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1405, 1418), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1416, 1418), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((4032, 4051), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (4049, 4051), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5025, 5050), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5048, 5050), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5762, 5781), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (5779, 5781), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6993, 7018), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7016, 7018), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((7775, 7794), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (7792, 7794), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8799, 8824), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8822, 8824), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9552, 9571), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9569, 9571), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((10821, 10846), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (10844, 
10846), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((2518, 2567), 'cohere.Client', 'cohere.Client', (['api_key'], {'client_name': '"""llama_index"""'}), "(api_key, client_name='llama_index')\n", (2531, 2567), False, 'import cohere\n'), ((2592, 2646), 'cohere.AsyncClient', 'cohere.AsyncClient', (['api_key'], {'client_name': '"""llama_index"""'}), "(api_key, client_name='llama_index')\n", (2610, 2646), False, 'import cohere\n'), ((4154, 4195), 'llama_index.legacy.llms.cohere_utils.messages_to_cohere_history', 'messages_to_cohere_history', (['messages[:-1]'], {}), '(messages[:-1])\n', (4180, 4195), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((4642, 4781), 'llama_index.legacy.llms.cohere_utils.completion_with_retry', 'completion_with_retry', ([], {'client': 'self._client', 'max_retries': 'self.max_retries', 'chat': '(True)', 'message': 'prompt', 'chat_history': 'history'}), '(client=self._client, max_retries=self.max_retries,\n chat=True, message=prompt, chat_history=history, **all_kwargs)\n', (4663, 4781), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((5443, 5560), 'llama_index.legacy.llms.cohere_utils.completion_with_retry', 'completion_with_retry', ([], {'client': 'self._client', 'max_retries': 'self.max_retries', 'chat': '(False)', 'prompt': 'prompt'}), '(client=self._client, max_retries=self.max_retries,\n chat=False, prompt=prompt, **all_kwargs)\n', (5464, 5560), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((5644, 5720), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response.generations[0].text', 'raw': 'response.__dict__'}), '(text=response.generations[0].text, raw=response.__dict__)\n', (5662, 5720), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((5908, 5949), 'llama_index.legacy.llms.cohere_utils.messages_to_cohere_history', 'messages_to_cohere_history', (['messages[:-1]'], {}), '(messages[:-1])\n', (5934, 5949), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((6224, 6363), 'llama_index.legacy.llms.cohere_utils.completion_with_retry', 'completion_with_retry', ([], {'client': 'self._client', 'max_retries': 'self.max_retries', 'chat': '(True)', 'message': 'prompt', 'chat_history': 'history'}), '(client=self._client, max_retries=self.max_retries,\n chat=True, message=prompt, chat_history=history, **all_kwargs)\n', (6245, 6363), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((7250, 7367), 'llama_index.legacy.llms.cohere_utils.completion_with_retry', 'completion_with_retry', ([], {'client': 'self._client', 'max_retries': 'self.max_retries', 'chat': '(False)', 'prompt': 'prompt'}), '(client=self._client, max_retries=self.max_retries,\n 
chat=False, prompt=prompt, **all_kwargs)\n', (7271, 7367), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((7918, 7959), 'llama_index.legacy.llms.cohere_utils.messages_to_cohere_history', 'messages_to_cohere_history', (['messages[:-1]'], {}), '(messages[:-1])\n', (7944, 7959), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((9434, 9510), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response.generations[0].text', 'raw': 'response.__dict__'}), '(text=response.generations[0].text, raw=response.__dict__)\n', (9452, 9510), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((9710, 9751), 'llama_index.legacy.llms.cohere_utils.messages_to_cohere_history', 'messages_to_cohere_history', (['messages[:-1]'], {}), '(messages[:-1])\n', (9736, 9751), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((2474, 2493), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2489, 2493), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((4463, 4583), 'warnings.warn', 'warnings.warn', (['"""Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead"""'], {}), "(\n 'Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead'\n )\n", (4476, 4583), False, 'import warnings\n'), ((5263, 5383), 'warnings.warn', 'warnings.warn', (['"""Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead"""'], {}), "(\n 'Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead'\n )\n", (5276, 5383), False, 'import warnings\n'), ((8226, 8346), 'warnings.warn', 'warnings.warn', (['"""Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead"""'], {}), "(\n 'Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead'\n )\n", (8239, 8346), False, 'import warnings\n'), ((8412, 8554), 'llama_index.legacy.llms.cohere_utils.acompletion_with_retry', 'acompletion_with_retry', ([], {'aclient': 'self._aclient', 'max_retries': 'self.max_retries', 'chat': '(True)', 'message': 'prompt', 'chat_history': 'history'}), '(aclient=self._aclient, max_retries=self.max_retries,\n chat=True, message=prompt, chat_history=history, **all_kwargs)\n', (8434, 8554), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((9044, 9164), 'warnings.warn', 'warnings.warn', (['"""Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead"""'], {}), "(\n 'Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead'\n )\n", (9057, 9164), False, 'import warnings\n'), ((9230, 9350), 'llama_index.legacy.llms.cohere_utils.acompletion_with_retry', 'acompletion_with_retry', ([], {'aclient': 
'self._aclient', 'max_retries': 'self.max_retries', 'chat': '(False)', 'prompt': 'prompt'}), '(aclient=self._aclient, max_retries=self.max_retries,\n chat=False, prompt=prompt, **all_kwargs)\n', (9252, 9350), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((10032, 10174), 'llama_index.legacy.llms.cohere_utils.acompletion_with_retry', 'acompletion_with_retry', ([], {'aclient': 'self._aclient', 'max_retries': 'self.max_retries', 'chat': '(True)', 'message': 'prompt', 'chat_history': 'history'}), '(aclient=self._aclient, max_retries=self.max_retries,\n chat=True, message=prompt, chat_history=history, **all_kwargs)\n', (10054, 10174), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((11096, 11216), 'llama_index.legacy.llms.cohere_utils.acompletion_with_retry', 'acompletion_with_retry', ([], {'aclient': 'self._aclient', 'max_retries': 'self.max_retries', 'chat': '(False)', 'prompt': 'prompt'}), '(aclient=self._aclient, max_retries=self.max_retries,\n chat=False, prompt=prompt, **all_kwargs)\n', (11118, 11216), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((3405, 3448), 'llama_index.legacy.llms.cohere_utils.cohere_modelname_to_contextsize', 'cohere_modelname_to_contextsize', (['self.model'], {}), '(self.model)\n', (3436, 3448), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((4910, 4972), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'response.text'}), '(role=MessageRole.ASSISTANT, content=response.text)\n', (4921, 4972), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((8684, 8746), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'response.text'}), '(role=MessageRole.ASSISTANT, content=response.text)\n', (8695, 8746), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((6829, 6868), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content'}), '(role=role, content=content)\n', (6840, 6868), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((10657, 10696), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content'}), '(role=role, content=content)\n', (10668, 10696), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n')]
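A minimal usage sketch for the Cohere wrapper defined in the module above. It assumes a valid Cohere API key and that the module is importable as llama_index.legacy.llms.cohere; the model name, temperature, and prompts are illustrative.

from llama_index.legacy.core.llms.types import ChatMessage, MessageRole
from llama_index.legacy.llms.cohere import Cohere  # import path assumed from the package layout

llm = Cohere(model="command", temperature=0.3, api_key="YOUR_COHERE_API_KEY")  # placeholder key

# Completion path (chat=False in completion_with_retry).
print(llm.complete("Write a one-line tagline for a search engine.").text)

# Chat path: earlier messages become chat_history via messages_to_cohere_history().
messages = [
    ChatMessage(role=MessageRole.USER, content="What does the temperature parameter control?"),
]
print(llm.chat(messages).message.content)

# Streaming completion: each chunk exposes the incremental delta.
for chunk in llm.stream_complete("Count from one to five:"):
    print(chunk.delta, end="", flush=True)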
from abc import abstractmethod from typing import List from llama_index.core.indices.query.schema import QueryBundle, QueryType from llama_index.core.prompts.mixin import PromptMixin from llama_index.core.schema import NodeWithScore class BaseImageRetriever(PromptMixin): """Base Image Retriever Abstraction.""" def text_to_image_retrieve( self, str_or_query_bundle: QueryType ) -> List[NodeWithScore]: """Retrieve image nodes given query or single image input. Args: str_or_query_bundle (QueryType): a query text string or a QueryBundle object. """ if isinstance(str_or_query_bundle, str): str_or_query_bundle = QueryBundle(query_str=str_or_query_bundle) return self._text_to_image_retrieve(str_or_query_bundle) @abstractmethod def _text_to_image_retrieve( self, query_bundle: QueryBundle, ) -> List[NodeWithScore]: """Retrieve image nodes or documents given query text. Implemented by the user. """ def image_to_image_retrieve( self, str_or_query_bundle: QueryType ) -> List[NodeWithScore]: """Retrieve image nodes given single image input. Args: str_or_query_bundle (QueryType): a image path string or a QueryBundle object. """ if isinstance(str_or_query_bundle, str): # leave query_str as empty since we are using image_path for image retrieval str_or_query_bundle = QueryBundle( query_str="", image_path=str_or_query_bundle ) return self._image_to_image_retrieve(str_or_query_bundle) @abstractmethod def _image_to_image_retrieve( self, query_bundle: QueryBundle, ) -> List[NodeWithScore]: """Retrieve image nodes or documents given image. Implemented by the user. """ # Async Methods async def atext_to_image_retrieve( self, str_or_query_bundle: QueryType, ) -> List[NodeWithScore]: if isinstance(str_or_query_bundle, str): str_or_query_bundle = QueryBundle(query_str=str_or_query_bundle) return await self._atext_to_image_retrieve(str_or_query_bundle) @abstractmethod async def _atext_to_image_retrieve( self, query_bundle: QueryBundle, ) -> List[NodeWithScore]: """Async retrieve image nodes or documents given query text. Implemented by the user. """ async def aimage_to_image_retrieve( self, str_or_query_bundle: QueryType, ) -> List[NodeWithScore]: if isinstance(str_or_query_bundle, str): # leave query_str as empty since we are using image_path for image retrieval str_or_query_bundle = QueryBundle( query_str="", image_path=str_or_query_bundle ) return await self._aimage_to_image_retrieve(str_or_query_bundle) @abstractmethod async def _aimage_to_image_retrieve( self, query_bundle: QueryBundle, ) -> List[NodeWithScore]: """Async retrieve image nodes or documents given image. Implemented by the user. """
[ "llama_index.core.indices.query.schema.QueryBundle" ]
[((706, 748), 'llama_index.core.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'str_or_query_bundle'}), '(query_str=str_or_query_bundle)\n', (717, 748), False, 'from llama_index.core.indices.query.schema import QueryBundle, QueryType\n'), ((1525, 1582), 'llama_index.core.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': '""""""', 'image_path': 'str_or_query_bundle'}), "(query_str='', image_path=str_or_query_bundle)\n", (1536, 1582), False, 'from llama_index.core.indices.query.schema import QueryBundle, QueryType\n'), ((2145, 2187), 'llama_index.core.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'str_or_query_bundle'}), '(query_str=str_or_query_bundle)\n', (2156, 2187), False, 'from llama_index.core.indices.query.schema import QueryBundle, QueryType\n'), ((2813, 2870), 'llama_index.core.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': '""""""', 'image_path': 'str_or_query_bundle'}), "(query_str='', image_path=str_or_query_bundle)\n", (2824, 2870), False, 'from llama_index.core.indices.query.schema import QueryBundle, QueryType\n')]
import json
from abc import abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, Optional, Type

if TYPE_CHECKING:
    from llama_index.legacy.bridge.langchain import StructuredTool, Tool
from deprecated import deprecated

from llama_index.legacy.bridge.pydantic import BaseModel


class DefaultToolFnSchema(BaseModel):
    """Default tool function Schema."""

    input: str


@dataclass
class ToolMetadata:
    description: str
    name: Optional[str] = None
    fn_schema: Optional[Type[BaseModel]] = DefaultToolFnSchema

    def get_parameters_dict(self) -> dict:
        if self.fn_schema is None:
            parameters = {
                "type": "object",
                "properties": {
                    "input": {"title": "input query string", "type": "string"},
                },
                "required": ["input"],
            }
        else:
            parameters = self.fn_schema.schema()
            parameters = {
                k: v
                for k, v in parameters.items()
                if k in ["type", "properties", "required", "definitions"]
            }
        return parameters

    @property
    def fn_schema_str(self) -> str:
        """Get fn schema as string."""
        if self.fn_schema is None:
            raise ValueError("fn_schema is None.")
        parameters = self.get_parameters_dict()
        return json.dumps(parameters)

    def get_name(self) -> str:
        """Get name."""
        if self.name is None:
            raise ValueError("name is None.")
        return self.name

    @deprecated(
        "Deprecated in favor of `to_openai_tool`, which should be used instead."
    )
    def to_openai_function(self) -> Dict[str, Any]:
        """Deprecated and replaced by `to_openai_tool`.

        The name and arguments of a function that should be called,
        as generated by the model.
        """
        return {
            "name": self.name,
            "description": self.description,
            "parameters": self.get_parameters_dict(),
        }

    def to_openai_tool(self) -> Dict[str, Any]:
        """To OpenAI tool."""
        return {
            "type": "function",
            "function": {
                "name": self.name,
                "description": self.description,
                "parameters": self.get_parameters_dict(),
            },
        }


class ToolOutput(BaseModel):
    """Tool output."""

    content: str
    tool_name: str
    raw_input: Dict[str, Any]
    raw_output: Any

    def __str__(self) -> str:
        """String."""
        return str(self.content)


class BaseTool:
    @property
    @abstractmethod
    def metadata(self) -> ToolMetadata:
        pass

    @abstractmethod
    def __call__(self, input: Any) -> ToolOutput:
        pass

    def _process_langchain_tool_kwargs(
        self,
        langchain_tool_kwargs: Any,
    ) -> Dict[str, Any]:
        """Process langchain tool kwargs."""
        if "name" not in langchain_tool_kwargs:
            langchain_tool_kwargs["name"] = self.metadata.name or ""
        if "description" not in langchain_tool_kwargs:
            langchain_tool_kwargs["description"] = self.metadata.description
        if "fn_schema" not in langchain_tool_kwargs:
            langchain_tool_kwargs["args_schema"] = self.metadata.fn_schema
        return langchain_tool_kwargs

    def to_langchain_tool(
        self,
        **langchain_tool_kwargs: Any,
    ) -> "Tool":
        """To langchain tool."""
        from llama_index.legacy.bridge.langchain import Tool

        langchain_tool_kwargs = self._process_langchain_tool_kwargs(
            langchain_tool_kwargs
        )
        return Tool.from_function(
            func=self.__call__,
            **langchain_tool_kwargs,
        )

    def to_langchain_structured_tool(
        self,
        **langchain_tool_kwargs: Any,
    ) -> "StructuredTool":
        """To langchain structured tool."""
        from llama_index.legacy.bridge.langchain import StructuredTool

        langchain_tool_kwargs = self._process_langchain_tool_kwargs(
            langchain_tool_kwargs
        )
        return StructuredTool.from_function(
            func=self.__call__,
            **langchain_tool_kwargs,
        )


class AsyncBaseTool(BaseTool):
    """
    Base-level tool class that is backwards compatible with the old tool spec
    but also supports async.
    """

    def __call__(self, *args: Any, **kwargs: Any) -> ToolOutput:
        return self.call(*args, **kwargs)

    @abstractmethod
    def call(self, input: Any) -> ToolOutput:
        """
        This is the method that should be implemented by the tool developer.
        """

    @abstractmethod
    async def acall(self, input: Any) -> ToolOutput:
        """
        This is the async version of the call method.
        Should also be implemented by the tool developer as an
        async-compatible implementation.
        """


class BaseToolAsyncAdapter(AsyncBaseTool):
    """
    Adapter class that allows a synchronous tool to be used as an async tool.
    """

    def __init__(self, tool: BaseTool):
        self.base_tool = tool

    @property
    def metadata(self) -> ToolMetadata:
        return self.base_tool.metadata

    def call(self, input: Any) -> ToolOutput:
        return self.base_tool(input)

    async def acall(self, input: Any) -> ToolOutput:
        return self.call(input)


def adapt_to_async_tool(tool: BaseTool) -> AsyncBaseTool:
    """
    Converts a synchronous tool to an async tool.
    """
    if isinstance(tool, AsyncBaseTool):
        return tool
    else:
        return BaseToolAsyncAdapter(tool)
[ "llama_index.legacy.bridge.langchain.Tool.from_function", "llama_index.legacy.bridge.langchain.StructuredTool.from_function" ]
[((1586, 1675), 'deprecated.deprecated', 'deprecated', (['"""Deprecated in favor of `to_openai_tool`, which should be used instead."""'], {}), "(\n 'Deprecated in favor of `to_openai_tool`, which should be used instead.')\n", (1596, 1675), False, 'from deprecated import deprecated\n'), ((1400, 1422), 'json.dumps', 'json.dumps', (['parameters'], {}), '(parameters)\n', (1410, 1422), False, 'import json\n'), ((3697, 3760), 'llama_index.legacy.bridge.langchain.Tool.from_function', 'Tool.from_function', ([], {'func': 'self.__call__'}), '(func=self.__call__, **langchain_tool_kwargs)\n', (3715, 3760), False, 'from llama_index.legacy.bridge.langchain import Tool\n'), ((4158, 4231), 'llama_index.legacy.bridge.langchain.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'func': 'self.__call__'}), '(func=self.__call__, **langchain_tool_kwargs)\n', (4186, 4231), False, 'from llama_index.legacy.bridge.langchain import StructuredTool\n')]
import logging
from dataclasses import dataclass
from typing import Any, List, Optional, cast

from deprecated import deprecated

import llama_index.core
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.indices.prompt_helper import PromptHelper
from llama_index.core.service_context_elements.llm_predictor import (
    LLMPredictor,
    BaseLLMPredictor,
)
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.utils import LLMType, resolve_llm
from llama_index.core.service_context_elements.llama_logger import LlamaLogger
from llama_index.core.node_parser.interface import NodeParser, TextSplitter
from llama_index.core.node_parser.text.sentence import (
    DEFAULT_CHUNK_SIZE,
    SENTENCE_CHUNK_OVERLAP,
    SentenceSplitter,
)
from llama_index.core.prompts.base import BasePromptTemplate
from llama_index.core.schema import TransformComponent
from llama_index.core.types import PydanticProgramMode

logger = logging.getLogger(__name__)


def _get_default_node_parser(
    chunk_size: int = DEFAULT_CHUNK_SIZE,
    chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
    callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
    """Get default node parser."""
    return SentenceSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        callback_manager=callback_manager or CallbackManager(),
    )


def _get_default_prompt_helper(
    llm_metadata: LLMMetadata,
    context_window: Optional[int] = None,
    num_output: Optional[int] = None,
) -> PromptHelper:
    """Get default prompt helper."""
    if context_window is not None:
        llm_metadata.context_window = context_window
    if num_output is not None:
        llm_metadata.num_output = num_output
    return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)


class ServiceContextData(BaseModel):
    llm: dict
    llm_predictor: dict
    prompt_helper: dict
    embed_model: dict
    transformations: List[dict]


@dataclass
class ServiceContext:
    """Service Context container.

    The service context container is a utility container for LlamaIndex
    index and query classes.

    It contains the following:
    - llm_predictor: BaseLLMPredictor
    - prompt_helper: PromptHelper
    - embed_model: BaseEmbedding
    - node_parser: NodeParser
    - llama_logger: LlamaLogger (deprecated)
    - callback_manager: CallbackManager
    """

    llm_predictor: BaseLLMPredictor
    prompt_helper: PromptHelper
    embed_model: BaseEmbedding
    transformations: List[TransformComponent]
    llama_logger: LlamaLogger
    callback_manager: CallbackManager

    @classmethod
    @deprecated(
        version="0.10.0",
        reason="ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.",
    )
    def from_defaults(
        cls,
        llm_predictor: Optional[BaseLLMPredictor] = None,
        llm: Optional[LLMType] = "default",
        prompt_helper: Optional[PromptHelper] = None,
        embed_model: Optional[Any] = "default",
        node_parser: Optional[NodeParser] = None,
        text_splitter: Optional[TextSplitter] = None,
        transformations: Optional[List[TransformComponent]] = None,
        llama_logger: Optional[LlamaLogger] = None,
        callback_manager: Optional[CallbackManager] = None,
        system_prompt: Optional[str] = None,
        query_wrapper_prompt: Optional[BasePromptTemplate] = None,
        # pydantic program mode (used if output_cls is specified)
        pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
        # node parser kwargs
        chunk_size: Optional[int] = None,
        chunk_overlap: Optional[int] = None,
        # prompt helper kwargs
        context_window: Optional[int] = None,
        num_output: Optional[int] = None,
        # deprecated kwargs
        chunk_size_limit: Optional[int] = None,
    ) -> "ServiceContext":
        """Create a ServiceContext from defaults.

        If an argument is specified, then use the argument value provided for that
        parameter. If an argument is not specified, then use the default value.

        You can change the base defaults by setting llama_index.global_service_context
        to a ServiceContext object with your desired settings.

        Args:
            llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
            prompt_helper (Optional[PromptHelper]): PromptHelper
            embed_model (Optional[BaseEmbedding]): BaseEmbedding
                or "local" (use local model)
            node_parser (Optional[NodeParser]): NodeParser
            llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
            chunk_size (Optional[int]): chunk_size
            callback_manager (Optional[CallbackManager]): CallbackManager
            system_prompt (Optional[str]): System-wide prompt to be prepended
                to all input prompts, used to guide system "decision making"
            query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
                passed-in input queries.

        Deprecated Args:
            chunk_size_limit (Optional[int]): renamed to chunk_size
        """
        from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model

        embed_model = cast(EmbedType, embed_model)

        if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size instead"
            )
            chunk_size = chunk_size_limit

        if llama_index.core.global_service_context is not None:
            return cls.from_service_context(
                llama_index.core.global_service_context,
                llm=llm,
                llm_predictor=llm_predictor,
                prompt_helper=prompt_helper,
                embed_model=embed_model,
                node_parser=node_parser,
                text_splitter=text_splitter,
                llama_logger=llama_logger,
                callback_manager=callback_manager,
                context_window=context_window,
                chunk_size=chunk_size,
                chunk_size_limit=chunk_size_limit,
                chunk_overlap=chunk_overlap,
                num_output=num_output,
                system_prompt=system_prompt,
                query_wrapper_prompt=query_wrapper_prompt,
                transformations=transformations,
            )

        callback_manager = callback_manager or CallbackManager([])
        if llm != "default":
            if llm_predictor is not None:
                raise ValueError("Cannot specify both llm and llm_predictor")
            llm = resolve_llm(llm)
            llm.system_prompt = llm.system_prompt or system_prompt
            llm.query_wrapper_prompt = llm.query_wrapper_prompt or query_wrapper_prompt
            llm.pydantic_program_mode = (
                llm.pydantic_program_mode or pydantic_program_mode
            )

        if llm_predictor is not None:
            print("LLMPredictor is deprecated, please use LLM instead.")
        llm_predictor = llm_predictor or LLMPredictor(
            llm=llm, pydantic_program_mode=pydantic_program_mode
        )
        if isinstance(llm_predictor, LLMPredictor):
            llm_predictor.llm.callback_manager = callback_manager
            if system_prompt:
                llm_predictor.system_prompt = system_prompt
            if query_wrapper_prompt:
                llm_predictor.query_wrapper_prompt = query_wrapper_prompt

        # NOTE: the embed_model isn't used in all indices
        # NOTE: embed model should be a transformation, but the way the service
        # context works, we can't put in there yet.
        embed_model = resolve_embed_model(embed_model)
        embed_model.callback_manager = callback_manager

        prompt_helper = prompt_helper or _get_default_prompt_helper(
            llm_metadata=llm_predictor.metadata,
            context_window=context_window,
            num_output=num_output,
        )

        if text_splitter is not None and node_parser is not None:
            raise ValueError("Cannot specify both text_splitter and node_parser")
        node_parser = (
            text_splitter  # text splitter extends node parser
            or node_parser
            or _get_default_node_parser(
                chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
                chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
                callback_manager=callback_manager,
            )
        )

        transformations = transformations or [node_parser]

        llama_logger = llama_logger or LlamaLogger()

        return cls(
            llm_predictor=llm_predictor,
            embed_model=embed_model,
            prompt_helper=prompt_helper,
            transformations=transformations,
            llama_logger=llama_logger,  # deprecated
            callback_manager=callback_manager,
        )

    @classmethod
    def from_service_context(
        cls,
        service_context: "ServiceContext",
        llm_predictor: Optional[BaseLLMPredictor] = None,
        llm: Optional[LLMType] = "default",
        prompt_helper: Optional[PromptHelper] = None,
        embed_model: Optional[Any] = "default",
        node_parser: Optional[NodeParser] = None,
        text_splitter: Optional[TextSplitter] = None,
        transformations: Optional[List[TransformComponent]] = None,
        llama_logger: Optional[LlamaLogger] = None,
        callback_manager: Optional[CallbackManager] = None,
        system_prompt: Optional[str] = None,
        query_wrapper_prompt: Optional[BasePromptTemplate] = None,
        # node parser kwargs
        chunk_size: Optional[int] = None,
        chunk_overlap: Optional[int] = None,
        # prompt helper kwargs
        context_window: Optional[int] = None,
        num_output: Optional[int] = None,
        # deprecated kwargs
        chunk_size_limit: Optional[int] = None,
    ) -> "ServiceContext":
        """Instantiate a new service context using a previous as the defaults."""
        from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model

        embed_model = cast(EmbedType, embed_model)

        if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size",
                DeprecationWarning,
            )
            chunk_size = chunk_size_limit

        callback_manager = callback_manager or service_context.callback_manager
        if llm != "default":
            if llm_predictor is not None:
                raise ValueError("Cannot specify both llm and llm_predictor")
            llm = resolve_llm(llm)
            llm_predictor = LLMPredictor(llm=llm)

        llm_predictor = llm_predictor or service_context.llm_predictor
        if isinstance(llm_predictor, LLMPredictor):
            llm_predictor.llm.callback_manager = callback_manager
            if system_prompt:
                llm_predictor.system_prompt = system_prompt
            if query_wrapper_prompt:
                llm_predictor.query_wrapper_prompt = query_wrapper_prompt

        # NOTE: the embed_model isn't used in all indices
        # default to using the embed model passed from the service context
        if embed_model == "default":
            embed_model = service_context.embed_model
        embed_model = resolve_embed_model(embed_model)
        embed_model.callback_manager = callback_manager

        prompt_helper = prompt_helper or service_context.prompt_helper
        if context_window is not None or num_output is not None:
            prompt_helper = _get_default_prompt_helper(
                llm_metadata=llm_predictor.metadata,
                context_window=context_window,
                num_output=num_output,
            )

        transformations = transformations or []
        node_parser_found = False
        for transform in service_context.transformations:
            if isinstance(transform, NodeParser):
                node_parser_found = True
                node_parser = transform
                break

        if text_splitter is not None and node_parser is not None:
            raise ValueError("Cannot specify both text_splitter and node_parser")

        if not node_parser_found:
            node_parser = (
                text_splitter  # text splitter extends node parser
                or node_parser
                or _get_default_node_parser(
                    chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
                    chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
                    callback_manager=callback_manager,
                )
            )

        transformations = transformations or service_context.transformations

        llama_logger = llama_logger or service_context.llama_logger

        return cls(
            llm_predictor=llm_predictor,
            embed_model=embed_model,
            prompt_helper=prompt_helper,
            transformations=transformations,
            llama_logger=llama_logger,  # deprecated
            callback_manager=callback_manager,
        )

    @property
    def llm(self) -> LLM:
        return self.llm_predictor.llm

    @property
    def node_parser(self) -> NodeParser:
        """Get the node parser."""
        for transform in self.transformations:
            if isinstance(transform, NodeParser):
                return transform
        raise ValueError("No node parser found.")

    def to_dict(self) -> dict:
        """Convert service context to dict."""
        llm_dict = self.llm_predictor.llm.to_dict()
        llm_predictor_dict = self.llm_predictor.to_dict()

        embed_model_dict = self.embed_model.to_dict()

        prompt_helper_dict = self.prompt_helper.to_dict()

        tranform_list_dict = [x.to_dict() for x in self.transformations]

        return ServiceContextData(
            llm=llm_dict,
            llm_predictor=llm_predictor_dict,
            prompt_helper=prompt_helper_dict,
            embed_model=embed_model_dict,
            transformations=tranform_list_dict,
        ).dict()

    @classmethod
    def from_dict(cls, data: dict) -> "ServiceContext":
        from llama_index.core.embeddings.loading import load_embed_model
        from llama_index.core.extractors.loading import load_extractor
        from llama_index.core.node_parser.loading import load_parser
        from llama_index.core.service_context_elements.llm_predictor import (
            load_predictor,
        )

        service_context_data = ServiceContextData.parse_obj(data)

        llm_predictor = load_predictor(service_context_data.llm_predictor)

        embed_model = load_embed_model(service_context_data.embed_model)

        prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)

        transformations: List[TransformComponent] = []
        for transform in service_context_data.transformations:
            try:
                transformations.append(load_parser(transform))
            except ValueError:
                transformations.append(load_extractor(transform))

        return cls.from_defaults(
            llm_predictor=llm_predictor,
            prompt_helper=prompt_helper,
            embed_model=embed_model,
            transformations=transformations,
        )


def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
    """Helper function to set the global service context."""
    llama_index.core.global_service_context = service_context

    if service_context is not None:
        from llama_index.core.settings import Settings

        Settings.llm = service_context.llm
        Settings.embed_model = service_context.embed_model
        Settings.prompt_helper = service_context.prompt_helper
        Settings.transformations = service_context.transformations
        Settings.node_parser = service_context.node_parser
        Settings.callback_manager = service_context.callback_manager
[ "llama_index.core.llms.utils.resolve_llm", "llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata", "llama_index.core.indices.prompt_helper.PromptHelper.from_dict", "llama_index.core.embeddings.utils.resolve_embed_model", "llama_index.core.embeddings.loading.load_embed_model", "llama_index.core.service_context_elements.llm_predictor.LLMPredictor", "llama_index.core.callbacks.base.CallbackManager", "llama_index.core.node_parser.loading.load_parser", "llama_index.core.extractors.loading.load_extractor", "llama_index.core.service_context_elements.llama_logger.LlamaLogger", "llama_index.core.service_context_elements.llm_predictor.load_predictor" ]
[((1138, 1165), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1155, 1165), False, 'import logging\n'), ((1940, 1997), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1970, 1997), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((2817, 2947), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""0.10.0"""', 'reason': '"""ServiceContext is deprecated, please use `llama_index.settings.Settings` instead."""'}), "(version='0.10.0', reason=\n 'ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.'\n )\n", (2827, 2947), False, 'from deprecated import deprecated\n'), ((5458, 5486), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (5462, 5486), False, 'from typing import Any, List, Optional, cast\n'), ((7915, 7947), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7934, 7947), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10364, 10392), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (10368, 10392), False, 'from typing import Any, List, Optional, cast\n'), ((11608, 11640), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (11627, 11640), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((14844, 14894), 'llama_index.core.service_context_elements.llm_predictor.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14858, 14894), False, 'from llama_index.core.service_context_elements.llm_predictor import load_predictor\n'), ((14918, 14968), 'llama_index.core.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14934, 14968), False, 'from llama_index.core.embeddings.loading import load_embed_model\n'), ((14994, 15052), 'llama_index.core.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (15016, 15052), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((6659, 6678), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6674, 6678), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((6846, 6862), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6857, 6862), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((7294, 7360), 'llama_index.core.service_context_elements.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (7306, 7360), False, 'from llama_index.core.service_context_elements.llm_predictor import LLMPredictor, BaseLLMPredictor\n'), ((8823, 8836), 'llama_index.core.service_context_elements.llama_logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8834, 8836), False, 'from llama_index.core.service_context_elements.llama_logger import LlamaLogger\n'), ((10903, 10919), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', 
(['llm'], {}), '(llm)\n', (10914, 10919), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((10948, 10969), 'llama_index.core.service_context_elements.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10960, 10969), False, 'from llama_index.core.service_context_elements.llm_predictor import LLMPredictor, BaseLLMPredictor\n'), ((1539, 1556), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1554, 1556), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((15228, 15250), 'llama_index.core.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (15239, 15250), False, 'from llama_index.core.node_parser.loading import load_parser\n'), ((15322, 15347), 'llama_index.core.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (15336, 15347), False, 'from llama_index.core.extractors.loading import load_extractor\n')]
import json
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union

from llama_index.legacy.bridge.pydantic import Field
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS
from llama_index.legacy.core.llms.types import (
    ChatMessage,
    ChatResponse,
    ChatResponseAsyncGen,
    ChatResponseGen,
    CompletionResponse,
    CompletionResponseAsyncGen,
    CompletionResponseGen,
    LLMMetadata,
    MessageRole,
)
from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode

DEFAULT_RUNGPT_MODEL = "rungpt"
DEFAULT_RUNGPT_TEMP = 0.75


class RunGptLLM(LLM):
    """The opengpt of Jina AI models."""

    model: Optional[str] = Field(
        default=DEFAULT_RUNGPT_MODEL, description="The rungpt model to use."
    )
    endpoint: str = Field(description="The endpoint of serving address.")
    temperature: float = Field(
        default=DEFAULT_RUNGPT_TEMP,
        description="The temperature to use for sampling.",
        gte=0.0,
        lte=1.0,
    )
    max_tokens: int = Field(
        default=DEFAULT_NUM_OUTPUTS,
        description="Max tokens model generates.",
        gt=0,
    )
    context_window: int = Field(
        default=DEFAULT_CONTEXT_WINDOW,
        description="The maximum number of context tokens for the model.",
        gt=0,
    )
    additional_kwargs: Dict[str, Any] = Field(
        default_factory=dict, description="Additional kwargs for the Replicate API."
    )
    base_url: str = Field(
        description="The address of your target model served by rungpt."
    )

    def __init__(
        self,
        model: Optional[str] = DEFAULT_RUNGPT_MODEL,
        endpoint: str = "0.0.0.0:51002",
        temperature: float = DEFAULT_RUNGPT_TEMP,
        max_tokens: Optional[int] = DEFAULT_NUM_OUTPUTS,
        context_window: int = DEFAULT_CONTEXT_WINDOW,
        additional_kwargs: Optional[Dict[str, Any]] = None,
        callback_manager: Optional[CallbackManager] = None,
        system_prompt: Optional[str] = None,
        messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
        completion_to_prompt: Optional[Callable[[str], str]] = None,
        pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
        output_parser: Optional[BaseOutputParser] = None,
    ):
        if endpoint.startswith("http://"):
            base_url = endpoint
        else:
            base_url = "http://" + endpoint
        super().__init__(
            model=model,
            endpoint=endpoint,
            temperature=temperature,
            max_tokens=max_tokens,
            context_window=context_window,
            additional_kwargs=additional_kwargs or {},
            callback_manager=callback_manager or CallbackManager([]),
            base_url=base_url,
            system_prompt=system_prompt,
            messages_to_prompt=messages_to_prompt,
            completion_to_prompt=completion_to_prompt,
            pydantic_program_mode=pydantic_program_mode,
            output_parser=output_parser,
        )

    @classmethod
    def class_name(cls) -> str:
        return "RunGptLLM"

    @property
    def metadata(self) -> LLMMetadata:
        """LLM metadata."""
        return LLMMetadata(
            context_window=self.context_window,
            num_output=self.max_tokens,
            model_name=self._model,
        )

    @llm_completion_callback()
    def complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        try:
            import requests
        except ImportError:
            raise ImportError(
                "Could not import requests library."
                "Please install requests with `pip install requests`"
            )
        response_gpt = requests.post(
            self.base_url + "/generate",
            json=self._request_pack("complete", prompt, **kwargs),
            stream=False,
        ).json()

        return CompletionResponse(
            text=response_gpt["choices"][0]["text"],
            additional_kwargs=response_gpt["usage"],
            raw=response_gpt,
        )

    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        try:
            import requests
        except ImportError:
            raise ImportError(
                "Could not import requests library."
                "Please install requests with `pip install requests`"
            )
        response_gpt = requests.post(
            self.base_url + "/generate_stream",
            json=self._request_pack("complete", prompt, **kwargs),
            stream=True,
        )
        try:
            import sseclient
        except ImportError:
            raise ImportError(
                "Could not import sseclient-py library."
                "Please install requests with `pip install sseclient-py`"
            )
        client = sseclient.SSEClient(response_gpt)
        response_iter = client.events()

        def gen() -> CompletionResponseGen:
            text = ""
            for item in response_iter:
                item_dict = json.loads(json.dumps(eval(item.data)))
                delta = item_dict["choices"][0]["text"]
                additional_kwargs = item_dict["usage"]
                text = text + self._space_handler(delta)
                yield CompletionResponse(
                    text=text,
                    delta=delta,
                    raw=item_dict,
                    additional_kwargs=additional_kwargs,
                )

        return gen()

    @llm_chat_callback()
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        message_list = self._message_wrapper(messages)
        try:
            import requests
        except ImportError:
            raise ImportError(
                "Could not import requests library."
                "Please install requests with `pip install requests`"
            )
        response_gpt = requests.post(
            self.base_url + "/chat",
            json=self._request_pack("chat", message_list, **kwargs),
            stream=False,
        ).json()
        chat_message, _ = self._message_unpacker(response_gpt)
        return ChatResponse(message=chat_message, raw=response_gpt)

    @llm_chat_callback()
    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        message_list = self._message_wrapper(messages)
        try:
            import requests
        except ImportError:
            raise ImportError(
                "Could not import requests library."
                "Please install requests with `pip install requests`"
            )
        response_gpt = requests.post(
            self.base_url + "/chat_stream",
            json=self._request_pack("chat", message_list, **kwargs),
            stream=True,
        )
        try:
            import sseclient
        except ImportError:
            raise ImportError(
                "Could not import sseclient-py library."
                "Please install requests with `pip install sseclient-py`"
            )
        client = sseclient.SSEClient(response_gpt)
        chat_iter = client.events()

        def gen() -> ChatResponseGen:
            content = ""
            for item in chat_iter:
                item_dict = json.loads(json.dumps(eval(item.data)))
                chat_message, delta = self._message_unpacker(item_dict)
                content = content + self._space_handler(delta)
                chat_message.content = content
                yield ChatResponse(message=chat_message, raw=item_dict, delta=delta)

        return gen()

    @llm_chat_callback()
    async def achat(
        self,
        messages: Sequence[ChatMessage],
        **kwargs: Any,
    ) -> ChatResponse:
        return self.chat(messages, **kwargs)

    @llm_chat_callback()
    async def astream_chat(
        self,
        messages: Sequence[ChatMessage],
        **kwargs: Any,
    ) -> ChatResponseAsyncGen:
        async def gen() -> ChatResponseAsyncGen:
            for message in self.stream_chat(messages, **kwargs):
                yield message

        # NOTE: convert generator to async generator
        return gen()

    @llm_completion_callback()
    async def acomplete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        return self.complete(prompt, **kwargs)

    @llm_completion_callback()
    async def astream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseAsyncGen:
        async def gen() -> CompletionResponseAsyncGen:
            for message in self.stream_complete(prompt, **kwargs):
                yield message

        return gen()

    def _message_wrapper(self, messages: Sequence[ChatMessage]) -> List[Dict[str, Any]]:
        message_list = []
        for message in messages:
            role = message.role.value
            content = message.content
            message_list.append({"role": role, "content": content})
        return message_list

    def _message_unpacker(
        self, response_gpt: Dict[str, Any]
    ) -> Tuple[ChatMessage, str]:
        message = response_gpt["choices"][0]["message"]
        additional_kwargs = response_gpt["usage"]
        role = message["role"]
        content = message["content"]
        key = MessageRole.SYSTEM
        for r in MessageRole:
            if r.value == role:
                key = r
        chat_message = ChatMessage(
            role=key, content=content, additional_kwargs=additional_kwargs
        )
        return chat_message, content

    def _request_pack(
        self, mode: str, prompt: Union[str, List[Dict[str, Any]]], **kwargs: Any
    ) -> Optional[Dict[str, Any]]:
        if mode == "complete":
            return {
                "prompt": prompt,
                "max_tokens": kwargs.pop("max_tokens", self.max_tokens),
                "temperature": kwargs.pop("temperature", self.temperature),
                "top_k": kwargs.pop("top_k", 50),
                "top_p": kwargs.pop("top_p", 0.95),
                "repetition_penalty": kwargs.pop("repetition_penalty", 1.2),
                "do_sample": kwargs.pop("do_sample", False),
                "echo": kwargs.pop("echo", True),
                "n": kwargs.pop("n", 1),
                "stop": kwargs.pop("stop", "."),
            }
        elif mode == "chat":
            return {
                "messages": prompt,
                "max_tokens": kwargs.pop("max_tokens", self.max_tokens),
                "temperature": kwargs.pop("temperature", self.temperature),
                "top_k": kwargs.pop("top_k", 50),
                "top_p": kwargs.pop("top_p", 0.95),
                "repetition_penalty": kwargs.pop("repetition_penalty", 1.2),
                "do_sample": kwargs.pop("do_sample", False),
                "echo": kwargs.pop("echo", True),
                "n": kwargs.pop("n", 1),
                "stop": kwargs.pop("stop", "."),
            }
        return None

    def _space_handler(self, word: str) -> str:
        if word.isalnum():
            return " " + word
        return word
[ "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.core.llms.types.ChatResponse", "llama_index.legacy.core.llms.types.ChatMessage", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.callbacks.CallbackManager", "llama_index.legacy.core.llms.types.CompletionResponse" ]
[((893, 968), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_RUNGPT_MODEL', 'description': '"""The rungpt model to use."""'}), "(default=DEFAULT_RUNGPT_MODEL, description='The rungpt model to use.')\n", (898, 968), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1003, 1056), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The endpoint of serving address."""'}), "(description='The endpoint of serving address.')\n", (1008, 1056), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1082, 1191), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_RUNGPT_TEMP', 'description': '"""The temperature to use for sampling."""', 'gte': '(0.0)', 'lte': '(1.0)'}), "(default=DEFAULT_RUNGPT_TEMP, description=\n 'The temperature to use for sampling.', gte=0.0, lte=1.0)\n", (1087, 1191), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1248, 1336), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_NUM_OUTPUTS', 'description': '"""Max tokens model generates."""', 'gt': '(0)'}), "(default=DEFAULT_NUM_OUTPUTS, description=\n 'Max tokens model generates.', gt=0)\n", (1253, 1336), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1389, 1504), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_CONTEXT_WINDOW', 'description': '"""The maximum number of context tokens for the model."""', 'gt': '(0)'}), "(default=DEFAULT_CONTEXT_WINDOW, description=\n 'The maximum number of context tokens for the model.', gt=0)\n", (1394, 1504), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1571, 1659), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the Replicate API."""'}), "(default_factory=dict, description=\n 'Additional kwargs for the Replicate API.')\n", (1576, 1659), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1689, 1760), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The address of your target model served by rungpt."""'}), "(description='The address of your target model served by rungpt.')\n", (1694, 1760), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((3597, 3622), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3620, 3622), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4350, 4375), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (4373, 4375), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5847, 5866), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (5864, 5866), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6567, 6586), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6584, 6586), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((7968, 7987), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (7985, 7987), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8161, 8180), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', 
(8178, 8180), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8543, 8568), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8566, 8568), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8742, 8767), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8765, 8767), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3444, 3547), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'self.context_window', 'num_output': 'self.max_tokens', 'model_name': 'self._model'}), '(context_window=self.context_window, num_output=self.max_tokens,\n model_name=self._model)\n', (3455, 3547), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((4178, 4300), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': "response_gpt['choices'][0]['text']", 'additional_kwargs': "response_gpt['usage']", 'raw': 'response_gpt'}), "(text=response_gpt['choices'][0]['text'],\n additional_kwargs=response_gpt['usage'], raw=response_gpt)\n", (4196, 4300), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((5187, 5220), 'sseclient.SSEClient', 'sseclient.SSEClient', (['response_gpt'], {}), '(response_gpt)\n', (5206, 5220), False, 'import sseclient\n'), ((6508, 6560), 'llama_index.legacy.core.llms.types.ChatResponse', 'ChatResponse', ([], {'message': 'chat_message', 'raw': 'response_gpt'}), '(message=chat_message, raw=response_gpt)\n', (6520, 6560), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((7436, 7469), 'sseclient.SSEClient', 'sseclient.SSEClient', (['response_gpt'], {}), '(response_gpt)\n', (7455, 7469), False, 'import sseclient\n'), ((9819, 9894), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'key', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), '(role=key, content=content, additional_kwargs=additional_kwargs)\n', (9830, 9894), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((2963, 2982), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2978, 2982), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((5625, 5724), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'text', 'delta': 'delta', 'raw': 'item_dict', 'additional_kwargs': 'additional_kwargs'}), '(text=text, delta=delta, raw=item_dict, additional_kwargs\n =additional_kwargs)\n', (5643, 5724), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), 
((7877, 7939), 'llama_index.legacy.core.llms.types.ChatResponse', 'ChatResponse', ([], {'message': 'chat_message', 'raw': 'item_dict', 'delta': 'delta'}), '(message=chat_message, raw=item_dict, delta=delta)\n', (7889, 7939), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n')]
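Illustrative usage sketch for the RunGptLLM record above (not part of the extracted row): the import path, the endpoint value, and the prompts are assumptions, and the HTTP calls only succeed against a live rungpt server at that address.

# Hedged sketch: import path and endpoint are assumed; a rungpt server must be
# serving a model at the endpoint for the calls below to return anything.
from llama_index.legacy.core.llms.types import ChatMessage, MessageRole
from llama_index.legacy.llms.rungpt import RunGptLLM  # assumed module path for the class above

llm = RunGptLLM(endpoint="0.0.0.0:51002", temperature=0.5, max_tokens=256)

# complete() POSTs to <base_url>/generate and wraps the JSON reply.
print(llm.complete("Explain what a vector index is.").text)

# chat() converts ChatMessage objects into {"role": ..., "content": ...} dicts
# via _message_wrapper and POSTs them to <base_url>/chat.
messages = [
    ChatMessage(role=MessageRole.SYSTEM, content="You are a concise assistant."),
    ChatMessage(role=MessageRole.USER, content="What does a rungpt server do?"),
]
print(llm.chat(messages).message.content)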
from typing import List, Optional import fsspec from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.schema import BaseNode from llama_index.core.storage.docstore.utils import doc_to_json, json_to_doc from llama_index.core.storage.kvstore import ( SimpleKVStore as SimpleCache, ) from llama_index.core.storage.kvstore.types import ( BaseKVStore as BaseCache, ) DEFAULT_CACHE_NAME = "llama_cache" class IngestionCache(BaseModel): class Config: arbitrary_types_allowed = True nodes_key = "nodes" collection: str = Field( default=DEFAULT_CACHE_NAME, description="Collection name of the cache." ) cache: BaseCache = Field(default_factory=SimpleCache, description="Cache to use.") # TODO: add async get/put methods? def put( self, key: str, nodes: List[BaseNode], collection: Optional[str] = None ) -> None: """Put a value into the cache.""" collection = collection or self.collection val = {self.nodes_key: [doc_to_json(node) for node in nodes]} self.cache.put(key, val, collection=collection) def get( self, key: str, collection: Optional[str] = None ) -> Optional[List[BaseNode]]: """Get a value from the cache.""" collection = collection or self.collection node_dicts = self.cache.get(key, collection=collection) if node_dicts is None: return None return [json_to_doc(node_dict) for node_dict in node_dicts[self.nodes_key]] def clear(self, collection: Optional[str] = None) -> None: """Clear the cache.""" collection = collection or self.collection data = self.cache.get_all(collection=collection) for key in data: self.cache.delete(key, collection=collection) def persist( self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None ) -> None: """Persist the cache to a directory, if possible.""" if isinstance(self.cache, SimpleCache): self.cache.persist(persist_path, fs=fs) else: print("Warning: skipping persist, only needed for SimpleCache.") @classmethod def from_persist_path( cls, persist_path: str, collection: str = DEFAULT_CACHE_NAME, fs: Optional[fsspec.AbstractFileSystem] = None, ) -> "IngestionCache": """Create a IngestionCache from a persist directory.""" return cls( collection=collection, cache=SimpleCache.from_persist_path(persist_path, fs=fs), ) __all__ = ["SimpleCache", "BaseCache"]
[ "llama_index.core.storage.kvstore.SimpleKVStore.from_persist_path", "llama_index.core.bridge.pydantic.Field", "llama_index.core.storage.docstore.utils.json_to_doc", "llama_index.core.storage.docstore.utils.doc_to_json" ]
[((577, 655), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_CACHE_NAME', 'description': '"""Collection name of the cache."""'}), "(default=DEFAULT_CACHE_NAME, description='Collection name of the cache.')\n", (582, 655), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((693, 756), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'SimpleCache', 'description': '"""Cache to use."""'}), "(default_factory=SimpleCache, description='Cache to use.')\n", (698, 756), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1461, 1483), 'llama_index.core.storage.docstore.utils.json_to_doc', 'json_to_doc', (['node_dict'], {}), '(node_dict)\n', (1472, 1483), False, 'from llama_index.core.storage.docstore.utils import doc_to_json, json_to_doc\n'), ((1031, 1048), 'llama_index.core.storage.docstore.utils.doc_to_json', 'doc_to_json', (['node'], {}), '(node)\n', (1042, 1048), False, 'from llama_index.core.storage.docstore.utils import doc_to_json, json_to_doc\n'), ((2531, 2581), 'llama_index.core.storage.kvstore.SimpleKVStore.from_persist_path', 'SimpleCache.from_persist_path', (['persist_path'], {'fs': 'fs'}), '(persist_path, fs=fs)\n', (2560, 2581), True, 'from llama_index.core.storage.kvstore import SimpleKVStore as SimpleCache\n')]
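Illustrative usage sketch for the IngestionCache record above (not part of the extracted row): the module path, the literal cache key, and the persist path are assumptions; in a real ingestion pipeline the key would be a hash of the transformation input.

# Hedged sketch: module path, cache key, and persist path are placeholders.
from llama_index.core.ingestion.cache import IngestionCache  # assumed module path
from llama_index.core.schema import TextNode

cache = IngestionCache()  # in-memory SimpleCache and the "llama_cache" collection by default

nodes = [TextNode(text="hello world")]
cache.put("example-node-hash", nodes)    # stored via doc_to_json

cached = cache.get("example-node-hash")  # restored via json_to_doc
assert cached is not None and cached[0].text == "hello world"

# persist() only writes SimpleCache-backed caches to disk; other backends just warn.
cache.persist("./llama_cache.json")
restored = IngestionCache.from_persist_path("./llama_cache.json")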
"""Base object types.""" import pickle import warnings from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.callbacks.base import CallbackManager from llama_index.legacy.core.base_retriever import BaseRetriever from llama_index.legacy.core.query_pipeline.query_component import ( ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable, ) from llama_index.legacy.indices.base import BaseIndex from llama_index.legacy.indices.vector_store.base import VectorStoreIndex from llama_index.legacy.objects.base_node_mapping import ( DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping, ) from llama_index.legacy.schema import QueryType from llama_index.legacy.storage.storage_context import ( DEFAULT_PERSIST_DIR, StorageContext, ) OT = TypeVar("OT") class ObjectRetriever(ChainableMixin, Generic[OT]): """Object retriever.""" def __init__( self, retriever: BaseRetriever, object_node_mapping: BaseObjectNodeMapping[OT] ): self._retriever = retriever self._object_node_mapping = object_node_mapping @property def retriever(self) -> BaseRetriever: """Retriever.""" return self._retriever def retrieve(self, str_or_query_bundle: QueryType) -> List[OT]: nodes = self._retriever.retrieve(str_or_query_bundle) return [self._object_node_mapping.from_node(node.node) for node in nodes] async def aretrieve(self, str_or_query_bundle: QueryType) -> List[OT]: nodes = await self._retriever.aretrieve(str_or_query_bundle) return [self._object_node_mapping.from_node(node.node) for node in nodes] def _as_query_component(self, **kwargs: Any) -> QueryComponent: """As query component.""" return ObjectRetrieverComponent(retriever=self) class ObjectRetrieverComponent(QueryComponent): """Object retriever component.""" retriever: ObjectRetriever = Field(..., description="Retriever.") class Config: arbitrary_types_allowed = True def set_callback_manager(self, callback_manager: CallbackManager) -> None: """Set callback manager.""" self.retriever.retriever.callback_manager = callback_manager def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]: """Validate component inputs during run_component.""" # make sure input is a string input["input"] = validate_and_convert_stringable(input["input"]) return input def _run_component(self, **kwargs: Any) -> Any: """Run component.""" output = self.retriever.retrieve(kwargs["input"]) return {"output": output} async def _arun_component(self, **kwargs: Any) -> Any: """Run component (async).""" output = await self.retriever.aretrieve(kwargs["input"]) return {"output": output} @property def input_keys(self) -> InputKeys: """Input keys.""" return InputKeys.from_keys({"input"}) @property def output_keys(self) -> OutputKeys: """Output keys.""" return OutputKeys.from_keys({"output"}) class ObjectIndex(Generic[OT]): """Object index.""" def __init__( self, index: BaseIndex, object_node_mapping: BaseObjectNodeMapping ) -> None: self._index = index self._object_node_mapping = object_node_mapping @classmethod def from_objects( cls, objects: Sequence[OT], object_mapping: Optional[BaseObjectNodeMapping] = None, index_cls: Type[BaseIndex] = VectorStoreIndex, **index_kwargs: Any, ) -> "ObjectIndex": if object_mapping is None: object_mapping = SimpleObjectNodeMapping.from_objects(objects) nodes = object_mapping.to_nodes(objects) index = index_cls(nodes, **index_kwargs) return cls(index, object_mapping) def insert_object(self, obj: Any) -> None: 
self._object_node_mapping.add_object(obj) node = self._object_node_mapping.to_node(obj) self._index.insert_nodes([node]) def as_retriever(self, **kwargs: Any) -> ObjectRetriever: return ObjectRetriever( retriever=self._index.as_retriever(**kwargs), object_node_mapping=self._object_node_mapping, ) def as_node_retriever(self, **kwargs: Any) -> BaseRetriever: return self._index.as_retriever(**kwargs) def persist( self, persist_dir: str = DEFAULT_PERSIST_DIR, obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME, ) -> None: # try to persist object node mapping try: self._object_node_mapping.persist( persist_dir=persist_dir, obj_node_mapping_fname=obj_node_mapping_fname ) except (NotImplementedError, pickle.PickleError) as err: warnings.warn( ( "Unable to persist ObjectNodeMapping. You will need to " "reconstruct the same object node mapping to build this ObjectIndex" ), stacklevel=2, ) self._index._storage_context.persist(persist_dir=persist_dir) @classmethod def from_persist_dir( cls, persist_dir: str = DEFAULT_PERSIST_DIR, object_node_mapping: Optional[BaseObjectNodeMapping] = None, ) -> "ObjectIndex": from llama_index.legacy.indices import load_index_from_storage storage_context = StorageContext.from_defaults(persist_dir=persist_dir) index = load_index_from_storage(storage_context) if object_node_mapping: return cls(index=index, object_node_mapping=object_node_mapping) else: # try to load object_node_mapping # assume SimpleObjectNodeMapping for simplicity as its only subclass # that supports this method try: object_node_mapping = SimpleObjectNodeMapping.from_persist_dir( persist_dir=persist_dir ) except Exception as err: raise Exception( "Unable to load from persist dir. The object_node_mapping cannot be loaded." ) from err else: return cls(index=index, object_node_mapping=object_node_mapping)
[ "llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys", "llama_index.legacy.indices.load_index_from_storage", "llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys", "llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects", "llama_index.legacy.storage.storage_context.StorageContext.from_defaults", "llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable" ]
[((925, 938), 'typing.TypeVar', 'TypeVar', (['"""OT"""'], {}), "('OT')\n", (932, 938), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar\n'), ((2060, 2096), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever."""'}), "(..., description='Retriever.')\n", (2065, 2096), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2549, 2596), 'llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['input']"], {}), "(input['input'])\n", (2580, 2596), False, 'from llama_index.legacy.core.query_pipeline.query_component import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((3083, 3113), 'llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys', 'InputKeys.from_keys', (["{'input'}"], {}), "({'input'})\n", (3102, 3113), False, 'from llama_index.legacy.core.query_pipeline.query_component import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((3212, 3244), 'llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (3232, 3244), False, 'from llama_index.legacy.core.query_pipeline.query_component import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((5600, 5653), 'llama_index.legacy.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (5628, 5653), False, 'from llama_index.legacy.storage.storage_context import DEFAULT_PERSIST_DIR, StorageContext\n'), ((5670, 5710), 'llama_index.legacy.indices.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (5693, 5710), False, 'from llama_index.legacy.indices import load_index_from_storage\n'), ((3816, 3861), 'llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects', 'SimpleObjectNodeMapping.from_objects', (['objects'], {}), '(objects)\n', (3852, 3861), False, 'from llama_index.legacy.objects.base_node_mapping import DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping\n'), ((4972, 5133), 'warnings.warn', 'warnings.warn', (['"""Unable to persist ObjectNodeMapping. You will need to reconstruct the same object node mapping to build this ObjectIndex"""'], {'stacklevel': '(2)'}), "(\n 'Unable to persist ObjectNodeMapping. You will need to reconstruct the same object node mapping to build this ObjectIndex'\n , stacklevel=2)\n", (4985, 5133), False, 'import warnings\n'), ((6056, 6121), 'llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir', 'SimpleObjectNodeMapping.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (6096, 6121), False, 'from llama_index.legacy.objects.base_node_mapping import DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping\n')]
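Illustrative usage sketch for the ObjectIndex record above (not part of the extracted row): the import path and the availability of an embedding backend are assumptions.

# Hedged sketch: the re-export path and a configured embedding model
# (e.g. an OpenAI API key in the environment) are assumptions, not guarantees.
from llama_index.legacy.indices.vector_store.base import VectorStoreIndex
from llama_index.legacy.objects import ObjectIndex  # assumed re-export of the class above

# Arbitrary picklable objects; SimpleObjectNodeMapping turns each one into a node.
objects = ["multiply two numbers", "add two numbers", "look up the weather"]

# Building a VectorStoreIndex embeds the mapped nodes at construction time.
obj_index = ObjectIndex.from_objects(objects, index_cls=VectorStoreIndex)

retriever = obj_index.as_retriever(similarity_top_k=1)  # kwargs go to the index retriever
print(retriever.retrieve("What is 2 * 3?"))  # returns the original objects, not nodes

# Persists the index storage context and, when supported, the object-node mapping.
obj_index.persist(persist_dir="./obj_index_storage")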
"""Base object types.""" import pickle import warnings from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.callbacks.base import CallbackManager from llama_index.legacy.core.base_retriever import BaseRetriever from llama_index.legacy.core.query_pipeline.query_component import ( ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable, ) from llama_index.legacy.indices.base import BaseIndex from llama_index.legacy.indices.vector_store.base import VectorStoreIndex from llama_index.legacy.objects.base_node_mapping import ( DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping, ) from llama_index.legacy.schema import QueryType from llama_index.legacy.storage.storage_context import ( DEFAULT_PERSIST_DIR, StorageContext, ) OT = TypeVar("OT") class ObjectRetriever(ChainableMixin, Generic[OT]): """Object retriever.""" def __init__( self, retriever: BaseRetriever, object_node_mapping: BaseObjectNodeMapping[OT] ): self._retriever = retriever self._object_node_mapping = object_node_mapping @property def retriever(self) -> BaseRetriever: """Retriever.""" return self._retriever def retrieve(self, str_or_query_bundle: QueryType) -> List[OT]: nodes = self._retriever.retrieve(str_or_query_bundle) return [self._object_node_mapping.from_node(node.node) for node in nodes] async def aretrieve(self, str_or_query_bundle: QueryType) -> List[OT]: nodes = await self._retriever.aretrieve(str_or_query_bundle) return [self._object_node_mapping.from_node(node.node) for node in nodes] def _as_query_component(self, **kwargs: Any) -> QueryComponent: """As query component.""" return ObjectRetrieverComponent(retriever=self) class ObjectRetrieverComponent(QueryComponent): """Object retriever component.""" retriever: ObjectRetriever = Field(..., description="Retriever.") class Config: arbitrary_types_allowed = True def set_callback_manager(self, callback_manager: CallbackManager) -> None: """Set callback manager.""" self.retriever.retriever.callback_manager = callback_manager def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]: """Validate component inputs during run_component.""" # make sure input is a string input["input"] = validate_and_convert_stringable(input["input"]) return input def _run_component(self, **kwargs: Any) -> Any: """Run component.""" output = self.retriever.retrieve(kwargs["input"]) return {"output": output} async def _arun_component(self, **kwargs: Any) -> Any: """Run component (async).""" output = await self.retriever.aretrieve(kwargs["input"]) return {"output": output} @property def input_keys(self) -> InputKeys: """Input keys.""" return InputKeys.from_keys({"input"}) @property def output_keys(self) -> OutputKeys: """Output keys.""" return OutputKeys.from_keys({"output"}) class ObjectIndex(Generic[OT]): """Object index.""" def __init__( self, index: BaseIndex, object_node_mapping: BaseObjectNodeMapping ) -> None: self._index = index self._object_node_mapping = object_node_mapping @classmethod def from_objects( cls, objects: Sequence[OT], object_mapping: Optional[BaseObjectNodeMapping] = None, index_cls: Type[BaseIndex] = VectorStoreIndex, **index_kwargs: Any, ) -> "ObjectIndex": if object_mapping is None: object_mapping = SimpleObjectNodeMapping.from_objects(objects) nodes = object_mapping.to_nodes(objects) index = index_cls(nodes, **index_kwargs) return cls(index, object_mapping) def insert_object(self, obj: Any) -> None: 
self._object_node_mapping.add_object(obj) node = self._object_node_mapping.to_node(obj) self._index.insert_nodes([node]) def as_retriever(self, **kwargs: Any) -> ObjectRetriever: return ObjectRetriever( retriever=self._index.as_retriever(**kwargs), object_node_mapping=self._object_node_mapping, ) def as_node_retriever(self, **kwargs: Any) -> BaseRetriever: return self._index.as_retriever(**kwargs) def persist( self, persist_dir: str = DEFAULT_PERSIST_DIR, obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME, ) -> None: # try to persist object node mapping try: self._object_node_mapping.persist( persist_dir=persist_dir, obj_node_mapping_fname=obj_node_mapping_fname ) except (NotImplementedError, pickle.PickleError) as err: warnings.warn( ( "Unable to persist ObjectNodeMapping. You will need to " "reconstruct the same object node mapping to build this ObjectIndex" ), stacklevel=2, ) self._index._storage_context.persist(persist_dir=persist_dir) @classmethod def from_persist_dir( cls, persist_dir: str = DEFAULT_PERSIST_DIR, object_node_mapping: Optional[BaseObjectNodeMapping] = None, ) -> "ObjectIndex": from llama_index.legacy.indices import load_index_from_storage storage_context = StorageContext.from_defaults(persist_dir=persist_dir) index = load_index_from_storage(storage_context) if object_node_mapping: return cls(index=index, object_node_mapping=object_node_mapping) else: # try to load object_node_mapping # assume SimpleObjectNodeMapping for simplicity as its only subclass # that supports this method try: object_node_mapping = SimpleObjectNodeMapping.from_persist_dir( persist_dir=persist_dir ) except Exception as err: raise Exception( "Unable to load from persist dir. The object_node_mapping cannot be loaded." ) from err else: return cls(index=index, object_node_mapping=object_node_mapping)
[ "llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys", "llama_index.legacy.indices.load_index_from_storage", "llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys", "llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects", "llama_index.legacy.storage.storage_context.StorageContext.from_defaults", "llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable" ]
[((925, 938), 'typing.TypeVar', 'TypeVar', (['"""OT"""'], {}), "('OT')\n", (932, 938), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar\n'), ((2060, 2096), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever."""'}), "(..., description='Retriever.')\n", (2065, 2096), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2549, 2596), 'llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['input']"], {}), "(input['input'])\n", (2580, 2596), False, 'from llama_index.legacy.core.query_pipeline.query_component import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((3083, 3113), 'llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys', 'InputKeys.from_keys', (["{'input'}"], {}), "({'input'})\n", (3102, 3113), False, 'from llama_index.legacy.core.query_pipeline.query_component import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((3212, 3244), 'llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (3232, 3244), False, 'from llama_index.legacy.core.query_pipeline.query_component import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((5600, 5653), 'llama_index.legacy.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (5628, 5653), False, 'from llama_index.legacy.storage.storage_context import DEFAULT_PERSIST_DIR, StorageContext\n'), ((5670, 5710), 'llama_index.legacy.indices.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (5693, 5710), False, 'from llama_index.legacy.indices import load_index_from_storage\n'), ((3816, 3861), 'llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects', 'SimpleObjectNodeMapping.from_objects', (['objects'], {}), '(objects)\n', (3852, 3861), False, 'from llama_index.legacy.objects.base_node_mapping import DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping\n'), ((4972, 5133), 'warnings.warn', 'warnings.warn', (['"""Unable to persist ObjectNodeMapping. You will need to reconstruct the same object node mapping to build this ObjectIndex"""'], {'stacklevel': '(2)'}), "(\n 'Unable to persist ObjectNodeMapping. You will need to reconstruct the same object node mapping to build this ObjectIndex'\n , stacklevel=2)\n", (4985, 5133), False, 'import warnings\n'), ((6056, 6121), 'llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir', 'SimpleObjectNodeMapping.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (6096, 6121), False, 'from llama_index.legacy.objects.base_node_mapping import DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping\n')]
"""Base reader class.""" from abc import ABC from typing import TYPE_CHECKING, Any, Dict, Iterable, List if TYPE_CHECKING: from llama_index.legacy.bridge.langchain import Document as LCDocument from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.schema import BaseComponent, Document class BaseReader(ABC): """Utilities for loading data from a directory.""" def lazy_load_data(self, *args: Any, **load_kwargs: Any) -> Iterable[Document]: """Load data from the input directory lazily.""" raise NotImplementedError( f"{self.__class__.__name__} does not provide lazy_load_data method currently" ) def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]: """Load data from the input directory.""" return list(self.lazy_load_data(*args, **load_kwargs)) def load_langchain_documents(self, **load_kwargs: Any) -> List["LCDocument"]: """Load data in LangChain document format.""" docs = self.load_data(**load_kwargs) return [d.to_langchain_format() for d in docs] class BasePydanticReader(BaseReader, BaseComponent): """Serialiable Data Loader with Pydatnic.""" is_remote: bool = Field( default=False, description="Whether the data is loaded from a remote API or a local file.", ) class Config: arbitrary_types_allowed = True class ReaderConfig(BaseComponent): """Represents a reader and it's input arguments.""" reader: BasePydanticReader = Field(..., description="Reader to use.") reader_args: List[Any] = Field(default_factory=list, description="Reader args.") reader_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Reader kwargs." ) class Config: arbitrary_types_allowed = True @classmethod def class_name(cls) -> str: """Get the name identifier of the class.""" return "ReaderConfig" def to_dict(self, **kwargs: Any) -> Dict[str, Any]: """Convert the class to a dictionary.""" return { "loader": self.reader.to_dict(**kwargs), "reader_args": self.reader_args, "reader_kwargs": self.reader_kwargs, "class_name": self.class_name(), } def read(self) -> List[Document]: """Call the loader with the given arguments.""" return self.reader.load_data(*self.reader_args, **self.reader_kwargs)
[ "llama_index.legacy.bridge.pydantic.Field" ]
[((1225, 1327), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the data is loaded from a remote API or a local file."""'}), "(default=False, description=\n 'Whether the data is loaded from a remote API or a local file.')\n", (1230, 1327), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1531, 1571), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Reader to use."""'}), "(..., description='Reader to use.')\n", (1536, 1571), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1601, 1656), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""Reader args."""'}), "(default_factory=list, description='Reader args.')\n", (1606, 1656), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1693, 1750), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Reader kwargs."""'}), "(default_factory=dict, description='Reader kwargs.')\n", (1698, 1750), False, 'from llama_index.legacy.bridge.pydantic import Field\n')]
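To see how the classes in this row fit together, here is a hedged usage sketch. The import path `llama_index.legacy.readers.base` is an assumption about where this file lives in the package, and `InMemoryReader` is a made-up toy reader, not a library class; only the `load_data` contract and the pydantic-style field declarations come from the code shown above.

# Hedged sketch: wrapping a custom BasePydanticReader in a ReaderConfig.
from typing import Any, List

from llama_index.legacy.readers.base import BasePydanticReader, ReaderConfig  # assumed module path
from llama_index.legacy.schema import Document


class InMemoryReader(BasePydanticReader):
    """Toy reader that serves documents from an in-memory list of strings."""

    texts: List[str]

    @classmethod
    def class_name(cls) -> str:
        return "InMemoryReader"

    def load_data(self, **load_kwargs: Any) -> List[Document]:
        return [Document(text=t) for t in self.texts]


config = ReaderConfig(reader=InMemoryReader(texts=["hello", "world"]))
docs = config.read()  # calls reader.load_data(*reader_args, **reader_kwargs)
assert len(docs) == 2 and config.to_dict()["class_name"] == "ReaderConfig"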
"""Base reader class.""" from abc import ABC from typing import TYPE_CHECKING, Any, Dict, Iterable, List if TYPE_CHECKING: from llama_index.legacy.bridge.langchain import Document as LCDocument from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.schema import BaseComponent, Document class BaseReader(ABC): """Utilities for loading data from a directory.""" def lazy_load_data(self, *args: Any, **load_kwargs: Any) -> Iterable[Document]: """Load data from the input directory lazily.""" raise NotImplementedError( f"{self.__class__.__name__} does not provide lazy_load_data method currently" ) def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]: """Load data from the input directory.""" return list(self.lazy_load_data(*args, **load_kwargs)) def load_langchain_documents(self, **load_kwargs: Any) -> List["LCDocument"]: """Load data in LangChain document format.""" docs = self.load_data(**load_kwargs) return [d.to_langchain_format() for d in docs] class BasePydanticReader(BaseReader, BaseComponent): """Serialiable Data Loader with Pydatnic.""" is_remote: bool = Field( default=False, description="Whether the data is loaded from a remote API or a local file.", ) class Config: arbitrary_types_allowed = True class ReaderConfig(BaseComponent): """Represents a reader and it's input arguments.""" reader: BasePydanticReader = Field(..., description="Reader to use.") reader_args: List[Any] = Field(default_factory=list, description="Reader args.") reader_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Reader kwargs." ) class Config: arbitrary_types_allowed = True @classmethod def class_name(cls) -> str: """Get the name identifier of the class.""" return "ReaderConfig" def to_dict(self, **kwargs: Any) -> Dict[str, Any]: """Convert the class to a dictionary.""" return { "loader": self.reader.to_dict(**kwargs), "reader_args": self.reader_args, "reader_kwargs": self.reader_kwargs, "class_name": self.class_name(), } def read(self) -> List[Document]: """Call the loader with the given arguments.""" return self.reader.load_data(*self.reader_args, **self.reader_kwargs)
[ "llama_index.legacy.bridge.pydantic.Field" ]
[((1225, 1327), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the data is loaded from a remote API or a local file."""'}), "(default=False, description=\n 'Whether the data is loaded from a remote API or a local file.')\n", (1230, 1327), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1531, 1571), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Reader to use."""'}), "(..., description='Reader to use.')\n", (1536, 1571), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1601, 1656), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""Reader args."""'}), "(default_factory=list, description='Reader args.')\n", (1606, 1656), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1693, 1750), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Reader kwargs."""'}), "(default_factory=dict, description='Reader kwargs.')\n", (1698, 1750), False, 'from llama_index.legacy.bridge.pydantic import Field\n')]
""" Portkey integration with Llama_index for enhanced monitoring. """ from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union, cast from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, ) from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback from llama_index.legacy.llms.custom import CustomLLM from llama_index.legacy.llms.generic_utils import ( chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator, ) from llama_index.legacy.llms.portkey_utils import ( IMPORT_ERROR_MESSAGE, generate_llm_metadata, get_llm, is_chat_model, ) from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode if TYPE_CHECKING: from portkey import ( LLMOptions, Modes, ModesLiteral, PortkeyResponse, ) DEFAULT_PORTKEY_MODEL = "gpt-3.5-turbo" class Portkey(CustomLLM): """_summary_. Args: LLM (_type_): _description_ """ mode: Optional[Union["Modes", "ModesLiteral"]] = Field( description="The mode for using the Portkey integration" ) model: Optional[str] = Field(default=DEFAULT_PORTKEY_MODEL) llm: "LLMOptions" = Field(description="LLM parameter", default_factory=dict) llms: List["LLMOptions"] = Field(description="LLM parameters", default_factory=list) _client: Any = PrivateAttr() def __init__( self, *, mode: Union["Modes", "ModesLiteral"], api_key: Optional[str] = None, base_url: Optional[str] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, ) -> None: """ Initialize a Portkey instance. Args: mode (Optional[Modes]): The mode for using the Portkey integration (default: Modes.SINGLE). api_key (Optional[str]): The API key to authenticate with Portkey. base_url (Optional[str]): The Base url to the self hosted rubeus \ (the opensource version of portkey) or any other self hosted server. """ try: import portkey except ImportError as exc: raise ImportError(IMPORT_ERROR_MESSAGE) from exc super().__init__( base_url=base_url, api_key=api_key, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, ) if api_key is not None: portkey.api_key = api_key if base_url is not None: portkey.base_url = base_url portkey.mode = mode self._client = portkey self.model = None self.mode = mode @property def metadata(self) -> LLMMetadata: """LLM metadata.""" return generate_llm_metadata(self.llms[0]) def add_llms( self, llm_params: Union["LLMOptions", List["LLMOptions"]] ) -> "Portkey": """ Adds the specified LLM parameters to the list of LLMs. This may be used for fallbacks or load-balancing as specified in the mode. Args: llm_params (Union[LLMOptions, List[LLMOptions]]): A single LLM parameter \ set or a list of LLM parameter sets. Each set should be an instance of \ LLMOptions with the specified attributes. 
> provider: Optional[ProviderTypes] > model: str > temperature: float > max_tokens: Optional[int] > max_retries: int > trace_id: Optional[str] > cache_status: Optional[CacheType] > cache: Optional[bool] > metadata: Dict[str, Any] > weight: Optional[float] > **kwargs : Other additional parameters that are supported by \ LLMOptions in portkey-ai NOTE: User may choose to pass additional params as well. Returns: self """ try: from portkey import LLMOptions except ImportError as exc: raise ImportError(IMPORT_ERROR_MESSAGE) from exc if isinstance(llm_params, LLMOptions): llm_params = [llm_params] self.llms.extend(llm_params) if self.model is None: self.model = self.llms[0].model return self @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: """Completion endpoint for LLM.""" if self._is_chat_model: complete_fn = chat_to_completion_decorator(self._chat) else: complete_fn = self._complete return complete_fn(prompt, **kwargs) @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: if self._is_chat_model: chat_fn = self._chat else: chat_fn = completion_to_chat_decorator(self._complete) return chat_fn(messages, **kwargs) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: """Completion endpoint for LLM.""" if self._is_chat_model: complete_fn = stream_chat_to_completion_decorator(self._stream_chat) else: complete_fn = self._stream_complete return complete_fn(prompt, **kwargs) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: if self._is_chat_model: stream_chat_fn = self._stream_chat else: stream_chat_fn = stream_completion_to_chat_decorator(self._stream_complete) return stream_chat_fn(messages, **kwargs) def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: try: from portkey import Config, Message except ImportError as exc: raise ImportError(IMPORT_ERROR_MESSAGE) from exc _messages = cast( List[Message], [{"role": i.role.value, "content": i.content} for i in messages], ) config = Config(llms=self.llms) response = self._client.ChatCompletions.create( messages=_messages, config=config ) self.llm = self._get_llm(response) message = response.choices[0].message return ChatResponse(message=message, raw=response) def _complete(self, prompt: str, **kwargs: Any) -> CompletionResponse: try: from portkey import Config except ImportError as exc: raise ImportError(IMPORT_ERROR_MESSAGE) from exc config = Config(llms=self.llms) response = self._client.Completions.create(prompt=prompt, config=config) text = response.choices[0].text return CompletionResponse(text=text, raw=response) def _stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: try: from portkey import Config, Message except ImportError as exc: raise ImportError(IMPORT_ERROR_MESSAGE) from exc _messages = cast( List[Message], [{"role": i.role.value, "content": i.content} for i in messages], ) config = Config(llms=self.llms) response = self._client.ChatCompletions.create( messages=_messages, config=config, stream=True, **kwargs ) def gen() -> ChatResponseGen: content = "" function_call: Optional[dict] = {} for resp in response: if resp.choices is None: continue delta = resp.choices[0].delta role = delta.get("role", "assistant") content_delta = delta.get("content", "") or "" content += content_delta function_call_delta = 
delta.get("function_call", None) if function_call_delta is not None: if function_call is None: function_call = function_call_delta # ensure we do not add a blank function call if ( function_call and function_call.get("function_name", "") is None ): del function_call["function_name"] else: function_call["arguments"] += function_call_delta["arguments"] additional_kwargs = {} if function_call is not None: additional_kwargs["function_call"] = function_call yield ChatResponse( message=ChatMessage( role=role, content=content, additional_kwargs=additional_kwargs, ), delta=content_delta, raw=resp, ) return gen() def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen: try: from portkey import Config except ImportError as exc: raise ImportError(IMPORT_ERROR_MESSAGE) from exc config = Config(llms=self.llms) response = self._client.Completions.create( prompt=prompt, config=config, stream=True, **kwargs ) def gen() -> CompletionResponseGen: text = "" for resp in response: delta = resp.choices[0].text or "" text += delta yield CompletionResponse( delta=delta, text=text, raw=resp, ) return gen() @property def _is_chat_model(self) -> bool: """Check if a given model is a chat-based language model. Returns: bool: True if the provided model is a chat-based language model, False otherwise. """ return is_chat_model(self.model or "") def _get_llm(self, response: "PortkeyResponse") -> "LLMOptions": return get_llm(response, self.llms)
[ "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.core.llms.types.ChatResponse", "llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator", "llama_index.legacy.llms.portkey_utils.is_chat_model", "llama_index.legacy.core.llms.types.ChatMessage", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.llms.generic_utils.completion_to_chat_decorator", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator", "llama_index.legacy.llms.generic_utils.chat_to_completion_decorator", "llama_index.legacy.llms.portkey_utils.generate_llm_metadata", "llama_index.legacy.llms.portkey_utils.get_llm" ]
[((1284, 1347), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The mode for using the Portkey integration"""'}), "(description='The mode for using the Portkey integration')\n", (1289, 1347), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1390, 1426), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_PORTKEY_MODEL'}), '(default=DEFAULT_PORTKEY_MODEL)\n', (1395, 1426), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1451, 1507), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""LLM parameter"""', 'default_factory': 'dict'}), "(description='LLM parameter', default_factory=dict)\n", (1456, 1507), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1540, 1597), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""LLM parameters"""', 'default_factory': 'list'}), "(description='LLM parameters', default_factory=list)\n", (1545, 1597), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1618, 1631), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1629, 1631), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((5009, 5034), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5032, 5034), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5396, 5415), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (5413, 5415), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5695, 5720), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5718, 5720), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6113, 6132), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6130, 6132), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3423, 3458), 'llama_index.legacy.llms.portkey_utils.generate_llm_metadata', 'generate_llm_metadata', (['self.llms[0]'], {}), '(self.llms[0])\n', (3444, 3458), False, 'from llama_index.legacy.llms.portkey_utils import IMPORT_ERROR_MESSAGE, generate_llm_metadata, get_llm, is_chat_model\n'), ((6735, 6824), 'typing.cast', 'cast', (['List[Message]', "[{'role': i.role.value, 'content': i.content} for i in messages]"], {}), "(List[Message], [{'role': i.role.value, 'content': i.content} for i in\n messages])\n", (6739, 6824), False, 'from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union, cast\n'), ((6873, 6895), 'portkey.Config', 'Config', ([], {'llms': 'self.llms'}), '(llms=self.llms)\n', (6879, 6895), False, 'from portkey import Config\n'), ((7113, 7156), 'llama_index.legacy.core.llms.types.ChatResponse', 'ChatResponse', ([], {'message': 'message', 'raw': 'response'}), '(message=message, raw=response)\n', (7125, 7156), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((7399, 7421), 'portkey.Config', 'Config', ([], {'llms': 'self.llms'}), '(llms=self.llms)\n', (7405, 7421), False, 'from portkey import Config\n'), ((7558, 7601), 'llama_index.legacy.core.llms.types.CompletionResponse', 
'CompletionResponse', ([], {'text': 'text', 'raw': 'response'}), '(text=text, raw=response)\n', (7576, 7601), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((7889, 7978), 'typing.cast', 'cast', (['List[Message]', "[{'role': i.role.value, 'content': i.content} for i in messages]"], {}), "(List[Message], [{'role': i.role.value, 'content': i.content} for i in\n messages])\n", (7893, 7978), False, 'from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union, cast\n'), ((8027, 8049), 'portkey.Config', 'Config', ([], {'llms': 'self.llms'}), '(llms=self.llms)\n', (8033, 8049), False, 'from portkey import Config\n'), ((10014, 10036), 'portkey.Config', 'Config', ([], {'llms': 'self.llms'}), '(llms=self.llms)\n', (10020, 10036), False, 'from portkey import Config\n'), ((10791, 10822), 'llama_index.legacy.llms.portkey_utils.is_chat_model', 'is_chat_model', (["(self.model or '')"], {}), "(self.model or '')\n", (10804, 10822), False, 'from llama_index.legacy.llms.portkey_utils import IMPORT_ERROR_MESSAGE, generate_llm_metadata, get_llm, is_chat_model\n'), ((10908, 10936), 'llama_index.legacy.llms.portkey_utils.get_llm', 'get_llm', (['response', 'self.llms'], {}), '(response, self.llms)\n', (10915, 10936), False, 'from llama_index.legacy.llms.portkey_utils import IMPORT_ERROR_MESSAGE, generate_llm_metadata, get_llm, is_chat_model\n'), ((5249, 5289), 'llama_index.legacy.llms.generic_utils.chat_to_completion_decorator', 'chat_to_completion_decorator', (['self._chat'], {}), '(self._chat)\n', (5277, 5289), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((5601, 5645), 'llama_index.legacy.llms.generic_utils.completion_to_chat_decorator', 'completion_to_chat_decorator', (['self._complete'], {}), '(self._complete)\n', (5629, 5645), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((5945, 5999), 'llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator', 'stream_chat_to_completion_decorator', (['self._stream_chat'], {}), '(self._stream_chat)\n', (5980, 5999), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((6363, 6421), 'llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator', 'stream_completion_to_chat_decorator', (['self._stream_complete'], {}), '(self._stream_complete)\n', (6398, 6421), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((10367, 10419), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'delta': 'delta', 'text': 'text', 'raw': 'resp'}), '(delta=delta, text=text, raw=resp)\n', (10385, 10419), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((9478, 9554), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), 
'(role=role, content=content, additional_kwargs=additional_kwargs)\n', (9489, 9554), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n')]
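For orientation, here is a hedged sketch of how this wrapper is meant to be driven, based only on the constructor and `add_llms` docstrings in the row above. It assumes the `portkey` (portkey-ai) package is installed, that the module path is `llama_index.legacy.llms.portkey`, and that `Modes.SINGLE` plus the `provider`/`model` values shown are placeholders for a real configuration.

# Hedged sketch: single-provider Portkey usage per the docstrings above.
from portkey import LLMOptions, Modes  # external package; names taken from the row's own imports

from llama_index.legacy.llms.portkey import Portkey  # assumed module path

portkey_llm = Portkey(mode=Modes.SINGLE, api_key="YOUR_PORTKEY_API_KEY")

# Field names (provider, model, ...) come from the add_llms docstring; values are placeholders.
portkey_llm.add_llms(LLMOptions(provider="openai", model="gpt-3.5-turbo"))

print(portkey_llm.complete("What does fallback mode do?").text)

In fallback or load-balanced modes the same pattern applies, except that `add_llms` receives a list of `LLMOptions` and the `mode` argument selects how they are used.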
""" Portkey integration with Llama_index for enhanced monitoring. """ from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union, cast from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, ) from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback from llama_index.legacy.llms.custom import CustomLLM from llama_index.legacy.llms.generic_utils import ( chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator, ) from llama_index.legacy.llms.portkey_utils import ( IMPORT_ERROR_MESSAGE, generate_llm_metadata, get_llm, is_chat_model, ) from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode if TYPE_CHECKING: from portkey import ( LLMOptions, Modes, ModesLiteral, PortkeyResponse, ) DEFAULT_PORTKEY_MODEL = "gpt-3.5-turbo" class Portkey(CustomLLM): """_summary_. Args: LLM (_type_): _description_ """ mode: Optional[Union["Modes", "ModesLiteral"]] = Field( description="The mode for using the Portkey integration" ) model: Optional[str] = Field(default=DEFAULT_PORTKEY_MODEL) llm: "LLMOptions" = Field(description="LLM parameter", default_factory=dict) llms: List["LLMOptions"] = Field(description="LLM parameters", default_factory=list) _client: Any = PrivateAttr() def __init__( self, *, mode: Union["Modes", "ModesLiteral"], api_key: Optional[str] = None, base_url: Optional[str] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, ) -> None: """ Initialize a Portkey instance. Args: mode (Optional[Modes]): The mode for using the Portkey integration (default: Modes.SINGLE). api_key (Optional[str]): The API key to authenticate with Portkey. base_url (Optional[str]): The Base url to the self hosted rubeus \ (the opensource version of portkey) or any other self hosted server. """ try: import portkey except ImportError as exc: raise ImportError(IMPORT_ERROR_MESSAGE) from exc super().__init__( base_url=base_url, api_key=api_key, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, ) if api_key is not None: portkey.api_key = api_key if base_url is not None: portkey.base_url = base_url portkey.mode = mode self._client = portkey self.model = None self.mode = mode @property def metadata(self) -> LLMMetadata: """LLM metadata.""" return generate_llm_metadata(self.llms[0]) def add_llms( self, llm_params: Union["LLMOptions", List["LLMOptions"]] ) -> "Portkey": """ Adds the specified LLM parameters to the list of LLMs. This may be used for fallbacks or load-balancing as specified in the mode. Args: llm_params (Union[LLMOptions, List[LLMOptions]]): A single LLM parameter \ set or a list of LLM parameter sets. Each set should be an instance of \ LLMOptions with the specified attributes. 
> provider: Optional[ProviderTypes] > model: str > temperature: float > max_tokens: Optional[int] > max_retries: int > trace_id: Optional[str] > cache_status: Optional[CacheType] > cache: Optional[bool] > metadata: Dict[str, Any] > weight: Optional[float] > **kwargs : Other additional parameters that are supported by \ LLMOptions in portkey-ai NOTE: User may choose to pass additional params as well. Returns: self """ try: from portkey import LLMOptions except ImportError as exc: raise ImportError(IMPORT_ERROR_MESSAGE) from exc if isinstance(llm_params, LLMOptions): llm_params = [llm_params] self.llms.extend(llm_params) if self.model is None: self.model = self.llms[0].model return self @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: """Completion endpoint for LLM.""" if self._is_chat_model: complete_fn = chat_to_completion_decorator(self._chat) else: complete_fn = self._complete return complete_fn(prompt, **kwargs) @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: if self._is_chat_model: chat_fn = self._chat else: chat_fn = completion_to_chat_decorator(self._complete) return chat_fn(messages, **kwargs) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: """Completion endpoint for LLM.""" if self._is_chat_model: complete_fn = stream_chat_to_completion_decorator(self._stream_chat) else: complete_fn = self._stream_complete return complete_fn(prompt, **kwargs) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: if self._is_chat_model: stream_chat_fn = self._stream_chat else: stream_chat_fn = stream_completion_to_chat_decorator(self._stream_complete) return stream_chat_fn(messages, **kwargs) def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: try: from portkey import Config, Message except ImportError as exc: raise ImportError(IMPORT_ERROR_MESSAGE) from exc _messages = cast( List[Message], [{"role": i.role.value, "content": i.content} for i in messages], ) config = Config(llms=self.llms) response = self._client.ChatCompletions.create( messages=_messages, config=config ) self.llm = self._get_llm(response) message = response.choices[0].message return ChatResponse(message=message, raw=response) def _complete(self, prompt: str, **kwargs: Any) -> CompletionResponse: try: from portkey import Config except ImportError as exc: raise ImportError(IMPORT_ERROR_MESSAGE) from exc config = Config(llms=self.llms) response = self._client.Completions.create(prompt=prompt, config=config) text = response.choices[0].text return CompletionResponse(text=text, raw=response) def _stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: try: from portkey import Config, Message except ImportError as exc: raise ImportError(IMPORT_ERROR_MESSAGE) from exc _messages = cast( List[Message], [{"role": i.role.value, "content": i.content} for i in messages], ) config = Config(llms=self.llms) response = self._client.ChatCompletions.create( messages=_messages, config=config, stream=True, **kwargs ) def gen() -> ChatResponseGen: content = "" function_call: Optional[dict] = {} for resp in response: if resp.choices is None: continue delta = resp.choices[0].delta role = delta.get("role", "assistant") content_delta = delta.get("content", "") or "" content += content_delta function_call_delta = 
delta.get("function_call", None) if function_call_delta is not None: if function_call is None: function_call = function_call_delta # ensure we do not add a blank function call if ( function_call and function_call.get("function_name", "") is None ): del function_call["function_name"] else: function_call["arguments"] += function_call_delta["arguments"] additional_kwargs = {} if function_call is not None: additional_kwargs["function_call"] = function_call yield ChatResponse( message=ChatMessage( role=role, content=content, additional_kwargs=additional_kwargs, ), delta=content_delta, raw=resp, ) return gen() def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen: try: from portkey import Config except ImportError as exc: raise ImportError(IMPORT_ERROR_MESSAGE) from exc config = Config(llms=self.llms) response = self._client.Completions.create( prompt=prompt, config=config, stream=True, **kwargs ) def gen() -> CompletionResponseGen: text = "" for resp in response: delta = resp.choices[0].text or "" text += delta yield CompletionResponse( delta=delta, text=text, raw=resp, ) return gen() @property def _is_chat_model(self) -> bool: """Check if a given model is a chat-based language model. Returns: bool: True if the provided model is a chat-based language model, False otherwise. """ return is_chat_model(self.model or "") def _get_llm(self, response: "PortkeyResponse") -> "LLMOptions": return get_llm(response, self.llms)
[ "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.core.llms.types.ChatResponse", "llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator", "llama_index.legacy.llms.portkey_utils.is_chat_model", "llama_index.legacy.core.llms.types.ChatMessage", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.llms.generic_utils.completion_to_chat_decorator", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator", "llama_index.legacy.llms.generic_utils.chat_to_completion_decorator", "llama_index.legacy.llms.portkey_utils.generate_llm_metadata", "llama_index.legacy.llms.portkey_utils.get_llm" ]
[((1284, 1347), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The mode for using the Portkey integration"""'}), "(description='The mode for using the Portkey integration')\n", (1289, 1347), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1390, 1426), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_PORTKEY_MODEL'}), '(default=DEFAULT_PORTKEY_MODEL)\n', (1395, 1426), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1451, 1507), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""LLM parameter"""', 'default_factory': 'dict'}), "(description='LLM parameter', default_factory=dict)\n", (1456, 1507), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1540, 1597), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""LLM parameters"""', 'default_factory': 'list'}), "(description='LLM parameters', default_factory=list)\n", (1545, 1597), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1618, 1631), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1629, 1631), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((5009, 5034), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5032, 5034), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5396, 5415), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (5413, 5415), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5695, 5720), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5718, 5720), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6113, 6132), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6130, 6132), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3423, 3458), 'llama_index.legacy.llms.portkey_utils.generate_llm_metadata', 'generate_llm_metadata', (['self.llms[0]'], {}), '(self.llms[0])\n', (3444, 3458), False, 'from llama_index.legacy.llms.portkey_utils import IMPORT_ERROR_MESSAGE, generate_llm_metadata, get_llm, is_chat_model\n'), ((6735, 6824), 'typing.cast', 'cast', (['List[Message]', "[{'role': i.role.value, 'content': i.content} for i in messages]"], {}), "(List[Message], [{'role': i.role.value, 'content': i.content} for i in\n messages])\n", (6739, 6824), False, 'from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union, cast\n'), ((6873, 6895), 'portkey.Config', 'Config', ([], {'llms': 'self.llms'}), '(llms=self.llms)\n', (6879, 6895), False, 'from portkey import Config\n'), ((7113, 7156), 'llama_index.legacy.core.llms.types.ChatResponse', 'ChatResponse', ([], {'message': 'message', 'raw': 'response'}), '(message=message, raw=response)\n', (7125, 7156), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((7399, 7421), 'portkey.Config', 'Config', ([], {'llms': 'self.llms'}), '(llms=self.llms)\n', (7405, 7421), False, 'from portkey import Config\n'), ((7558, 7601), 'llama_index.legacy.core.llms.types.CompletionResponse', 
'CompletionResponse', ([], {'text': 'text', 'raw': 'response'}), '(text=text, raw=response)\n', (7576, 7601), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((7889, 7978), 'typing.cast', 'cast', (['List[Message]', "[{'role': i.role.value, 'content': i.content} for i in messages]"], {}), "(List[Message], [{'role': i.role.value, 'content': i.content} for i in\n messages])\n", (7893, 7978), False, 'from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union, cast\n'), ((8027, 8049), 'portkey.Config', 'Config', ([], {'llms': 'self.llms'}), '(llms=self.llms)\n', (8033, 8049), False, 'from portkey import Config\n'), ((10014, 10036), 'portkey.Config', 'Config', ([], {'llms': 'self.llms'}), '(llms=self.llms)\n', (10020, 10036), False, 'from portkey import Config\n'), ((10791, 10822), 'llama_index.legacy.llms.portkey_utils.is_chat_model', 'is_chat_model', (["(self.model or '')"], {}), "(self.model or '')\n", (10804, 10822), False, 'from llama_index.legacy.llms.portkey_utils import IMPORT_ERROR_MESSAGE, generate_llm_metadata, get_llm, is_chat_model\n'), ((10908, 10936), 'llama_index.legacy.llms.portkey_utils.get_llm', 'get_llm', (['response', 'self.llms'], {}), '(response, self.llms)\n', (10915, 10936), False, 'from llama_index.legacy.llms.portkey_utils import IMPORT_ERROR_MESSAGE, generate_llm_metadata, get_llm, is_chat_model\n'), ((5249, 5289), 'llama_index.legacy.llms.generic_utils.chat_to_completion_decorator', 'chat_to_completion_decorator', (['self._chat'], {}), '(self._chat)\n', (5277, 5289), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((5601, 5645), 'llama_index.legacy.llms.generic_utils.completion_to_chat_decorator', 'completion_to_chat_decorator', (['self._complete'], {}), '(self._complete)\n', (5629, 5645), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((5945, 5999), 'llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator', 'stream_chat_to_completion_decorator', (['self._stream_chat'], {}), '(self._stream_chat)\n', (5980, 5999), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((6363, 6421), 'llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator', 'stream_completion_to_chat_decorator', (['self._stream_complete'], {}), '(self._stream_complete)\n', (6398, 6421), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((10367, 10419), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'delta': 'delta', 'text': 'text', 'raw': 'resp'}), '(delta=delta, text=text, raw=resp)\n', (10385, 10419), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((9478, 9554), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), 
'(role=role, content=content, additional_kwargs=additional_kwargs)\n', (9489, 9554), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n')]
"""Query plan tool.""" from typing import Any, Dict, List, Optional from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.response_synthesizers import ( BaseSynthesizer, get_response_synthesizer, ) from llama_index.core.schema import NodeWithScore, TextNode from llama_index.core.tools.types import BaseTool, ToolMetadata, ToolOutput from llama_index.core.utils import print_text DEFAULT_NAME = "query_plan_tool" QUERYNODE_QUERY_STR_DESC = """\ Question we are asking. This is the query string that will be executed. \ """ QUERYNODE_TOOL_NAME_DESC = """\ Name of the tool to execute the `query_str`. \ Should NOT be specified if there are subquestions to be specified, in which \ case child_nodes should be nonempty instead.\ """ QUERYNODE_DEPENDENCIES_DESC = """\ List of sub-questions that need to be answered in order \ to answer the question given by `query_str`.\ Should be blank if there are no sub-questions to be specified, in which case \ `tool_name` is specified.\ """ class QueryNode(BaseModel): """Query node. A query node represents a query (query_str) that must be answered. It can either be answered by a tool (tool_name), or by a list of child nodes (child_nodes). The tool_name and child_nodes fields are mutually exclusive. """ # NOTE: inspired from https://github.com/jxnl/openai_function_call/pull/3/files id: int = Field(..., description="ID of the query node.") query_str: str = Field(..., description=QUERYNODE_QUERY_STR_DESC) tool_name: Optional[str] = Field( default=None, description="Name of the tool to execute the `query_str`." ) dependencies: List[int] = Field( default_factory=list, description=QUERYNODE_DEPENDENCIES_DESC ) class QueryPlan(BaseModel): """Query plan. Contains a list of QueryNode objects (which is a recursive object). Out of the list of QueryNode objects, one of them must be the root node. The root node is the one that isn't a dependency of any other node. """ nodes: List[QueryNode] = Field( ..., description="The original question we are asking.", ) DEFAULT_DESCRIPTION_PREFIX = """\ This is a query plan tool that takes in a list of tools and executes a \ query plan over these tools to answer a query. The query plan is a DAG of query nodes. Given a list of tool names and the query plan schema, you \ can choose to generate a query plan to answer a question. The tool names and descriptions are as follows: """ class QueryPlanTool(BaseTool): """Query plan tool. A tool that takes in a list of tools and executes a query plan. 
""" def __init__( self, query_engine_tools: List[BaseTool], response_synthesizer: BaseSynthesizer, name: str, description_prefix: str, ) -> None: """Initialize.""" self._query_tools_dict = {t.metadata.name: t for t in query_engine_tools} self._response_synthesizer = response_synthesizer self._name = name self._description_prefix = description_prefix @classmethod def from_defaults( cls, query_engine_tools: List[BaseTool], response_synthesizer: Optional[BaseSynthesizer] = None, name: Optional[str] = None, description_prefix: Optional[str] = None, ) -> "QueryPlanTool": """Initialize from defaults.""" name = name or DEFAULT_NAME description_prefix = description_prefix or DEFAULT_DESCRIPTION_PREFIX response_synthesizer = response_synthesizer or get_response_synthesizer() return cls( query_engine_tools=query_engine_tools, response_synthesizer=response_synthesizer, name=name, description_prefix=description_prefix, ) @property def metadata(self) -> ToolMetadata: """Metadata.""" tools_description = "\n\n".join( [ f"Tool Name: {tool.metadata.name}\n" + f"Tool Description: {tool.metadata.description} " for tool in self._query_tools_dict.values() ] ) # TODO: fill in description with query engine tools. description = f"""\ {self._description_prefix}\n\n {tools_description} """ return ToolMetadata(description, self._name, fn_schema=QueryPlan) def _execute_node( self, node: QueryNode, nodes_dict: Dict[int, QueryNode] ) -> ToolOutput: """Execute node.""" print_text(f"Executing node {node.json()}\n", color="blue") if len(node.dependencies) > 0: print_text( f"Executing {len(node.dependencies)} child nodes\n", color="pink" ) child_query_nodes: List[QueryNode] = [ nodes_dict[dep] for dep in node.dependencies ] # execute the child nodes first child_responses: List[ToolOutput] = [ self._execute_node(child, nodes_dict) for child in child_query_nodes ] # form the child Node/NodeWithScore objects child_nodes = [] for child_query_node, child_response in zip( child_query_nodes, child_responses ): node_text = ( f"Query: {child_query_node.query_str}\n" f"Response: {child_response!s}\n" ) child_node = TextNode(text=node_text) child_nodes.append(child_node) # use response synthesizer to combine results child_nodes_with_scores = [ NodeWithScore(node=n, score=1.0) for n in child_nodes ] response_obj = self._response_synthesizer.synthesize( query=node.query_str, nodes=child_nodes_with_scores, ) response = ToolOutput( content=str(response_obj), tool_name=node.query_str, raw_input={"query": node.query_str}, raw_output=response_obj, ) else: # this is a leaf request, execute the query string using the specified tool tool = self._query_tools_dict[node.tool_name] print_text(f"Selected Tool: {tool.metadata}\n", color="pink") response = tool(node.query_str) print_text( "Executed query, got response.\n" f"Query: {node.query_str}\n" f"Response: {response!s}\n", color="blue", ) return response def _find_root_nodes(self, nodes_dict: Dict[int, QueryNode]) -> List[QueryNode]: """Find root node.""" # the root node is the one that isn't a dependency of any other node node_counts = {node_id: 0 for node_id in nodes_dict} for node in nodes_dict.values(): for dep in node.dependencies: node_counts[dep] += 1 root_node_ids = [ node_id for node_id, count in node_counts.items() if count == 0 ] return [nodes_dict[node_id] for node_id in root_node_ids] def __call__(self, *args: Any, **kwargs: Any) -> ToolOutput: """Call.""" # the kwargs represented as a JSON object # should be a QueryPlan object query_plan = QueryPlan(**kwargs) nodes_dict = 
{node.id: node for node in query_plan.nodes} root_nodes = self._find_root_nodes(nodes_dict) if len(root_nodes) > 1: raise ValueError("Query plan should have exactly one root node.") return self._execute_node(root_nodes[0], nodes_dict)
[ "llama_index.core.schema.TextNode", "llama_index.core.bridge.pydantic.Field", "llama_index.core.tools.types.ToolMetadata", "llama_index.core.schema.NodeWithScore", "llama_index.core.response_synthesizers.get_response_synthesizer", "llama_index.core.utils.print_text" ]
[((1418, 1465), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""ID of the query node."""'}), "(..., description='ID of the query node.')\n", (1423, 1465), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1487, 1535), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': 'QUERYNODE_QUERY_STR_DESC'}), '(..., description=QUERYNODE_QUERY_STR_DESC)\n', (1492, 1535), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1567, 1646), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Name of the tool to execute the `query_str`."""'}), "(default=None, description='Name of the tool to execute the `query_str`.')\n", (1572, 1646), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1691, 1759), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': 'QUERYNODE_DEPENDENCIES_DESC'}), '(default_factory=list, description=QUERYNODE_DEPENDENCIES_DESC)\n', (1696, 1759), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2084, 2146), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""The original question we are asking."""'}), "(..., description='The original question we are asking.')\n", (2089, 2146), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4353, 4411), 'llama_index.core.tools.types.ToolMetadata', 'ToolMetadata', (['description', 'self._name'], {'fn_schema': 'QueryPlan'}), '(description, self._name, fn_schema=QueryPlan)\n', (4365, 4411), False, 'from llama_index.core.tools.types import BaseTool, ToolMetadata, ToolOutput\n'), ((6429, 6549), 'llama_index.core.utils.print_text', 'print_text', (['f"""Executed query, got response.\nQuery: {node.query_str}\nResponse: {response!s}\n"""'], {'color': '"""blue"""'}), '(\n f"""Executed query, got response.\nQuery: {node.query_str}\nResponse: {response!s}\n"""\n , color=\'blue\')\n', (6439, 6549), False, 'from llama_index.core.utils import print_text\n'), ((3593, 3619), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (3617, 3619), False, 'from llama_index.core.response_synthesizers import BaseSynthesizer, get_response_synthesizer\n'), ((6315, 6376), 'llama_index.core.utils.print_text', 'print_text', (['f"""Selected Tool: {tool.metadata}\n"""'], {'color': '"""pink"""'}), "(f'Selected Tool: {tool.metadata}\\n', color='pink')\n", (6325, 6376), False, 'from llama_index.core.utils import print_text\n'), ((5495, 5519), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'node_text'}), '(text=node_text)\n', (5503, 5519), False, 'from llama_index.core.schema import NodeWithScore, TextNode\n'), ((5681, 5713), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'n', 'score': '(1.0)'}), '(node=n, score=1.0)\n', (5694, 5713), False, 'from llama_index.core.schema import NodeWithScore, TextNode\n')]
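To make the DAG structure concrete, here is a hedged sketch of the `QueryPlan` object that `QueryPlanTool.__call__` rebuilds from its kwargs. The module path `llama_index.core.tools.query_plan` is an assumption about where this file lives, and the tool names are placeholders for query engine tools that would be registered with the `QueryPlanTool`.

# Hedged sketch: a three-node query plan with two tool-backed leaves and one synthesized root.
from llama_index.core.tools.query_plan import QueryNode, QueryPlan  # assumed module path

plan = QueryPlan(
    nodes=[
        # Leaf nodes: answered directly by the named query engine tool.
        QueryNode(id=1, query_str="What was Uber's revenue in 2021?", tool_name="uber_10k"),
        QueryNode(id=2, query_str="What was Lyft's revenue in 2021?", tool_name="lyft_10k"),
        # Root node: no tool_name; synthesized from the responses of its dependencies.
        QueryNode(
            id=3,
            query_str="Compare Uber's and Lyft's 2021 revenue.",
            dependencies=[1, 2],
        ),
    ]
)

# A QueryPlanTool built over "uber_10k" and "lyft_10k" tools would execute the
# two leaves first, then synthesize the root answer from the child responses,
# mirroring what _execute_node does in the code above.
print([node.id for node in plan.nodes])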
"""Query plan tool.""" from typing import Any, Dict, List, Optional from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.response_synthesizers import ( BaseSynthesizer, get_response_synthesizer, ) from llama_index.core.schema import NodeWithScore, TextNode from llama_index.core.tools.types import BaseTool, ToolMetadata, ToolOutput from llama_index.core.utils import print_text DEFAULT_NAME = "query_plan_tool" QUERYNODE_QUERY_STR_DESC = """\ Question we are asking. This is the query string that will be executed. \ """ QUERYNODE_TOOL_NAME_DESC = """\ Name of the tool to execute the `query_str`. \ Should NOT be specified if there are subquestions to be specified, in which \ case child_nodes should be nonempty instead.\ """ QUERYNODE_DEPENDENCIES_DESC = """\ List of sub-questions that need to be answered in order \ to answer the question given by `query_str`.\ Should be blank if there are no sub-questions to be specified, in which case \ `tool_name` is specified.\ """ class QueryNode(BaseModel): """Query node. A query node represents a query (query_str) that must be answered. It can either be answered by a tool (tool_name), or by a list of child nodes (child_nodes). The tool_name and child_nodes fields are mutually exclusive. """ # NOTE: inspired from https://github.com/jxnl/openai_function_call/pull/3/files id: int = Field(..., description="ID of the query node.") query_str: str = Field(..., description=QUERYNODE_QUERY_STR_DESC) tool_name: Optional[str] = Field( default=None, description="Name of the tool to execute the `query_str`." ) dependencies: List[int] = Field( default_factory=list, description=QUERYNODE_DEPENDENCIES_DESC ) class QueryPlan(BaseModel): """Query plan. Contains a list of QueryNode objects (which is a recursive object). Out of the list of QueryNode objects, one of them must be the root node. The root node is the one that isn't a dependency of any other node. """ nodes: List[QueryNode] = Field( ..., description="The original question we are asking.", ) DEFAULT_DESCRIPTION_PREFIX = """\ This is a query plan tool that takes in a list of tools and executes a \ query plan over these tools to answer a query. The query plan is a DAG of query nodes. Given a list of tool names and the query plan schema, you \ can choose to generate a query plan to answer a question. The tool names and descriptions are as follows: """ class QueryPlanTool(BaseTool): """Query plan tool. A tool that takes in a list of tools and executes a query plan. 
""" def __init__( self, query_engine_tools: List[BaseTool], response_synthesizer: BaseSynthesizer, name: str, description_prefix: str, ) -> None: """Initialize.""" self._query_tools_dict = {t.metadata.name: t for t in query_engine_tools} self._response_synthesizer = response_synthesizer self._name = name self._description_prefix = description_prefix @classmethod def from_defaults( cls, query_engine_tools: List[BaseTool], response_synthesizer: Optional[BaseSynthesizer] = None, name: Optional[str] = None, description_prefix: Optional[str] = None, ) -> "QueryPlanTool": """Initialize from defaults.""" name = name or DEFAULT_NAME description_prefix = description_prefix or DEFAULT_DESCRIPTION_PREFIX response_synthesizer = response_synthesizer or get_response_synthesizer() return cls( query_engine_tools=query_engine_tools, response_synthesizer=response_synthesizer, name=name, description_prefix=description_prefix, ) @property def metadata(self) -> ToolMetadata: """Metadata.""" tools_description = "\n\n".join( [ f"Tool Name: {tool.metadata.name}\n" + f"Tool Description: {tool.metadata.description} " for tool in self._query_tools_dict.values() ] ) # TODO: fill in description with query engine tools. description = f"""\ {self._description_prefix}\n\n {tools_description} """ return ToolMetadata(description, self._name, fn_schema=QueryPlan) def _execute_node( self, node: QueryNode, nodes_dict: Dict[int, QueryNode] ) -> ToolOutput: """Execute node.""" print_text(f"Executing node {node.json()}\n", color="blue") if len(node.dependencies) > 0: print_text( f"Executing {len(node.dependencies)} child nodes\n", color="pink" ) child_query_nodes: List[QueryNode] = [ nodes_dict[dep] for dep in node.dependencies ] # execute the child nodes first child_responses: List[ToolOutput] = [ self._execute_node(child, nodes_dict) for child in child_query_nodes ] # form the child Node/NodeWithScore objects child_nodes = [] for child_query_node, child_response in zip( child_query_nodes, child_responses ): node_text = ( f"Query: {child_query_node.query_str}\n" f"Response: {child_response!s}\n" ) child_node = TextNode(text=node_text) child_nodes.append(child_node) # use response synthesizer to combine results child_nodes_with_scores = [ NodeWithScore(node=n, score=1.0) for n in child_nodes ] response_obj = self._response_synthesizer.synthesize( query=node.query_str, nodes=child_nodes_with_scores, ) response = ToolOutput( content=str(response_obj), tool_name=node.query_str, raw_input={"query": node.query_str}, raw_output=response_obj, ) else: # this is a leaf request, execute the query string using the specified tool tool = self._query_tools_dict[node.tool_name] print_text(f"Selected Tool: {tool.metadata}\n", color="pink") response = tool(node.query_str) print_text( "Executed query, got response.\n" f"Query: {node.query_str}\n" f"Response: {response!s}\n", color="blue", ) return response def _find_root_nodes(self, nodes_dict: Dict[int, QueryNode]) -> List[QueryNode]: """Find root node.""" # the root node is the one that isn't a dependency of any other node node_counts = {node_id: 0 for node_id in nodes_dict} for node in nodes_dict.values(): for dep in node.dependencies: node_counts[dep] += 1 root_node_ids = [ node_id for node_id, count in node_counts.items() if count == 0 ] return [nodes_dict[node_id] for node_id in root_node_ids] def __call__(self, *args: Any, **kwargs: Any) -> ToolOutput: """Call.""" # the kwargs represented as a JSON object # should be a QueryPlan object query_plan = QueryPlan(**kwargs) nodes_dict = 
{node.id: node for node in query_plan.nodes} root_nodes = self._find_root_nodes(nodes_dict) if len(root_nodes) > 1: raise ValueError("Query plan should have exactly one root node.") return self._execute_node(root_nodes[0], nodes_dict)
[ "llama_index.core.schema.TextNode", "llama_index.core.bridge.pydantic.Field", "llama_index.core.tools.types.ToolMetadata", "llama_index.core.schema.NodeWithScore", "llama_index.core.response_synthesizers.get_response_synthesizer", "llama_index.core.utils.print_text" ]
[((1418, 1465), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""ID of the query node."""'}), "(..., description='ID of the query node.')\n", (1423, 1465), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1487, 1535), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': 'QUERYNODE_QUERY_STR_DESC'}), '(..., description=QUERYNODE_QUERY_STR_DESC)\n', (1492, 1535), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1567, 1646), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Name of the tool to execute the `query_str`."""'}), "(default=None, description='Name of the tool to execute the `query_str`.')\n", (1572, 1646), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1691, 1759), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': 'QUERYNODE_DEPENDENCIES_DESC'}), '(default_factory=list, description=QUERYNODE_DEPENDENCIES_DESC)\n', (1696, 1759), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2084, 2146), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""The original question we are asking."""'}), "(..., description='The original question we are asking.')\n", (2089, 2146), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4353, 4411), 'llama_index.core.tools.types.ToolMetadata', 'ToolMetadata', (['description', 'self._name'], {'fn_schema': 'QueryPlan'}), '(description, self._name, fn_schema=QueryPlan)\n', (4365, 4411), False, 'from llama_index.core.tools.types import BaseTool, ToolMetadata, ToolOutput\n'), ((6429, 6549), 'llama_index.core.utils.print_text', 'print_text', (['f"""Executed query, got response.\nQuery: {node.query_str}\nResponse: {response!s}\n"""'], {'color': '"""blue"""'}), '(\n f"""Executed query, got response.\nQuery: {node.query_str}\nResponse: {response!s}\n"""\n , color=\'blue\')\n', (6439, 6549), False, 'from llama_index.core.utils import print_text\n'), ((3593, 3619), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (3617, 3619), False, 'from llama_index.core.response_synthesizers import BaseSynthesizer, get_response_synthesizer\n'), ((6315, 6376), 'llama_index.core.utils.print_text', 'print_text', (['f"""Selected Tool: {tool.metadata}\n"""'], {'color': '"""pink"""'}), "(f'Selected Tool: {tool.metadata}\\n', color='pink')\n", (6325, 6376), False, 'from llama_index.core.utils import print_text\n'), ((5495, 5519), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'node_text'}), '(text=node_text)\n', (5503, 5519), False, 'from llama_index.core.schema import NodeWithScore, TextNode\n'), ((5681, 5713), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'n', 'score': '(1.0)'}), '(node=n, score=1.0)\n', (5694, 5713), False, 'from llama_index.core.schema import NodeWithScore, TextNode\n')]
"""Query plan tool.""" from typing import Any, Dict, List, Optional from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.response_synthesizers import ( BaseSynthesizer, get_response_synthesizer, ) from llama_index.core.schema import NodeWithScore, TextNode from llama_index.core.tools.types import BaseTool, ToolMetadata, ToolOutput from llama_index.core.utils import print_text DEFAULT_NAME = "query_plan_tool" QUERYNODE_QUERY_STR_DESC = """\ Question we are asking. This is the query string that will be executed. \ """ QUERYNODE_TOOL_NAME_DESC = """\ Name of the tool to execute the `query_str`. \ Should NOT be specified if there are subquestions to be specified, in which \ case child_nodes should be nonempty instead.\ """ QUERYNODE_DEPENDENCIES_DESC = """\ List of sub-questions that need to be answered in order \ to answer the question given by `query_str`.\ Should be blank if there are no sub-questions to be specified, in which case \ `tool_name` is specified.\ """ class QueryNode(BaseModel): """Query node. A query node represents a query (query_str) that must be answered. It can either be answered by a tool (tool_name), or by a list of child nodes (child_nodes). The tool_name and child_nodes fields are mutually exclusive. """ # NOTE: inspired from https://github.com/jxnl/openai_function_call/pull/3/files id: int = Field(..., description="ID of the query node.") query_str: str = Field(..., description=QUERYNODE_QUERY_STR_DESC) tool_name: Optional[str] = Field( default=None, description="Name of the tool to execute the `query_str`." ) dependencies: List[int] = Field( default_factory=list, description=QUERYNODE_DEPENDENCIES_DESC ) class QueryPlan(BaseModel): """Query plan. Contains a list of QueryNode objects (which is a recursive object). Out of the list of QueryNode objects, one of them must be the root node. The root node is the one that isn't a dependency of any other node. """ nodes: List[QueryNode] = Field( ..., description="The original question we are asking.", ) DEFAULT_DESCRIPTION_PREFIX = """\ This is a query plan tool that takes in a list of tools and executes a \ query plan over these tools to answer a query. The query plan is a DAG of query nodes. Given a list of tool names and the query plan schema, you \ can choose to generate a query plan to answer a question. The tool names and descriptions are as follows: """ class QueryPlanTool(BaseTool): """Query plan tool. A tool that takes in a list of tools and executes a query plan. 
""" def __init__( self, query_engine_tools: List[BaseTool], response_synthesizer: BaseSynthesizer, name: str, description_prefix: str, ) -> None: """Initialize.""" self._query_tools_dict = {t.metadata.name: t for t in query_engine_tools} self._response_synthesizer = response_synthesizer self._name = name self._description_prefix = description_prefix @classmethod def from_defaults( cls, query_engine_tools: List[BaseTool], response_synthesizer: Optional[BaseSynthesizer] = None, name: Optional[str] = None, description_prefix: Optional[str] = None, ) -> "QueryPlanTool": """Initialize from defaults.""" name = name or DEFAULT_NAME description_prefix = description_prefix or DEFAULT_DESCRIPTION_PREFIX response_synthesizer = response_synthesizer or get_response_synthesizer() return cls( query_engine_tools=query_engine_tools, response_synthesizer=response_synthesizer, name=name, description_prefix=description_prefix, ) @property def metadata(self) -> ToolMetadata: """Metadata.""" tools_description = "\n\n".join( [ f"Tool Name: {tool.metadata.name}\n" + f"Tool Description: {tool.metadata.description} " for tool in self._query_tools_dict.values() ] ) # TODO: fill in description with query engine tools. description = f"""\ {self._description_prefix}\n\n {tools_description} """ return ToolMetadata(description, self._name, fn_schema=QueryPlan) def _execute_node( self, node: QueryNode, nodes_dict: Dict[int, QueryNode] ) -> ToolOutput: """Execute node.""" print_text(f"Executing node {node.json()}\n", color="blue") if len(node.dependencies) > 0: print_text( f"Executing {len(node.dependencies)} child nodes\n", color="pink" ) child_query_nodes: List[QueryNode] = [ nodes_dict[dep] for dep in node.dependencies ] # execute the child nodes first child_responses: List[ToolOutput] = [ self._execute_node(child, nodes_dict) for child in child_query_nodes ] # form the child Node/NodeWithScore objects child_nodes = [] for child_query_node, child_response in zip( child_query_nodes, child_responses ): node_text = ( f"Query: {child_query_node.query_str}\n" f"Response: {child_response!s}\n" ) child_node = TextNode(text=node_text) child_nodes.append(child_node) # use response synthesizer to combine results child_nodes_with_scores = [ NodeWithScore(node=n, score=1.0) for n in child_nodes ] response_obj = self._response_synthesizer.synthesize( query=node.query_str, nodes=child_nodes_with_scores, ) response = ToolOutput( content=str(response_obj), tool_name=node.query_str, raw_input={"query": node.query_str}, raw_output=response_obj, ) else: # this is a leaf request, execute the query string using the specified tool tool = self._query_tools_dict[node.tool_name] print_text(f"Selected Tool: {tool.metadata}\n", color="pink") response = tool(node.query_str) print_text( "Executed query, got response.\n" f"Query: {node.query_str}\n" f"Response: {response!s}\n", color="blue", ) return response def _find_root_nodes(self, nodes_dict: Dict[int, QueryNode]) -> List[QueryNode]: """Find root node.""" # the root node is the one that isn't a dependency of any other node node_counts = {node_id: 0 for node_id in nodes_dict} for node in nodes_dict.values(): for dep in node.dependencies: node_counts[dep] += 1 root_node_ids = [ node_id for node_id, count in node_counts.items() if count == 0 ] return [nodes_dict[node_id] for node_id in root_node_ids] def __call__(self, *args: Any, **kwargs: Any) -> ToolOutput: """Call.""" # the kwargs represented as a JSON object # should be a QueryPlan object query_plan = QueryPlan(**kwargs) nodes_dict = 
{node.id: node for node in query_plan.nodes} root_nodes = self._find_root_nodes(nodes_dict) if len(root_nodes) > 1: raise ValueError("Query plan should have exactly one root node.") return self._execute_node(root_nodes[0], nodes_dict)
[ "llama_index.core.schema.TextNode", "llama_index.core.bridge.pydantic.Field", "llama_index.core.tools.types.ToolMetadata", "llama_index.core.schema.NodeWithScore", "llama_index.core.response_synthesizers.get_response_synthesizer", "llama_index.core.utils.print_text" ]
[((1418, 1465), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""ID of the query node."""'}), "(..., description='ID of the query node.')\n", (1423, 1465), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1487, 1535), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': 'QUERYNODE_QUERY_STR_DESC'}), '(..., description=QUERYNODE_QUERY_STR_DESC)\n', (1492, 1535), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1567, 1646), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Name of the tool to execute the `query_str`."""'}), "(default=None, description='Name of the tool to execute the `query_str`.')\n", (1572, 1646), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1691, 1759), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': 'QUERYNODE_DEPENDENCIES_DESC'}), '(default_factory=list, description=QUERYNODE_DEPENDENCIES_DESC)\n', (1696, 1759), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2084, 2146), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""The original question we are asking."""'}), "(..., description='The original question we are asking.')\n", (2089, 2146), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4353, 4411), 'llama_index.core.tools.types.ToolMetadata', 'ToolMetadata', (['description', 'self._name'], {'fn_schema': 'QueryPlan'}), '(description, self._name, fn_schema=QueryPlan)\n', (4365, 4411), False, 'from llama_index.core.tools.types import BaseTool, ToolMetadata, ToolOutput\n'), ((6429, 6549), 'llama_index.core.utils.print_text', 'print_text', (['f"""Executed query, got response.\nQuery: {node.query_str}\nResponse: {response!s}\n"""'], {'color': '"""blue"""'}), '(\n f"""Executed query, got response.\nQuery: {node.query_str}\nResponse: {response!s}\n"""\n , color=\'blue\')\n', (6439, 6549), False, 'from llama_index.core.utils import print_text\n'), ((3593, 3619), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (3617, 3619), False, 'from llama_index.core.response_synthesizers import BaseSynthesizer, get_response_synthesizer\n'), ((6315, 6376), 'llama_index.core.utils.print_text', 'print_text', (['f"""Selected Tool: {tool.metadata}\n"""'], {'color': '"""pink"""'}), "(f'Selected Tool: {tool.metadata}\\n', color='pink')\n", (6325, 6376), False, 'from llama_index.core.utils import print_text\n'), ((5495, 5519), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'node_text'}), '(text=node_text)\n', (5503, 5519), False, 'from llama_index.core.schema import NodeWithScore, TextNode\n'), ((5681, 5713), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'n', 'score': '(1.0)'}), '(node=n, score=1.0)\n', (5694, 5713), False, 'from llama_index.core.schema import NodeWithScore, TextNode\n')]
from typing import Any, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, ) from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback from llama_index.legacy.llms.generic_utils import ( completion_to_chat_decorator, stream_completion_to_chat_decorator, ) from llama_index.legacy.llms.llm import LLM from llama_index.legacy.llms.watsonx_utils import ( WATSONX_MODELS, get_from_param_or_env_without_error, watsonx_model_to_context_size, ) from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode class WatsonX(LLM): """IBM WatsonX LLM.""" model_id: str = Field(description="The Model to use.") max_new_tokens: int = Field(description="The maximum number of tokens to generate.") temperature: float = Field(description="The temperature to use for sampling.") additional_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional Kwargs for the WatsonX model" ) model_info: Dict[str, Any] = Field( default_factory=dict, description="Details about the selected model" ) _model = PrivateAttr() def __init__( self, credentials: Dict[str, Any], model_id: Optional[str] = "ibm/mpt-7b-instruct2", project_id: Optional[str] = None, space_id: Optional[str] = None, max_new_tokens: Optional[int] = 512, temperature: Optional[float] = 0.1, additional_kwargs: Optional[Dict[str, Any]] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, ) -> None: """Initialize params.""" if model_id not in WATSONX_MODELS: raise ValueError( f"Model name {model_id} not found in {WATSONX_MODELS.keys()}" ) try: from ibm_watson_machine_learning.foundation_models.model import Model except ImportError as e: raise ImportError( "You must install the `ibm_watson_machine_learning` package to use WatsonX" "please `pip install ibm_watson_machine_learning`" ) from e additional_kwargs = additional_kwargs or {} callback_manager = callback_manager or CallbackManager([]) project_id = get_from_param_or_env_without_error( project_id, "IBM_WATSONX_PROJECT_ID" ) space_id = get_from_param_or_env_without_error(space_id, "IBM_WATSONX_SPACE_ID") if project_id is not None or space_id is not None: self._model = Model( model_id=model_id, credentials=credentials, project_id=project_id, space_id=space_id, ) else: raise ValueError( f"Did not find `project_id` or `space_id`, Please pass them as named parameters" f" or as environment variables, `IBM_WATSONX_PROJECT_ID` or `IBM_WATSONX_SPACE_ID`." 
) super().__init__( model_id=model_id, temperature=temperature, max_new_tokens=max_new_tokens, additional_kwargs=additional_kwargs, model_info=self._model.get_details(), callback_manager=callback_manager, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, ) @classmethod def class_name(self) -> str: """Get Class Name.""" return "WatsonX_LLM" @property def metadata(self) -> LLMMetadata: return LLMMetadata( context_window=watsonx_model_to_context_size(self.model_id), num_output=self.max_new_tokens, model_name=self.model_id, ) @property def sample_model_kwargs(self) -> Dict[str, Any]: """Get a sample of Model kwargs that a user can pass to the model.""" try: from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames except ImportError as e: raise ImportError( "You must install the `ibm_watson_machine_learning` package to use WatsonX" "please `pip install ibm_watson_machine_learning`" ) from e params = GenTextParamsMetaNames().get_example_values() params.pop("return_options") return params @property def _model_kwargs(self) -> Dict[str, Any]: base_kwargs = { "max_new_tokens": self.max_new_tokens, "temperature": self.temperature, } return {**base_kwargs, **self.additional_kwargs} def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]: return {**self._model_kwargs, **kwargs} @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: all_kwargs = self._get_all_kwargs(**kwargs) response = self._model.generate_text(prompt=prompt, params=all_kwargs) return CompletionResponse(text=response) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: all_kwargs = self._get_all_kwargs(**kwargs) stream_response = self._model.generate_text_stream( prompt=prompt, params=all_kwargs ) def gen() -> CompletionResponseGen: content = "" for stream_delta in stream_response: content += stream_delta yield CompletionResponse(text=content, delta=stream_delta) return gen() @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: all_kwargs = self._get_all_kwargs(**kwargs) chat_fn = completion_to_chat_decorator(self.complete) return chat_fn(messages, **all_kwargs) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: all_kwargs = self._get_all_kwargs(**kwargs) chat_stream_fn = stream_completion_to_chat_decorator(self.stream_complete) return chat_stream_fn(messages, **all_kwargs) # Async Functions # IBM Watson Machine Learning Package currently does not have Support for Async calls async def acomplete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: raise NotImplementedError async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: raise NotImplementedError async def achat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponse: raise NotImplementedError async def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: raise NotImplementedError
[ "llama_index.legacy.llms.watsonx_utils.WATSONX_MODELS.keys", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.llms.watsonx_utils.get_from_param_or_env_without_error", "llama_index.legacy.callbacks.CallbackManager", "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator", "llama_index.legacy.llms.generic_utils.completion_to_chat_decorator", "llama_index.legacy.llms.watsonx_utils.watsonx_model_to_context_size" ]
[((968, 1006), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Model to use."""'}), "(description='The Model to use.')\n", (973, 1006), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1033, 1095), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""'}), "(description='The maximum number of tokens to generate.')\n", (1038, 1095), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1121, 1178), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (1126, 1178), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1219, 1306), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional Kwargs for the WatsonX model"""'}), "(default_factory=dict, description=\n 'Additional Kwargs for the WatsonX model')\n", (1224, 1306), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1349, 1424), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Details about the selected model"""'}), "(default_factory=dict, description='Details about the selected model')\n", (1354, 1424), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1453, 1466), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1464, 1466), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((5496, 5521), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5519, 5521), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5823, 5848), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5846, 5848), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6402, 6421), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6419, 6421), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6674, 6693), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6691, 6693), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((2920, 2993), 'llama_index.legacy.llms.watsonx_utils.get_from_param_or_env_without_error', 'get_from_param_or_env_without_error', (['project_id', '"""IBM_WATSONX_PROJECT_ID"""'], {}), "(project_id, 'IBM_WATSONX_PROJECT_ID')\n", (2955, 2993), False, 'from llama_index.legacy.llms.watsonx_utils import WATSONX_MODELS, get_from_param_or_env_without_error, watsonx_model_to_context_size\n'), ((3035, 3104), 'llama_index.legacy.llms.watsonx_utils.get_from_param_or_env_without_error', 'get_from_param_or_env_without_error', (['space_id', '"""IBM_WATSONX_SPACE_ID"""'], {}), "(space_id, 'IBM_WATSONX_SPACE_ID')\n", (3070, 3104), False, 'from llama_index.legacy.llms.watsonx_utils import WATSONX_MODELS, get_from_param_or_env_without_error, watsonx_model_to_context_size\n'), ((5783, 5816), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response'}), '(text=response)\n', (5801, 5816), False, 'from 
llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((6576, 6619), 'llama_index.legacy.llms.generic_utils.completion_to_chat_decorator', 'completion_to_chat_decorator', (['self.complete'], {}), '(self.complete)\n', (6604, 6619), False, 'from llama_index.legacy.llms.generic_utils import completion_to_chat_decorator, stream_completion_to_chat_decorator\n'), ((6879, 6936), 'llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator', 'stream_completion_to_chat_decorator', (['self.stream_complete'], {}), '(self.stream_complete)\n', (6914, 6936), False, 'from llama_index.legacy.llms.generic_utils import completion_to_chat_decorator, stream_completion_to_chat_decorator\n'), ((2878, 2897), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2893, 2897), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((3191, 3286), 'ibm_watson_machine_learning.foundation_models.model.Model', 'Model', ([], {'model_id': 'model_id', 'credentials': 'credentials', 'project_id': 'project_id', 'space_id': 'space_id'}), '(model_id=model_id, credentials=credentials, project_id=project_id,\n space_id=space_id)\n', (3196, 3286), False, 'from ibm_watson_machine_learning.foundation_models.model import Model\n'), ((4376, 4420), 'llama_index.legacy.llms.watsonx_utils.watsonx_model_to_context_size', 'watsonx_model_to_context_size', (['self.model_id'], {}), '(self.model_id)\n', (4405, 4420), False, 'from llama_index.legacy.llms.watsonx_utils import WATSONX_MODELS, get_from_param_or_env_without_error, watsonx_model_to_context_size\n'), ((5020, 5044), 'ibm_watson_machine_learning.metanames.GenTextParamsMetaNames', 'GenTextParamsMetaNames', ([], {}), '()\n', (5042, 5044), False, 'from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames\n'), ((6321, 6373), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'content', 'delta': 'stream_delta'}), '(text=content, delta=stream_delta)\n', (6339, 6373), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((2400, 2421), 'llama_index.legacy.llms.watsonx_utils.WATSONX_MODELS.keys', 'WATSONX_MODELS.keys', ([], {}), '()\n', (2419, 2421), False, 'from llama_index.legacy.llms.watsonx_utils import WATSONX_MODELS, get_from_param_or_env_without_error, watsonx_model_to_context_size\n')]
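A hedged construction sketch for the WatsonX wrapper above. The credential keys, endpoint URL, and placeholder IDs are assumptions about the ibm_watson_machine_learning client; the keyword arguments follow the __init__ signature in the extracted code.

# Hedged sketch: instantiating the legacy WatsonX LLM defined above.
# Requires `pip install ibm_watson_machine_learning`; every value below is a placeholder.
from llama_index.legacy.llms.watsonx import WatsonX  # assumed module path

credentials = {
    "url": "https://us-south.ml.cloud.ibm.com",  # assumed regional endpoint
    "apikey": "<IBM_CLOUD_API_KEY>",  # never commit real keys
}

llm = WatsonX(
    credentials=credentials,
    model_id="ibm/mpt-7b-instruct2",  # must be a key of WATSONX_MODELS
    project_id="<WATSONX_PROJECT_ID>",  # or export IBM_WATSONX_PROJECT_ID instead
    max_new_tokens=256,
    temperature=0.1,
)

# complete() wraps Model.generate_text; chat() reuses it via completion_to_chat_decorator.
print(llm.complete("Summarize watsonx.ai in one sentence.").text)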
from typing import Any, Awaitable, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import DEFAULT_TEMPERATURE from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, ) from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback from llama_index.legacy.llms.generic_utils import ( achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator, ) from llama_index.legacy.llms.litellm_utils import ( acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key, ) from llama_index.legacy.llms.llm import LLM from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode DEFAULT_LITELLM_MODEL = "gpt-3.5-turbo" class LiteLLM(LLM): model: str = Field( default=DEFAULT_LITELLM_MODEL, description=( "The LiteLLM model to use. " "For complete list of providers https://docs.litellm.ai/docs/providers" ), ) temperature: float = Field( default=DEFAULT_TEMPERATURE, description="The temperature to use during generation.", gte=0.0, lte=1.0, ) max_tokens: Optional[int] = Field( description="The maximum number of tokens to generate.", gt=0, ) additional_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional kwargs for the LLM API.", # for all inputs https://docs.litellm.ai/docs/completion/input ) max_retries: int = Field( default=10, description="The maximum number of API retries." 
) def __init__( self, model: str = DEFAULT_LITELLM_MODEL, temperature: float = DEFAULT_TEMPERATURE, max_tokens: Optional[int] = None, additional_kwargs: Optional[Dict[str, Any]] = None, max_retries: int = 10, api_key: Optional[str] = None, api_type: Optional[str] = None, api_base: Optional[str] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, **kwargs: Any, ) -> None: if "custom_llm_provider" in kwargs: if ( kwargs["custom_llm_provider"] != "ollama" and kwargs["custom_llm_provider"] != "vllm" ): # don't check keys for local models validate_litellm_api_key(api_key, api_type) else: # by default assume it's a hosted endpoint validate_litellm_api_key(api_key, api_type) additional_kwargs = additional_kwargs or {} if api_key is not None: additional_kwargs["api_key"] = api_key if api_type is not None: additional_kwargs["api_type"] = api_type if api_base is not None: additional_kwargs["api_base"] = api_base super().__init__( model=model, temperature=temperature, max_tokens=max_tokens, additional_kwargs=additional_kwargs, max_retries=max_retries, callback_manager=callback_manager, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, **kwargs, ) def _get_model_name(self) -> str: model_name = self.model if "ft-" in model_name: # legacy fine-tuning model_name = model_name.split(":")[0] elif model_name.startswith("ft:"): model_name = model_name.split(":")[1] return model_name @classmethod def class_name(cls) -> str: return "litellm_llm" @property def metadata(self) -> LLMMetadata: return LLMMetadata( context_window=openai_modelname_to_contextsize(self._get_model_name()), num_output=self.max_tokens or -1, is_chat_model=True, is_function_calling_model=is_function_calling_model(self._get_model_name()), model_name=self.model, ) @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: if self._is_chat_model: chat_fn = self._chat else: chat_fn = completion_to_chat_decorator(self._complete) return chat_fn(messages, **kwargs) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: if self._is_chat_model: stream_chat_fn = self._stream_chat else: stream_chat_fn = stream_completion_to_chat_decorator(self._stream_complete) return stream_chat_fn(messages, **kwargs) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: # litellm assumes all llms are chat llms if self._is_chat_model: complete_fn = chat_to_completion_decorator(self._chat) else: complete_fn = self._complete return complete_fn(prompt, **kwargs) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: if self._is_chat_model: stream_complete_fn = stream_chat_to_completion_decorator(self._stream_chat) else: stream_complete_fn = self._stream_complete return stream_complete_fn(prompt, **kwargs) @property def _is_chat_model(self) -> bool: # litellm assumes all llms are chat llms return True @property def _model_kwargs(self) -> Dict[str, Any]: base_kwargs = { "model": self.model, 
"temperature": self.temperature, "max_tokens": self.max_tokens, } return { **base_kwargs, **self.additional_kwargs, } def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]: return { **self._model_kwargs, **kwargs, } def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: if not self._is_chat_model: raise ValueError("This model is not a chat model.") message_dicts = to_openai_message_dicts(messages) all_kwargs = self._get_all_kwargs(**kwargs) if "max_tokens" in all_kwargs and all_kwargs["max_tokens"] is None: all_kwargs.pop( "max_tokens" ) # don't send max_tokens == None, this throws errors for Non OpenAI providers response = completion_with_retry( is_chat_model=self._is_chat_model, max_retries=self.max_retries, messages=message_dicts, stream=False, **all_kwargs, ) message_dict = response["choices"][0]["message"] message = from_litellm_message(message_dict) return ChatResponse( message=message, raw=response, additional_kwargs=self._get_response_token_counts(response), ) def _stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: if not self._is_chat_model: raise ValueError("This model is not a chat model.") message_dicts = to_openai_message_dicts(messages) all_kwargs = self._get_all_kwargs(**kwargs) if "max_tokens" in all_kwargs and all_kwargs["max_tokens"] is None: all_kwargs.pop( "max_tokens" ) # don't send max_tokens == None, this throws errors for Non OpenAI providers def gen() -> ChatResponseGen: content = "" function_call: Optional[dict] = None for response in completion_with_retry( is_chat_model=self._is_chat_model, max_retries=self.max_retries, messages=message_dicts, stream=True, **all_kwargs, ): delta = response["choices"][0]["delta"] role = delta.get("role", "assistant") content_delta = delta.get("content", "") or "" content += content_delta function_call_delta = delta.get("function_call", None) if function_call_delta is not None: if function_call is None: function_call = function_call_delta ## ensure we do not add a blank function call if function_call.get("function_name", "") is None: del function_call["function_name"] else: function_call["arguments"] += function_call_delta["arguments"] additional_kwargs = {} if function_call is not None: additional_kwargs["function_call"] = function_call yield ChatResponse( message=ChatMessage( role=role, content=content, additional_kwargs=additional_kwargs, ), delta=content_delta, raw=response, additional_kwargs=self._get_response_token_counts(response), ) return gen() def _complete(self, prompt: str, **kwargs: Any) -> CompletionResponse: raise NotImplementedError("litellm assumes all llms are chat llms.") def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen: raise NotImplementedError("litellm assumes all llms are chat llms.") def _get_max_token_for_prompt(self, prompt: str) -> int: try: import tiktoken except ImportError: raise ImportError( "Please install tiktoken to use the max_tokens=None feature." ) context_window = self.metadata.context_window try: encoding = tiktoken.encoding_for_model(self._get_model_name()) except KeyError: encoding = encoding = tiktoken.get_encoding( "cl100k_base" ) # default to using cl10k_base tokens = encoding.encode(prompt) max_token = context_window - len(tokens) if max_token <= 0: raise ValueError( f"The prompt is too long for the model. " f"Please use a prompt that is less than {context_window} tokens." 
) return max_token def _get_response_token_counts(self, raw_response: Any) -> dict: """Get the token usage reported by the response.""" if not isinstance(raw_response, dict): return {} usage = raw_response.get("usage", {}) return { "prompt_tokens": usage.get("prompt_tokens", 0), "completion_tokens": usage.get("completion_tokens", 0), "total_tokens": usage.get("total_tokens", 0), } # ===== Async Endpoints ===== @llm_chat_callback() async def achat( self, messages: Sequence[ChatMessage], **kwargs: Any, ) -> ChatResponse: achat_fn: Callable[..., Awaitable[ChatResponse]] if self._is_chat_model: achat_fn = self._achat else: achat_fn = acompletion_to_chat_decorator(self._acomplete) return await achat_fn(messages, **kwargs) @llm_chat_callback() async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any, ) -> ChatResponseAsyncGen: astream_chat_fn: Callable[..., Awaitable[ChatResponseAsyncGen]] if self._is_chat_model: astream_chat_fn = self._astream_chat else: astream_chat_fn = astream_completion_to_chat_decorator( self._astream_complete ) return await astream_chat_fn(messages, **kwargs) @llm_completion_callback() async def acomplete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: if self._is_chat_model: acomplete_fn = achat_to_completion_decorator(self._achat) else: acomplete_fn = self._acomplete return await acomplete_fn(prompt, **kwargs) @llm_completion_callback() async def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: if self._is_chat_model: astream_complete_fn = astream_chat_to_completion_decorator( self._astream_chat ) else: astream_complete_fn = self._astream_complete return await astream_complete_fn(prompt, **kwargs) async def _achat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponse: if not self._is_chat_model: raise ValueError("This model is not a chat model.") message_dicts = to_openai_message_dicts(messages) all_kwargs = self._get_all_kwargs(**kwargs) response = await acompletion_with_retry( is_chat_model=self._is_chat_model, max_retries=self.max_retries, messages=message_dicts, stream=False, **all_kwargs, ) message_dict = response["choices"][0]["message"] message = from_litellm_message(message_dict) return ChatResponse( message=message, raw=response, additional_kwargs=self._get_response_token_counts(response), ) async def _astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: if not self._is_chat_model: raise ValueError("This model is not a chat model.") message_dicts = to_openai_message_dicts(messages) all_kwargs = self._get_all_kwargs(**kwargs) async def gen() -> ChatResponseAsyncGen: content = "" function_call: Optional[dict] = None async for response in await acompletion_with_retry( is_chat_model=self._is_chat_model, max_retries=self.max_retries, messages=message_dicts, stream=True, **all_kwargs, ): delta = response["choices"][0]["delta"] role = delta.get("role", "assistant") content_delta = delta.get("content", "") or "" content += content_delta function_call_delta = delta.get("function_call", None) if function_call_delta is not None: if function_call is None: function_call = function_call_delta ## ensure we do not add a blank function call if function_call.get("function_name", "") is None: del function_call["function_name"] else: function_call["arguments"] += function_call_delta["arguments"] additional_kwargs = {} if function_call is not None: additional_kwargs["function_call"] = function_call yield 
ChatResponse( message=ChatMessage( role=role, content=content, additional_kwargs=additional_kwargs, ), delta=content_delta, raw=response, additional_kwargs=self._get_response_token_counts(response), ) return gen() async def _acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse: raise NotImplementedError("litellm assumes all llms are chat llms.") async def _astream_complete( self, prompt: str, **kwargs: Any ) -> CompletionResponseAsyncGen: raise NotImplementedError("litellm assumes all llms are chat llms.")
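A hedged usage sketch for the LiteLLM wrapper defined above, placed here before the row's API metadata. The model name and environment variable are illustrative assumptions; LiteLLM treats every model as a chat model, so complete() is routed through chat() internally.

# Hedged sketch: a chat call through the LiteLLM wrapper above, assuming an OpenAI-backed
# model; swap `model` for any provider string litellm supports.
import os

from llama_index.legacy.core.llms.types import ChatMessage
from llama_index.legacy.llms.litellm import LiteLLM  # assumed module path

os.environ["OPENAI_API_KEY"] = "<YOUR_OPENAI_KEY>"  # placeholder credential

llm = LiteLLM(model="gpt-3.5-turbo", temperature=0.2, max_tokens=256)

messages = [
    ChatMessage(role="system", content="You are a terse assistant."),
    ChatMessage(role="user", content="What does a query plan tool do?"),
]
response = llm.chat(messages)  # routed through _chat since _is_chat_model is True
print(response.message.content)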
[ "llama_index.legacy.llms.generic_utils.astream_chat_to_completion_decorator", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.llms.litellm_utils.completion_with_retry", "llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator", "llama_index.legacy.llms.litellm_utils.acompletion_with_retry", "llama_index.legacy.core.llms.types.ChatMessage", "llama_index.legacy.llms.litellm_utils.validate_litellm_api_key", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.llms.generic_utils.astream_completion_to_chat_decorator", "llama_index.legacy.llms.generic_utils.achat_to_completion_decorator", "llama_index.legacy.llms.litellm_utils.to_openai_message_dicts", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.llms.generic_utils.acompletion_to_chat_decorator", "llama_index.legacy.llms.generic_utils.completion_to_chat_decorator", "llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator", "llama_index.legacy.llms.generic_utils.chat_to_completion_decorator", "llama_index.legacy.llms.litellm_utils.from_litellm_message" ]
[((1378, 1535), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_LITELLM_MODEL', 'description': '"""The LiteLLM model to use. For complete list of providers https://docs.litellm.ai/docs/providers"""'}), "(default=DEFAULT_LITELLM_MODEL, description=\n 'The LiteLLM model to use. For complete list of providers https://docs.litellm.ai/docs/providers'\n )\n", (1383, 1535), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1613, 1727), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_TEMPERATURE', 'description': '"""The temperature to use during generation."""', 'gte': '(0.0)', 'lte': '(1.0)'}), "(default=DEFAULT_TEMPERATURE, description=\n 'The temperature to use during generation.', gte=0.0, lte=1.0)\n", (1618, 1727), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1794, 1862), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""', 'gt': '(0)'}), "(description='The maximum number of tokens to generate.', gt=0)\n", (1799, 1862), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1926, 2003), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the LLM API."""'}), "(default_factory=dict, description='Additional kwargs for the LLM API.')\n", (1931, 2003), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2121, 2188), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of API retries."""'}), "(default=10, description='The maximum number of API retries.')\n", (2126, 2188), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((5024, 5043), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (5041, 5043), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5323, 5342), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (5340, 5342), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5688, 5713), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5711, 5713), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6082, 6107), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (6105, 6107), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((12053, 12072), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (12070, 12072), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((12459, 12478), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (12476, 12478), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((12967, 12992), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (12990, 12992), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((13330, 13355), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (13353, 13355), False, 'from llama_index.legacy.llms.base import llm_chat_callback, 
llm_completion_callback\n'), ((7262, 7295), 'llama_index.legacy.llms.litellm_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (7285, 7295), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((7593, 7736), 'llama_index.legacy.llms.litellm_utils.completion_with_retry', 'completion_with_retry', ([], {'is_chat_model': 'self._is_chat_model', 'max_retries': 'self.max_retries', 'messages': 'message_dicts', 'stream': '(False)'}), '(is_chat_model=self._is_chat_model, max_retries=self.\n max_retries, messages=message_dicts, stream=False, **all_kwargs)\n', (7614, 7736), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((7878, 7912), 'llama_index.legacy.llms.litellm_utils.from_litellm_message', 'from_litellm_message', (['message_dict'], {}), '(message_dict)\n', (7898, 7912), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((8316, 8349), 'llama_index.legacy.llms.litellm_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (8339, 8349), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((14006, 14039), 'llama_index.legacy.llms.litellm_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (14029, 14039), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((14403, 14437), 'llama_index.legacy.llms.litellm_utils.from_litellm_message', 'from_litellm_message', (['message_dict'], {}), '(message_dict)\n', (14423, 14437), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((14853, 14886), 'llama_index.legacy.llms.litellm_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (14876, 14886), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((3380, 3423), 'llama_index.legacy.llms.litellm_utils.validate_litellm_api_key', 'validate_litellm_api_key', (['api_key', 'api_type'], {}), '(api_key, api_type)\n', (3404, 3423), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((5229, 5273), 
'llama_index.legacy.llms.generic_utils.completion_to_chat_decorator', 'completion_to_chat_decorator', (['self._complete'], {}), '(self._complete)\n', (5257, 5273), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((5573, 5631), 'llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator', 'stream_completion_to_chat_decorator', (['self._stream_complete'], {}), '(self._stream_complete)\n', (5608, 5631), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((5934, 5974), 'llama_index.legacy.llms.generic_utils.chat_to_completion_decorator', 'chat_to_completion_decorator', (['self._chat'], {}), '(self._chat)\n', (5962, 5974), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((6296, 6350), 'llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator', 'stream_chat_to_completion_decorator', (['self._stream_chat'], {}), '(self._stream_chat)\n', (6331, 6350), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((8768, 8910), 'llama_index.legacy.llms.litellm_utils.completion_with_retry', 'completion_with_retry', ([], {'is_chat_model': 'self._is_chat_model', 'max_retries': 'self.max_retries', 'messages': 'message_dicts', 'stream': '(True)'}), '(is_chat_model=self._is_chat_model, max_retries=self.\n max_retries, messages=message_dicts, stream=True, **all_kwargs)\n', (8789, 8910), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((12356, 12402), 'llama_index.legacy.llms.generic_utils.acompletion_to_chat_decorator', 'acompletion_to_chat_decorator', (['self._acomplete'], {}), '(self._acomplete)\n', (12385, 12402), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((12813, 12873), 'llama_index.legacy.llms.generic_utils.astream_completion_to_chat_decorator', 'astream_completion_to_chat_decorator', (['self._astream_complete'], {}), '(self._astream_complete)\n', (12849, 12873), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, 
astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((13172, 13214), 'llama_index.legacy.llms.generic_utils.achat_to_completion_decorator', 'achat_to_completion_decorator', (['self._achat'], {}), '(self._achat)\n', (13201, 13214), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((13557, 13613), 'llama_index.legacy.llms.generic_utils.astream_chat_to_completion_decorator', 'astream_chat_to_completion_decorator', (['self._astream_chat'], {}), '(self._astream_chat)\n', (13593, 13613), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((14117, 14261), 'llama_index.legacy.llms.litellm_utils.acompletion_with_retry', 'acompletion_with_retry', ([], {'is_chat_model': 'self._is_chat_model', 'max_retries': 'self.max_retries', 'messages': 'message_dicts', 'stream': '(False)'}), '(is_chat_model=self._is_chat_model, max_retries=self.\n max_retries, messages=message_dicts, stream=False, **all_kwargs)\n', (14139, 14261), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((3266, 3309), 'llama_index.legacy.llms.litellm_utils.validate_litellm_api_key', 'validate_litellm_api_key', (['api_key', 'api_type'], {}), '(api_key, api_type)\n', (3290, 3309), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((11130, 11166), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['"""cl100k_base"""'], {}), "('cl100k_base')\n", (11151, 11166), False, 'import tiktoken\n'), ((15103, 15246), 'llama_index.legacy.llms.litellm_utils.acompletion_with_retry', 'acompletion_with_retry', ([], {'is_chat_model': 'self._is_chat_model', 'max_retries': 'self.max_retries', 'messages': 'message_dicts', 'stream': '(True)'}), '(is_chat_model=self._is_chat_model, max_retries=self.\n max_retries, messages=message_dicts, stream=True, **all_kwargs)\n', (15125, 15246), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((9990, 10066), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), '(role=role, content=content, additional_kwargs=additional_kwargs)\n', (10001, 10066), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, 
LLMMetadata\n'), ((16326, 16402), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), '(role=role, content=content, additional_kwargs=additional_kwargs)\n', (16337, 16402), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n')]
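The record above covers the legacy LiteLLM wrapper. As an illustration only (not part of the dataset row), a minimal usage sketch follows; the import path, model name, prompt, and the presence of an API key in the environment are assumptions, while the methods and response attributes mirror the class definition in the record.

# Hedged usage sketch for the LiteLLM wrapper above; anything marked
# "assumed" is not taken from the dataset row.
from llama_index.legacy.core.llms.types import ChatMessage, MessageRole
from llama_index.legacy.llms.litellm import LiteLLM  # import path assumed

# Assumes OPENAI_API_KEY (or another provider key) is set in the environment,
# since the constructor validates the key for hosted endpoints.
llm = LiteLLM(model="gpt-3.5-turbo", temperature=0.1, max_tokens=256)

# complete() is routed through the chat endpoint internally and returns a
# CompletionResponse with a .text attribute.
print(llm.complete("Name one planet.").text)

# chat() returns a ChatResponse; the reply text lives in .message.content.
response = llm.chat([ChatMessage(role=MessageRole.USER, content="Name one planet.")])
print(response.message.content)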
import asyncio

from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex


async def main():
    # DOWNLOAD LLAMADATASET
    rag_dataset, documents = download_llama_dataset("MiniTruthfulQADataset", "./data")

    # BUILD BASIC RAG PIPELINE
    index = VectorStoreIndex.from_documents(documents=documents)
    query_engine = index.as_query_engine()

    # EVALUATE WITH PACK
    RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack")
    rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)

    ############################################################################
    # NOTE: If you have a lower tier subscription for OpenAI like Usage Tier 1 #
    # then you'll need to use different batch_size and sleep_time_in_seconds.  #
    # For Usage Tier 1, settings that seemed to work well were batch_size=5,   #
    # and sleep_time_in_seconds=15 (as of December 2023.)                      #
    ############################################################################
    benchmark_df = await rag_evaluator.arun(
        batch_size=20,  # batches the number of openai api calls to make
        sleep_time_in_seconds=1,  # number of seconds sleep before making an api call
    )
    print(benchmark_df)


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    # main() must be called so that a coroutine object, not the bare function,
    # is handed to the event loop.
    loop.run_until_complete(main())
[ "llama_index.core.VectorStoreIndex.from_documents", "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack" ]
[((265, 322), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""MiniTruthfulQADataset"""', '"""./data"""'], {}), "('MiniTruthfulQADataset', './data')\n", (287, 322), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((367, 419), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (398, 419), False, 'from llama_index.core import VectorStoreIndex\n'), ((512, 561), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack"""'], {}), "('RagEvaluatorPack', './pack')\n", (531, 561), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((1412, 1436), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1434, 1436), False, 'import asyncio\n')]
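The NOTE block in the script above recommends gentler settings for OpenAI Usage Tier 1 accounts. As a sketch, the arun() call inside main() could be swapped for the variant below; the two values come straight from that comment and nothing else changes.

# Drop-in replacement for the arun() call inside main(), for Usage Tier 1
# rate limits (values taken from the NOTE comment in the script above).
benchmark_df = await rag_evaluator.arun(
    batch_size=5,              # fewer OpenAI API calls per batch
    sleep_time_in_seconds=15,  # longer sleep before each batch of calls
)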
from typing import Any, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import DEFAULT_TEMPERATURE # from mistralai.models.chat_completion import ChatMessage from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole, ) from llama_index.legacy.llms.base import ( llm_chat_callback, llm_completion_callback, ) from llama_index.legacy.llms.generic_utils import ( achat_to_completion_decorator, astream_chat_to_completion_decorator, chat_to_completion_decorator, get_from_param_or_env, stream_chat_to_completion_decorator, ) from llama_index.legacy.llms.llm import LLM from llama_index.legacy.llms.mistralai_utils import ( mistralai_modelname_to_contextsize, ) from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode DEFAULT_MISTRALAI_MODEL = "mistral-tiny" DEFAULT_MISTRALAI_ENDPOINT = "https://api.mistral.ai" DEFAULT_MISTRALAI_MAX_TOKENS = 512 class MistralAI(LLM): model: str = Field( default=DEFAULT_MISTRALAI_MODEL, description="The mistralai model to use." ) temperature: float = Field( default=DEFAULT_TEMPERATURE, description="The temperature to use for sampling.", gte=0.0, lte=1.0, ) max_tokens: int = Field( default=DEFAULT_MISTRALAI_MAX_TOKENS, description="The maximum number of tokens to generate.", gt=0, ) timeout: float = Field( default=120, description="The timeout to use in seconds.", gte=0 ) max_retries: int = Field( default=5, description="The maximum number of API retries.", gte=0 ) safe_mode: bool = Field( default=False, description="The parameter to enforce guardrails in chat generations.", ) random_seed: str = Field( default=None, description="The random seed to use for sampling." ) additional_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional kwargs for the MistralAI API." ) _client: Any = PrivateAttr() _aclient: Any = PrivateAttr() def __init__( self, model: str = DEFAULT_MISTRALAI_MODEL, temperature: float = DEFAULT_TEMPERATURE, max_tokens: int = DEFAULT_MISTRALAI_MAX_TOKENS, timeout: int = 120, max_retries: int = 5, safe_mode: bool = False, random_seed: Optional[int] = None, api_key: Optional[str] = None, additional_kwargs: Optional[Dict[str, Any]] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, ) -> None: try: from mistralai.async_client import MistralAsyncClient from mistralai.client import MistralClient except ImportError as e: raise ImportError( "You must install the `mistralai` package to use mistralai." "Please `pip install mistralai`" ) from e additional_kwargs = additional_kwargs or {} callback_manager = callback_manager or CallbackManager([]) api_key = get_from_param_or_env("api_key", api_key, "MISTRAL_API_KEY", "") if not api_key: raise ValueError( "You must provide an API key to use mistralai. " "You can either pass it in as an argument or set it `MISTRAL_API_KEY`." 
) self._client = MistralClient( api_key=api_key, endpoint=DEFAULT_MISTRALAI_ENDPOINT, timeout=timeout, max_retries=max_retries, ) self._aclient = MistralAsyncClient( api_key=api_key, endpoint=DEFAULT_MISTRALAI_ENDPOINT, timeout=timeout, max_retries=max_retries, ) super().__init__( temperature=temperature, max_tokens=max_tokens, additional_kwargs=additional_kwargs, timeout=timeout, max_retries=max_retries, safe_mode=safe_mode, random_seed=random_seed, model=model, callback_manager=callback_manager, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, ) @classmethod def class_name(cls) -> str: return "MistralAI_LLM" @property def metadata(self) -> LLMMetadata: return LLMMetadata( context_window=mistralai_modelname_to_contextsize(self.model), num_output=self.max_tokens, is_chat_model=True, model_name=self.model, safe_mode=self.safe_mode, random_seed=self.random_seed, ) @property def _model_kwargs(self) -> Dict[str, Any]: base_kwargs = { "model": self.model, "temperature": self.temperature, "max_tokens": self.max_tokens, "random_seed": self.random_seed, "safe_mode": self.safe_mode, } return { **base_kwargs, **self.additional_kwargs, } def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]: return { **self._model_kwargs, **kwargs, } @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: # convert messages to mistral ChatMessage from mistralai.client import ChatMessage as mistral_chatmessage messages = [ mistral_chatmessage(role=x.role, content=x.content) for x in messages ] all_kwargs = self._get_all_kwargs(**kwargs) response = self._client.chat(messages=messages, **all_kwargs) return ChatResponse( message=ChatMessage( role=MessageRole.ASSISTANT, content=response.choices[0].message.content ), raw=dict(response), ) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: complete_fn = chat_to_completion_decorator(self.chat) return complete_fn(prompt, **kwargs) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: # convert messages to mistral ChatMessage from mistralai.client import ChatMessage as mistral_chatmessage messages = [ mistral_chatmessage(role=message.role, content=message.content) for message in messages ] all_kwargs = self._get_all_kwargs(**kwargs) response = self._client.chat_stream(messages=messages, **all_kwargs) def gen() -> ChatResponseGen: content = "" role = MessageRole.ASSISTANT for chunk in response: content_delta = chunk.choices[0].delta.content if content_delta is None: continue content += content_delta yield ChatResponse( message=ChatMessage(role=role, content=content), delta=content_delta, raw=chunk, ) return gen() @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: stream_complete_fn = stream_chat_to_completion_decorator(self.stream_chat) return stream_complete_fn(prompt, **kwargs) @llm_chat_callback() async def achat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponse: # convert messages to mistral ChatMessage from mistralai.client import ChatMessage as mistral_chatmessage messages = [ mistral_chatmessage(role=message.role, content=message.content) for message in messages ] all_kwargs = self._get_all_kwargs(**kwargs) response = await 
self._aclient.chat(messages=messages, **all_kwargs) return ChatResponse( message=ChatMessage( role=MessageRole.ASSISTANT, content=response.choices[0].message.content ), raw=dict(response), ) @llm_completion_callback() async def acomplete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: acomplete_fn = achat_to_completion_decorator(self.achat) return await acomplete_fn(prompt, **kwargs) @llm_chat_callback() async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: # convert messages to mistral ChatMessage from mistralai.client import ChatMessage as mistral_chatmessage messages = [ mistral_chatmessage(role=x.role, content=x.content) for x in messages ] all_kwargs = self._get_all_kwargs(**kwargs) response = await self._aclient.chat_stream(messages=messages, **all_kwargs) async def gen() -> ChatResponseAsyncGen: content = "" role = MessageRole.ASSISTANT async for chunk in response: content_delta = chunk.choices[0].delta.content if content_delta is None: continue content += content_delta yield ChatResponse( message=ChatMessage(role=role, content=content), delta=content_delta, raw=chunk, ) return gen() @llm_completion_callback() async def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: astream_complete_fn = astream_chat_to_completion_decorator(self.astream_chat) return await astream_complete_fn(prompt, **kwargs)
[ "llama_index.legacy.llms.generic_utils.astream_chat_to_completion_decorator", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.llms.generic_utils.get_from_param_or_env", "llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator", "llama_index.legacy.core.llms.types.ChatMessage", "llama_index.legacy.llms.mistralai_utils.mistralai_modelname_to_contextsize", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.llms.generic_utils.achat_to_completion_decorator", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.callbacks.CallbackManager", "llama_index.legacy.llms.generic_utils.chat_to_completion_decorator" ]
from typing import Any, Callable, Dict, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import DEFAULT_TEMPERATURE # from mistralai.models.chat_completion import ChatMessage from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole, ) from llama_index.legacy.llms.base import ( llm_chat_callback, llm_completion_callback, ) from llama_index.legacy.llms.generic_utils import ( achat_to_completion_decorator, astream_chat_to_completion_decorator, chat_to_completion_decorator, get_from_param_or_env, stream_chat_to_completion_decorator, ) from llama_index.legacy.llms.llm import LLM from llama_index.legacy.llms.mistralai_utils import ( mistralai_modelname_to_contextsize, ) from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode DEFAULT_MISTRALAI_MODEL = "mistral-tiny" DEFAULT_MISTRALAI_ENDPOINT = "https://api.mistral.ai" DEFAULT_MISTRALAI_MAX_TOKENS = 512 class MistralAI(LLM): model: str = Field( default=DEFAULT_MISTRALAI_MODEL, description="The mistralai model to use." ) temperature: float = Field( default=DEFAULT_TEMPERATURE, description="The temperature to use for sampling.", gte=0.0, lte=1.0, ) max_tokens: int = Field( default=DEFAULT_MISTRALAI_MAX_TOKENS, description="The maximum number of tokens to generate.", gt=0, ) timeout: float = Field( default=120, description="The timeout to use in seconds.", gte=0 ) max_retries: int = Field( default=5, description="The maximum number of API retries.", gte=0 ) safe_mode: bool = Field( default=False, description="The parameter to enforce guardrails in chat generations.", ) random_seed: str = Field( default=None, description="The random seed to use for sampling." ) additional_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional kwargs for the MistralAI API." ) _client: Any = PrivateAttr() _aclient: Any = PrivateAttr() def __init__( self, model: str = DEFAULT_MISTRALAI_MODEL, temperature: float = DEFAULT_TEMPERATURE, max_tokens: int = DEFAULT_MISTRALAI_MAX_TOKENS, timeout: int = 120, max_retries: int = 5, safe_mode: bool = False, random_seed: Optional[int] = None, api_key: Optional[str] = None, additional_kwargs: Optional[Dict[str, Any]] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, ) -> None: try: from mistralai.async_client import MistralAsyncClient from mistralai.client import MistralClient except ImportError as e: raise ImportError( "You must install the `mistralai` package to use mistralai." "Please `pip install mistralai`" ) from e additional_kwargs = additional_kwargs or {} callback_manager = callback_manager or CallbackManager([]) api_key = get_from_param_or_env("api_key", api_key, "MISTRAL_API_KEY", "") if not api_key: raise ValueError( "You must provide an API key to use mistralai. " "You can either pass it in as an argument or set it `MISTRAL_API_KEY`." 
) self._client = MistralClient( api_key=api_key, endpoint=DEFAULT_MISTRALAI_ENDPOINT, timeout=timeout, max_retries=max_retries, ) self._aclient = MistralAsyncClient( api_key=api_key, endpoint=DEFAULT_MISTRALAI_ENDPOINT, timeout=timeout, max_retries=max_retries, ) super().__init__( temperature=temperature, max_tokens=max_tokens, additional_kwargs=additional_kwargs, timeout=timeout, max_retries=max_retries, safe_mode=safe_mode, random_seed=random_seed, model=model, callback_manager=callback_manager, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, ) @classmethod def class_name(cls) -> str: return "MistralAI_LLM" @property def metadata(self) -> LLMMetadata: return LLMMetadata( context_window=mistralai_modelname_to_contextsize(self.model), num_output=self.max_tokens, is_chat_model=True, model_name=self.model, safe_mode=self.safe_mode, random_seed=self.random_seed, ) @property def _model_kwargs(self) -> Dict[str, Any]: base_kwargs = { "model": self.model, "temperature": self.temperature, "max_tokens": self.max_tokens, "random_seed": self.random_seed, "safe_mode": self.safe_mode, } return { **base_kwargs, **self.additional_kwargs, } def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]: return { **self._model_kwargs, **kwargs, } @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: # convert messages to mistral ChatMessage from mistralai.client import ChatMessage as mistral_chatmessage messages = [ mistral_chatmessage(role=x.role, content=x.content) for x in messages ] all_kwargs = self._get_all_kwargs(**kwargs) response = self._client.chat(messages=messages, **all_kwargs) return ChatResponse( message=ChatMessage( role=MessageRole.ASSISTANT, content=response.choices[0].message.content ), raw=dict(response), ) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: complete_fn = chat_to_completion_decorator(self.chat) return complete_fn(prompt, **kwargs) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: # convert messages to mistral ChatMessage from mistralai.client import ChatMessage as mistral_chatmessage messages = [ mistral_chatmessage(role=message.role, content=message.content) for message in messages ] all_kwargs = self._get_all_kwargs(**kwargs) response = self._client.chat_stream(messages=messages, **all_kwargs) def gen() -> ChatResponseGen: content = "" role = MessageRole.ASSISTANT for chunk in response: content_delta = chunk.choices[0].delta.content if content_delta is None: continue content += content_delta yield ChatResponse( message=ChatMessage(role=role, content=content), delta=content_delta, raw=chunk, ) return gen() @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: stream_complete_fn = stream_chat_to_completion_decorator(self.stream_chat) return stream_complete_fn(prompt, **kwargs) @llm_chat_callback() async def achat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponse: # convert messages to mistral ChatMessage from mistralai.client import ChatMessage as mistral_chatmessage messages = [ mistral_chatmessage(role=message.role, content=message.content) for message in messages ] all_kwargs = self._get_all_kwargs(**kwargs) response = await 
self._aclient.chat(messages=messages, **all_kwargs) return ChatResponse( message=ChatMessage( role=MessageRole.ASSISTANT, content=response.choices[0].message.content ), raw=dict(response), ) @llm_completion_callback() async def acomplete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: acomplete_fn = achat_to_completion_decorator(self.achat) return await acomplete_fn(prompt, **kwargs) @llm_chat_callback() async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: # convert messages to mistral ChatMessage from mistralai.client import ChatMessage as mistral_chatmessage messages = [ mistral_chatmessage(role=x.role, content=x.content) for x in messages ] all_kwargs = self._get_all_kwargs(**kwargs) response = await self._aclient.chat_stream(messages=messages, **all_kwargs) async def gen() -> ChatResponseAsyncGen: content = "" role = MessageRole.ASSISTANT async for chunk in response: content_delta = chunk.choices[0].delta.content if content_delta is None: continue content += content_delta yield ChatResponse( message=ChatMessage(role=role, content=content), delta=content_delta, raw=chunk, ) return gen() @llm_completion_callback() async def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: astream_complete_fn = astream_chat_to_completion_decorator(self.astream_chat) return await astream_complete_fn(prompt, **kwargs)
[ "llama_index.legacy.llms.generic_utils.astream_chat_to_completion_decorator", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.llms.generic_utils.get_from_param_or_env", "llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator", "llama_index.legacy.core.llms.types.ChatMessage", "llama_index.legacy.llms.mistralai_utils.mistralai_modelname_to_contextsize", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.llms.generic_utils.achat_to_completion_decorator", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.callbacks.CallbackManager", "llama_index.legacy.llms.generic_utils.chat_to_completion_decorator" ]
[((1271, 1357), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_MISTRALAI_MODEL', 'description': '"""The mistralai model to use."""'}), "(default=DEFAULT_MISTRALAI_MODEL, description=\n 'The mistralai model to use.')\n", (1276, 1357), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1392, 1501), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_TEMPERATURE', 'description': '"""The temperature to use for sampling."""', 'gte': '(0.0)', 'lte': '(1.0)'}), "(default=DEFAULT_TEMPERATURE, description=\n 'The temperature to use for sampling.', gte=0.0, lte=1.0)\n", (1397, 1501), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1558, 1669), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_MISTRALAI_MAX_TOKENS', 'description': '"""The maximum number of tokens to generate."""', 'gt': '(0)'}), "(default=DEFAULT_MISTRALAI_MAX_TOKENS, description=\n 'The maximum number of tokens to generate.', gt=0)\n", (1563, 1669), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1718, 1789), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(120)', 'description': '"""The timeout to use in seconds."""', 'gte': '(0)'}), "(default=120, description='The timeout to use in seconds.', gte=0)\n", (1723, 1789), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1827, 1900), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(5)', 'description': '"""The maximum number of API retries."""', 'gte': '(0)'}), "(default=5, description='The maximum number of API retries.', gte=0)\n", (1832, 1900), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1937, 2034), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""The parameter to enforce guardrails in chat generations."""'}), "(default=False, description=\n 'The parameter to enforce guardrails in chat generations.')\n", (1942, 2034), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2076, 2147), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""The random seed to use for sampling."""'}), "(default=None, description='The random seed to use for sampling.')\n", (2081, 2147), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2202, 2290), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the MistralAI API."""'}), "(default_factory=dict, description=\n 'Additional kwargs for the MistralAI API.')\n", (2207, 2290), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2320, 2333), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2331, 2333), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2354, 2367), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2365, 2367), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((5957, 5976), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (5974, 5976), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6632, 6657), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], 
{}), '()\n', (6655, 6657), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6884, 6903), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6901, 6903), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((7946, 7971), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7969, 7971), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8236, 8255), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (8253, 8255), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8969, 8994), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8992, 8994), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9238, 9257), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9255, 9257), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((10306, 10331), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (10329, 10331), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3684, 3748), 'llama_index.legacy.llms.generic_utils.get_from_param_or_env', 'get_from_param_or_env', (['"""api_key"""', 'api_key', '"""MISTRAL_API_KEY"""', '""""""'], {}), "('api_key', api_key, 'MISTRAL_API_KEY', '')\n", (3705, 3748), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, astream_chat_to_completion_decorator, chat_to_completion_decorator, get_from_param_or_env, stream_chat_to_completion_decorator\n'), ((3995, 4109), 'mistralai.client.MistralClient', 'MistralClient', ([], {'api_key': 'api_key', 'endpoint': 'DEFAULT_MISTRALAI_ENDPOINT', 'timeout': 'timeout', 'max_retries': 'max_retries'}), '(api_key=api_key, endpoint=DEFAULT_MISTRALAI_ENDPOINT, timeout\n =timeout, max_retries=max_retries)\n', (4008, 4109), False, 'from mistralai.client import MistralClient\n'), ((4188, 4306), 'mistralai.async_client.MistralAsyncClient', 'MistralAsyncClient', ([], {'api_key': 'api_key', 'endpoint': 'DEFAULT_MISTRALAI_ENDPOINT', 'timeout': 'timeout', 'max_retries': 'max_retries'}), '(api_key=api_key, endpoint=DEFAULT_MISTRALAI_ENDPOINT,\n timeout=timeout, max_retries=max_retries)\n', (4206, 4306), False, 'from mistralai.async_client import MistralAsyncClient\n'), ((6793, 6832), 'llama_index.legacy.llms.generic_utils.chat_to_completion_decorator', 'chat_to_completion_decorator', (['self.chat'], {}), '(self.chat)\n', (6821, 6832), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, astream_chat_to_completion_decorator, chat_to_completion_decorator, get_from_param_or_env, stream_chat_to_completion_decorator\n'), ((8124, 8177), 'llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator', 'stream_chat_to_completion_decorator', (['self.stream_chat'], {}), '(self.stream_chat)\n', (8159, 8177), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, astream_chat_to_completion_decorator, chat_to_completion_decorator, get_from_param_or_env, stream_chat_to_completion_decorator\n'), ((9138, 9179), 
'llama_index.legacy.llms.generic_utils.achat_to_completion_decorator', 'achat_to_completion_decorator', (['self.achat'], {}), '(self.achat)\n', (9167, 9179), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, astream_chat_to_completion_decorator, chat_to_completion_decorator, get_from_param_or_env, stream_chat_to_completion_decorator\n'), ((10497, 10552), 'llama_index.legacy.llms.generic_utils.astream_chat_to_completion_decorator', 'astream_chat_to_completion_decorator', (['self.astream_chat'], {}), '(self.astream_chat)\n', (10533, 10552), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, astream_chat_to_completion_decorator, chat_to_completion_decorator, get_from_param_or_env, stream_chat_to_completion_decorator\n'), ((3645, 3664), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (3660, 3664), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((6217, 6268), 'mistralai.client.ChatMessage', 'mistral_chatmessage', ([], {'role': 'x.role', 'content': 'x.content'}), '(role=x.role, content=x.content)\n', (6236, 6268), True, 'from mistralai.client import ChatMessage as mistral_chatmessage\n'), ((7168, 7231), 'mistralai.client.ChatMessage', 'mistral_chatmessage', ([], {'role': 'message.role', 'content': 'message.content'}), '(role=message.role, content=message.content)\n', (7187, 7231), True, 'from mistralai.client import ChatMessage as mistral_chatmessage\n'), ((8517, 8580), 'mistralai.client.ChatMessage', 'mistral_chatmessage', ([], {'role': 'message.role', 'content': 'message.content'}), '(role=message.role, content=message.content)\n', (8536, 8580), True, 'from mistralai.client import ChatMessage as mistral_chatmessage\n'), ((9534, 9585), 'mistralai.client.ChatMessage', 'mistral_chatmessage', ([], {'role': 'x.role', 'content': 'x.content'}), '(role=x.role, content=x.content)\n', (9553, 9585), True, 'from mistralai.client import ChatMessage as mistral_chatmessage\n'), ((5163, 5209), 'llama_index.legacy.llms.mistralai_utils.mistralai_modelname_to_contextsize', 'mistralai_modelname_to_contextsize', (['self.model'], {}), '(self.model)\n', (5197, 5209), False, 'from llama_index.legacy.llms.mistralai_utils import mistralai_modelname_to_contextsize\n'), ((6468, 6557), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'response.choices[0].message.content'}), '(role=MessageRole.ASSISTANT, content=response.choices[0].message\n .content)\n', (6479, 6557), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((8805, 8894), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'response.choices[0].message.content'}), '(role=MessageRole.ASSISTANT, content=response.choices[0].message\n .content)\n', (8816, 8894), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((7787, 7826), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content'}), '(role=role, content=content)\n', (7798, 7826), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, 
ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((10147, 10186), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content'}), '(role=role, content=content)\n', (10158, 10186), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n')]
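The record above (flattened source, exported API names, extracted call sites) covers the legacy MistralAI wrapper. Below is a short, hypothetical usage sketch of that wrapper, not taken from the record itself; it assumes the class is re-exported as llama_index.legacy.llms.MistralAI, that the mistralai package is installed, and that MISTRAL_API_KEY is set in the environment.

# Hypothetical usage sketch for the MistralAI wrapper in the record above.
# Assumes `pip install llama-index mistralai` and MISTRAL_API_KEY in the env.
from llama_index.legacy.core.llms.types import ChatMessage, MessageRole
from llama_index.legacy.llms import MistralAI  # assumed re-export path

llm = MistralAI(model="mistral-tiny", temperature=0.1, max_tokens=256)

# complete() is derived from chat() via chat_to_completion_decorator.
print(llm.complete("Name one use case for a 512-token completion.").text)

# chat() converts these messages to the mistralai client's ChatMessage type.
messages = [
    ChatMessage(role=MessageRole.SYSTEM, content="Answer in one sentence."),
    ChatMessage(role=MessageRole.USER, content="What does safe_mode do?"),
]
print(llm.chat(messages).message.content)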
"""Base index classes.""" import logging from abc import ABC, abstractmethod from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast from llama_index.legacy.chat_engine.types import BaseChatEngine, ChatMode from llama_index.legacy.core.base_query_engine import BaseQueryEngine from llama_index.legacy.core.base_retriever import BaseRetriever from llama_index.legacy.data_structs.data_structs import IndexStruct from llama_index.legacy.ingestion import run_transformations from llama_index.legacy.schema import BaseNode, Document, IndexNode from llama_index.legacy.service_context import ServiceContext from llama_index.legacy.storage.docstore.types import BaseDocumentStore, RefDocInfo from llama_index.legacy.storage.storage_context import StorageContext IS = TypeVar("IS", bound=IndexStruct) IndexType = TypeVar("IndexType", bound="BaseIndex") logger = logging.getLogger(__name__) class BaseIndex(Generic[IS], ABC): """Base LlamaIndex. Args: nodes (List[Node]): List of nodes to index show_progress (bool): Whether to show tqdm progress bars. Defaults to False. service_context (ServiceContext): Service context container (contains components like LLM, Embeddings, etc.). """ index_struct_cls: Type[IS] def __init__( self, nodes: Optional[Sequence[BaseNode]] = None, objects: Optional[Sequence[IndexNode]] = None, index_struct: Optional[IS] = None, storage_context: Optional[StorageContext] = None, service_context: Optional[ServiceContext] = None, show_progress: bool = False, **kwargs: Any, ) -> None: """Initialize with parameters.""" if index_struct is None and nodes is None and objects is None: raise ValueError("One of nodes, objects, or index_struct must be provided.") if index_struct is not None and nodes is not None: raise ValueError("Only one of nodes or index_struct can be provided.") # This is to explicitly make sure that the old UX is not used if nodes is not None and len(nodes) >= 1 and not isinstance(nodes[0], BaseNode): if isinstance(nodes[0], Document): raise ValueError( "The constructor now takes in a list of Node objects. " "Since you are passing in a list of Document objects, " "please use `from_documents` instead." ) else: raise ValueError("nodes must be a list of Node objects.") self._service_context = service_context or ServiceContext.from_defaults() self._storage_context = storage_context or StorageContext.from_defaults() self._docstore = self._storage_context.docstore self._show_progress = show_progress self._vector_store = self._storage_context.vector_store self._graph_store = self._storage_context.graph_store objects = objects or [] self._object_map = {} for obj in objects: self._object_map[obj.index_id] = obj.obj obj.obj = None # clear the object avoid serialization issues with self._service_context.callback_manager.as_trace("index_construction"): if index_struct is None: nodes = nodes or [] index_struct = self.build_index_from_nodes( nodes + objects # type: ignore ) self._index_struct = index_struct self._storage_context.index_store.add_index_struct(self._index_struct) @classmethod def from_documents( cls: Type[IndexType], documents: Sequence[Document], storage_context: Optional[StorageContext] = None, service_context: Optional[ServiceContext] = None, show_progress: bool = False, **kwargs: Any, ) -> IndexType: """Create index from documents. Args: documents (Optional[Sequence[BaseDocument]]): List of documents to build the index from. 
""" storage_context = storage_context or StorageContext.from_defaults() service_context = service_context or ServiceContext.from_defaults() docstore = storage_context.docstore with service_context.callback_manager.as_trace("index_construction"): for doc in documents: docstore.set_document_hash(doc.get_doc_id(), doc.hash) nodes = run_transformations( documents, # type: ignore service_context.transformations, show_progress=show_progress, **kwargs, ) return cls( nodes=nodes, storage_context=storage_context, service_context=service_context, show_progress=show_progress, **kwargs, ) @property def index_struct(self) -> IS: """Get the index struct.""" return self._index_struct @property def index_id(self) -> str: """Get the index struct.""" return self._index_struct.index_id def set_index_id(self, index_id: str) -> None: """Set the index id. NOTE: if you decide to set the index_id on the index_struct manually, you will need to explicitly call `add_index_struct` on the `index_store` to update the index store. Args: index_id (str): Index id to set. """ # delete the old index struct old_id = self._index_struct.index_id self._storage_context.index_store.delete_index_struct(old_id) # add the new index struct self._index_struct.index_id = index_id self._storage_context.index_store.add_index_struct(self._index_struct) @property def docstore(self) -> BaseDocumentStore: """Get the docstore corresponding to the index.""" return self._docstore @property def service_context(self) -> ServiceContext: return self._service_context @property def storage_context(self) -> StorageContext: return self._storage_context @property def summary(self) -> str: return str(self._index_struct.summary) @summary.setter def summary(self, new_summary: str) -> None: self._index_struct.summary = new_summary self._storage_context.index_store.add_index_struct(self._index_struct) @abstractmethod def _build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> IS: """Build the index from nodes.""" def build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> IS: """Build the index from nodes.""" self._docstore.add_documents(nodes, allow_update=True) return self._build_index_from_nodes(nodes) @abstractmethod def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None: """Index-specific logic for inserting nodes to the index struct.""" def insert_nodes(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None: """Insert nodes.""" with self._service_context.callback_manager.as_trace("insert_nodes"): self.docstore.add_documents(nodes, allow_update=True) self._insert(nodes, **insert_kwargs) self._storage_context.index_store.add_index_struct(self._index_struct) def insert(self, document: Document, **insert_kwargs: Any) -> None: """Insert a document.""" with self._service_context.callback_manager.as_trace("insert"): nodes = run_transformations( [document], self._service_context.transformations, show_progress=self._show_progress, ) self.insert_nodes(nodes, **insert_kwargs) self.docstore.set_document_hash(document.get_doc_id(), document.hash) @abstractmethod def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None: """Delete a node.""" def delete_nodes( self, node_ids: List[str], delete_from_docstore: bool = False, **delete_kwargs: Any, ) -> None: """Delete a list of nodes from the index. 
Args: doc_ids (List[str]): A list of doc_ids from the nodes to delete """ for node_id in node_ids: self._delete_node(node_id, **delete_kwargs) if delete_from_docstore: self.docstore.delete_document(node_id, raise_error=False) self._storage_context.index_store.add_index_struct(self._index_struct) def delete(self, doc_id: str, **delete_kwargs: Any) -> None: """Delete a document from the index. All nodes in the index related to the index will be deleted. Args: doc_id (str): A doc_id of the ingested document """ logger.warning( "delete() is now deprecated, please refer to delete_ref_doc() to delete " "ingested documents+nodes or delete_nodes to delete a list of nodes." ) self.delete_ref_doc(doc_id) def delete_ref_doc( self, ref_doc_id: str, delete_from_docstore: bool = False, **delete_kwargs: Any ) -> None: """Delete a document and it's nodes by using ref_doc_id.""" ref_doc_info = self.docstore.get_ref_doc_info(ref_doc_id) if ref_doc_info is None: logger.warning(f"ref_doc_id {ref_doc_id} not found, nothing deleted.") return self.delete_nodes( ref_doc_info.node_ids, delete_from_docstore=False, **delete_kwargs, ) if delete_from_docstore: self.docstore.delete_ref_doc(ref_doc_id, raise_error=False) def update(self, document: Document, **update_kwargs: Any) -> None: """Update a document and it's corresponding nodes. This is equivalent to deleting the document and then inserting it again. Args: document (Union[BaseDocument, BaseIndex]): document to update insert_kwargs (Dict): kwargs to pass to insert delete_kwargs (Dict): kwargs to pass to delete """ logger.warning( "update() is now deprecated, please refer to update_ref_doc() to update " "ingested documents+nodes." ) self.update_ref_doc(document, **update_kwargs) def update_ref_doc(self, document: Document, **update_kwargs: Any) -> None: """Update a document and it's corresponding nodes. This is equivalent to deleting the document and then inserting it again. Args: document (Union[BaseDocument, BaseIndex]): document to update insert_kwargs (Dict): kwargs to pass to insert delete_kwargs (Dict): kwargs to pass to delete """ with self._service_context.callback_manager.as_trace("update"): self.delete_ref_doc( document.get_doc_id(), delete_from_docstore=True, **update_kwargs.pop("delete_kwargs", {}), ) self.insert(document, **update_kwargs.pop("insert_kwargs", {})) def refresh( self, documents: Sequence[Document], **update_kwargs: Any ) -> List[bool]: """Refresh an index with documents that have changed. This allows users to save LLM and Embedding model calls, while only updating documents that have any changes in text or metadata. It will also insert any documents that previously were not stored. """ logger.warning( "refresh() is now deprecated, please refer to refresh_ref_docs() to " "refresh ingested documents+nodes with an updated list of documents." ) return self.refresh_ref_docs(documents, **update_kwargs) def refresh_ref_docs( self, documents: Sequence[Document], **update_kwargs: Any ) -> List[bool]: """Refresh an index with documents that have changed. This allows users to save LLM and Embedding model calls, while only updating documents that have any changes in text or metadata. It will also insert any documents that previously were not stored. 
""" with self._service_context.callback_manager.as_trace("refresh"): refreshed_documents = [False] * len(documents) for i, document in enumerate(documents): existing_doc_hash = self._docstore.get_document_hash( document.get_doc_id() ) if existing_doc_hash is None: self.insert(document, **update_kwargs.pop("insert_kwargs", {})) refreshed_documents[i] = True elif existing_doc_hash != document.hash: self.update_ref_doc( document, **update_kwargs.pop("update_kwargs", {}) ) refreshed_documents[i] = True return refreshed_documents @property @abstractmethod def ref_doc_info(self) -> Dict[str, RefDocInfo]: """Retrieve a dict mapping of ingested documents and their nodes+metadata.""" ... @abstractmethod def as_retriever(self, **kwargs: Any) -> BaseRetriever: ... def as_query_engine(self, **kwargs: Any) -> BaseQueryEngine: # NOTE: lazy import from llama_index.legacy.query_engine.retriever_query_engine import ( RetrieverQueryEngine, ) retriever = self.as_retriever(**kwargs) kwargs["retriever"] = retriever if "service_context" not in kwargs: kwargs["service_context"] = self._service_context return RetrieverQueryEngine.from_args(**kwargs) def as_chat_engine( self, chat_mode: ChatMode = ChatMode.BEST, **kwargs: Any ) -> BaseChatEngine: query_engine = self.as_query_engine(**kwargs) if "service_context" not in kwargs: kwargs["service_context"] = self._service_context # resolve chat mode if chat_mode in [ChatMode.REACT, ChatMode.OPENAI, ChatMode.BEST]: # use an agent with query engine tool in these chat modes # NOTE: lazy import from llama_index.legacy.agent import AgentRunner from llama_index.legacy.tools.query_engine import QueryEngineTool # get LLM service_context = cast(ServiceContext, kwargs["service_context"]) llm = service_context.llm # convert query engine to tool query_engine_tool = QueryEngineTool.from_defaults(query_engine=query_engine) return AgentRunner.from_llm(tools=[query_engine_tool], llm=llm, **kwargs) if chat_mode == ChatMode.CONDENSE_QUESTION: # NOTE: lazy import from llama_index.legacy.chat_engine import CondenseQuestionChatEngine return CondenseQuestionChatEngine.from_defaults( query_engine=query_engine, **kwargs, ) elif chat_mode == ChatMode.CONTEXT: from llama_index.legacy.chat_engine import ContextChatEngine return ContextChatEngine.from_defaults( retriever=self.as_retriever(**kwargs), **kwargs, ) elif chat_mode == ChatMode.CONDENSE_PLUS_CONTEXT: from llama_index.legacy.chat_engine import CondensePlusContextChatEngine return CondensePlusContextChatEngine.from_defaults( retriever=self.as_retriever(**kwargs), **kwargs, ) elif chat_mode == ChatMode.SIMPLE: from llama_index.legacy.chat_engine import SimpleChatEngine return SimpleChatEngine.from_defaults( **kwargs, ) else: raise ValueError(f"Unknown chat mode: {chat_mode}") # legacy BaseGPTIndex = BaseIndex
[ "llama_index.legacy.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args", "llama_index.legacy.chat_engine.CondenseQuestionChatEngine.from_defaults", "llama_index.legacy.tools.query_engine.QueryEngineTool.from_defaults", "llama_index.legacy.chat_engine.SimpleChatEngine.from_defaults", "llama_index.legacy.service_context.ServiceContext.from_defaults", "llama_index.legacy.storage.storage_context.StorageContext.from_defaults", "llama_index.legacy.agent.AgentRunner.from_llm", "llama_index.legacy.ingestion.run_transformations" ]
[((793, 825), 'typing.TypeVar', 'TypeVar', (['"""IS"""'], {'bound': 'IndexStruct'}), "('IS', bound=IndexStruct)\n", (800, 825), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast\n'), ((838, 877), 'typing.TypeVar', 'TypeVar', (['"""IndexType"""'], {'bound': '"""BaseIndex"""'}), "('IndexType', bound='BaseIndex')\n", (845, 877), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast\n'), ((888, 915), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (905, 915), False, 'import logging\n'), ((13757, 13797), 'llama_index.legacy.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', ([], {}), '(**kwargs)\n', (13787, 13797), False, 'from llama_index.legacy.query_engine.retriever_query_engine import RetrieverQueryEngine\n'), ((2626, 2656), 'llama_index.legacy.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {}), '()\n', (2654, 2656), False, 'from llama_index.legacy.service_context import ServiceContext\n'), ((2708, 2738), 'llama_index.legacy.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (2736, 2738), False, 'from llama_index.legacy.storage.storage_context import StorageContext\n'), ((4137, 4167), 'llama_index.legacy.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (4165, 4167), False, 'from llama_index.legacy.storage.storage_context import StorageContext\n'), ((4213, 4243), 'llama_index.legacy.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {}), '()\n', (4241, 4243), False, 'from llama_index.legacy.service_context import ServiceContext\n'), ((4493, 4599), 'llama_index.legacy.ingestion.run_transformations', 'run_transformations', (['documents', 'service_context.transformations'], {'show_progress': 'show_progress'}), '(documents, service_context.transformations,\n show_progress=show_progress, **kwargs)\n', (4512, 4599), False, 'from llama_index.legacy.ingestion import run_transformations\n'), ((7604, 7713), 'llama_index.legacy.ingestion.run_transformations', 'run_transformations', (['[document]', 'self._service_context.transformations'], {'show_progress': 'self._show_progress'}), '([document], self._service_context.transformations,\n show_progress=self._show_progress)\n', (7623, 7713), False, 'from llama_index.legacy.ingestion import run_transformations\n'), ((14470, 14517), 'typing.cast', 'cast', (['ServiceContext', "kwargs['service_context']"], {}), "(ServiceContext, kwargs['service_context'])\n", (14474, 14517), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast\n'), ((14632, 14688), 'llama_index.legacy.tools.query_engine.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'query_engine'}), '(query_engine=query_engine)\n', (14661, 14688), False, 'from llama_index.legacy.tools.query_engine import QueryEngineTool\n'), ((14709, 14775), 'llama_index.legacy.agent.AgentRunner.from_llm', 'AgentRunner.from_llm', ([], {'tools': '[query_engine_tool]', 'llm': 'llm'}), '(tools=[query_engine_tool], llm=llm, **kwargs)\n', (14729, 14775), False, 'from llama_index.legacy.agent import AgentRunner\n'), ((14963, 15040), 'llama_index.legacy.chat_engine.CondenseQuestionChatEngine.from_defaults', 'CondenseQuestionChatEngine.from_defaults', ([], {'query_engine': 'query_engine'}), '(query_engine=query_engine, 
**kwargs)\n', (15003, 15040), False, 'from llama_index.legacy.chat_engine import CondenseQuestionChatEngine\n'), ((15793, 15833), 'llama_index.legacy.chat_engine.SimpleChatEngine.from_defaults', 'SimpleChatEngine.from_defaults', ([], {}), '(**kwargs)\n', (15823, 15833), False, 'from llama_index.legacy.chat_engine import SimpleChatEngine\n')]
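The BaseIndex record above defines the shared build / insert / delete / refresh surface plus as_query_engine() and as_chat_engine(). The following sketch shows how a concrete subclass is typically driven; it is illustrative only and assumes VectorStoreIndex and SimpleDirectoryReader are exported from llama_index.legacy and that a local ./data folder with a few text files exists.

# Hypothetical sketch driving a concrete BaseIndex subclass.
# Assumes VectorStoreIndex and SimpleDirectoryReader are exported by
# llama_index.legacy and that ./data contains a few text files.
from llama_index.legacy import SimpleDirectoryReader, VectorStoreIndex

documents = SimpleDirectoryReader("./data").load_data()

# from_documents() records each document hash, runs the transformation
# pipeline, and builds the index from the resulting nodes.
index = VectorStoreIndex.from_documents(documents, show_progress=True)

# as_query_engine() wraps as_retriever() in a RetrieverQueryEngine.
query_engine = index.as_query_engine()
print(query_engine.query("Summarize the corpus in one sentence."))

# refresh_ref_docs() only re-ingests documents whose stored hash changed.
changed = index.refresh_ref_docs(documents)
print(f"{sum(changed)} of {len(changed)} documents were re-ingested")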
"""Base index classes.""" import logging from abc import ABC, abstractmethod from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast from llama_index.legacy.chat_engine.types import BaseChatEngine, ChatMode from llama_index.legacy.core.base_query_engine import BaseQueryEngine from llama_index.legacy.core.base_retriever import BaseRetriever from llama_index.legacy.data_structs.data_structs import IndexStruct from llama_index.legacy.ingestion import run_transformations from llama_index.legacy.schema import BaseNode, Document, IndexNode from llama_index.legacy.service_context import ServiceContext from llama_index.legacy.storage.docstore.types import BaseDocumentStore, RefDocInfo from llama_index.legacy.storage.storage_context import StorageContext IS = TypeVar("IS", bound=IndexStruct) IndexType = TypeVar("IndexType", bound="BaseIndex") logger = logging.getLogger(__name__) class BaseIndex(Generic[IS], ABC): """Base LlamaIndex. Args: nodes (List[Node]): List of nodes to index show_progress (bool): Whether to show tqdm progress bars. Defaults to False. service_context (ServiceContext): Service context container (contains components like LLM, Embeddings, etc.). """ index_struct_cls: Type[IS] def __init__( self, nodes: Optional[Sequence[BaseNode]] = None, objects: Optional[Sequence[IndexNode]] = None, index_struct: Optional[IS] = None, storage_context: Optional[StorageContext] = None, service_context: Optional[ServiceContext] = None, show_progress: bool = False, **kwargs: Any, ) -> None: """Initialize with parameters.""" if index_struct is None and nodes is None and objects is None: raise ValueError("One of nodes, objects, or index_struct must be provided.") if index_struct is not None and nodes is not None: raise ValueError("Only one of nodes or index_struct can be provided.") # This is to explicitly make sure that the old UX is not used if nodes is not None and len(nodes) >= 1 and not isinstance(nodes[0], BaseNode): if isinstance(nodes[0], Document): raise ValueError( "The constructor now takes in a list of Node objects. " "Since you are passing in a list of Document objects, " "please use `from_documents` instead." ) else: raise ValueError("nodes must be a list of Node objects.") self._service_context = service_context or ServiceContext.from_defaults() self._storage_context = storage_context or StorageContext.from_defaults() self._docstore = self._storage_context.docstore self._show_progress = show_progress self._vector_store = self._storage_context.vector_store self._graph_store = self._storage_context.graph_store objects = objects or [] self._object_map = {} for obj in objects: self._object_map[obj.index_id] = obj.obj obj.obj = None # clear the object avoid serialization issues with self._service_context.callback_manager.as_trace("index_construction"): if index_struct is None: nodes = nodes or [] index_struct = self.build_index_from_nodes( nodes + objects # type: ignore ) self._index_struct = index_struct self._storage_context.index_store.add_index_struct(self._index_struct) @classmethod def from_documents( cls: Type[IndexType], documents: Sequence[Document], storage_context: Optional[StorageContext] = None, service_context: Optional[ServiceContext] = None, show_progress: bool = False, **kwargs: Any, ) -> IndexType: """Create index from documents. Args: documents (Optional[Sequence[BaseDocument]]): List of documents to build the index from. 
""" storage_context = storage_context or StorageContext.from_defaults() service_context = service_context or ServiceContext.from_defaults() docstore = storage_context.docstore with service_context.callback_manager.as_trace("index_construction"): for doc in documents: docstore.set_document_hash(doc.get_doc_id(), doc.hash) nodes = run_transformations( documents, # type: ignore service_context.transformations, show_progress=show_progress, **kwargs, ) return cls( nodes=nodes, storage_context=storage_context, service_context=service_context, show_progress=show_progress, **kwargs, ) @property def index_struct(self) -> IS: """Get the index struct.""" return self._index_struct @property def index_id(self) -> str: """Get the index struct.""" return self._index_struct.index_id def set_index_id(self, index_id: str) -> None: """Set the index id. NOTE: if you decide to set the index_id on the index_struct manually, you will need to explicitly call `add_index_struct` on the `index_store` to update the index store. Args: index_id (str): Index id to set. """ # delete the old index struct old_id = self._index_struct.index_id self._storage_context.index_store.delete_index_struct(old_id) # add the new index struct self._index_struct.index_id = index_id self._storage_context.index_store.add_index_struct(self._index_struct) @property def docstore(self) -> BaseDocumentStore: """Get the docstore corresponding to the index.""" return self._docstore @property def service_context(self) -> ServiceContext: return self._service_context @property def storage_context(self) -> StorageContext: return self._storage_context @property def summary(self) -> str: return str(self._index_struct.summary) @summary.setter def summary(self, new_summary: str) -> None: self._index_struct.summary = new_summary self._storage_context.index_store.add_index_struct(self._index_struct) @abstractmethod def _build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> IS: """Build the index from nodes.""" def build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> IS: """Build the index from nodes.""" self._docstore.add_documents(nodes, allow_update=True) return self._build_index_from_nodes(nodes) @abstractmethod def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None: """Index-specific logic for inserting nodes to the index struct.""" def insert_nodes(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None: """Insert nodes.""" with self._service_context.callback_manager.as_trace("insert_nodes"): self.docstore.add_documents(nodes, allow_update=True) self._insert(nodes, **insert_kwargs) self._storage_context.index_store.add_index_struct(self._index_struct) def insert(self, document: Document, **insert_kwargs: Any) -> None: """Insert a document.""" with self._service_context.callback_manager.as_trace("insert"): nodes = run_transformations( [document], self._service_context.transformations, show_progress=self._show_progress, ) self.insert_nodes(nodes, **insert_kwargs) self.docstore.set_document_hash(document.get_doc_id(), document.hash) @abstractmethod def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None: """Delete a node.""" def delete_nodes( self, node_ids: List[str], delete_from_docstore: bool = False, **delete_kwargs: Any, ) -> None: """Delete a list of nodes from the index. 
Args: doc_ids (List[str]): A list of doc_ids from the nodes to delete """ for node_id in node_ids: self._delete_node(node_id, **delete_kwargs) if delete_from_docstore: self.docstore.delete_document(node_id, raise_error=False) self._storage_context.index_store.add_index_struct(self._index_struct) def delete(self, doc_id: str, **delete_kwargs: Any) -> None: """Delete a document from the index. All nodes in the index related to the index will be deleted. Args: doc_id (str): A doc_id of the ingested document """ logger.warning( "delete() is now deprecated, please refer to delete_ref_doc() to delete " "ingested documents+nodes or delete_nodes to delete a list of nodes." ) self.delete_ref_doc(doc_id) def delete_ref_doc( self, ref_doc_id: str, delete_from_docstore: bool = False, **delete_kwargs: Any ) -> None: """Delete a document and it's nodes by using ref_doc_id.""" ref_doc_info = self.docstore.get_ref_doc_info(ref_doc_id) if ref_doc_info is None: logger.warning(f"ref_doc_id {ref_doc_id} not found, nothing deleted.") return self.delete_nodes( ref_doc_info.node_ids, delete_from_docstore=False, **delete_kwargs, ) if delete_from_docstore: self.docstore.delete_ref_doc(ref_doc_id, raise_error=False) def update(self, document: Document, **update_kwargs: Any) -> None: """Update a document and it's corresponding nodes. This is equivalent to deleting the document and then inserting it again. Args: document (Union[BaseDocument, BaseIndex]): document to update insert_kwargs (Dict): kwargs to pass to insert delete_kwargs (Dict): kwargs to pass to delete """ logger.warning( "update() is now deprecated, please refer to update_ref_doc() to update " "ingested documents+nodes." ) self.update_ref_doc(document, **update_kwargs) def update_ref_doc(self, document: Document, **update_kwargs: Any) -> None: """Update a document and it's corresponding nodes. This is equivalent to deleting the document and then inserting it again. Args: document (Union[BaseDocument, BaseIndex]): document to update insert_kwargs (Dict): kwargs to pass to insert delete_kwargs (Dict): kwargs to pass to delete """ with self._service_context.callback_manager.as_trace("update"): self.delete_ref_doc( document.get_doc_id(), delete_from_docstore=True, **update_kwargs.pop("delete_kwargs", {}), ) self.insert(document, **update_kwargs.pop("insert_kwargs", {})) def refresh( self, documents: Sequence[Document], **update_kwargs: Any ) -> List[bool]: """Refresh an index with documents that have changed. This allows users to save LLM and Embedding model calls, while only updating documents that have any changes in text or metadata. It will also insert any documents that previously were not stored. """ logger.warning( "refresh() is now deprecated, please refer to refresh_ref_docs() to " "refresh ingested documents+nodes with an updated list of documents." ) return self.refresh_ref_docs(documents, **update_kwargs) def refresh_ref_docs( self, documents: Sequence[Document], **update_kwargs: Any ) -> List[bool]: """Refresh an index with documents that have changed. This allows users to save LLM and Embedding model calls, while only updating documents that have any changes in text or metadata. It will also insert any documents that previously were not stored. 
""" with self._service_context.callback_manager.as_trace("refresh"): refreshed_documents = [False] * len(documents) for i, document in enumerate(documents): existing_doc_hash = self._docstore.get_document_hash( document.get_doc_id() ) if existing_doc_hash is None: self.insert(document, **update_kwargs.pop("insert_kwargs", {})) refreshed_documents[i] = True elif existing_doc_hash != document.hash: self.update_ref_doc( document, **update_kwargs.pop("update_kwargs", {}) ) refreshed_documents[i] = True return refreshed_documents @property @abstractmethod def ref_doc_info(self) -> Dict[str, RefDocInfo]: """Retrieve a dict mapping of ingested documents and their nodes+metadata.""" ... @abstractmethod def as_retriever(self, **kwargs: Any) -> BaseRetriever: ... def as_query_engine(self, **kwargs: Any) -> BaseQueryEngine: # NOTE: lazy import from llama_index.legacy.query_engine.retriever_query_engine import ( RetrieverQueryEngine, ) retriever = self.as_retriever(**kwargs) kwargs["retriever"] = retriever if "service_context" not in kwargs: kwargs["service_context"] = self._service_context return RetrieverQueryEngine.from_args(**kwargs) def as_chat_engine( self, chat_mode: ChatMode = ChatMode.BEST, **kwargs: Any ) -> BaseChatEngine: query_engine = self.as_query_engine(**kwargs) if "service_context" not in kwargs: kwargs["service_context"] = self._service_context # resolve chat mode if chat_mode in [ChatMode.REACT, ChatMode.OPENAI, ChatMode.BEST]: # use an agent with query engine tool in these chat modes # NOTE: lazy import from llama_index.legacy.agent import AgentRunner from llama_index.legacy.tools.query_engine import QueryEngineTool # get LLM service_context = cast(ServiceContext, kwargs["service_context"]) llm = service_context.llm # convert query engine to tool query_engine_tool = QueryEngineTool.from_defaults(query_engine=query_engine) return AgentRunner.from_llm(tools=[query_engine_tool], llm=llm, **kwargs) if chat_mode == ChatMode.CONDENSE_QUESTION: # NOTE: lazy import from llama_index.legacy.chat_engine import CondenseQuestionChatEngine return CondenseQuestionChatEngine.from_defaults( query_engine=query_engine, **kwargs, ) elif chat_mode == ChatMode.CONTEXT: from llama_index.legacy.chat_engine import ContextChatEngine return ContextChatEngine.from_defaults( retriever=self.as_retriever(**kwargs), **kwargs, ) elif chat_mode == ChatMode.CONDENSE_PLUS_CONTEXT: from llama_index.legacy.chat_engine import CondensePlusContextChatEngine return CondensePlusContextChatEngine.from_defaults( retriever=self.as_retriever(**kwargs), **kwargs, ) elif chat_mode == ChatMode.SIMPLE: from llama_index.legacy.chat_engine import SimpleChatEngine return SimpleChatEngine.from_defaults( **kwargs, ) else: raise ValueError(f"Unknown chat mode: {chat_mode}") # legacy BaseGPTIndex = BaseIndex
[ "llama_index.legacy.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args", "llama_index.legacy.chat_engine.CondenseQuestionChatEngine.from_defaults", "llama_index.legacy.tools.query_engine.QueryEngineTool.from_defaults", "llama_index.legacy.chat_engine.SimpleChatEngine.from_defaults", "llama_index.legacy.service_context.ServiceContext.from_defaults", "llama_index.legacy.storage.storage_context.StorageContext.from_defaults", "llama_index.legacy.agent.AgentRunner.from_llm", "llama_index.legacy.ingestion.run_transformations" ]
[((793, 825), 'typing.TypeVar', 'TypeVar', (['"""IS"""'], {'bound': 'IndexStruct'}), "('IS', bound=IndexStruct)\n", (800, 825), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast\n'), ((838, 877), 'typing.TypeVar', 'TypeVar', (['"""IndexType"""'], {'bound': '"""BaseIndex"""'}), "('IndexType', bound='BaseIndex')\n", (845, 877), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast\n'), ((888, 915), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (905, 915), False, 'import logging\n'), ((13757, 13797), 'llama_index.legacy.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', ([], {}), '(**kwargs)\n', (13787, 13797), False, 'from llama_index.legacy.query_engine.retriever_query_engine import RetrieverQueryEngine\n'), ((2626, 2656), 'llama_index.legacy.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {}), '()\n', (2654, 2656), False, 'from llama_index.legacy.service_context import ServiceContext\n'), ((2708, 2738), 'llama_index.legacy.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (2736, 2738), False, 'from llama_index.legacy.storage.storage_context import StorageContext\n'), ((4137, 4167), 'llama_index.legacy.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (4165, 4167), False, 'from llama_index.legacy.storage.storage_context import StorageContext\n'), ((4213, 4243), 'llama_index.legacy.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {}), '()\n', (4241, 4243), False, 'from llama_index.legacy.service_context import ServiceContext\n'), ((4493, 4599), 'llama_index.legacy.ingestion.run_transformations', 'run_transformations', (['documents', 'service_context.transformations'], {'show_progress': 'show_progress'}), '(documents, service_context.transformations,\n show_progress=show_progress, **kwargs)\n', (4512, 4599), False, 'from llama_index.legacy.ingestion import run_transformations\n'), ((7604, 7713), 'llama_index.legacy.ingestion.run_transformations', 'run_transformations', (['[document]', 'self._service_context.transformations'], {'show_progress': 'self._show_progress'}), '([document], self._service_context.transformations,\n show_progress=self._show_progress)\n', (7623, 7713), False, 'from llama_index.legacy.ingestion import run_transformations\n'), ((14470, 14517), 'typing.cast', 'cast', (['ServiceContext', "kwargs['service_context']"], {}), "(ServiceContext, kwargs['service_context'])\n", (14474, 14517), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast\n'), ((14632, 14688), 'llama_index.legacy.tools.query_engine.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'query_engine'}), '(query_engine=query_engine)\n', (14661, 14688), False, 'from llama_index.legacy.tools.query_engine import QueryEngineTool\n'), ((14709, 14775), 'llama_index.legacy.agent.AgentRunner.from_llm', 'AgentRunner.from_llm', ([], {'tools': '[query_engine_tool]', 'llm': 'llm'}), '(tools=[query_engine_tool], llm=llm, **kwargs)\n', (14729, 14775), False, 'from llama_index.legacy.agent import AgentRunner\n'), ((14963, 15040), 'llama_index.legacy.chat_engine.CondenseQuestionChatEngine.from_defaults', 'CondenseQuestionChatEngine.from_defaults', ([], {'query_engine': 'query_engine'}), '(query_engine=query_engine, 
**kwargs)\n', (15003, 15040), False, 'from llama_index.legacy.chat_engine import CondenseQuestionChatEngine\n'), ((15793, 15833), 'llama_index.legacy.chat_engine.SimpleChatEngine.from_defaults', 'SimpleChatEngine.from_defaults', ([], {}), '(**kwargs)\n', (15823, 15833), False, 'from llama_index.legacy.chat_engine import SimpleChatEngine\n')]
import json
from typing import Any, Callable, Dict, Optional, Sequence

from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import (
    DEFAULT_TEMPERATURE,
)
from llama_index.legacy.core.llms.types import (
    ChatMessage,
    ChatResponse,
    ChatResponseAsyncGen,
    ChatResponseGen,
    CompletionResponse,
    CompletionResponseAsyncGen,
    CompletionResponseGen,
    LLMMetadata,
)
from llama_index.legacy.llms.base import (
    llm_chat_callback,
    llm_completion_callback,
)
from llama_index.legacy.llms.bedrock_utils import (
    BEDROCK_FOUNDATION_LLMS,
    CHAT_ONLY_MODELS,
    STREAMING_MODELS,
    Provider,
    completion_with_retry,
    get_provider,
)
from llama_index.legacy.llms.generic_utils import (
    completion_response_to_chat_response,
    stream_completion_response_to_chat_response,
)
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode


class Bedrock(LLM):
    model: str = Field(description="The modelId of the Bedrock model to use.")
    temperature: float = Field(description="The temperature to use for sampling.")
    max_tokens: int = Field(description="The maximum number of tokens to generate.")
    context_size: int = Field("The maximum number of tokens available for input.")
    profile_name: Optional[str] = Field(
        description="The name of aws profile to use. If not given, then the default profile is used."
    )
    aws_access_key_id: Optional[str] = Field(
        description="AWS Access Key ID to use", exclude=True
    )
    aws_secret_access_key: Optional[str] = Field(
        description="AWS Secret Access Key to use", exclude=True
    )
    aws_session_token: Optional[str] = Field(
        description="AWS Session Token to use", exclude=True
    )
    region_name: Optional[str] = Field(
        description="AWS region name to use. Uses region configured in AWS CLI if not passed",
        exclude=True,
    )
    botocore_session: Optional[Any] = Field(
        description="Use this Botocore session instead of creating a new default one.",
        exclude=True,
    )
    botocore_config: Optional[Any] = Field(
        description="Custom configuration object to use instead of the default generated one.",
        exclude=True,
    )
    max_retries: int = Field(
        default=10, description="The maximum number of API retries.", gt=0
    )
    timeout: float = Field(
        default=60.0,
        description="The timeout for the Bedrock API request in seconds. It will be used for both connect and read timeouts.",
    )
    additional_kwargs: Dict[str, Any] = Field(
        default_factory=dict,
        description="Additional kwargs for the bedrock invokeModel request.",
    )

    _client: Any = PrivateAttr()
    _aclient: Any = PrivateAttr()
    _provider: Provider = PrivateAttr()

    def __init__(
        self,
        model: str,
        temperature: Optional[float] = DEFAULT_TEMPERATURE,
        max_tokens: Optional[int] = 512,
        context_size: Optional[int] = None,
        profile_name: Optional[str] = None,
        aws_access_key_id: Optional[str] = None,
        aws_secret_access_key: Optional[str] = None,
        aws_session_token: Optional[str] = None,
        region_name: Optional[str] = None,
        botocore_session: Optional[Any] = None,
        client: Optional[Any] = None,
        timeout: Optional[float] = 60.0,
        max_retries: Optional[int] = 10,
        botocore_config: Optional[Any] = None,
        additional_kwargs: Optional[Dict[str, Any]] = None,
        callback_manager: Optional[CallbackManager] = None,
        system_prompt: Optional[str] = None,
        messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
        completion_to_prompt: Optional[Callable[[str], str]] = None,
        pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
        output_parser: Optional[BaseOutputParser] = None,
        **kwargs: Any,
    ) -> None:
        if context_size is None and model not in BEDROCK_FOUNDATION_LLMS:
            raise ValueError(
                "`context_size` argument not provided and"
                "model provided refers to a non-foundation model."
                " Please specify the context_size"
            )

        session_kwargs = {
            "profile_name": profile_name,
            "region_name": region_name,
            "aws_access_key_id": aws_access_key_id,
            "aws_secret_access_key": aws_secret_access_key,
            "aws_session_token": aws_session_token,
            "botocore_session": botocore_session,
        }
        config = None
        try:
            import boto3
            from botocore.config import Config

            config = (
                Config(
                    retries={"max_attempts": max_retries, "mode": "standard"},
                    connect_timeout=timeout,
                    read_timeout=timeout,
                )
                if botocore_config is None
                else botocore_config
            )
            session = boto3.Session(**session_kwargs)
        except ImportError:
            raise ImportError(
                "boto3 package not found, install with"
                "'pip install boto3'"
            )

        # Prior to general availability, custom boto3 wheel files were
        # distributed that used the bedrock service to invokeModel.
        # This check prevents any services still using those wheel files
        # from breaking
        if client is not None:
            self._client = client
        elif "bedrock-runtime" in session.get_available_services():
            self._client = session.client("bedrock-runtime", config=config)
        else:
            self._client = session.client("bedrock", config=config)

        additional_kwargs = additional_kwargs or {}
        callback_manager = callback_manager or CallbackManager([])
        context_size = context_size or BEDROCK_FOUNDATION_LLMS[model]
        self._provider = get_provider(model)
        messages_to_prompt = messages_to_prompt or self._provider.messages_to_prompt
        completion_to_prompt = (
            completion_to_prompt or self._provider.completion_to_prompt
        )
        super().__init__(
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
            context_size=context_size,
            profile_name=profile_name,
            timeout=timeout,
            max_retries=max_retries,
            botocore_config=config,
            additional_kwargs=additional_kwargs,
            callback_manager=callback_manager,
            system_prompt=system_prompt,
            messages_to_prompt=messages_to_prompt,
            completion_to_prompt=completion_to_prompt,
            pydantic_program_mode=pydantic_program_mode,
            output_parser=output_parser,
        )

    @classmethod
    def class_name(cls) -> str:
        """Get class name."""
        return "Bedrock_LLM"

    @property
    def metadata(self) -> LLMMetadata:
        return LLMMetadata(
            context_window=self.context_size,
            num_output=self.max_tokens,
            is_chat_model=self.model in CHAT_ONLY_MODELS,
            model_name=self.model,
        )

    @property
    def _model_kwargs(self) -> Dict[str, Any]:
        base_kwargs = {
            "temperature": self.temperature,
            self._provider.max_tokens_key: self.max_tokens,
        }
        return {
            **base_kwargs,
            **self.additional_kwargs,
        }

    def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
        return {
            **self._model_kwargs,
            **kwargs,
        }

    @llm_completion_callback()
    def complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        if not formatted:
            prompt = self.completion_to_prompt(prompt)
        all_kwargs = self._get_all_kwargs(**kwargs)
        request_body = self._provider.get_request_body(prompt, all_kwargs)
        request_body_str = json.dumps(request_body)
        response = completion_with_retry(
            client=self._client,
            model=self.model,
            request_body=request_body_str,
            max_retries=self.max_retries,
            **all_kwargs,
        )["body"].read()
        response = json.loads(response)
        return CompletionResponse(
            text=self._provider.get_text_from_response(response), raw=response
        )

    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        if self.model in BEDROCK_FOUNDATION_LLMS and self.model not in STREAMING_MODELS:
            raise ValueError(f"Model {self.model} does not support streaming")
        if not formatted:
            prompt = self.completion_to_prompt(prompt)
        all_kwargs = self._get_all_kwargs(**kwargs)
        request_body = self._provider.get_request_body(prompt, all_kwargs)
        request_body_str = json.dumps(request_body)
        response = completion_with_retry(
            client=self._client,
            model=self.model,
            request_body=request_body_str,
            max_retries=self.max_retries,
            stream=True,
            **all_kwargs,
        )["body"]

        def gen() -> CompletionResponseGen:
            content = ""
            for r in response:
                r = json.loads(r["chunk"]["bytes"])
                content_delta = self._provider.get_text_from_stream_response(r)
                content += content_delta
                yield CompletionResponse(text=content, delta=content_delta, raw=r)

        return gen()

    @llm_chat_callback()
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        prompt = self.messages_to_prompt(messages)
        completion_response = self.complete(prompt, formatted=True, **kwargs)
        return completion_response_to_chat_response(completion_response)

    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        prompt = self.messages_to_prompt(messages)
        completion_response = self.stream_complete(prompt, formatted=True, **kwargs)
        return stream_completion_response_to_chat_response(completion_response)

    async def achat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponse:
        """Chat asynchronously."""
        # TODO: do synchronous chat for now
        return self.chat(messages, **kwargs)

    async def acomplete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        raise NotImplementedError

    async def astream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseAsyncGen:
        raise NotImplementedError

    async def astream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseAsyncGen:
        raise NotImplementedError
[ "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.llms.bedrock_utils.completion_with_retry", "llama_index.legacy.llms.generic_utils.completion_response_to_chat_response", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.callbacks.CallbackManager", "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.bedrock_utils.get_provider" ]
[((1084, 1145), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The modelId of the Bedrock model to use."""'}), "(description='The modelId of the Bedrock model to use.')\n", (1089, 1145), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1171, 1228), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (1176, 1228), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1251, 1313), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""'}), "(description='The maximum number of tokens to generate.')\n", (1256, 1313), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1338, 1396), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['"""The maximum number of tokens available for input."""'], {}), "('The maximum number of tokens available for input.')\n", (1343, 1396), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1431, 1541), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The name of aws profile to use. If not given, then the default profile is used."""'}), "(description=\n 'The name of aws profile to use. If not given, then the default profile is used.'\n )\n", (1436, 1541), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1585, 1644), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS Access Key ID to use"""', 'exclude': '(True)'}), "(description='AWS Access Key ID to use', exclude=True)\n", (1590, 1644), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1702, 1765), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS Secret Access Key to use"""', 'exclude': '(True)'}), "(description='AWS Secret Access Key to use', exclude=True)\n", (1707, 1765), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1819, 1878), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS Session Token to use"""', 'exclude': '(True)'}), "(description='AWS Session Token to use', exclude=True)\n", (1824, 1878), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1926, 2041), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS region name to use. Uses region configured in AWS CLI if not passed"""', 'exclude': '(True)'}), "(description=\n 'AWS region name to use. 
Uses region configured in AWS CLI if not passed',\n exclude=True)\n", (1931, 2041), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2094, 2202), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Use this Botocore session instead of creating a new default one."""', 'exclude': '(True)'}), "(description=\n 'Use this Botocore session instead of creating a new default one.',\n exclude=True)\n", (2099, 2202), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2254, 2370), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Custom configuration object to use instead of the default generated one."""', 'exclude': '(True)'}), "(description=\n 'Custom configuration object to use instead of the default generated one.',\n exclude=True)\n", (2259, 2370), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2408, 2481), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of API retries."""', 'gt': '(0)'}), "(default=10, description='The maximum number of API retries.', gt=0)\n", (2413, 2481), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2517, 2665), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(60.0)', 'description': '"""The timeout for the Bedrock API request in seconds. It will be used for both connect and read timeouts."""'}), "(default=60.0, description=\n 'The timeout for the Bedrock API request in seconds. It will be used for both connect and read timeouts.'\n )\n", (2522, 2665), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2719, 2821), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the bedrock invokeModel request."""'}), "(default_factory=dict, description=\n 'Additional kwargs for the bedrock invokeModel request.')\n", (2724, 2821), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2860, 2873), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2871, 2873), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2894, 2907), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2905, 2907), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2934, 2947), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2945, 2947), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((7807, 7832), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7830, 7832), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8617, 8642), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8640, 8642), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9840, 9859), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9857, 9859), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6106, 6125), 'llama_index.legacy.llms.bedrock_utils.get_provider', 'get_provider', (['model'], {}), '(model)\n', (6118, 6125), False, 'from llama_index.legacy.llms.bedrock_utils import 
BEDROCK_FOUNDATION_LLMS, CHAT_ONLY_MODELS, STREAMING_MODELS, Provider, completion_with_retry, get_provider\n'), ((7158, 7304), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'self.context_size', 'num_output': 'self.max_tokens', 'is_chat_model': '(self.model in CHAT_ONLY_MODELS)', 'model_name': 'self.model'}), '(context_window=self.context_size, num_output=self.max_tokens,\n is_chat_model=self.model in CHAT_ONLY_MODELS, model_name=self.model)\n', (7169, 7304), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((8181, 8205), 'json.dumps', 'json.dumps', (['request_body'], {}), '(request_body)\n', (8191, 8205), False, 'import json\n'), ((8466, 8486), 'json.loads', 'json.loads', (['response'], {}), '(response)\n', (8476, 8486), False, 'import json\n'), ((9171, 9195), 'json.dumps', 'json.dumps', (['request_body'], {}), '(request_body)\n', (9181, 9195), False, 'import json\n'), ((10088, 10145), 'llama_index.legacy.llms.generic_utils.completion_response_to_chat_response', 'completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (10124, 10145), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((10406, 10470), 'llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response', 'stream_completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (10449, 10470), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((5180, 5211), 'boto3.Session', 'boto3.Session', ([], {}), '(**session_kwargs)\n', (5193, 5211), False, 'import boto3\n'), ((5991, 6010), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6006, 6010), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((9215, 9368), 'llama_index.legacy.llms.bedrock_utils.completion_with_retry', 'completion_with_retry', ([], {'client': 'self._client', 'model': 'self.model', 'request_body': 'request_body_str', 'max_retries': 'self.max_retries', 'stream': '(True)'}), '(client=self._client, model=self.model, request_body=\n request_body_str, max_retries=self.max_retries, stream=True, **all_kwargs)\n', (9236, 9368), False, 'from llama_index.legacy.llms.bedrock_utils import BEDROCK_FOUNDATION_LLMS, CHAT_ONLY_MODELS, STREAMING_MODELS, Provider, completion_with_retry, get_provider\n'), ((4872, 4988), 'botocore.config.Config', 'Config', ([], {'retries': "{'max_attempts': max_retries, 'mode': 'standard'}", 'connect_timeout': 'timeout', 'read_timeout': 'timeout'}), "(retries={'max_attempts': max_retries, 'mode': 'standard'},\n connect_timeout=timeout, read_timeout=timeout)\n", (4878, 4988), False, 'from botocore.config import Config\n'), ((9576, 9607), 'json.loads', 'json.loads', (["r['chunk']['bytes']"], {}), "(r['chunk']['bytes'])\n", (9586, 9607), False, 'import json\n'), ((8225, 8365), 'llama_index.legacy.llms.bedrock_utils.completion_with_retry', 'completion_with_retry', ([], {'client': 'self._client', 'model': 'self.model', 'request_body': 'request_body_str', 'max_retries': 'self.max_retries'}), '(client=self._client, model=self.model, request_body=\n request_body_str, max_retries=self.max_retries, **all_kwargs)\n', (8246, 8365), 
False, 'from llama_index.legacy.llms.bedrock_utils import BEDROCK_FOUNDATION_LLMS, CHAT_ONLY_MODELS, STREAMING_MODELS, Provider, completion_with_retry, get_provider\n'), ((9751, 9811), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'content', 'delta': 'content_delta', 'raw': 'r'}), '(text=content, delta=content_delta, raw=r)\n', (9769, 9811), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n')]
import asyncio

from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex


async def main():
    # DOWNLOAD LLAMADATASET
    rag_dataset, documents = download_llama_dataset(
        "BraintrustCodaHelpDeskDataset", "./braintrust_codahdd"
    )

    # BUILD BASIC RAG PIPELINE
    index = VectorStoreIndex.from_documents(documents=documents)
    query_engine = index.as_query_engine()

    # EVALUATE WITH PACK
    RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack_stuff")
    rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)

    ############################################################################
    # NOTE: If have a lower tier subscription for OpenAI API like Usage Tier 1 #
    # then you'll need to use different batch_size and sleep_time_in_seconds.  #
    # For Usage Tier 1, settings that seemed to work well were batch_size=5,   #
    # and sleep_time_in_seconds=15 (as of December 2023.)                      #
    ############################################################################
    benchmark_df = await rag_evaluator.arun(
        batch_size=20,  # batches the number of openai api calls to make
        sleep_time_in_seconds=1,  # number of seconds sleep before making an api call
    )
    print(benchmark_df)


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    # Pass a coroutine object (main()), not the function itself, to run_until_complete
    loop.run_until_complete(main())
[ "llama_index.core.VectorStoreIndex.from_documents", "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack" ]
[((265, 344), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""BraintrustCodaHelpDeskDataset"""', '"""./braintrust_codahdd"""'], {}), "('BraintrustCodaHelpDeskDataset', './braintrust_codahdd')\n", (287, 344), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((403, 455), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (434, 455), False, 'from llama_index.core import VectorStoreIndex\n'), ((548, 603), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack_stuff"""'], {}), "('RagEvaluatorPack', './pack_stuff')\n", (567, 603), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((1454, 1478), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1476, 1478), False, 'import asyncio\n')]