| code (string, 161–233k chars) | apis (sequence, 1–24 items) | extract_api (string, 162–68.5k chars) |
|---|---|---|
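Each row below pairs a raw Python source file (code) with the fully qualified llama_index API names it uses (apis) and positional call-extraction tuples (extract_api). The sketch that follows is a minimal, hedged illustration of reading one such row: the dataset identifier is a hypothetical placeholder, and treating extract_api as a Python-literal list of ((start, end), qualified_name, ...) tuples is an assumption inferred from the rows below.
import ast
from datasets import load_dataset

# Hypothetical dataset id; substitute the real one. Parsing extract_api with
# ast.literal_eval assumes the column stores a Python-literal list of tuples,
# as the rows below suggest.
ds = load_dataset("your-org/llamaindex-api-calls", split="train")
row = ds[0]

print(row["apis"])  # e.g. ["llama_index.legacy.bridge.pydantic.Field", ...]
for (start, end), qualified_name, *rest in ast.literal_eval(row["extract_api"]):
    print(qualified_name, "->", row["code"][start:end][:60])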
from typing import Any, Dict, Optional
from llama_index.legacy.bridge.pydantic import Field
from llama_index.legacy.constants import (
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.legacy.core.llms.types import LLMMetadata
from llama_index.legacy.llms.generic_utils import get_from_param_or_env
from llama_index.legacy.llms.openai_like import OpenAILike
DEFAULT_API_BASE = "https://router.neutrinoapp.com/api/llm-router"
DEFAULT_ROUTER = "default"
MAX_CONTEXT_WINDOW = 200000
class Neutrino(OpenAILike):
model: str = Field(
description="The Neutrino router to use. See https://docs.neutrinoapp.com/router for details."
)
context_window: int = Field(
default=MAX_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model. Defaults to the largest supported model (Claude).",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.__fields__["is_chat_model"].field_info.description,
)
def __init__(
self,
model: Optional[str] = None,
router: str = DEFAULT_ROUTER,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "NEUTRINO_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "NEUTRINO_API_KEY")
model = model or router
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "Neutrino_LLM"
| [
"llama_index.legacy.llms.generic_utils.get_from_param_or_env",
"llama_index.legacy.bridge.pydantic.Field"
] | [((548, 659), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Neutrino router to use. See https://docs.neutrinoapp.com/router for details."""'}), "(description=\n 'The Neutrino router to use. See https://docs.neutrinoapp.com/router for details.'\n )\n", (553, 659), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((690, 856), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'MAX_CONTEXT_WINDOW', 'description': '"""The maximum number of context tokens for the model. Defaults to the largest supported model (Claude)."""', 'gt': '(0)'}), "(default=MAX_CONTEXT_WINDOW, description=\n 'The maximum number of context tokens for the model. Defaults to the largest supported model (Claude).'\n , gt=0)\n", (695, 856), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((904, 1004), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': "LLMMetadata.__fields__['is_chat_model'].field_info.description"}), "(default=True, description=LLMMetadata.__fields__['is_chat_model'].\n field_info.description)\n", (909, 1004), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1519, 1583), 'llama_index.legacy.llms.generic_utils.get_from_param_or_env', 'get_from_param_or_env', (['"""api_base"""', 'api_base', '"""NEUTRINO_API_BASE"""'], {}), "('api_base', api_base, 'NEUTRINO_API_BASE')\n", (1540, 1583), False, 'from llama_index.legacy.llms.generic_utils import get_from_param_or_env\n'), ((1602, 1663), 'llama_index.legacy.llms.generic_utils.get_from_param_or_env', 'get_from_param_or_env', (['"""api_key"""', 'api_key', '"""NEUTRINO_API_KEY"""'], {}), "('api_key', api_key, 'NEUTRINO_API_KEY')\n", (1623, 1663), False, 'from llama_index.legacy.llms.generic_utils import get_from_param_or_env\n')] |
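A brief usage sketch for the Neutrino sample above (not part of the dataset row): it assumes NEUTRINO_API_KEY is exported in the environment and that the class lives at the assumed path llama_index.legacy.llms.neutrino; the call goes through the standard complete() interface inherited from OpenAILike.
# Hedged usage sketch; assumes NEUTRINO_API_KEY is set and the import path below
# matches where the class above is defined.
from llama_index.legacy.llms.neutrino import Neutrino

llm = Neutrino(router="default", temperature=0.0, max_tokens=256)
response = llm.complete("In one sentence, what does an LLM router do?")
print(response.text)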
"""Tree-based index."""
from enum import Enum
from typing import Any, Dict, Optional, Sequence, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.embeddings.base import BaseEmbedding
# from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.core.indices.base import BaseIndex
from llama_index.core.indices.common_tree.base import GPTTreeIndexBuilder
from llama_index.core.indices.tree.inserter import TreeIndexInserter
from llama_index.core.llms.llm import LLM
from llama_index.core.prompts import BasePromptTemplate
from llama_index.core.prompts.default_prompts import (
DEFAULT_INSERT_PROMPT,
DEFAULT_SUMMARY_PROMPT,
)
from llama_index.core.schema import BaseNode, IndexNode
from llama_index.core.service_context import ServiceContext
from llama_index.core.settings import (
Settings,
embed_model_from_settings_or_context,
llm_from_settings_or_context,
)
from llama_index.core.storage.docstore.types import RefDocInfo
class TreeRetrieverMode(str, Enum):
SELECT_LEAF = "select_leaf"
SELECT_LEAF_EMBEDDING = "select_leaf_embedding"
ALL_LEAF = "all_leaf"
ROOT = "root"
REQUIRE_TREE_MODES = {
TreeRetrieverMode.SELECT_LEAF,
TreeRetrieverMode.SELECT_LEAF_EMBEDDING,
TreeRetrieverMode.ROOT,
}
class TreeIndex(BaseIndex[IndexGraph]):
"""Tree Index.
The tree index is a tree-structured index, where each node is a summary of
its children nodes. During index construction, the tree is built bottom-up
until we end up with a set of root_nodes.
There are a few different options during query time (see :ref:`Ref-Query`).
The main option is to traverse down the tree from the root nodes.
A secondary option is to synthesize the answer directly from the root nodes.
Args:
summary_template (Optional[BasePromptTemplate]): A Summarization Prompt
(see :ref:`Prompt-Templates`).
insert_prompt (Optional[BasePromptTemplate]): A Tree Insertion Prompt
(see :ref:`Prompt-Templates`).
num_children (int): The number of children each node should have.
build_tree (bool): Whether to build the tree during index construction.
show_progress (bool): Whether to show progress bars. Defaults to False.
"""
index_struct_cls = IndexGraph
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
objects: Optional[Sequence[IndexNode]] = None,
index_struct: Optional[IndexGraph] = None,
llm: Optional[LLM] = None,
summary_template: Optional[BasePromptTemplate] = None,
insert_prompt: Optional[BasePromptTemplate] = None,
num_children: int = 10,
build_tree: bool = True,
use_async: bool = False,
show_progress: bool = False,
# deprecated
service_context: Optional[ServiceContext] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
# need to set parameters before building index in base class.
self.num_children = num_children
self.summary_template = summary_template or DEFAULT_SUMMARY_PROMPT
self.insert_prompt: BasePromptTemplate = insert_prompt or DEFAULT_INSERT_PROMPT
self.build_tree = build_tree
self._use_async = use_async
self._llm = llm or llm_from_settings_or_context(Settings, service_context)
super().__init__(
nodes=nodes,
index_struct=index_struct,
service_context=service_context,
show_progress=show_progress,
objects=objects,
**kwargs,
)
def as_retriever(
self,
retriever_mode: Union[str, TreeRetrieverMode] = TreeRetrieverMode.SELECT_LEAF,
embed_model: Optional[BaseEmbedding] = None,
**kwargs: Any,
) -> BaseRetriever:
# NOTE: lazy import
from llama_index.core.indices.tree.all_leaf_retriever import (
TreeAllLeafRetriever,
)
from llama_index.core.indices.tree.select_leaf_embedding_retriever import (
TreeSelectLeafEmbeddingRetriever,
)
from llama_index.core.indices.tree.select_leaf_retriever import (
TreeSelectLeafRetriever,
)
from llama_index.core.indices.tree.tree_root_retriever import (
TreeRootRetriever,
)
self._validate_build_tree_required(TreeRetrieverMode(retriever_mode))
if retriever_mode == TreeRetrieverMode.SELECT_LEAF:
return TreeSelectLeafRetriever(self, object_map=self._object_map, **kwargs)
elif retriever_mode == TreeRetrieverMode.SELECT_LEAF_EMBEDDING:
embed_model = embed_model or embed_model_from_settings_or_context(
Settings, self._service_context
)
return TreeSelectLeafEmbeddingRetriever(
self, embed_model=embed_model, object_map=self._object_map, **kwargs
)
elif retriever_mode == TreeRetrieverMode.ROOT:
return TreeRootRetriever(self, object_map=self._object_map, **kwargs)
elif retriever_mode == TreeRetrieverMode.ALL_LEAF:
return TreeAllLeafRetriever(self, object_map=self._object_map, **kwargs)
else:
raise ValueError(f"Unknown retriever mode: {retriever_mode}")
def _validate_build_tree_required(self, retriever_mode: TreeRetrieverMode) -> None:
"""Check if index supports modes that require trees."""
if retriever_mode in REQUIRE_TREE_MODES and not self.build_tree:
raise ValueError(
"Index was constructed without building trees, "
f"but retriever mode {retriever_mode} requires trees."
)
def _build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> IndexGraph:
"""Build the index from nodes."""
index_builder = GPTTreeIndexBuilder(
self.num_children,
self.summary_template,
service_context=self.service_context,
llm=self._llm,
use_async=self._use_async,
show_progress=self._show_progress,
docstore=self._docstore,
)
return index_builder.build_from_nodes(nodes, build_tree=self.build_tree)
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert a document."""
# TODO: allow to customize insert prompt
inserter = TreeIndexInserter(
self.index_struct,
service_context=self.service_context,
llm=self._llm,
num_children=self.num_children,
insert_prompt=self.insert_prompt,
summary_prompt=self.summary_template,
docstore=self._docstore,
)
inserter.insert(nodes)
def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
"""Delete a node."""
raise NotImplementedError("Delete not implemented for tree index.")
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
node_doc_ids = list(self.index_struct.all_nodes.values())
nodes = self.docstore.get_nodes(node_doc_ids)
all_ref_doc_info = {}
for node in nodes:
ref_node = node.source_node
if not ref_node:
continue
ref_doc_info = self.docstore.get_ref_doc_info(ref_node.node_id)
if not ref_doc_info:
continue
all_ref_doc_info[ref_node.node_id] = ref_doc_info
return all_ref_doc_info
# legacy
GPTTreeIndex = TreeIndex
| [
"llama_index.core.indices.tree.select_leaf_embedding_retriever.TreeSelectLeafEmbeddingRetriever",
"llama_index.core.settings.embed_model_from_settings_or_context",
"llama_index.core.indices.tree.inserter.TreeIndexInserter",
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.indices.tree.select_leaf_retriever.TreeSelectLeafRetriever",
"llama_index.core.indices.tree.all_leaf_retriever.TreeAllLeafRetriever",
"llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder",
"llama_index.core.indices.tree.tree_root_retriever.TreeRootRetriever"
] | [((5992, 6202), 'llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder', 'GPTTreeIndexBuilder', (['self.num_children', 'self.summary_template'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'use_async': 'self._use_async', 'show_progress': 'self._show_progress', 'docstore': 'self._docstore'}), '(self.num_children, self.summary_template,\n service_context=self.service_context, llm=self._llm, use_async=self.\n _use_async, show_progress=self._show_progress, docstore=self._docstore)\n', (6011, 6202), False, 'from llama_index.core.indices.common_tree.base import GPTTreeIndexBuilder\n'), ((6552, 6784), 'llama_index.core.indices.tree.inserter.TreeIndexInserter', 'TreeIndexInserter', (['self.index_struct'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'num_children': 'self.num_children', 'insert_prompt': 'self.insert_prompt', 'summary_prompt': 'self.summary_template', 'docstore': 'self._docstore'}), '(self.index_struct, service_context=self.service_context,\n llm=self._llm, num_children=self.num_children, insert_prompt=self.\n insert_prompt, summary_prompt=self.summary_template, docstore=self.\n _docstore)\n', (6569, 6784), False, 'from llama_index.core.indices.tree.inserter import TreeIndexInserter\n'), ((3443, 3498), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (3471, 3498), False, 'from llama_index.core.settings import Settings, embed_model_from_settings_or_context, llm_from_settings_or_context\n'), ((4636, 4704), 'llama_index.core.indices.tree.select_leaf_retriever.TreeSelectLeafRetriever', 'TreeSelectLeafRetriever', (['self'], {'object_map': 'self._object_map'}), '(self, object_map=self._object_map, **kwargs)\n', (4659, 4704), False, 'from llama_index.core.indices.tree.select_leaf_retriever import TreeSelectLeafRetriever\n'), ((4937, 5044), 'llama_index.core.indices.tree.select_leaf_embedding_retriever.TreeSelectLeafEmbeddingRetriever', 'TreeSelectLeafEmbeddingRetriever', (['self'], {'embed_model': 'embed_model', 'object_map': 'self._object_map'}), '(self, embed_model=embed_model, object_map=\n self._object_map, **kwargs)\n', (4969, 5044), False, 'from llama_index.core.indices.tree.select_leaf_embedding_retriever import TreeSelectLeafEmbeddingRetriever\n'), ((4818, 4887), 'llama_index.core.settings.embed_model_from_settings_or_context', 'embed_model_from_settings_or_context', (['Settings', 'self._service_context'], {}), '(Settings, self._service_context)\n', (4854, 4887), False, 'from llama_index.core.settings import Settings, embed_model_from_settings_or_context, llm_from_settings_or_context\n'), ((5144, 5206), 'llama_index.core.indices.tree.tree_root_retriever.TreeRootRetriever', 'TreeRootRetriever', (['self'], {'object_map': 'self._object_map'}), '(self, object_map=self._object_map, **kwargs)\n', (5161, 5206), False, 'from llama_index.core.indices.tree.tree_root_retriever import TreeRootRetriever\n'), ((5285, 5350), 'llama_index.core.indices.tree.all_leaf_retriever.TreeAllLeafRetriever', 'TreeAllLeafRetriever', (['self'], {'object_map': 'self._object_map'}), '(self, object_map=self._object_map, **kwargs)\n', (5305, 5350), False, 'from llama_index.core.indices.tree.all_leaf_retriever import TreeAllLeafRetriever\n')] |
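To connect the retriever modes above to concrete calls, here is a hedged usage sketch: it assumes Settings.llm has already been configured with a working LLM, and that TreeIndex and Document are exported from llama_index.core as in current releases.
# Hedged usage sketch; assumes Settings.llm is already configured with a working LLM.
from llama_index.core import Document, TreeIndex

docs = [
    Document(text="Alice maintains the retrieval code."),
    Document(text="Bob maintains the indexing code."),
]
index = TreeIndex.from_documents(docs, num_children=2)

# retriever_mode accepts the string values enumerated in TreeRetrieverMode above.
retriever = index.as_retriever(retriever_mode="root")
for result in retriever.retrieve("Who maintains the indexing code?"):
    print(result.node.get_content())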
"""Tree-based index."""
from enum import Enum
from typing import Any, Dict, Optional, Sequence, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.embeddings.base import BaseEmbedding
# from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.core.indices.base import BaseIndex
from llama_index.core.indices.common_tree.base import GPTTreeIndexBuilder
from llama_index.core.indices.tree.inserter import TreeIndexInserter
from llama_index.core.llms.llm import LLM
from llama_index.core.prompts import BasePromptTemplate
from llama_index.core.prompts.default_prompts import (
DEFAULT_INSERT_PROMPT,
DEFAULT_SUMMARY_PROMPT,
)
from llama_index.core.schema import BaseNode, IndexNode
from llama_index.core.service_context import ServiceContext
from llama_index.core.settings import (
Settings,
embed_model_from_settings_or_context,
llm_from_settings_or_context,
)
from llama_index.core.storage.docstore.types import RefDocInfo
class TreeRetrieverMode(str, Enum):
SELECT_LEAF = "select_leaf"
SELECT_LEAF_EMBEDDING = "select_leaf_embedding"
ALL_LEAF = "all_leaf"
ROOT = "root"
REQUIRE_TREE_MODES = {
TreeRetrieverMode.SELECT_LEAF,
TreeRetrieverMode.SELECT_LEAF_EMBEDDING,
TreeRetrieverMode.ROOT,
}
class TreeIndex(BaseIndex[IndexGraph]):
"""Tree Index.
The tree index is a tree-structured index, where each node is a summary of
the children nodes. During index construction, the tree is constructed
in a bottoms-up fashion until we end up with a set of root_nodes.
There are a few different options during query time (see :ref:`Ref-Query`).
The main option is to traverse down the tree from the root nodes.
A secondary answer is to directly synthesize the answer from the root nodes.
Args:
summary_template (Optional[BasePromptTemplate]): A Summarization Prompt
(see :ref:`Prompt-Templates`).
insert_prompt (Optional[BasePromptTemplate]): An Tree Insertion Prompt
(see :ref:`Prompt-Templates`).
num_children (int): The number of children each node should have.
build_tree (bool): Whether to build the tree during index construction.
show_progress (bool): Whether to show progress bars. Defaults to False.
"""
index_struct_cls = IndexGraph
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
objects: Optional[Sequence[IndexNode]] = None,
index_struct: Optional[IndexGraph] = None,
llm: Optional[LLM] = None,
summary_template: Optional[BasePromptTemplate] = None,
insert_prompt: Optional[BasePromptTemplate] = None,
num_children: int = 10,
build_tree: bool = True,
use_async: bool = False,
show_progress: bool = False,
# deprecated
service_context: Optional[ServiceContext] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
# need to set parameters before building index in base class.
self.num_children = num_children
self.summary_template = summary_template or DEFAULT_SUMMARY_PROMPT
self.insert_prompt: BasePromptTemplate = insert_prompt or DEFAULT_INSERT_PROMPT
self.build_tree = build_tree
self._use_async = use_async
self._llm = llm or llm_from_settings_or_context(Settings, service_context)
super().__init__(
nodes=nodes,
index_struct=index_struct,
service_context=service_context,
show_progress=show_progress,
objects=objects,
**kwargs,
)
def as_retriever(
self,
retriever_mode: Union[str, TreeRetrieverMode] = TreeRetrieverMode.SELECT_LEAF,
embed_model: Optional[BaseEmbedding] = None,
**kwargs: Any,
) -> BaseRetriever:
# NOTE: lazy import
from llama_index.core.indices.tree.all_leaf_retriever import (
TreeAllLeafRetriever,
)
from llama_index.core.indices.tree.select_leaf_embedding_retriever import (
TreeSelectLeafEmbeddingRetriever,
)
from llama_index.core.indices.tree.select_leaf_retriever import (
TreeSelectLeafRetriever,
)
from llama_index.core.indices.tree.tree_root_retriever import (
TreeRootRetriever,
)
self._validate_build_tree_required(TreeRetrieverMode(retriever_mode))
if retriever_mode == TreeRetrieverMode.SELECT_LEAF:
return TreeSelectLeafRetriever(self, object_map=self._object_map, **kwargs)
elif retriever_mode == TreeRetrieverMode.SELECT_LEAF_EMBEDDING:
embed_model = embed_model or embed_model_from_settings_or_context(
Settings, self._service_context
)
return TreeSelectLeafEmbeddingRetriever(
self, embed_model=embed_model, object_map=self._object_map, **kwargs
)
elif retriever_mode == TreeRetrieverMode.ROOT:
return TreeRootRetriever(self, object_map=self._object_map, **kwargs)
elif retriever_mode == TreeRetrieverMode.ALL_LEAF:
return TreeAllLeafRetriever(self, object_map=self._object_map, **kwargs)
else:
raise ValueError(f"Unknown retriever mode: {retriever_mode}")
def _validate_build_tree_required(self, retriever_mode: TreeRetrieverMode) -> None:
"""Check if index supports modes that require trees."""
if retriever_mode in REQUIRE_TREE_MODES and not self.build_tree:
raise ValueError(
"Index was constructed without building trees, "
f"but retriever mode {retriever_mode} requires trees."
)
def _build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> IndexGraph:
"""Build the index from nodes."""
index_builder = GPTTreeIndexBuilder(
self.num_children,
self.summary_template,
service_context=self.service_context,
llm=self._llm,
use_async=self._use_async,
show_progress=self._show_progress,
docstore=self._docstore,
)
return index_builder.build_from_nodes(nodes, build_tree=self.build_tree)
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert a document."""
# TODO: allow to customize insert prompt
inserter = TreeIndexInserter(
self.index_struct,
service_context=self.service_context,
llm=self._llm,
num_children=self.num_children,
insert_prompt=self.insert_prompt,
summary_prompt=self.summary_template,
docstore=self._docstore,
)
inserter.insert(nodes)
def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
"""Delete a node."""
raise NotImplementedError("Delete not implemented for tree index.")
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
node_doc_ids = list(self.index_struct.all_nodes.values())
nodes = self.docstore.get_nodes(node_doc_ids)
all_ref_doc_info = {}
for node in nodes:
ref_node = node.source_node
if not ref_node:
continue
ref_doc_info = self.docstore.get_ref_doc_info(ref_node.node_id)
if not ref_doc_info:
continue
all_ref_doc_info[ref_node.node_id] = ref_doc_info
return all_ref_doc_info
# legacy
GPTTreeIndex = TreeIndex
| [
"llama_index.core.indices.tree.select_leaf_embedding_retriever.TreeSelectLeafEmbeddingRetriever",
"llama_index.core.settings.embed_model_from_settings_or_context",
"llama_index.core.indices.tree.inserter.TreeIndexInserter",
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.indices.tree.select_leaf_retriever.TreeSelectLeafRetriever",
"llama_index.core.indices.tree.all_leaf_retriever.TreeAllLeafRetriever",
"llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder",
"llama_index.core.indices.tree.tree_root_retriever.TreeRootRetriever"
] | [((5992, 6202), 'llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder', 'GPTTreeIndexBuilder', (['self.num_children', 'self.summary_template'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'use_async': 'self._use_async', 'show_progress': 'self._show_progress', 'docstore': 'self._docstore'}), '(self.num_children, self.summary_template,\n service_context=self.service_context, llm=self._llm, use_async=self.\n _use_async, show_progress=self._show_progress, docstore=self._docstore)\n', (6011, 6202), False, 'from llama_index.core.indices.common_tree.base import GPTTreeIndexBuilder\n'), ((6552, 6784), 'llama_index.core.indices.tree.inserter.TreeIndexInserter', 'TreeIndexInserter', (['self.index_struct'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'num_children': 'self.num_children', 'insert_prompt': 'self.insert_prompt', 'summary_prompt': 'self.summary_template', 'docstore': 'self._docstore'}), '(self.index_struct, service_context=self.service_context,\n llm=self._llm, num_children=self.num_children, insert_prompt=self.\n insert_prompt, summary_prompt=self.summary_template, docstore=self.\n _docstore)\n', (6569, 6784), False, 'from llama_index.core.indices.tree.inserter import TreeIndexInserter\n'), ((3443, 3498), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (3471, 3498), False, 'from llama_index.core.settings import Settings, embed_model_from_settings_or_context, llm_from_settings_or_context\n'), ((4636, 4704), 'llama_index.core.indices.tree.select_leaf_retriever.TreeSelectLeafRetriever', 'TreeSelectLeafRetriever', (['self'], {'object_map': 'self._object_map'}), '(self, object_map=self._object_map, **kwargs)\n', (4659, 4704), False, 'from llama_index.core.indices.tree.select_leaf_retriever import TreeSelectLeafRetriever\n'), ((4937, 5044), 'llama_index.core.indices.tree.select_leaf_embedding_retriever.TreeSelectLeafEmbeddingRetriever', 'TreeSelectLeafEmbeddingRetriever', (['self'], {'embed_model': 'embed_model', 'object_map': 'self._object_map'}), '(self, embed_model=embed_model, object_map=\n self._object_map, **kwargs)\n', (4969, 5044), False, 'from llama_index.core.indices.tree.select_leaf_embedding_retriever import TreeSelectLeafEmbeddingRetriever\n'), ((4818, 4887), 'llama_index.core.settings.embed_model_from_settings_or_context', 'embed_model_from_settings_or_context', (['Settings', 'self._service_context'], {}), '(Settings, self._service_context)\n', (4854, 4887), False, 'from llama_index.core.settings import Settings, embed_model_from_settings_or_context, llm_from_settings_or_context\n'), ((5144, 5206), 'llama_index.core.indices.tree.tree_root_retriever.TreeRootRetriever', 'TreeRootRetriever', (['self'], {'object_map': 'self._object_map'}), '(self, object_map=self._object_map, **kwargs)\n', (5161, 5206), False, 'from llama_index.core.indices.tree.tree_root_retriever import TreeRootRetriever\n'), ((5285, 5350), 'llama_index.core.indices.tree.all_leaf_retriever.TreeAllLeafRetriever', 'TreeAllLeafRetriever', (['self'], {'object_map': 'self._object_map'}), '(self, object_map=self._object_map, **kwargs)\n', (5305, 5350), False, 'from llama_index.core.indices.tree.all_leaf_retriever import TreeAllLeafRetriever\n')] |
"""Tree-based index."""
from enum import Enum
from typing import Any, Dict, Optional, Sequence, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.embeddings.base import BaseEmbedding
# from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.core.indices.base import BaseIndex
from llama_index.core.indices.common_tree.base import GPTTreeIndexBuilder
from llama_index.core.indices.tree.inserter import TreeIndexInserter
from llama_index.core.llms.llm import LLM
from llama_index.core.prompts import BasePromptTemplate
from llama_index.core.prompts.default_prompts import (
DEFAULT_INSERT_PROMPT,
DEFAULT_SUMMARY_PROMPT,
)
from llama_index.core.schema import BaseNode, IndexNode
from llama_index.core.service_context import ServiceContext
from llama_index.core.settings import (
Settings,
embed_model_from_settings_or_context,
llm_from_settings_or_context,
)
from llama_index.core.storage.docstore.types import RefDocInfo
class TreeRetrieverMode(str, Enum):
SELECT_LEAF = "select_leaf"
SELECT_LEAF_EMBEDDING = "select_leaf_embedding"
ALL_LEAF = "all_leaf"
ROOT = "root"
REQUIRE_TREE_MODES = {
TreeRetrieverMode.SELECT_LEAF,
TreeRetrieverMode.SELECT_LEAF_EMBEDDING,
TreeRetrieverMode.ROOT,
}
class TreeIndex(BaseIndex[IndexGraph]):
"""Tree Index.
The tree index is a tree-structured index, where each node is a summary of
the children nodes. During index construction, the tree is constructed
in a bottoms-up fashion until we end up with a set of root_nodes.
There are a few different options during query time (see :ref:`Ref-Query`).
The main option is to traverse down the tree from the root nodes.
A secondary answer is to directly synthesize the answer from the root nodes.
Args:
summary_template (Optional[BasePromptTemplate]): A Summarization Prompt
(see :ref:`Prompt-Templates`).
insert_prompt (Optional[BasePromptTemplate]): An Tree Insertion Prompt
(see :ref:`Prompt-Templates`).
num_children (int): The number of children each node should have.
build_tree (bool): Whether to build the tree during index construction.
show_progress (bool): Whether to show progress bars. Defaults to False.
"""
index_struct_cls = IndexGraph
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
objects: Optional[Sequence[IndexNode]] = None,
index_struct: Optional[IndexGraph] = None,
llm: Optional[LLM] = None,
summary_template: Optional[BasePromptTemplate] = None,
insert_prompt: Optional[BasePromptTemplate] = None,
num_children: int = 10,
build_tree: bool = True,
use_async: bool = False,
show_progress: bool = False,
# deprecated
service_context: Optional[ServiceContext] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
# need to set parameters before building index in base class.
self.num_children = num_children
self.summary_template = summary_template or DEFAULT_SUMMARY_PROMPT
self.insert_prompt: BasePromptTemplate = insert_prompt or DEFAULT_INSERT_PROMPT
self.build_tree = build_tree
self._use_async = use_async
self._llm = llm or llm_from_settings_or_context(Settings, service_context)
super().__init__(
nodes=nodes,
index_struct=index_struct,
service_context=service_context,
show_progress=show_progress,
objects=objects,
**kwargs,
)
def as_retriever(
self,
retriever_mode: Union[str, TreeRetrieverMode] = TreeRetrieverMode.SELECT_LEAF,
embed_model: Optional[BaseEmbedding] = None,
**kwargs: Any,
) -> BaseRetriever:
# NOTE: lazy import
from llama_index.core.indices.tree.all_leaf_retriever import (
TreeAllLeafRetriever,
)
from llama_index.core.indices.tree.select_leaf_embedding_retriever import (
TreeSelectLeafEmbeddingRetriever,
)
from llama_index.core.indices.tree.select_leaf_retriever import (
TreeSelectLeafRetriever,
)
from llama_index.core.indices.tree.tree_root_retriever import (
TreeRootRetriever,
)
self._validate_build_tree_required(TreeRetrieverMode(retriever_mode))
if retriever_mode == TreeRetrieverMode.SELECT_LEAF:
return TreeSelectLeafRetriever(self, object_map=self._object_map, **kwargs)
elif retriever_mode == TreeRetrieverMode.SELECT_LEAF_EMBEDDING:
embed_model = embed_model or embed_model_from_settings_or_context(
Settings, self._service_context
)
return TreeSelectLeafEmbeddingRetriever(
self, embed_model=embed_model, object_map=self._object_map, **kwargs
)
elif retriever_mode == TreeRetrieverMode.ROOT:
return TreeRootRetriever(self, object_map=self._object_map, **kwargs)
elif retriever_mode == TreeRetrieverMode.ALL_LEAF:
return TreeAllLeafRetriever(self, object_map=self._object_map, **kwargs)
else:
raise ValueError(f"Unknown retriever mode: {retriever_mode}")
def _validate_build_tree_required(self, retriever_mode: TreeRetrieverMode) -> None:
"""Check if index supports modes that require trees."""
if retriever_mode in REQUIRE_TREE_MODES and not self.build_tree:
raise ValueError(
"Index was constructed without building trees, "
f"but retriever mode {retriever_mode} requires trees."
)
def _build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> IndexGraph:
"""Build the index from nodes."""
index_builder = GPTTreeIndexBuilder(
self.num_children,
self.summary_template,
service_context=self.service_context,
llm=self._llm,
use_async=self._use_async,
show_progress=self._show_progress,
docstore=self._docstore,
)
return index_builder.build_from_nodes(nodes, build_tree=self.build_tree)
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert a document."""
# TODO: allow to customize insert prompt
inserter = TreeIndexInserter(
self.index_struct,
service_context=self.service_context,
llm=self._llm,
num_children=self.num_children,
insert_prompt=self.insert_prompt,
summary_prompt=self.summary_template,
docstore=self._docstore,
)
inserter.insert(nodes)
def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
"""Delete a node."""
raise NotImplementedError("Delete not implemented for tree index.")
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
node_doc_ids = list(self.index_struct.all_nodes.values())
nodes = self.docstore.get_nodes(node_doc_ids)
all_ref_doc_info = {}
for node in nodes:
ref_node = node.source_node
if not ref_node:
continue
ref_doc_info = self.docstore.get_ref_doc_info(ref_node.node_id)
if not ref_doc_info:
continue
all_ref_doc_info[ref_node.node_id] = ref_doc_info
return all_ref_doc_info
# legacy
GPTTreeIndex = TreeIndex
| [
"llama_index.core.indices.tree.select_leaf_embedding_retriever.TreeSelectLeafEmbeddingRetriever",
"llama_index.core.settings.embed_model_from_settings_or_context",
"llama_index.core.indices.tree.inserter.TreeIndexInserter",
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.indices.tree.select_leaf_retriever.TreeSelectLeafRetriever",
"llama_index.core.indices.tree.all_leaf_retriever.TreeAllLeafRetriever",
"llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder",
"llama_index.core.indices.tree.tree_root_retriever.TreeRootRetriever"
] | [((5992, 6202), 'llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder', 'GPTTreeIndexBuilder', (['self.num_children', 'self.summary_template'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'use_async': 'self._use_async', 'show_progress': 'self._show_progress', 'docstore': 'self._docstore'}), '(self.num_children, self.summary_template,\n service_context=self.service_context, llm=self._llm, use_async=self.\n _use_async, show_progress=self._show_progress, docstore=self._docstore)\n', (6011, 6202), False, 'from llama_index.core.indices.common_tree.base import GPTTreeIndexBuilder\n'), ((6552, 6784), 'llama_index.core.indices.tree.inserter.TreeIndexInserter', 'TreeIndexInserter', (['self.index_struct'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'num_children': 'self.num_children', 'insert_prompt': 'self.insert_prompt', 'summary_prompt': 'self.summary_template', 'docstore': 'self._docstore'}), '(self.index_struct, service_context=self.service_context,\n llm=self._llm, num_children=self.num_children, insert_prompt=self.\n insert_prompt, summary_prompt=self.summary_template, docstore=self.\n _docstore)\n', (6569, 6784), False, 'from llama_index.core.indices.tree.inserter import TreeIndexInserter\n'), ((3443, 3498), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (3471, 3498), False, 'from llama_index.core.settings import Settings, embed_model_from_settings_or_context, llm_from_settings_or_context\n'), ((4636, 4704), 'llama_index.core.indices.tree.select_leaf_retriever.TreeSelectLeafRetriever', 'TreeSelectLeafRetriever', (['self'], {'object_map': 'self._object_map'}), '(self, object_map=self._object_map, **kwargs)\n', (4659, 4704), False, 'from llama_index.core.indices.tree.select_leaf_retriever import TreeSelectLeafRetriever\n'), ((4937, 5044), 'llama_index.core.indices.tree.select_leaf_embedding_retriever.TreeSelectLeafEmbeddingRetriever', 'TreeSelectLeafEmbeddingRetriever', (['self'], {'embed_model': 'embed_model', 'object_map': 'self._object_map'}), '(self, embed_model=embed_model, object_map=\n self._object_map, **kwargs)\n', (4969, 5044), False, 'from llama_index.core.indices.tree.select_leaf_embedding_retriever import TreeSelectLeafEmbeddingRetriever\n'), ((4818, 4887), 'llama_index.core.settings.embed_model_from_settings_or_context', 'embed_model_from_settings_or_context', (['Settings', 'self._service_context'], {}), '(Settings, self._service_context)\n', (4854, 4887), False, 'from llama_index.core.settings import Settings, embed_model_from_settings_or_context, llm_from_settings_or_context\n'), ((5144, 5206), 'llama_index.core.indices.tree.tree_root_retriever.TreeRootRetriever', 'TreeRootRetriever', (['self'], {'object_map': 'self._object_map'}), '(self, object_map=self._object_map, **kwargs)\n', (5161, 5206), False, 'from llama_index.core.indices.tree.tree_root_retriever import TreeRootRetriever\n'), ((5285, 5350), 'llama_index.core.indices.tree.all_leaf_retriever.TreeAllLeafRetriever', 'TreeAllLeafRetriever', (['self'], {'object_map': 'self._object_map'}), '(self, object_map=self._object_map, **kwargs)\n', (5305, 5350), False, 'from llama_index.core.indices.tree.all_leaf_retriever import TreeAllLeafRetriever\n')] |
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import (
llm_chat_callback,
llm_completion_callback,
)
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
EXAMPLE_URL = "https://clarifai.com/anthropic/completion/models/claude-v2"
class Clarifai(LLM):
model_url: Optional[str] = Field(
description=f"Full URL of the model. e.g. `{EXAMPLE_URL}`"
)
model_version_id: Optional[str] = Field(description="Model Version ID.")
app_id: Optional[str] = Field(description="Clarifai application ID of the model.")
user_id: Optional[str] = Field(description="Clarifai user ID of the model.")
pat: Optional[str] = Field(
description="Personal Access Tokens(PAT) to validate requests."
)
_model: Any = PrivateAttr()
_is_chat_model: bool = PrivateAttr()
def __init__(
self,
model_name: Optional[str] = None,
model_url: Optional[str] = None,
model_version_id: Optional[str] = "",
app_id: Optional[str] = None,
user_id: Optional[str] = None,
pat: Optional[str] = None,
temperature: float = 0.1,
max_tokens: int = 512,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
):
try:
import os
from clarifai.client.model import Model
except ImportError:
raise ImportError("ClarifaiLLM requires `pip install clarifai`.")
if pat is None and os.environ.get("CLARIFAI_PAT") is not None:
pat = os.environ.get("CLARIFAI_PAT")
if not pat and os.environ.get("CLARIFAI_PAT") is None:
raise ValueError(
"Set `CLARIFAI_PAT` as env variable or pass `pat` as constructor argument"
)
if model_url is not None and model_name is not None:
raise ValueError("You can only specify one of model_url or model_name.")
if model_url is None and model_name is None:
raise ValueError("You must specify one of model_url or model_name.")
if model_name is not None:
if app_id is None or user_id is None:
raise ValueError(
f"Missing one app ID or user ID of the model: {app_id=}, {user_id=}"
)
else:
self._model = Model(
user_id=user_id,
app_id=app_id,
model_id=model_name,
model_version={"id": model_version_id},
pat=pat,
)
if model_url is not None:
self._model = Model(model_url, pat=pat)
model_name = self._model.id
self._is_chat_model = False
if "chat" in self._model.app_id or "chat" in self._model.id:
self._is_chat_model = True
additional_kwargs = additional_kwargs or {}
super().__init__(
temperature=temperature,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs,
callback_manager=callback_manager,
model_name=model_name,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "ClarifaiLLM"
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
context_window=self.context_window,
num_output=self.max_tokens,
model_name=self._model,
is_chat_model=self._is_chat_model,
)
# TODO: When the Clarifai python SDK supports inference params, add here.
def chat(
self,
messages: Sequence[ChatMessage],
inference_params: Optional[Dict] = {},
**kwargs: Any,
) -> ChatResponse:
"""Chat endpoint for LLM."""
prompt = "".join([str(m) for m in messages])
try:
response = (
self._model.predict_by_bytes(
input_bytes=prompt.encode(encoding="UTF-8"),
input_type="text",
inference_params=inference_params,
)
.outputs[0]
.data.text.raw
)
except Exception as e:
raise Exception(f"Prediction failed: {e}")
return ChatResponse(message=ChatMessage(content=response))
def complete(
self,
prompt: str,
formatted: bool = False,
inference_params: Optional[Dict] = {},
**kwargs: Any,
) -> CompletionResponse:
"""Completion endpoint for LLM."""
try:
response = (
self._model.predict_by_bytes(
input_bytes=prompt.encode(encoding="utf-8"),
input_type="text",
inference_params=inference_params,
)
.outputs[0]
.data.text.raw
)
except Exception as e:
raise Exception(f"Prediction failed: {e}")
return CompletionResponse(text=response)
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
raise NotImplementedError(
"Clarifai does not currently support streaming completion."
)
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise NotImplementedError(
"Clarifai does not currently support streaming completion."
)
@llm_chat_callback()
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
raise NotImplementedError("Currently not supported.")
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return self.complete(prompt, **kwargs)
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
raise NotImplementedError("Currently not supported.")
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
raise NotImplementedError("Clarifai does not currently support this function.")
| [
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.core.llms.types.ChatMessage",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.core.llms.types.CompletionResponse"
] | [((762, 827), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': 'f"""Full URL of the model. e.g. `{EXAMPLE_URL}`"""'}), "(description=f'Full URL of the model. e.g. `{EXAMPLE_URL}`')\n", (767, 827), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((880, 918), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Model Version ID."""'}), "(description='Model Version ID.')\n", (885, 918), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((947, 1005), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Clarifai application ID of the model."""'}), "(description='Clarifai application ID of the model.')\n", (952, 1005), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1035, 1086), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Clarifai user ID of the model."""'}), "(description='Clarifai user ID of the model.')\n", (1040, 1086), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1112, 1182), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Personal Access Tokens(PAT) to validate requests."""'}), "(description='Personal Access Tokens(PAT) to validate requests.')\n", (1117, 1182), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1216, 1229), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1227, 1229), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1257, 1270), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1268, 1270), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((6542, 6561), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6559, 6561), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6735, 6760), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (6758, 6760), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6934, 6953), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6951, 6953), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((7142, 7167), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7165, 7167), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4359, 4497), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'self.context_window', 'num_output': 'self.max_tokens', 'model_name': 'self._model', 'is_chat_model': 'self._is_chat_model'}), '(context_window=self.context_window, num_output=self.max_tokens,\n model_name=self._model, is_chat_model=self._is_chat_model)\n', (4370, 4497), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((6035, 6068), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response'}), '(text=response)\n', (6053, 6068), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, 
ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((2360, 2390), 'os.environ.get', 'os.environ.get', (['"""CLARIFAI_PAT"""'], {}), "('CLARIFAI_PAT')\n", (2374, 2390), False, 'import os\n'), ((3434, 3459), 'clarifai.client.model.Model', 'Model', (['model_url'], {'pat': 'pat'}), '(model_url, pat=pat)\n', (3439, 3459), False, 'from clarifai.client.model import Model\n'), ((2298, 2328), 'os.environ.get', 'os.environ.get', (['"""CLARIFAI_PAT"""'], {}), "('CLARIFAI_PAT')\n", (2312, 2328), False, 'import os\n'), ((2415, 2445), 'os.environ.get', 'os.environ.get', (['"""CLARIFAI_PAT"""'], {}), "('CLARIFAI_PAT')\n", (2429, 2445), False, 'import os\n'), ((3146, 3258), 'clarifai.client.model.Model', 'Model', ([], {'user_id': 'user_id', 'app_id': 'app_id', 'model_id': 'model_name', 'model_version': "{'id': model_version_id}", 'pat': 'pat'}), "(user_id=user_id, app_id=app_id, model_id=model_name, model_version={\n 'id': model_version_id}, pat=pat)\n", (3151, 3258), False, 'from clarifai.client.model import Model\n'), ((5340, 5369), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'content': 'response'}), '(content=response)\n', (5351, 5369), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n')] |
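A hedged usage sketch for the Clarifai sample above: it assumes CLARIFAI_PAT is exported, the clarifai package is installed, and that the class lives at the assumed path llama_index.legacy.llms.clarifai; the model URL reuses the EXAMPLE_URL value from the sample.
# Hedged usage sketch; assumes CLARIFAI_PAT is set, `pip install clarifai` has been
# run, and the import path below matches where the class above is defined.
from llama_index.legacy.llms.clarifai import Clarifai

llm = Clarifai(model_url="https://clarifai.com/anthropic/completion/models/claude-v2")
response = llm.complete("Say hello in one short sentence.")
print(response.text)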
"""PII postprocessor."""
import json
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple
from llama_index.core.llms.llm import LLM
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
DEFAULT_PII_TMPL = (
"The current context information is provided. \n"
"A task is also provided to mask the PII within the context. \n"
"Return the text, with all PII masked out, and a mapping of the original PII "
"to the masked PII. \n"
"Return the output of the task in JSON. \n"
"Context:\n"
"Hello Zhang Wei, I am John. "
"Your AnyCompany Financial Services, "
"LLC credit card account 1111-0000-1111-0008 "
"has a minimum payment of $24.53 that is due "
"by July 31st. Based on your autopay settings, we will withdraw your payment. "
"Task: Mask out the PII, replace each PII with a tag, and return the text. Return the mapping in JSON. \n"
"Output: \n"
"Hello [NAME1], I am [NAME2]. "
"Your AnyCompany Financial Services, "
"LLC credit card account [CREDIT_CARD_NUMBER] "
"has a minimum payment of $24.53 that is due "
"by [DATE_TIME]. Based on your autopay settings, we will withdraw your payment. "
"Output Mapping:\n"
'{{"NAME1": "Zhang Wei", "NAME2": "John", "CREDIT_CARD_NUMBER": "1111-0000-1111-0008", "DATE_TIME": "July 31st"}}\n'
"Context:\n{context_str}\n"
"Task: {query_str}\n"
"Output: \n"
""
)
class PIINodePostprocessor(BaseNodePostprocessor):
"""PII Node processor.
NOTE: this is a beta feature, the API might change.
Args:
llm (LLM): The local LLM to use for prediction.
"""
llm: LLM
pii_str_tmpl: str = DEFAULT_PII_TMPL
pii_node_info_key: str = "__pii_node_info__"
@classmethod
def class_name(cls) -> str:
return "PIINodePostprocessor"
def mask_pii(self, text: str) -> Tuple[str, Dict]:
"""Mask PII in text."""
pii_prompt = PromptTemplate(self.pii_str_tmpl)
# TODO: allow customization
task_str = (
"Mask out the PII, replace each PII with a tag, and return the text. "
"Return the mapping in JSON."
)
response = self.llm.predict(pii_prompt, context_str=text, query_str=task_str)
splits = response.split("Output Mapping:")
text_output = splits[0].strip()
json_str_output = splits[1].strip()
json_dict = json.loads(json_str_output)
return text_output, json_dict
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Postprocess nodes."""
# swap out text from nodes, with the original node mappings
new_nodes = []
for node_with_score in nodes:
node = node_with_score.node
new_text, mapping_info = self.mask_pii(
node.get_content(metadata_mode=MetadataMode.LLM)
)
new_node = deepcopy(node)
new_node.excluded_embed_metadata_keys.append(self.pii_node_info_key)
new_node.excluded_llm_metadata_keys.append(self.pii_node_info_key)
new_node.metadata[self.pii_node_info_key] = mapping_info
new_node.set_content(new_text)
new_nodes.append(NodeWithScore(node=new_node, score=node_with_score.score))
return new_nodes
class NERPIINodePostprocessor(BaseNodePostprocessor):
"""NER PII Node processor.
Uses a HF transformers model.
"""
pii_node_info_key: str = "__pii_node_info__"
@classmethod
def class_name(cls) -> str:
return "NERPIINodePostprocessor"
def mask_pii(self, ner: Callable, text: str) -> Tuple[str, Dict]:
"""Mask PII in text."""
new_text = text
response = ner(text)
mapping = {}
for entry in response:
entity_group_tag = f"[{entry['entity_group']}_{entry['start']}]"
new_text = new_text.replace(entry["word"], entity_group_tag).strip()
mapping[entity_group_tag] = entry["word"]
return new_text, mapping
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Postprocess nodes."""
from transformers import pipeline # pants: no-infer-dep
ner = pipeline("ner", grouped_entities=True)
# swap out text from nodes, with the original node mappings
new_nodes = []
for node_with_score in nodes:
node = node_with_score.node
new_text, mapping_info = self.mask_pii(
ner, node.get_content(metadata_mode=MetadataMode.LLM)
)
new_node = deepcopy(node)
new_node.excluded_embed_metadata_keys.append(self.pii_node_info_key)
new_node.excluded_llm_metadata_keys.append(self.pii_node_info_key)
new_node.metadata[self.pii_node_info_key] = mapping_info
new_node.set_content(new_text)
new_nodes.append(NodeWithScore(node=new_node, score=node_with_score.score))
return new_nodes
| [
"llama_index.core.prompts.base.PromptTemplate",
"llama_index.core.schema.NodeWithScore"
] | [((2092, 2125), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['self.pii_str_tmpl'], {}), '(self.pii_str_tmpl)\n', (2106, 2125), False, 'from llama_index.core.prompts.base import PromptTemplate\n'), ((2560, 2587), 'json.loads', 'json.loads', (['json_str_output'], {}), '(json_str_output)\n', (2570, 2587), False, 'import json\n'), ((4543, 4581), 'transformers.pipeline', 'pipeline', (['"""ner"""'], {'grouped_entities': '(True)'}), "('ner', grouped_entities=True)\n", (4551, 4581), False, 'from transformers import pipeline\n'), ((3143, 3157), 'copy.deepcopy', 'deepcopy', (['node'], {}), '(node)\n', (3151, 3157), False, 'from copy import deepcopy\n'), ((4911, 4925), 'copy.deepcopy', 'deepcopy', (['node'], {}), '(node)\n', (4919, 4925), False, 'from copy import deepcopy\n'), ((3459, 3516), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'new_node', 'score': 'node_with_score.score'}), '(node=new_node, score=node_with_score.score)\n', (3472, 3516), False, 'from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle\n'), ((5227, 5284), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'new_node', 'score': 'node_with_score.score'}), '(node=new_node, score=node_with_score.score)\n', (5240, 5284), False, 'from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle\n')] |
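# --- Illustrative usage sketch (editor addition, not part of the dataset row above) ---
# A minimal, hedged example of masking PII with the NERPIINodePostprocessor defined above.
# Assumptions (not verified here): `transformers` is installed, llama_index exposes
# TextNode in llama_index.core.schema, and BaseNodePostprocessor provides the public
# postprocess_nodes() wrapper around _postprocess_nodes().
from llama_index.core.schema import NodeWithScore, TextNode
nodes = [NodeWithScore(node=TextNode(text="Hello Zhang Wei, I am John."), score=1.0)]
masked = NERPIINodePostprocessor().postprocess_nodes(nodes)
print(masked[0].node.get_content())  # text with [ENTITY-GROUP_offset] tags substituted for names
print(masked[0].node.metadata["__pii_node_info__"])  # tag -> original value mapping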
"""PII postprocessor."""
import json
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple
from llama_index.core.llms.llm import LLM
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
DEFAULT_PII_TMPL = (
"The current context information is provided. \n"
"A task is also provided to mask the PII within the context. \n"
"Return the text, with all PII masked out, and a mapping of the original PII "
"to the masked PII. \n"
"Return the output of the task in JSON. \n"
"Context:\n"
"Hello Zhang Wei, I am John. "
"Your AnyCompany Financial Services, "
"LLC credit card account 1111-0000-1111-0008 "
"has a minimum payment of $24.53 that is due "
"by July 31st. Based on your autopay settings, we will withdraw your payment. "
"Task: Mask out the PII, replace each PII with a tag, and return the text. Return the mapping in JSON. \n"
"Output: \n"
"Hello [NAME1], I am [NAME2]. "
"Your AnyCompany Financial Services, "
"LLC credit card account [CREDIT_CARD_NUMBER] "
"has a minimum payment of $24.53 that is due "
"by [DATE_TIME]. Based on your autopay settings, we will withdraw your payment. "
"Output Mapping:\n"
'{{"NAME1": "Zhang Wei", "NAME2": "John", "CREDIT_CARD_NUMBER": "1111-0000-1111-0008", "DATE_TIME": "July 31st"}}\n'
"Context:\n{context_str}\n"
"Task: {query_str}\n"
"Output: \n"
""
)
class PIINodePostprocessor(BaseNodePostprocessor):
"""PII Node processor.
NOTE: this is a beta feature, the API might change.
Args:
llm (LLM): The local LLM to use for prediction.
"""
llm: LLM
pii_str_tmpl: str = DEFAULT_PII_TMPL
pii_node_info_key: str = "__pii_node_info__"
@classmethod
def class_name(cls) -> str:
return "PIINodePostprocessor"
def mask_pii(self, text: str) -> Tuple[str, Dict]:
"""Mask PII in text."""
pii_prompt = PromptTemplate(self.pii_str_tmpl)
# TODO: allow customization
task_str = (
"Mask out the PII, replace each PII with a tag, and return the text. "
"Return the mapping in JSON."
)
response = self.llm.predict(pii_prompt, context_str=text, query_str=task_str)
splits = response.split("Output Mapping:")
text_output = splits[0].strip()
json_str_output = splits[1].strip()
json_dict = json.loads(json_str_output)
return text_output, json_dict
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Postprocess nodes."""
# swap out text from nodes, with the original node mappings
new_nodes = []
for node_with_score in nodes:
node = node_with_score.node
new_text, mapping_info = self.mask_pii(
node.get_content(metadata_mode=MetadataMode.LLM)
)
new_node = deepcopy(node)
new_node.excluded_embed_metadata_keys.append(self.pii_node_info_key)
new_node.excluded_llm_metadata_keys.append(self.pii_node_info_key)
new_node.metadata[self.pii_node_info_key] = mapping_info
new_node.set_content(new_text)
new_nodes.append(NodeWithScore(node=new_node, score=node_with_score.score))
return new_nodes
class NERPIINodePostprocessor(BaseNodePostprocessor):
"""NER PII Node processor.
Uses a HF transformers model.
"""
pii_node_info_key: str = "__pii_node_info__"
@classmethod
def class_name(cls) -> str:
return "NERPIINodePostprocessor"
def mask_pii(self, ner: Callable, text: str) -> Tuple[str, Dict]:
"""Mask PII in text."""
new_text = text
response = ner(text)
mapping = {}
for entry in response:
entity_group_tag = f"[{entry['entity_group']}_{entry['start']}]"
new_text = new_text.replace(entry["word"], entity_group_tag).strip()
mapping[entity_group_tag] = entry["word"]
return new_text, mapping
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Postprocess nodes."""
from transformers import pipeline # pants: no-infer-dep
ner = pipeline("ner", grouped_entities=True)
# swap out text from nodes, with the original node mappings
new_nodes = []
for node_with_score in nodes:
node = node_with_score.node
new_text, mapping_info = self.mask_pii(
ner, node.get_content(metadata_mode=MetadataMode.LLM)
)
new_node = deepcopy(node)
new_node.excluded_embed_metadata_keys.append(self.pii_node_info_key)
new_node.excluded_llm_metadata_keys.append(self.pii_node_info_key)
new_node.metadata[self.pii_node_info_key] = mapping_info
new_node.set_content(new_text)
new_nodes.append(NodeWithScore(node=new_node, score=node_with_score.score))
return new_nodes
| [
"llama_index.core.prompts.base.PromptTemplate",
"llama_index.core.schema.NodeWithScore"
] | [((2092, 2125), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['self.pii_str_tmpl'], {}), '(self.pii_str_tmpl)\n', (2106, 2125), False, 'from llama_index.core.prompts.base import PromptTemplate\n'), ((2560, 2587), 'json.loads', 'json.loads', (['json_str_output'], {}), '(json_str_output)\n', (2570, 2587), False, 'import json\n'), ((4543, 4581), 'transformers.pipeline', 'pipeline', (['"""ner"""'], {'grouped_entities': '(True)'}), "('ner', grouped_entities=True)\n", (4551, 4581), False, 'from transformers import pipeline\n'), ((3143, 3157), 'copy.deepcopy', 'deepcopy', (['node'], {}), '(node)\n', (3151, 3157), False, 'from copy import deepcopy\n'), ((4911, 4925), 'copy.deepcopy', 'deepcopy', (['node'], {}), '(node)\n', (4919, 4925), False, 'from copy import deepcopy\n'), ((3459, 3516), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'new_node', 'score': 'node_with_score.score'}), '(node=new_node, score=node_with_score.score)\n', (3472, 3516), False, 'from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle\n'), ((5227, 5284), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'new_node', 'score': 'node_with_score.score'}), '(node=new_node, score=node_with_score.score)\n', (5240, 5284), False, 'from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle\n')] |
"""PII postprocessor."""
import json
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple
from llama_index.core.llms.llm import LLM
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
DEFAULT_PII_TMPL = (
"The current context information is provided. \n"
"A task is also provided to mask the PII within the context. \n"
"Return the text, with all PII masked out, and a mapping of the original PII "
"to the masked PII. \n"
"Return the output of the task in JSON. \n"
"Context:\n"
"Hello Zhang Wei, I am John. "
"Your AnyCompany Financial Services, "
"LLC credit card account 1111-0000-1111-0008 "
"has a minimum payment of $24.53 that is due "
"by July 31st. Based on your autopay settings, we will withdraw your payment. "
"Task: Mask out the PII, replace each PII with a tag, and return the text. Return the mapping in JSON. \n"
"Output: \n"
"Hello [NAME1], I am [NAME2]. "
"Your AnyCompany Financial Services, "
"LLC credit card account [CREDIT_CARD_NUMBER] "
"has a minimum payment of $24.53 that is due "
"by [DATE_TIME]. Based on your autopay settings, we will withdraw your payment. "
"Output Mapping:\n"
'{{"NAME1": "Zhang Wei", "NAME2": "John", "CREDIT_CARD_NUMBER": "1111-0000-1111-0008", "DATE_TIME": "July 31st"}}\n'
"Context:\n{context_str}\n"
"Task: {query_str}\n"
"Output: \n"
""
)
class PIINodePostprocessor(BaseNodePostprocessor):
"""PII Node processor.
NOTE: this is a beta feature, the API might change.
Args:
llm (LLM): The local LLM to use for prediction.
"""
llm: LLM
pii_str_tmpl: str = DEFAULT_PII_TMPL
pii_node_info_key: str = "__pii_node_info__"
@classmethod
def class_name(cls) -> str:
return "PIINodePostprocessor"
def mask_pii(self, text: str) -> Tuple[str, Dict]:
"""Mask PII in text."""
pii_prompt = PromptTemplate(self.pii_str_tmpl)
# TODO: allow customization
task_str = (
"Mask out the PII, replace each PII with a tag, and return the text. "
"Return the mapping in JSON."
)
response = self.llm.predict(pii_prompt, context_str=text, query_str=task_str)
splits = response.split("Output Mapping:")
text_output = splits[0].strip()
json_str_output = splits[1].strip()
json_dict = json.loads(json_str_output)
return text_output, json_dict
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Postprocess nodes."""
# swap out text from nodes, with the original node mappings
new_nodes = []
for node_with_score in nodes:
node = node_with_score.node
new_text, mapping_info = self.mask_pii(
node.get_content(metadata_mode=MetadataMode.LLM)
)
new_node = deepcopy(node)
new_node.excluded_embed_metadata_keys.append(self.pii_node_info_key)
new_node.excluded_llm_metadata_keys.append(self.pii_node_info_key)
new_node.metadata[self.pii_node_info_key] = mapping_info
new_node.set_content(new_text)
new_nodes.append(NodeWithScore(node=new_node, score=node_with_score.score))
return new_nodes
class NERPIINodePostprocessor(BaseNodePostprocessor):
"""NER PII Node processor.
Uses a HF transformers model.
"""
pii_node_info_key: str = "__pii_node_info__"
@classmethod
def class_name(cls) -> str:
return "NERPIINodePostprocessor"
def mask_pii(self, ner: Callable, text: str) -> Tuple[str, Dict]:
"""Mask PII in text."""
new_text = text
response = ner(text)
mapping = {}
for entry in response:
entity_group_tag = f"[{entry['entity_group']}_{entry['start']}]"
new_text = new_text.replace(entry["word"], entity_group_tag).strip()
mapping[entity_group_tag] = entry["word"]
return new_text, mapping
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Postprocess nodes."""
from transformers import pipeline # pants: no-infer-dep
ner = pipeline("ner", grouped_entities=True)
# swap out text from nodes, with the original node mappings
new_nodes = []
for node_with_score in nodes:
node = node_with_score.node
new_text, mapping_info = self.mask_pii(
ner, node.get_content(metadata_mode=MetadataMode.LLM)
)
new_node = deepcopy(node)
new_node.excluded_embed_metadata_keys.append(self.pii_node_info_key)
new_node.excluded_llm_metadata_keys.append(self.pii_node_info_key)
new_node.metadata[self.pii_node_info_key] = mapping_info
new_node.set_content(new_text)
new_nodes.append(NodeWithScore(node=new_node, score=node_with_score.score))
return new_nodes
| [
"llama_index.core.prompts.base.PromptTemplate",
"llama_index.core.schema.NodeWithScore"
] | [((2092, 2125), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['self.pii_str_tmpl'], {}), '(self.pii_str_tmpl)\n', (2106, 2125), False, 'from llama_index.core.prompts.base import PromptTemplate\n'), ((2560, 2587), 'json.loads', 'json.loads', (['json_str_output'], {}), '(json_str_output)\n', (2570, 2587), False, 'import json\n'), ((4543, 4581), 'transformers.pipeline', 'pipeline', (['"""ner"""'], {'grouped_entities': '(True)'}), "('ner', grouped_entities=True)\n", (4551, 4581), False, 'from transformers import pipeline\n'), ((3143, 3157), 'copy.deepcopy', 'deepcopy', (['node'], {}), '(node)\n', (3151, 3157), False, 'from copy import deepcopy\n'), ((4911, 4925), 'copy.deepcopy', 'deepcopy', (['node'], {}), '(node)\n', (4919, 4925), False, 'from copy import deepcopy\n'), ((3459, 3516), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'new_node', 'score': 'node_with_score.score'}), '(node=new_node, score=node_with_score.score)\n', (3472, 3516), False, 'from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle\n'), ((5227, 5284), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'new_node', 'score': 'node_with_score.score'}), '(node=new_node, score=node_with_score.score)\n', (5240, 5284), False, 'from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle\n')] |
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE
from llama_index.legacy.core.llms.types import ChatMessage, LLMMetadata
from llama_index.legacy.llms.everlyai_utils import everlyai_modelname_to_contextsize
from llama_index.legacy.llms.generic_utils import get_from_param_or_env
from llama_index.legacy.llms.openai import OpenAI
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
EVERLYAI_API_BASE = "https://everlyai.xyz/hosted"
DEFAULT_MODEL = "meta-llama/Llama-2-7b-chat-hf"
class EverlyAI(OpenAI):
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
api_key = get_from_param_or_env("api_key", api_key, "EverlyAI_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=EVERLYAI_API_BASE,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "EverlyAI_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=everlyai_modelname_to_contextsize(self.model),
num_output=self.max_tokens,
is_chat_model=True,
model_name=self.model,
)
@property
def _is_chat_model(self) -> bool:
return True
| [
"llama_index.legacy.llms.generic_utils.get_from_param_or_env",
"llama_index.legacy.callbacks.CallbackManager",
"llama_index.legacy.llms.everlyai_utils.everlyai_modelname_to_contextsize"
] | [((1525, 1586), 'llama_index.legacy.llms.generic_utils.get_from_param_or_env', 'get_from_param_or_env', (['"""api_key"""', 'api_key', '"""EverlyAI_API_KEY"""'], {}), "('api_key', api_key, 'EverlyAI_API_KEY')\n", (1546, 1586), False, 'from llama_index.legacy.llms.generic_utils import get_from_param_or_env\n'), ((1486, 1505), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (1501, 1505), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((2357, 2402), 'llama_index.legacy.llms.everlyai_utils.everlyai_modelname_to_contextsize', 'everlyai_modelname_to_contextsize', (['self.model'], {}), '(self.model)\n', (2390, 2402), False, 'from llama_index.legacy.llms.everlyai_utils import everlyai_modelname_to_contextsize\n')] |
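# --- Illustrative usage sketch (editor addition, not part of the dataset row above) ---
# A hedged example of instantiating the EverlyAI wrapper defined above; it assumes a valid
# EverlyAI API key and relies on the OpenAI-style complete() method inherited from the
# parent OpenAI class.
llm = EverlyAI(api_key="YOUR_EVERLYAI_KEY", temperature=0.2, max_tokens=128)
print(llm.metadata.context_window)  # resolved via everlyai_modelname_to_contextsize
response = llm.complete("Say hello in one short sentence.")
print(response.text)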
"""txtai reader."""
from typing import Any, Dict, List
import numpy as np
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class TxtaiReader(BaseReader):
"""txtai reader.
Retrieves documents through an existing in-memory txtai index.
These documents can then be used in a downstream LlamaIndex data structure.
    If you wish to use txtai itself as an index to organize documents,
insert documents, and perform queries on them, please use VectorStoreIndex
with TxtaiVectorStore.
Args:
txtai_index (txtai.ann.ANN): A txtai Index object (required)
"""
def __init__(self, index: Any):
"""Initialize with parameters."""
import_err_msg = """
`txtai` package not found. For instructions on
how to install `txtai` please visit
https://neuml.github.io/txtai/install/
"""
try:
import txtai # noqa
except ImportError:
raise ImportError(import_err_msg)
self._index = index
def load_data(
self,
query: np.ndarray,
id_to_text_map: Dict[str, str],
k: int = 4,
separate_documents: bool = True,
) -> List[Document]:
"""Load data from txtai index.
Args:
query (np.ndarray): A 2D numpy array of query vectors.
id_to_text_map (Dict[str, str]): A map from ID's to text.
k (int): Number of nearest neighbors to retrieve. Defaults to 4.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
search_result = self._index.search(query, k)
documents = []
for query_result in search_result:
for doc_id, _ in query_result:
doc_id = str(doc_id)
if doc_id not in id_to_text_map:
raise ValueError(
f"Document ID {doc_id} not found in id_to_text_map."
)
text = id_to_text_map[doc_id]
documents.append(Document(text=text))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
| [
"llama_index.legacy.schema.Document"
] | [((2425, 2444), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2433, 2444), False, 'from llama_index.legacy.schema import Document\n'), ((2194, 2213), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2202, 2213), False, 'from llama_index.legacy.schema import Document\n')] |
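# --- Illustrative usage sketch (editor addition, not part of the dataset row above) ---
# A hedged example of pulling documents back out of an existing txtai ANN index.
# `prebuilt_txtai_index` is a hypothetical, already-populated index object, and the random
# query vector is a stand-in for a real embedding of matching dimensionality.
import numpy as np
id_to_text_map = {"0": "First indexed passage.", "1": "Second indexed passage."}
query_vector = np.random.rand(1, 384).astype("float32")
reader = TxtaiReader(index=prebuilt_txtai_index)
docs = reader.load_data(query=query_vector, id_to_text_map=id_to_text_map, k=2)
print([doc.get_content() for doc in docs])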
"""txtai reader."""
from typing import Any, Dict, List
import numpy as np
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class TxtaiReader(BaseReader):
"""txtai reader.
Retrieves documents through an existing in-memory txtai index.
These documents can then be used in a downstream LlamaIndex data structure.
If you wish use txtai itself as an index to to organize documents,
insert documents, and perform queries on them, please use VectorStoreIndex
with TxtaiVectorStore.
Args:
txtai_index (txtai.ann.ANN): A txtai Index object (required)
"""
def __init__(self, index: Any):
"""Initialize with parameters."""
import_err_msg = """
`txtai` package not found. For instructions on
how to install `txtai` please visit
https://neuml.github.io/txtai/install/
"""
try:
import txtai # noqa
except ImportError:
raise ImportError(import_err_msg)
self._index = index
def load_data(
self,
query: np.ndarray,
id_to_text_map: Dict[str, str],
k: int = 4,
separate_documents: bool = True,
) -> List[Document]:
"""Load data from txtai index.
Args:
query (np.ndarray): A 2D numpy array of query vectors.
id_to_text_map (Dict[str, str]): A map from ID's to text.
k (int): Number of nearest neighbors to retrieve. Defaults to 4.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
search_result = self._index.search(query, k)
documents = []
for query_result in search_result:
for doc_id, _ in query_result:
doc_id = str(doc_id)
if doc_id not in id_to_text_map:
raise ValueError(
f"Document ID {doc_id} not found in id_to_text_map."
)
text = id_to_text_map[doc_id]
documents.append(Document(text=text))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
| [
"llama_index.legacy.schema.Document"
] | [((2425, 2444), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2433, 2444), False, 'from llama_index.legacy.schema import Document\n'), ((2194, 2213), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2202, 2213), False, 'from llama_index.legacy.schema import Document\n')] |
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.prompts.prompt_type import PromptType
"""Single select prompt.
PromptTemplate to select one out of `num_choices` options provided in `context_list`,
given a query `query_str`.
Required template variables: `num_choices`, `context_list`, `query_str`
"""
SingleSelectPrompt = PromptTemplate
"""Multiple select prompt.
PromptTemplate to select multiple candidates (up to `max_outputs`) out of `num_choices`
options provided in `context_list`, given a query `query_str`.
Required template variables: `num_choices`, `context_list`, `query_str`,
`max_outputs`
"""
MultiSelectPrompt = PromptTemplate
# single select
DEFAULT_SINGLE_SELECT_PROMPT_TMPL = (
"Some choices are given below. It is provided in a numbered list "
"(1 to {num_choices}), "
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, return "
"the choice that is most relevant to the question: '{query_str}'\n"
)
DEFAULT_SINGLE_SELECT_PROMPT = PromptTemplate(
template=DEFAULT_SINGLE_SELECT_PROMPT_TMPL, prompt_type=PromptType.SINGLE_SELECT
)
# multiple select
DEFAULT_MULTI_SELECT_PROMPT_TMPL = (
"Some choices are given below. It is provided in a numbered "
"list (1 to {num_choices}), "
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, return the top choices "
"(no more than {max_outputs}, but only select what is needed) that "
"are most relevant to the question: '{query_str}'\n"
)
DEFAULT_MULTIPLE_SELECT_PROMPT = PromptTemplate(
template=DEFAULT_MULTI_SELECT_PROMPT_TMPL, prompt_type=PromptType.MULTI_SELECT
)
# single pydantic select
DEFAULT_SINGLE_PYD_SELECT_PROMPT_TMPL = (
"Some choices are given below. It is provided in a numbered list "
"(1 to {num_choices}), "
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, generate "
"the selection object and reason that is most relevant to the "
"question: '{query_str}'\n"
)
# multiple pydantic select
DEFAULT_MULTI_PYD_SELECT_PROMPT_TMPL = (
"Some choices are given below. It is provided in a numbered "
"list (1 to {num_choices}), "
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, return the top choice(s) "
"(no more than {max_outputs}, but only select what is needed) by generating "
"the selection object and reasons that are most relevant to the "
"question: '{query_str}'\n"
)
| [
"llama_index.core.prompts.base.PromptTemplate"
] | [((1156, 1257), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', ([], {'template': 'DEFAULT_SINGLE_SELECT_PROMPT_TMPL', 'prompt_type': 'PromptType.SINGLE_SELECT'}), '(template=DEFAULT_SINGLE_SELECT_PROMPT_TMPL, prompt_type=\n PromptType.SINGLE_SELECT)\n', (1170, 1257), False, 'from llama_index.core.prompts.base import PromptTemplate\n'), ((1812, 1911), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', ([], {'template': 'DEFAULT_MULTI_SELECT_PROMPT_TMPL', 'prompt_type': 'PromptType.MULTI_SELECT'}), '(template=DEFAULT_MULTI_SELECT_PROMPT_TMPL, prompt_type=\n PromptType.MULTI_SELECT)\n', (1826, 1911), False, 'from llama_index.core.prompts.base import PromptTemplate\n')] |
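# --- Illustrative usage sketch (editor addition, not part of the dataset row above) ---
# A hedged example of rendering the single-select prompt defined above with concrete values;
# PromptTemplate.format() is assumed to accept the template variables as keyword arguments.
context_list = "(1) A summary about dogs.\n(2) A summary about cats.\n"
prompt_str = DEFAULT_SINGLE_SELECT_PROMPT.format(
    num_choices=2,
    context_list=context_list,
    query_str="Which choice discusses cats?",
)
print(prompt_str)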
"""Awadb reader."""
from typing import Any, List
import numpy as np
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class AwadbReader(BaseReader):
"""Awadb reader.
Retrieves documents through an existing awadb client.
These documents can then be used in a downstream LlamaIndex data structure.
Args:
client (awadb.client): An awadb client.
"""
def __init__(self, client: Any):
"""Initialize with parameters."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
pass
except ImportError:
raise ImportError(import_err_msg)
self.awadb_client = client
def load_data(
self,
query: np.ndarray,
k: int = 4,
separate_documents: bool = True,
) -> List[Document]:
"""Load data from Faiss.
Args:
query (np.ndarray): A 2D numpy array of query vectors.
k (int): Number of nearest neighbors to retrieve. Defaults to 4.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
results = self.awadb_client.Search(
query,
k,
text_in_page_content=None,
meta_filter=None,
not_include_fields=None,
)
documents = []
for item_detail in results[0]["ResultItems"]:
documents.append(Document(text=item_detail["embedding_text"]))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
| [
"llama_index.legacy.schema.Document"
] | [((1780, 1824), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': "item_detail['embedding_text']"}), "(text=item_detail['embedding_text'])\n", (1788, 1824), False, 'from llama_index.legacy.schema import Document\n'), ((2042, 2061), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2050, 2061), False, 'from llama_index.legacy.schema import Document\n')] |
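# --- Illustrative usage sketch (editor addition, not part of the dataset row above) ---
# A hedged example of querying documents through an existing awadb client. `awadb_client`
# is a hypothetical, already-initialised client whose stored embeddings match the query
# vector's dimensionality.
import numpy as np
reader = AwadbReader(client=awadb_client)
docs = reader.load_data(query=np.random.rand(1, 384).astype("float32"), k=4)
print(len(docs))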
"""Mongo client."""
from typing import Dict, Iterable, List, Optional, Union
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class SimpleMongoReader(BaseReader):
"""Simple mongo reader.
Concatenates each Mongo doc into Document used by LlamaIndex.
Args:
host (str): Mongo host.
port (int): Mongo port.
"""
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
uri: Optional[str] = None,
) -> None:
"""Initialize with parameters."""
try:
from pymongo import MongoClient
except ImportError as err:
raise ImportError(
"`pymongo` package not found, please run `pip install pymongo`"
) from err
client: MongoClient
if uri:
client = MongoClient(uri)
elif host and port:
client = MongoClient(host, port)
else:
raise ValueError("Either `host` and `port` or `uri` must be provided.")
self.client = client
def _flatten(self, texts: List[Union[str, List[str]]]) -> List[str]:
result = []
for text in texts:
result += text if isinstance(text, list) else [text]
return result
def lazy_load_data(
self,
db_name: str,
collection_name: str,
field_names: List[str] = ["text"],
separator: str = "",
query_dict: Optional[Dict] = None,
max_docs: int = 0,
metadata_names: Optional[List[str]] = None,
) -> Iterable[Document]:
"""Load data from the input directory.
Args:
db_name (str): name of the database.
collection_name (str): name of the collection.
field_names(List[str]): names of the fields to be concatenated.
Defaults to ["text"]
separator (str): separator to be used between fields.
Defaults to ""
query_dict (Optional[Dict]): query to filter documents. Read more
at [official docs](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/#std-label-method-find-query)
Defaults to None
max_docs (int): maximum number of documents to load.
Defaults to 0 (no limit)
metadata_names (Optional[List[str]]): names of the fields to be added
to the metadata attribute of the Document. Defaults to None
Returns:
List[Document]: A list of documents.
"""
db = self.client[db_name]
cursor = db[collection_name].find(filter=query_dict or {}, limit=max_docs)
for item in cursor:
try:
texts = [item[name] for name in field_names]
except KeyError as err:
raise ValueError(
f"{err.args[0]} field not found in Mongo document."
) from err
texts = self._flatten(texts)
text = separator.join(texts)
if metadata_names is None:
yield Document(text=text)
else:
try:
metadata = {name: item[name] for name in metadata_names}
except KeyError as err:
raise ValueError(
f"{err.args[0]} field not found in Mongo document."
) from err
yield Document(text=text, metadata=metadata)
| [
"llama_index.legacy.schema.Document"
] | [((887, 903), 'pymongo.MongoClient', 'MongoClient', (['uri'], {}), '(uri)\n', (898, 903), False, 'from pymongo import MongoClient\n'), ((953, 976), 'pymongo.MongoClient', 'MongoClient', (['host', 'port'], {}), '(host, port)\n', (964, 976), False, 'from pymongo import MongoClient\n'), ((3133, 3152), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (3141, 3152), False, 'from llama_index.legacy.schema import Document\n'), ((3476, 3514), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text', 'metadata': 'metadata'}), '(text=text, metadata=metadata)\n', (3484, 3514), False, 'from llama_index.legacy.schema import Document\n')] |
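# --- Illustrative usage sketch (editor addition, not part of the dataset row above) ---
# A hedged example of streaming documents out of a MongoDB collection; the URI, database,
# collection, and field names are placeholders.
reader = SimpleMongoReader(uri="mongodb://localhost:27017")
docs = list(
    reader.lazy_load_data(
        db_name="my_db",
        collection_name="articles",
        field_names=["title", "body"],
        separator="\n",
        metadata_names=["author"],
    )
)
print(len(docs))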
"""Mongo client."""
from typing import Dict, Iterable, List, Optional, Union
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class SimpleMongoReader(BaseReader):
"""Simple mongo reader.
Concatenates each Mongo doc into Document used by LlamaIndex.
Args:
host (str): Mongo host.
port (int): Mongo port.
"""
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
uri: Optional[str] = None,
) -> None:
"""Initialize with parameters."""
try:
from pymongo import MongoClient
except ImportError as err:
raise ImportError(
"`pymongo` package not found, please run `pip install pymongo`"
) from err
client: MongoClient
if uri:
client = MongoClient(uri)
elif host and port:
client = MongoClient(host, port)
else:
raise ValueError("Either `host` and `port` or `uri` must be provided.")
self.client = client
def _flatten(self, texts: List[Union[str, List[str]]]) -> List[str]:
result = []
for text in texts:
result += text if isinstance(text, list) else [text]
return result
def lazy_load_data(
self,
db_name: str,
collection_name: str,
field_names: List[str] = ["text"],
separator: str = "",
query_dict: Optional[Dict] = None,
max_docs: int = 0,
metadata_names: Optional[List[str]] = None,
) -> Iterable[Document]:
"""Load data from the input directory.
Args:
db_name (str): name of the database.
collection_name (str): name of the collection.
field_names(List[str]): names of the fields to be concatenated.
Defaults to ["text"]
separator (str): separator to be used between fields.
Defaults to ""
query_dict (Optional[Dict]): query to filter documents. Read more
at [official docs](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/#std-label-method-find-query)
Defaults to None
max_docs (int): maximum number of documents to load.
Defaults to 0 (no limit)
metadata_names (Optional[List[str]]): names of the fields to be added
to the metadata attribute of the Document. Defaults to None
Returns:
List[Document]: A list of documents.
"""
db = self.client[db_name]
cursor = db[collection_name].find(filter=query_dict or {}, limit=max_docs)
for item in cursor:
try:
texts = [item[name] for name in field_names]
except KeyError as err:
raise ValueError(
f"{err.args[0]} field not found in Mongo document."
) from err
texts = self._flatten(texts)
text = separator.join(texts)
if metadata_names is None:
yield Document(text=text)
else:
try:
metadata = {name: item[name] for name in metadata_names}
except KeyError as err:
raise ValueError(
f"{err.args[0]} field not found in Mongo document."
) from err
yield Document(text=text, metadata=metadata)
| [
"llama_index.legacy.schema.Document"
] | [((887, 903), 'pymongo.MongoClient', 'MongoClient', (['uri'], {}), '(uri)\n', (898, 903), False, 'from pymongo import MongoClient\n'), ((953, 976), 'pymongo.MongoClient', 'MongoClient', (['host', 'port'], {}), '(host, port)\n', (964, 976), False, 'from pymongo import MongoClient\n'), ((3133, 3152), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (3141, 3152), False, 'from llama_index.legacy.schema import Document\n'), ((3476, 3514), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text', 'metadata': 'metadata'}), '(text=text, metadata=metadata)\n', (3484, 3514), False, 'from llama_index.legacy.schema import Document\n')] |
from typing import Any, Callable, Optional, Sequence
from typing_extensions import override
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS
from llama_index.legacy.core.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import llm_completion_callback
from llama_index.legacy.llms.custom import CustomLLM
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
class _BaseGradientLLM(CustomLLM):
_gradient = PrivateAttr()
_model = PrivateAttr()
# Config
max_tokens: Optional[int] = Field(
default=DEFAULT_NUM_OUTPUTS,
description="The number of tokens to generate.",
gt=0,
lt=512,
)
# Gradient client config
access_token: Optional[str] = Field(
description="The Gradient access token to use.",
)
host: Optional[str] = Field(
description="The url of the Gradient service to access."
)
workspace_id: Optional[str] = Field(
description="The Gradient workspace id to use.",
)
is_chat_model: bool = Field(
default=False, description="Whether the model is a chat model."
)
def __init__(
self,
*,
access_token: Optional[str] = None,
host: Optional[str] = None,
max_tokens: Optional[int] = None,
workspace_id: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
is_chat_model: bool = False,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
**kwargs: Any,
) -> None:
super().__init__(
max_tokens=max_tokens,
access_token=access_token,
host=host,
workspace_id=workspace_id,
callback_manager=callback_manager,
is_chat_model=is_chat_model,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
**kwargs,
)
try:
from gradientai import Gradient
self._gradient = Gradient(
access_token=access_token, host=host, workspace_id=workspace_id
)
except ImportError as e:
raise ImportError(
"Could not import Gradient Python package. "
"Please install it with `pip install gradientai`."
) from e
def close(self) -> None:
self._gradient.close()
@llm_completion_callback()
@override
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return CompletionResponse(
text=self._model.complete(
query=prompt,
max_generated_token_count=self.max_tokens,
**kwargs,
).generated_output
)
@llm_completion_callback()
@override
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
grdt_reponse = await self._model.acomplete(
query=prompt,
max_generated_token_count=self.max_tokens,
**kwargs,
)
return CompletionResponse(text=grdt_reponse.generated_output)
@override
def stream_complete(
self,
prompt: str,
formatted: bool = False,
**kwargs: Any,
) -> CompletionResponseGen:
raise NotImplementedError
@property
@override
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=1024,
num_output=self.max_tokens or 20,
is_chat_model=self.is_chat_model,
is_function_calling_model=False,
model_name=self._model.id,
)
class GradientBaseModelLLM(_BaseGradientLLM):
base_model_slug: str = Field(
description="The slug of the base model to use.",
)
def __init__(
self,
*,
access_token: Optional[str] = None,
base_model_slug: str,
host: Optional[str] = None,
max_tokens: Optional[int] = None,
workspace_id: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
is_chat_model: bool = False,
) -> None:
super().__init__(
access_token=access_token,
base_model_slug=base_model_slug,
host=host,
max_tokens=max_tokens,
workspace_id=workspace_id,
callback_manager=callback_manager,
is_chat_model=is_chat_model,
)
self._model = self._gradient.get_base_model(
base_model_slug=base_model_slug,
)
class GradientModelAdapterLLM(_BaseGradientLLM):
model_adapter_id: str = Field(
description="The id of the model adapter to use.",
)
def __init__(
self,
*,
access_token: Optional[str] = None,
host: Optional[str] = None,
max_tokens: Optional[int] = None,
model_adapter_id: str,
workspace_id: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
is_chat_model: bool = False,
) -> None:
super().__init__(
access_token=access_token,
host=host,
max_tokens=max_tokens,
model_adapter_id=model_adapter_id,
workspace_id=workspace_id,
callback_manager=callback_manager,
is_chat_model=is_chat_model,
)
self._model = self._gradient.get_model_adapter(
model_adapter_id=model_adapter_id
)
| [
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.core.llms.types.CompletionResponse"
] | [((660, 673), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (671, 673), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((687, 700), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (698, 700), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((747, 849), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_NUM_OUTPUTS', 'description': '"""The number of tokens to generate."""', 'gt': '(0)', 'lt': '(512)'}), "(default=DEFAULT_NUM_OUTPUTS, description=\n 'The number of tokens to generate.', gt=0, lt=512)\n", (752, 849), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((948, 1002), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Gradient access token to use."""'}), "(description='The Gradient access token to use.')\n", (953, 1002), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1044, 1107), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The url of the Gradient service to access."""'}), "(description='The url of the Gradient service to access.')\n", (1049, 1107), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1156, 1210), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Gradient workspace id to use."""'}), "(description='The Gradient workspace id to use.')\n", (1161, 1210), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1252, 1322), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the model is a chat model."""'}), "(default=False, description='Whether the model is a chat model.')\n", (1257, 1322), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3019, 3044), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3042, 3044), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((3408, 3433), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3431, 3433), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((4391, 4446), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The slug of the base model to use."""'}), "(description='The slug of the base model to use.')\n", (4396, 4446), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((5307, 5363), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The id of the model adapter to use."""'}), "(description='The id of the model adapter to use.')\n", (5312, 5363), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3749, 3803), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'grdt_reponse.generated_output'}), '(text=grdt_reponse.generated_output)\n', (3767, 3803), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((4084, 4252), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': '(1024)', 'num_output': '(self.max_tokens or 20)', 'is_chat_model': 'self.is_chat_model', 'is_function_calling_model': '(False)', 'model_name': 'self._model.id'}), '(context_window=1024, num_output=self.max_tokens or 20,\n is_chat_model=self.is_chat_model, is_function_calling_model=False,\n model_name=self._model.id)\n', (4095, 4252), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2635, 2708), 'gradientai.Gradient', 'Gradient', ([], {'access_token': 'access_token', 'host': 'host', 'workspace_id': 'workspace_id'}), '(access_token=access_token, host=host, workspace_id=workspace_id)\n', (2643, 2708), False, 'from gradientai import Gradient\n')]
from typing import Any, Callable, Optional, Sequence
from typing_extensions import override
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS
from llama_index.legacy.core.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import llm_completion_callback
from llama_index.legacy.llms.custom import CustomLLM
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
class _BaseGradientLLM(CustomLLM):
_gradient = PrivateAttr()
_model = PrivateAttr()
# Config
max_tokens: Optional[int] = Field(
default=DEFAULT_NUM_OUTPUTS,
description="The number of tokens to generate.",
gt=0,
lt=512,
)
# Gradient client config
access_token: Optional[str] = Field(
description="The Gradient access token to use.",
)
host: Optional[str] = Field(
description="The url of the Gradient service to access."
)
workspace_id: Optional[str] = Field(
description="The Gradient workspace id to use.",
)
is_chat_model: bool = Field(
default=False, description="Whether the model is a chat model."
)
def __init__(
self,
*,
access_token: Optional[str] = None,
host: Optional[str] = None,
max_tokens: Optional[int] = None,
workspace_id: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
is_chat_model: bool = False,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
**kwargs: Any,
) -> None:
super().__init__(
max_tokens=max_tokens,
access_token=access_token,
host=host,
workspace_id=workspace_id,
callback_manager=callback_manager,
is_chat_model=is_chat_model,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
**kwargs,
)
try:
from gradientai import Gradient
self._gradient = Gradient(
access_token=access_token, host=host, workspace_id=workspace_id
)
except ImportError as e:
raise ImportError(
"Could not import Gradient Python package. "
"Please install it with `pip install gradientai`."
) from e
def close(self) -> None:
self._gradient.close()
@llm_completion_callback()
@override
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return CompletionResponse(
text=self._model.complete(
query=prompt,
max_generated_token_count=self.max_tokens,
**kwargs,
).generated_output
)
@llm_completion_callback()
@override
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
grdt_reponse = await self._model.acomplete(
query=prompt,
max_generated_token_count=self.max_tokens,
**kwargs,
)
return CompletionResponse(text=grdt_reponse.generated_output)
@override
def stream_complete(
self,
prompt: str,
formatted: bool = False,
**kwargs: Any,
) -> CompletionResponseGen:
raise NotImplementedError
@property
@override
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=1024,
num_output=self.max_tokens or 20,
is_chat_model=self.is_chat_model,
is_function_calling_model=False,
model_name=self._model.id,
)
class GradientBaseModelLLM(_BaseGradientLLM):
base_model_slug: str = Field(
description="The slug of the base model to use.",
)
def __init__(
self,
*,
access_token: Optional[str] = None,
base_model_slug: str,
host: Optional[str] = None,
max_tokens: Optional[int] = None,
workspace_id: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
is_chat_model: bool = False,
) -> None:
super().__init__(
access_token=access_token,
base_model_slug=base_model_slug,
host=host,
max_tokens=max_tokens,
workspace_id=workspace_id,
callback_manager=callback_manager,
is_chat_model=is_chat_model,
)
self._model = self._gradient.get_base_model(
base_model_slug=base_model_slug,
)
class GradientModelAdapterLLM(_BaseGradientLLM):
model_adapter_id: str = Field(
description="The id of the model adapter to use.",
)
def __init__(
self,
*,
access_token: Optional[str] = None,
host: Optional[str] = None,
max_tokens: Optional[int] = None,
model_adapter_id: str,
workspace_id: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
is_chat_model: bool = False,
) -> None:
super().__init__(
access_token=access_token,
host=host,
max_tokens=max_tokens,
model_adapter_id=model_adapter_id,
workspace_id=workspace_id,
callback_manager=callback_manager,
is_chat_model=is_chat_model,
)
self._model = self._gradient.get_model_adapter(
model_adapter_id=model_adapter_id
)
| [
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.core.llms.types.CompletionResponse"
] | [((660, 673), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (671, 673), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((687, 700), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (698, 700), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((747, 849), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_NUM_OUTPUTS', 'description': '"""The number of tokens to generate."""', 'gt': '(0)', 'lt': '(512)'}), "(default=DEFAULT_NUM_OUTPUTS, description=\n 'The number of tokens to generate.', gt=0, lt=512)\n", (752, 849), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((948, 1002), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Gradient access token to use."""'}), "(description='The Gradient access token to use.')\n", (953, 1002), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1044, 1107), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The url of the Gradient service to access."""'}), "(description='The url of the Gradient service to access.')\n", (1049, 1107), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1156, 1210), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Gradient workspace id to use."""'}), "(description='The Gradient workspace id to use.')\n", (1161, 1210), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1252, 1322), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the model is a chat model."""'}), "(default=False, description='Whether the model is a chat model.')\n", (1257, 1322), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3019, 3044), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3042, 3044), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((3408, 3433), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3431, 3433), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((4391, 4446), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The slug of the base model to use."""'}), "(description='The slug of the base model to use.')\n", (4396, 4446), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((5307, 5363), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The id of the model adapter to use."""'}), "(description='The id of the model adapter to use.')\n", (5312, 5363), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3749, 3803), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'grdt_reponse.generated_output'}), '(text=grdt_reponse.generated_output)\n', (3767, 3803), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((4084, 4252), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': '(1024)', 'num_output': '(self.max_tokens or 20)', 'is_chat_model': 'self.is_chat_model', 'is_function_calling_model': '(False)', 'model_name': 'self._model.id'}), '(context_window=1024, 
num_output=self.max_tokens or 20,\n is_chat_model=self.is_chat_model, is_function_calling_model=False,\n model_name=self._model.id)\n', (4095, 4252), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2635, 2708), 'gradientai.Gradient', 'Gradient', ([], {'access_token': 'access_token', 'host': 'host', 'workspace_id': 'workspace_id'}), '(access_token=access_token, host=host, workspace_id=workspace_id)\n', (2643, 2708), False, 'from gradientai import Gradient\n')] |
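# Minimal usage sketch for GradientBaseModelLLM as defined above. It assumes the
# `gradientai` package is installed, that the import path below is where this
# module lives in llama-index-legacy, and that the environment variable names and
# base-model slug (both illustrative, not prescribed by the class) point at valid
# Gradient credentials and an available base model.
import os
from llama_index.legacy.llms.gradient import GradientBaseModelLLM
llm = GradientBaseModelLLM(
    access_token=os.environ["GRADIENT_ACCESS_TOKEN"],
    workspace_id=os.environ["GRADIENT_WORKSPACE_ID"],
    base_model_slug="llama2-7b-chat",
    max_tokens=100,
)
try:
    print(llm.complete("Say hello in one short sentence.").text)
finally:
    llm.close()  # releases the underlying Gradient client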
from typing import Dict, Type
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
RECOGNIZED_EMBEDDINGS: Dict[str, Type[BaseEmbedding]] = {
MockEmbedding.class_name(): MockEmbedding,
}
# conditionals for llama-cloud support
try:
from llama_index.embeddings.openai import OpenAIEmbedding # pants: no-infer-dep
RECOGNIZED_EMBEDDINGS[OpenAIEmbedding.class_name()] = OpenAIEmbedding
except ImportError:
pass
try:
from llama_index.embeddings.azure_openai import (
AzureOpenAIEmbedding,
) # pants: no-infer-dep
RECOGNIZED_EMBEDDINGS[AzureOpenAIEmbedding.class_name()] = AzureOpenAIEmbedding
except ImportError:
pass
try:
from llama_index.embeddings.huggingface import (
HuggingFaceInferenceAPIEmbedding,
) # pants: no-infer-dep
RECOGNIZED_EMBEDDINGS[
HuggingFaceInferenceAPIEmbedding.class_name()
] = HuggingFaceInferenceAPIEmbedding
except ImportError:
pass
def load_embed_model(data: dict) -> BaseEmbedding:
"""Load Embedding by name."""
if isinstance(data, BaseEmbedding):
return data
name = data.get("class_name", None)
if name is None:
raise ValueError("Embedding loading requires a class_name")
if name not in RECOGNIZED_EMBEDDINGS:
raise ValueError(f"Invalid Embedding name: {name}")
return RECOGNIZED_EMBEDDINGS[name].from_dict(data)
| [
"llama_index.embeddings.huggingface.HuggingFaceInferenceAPIEmbedding.class_name",
"llama_index.embeddings.azure_openai.AzureOpenAIEmbedding.class_name",
"llama_index.core.embeddings.mock_embed_model.MockEmbedding.class_name",
"llama_index.embeddings.openai.OpenAIEmbedding.class_name"
] | [((229, 255), 'llama_index.core.embeddings.mock_embed_model.MockEmbedding.class_name', 'MockEmbedding.class_name', ([], {}), '()\n', (253, 255), False, 'from llama_index.core.embeddings.mock_embed_model import MockEmbedding\n'), ((431, 459), 'llama_index.embeddings.openai.OpenAIEmbedding.class_name', 'OpenAIEmbedding.class_name', ([], {}), '()\n', (457, 459), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((654, 687), 'llama_index.embeddings.azure_openai.AzureOpenAIEmbedding.class_name', 'AzureOpenAIEmbedding.class_name', ([], {}), '()\n', (685, 687), False, 'from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n'), ((907, 952), 'llama_index.embeddings.huggingface.HuggingFaceInferenceAPIEmbedding.class_name', 'HuggingFaceInferenceAPIEmbedding.class_name', ([], {}), '()\n', (950, 952), False, 'from llama_index.embeddings.huggingface import HuggingFaceInferenceAPIEmbedding\n')] |
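# Usage sketch for load_embed_model above, exercising the MockEmbedding entry that
# is always registered. `embed_dim` is MockEmbedding's own required field, and the
# round trip relies on the standard BaseComponent.from_dict behaviour of dropping
# "class_name" and passing the remaining keys to the constructor.
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
data = {"class_name": MockEmbedding.class_name(), "embed_dim": 8}
embed_model = load_embed_model(data)  # load_embed_model as defined above
print(len(embed_model.get_text_embedding("hello")))  # -> 8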
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core.evaluation import CorrectnessEvaluator
from llama_index.llms import OpenAI, Gemini
from llama_index.core import ServiceContext
import pandas as pd
async def main():
# DOWNLOAD LLAMADATASET
evaluator_dataset, _ = download_llama_dataset(
"MiniMtBenchSingleGradingDataset", "./mini_mt_bench_data"
)
# DEFINE EVALUATORS
gpt_4_context = ServiceContext.from_defaults(
llm=OpenAI(temperature=0, model="gpt-4"),
)
gpt_3p5_context = ServiceContext.from_defaults(
llm=OpenAI(temperature=0, model="gpt-3.5-turbo"),
)
gemini_pro_context = ServiceContext.from_defaults(
llm=Gemini(model="models/gemini-pro", temperature=0)
)
evaluators = {
"gpt-4": CorrectnessEvaluator(service_context=gpt_4_context),
"gpt-3.5": CorrectnessEvaluator(service_context=gpt_3p5_context),
"gemini-pro": CorrectnessEvaluator(service_context=gemini_pro_context),
}
# EVALUATE WITH PACK
############################################################################
    # NOTE: If you have a lower tier subscription for OpenAI API like Usage Tier 1 #
# then you'll need to use different batch_size and sleep_time_in_seconds. #
# For Usage Tier 1, settings that seemed to work well were batch_size=5, #
# and sleep_time_in_seconds=15 (as of December 2023.) #
############################################################################
EvaluatorBenchmarkerPack = download_llama_pack("EvaluatorBenchmarkerPack", "./pack")
evaluator_benchmarker = EvaluatorBenchmarkerPack(
evaluator=evaluators["gpt-3.5"],
eval_dataset=evaluator_dataset,
show_progress=True,
)
gpt_3p5_benchmark_df = await evaluator_benchmarker.arun(
batch_size=100, sleep_time_in_seconds=0
)
evaluator_benchmarker = EvaluatorBenchmarkerPack(
evaluator=evaluators["gpt-4"],
eval_dataset=evaluator_dataset,
show_progress=True,
)
gpt_4_benchmark_df = await evaluator_benchmarker.arun(
batch_size=100, sleep_time_in_seconds=0
)
evaluator_benchmarker = EvaluatorBenchmarkerPack(
evaluator=evaluators["gemini-pro"],
eval_dataset=evaluator_dataset,
show_progress=True,
)
gemini_pro_benchmark_df = await evaluator_benchmarker.arun(
batch_size=5, sleep_time_in_seconds=0.5
)
benchmark_df = pd.concat(
[
gpt_3p5_benchmark_df,
gpt_4_benchmark_df,
gemini_pro_benchmark_df,
],
axis=0,
)
print(benchmark_df)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| [
"llama_index.core.llama_pack.download_llama_pack",
"llama_index.core.evaluation.CorrectnessEvaluator",
"llama_index.llms.Gemini",
"llama_index.llms.OpenAI",
"llama_index.core.llama_dataset.download_llama_dataset"
] | [((386, 471), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""MiniMtBenchSingleGradingDataset"""', '"""./mini_mt_bench_data"""'], {}), "('MiniMtBenchSingleGradingDataset',\n './mini_mt_bench_data')\n", (408, 471), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((1646, 1703), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""EvaluatorBenchmarkerPack"""', '"""./pack"""'], {}), "('EvaluatorBenchmarkerPack', './pack')\n", (1665, 1703), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((2580, 2670), 'pandas.concat', 'pd.concat', (['[gpt_3p5_benchmark_df, gpt_4_benchmark_df, gemini_pro_benchmark_df]'], {'axis': '(0)'}), '([gpt_3p5_benchmark_df, gpt_4_benchmark_df,\n gemini_pro_benchmark_df], axis=0)\n', (2589, 2670), True, 'import pandas as pd\n'), ((2801, 2825), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2823, 2825), False, 'import asyncio\n'), ((890, 941), 'llama_index.core.evaluation.CorrectnessEvaluator', 'CorrectnessEvaluator', ([], {'service_context': 'gpt_4_context'}), '(service_context=gpt_4_context)\n', (910, 941), False, 'from llama_index.core.evaluation import CorrectnessEvaluator\n'), ((962, 1015), 'llama_index.core.evaluation.CorrectnessEvaluator', 'CorrectnessEvaluator', ([], {'service_context': 'gpt_3p5_context'}), '(service_context=gpt_3p5_context)\n', (982, 1015), False, 'from llama_index.core.evaluation import CorrectnessEvaluator\n'), ((1039, 1095), 'llama_index.core.evaluation.CorrectnessEvaluator', 'CorrectnessEvaluator', ([], {'service_context': 'gemini_pro_context'}), '(service_context=gemini_pro_context)\n', (1059, 1095), False, 'from llama_index.core.evaluation import CorrectnessEvaluator\n'), ((569, 605), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-4"""'}), "(temperature=0, model='gpt-4')\n", (575, 605), False, 'from llama_index.llms import OpenAI, Gemini\n'), ((678, 722), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-3.5-turbo"""'}), "(temperature=0, model='gpt-3.5-turbo')\n", (684, 722), False, 'from llama_index.llms import OpenAI, Gemini\n'), ((798, 846), 'llama_index.llms.Gemini', 'Gemini', ([], {'model': '"""models/gemini-pro"""', 'temperature': '(0)'}), "(model='models/gemini-pro', temperature=0)\n", (804, 846), False, 'from llama_index.llms import OpenAI, Gemini\n')] |
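# Back-of-the-envelope pacing check for the batch_size / sleep_time_in_seconds
# knobs used above. Purely illustrative: it assumes one sleep per batch and
# ignores model latency, so it is a lower bound rather than a description of how
# EvaluatorBenchmarkerPack.arun actually schedules its calls.
import math
def min_sleep_seconds(n_examples: int, batch_size: int, sleep_s: float) -> float:
    """Total time spent sleeping if every batch is followed by one sleep."""
    return math.ceil(n_examples / batch_size) * sleep_s
# e.g. 100 examples with the Usage Tier 1 settings from the note above:
print(min_sleep_seconds(100, batch_size=5, sleep_s=15.0))  # -> 300.0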
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
async def main():
# DOWNLOAD LLAMADATASET
rag_dataset, documents = download_llama_dataset(
"PaulGrahamEssayDataset", "./paul_graham"
)
# BUILD BASIC RAG PIPELINE
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
# EVALUATE WITH PACK
RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack_stuff")
rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)
############################################################################
    # NOTE: If you have a lower tier subscription for OpenAI API like Usage Tier 1 #
# then you'll need to use different batch_size and sleep_time_in_seconds. #
# For Usage Tier 1, settings that seemed to work well were batch_size=5, #
# and sleep_time_in_seconds=15 (as of December 2023.) #
############################################################################
benchmark_df = await rag_evaluator.arun(
        batch_size=20,  # number of openai api calls to batch together
        sleep_time_in_seconds=1,  # number of seconds to sleep before making an api call
)
print(benchmark_df)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llama_dataset.download_llama_dataset",
"llama_index.core.llama_pack.download_llama_pack"
] | [((265, 330), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""PaulGrahamEssayDataset"""', '"""./paul_graham"""'], {}), "('PaulGrahamEssayDataset', './paul_graham')\n", (287, 330), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((389, 441), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (420, 441), False, 'from llama_index.core import VectorStoreIndex\n'), ((534, 589), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack_stuff"""'], {}), "('RagEvaluatorPack', './pack_stuff')\n", (553, 589), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((1440, 1464), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1462, 1464), False, 'import asyncio\n')] |
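# Equivalent entry point for the script above using asyncio.run(), which creates
# and closes the event loop itself (Python 3.7+); `main` is the coroutine function
# defined above.
import asyncio
if __name__ == "__main__":
    asyncio.run(main())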
from typing import TYPE_CHECKING, Any, Optional
from llama_index.core.base.base_query_engine import BaseQueryEngine
if TYPE_CHECKING:
from llama_index.core.langchain_helpers.agents.tools import (
LlamaIndexTool,
)
from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
DEFAULT_NAME = "query_engine_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and getting back a natural language response.
"""
class QueryEngineTool(AsyncBaseTool):
"""Query engine tool.
A tool making use of a query engine.
Args:
query_engine (BaseQueryEngine): A query engine.
metadata (ToolMetadata): The associated metadata of the query engine.
"""
def __init__(
self,
query_engine: BaseQueryEngine,
metadata: ToolMetadata,
resolve_input_errors: bool = True,
) -> None:
self._query_engine = query_engine
self._metadata = metadata
self._resolve_input_errors = resolve_input_errors
@classmethod
def from_defaults(
cls,
query_engine: BaseQueryEngine,
name: Optional[str] = None,
description: Optional[str] = None,
resolve_input_errors: bool = True,
) -> "QueryEngineTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(name=name, description=description)
return cls(
query_engine=query_engine,
metadata=metadata,
resolve_input_errors=resolve_input_errors,
)
@property
def query_engine(self) -> BaseQueryEngine:
return self._query_engine
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
if args is not None and len(args) > 0:
query_str = str(args[0])
elif kwargs is not None and "input" in kwargs:
# NOTE: this assumes our default function schema of `input`
query_str = kwargs["input"]
elif kwargs is not None and self._resolve_input_errors:
query_str = str(kwargs)
else:
raise ValueError(
"Cannot call query engine without specifying `input` parameter."
)
response = self._query_engine.query(query_str)
return ToolOutput(
content=str(response),
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=response,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
if args is not None and len(args) > 0:
query_str = str(args[0])
elif kwargs is not None and "input" in kwargs:
# NOTE: this assumes our default function schema of `input`
query_str = kwargs["input"]
elif kwargs is not None and self._resolve_input_errors:
query_str = str(kwargs)
else:
raise ValueError("Cannot call query engine without inputs")
response = await self._query_engine.aquery(query_str)
return ToolOutput(
content=str(response),
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=response,
)
def as_langchain_tool(self) -> "LlamaIndexTool":
from llama_index.core.langchain_helpers.agents.tools import (
IndexToolConfig,
LlamaIndexTool,
)
tool_config = IndexToolConfig(
query_engine=self.query_engine,
name=self.metadata.name,
description=self.metadata.description,
)
return LlamaIndexTool.from_tool_config(tool_config=tool_config)
| [
"llama_index.core.langchain_helpers.agents.tools.IndexToolConfig",
"llama_index.core.langchain_helpers.agents.tools.LlamaIndexTool.from_tool_config",
"llama_index.core.tools.types.ToolMetadata"
] | [((1402, 1450), 'llama_index.core.tools.types.ToolMetadata', 'ToolMetadata', ([], {'name': 'name', 'description': 'description'}), '(name=name, description=description)\n', (1414, 1450), False, 'from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput\n'), ((3560, 3675), 'llama_index.core.langchain_helpers.agents.tools.IndexToolConfig', 'IndexToolConfig', ([], {'query_engine': 'self.query_engine', 'name': 'self.metadata.name', 'description': 'self.metadata.description'}), '(query_engine=self.query_engine, name=self.metadata.name,\n description=self.metadata.description)\n', (3575, 3675), False, 'from llama_index.core.langchain_helpers.agents.tools import IndexToolConfig, LlamaIndexTool\n'), ((3734, 3790), 'llama_index.core.langchain_helpers.agents.tools.LlamaIndexTool.from_tool_config', 'LlamaIndexTool.from_tool_config', ([], {'tool_config': 'tool_config'}), '(tool_config=tool_config)\n', (3765, 3790), False, 'from llama_index.core.langchain_helpers.agents.tools import IndexToolConfig, LlamaIndexTool\n')] |
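# Usage sketch for QueryEngineTool. The index is a stand-in built from a single
# in-memory document and needs a configured embedding model and LLM (for example
# an OpenAI key) to build and answer; the tool name and description are illustrative.
from llama_index.core import Document, VectorStoreIndex
from llama_index.core.tools import QueryEngineTool
index = VectorStoreIndex.from_documents(
    [Document(text="Alice maintains the ingestion pipeline.")]
)
tool = QueryEngineTool.from_defaults(
    query_engine=index.as_query_engine(),
    name="team_kb",
    description="Answers natural language questions about the team.",
)
print(tool.call("Who maintains the ingestion pipeline?").content)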
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback
from llama_index.legacy.llms.custom import CustomLLM
from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator
from llama_index.legacy.llms.openai_utils import (
from_openai_message_dict,
to_openai_message_dicts,
)
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
class LlamaAPI(CustomLLM):
model: str = Field(description="The llama-api model to use.")
temperature: float = Field(description="The temperature to use for sampling.")
max_tokens: int = Field(description="The maximum number of tokens to generate.")
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the llama-api API."
)
_client: Any = PrivateAttr()
def __init__(
self,
model: str = "llama-13b-chat",
temperature: float = 0.1,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
try:
from llamaapi import LlamaAPI as Client
except ImportError as e:
raise ImportError(
"llama_api not installed."
"Please install it with `pip install llamaapi`."
) from e
self._client = Client(api_key)
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs or {},
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "llama_api_llm"
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"model": self.model,
"temperature": self.temperature,
"max_length": self.max_tokens,
}
return {
**base_kwargs,
**self.additional_kwargs,
}
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=4096,
num_output=DEFAULT_NUM_OUTPUTS,
is_chat_model=True,
is_function_calling_model=True,
model_name="llama-api",
)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
message_dicts = to_openai_message_dicts(messages)
json_dict = {
"messages": message_dicts,
**self._model_kwargs,
**kwargs,
}
response = self._client.run(json_dict).json()
message_dict = response["choices"][0]["message"]
message = from_openai_message_dict(message_dict)
return ChatResponse(message=message, raw=response)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
complete_fn = chat_to_completion_decorator(self.chat)
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise NotImplementedError("stream_complete is not supported for LlamaAPI")
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
raise NotImplementedError("stream_chat is not supported for LlamaAPI")
| [
"llama_index.legacy.llms.openai_utils.from_openai_message_dict",
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.core.llms.types.ChatResponse",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.llms.generic_utils.chat_to_completion_decorator",
"llama_index.legacy.llms.openai_utils.to_openai_message_dicts"
] | [((868, 916), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The llama-api model to use."""'}), "(description='The llama-api model to use.')\n", (873, 916), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((942, 999), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (947, 999), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1022, 1084), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""'}), "(description='The maximum number of tokens to generate.')\n", (1027, 1084), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1125, 1213), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the llama-api API."""'}), "(default_factory=dict, description=\n 'Additional kwargs for the llama-api API.')\n", (1130, 1213), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1243, 1256), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1254, 1256), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3379, 3398), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (3396, 3398), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3902, 3927), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3925, 3927), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4154, 4179), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (4177, 4179), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4392, 4411), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (4409, 4411), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((2205, 2220), 'llamaapi.LlamaAPI', 'Client', (['api_key'], {}), '(api_key)\n', (2211, 2220), True, 'from llamaapi import LlamaAPI as Client\n'), ((3161, 3305), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': '(4096)', 'num_output': 'DEFAULT_NUM_OUTPUTS', 'is_chat_model': '(True)', 'is_function_calling_model': '(True)', 'model_name': '"""llama-api"""'}), "(context_window=4096, num_output=DEFAULT_NUM_OUTPUTS,\n is_chat_model=True, is_function_calling_model=True, model_name='llama-api')\n", (3172, 3305), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((3507, 3540), 'llama_index.legacy.llms.openai_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (3530, 3540), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message_dict, to_openai_message_dicts\n'), ((3797, 3835), 'llama_index.legacy.llms.openai_utils.from_openai_message_dict', 'from_openai_message_dict', (['message_dict'], {}), '(message_dict)\n', (3821, 3835), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message_dict, to_openai_message_dicts\n'), ((3852, 3895), 
'llama_index.legacy.core.llms.types.ChatResponse', 'ChatResponse', ([], {'message': 'message', 'raw': 'response'}), '(message=message, raw=response)\n', (3864, 3895), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((4063, 4102), 'llama_index.legacy.llms.generic_utils.chat_to_completion_decorator', 'chat_to_completion_decorator', (['self.chat'], {}), '(self.chat)\n', (4091, 4102), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator\n')] |
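# Usage sketch for the LlamaAPI wrapper above. Assumes the `llamaapi` package is
# installed, that LLAMA_API_KEY (an illustrative variable name) holds a valid key,
# and that the import path below is where this module lives in llama-index-legacy.
import os
from llama_index.legacy.core.llms.types import ChatMessage
from llama_index.legacy.llms.llama_api import LlamaAPI
llm = LlamaAPI(model="llama-13b-chat", temperature=0.1, api_key=os.environ["LLAMA_API_KEY"])
print(llm.complete("Name one use of text embeddings.").text)
print(llm.chat([ChatMessage(role="user", content="Hello!")]).message.content)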
"""Download tool from Llama Hub."""
from typing import Optional, Type
from llama_index.legacy.download.module import (
LLAMA_HUB_URL,
MODULE_TYPE,
download_llama_module,
track_download,
)
from llama_index.legacy.tools.tool_spec.base import BaseToolSpec
def download_tool(
tool_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_path: Optional[str] = None,
) -> Type[BaseToolSpec]:
"""Download a single tool from Llama Hub.
Args:
tool_class: The name of the tool class you want to download,
such as `GmailToolSpec`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_path: Custom dirpath to download loader into.
Returns:
        A tool spec class (a subclass of BaseToolSpec).
"""
tool_cls = download_llama_module(
tool_class,
llama_hub_url=llama_hub_url,
refresh_cache=refresh_cache,
custom_dir="tools",
custom_path=custom_path,
library_path="tools/library.json",
)
if not issubclass(tool_cls, BaseToolSpec):
raise ValueError(f"Tool class {tool_class} must be a subclass of BaseToolSpec.")
track_download(tool_class, MODULE_TYPE.TOOL)
return tool_cls
| [
"llama_index.legacy.download.module.track_download",
"llama_index.legacy.download.module.download_llama_module"
] | [((867, 1047), 'llama_index.legacy.download.module.download_llama_module', 'download_llama_module', (['tool_class'], {'llama_hub_url': 'llama_hub_url', 'refresh_cache': 'refresh_cache', 'custom_dir': '"""tools"""', 'custom_path': 'custom_path', 'library_path': '"""tools/library.json"""'}), "(tool_class, llama_hub_url=llama_hub_url,\n refresh_cache=refresh_cache, custom_dir='tools', custom_path=\n custom_path, library_path='tools/library.json')\n", (888, 1047), False, 'from llama_index.legacy.download.module import LLAMA_HUB_URL, MODULE_TYPE, download_llama_module, track_download\n'), ((1234, 1278), 'llama_index.legacy.download.module.track_download', 'track_download', (['tool_class', 'MODULE_TYPE.TOOL'], {}), '(tool_class, MODULE_TYPE.TOOL)\n', (1248, 1278), False, 'from llama_index.legacy.download.module import LLAMA_HUB_URL, MODULE_TYPE, download_llama_module, track_download\n')] |
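For reference, a minimal usage sketch of the `download_tool` entry above; the import path and the `GmailToolSpec` class name are illustrative assumptions, not something the row itself confirms.
# Hypothetical usage sketch (assumes the legacy llama-index package and network access to Llama Hub).
from llama_index.legacy.tools.download import download_tool  # import path assumed
GmailToolSpec = download_tool("GmailToolSpec", refresh_cache=True)
tool_spec = GmailToolSpec()          # constructor arguments depend on the specific tool spec
tools = tool_spec.to_tool_list()     # expose the spec's methods as individual tools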
"""Download tool from Llama Hub."""
from typing import Optional, Type
from llama_index.legacy.download.module import (
LLAMA_HUB_URL,
MODULE_TYPE,
download_llama_module,
track_download,
)
from llama_index.legacy.tools.tool_spec.base import BaseToolSpec
def download_tool(
tool_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_path: Optional[str] = None,
) -> Type[BaseToolSpec]:
"""Download a single tool from Llama Hub.
Args:
tool_class: The name of the tool class you want to download,
such as `GmailToolSpec`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_path: Custom dirpath to download loader into.
Returns:
A Loader.
"""
tool_cls = download_llama_module(
tool_class,
llama_hub_url=llama_hub_url,
refresh_cache=refresh_cache,
custom_dir="tools",
custom_path=custom_path,
library_path="tools/library.json",
)
if not issubclass(tool_cls, BaseToolSpec):
raise ValueError(f"Tool class {tool_class} must be a subclass of BaseToolSpec.")
track_download(tool_class, MODULE_TYPE.TOOL)
return tool_cls
| [
"llama_index.legacy.download.module.track_download",
"llama_index.legacy.download.module.download_llama_module"
] | [((867, 1047), 'llama_index.legacy.download.module.download_llama_module', 'download_llama_module', (['tool_class'], {'llama_hub_url': 'llama_hub_url', 'refresh_cache': 'refresh_cache', 'custom_dir': '"""tools"""', 'custom_path': 'custom_path', 'library_path': '"""tools/library.json"""'}), "(tool_class, llama_hub_url=llama_hub_url,\n refresh_cache=refresh_cache, custom_dir='tools', custom_path=\n custom_path, library_path='tools/library.json')\n", (888, 1047), False, 'from llama_index.legacy.download.module import LLAMA_HUB_URL, MODULE_TYPE, download_llama_module, track_download\n'), ((1234, 1278), 'llama_index.legacy.download.module.track_download', 'track_download', (['tool_class', 'MODULE_TYPE.TOOL'], {}), '(tool_class, MODULE_TYPE.TOOL)\n', (1248, 1278), False, 'from llama_index.legacy.download.module import LLAMA_HUB_URL, MODULE_TYPE, download_llama_module, track_download\n')] |
"""Simple Engine."""
import json
import os
from typing import Any, Optional, Union
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.embeddings import BaseEmbedding
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
from llama_index.core.indices.base import BaseIndex
from llama_index.core.ingestion.pipeline import run_transformations
from llama_index.core.llms import LLM
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.response_synthesizers import (
BaseSynthesizer,
get_response_synthesizer,
)
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.schema import (
BaseNode,
Document,
NodeWithScore,
QueryBundle,
QueryType,
TransformComponent,
)
from metagpt.rag.factories import (
get_index,
get_rag_embedding,
get_rag_llm,
get_rankers,
get_retriever,
)
from metagpt.rag.interface import NoEmbedding, RAGObject
from metagpt.rag.retrievers.base import ModifiableRAGRetriever, PersistableRAGRetriever
from metagpt.rag.retrievers.hybrid_retriever import SimpleHybridRetriever
from metagpt.rag.schema import (
BaseIndexConfig,
BaseRankerConfig,
BaseRetrieverConfig,
BM25RetrieverConfig,
ObjectNode,
)
from metagpt.utils.common import import_class
class SimpleEngine(RetrieverQueryEngine):
"""SimpleEngine is designed to be simple and straightforward.
It is a lightweight and easy-to-use search engine that integrates
document reading, embedding, indexing, retrieving, and ranking functionalities
into a single, straightforward workflow. It is designed to quickly set up a
search engine from a collection of documents.
"""
def __init__(
self,
retriever: BaseRetriever,
response_synthesizer: Optional[BaseSynthesizer] = None,
node_postprocessors: Optional[list[BaseNodePostprocessor]] = None,
callback_manager: Optional[CallbackManager] = None,
index: Optional[BaseIndex] = None,
) -> None:
super().__init__(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=node_postprocessors,
callback_manager=callback_manager,
)
self.index = index
@classmethod
def from_docs(
cls,
input_dir: str = None,
input_files: list[str] = None,
transformations: Optional[list[TransformComponent]] = None,
embed_model: BaseEmbedding = None,
llm: LLM = None,
retriever_configs: list[BaseRetrieverConfig] = None,
ranker_configs: list[BaseRankerConfig] = None,
) -> "SimpleEngine":
"""From docs.
Must provide either `input_dir` or `input_files`.
Args:
input_dir: Path to the directory.
            input_files: List of file paths to read (optional; overrides input_dir).
transformations: Parse documents to nodes. Default [SentenceSplitter].
            embed_model: Parse nodes to embedding. Must be supported by llama index. Default OpenAIEmbedding.
            llm: Must be supported by llama index. Default OpenAI.
retriever_configs: Configuration for retrievers. If more than one config, will use SimpleHybridRetriever.
ranker_configs: Configuration for rankers.
"""
if not input_dir and not input_files:
raise ValueError("Must provide either `input_dir` or `input_files`.")
documents = SimpleDirectoryReader(input_dir=input_dir, input_files=input_files).load_data()
cls._fix_document_metadata(documents)
index = VectorStoreIndex.from_documents(
documents=documents,
transformations=transformations or [SentenceSplitter()],
embed_model=cls._resolve_embed_model(embed_model, retriever_configs),
)
return cls._from_index(index, llm=llm, retriever_configs=retriever_configs, ranker_configs=ranker_configs)
@classmethod
def from_objs(
cls,
objs: Optional[list[RAGObject]] = None,
transformations: Optional[list[TransformComponent]] = None,
embed_model: BaseEmbedding = None,
llm: LLM = None,
retriever_configs: list[BaseRetrieverConfig] = None,
ranker_configs: list[BaseRankerConfig] = None,
) -> "SimpleEngine":
"""From objs.
Args:
objs: List of RAGObject.
transformations: Parse documents to nodes. Default [SentenceSplitter].
            embed_model: Parse nodes to embedding. Must be supported by llama index. Default OpenAIEmbedding.
            llm: Must be supported by llama index. Default OpenAI.
retriever_configs: Configuration for retrievers. If more than one config, will use SimpleHybridRetriever.
ranker_configs: Configuration for rankers.
"""
if not objs and any(isinstance(config, BM25RetrieverConfig) for config in retriever_configs):
raise ValueError("In BM25RetrieverConfig, Objs must not be empty.")
objs = objs or []
nodes = [ObjectNode(text=obj.rag_key(), metadata=ObjectNode.get_obj_metadata(obj)) for obj in objs]
index = VectorStoreIndex(
nodes=nodes,
transformations=transformations or [SentenceSplitter()],
embed_model=cls._resolve_embed_model(embed_model, retriever_configs),
)
return cls._from_index(index, llm=llm, retriever_configs=retriever_configs, ranker_configs=ranker_configs)
@classmethod
def from_index(
cls,
index_config: BaseIndexConfig,
embed_model: BaseEmbedding = None,
llm: LLM = None,
retriever_configs: list[BaseRetrieverConfig] = None,
ranker_configs: list[BaseRankerConfig] = None,
) -> "SimpleEngine":
"""Load from previously maintained index by self.persist(), index_config contains persis_path."""
index = get_index(index_config, embed_model=cls._resolve_embed_model(embed_model, [index_config]))
return cls._from_index(index, llm=llm, retriever_configs=retriever_configs, ranker_configs=ranker_configs)
async def asearch(self, content: str, **kwargs) -> str:
"""Inplement tools.SearchInterface"""
return await self.aquery(content)
async def aretrieve(self, query: QueryType) -> list[NodeWithScore]:
"""Allow query to be str."""
query_bundle = QueryBundle(query) if isinstance(query, str) else query
nodes = await super().aretrieve(query_bundle)
self._try_reconstruct_obj(nodes)
return nodes
def add_docs(self, input_files: list[str]):
"""Add docs to retriever. retriever must has add_nodes func."""
self._ensure_retriever_modifiable()
documents = SimpleDirectoryReader(input_files=input_files).load_data()
self._fix_document_metadata(documents)
nodes = run_transformations(documents, transformations=self.index._transformations)
self._save_nodes(nodes)
def add_objs(self, objs: list[RAGObject]):
"""Adds objects to the retriever, storing each object's original form in metadata for future reference."""
self._ensure_retriever_modifiable()
nodes = [ObjectNode(text=obj.rag_key(), metadata=ObjectNode.get_obj_metadata(obj)) for obj in objs]
self._save_nodes(nodes)
def persist(self, persist_dir: Union[str, os.PathLike], **kwargs):
"""Persist."""
self._ensure_retriever_persistable()
self._persist(str(persist_dir), **kwargs)
@classmethod
def _from_index(
cls,
index: BaseIndex,
llm: LLM = None,
retriever_configs: list[BaseRetrieverConfig] = None,
ranker_configs: list[BaseRankerConfig] = None,
) -> "SimpleEngine":
llm = llm or get_rag_llm()
retriever = get_retriever(configs=retriever_configs, index=index) # Default index.as_retriever
rankers = get_rankers(configs=ranker_configs, llm=llm) # Default []
return cls(
retriever=retriever,
node_postprocessors=rankers,
response_synthesizer=get_response_synthesizer(llm=llm),
index=index,
)
def _ensure_retriever_modifiable(self):
self._ensure_retriever_of_type(ModifiableRAGRetriever)
def _ensure_retriever_persistable(self):
self._ensure_retriever_of_type(PersistableRAGRetriever)
def _ensure_retriever_of_type(self, required_type: BaseRetriever):
"""Ensure that self.retriever is required_type, or at least one of its components, if it's a SimpleHybridRetriever.
Args:
required_type: The class that the retriever is expected to be an instance of.
"""
if isinstance(self.retriever, SimpleHybridRetriever):
if not any(isinstance(r, required_type) for r in self.retriever.retrievers):
raise TypeError(
f"Must have at least one retriever of type {required_type.__name__} in SimpleHybridRetriever"
)
if not isinstance(self.retriever, required_type):
raise TypeError(f"The retriever is not of type {required_type.__name__}: {type(self.retriever)}")
def _save_nodes(self, nodes: list[BaseNode]):
self.retriever.add_nodes(nodes)
def _persist(self, persist_dir: str, **kwargs):
self.retriever.persist(persist_dir, **kwargs)
@staticmethod
def _try_reconstruct_obj(nodes: list[NodeWithScore]):
"""If node is object, then dynamically reconstruct object, and save object to node.metadata["obj"]."""
for node in nodes:
if node.metadata.get("is_obj", False):
obj_cls = import_class(node.metadata["obj_cls_name"], node.metadata["obj_mod_name"])
obj_dict = json.loads(node.metadata["obj_json"])
node.metadata["obj"] = obj_cls(**obj_dict)
@staticmethod
def _fix_document_metadata(documents: list[Document]):
"""LlamaIndex keep metadata['file_path'], which is unnecessary, maybe deleted in the near future."""
for doc in documents:
doc.excluded_embed_metadata_keys.append("file_path")
@staticmethod
def _resolve_embed_model(embed_model: BaseEmbedding = None, configs: list[Any] = None) -> BaseEmbedding:
if configs and all(isinstance(c, NoEmbedding) for c in configs):
return MockEmbedding(embed_dim=1)
return embed_model or get_rag_embedding()
| [
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.response_synthesizers.get_response_synthesizer",
"llama_index.core.ingestion.pipeline.run_transformations",
"llama_index.core.embeddings.mock_embed_model.MockEmbedding",
"llama_index.core.schema.QueryBundle",
"llama_index.core.SimpleDirectoryReader"
] | [((7145, 7220), 'llama_index.core.ingestion.pipeline.run_transformations', 'run_transformations', (['documents'], {'transformations': 'self.index._transformations'}), '(documents, transformations=self.index._transformations)\n', (7164, 7220), False, 'from llama_index.core.ingestion.pipeline import run_transformations\n'), ((8091, 8144), 'metagpt.rag.factories.get_retriever', 'get_retriever', ([], {'configs': 'retriever_configs', 'index': 'index'}), '(configs=retriever_configs, index=index)\n', (8104, 8144), False, 'from metagpt.rag.factories import get_index, get_rag_embedding, get_rag_llm, get_rankers, get_retriever\n'), ((8193, 8237), 'metagpt.rag.factories.get_rankers', 'get_rankers', ([], {'configs': 'ranker_configs', 'llm': 'llm'}), '(configs=ranker_configs, llm=llm)\n', (8204, 8237), False, 'from metagpt.rag.factories import get_index, get_rag_embedding, get_rag_llm, get_rankers, get_retriever\n'), ((6663, 6681), 'llama_index.core.schema.QueryBundle', 'QueryBundle', (['query'], {}), '(query)\n', (6674, 6681), False, 'from llama_index.core.schema import BaseNode, Document, NodeWithScore, QueryBundle, QueryType, TransformComponent\n'), ((8057, 8070), 'metagpt.rag.factories.get_rag_llm', 'get_rag_llm', ([], {}), '()\n', (8068, 8070), False, 'from metagpt.rag.factories import get_index, get_rag_embedding, get_rag_llm, get_rankers, get_retriever\n'), ((10657, 10683), 'llama_index.core.embeddings.mock_embed_model.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(1)'}), '(embed_dim=1)\n', (10670, 10683), False, 'from llama_index.core.embeddings.mock_embed_model import MockEmbedding\n'), ((10715, 10734), 'metagpt.rag.factories.get_rag_embedding', 'get_rag_embedding', ([], {}), '()\n', (10732, 10734), False, 'from metagpt.rag.factories import get_index, get_rag_embedding, get_rag_llm, get_rankers, get_retriever\n'), ((3729, 3796), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'input_dir', 'input_files': 'input_files'}), '(input_dir=input_dir, input_files=input_files)\n', (3750, 3796), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((7022, 7068), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'input_files'}), '(input_files=input_files)\n', (7043, 7068), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((8380, 8413), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'llm': 'llm'}), '(llm=llm)\n', (8404, 8413), False, 'from llama_index.core.response_synthesizers import BaseSynthesizer, get_response_synthesizer\n'), ((9956, 10030), 'metagpt.utils.common.import_class', 'import_class', (["node.metadata['obj_cls_name']", "node.metadata['obj_mod_name']"], {}), "(node.metadata['obj_cls_name'], node.metadata['obj_mod_name'])\n", (9968, 10030), False, 'from metagpt.utils.common import import_class\n'), ((10058, 10095), 'json.loads', 'json.loads', (["node.metadata['obj_json']"], {}), "(node.metadata['obj_json'])\n", (10068, 10095), False, 'import json\n'), ((5368, 5400), 'metagpt.rag.schema.ObjectNode.get_obj_metadata', 'ObjectNode.get_obj_metadata', (['obj'], {}), '(obj)\n', (5395, 5400), False, 'from metagpt.rag.schema import BaseIndexConfig, BaseRankerConfig, BaseRetrieverConfig, BM25RetrieverConfig, ObjectNode\n'), ((7518, 7550), 'metagpt.rag.schema.ObjectNode.get_obj_metadata', 'ObjectNode.get_obj_metadata', (['obj'], {}), '(obj)\n', (7545, 7550), False, 'from metagpt.rag.schema import 
BaseIndexConfig, BaseRankerConfig, BaseRetrieverConfig, BM25RetrieverConfig, ObjectNode\n'), ((3986, 4004), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (4002, 4004), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((5526, 5544), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (5542, 5544), False, 'from llama_index.core.node_parser import SentenceSplitter\n')] |
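A minimal usage sketch for the SimpleEngine row above; the import path, the input file, and the question are assumptions, and the default embedding/LLM require OpenAI credentials.
# Hypothetical usage sketch; paths and the question are made up.
import asyncio
from metagpt.rag.engines.simple import SimpleEngine  # import path assumed
async def main() -> None:
    engine = SimpleEngine.from_docs(input_files=["docs/guide.md"])
    print(await engine.asearch("What does the guide cover?"))
asyncio.run(main())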
from collections import ChainMap
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Protocol,
Sequence,
get_args,
runtime_checkable,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponseAsyncGen,
CompletionResponseGen,
MessageRole,
)
from llama_index.core.base.query_pipeline.query import (
InputKeys,
OutputKeys,
QueryComponent,
StringableInput,
validate_and_convert_stringable,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
root_validator,
validator,
)
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.generic_utils import (
messages_to_prompt as generic_messages_to_prompt,
)
from llama_index.core.base.llms.generic_utils import (
prompt_to_messages,
)
from llama_index.core.prompts import BasePromptTemplate, PromptTemplate
from llama_index.core.types import (
BaseOutputParser,
PydanticProgramMode,
TokenAsyncGen,
TokenGen,
)
from llama_index.core.instrumentation.events.llm import (
LLMPredictEndEvent,
LLMPredictStartEvent,
)
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
# NOTE: These two protocols are needed to appease mypy
@runtime_checkable
class MessagesToPromptType(Protocol):
def __call__(self, messages: Sequence[ChatMessage]) -> str:
pass
@runtime_checkable
class CompletionToPromptType(Protocol):
def __call__(self, prompt: str) -> str:
pass
def stream_completion_response_to_tokens(
completion_response_gen: CompletionResponseGen,
) -> TokenGen:
"""Convert a stream completion response to a stream of tokens."""
def gen() -> TokenGen:
for response in completion_response_gen:
yield response.delta or ""
return gen()
def stream_chat_response_to_tokens(
chat_response_gen: ChatResponseGen,
) -> TokenGen:
"""Convert a stream completion response to a stream of tokens."""
def gen() -> TokenGen:
for response in chat_response_gen:
yield response.delta or ""
return gen()
async def astream_completion_response_to_tokens(
completion_response_gen: CompletionResponseAsyncGen,
) -> TokenAsyncGen:
"""Convert a stream completion response to a stream of tokens."""
async def gen() -> TokenAsyncGen:
async for response in completion_response_gen:
yield response.delta or ""
return gen()
async def astream_chat_response_to_tokens(
chat_response_gen: ChatResponseAsyncGen,
) -> TokenAsyncGen:
"""Convert a stream completion response to a stream of tokens."""
async def gen() -> TokenAsyncGen:
async for response in chat_response_gen:
yield response.delta or ""
return gen()
def default_completion_to_prompt(prompt: str) -> str:
return prompt
class LLM(BaseLLM):
system_prompt: Optional[str] = Field(
default=None, description="System prompt for LLM calls."
)
messages_to_prompt: Callable = Field(
description="Function to convert a list of messages to an LLM prompt.",
default=None,
exclude=True,
)
completion_to_prompt: Callable = Field(
description="Function to convert a completion to an LLM prompt.",
default=None,
exclude=True,
)
output_parser: Optional[BaseOutputParser] = Field(
description="Output parser to parse, validate, and correct errors programmatically.",
default=None,
exclude=True,
)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT
# deprecated
query_wrapper_prompt: Optional[BasePromptTemplate] = Field(
description="Query wrapper prompt for LLM calls.",
default=None,
exclude=True,
)
@validator("messages_to_prompt", pre=True)
def set_messages_to_prompt(
cls, messages_to_prompt: Optional[MessagesToPromptType]
) -> MessagesToPromptType:
return messages_to_prompt or generic_messages_to_prompt
@validator("completion_to_prompt", pre=True)
def set_completion_to_prompt(
cls, completion_to_prompt: Optional[CompletionToPromptType]
) -> CompletionToPromptType:
return completion_to_prompt or default_completion_to_prompt
@root_validator
def check_prompts(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if values.get("completion_to_prompt") is None:
values["completion_to_prompt"] = default_completion_to_prompt
if values.get("messages_to_prompt") is None:
values["messages_to_prompt"] = generic_messages_to_prompt
return values
def _log_template_data(
self, prompt: BasePromptTemplate, **prompt_args: Any
) -> None:
template_vars = {
k: v
for k, v in ChainMap(prompt.kwargs, prompt_args).items()
if k in prompt.template_vars
}
with self.callback_manager.event(
CBEventType.TEMPLATING,
payload={
EventPayload.TEMPLATE: prompt.get_template(llm=self),
EventPayload.TEMPLATE_VARS: template_vars,
EventPayload.SYSTEM_PROMPT: self.system_prompt,
EventPayload.QUERY_WRAPPER_PROMPT: self.query_wrapper_prompt,
},
):
pass
def _get_prompt(self, prompt: BasePromptTemplate, **prompt_args: Any) -> str:
formatted_prompt = prompt.format(
llm=self,
messages_to_prompt=self.messages_to_prompt,
completion_to_prompt=self.completion_to_prompt,
**prompt_args,
)
if self.output_parser is not None:
formatted_prompt = self.output_parser.format(formatted_prompt)
return self._extend_prompt(formatted_prompt)
def _get_messages(
self, prompt: BasePromptTemplate, **prompt_args: Any
) -> List[ChatMessage]:
messages = prompt.format_messages(llm=self, **prompt_args)
if self.output_parser is not None:
messages = self.output_parser.format_messages(messages)
return self._extend_messages(messages)
def structured_predict(
self,
output_cls: BaseModel,
prompt: PromptTemplate,
**prompt_args: Any,
) -> BaseModel:
from llama_index.core.program.utils import get_program_for_llm
program = get_program_for_llm(
output_cls,
prompt,
self,
pydantic_program_mode=self.pydantic_program_mode,
)
return program(**prompt_args)
async def astructured_predict(
self,
output_cls: BaseModel,
prompt: PromptTemplate,
**prompt_args: Any,
) -> BaseModel:
from llama_index.core.program.utils import get_program_for_llm
program = get_program_for_llm(
output_cls,
prompt,
self,
pydantic_program_mode=self.pydantic_program_mode,
)
return await program.acall(**prompt_args)
def _parse_output(self, output: str) -> str:
if self.output_parser is not None:
return str(self.output_parser.parse(output))
return output
@dispatcher.span
def predict(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> str:
"""Predict."""
dispatcher.event(LLMPredictStartEvent())
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = self.chat(messages)
output = chat_response.message.content or ""
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
response = self.complete(formatted_prompt, formatted=True)
output = response.text
dispatcher.event(LLMPredictEndEvent())
return self._parse_output(output)
def stream(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> TokenGen:
"""Stream."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = self.stream_chat(messages)
stream_tokens = stream_chat_response_to_tokens(chat_response)
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
stream_response = self.stream_complete(formatted_prompt, formatted=True)
stream_tokens = stream_completion_response_to_tokens(stream_response)
if prompt.output_parser is not None or self.output_parser is not None:
raise NotImplementedError("Output parser is not supported for streaming.")
return stream_tokens
@dispatcher.span
async def apredict(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> str:
"""Async predict."""
dispatcher.event(LLMPredictStartEvent())
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = await self.achat(messages)
output = chat_response.message.content or ""
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
response = await self.acomplete(formatted_prompt, formatted=True)
output = response.text
dispatcher.event(LLMPredictEndEvent())
return self._parse_output(output)
async def astream(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> TokenAsyncGen:
"""Async stream."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = await self.astream_chat(messages)
stream_tokens = await astream_chat_response_to_tokens(chat_response)
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
stream_response = await self.astream_complete(
formatted_prompt, formatted=True
)
stream_tokens = await astream_completion_response_to_tokens(stream_response)
if prompt.output_parser is not None or self.output_parser is not None:
raise NotImplementedError("Output parser is not supported for streaming.")
return stream_tokens
def _extend_prompt(
self,
formatted_prompt: str,
) -> str:
"""Add system and query wrapper prompts to base prompt."""
extended_prompt = formatted_prompt
if self.system_prompt:
extended_prompt = self.system_prompt + "\n\n" + extended_prompt
if self.query_wrapper_prompt:
extended_prompt = self.query_wrapper_prompt.format(
query_str=extended_prompt
)
return extended_prompt
def _extend_messages(self, messages: List[ChatMessage]) -> List[ChatMessage]:
"""Add system prompt to chat message list."""
if self.system_prompt:
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=self.system_prompt),
*messages,
]
return messages
def _as_query_component(self, **kwargs: Any) -> QueryComponent:
"""Return query component."""
if self.metadata.is_chat_model:
return LLMChatComponent(llm=self, **kwargs)
else:
return LLMCompleteComponent(llm=self, **kwargs)
class BaseLLMComponent(QueryComponent):
"""Base LLM component."""
llm: LLM = Field(..., description="LLM")
streaming: bool = Field(default=False, description="Streaming mode")
class Config:
arbitrary_types_allowed = True
def set_callback_manager(self, callback_manager: Any) -> None:
"""Set callback manager."""
self.llm.callback_manager = callback_manager
class LLMCompleteComponent(BaseLLMComponent):
"""LLM completion component."""
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
if "prompt" not in input:
raise ValueError("Prompt must be in input dict.")
# do special check to see if prompt is a list of chat messages
if isinstance(input["prompt"], get_args(List[ChatMessage])):
input["prompt"] = self.llm.messages_to_prompt(input["prompt"])
input["prompt"] = validate_and_convert_stringable(input["prompt"])
else:
input["prompt"] = validate_and_convert_stringable(input["prompt"])
input["prompt"] = self.llm.completion_to_prompt(input["prompt"])
return input
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
        # non-trivial to figure out how to support chat/complete/etc.
prompt = kwargs["prompt"]
# ignore all other kwargs for now
if self.streaming:
response = self.llm.stream_complete(prompt, formatted=True)
else:
response = self.llm.complete(prompt, formatted=True)
return {"output": response}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
        # non-trivial to figure out how to support chat/complete/etc.
prompt = kwargs["prompt"]
# ignore all other kwargs for now
response = await self.llm.acomplete(prompt, formatted=True)
return {"output": response}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
# TODO: support only complete for now
return InputKeys.from_keys({"prompt"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
class LLMChatComponent(BaseLLMComponent):
"""LLM chat component."""
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
if "messages" not in input:
raise ValueError("Messages must be in input dict.")
# if `messages` is a string, convert to a list of chat message
if isinstance(input["messages"], get_args(StringableInput)):
input["messages"] = validate_and_convert_stringable(input["messages"])
input["messages"] = prompt_to_messages(str(input["messages"]))
for message in input["messages"]:
if not isinstance(message, ChatMessage):
raise ValueError("Messages must be a list of ChatMessage")
return input
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
        # non-trivial to figure out how to support chat/complete/etc.
messages = kwargs["messages"]
if self.streaming:
response = self.llm.stream_chat(messages)
else:
response = self.llm.chat(messages)
return {"output": response}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
        # non-trivial to figure out how to support chat/complete/etc.
messages = kwargs["messages"]
if self.streaming:
response = await self.llm.astream_chat(messages)
else:
response = await self.llm.achat(messages)
return {"output": response}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
# TODO: support only complete for now
return InputKeys.from_keys({"messages"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
| [
"llama_index.core.bridge.pydantic.validator",
"llama_index.core.base.query_pipeline.query.InputKeys.from_keys",
"llama_index.core.base.query_pipeline.query.OutputKeys.from_keys",
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.instrumentation.events.llm.LLMPredictStartEvent",
"llama_index.core.program.utils.get_program_for_llm",
"llama_index.core.instrumentation.events.llm.LLMPredictEndEvent",
"llama_index.core.base.llms.types.ChatMessage",
"llama_index.core.base.query_pipeline.query.validate_and_convert_stringable"
] | [((1325, 1360), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (1350, 1360), True, 'import llama_index.core.instrumentation as instrument\n'), ((3081, 3144), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""System prompt for LLM calls."""'}), "(default=None, description='System prompt for LLM calls.')\n", (3086, 3144), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3194, 3309), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Function to convert a list of messages to an LLM prompt."""', 'default': 'None', 'exclude': '(True)'}), "(description=\n 'Function to convert a list of messages to an LLM prompt.', default=\n None, exclude=True)\n", (3199, 3309), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3368, 3471), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Function to convert a completion to an LLM prompt."""', 'default': 'None', 'exclude': '(True)'}), "(description='Function to convert a completion to an LLM prompt.',\n default=None, exclude=True)\n", (3373, 3471), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3547, 3675), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Output parser to parse, validate, and correct errors programmatically."""', 'default': 'None', 'exclude': '(True)'}), "(description=\n 'Output parser to parse, validate, and correct errors programmatically.',\n default=None, exclude=True)\n", (3552, 3675), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3850, 3938), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Query wrapper prompt for LLM calls."""', 'default': 'None', 'exclude': '(True)'}), "(description='Query wrapper prompt for LLM calls.', default=None,\n exclude=True)\n", (3855, 3938), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((3972, 4013), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""messages_to_prompt"""'], {'pre': '(True)'}), "('messages_to_prompt', pre=True)\n", (3981, 4013), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((4211, 4254), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""completion_to_prompt"""'], {'pre': '(True)'}), "('completion_to_prompt', pre=True)\n", (4220, 4254), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((11933, 11962), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""LLM"""'}), "(..., description='LLM')\n", (11938, 11962), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((11985, 12035), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Streaming mode"""'}), "(default=False, description='Streaming mode')\n", (11990, 12035), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, root_validator, validator\n'), ((6558, 6658), 'llama_index.core.program.utils.get_program_for_llm', 'get_program_for_llm', (['output_cls', 'prompt', 'self'], {'pydantic_program_mode': 'self.pydantic_program_mode'}), '(output_cls, prompt, self, 
pydantic_program_mode=self.\n pydantic_program_mode)\n', (6577, 6658), False, 'from llama_index.core.program.utils import get_program_for_llm\n'), ((7003, 7103), 'llama_index.core.program.utils.get_program_for_llm', 'get_program_for_llm', (['output_cls', 'prompt', 'self'], {'pydantic_program_mode': 'self.pydantic_program_mode'}), '(output_cls, prompt, self, pydantic_program_mode=self.\n pydantic_program_mode)\n', (7022, 7103), False, 'from llama_index.core.program.utils import get_program_for_llm\n'), ((14070, 14101), 'llama_index.core.base.query_pipeline.query.InputKeys.from_keys', 'InputKeys.from_keys', (["{'prompt'}"], {}), "({'prompt'})\n", (14089, 14101), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((14200, 14232), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (14220, 14232), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((16026, 16059), 'llama_index.core.base.query_pipeline.query.InputKeys.from_keys', 'InputKeys.from_keys', (["{'messages'}"], {}), "({'messages'})\n", (16045, 16059), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((16158, 16190), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (16178, 16190), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((7561, 7583), 'llama_index.core.instrumentation.events.llm.LLMPredictStartEvent', 'LLMPredictStartEvent', ([], {}), '()\n', (7581, 7583), False, 'from llama_index.core.instrumentation.events.llm import LLMPredictEndEvent, LLMPredictStartEvent\n'), ((8068, 8088), 'llama_index.core.instrumentation.events.llm.LLMPredictEndEvent', 'LLMPredictEndEvent', ([], {}), '()\n', (8086, 8088), False, 'from llama_index.core.instrumentation.events.llm import LLMPredictEndEvent, LLMPredictStartEvent\n'), ((9199, 9221), 'llama_index.core.instrumentation.events.llm.LLMPredictStartEvent', 'LLMPredictStartEvent', ([], {}), '()\n', (9219, 9221), False, 'from llama_index.core.instrumentation.events.llm import LLMPredictEndEvent, LLMPredictStartEvent\n'), ((9720, 9740), 'llama_index.core.instrumentation.events.llm.LLMPredictEndEvent', 'LLMPredictEndEvent', ([], {}), '()\n', (9738, 9740), False, 'from llama_index.core.instrumentation.events.llm import LLMPredictEndEvent, LLMPredictStartEvent\n'), ((12688, 12715), 'typing.get_args', 'get_args', (['List[ChatMessage]'], {}), '(List[ChatMessage])\n', (12696, 12715), False, 'from typing import Any, Callable, Dict, List, Optional, Protocol, Sequence, get_args, runtime_checkable\n'), ((12823, 12871), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['prompt']"], {}), "(input['prompt'])\n", (12854, 12871), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((12916, 12964), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['prompt']"], {}), "(input['prompt'])\n", (12947, 
12964), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((14666, 14691), 'typing.get_args', 'get_args', (['StringableInput'], {}), '(StringableInput)\n', (14674, 14691), False, 'from typing import Any, Callable, Dict, List, Optional, Protocol, Sequence, get_args, runtime_checkable\n'), ((14726, 14776), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['messages']"], {}), "(input['messages'])\n", (14757, 14776), False, 'from llama_index.core.base.query_pipeline.query import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((11437, 11501), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': 'self.system_prompt'}), '(role=MessageRole.SYSTEM, content=self.system_prompt)\n', (11448, 11501), False, 'from llama_index.core.base.llms.types import ChatMessage, ChatResponseAsyncGen, ChatResponseGen, CompletionResponseAsyncGen, CompletionResponseGen, MessageRole\n'), ((4995, 5031), 'collections.ChainMap', 'ChainMap', (['prompt.kwargs', 'prompt_args'], {}), '(prompt.kwargs, prompt_args)\n', (5003, 5031), False, 'from collections import ChainMap\n')] |
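A short sketch of how the predict()/stream() flow defined above is typically driven; the OpenAI LLM and the model name are assumptions standing in for any concrete LLM subclass.
# Hypothetical usage sketch; any concrete LLM implementation works here.
from llama_index.core import PromptTemplate
from llama_index.llms.openai import OpenAI  # assumed concrete LLM package
llm = OpenAI(model="gpt-3.5-turbo")  # illustrative model name
prompt = PromptTemplate("Summarize {topic} in one sentence.")
# predict() formats the prompt, routes to chat() or complete(), and parses the output.
print(llm.predict(prompt, topic="vector databases"))
# stream() yields plain string tokens via the stream_*_response_to_tokens helpers above.
for token in llm.stream(prompt, topic="vector databases"):
    print(token, end="")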
import os
from typing import Optional, Dict
import openai
import pandas as pd
import llama_index
from llama_index.llms.openai import OpenAI
from llama_index.readers.schema.base import Document
from llama_index.readers import SimpleWebPageReader
from llama_index.prompts import PromptTemplate
from llama_index import ServiceContext, StorageContext, load_index_from_storage
from llama_index import LLMPredictor, OpenAIEmbedding
from llama_index.indices.vector_store.base import VectorStore
from llama_hub.github_repo import GithubClient, GithubRepositoryReader
from llama_hub.youtube_transcript import YoutubeTranscriptReader, is_youtube_video
from mindsdb.integrations.libs.base import BaseMLEngine
from mindsdb.utilities.config import Config
from mindsdb.utilities.security import is_private_url
from mindsdb.integrations.handlers.llama_index_handler import config
from mindsdb.integrations.handlers.llama_index_handler.github_loader_helper import (
_get_github_token,
_get_filter_file_extensions,
_get_filter_directories,
)
from mindsdb.integrations.utilities.handler_utils import get_api_key
def _validate_prompt_template(prompt_template: str):
if "{context_str}" not in prompt_template or "{query_str}" not in prompt_template:
raise Exception(
"Provided prompt template is invalid, missing `{context_str}`, `{query_str}`. Please ensure both placeholders are present and try again."
) # noqa
class LlamaIndexHandler(BaseMLEngine):
"""Integration with the LlamaIndex data framework for LLM applications."""
name = "llama_index"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generative = True
self.default_index_class = "GPTVectorStoreIndex"
self.supported_index_class = ["GPTVectorStoreIndex", "VectorStoreIndex"]
self.default_reader = "DFReader"
self.supported_reader = [
"DFReader",
"SimpleWebPageReader",
"GithubRepositoryReader",
"YoutubeTranscriptReader",
]
@staticmethod
def create_validation(target, args=None, **kwargs):
reader = args["using"].get("reader", "DFReader")
if reader not in config.data_loaders:
raise Exception(
f"Invalid reader argument. Please use one of {config.data_loaders.keys()}"
)
config_dict = config.data_loaders[reader]
missing_keys = [key for key in config_dict if key not in args["using"]]
if missing_keys:
raise Exception(f"{reader} requires {missing_keys} arguments")
if "prompt_template" in args["using"]:
_validate_prompt_template(args["using"]["prompt_template"])
if args["using"].get("mode") == "conversational":
for param in ("user_column", "assistant_column"):
if param not in args["using"]:
raise Exception(f"Conversational mode requires {param} parameter")
def create(
self,
target: str,
df: Optional[pd.DataFrame] = None,
args: Optional[Dict] = None,
) -> None:
if "using" not in args:
raise Exception(
"LlamaIndex engine requires a USING clause! Refer to its documentation for more details."
)
if "index_class" not in args["using"]:
args["using"]["index_class"] = self.default_index_class
elif args["using"]["index_class"] not in self.supported_index_class:
raise Exception(
f"Invalid index class argument. Please use one of {self.supported_index_class}"
)
if "reader" not in args["using"]:
args["using"]["reader"] = self.default_reader
elif args["using"]["reader"] not in self.supported_reader:
raise Exception(
f"Invalid operation mode. Please use one of {self.supported_reader}"
)
# workaround to create llama model without input data
if df is None or df.empty:
df = pd.DataFrame([{"text": ""}])
if args["using"]["reader"] == "DFReader":
dstrs = df.apply(
lambda x: ", ".join(
[f"{col}: {str(entry)}" for col, entry in zip(df.columns, x)]
),
axis=1,
)
reader = list(map(lambda x: Document(text=x), dstrs.tolist()))
elif args["using"]["reader"] == "SimpleWebPageReader":
url = args["using"]["source_url_link"]
config = Config()
is_cloud = config.get("cloud", False)
if is_cloud and is_private_url(url):
raise Exception(f"URL is private: {url}")
reader = SimpleWebPageReader(html_to_text=True).load_data([url])
elif args["using"]["reader"] == "GithubRepositoryReader":
engine_storage = self.engine_storage
key = "GITHUB_TOKEN"
github_token = get_api_key(
key, args["using"], engine_storage, strict=False
)
if github_token is None:
github_token = get_api_key(
key.lower(),
args["using"],
engine_storage,
strict=True,
)
github_client = GithubClient(github_token)
owner = args["using"]["owner"]
repo = args["using"]["repo"]
filter_file_extensions = _get_filter_file_extensions(args["using"])
filter_directories = _get_filter_directories(args["using"])
reader = GithubRepositoryReader(
github_client,
owner=owner,
repo=repo,
verbose=True,
filter_file_extensions=filter_file_extensions,
filter_directories=filter_directories,
).load_data(branch=args["using"].get("branch", "main"))
elif args["using"]["reader"] == "YoutubeTranscriptReader":
ytlinks = args["using"]["ytlinks"]
for link in ytlinks:
if not is_youtube_video(link):
raise Exception(f"Invalid youtube link: {link}")
reader = YoutubeTranscriptReader().load_data(ytlinks)
else:
raise Exception(
f"Invalid operation mode. Please use one of {self.supported_reader}."
)
self.model_storage.json_set("args", args)
index = self._setup_index(reader)
path = self.model_storage.folder_get("context")
index.storage_context.persist(persist_dir=path)
self.model_storage.folder_sync("context")
def update(self, args) -> None:
prompt_template = args["using"].get(
"prompt_template", args.get("prompt_template", None)
)
if prompt_template is not None:
_validate_prompt_template(prompt_template)
args_cur = self.model_storage.json_get("args")
args_cur["using"].update(args["using"])
# check new set of arguments
self.create_validation(None, args_cur)
self.model_storage.json_set("args", args_cur)
def predict(
self, df: Optional[pd.DataFrame] = None, args: Optional[Dict] = None
) -> pd.DataFrame:
pred_args = args["predict_params"] if args else {}
args = self.model_storage.json_get("args")
engine_kwargs = {}
if args["using"].get("mode") == "conversational":
user_column = args["using"]["user_column"]
assistant_column = args["using"]["assistant_column"]
messages = []
for row in df[:-1].to_dict("records"):
messages.append(f"user: {row[user_column]}")
messages.append(f"assistant: {row[assistant_column]}")
conversation = "\n".join(messages)
questions = [df.iloc[-1][user_column]]
if "prompt" in pred_args and pred_args["prompt"] is not None:
user_prompt = pred_args["prompt"]
else:
user_prompt = args["using"].get("prompt", "")
prompt_template = (
f"{user_prompt}\n"
f"---------------------\n"
f"We have provided context information below. \n"
f"{{context_str}}\n"
f"---------------------\n"
f"This is previous conversation history:\n"
f"{conversation}\n"
f"---------------------\n"
f"Given this information, please answer the question: {{query_str}}"
)
engine_kwargs["text_qa_template"] = PromptTemplate(prompt_template)
else:
input_column = args["using"].get("input_column", None)
prompt_template = args["using"].get(
"prompt_template", args.get("prompt_template", None)
)
if prompt_template is not None:
_validate_prompt_template(prompt_template)
engine_kwargs["text_qa_template"] = PromptTemplate(prompt_template)
if input_column is None:
raise Exception(
f"`input_column` must be provided at model creation time or through USING clause when predicting. Please try again."
) # noqa
if input_column not in df.columns:
raise Exception(
f'Column "{input_column}" not found in input data! Please try again.'
)
questions = df[input_column]
index_path = self.model_storage.folder_get("context")
storage_context = StorageContext.from_defaults(persist_dir=index_path)
service_context = self._get_service_context()
index = load_index_from_storage(
storage_context, service_context=service_context
)
query_engine = index.as_query_engine(**engine_kwargs)
results = []
for question in questions:
query_results = query_engine.query(
question
) # TODO: provide extra_info in explain_target col
results.append(query_results.response)
result_df = pd.DataFrame(
{"question": questions, args["target"]: results}
) # result_df['answer'].tolist()
return result_df
def _get_service_context(self):
args = self.model_storage.json_get("args")
engine_storage = self.engine_storage
openai_api_key = get_api_key('openai', args["using"], engine_storage, strict=True)
llm_kwargs = {"openai_api_key": openai_api_key}
if "temperature" in args["using"]:
llm_kwargs["temperature"] = args["using"]["temperature"]
if "model_name" in args["using"]:
llm_kwargs["model_name"] = args["using"]["model_name"]
if "max_tokens" in args["using"]:
llm_kwargs["max_tokens"] = args["using"]["max_tokens"]
llm = OpenAI(**llm_kwargs) # TODO: all usual params should go here
embed_model = OpenAIEmbedding(api_key=openai_api_key)
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model
)
return service_context
def _setup_index(self, documents):
args = self.model_storage.json_get("args")
indexer: VectorStore = getattr(llama_index, args["using"]["index_class"])
index = indexer.from_documents(
documents, service_context=self._get_service_context()
)
return index
| [
"llama_index.readers.SimpleWebPageReader",
"llama_index.llms.openai.OpenAI",
"llama_index.ServiceContext.from_defaults",
"llama_index.OpenAIEmbedding",
"llama_index.prompts.PromptTemplate",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.readers.schema.base.Document"
] | [((9647, 9699), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'index_path'}), '(persist_dir=index_path)\n', (9675, 9699), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((9770, 9843), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (9793, 9843), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((10195, 10257), 'pandas.DataFrame', 'pd.DataFrame', (["{'question': questions, args['target']: results}"], {}), "({'question': questions, args['target']: results})\n", (10207, 10257), True, 'import pandas as pd\n'), ((10496, 10561), 'mindsdb.integrations.utilities.handler_utils.get_api_key', 'get_api_key', (['"""openai"""', "args['using']", 'engine_storage'], {'strict': '(True)'}), "('openai', args['using'], engine_storage, strict=True)\n", (10507, 10561), False, 'from mindsdb.integrations.utilities.handler_utils import get_api_key\n'), ((10964, 10984), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {}), '(**llm_kwargs)\n', (10970, 10984), False, 'from llama_index.llms.openai import OpenAI\n'), ((11048, 11087), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'api_key': 'openai_api_key'}), '(api_key=openai_api_key)\n', (11063, 11087), False, 'from llama_index import LLMPredictor, OpenAIEmbedding\n'), ((11114, 11176), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (11142, 11176), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((4055, 4083), 'pandas.DataFrame', 'pd.DataFrame', (["[{'text': ''}]"], {}), "([{'text': ''}])\n", (4067, 4083), True, 'import pandas as pd\n'), ((8659, 8690), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['prompt_template'], {}), '(prompt_template)\n', (8673, 8690), False, 'from llama_index.prompts import PromptTemplate\n'), ((4552, 4560), 'mindsdb.utilities.config.Config', 'Config', ([], {}), '()\n', (4558, 4560), False, 'from mindsdb.utilities.config import Config\n'), ((4584, 4610), 'mindsdb.integrations.handlers.llama_index_handler.config.get', 'config.get', (['"""cloud"""', '(False)'], {}), "('cloud', False)\n", (4594, 4610), False, 'from mindsdb.integrations.handlers.llama_index_handler import config\n'), ((9061, 9092), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['prompt_template'], {}), '(prompt_template)\n', (9075, 9092), False, 'from llama_index.prompts import PromptTemplate\n'), ((4639, 4658), 'mindsdb.utilities.security.is_private_url', 'is_private_url', (['url'], {}), '(url)\n', (4653, 4658), False, 'from mindsdb.utilities.security import is_private_url\n'), ((4972, 5033), 'mindsdb.integrations.utilities.handler_utils.get_api_key', 'get_api_key', (['key', "args['using']", 'engine_storage'], {'strict': '(False)'}), "(key, args['using'], engine_storage, strict=False)\n", (4983, 5033), False, 'from mindsdb.integrations.utilities.handler_utils import get_api_key\n'), ((5329, 5355), 'llama_hub.github_repo.GithubClient', 'GithubClient', (['github_token'], {}), '(github_token)\n', (5341, 5355), False, 'from llama_hub.github_repo import GithubClient, GithubRepositoryReader\n'), ((5477, 5519), 'mindsdb.integrations.handlers.llama_index_handler.github_loader_helper._get_filter_file_extensions', 
'_get_filter_file_extensions', (["args['using']"], {}), "(args['using'])\n", (5504, 5519), False, 'from mindsdb.integrations.handlers.llama_index_handler.github_loader_helper import _get_github_token, _get_filter_file_extensions, _get_filter_directories\n'), ((5553, 5591), 'mindsdb.integrations.handlers.llama_index_handler.github_loader_helper._get_filter_directories', '_get_filter_directories', (["args['using']"], {}), "(args['using'])\n", (5576, 5591), False, 'from mindsdb.integrations.handlers.llama_index_handler.github_loader_helper import _get_github_token, _get_filter_file_extensions, _get_filter_directories\n'), ((2335, 2361), 'mindsdb.integrations.handlers.llama_index_handler.config.data_loaders.keys', 'config.data_loaders.keys', ([], {}), '()\n', (2359, 2361), False, 'from mindsdb.integrations.handlers.llama_index_handler import config\n'), ((4381, 4397), 'llama_index.readers.schema.base.Document', 'Document', ([], {'text': 'x'}), '(text=x)\n', (4389, 4397), False, 'from llama_index.readers.schema.base import Document\n'), ((4740, 4778), 'llama_index.readers.SimpleWebPageReader', 'SimpleWebPageReader', ([], {'html_to_text': '(True)'}), '(html_to_text=True)\n', (4759, 4778), False, 'from llama_index.readers import SimpleWebPageReader\n'), ((5614, 5784), 'llama_hub.github_repo.GithubRepositoryReader', 'GithubRepositoryReader', (['github_client'], {'owner': 'owner', 'repo': 'repo', 'verbose': '(True)', 'filter_file_extensions': 'filter_file_extensions', 'filter_directories': 'filter_directories'}), '(github_client, owner=owner, repo=repo, verbose=True,\n filter_file_extensions=filter_file_extensions, filter_directories=\n filter_directories)\n', (5636, 5784), False, 'from llama_hub.github_repo import GithubClient, GithubRepositoryReader\n'), ((6112, 6134), 'llama_hub.youtube_transcript.is_youtube_video', 'is_youtube_video', (['link'], {}), '(link)\n', (6128, 6134), False, 'from llama_hub.youtube_transcript import YoutubeTranscriptReader, is_youtube_video\n'), ((6226, 6251), 'llama_hub.youtube_transcript.YoutubeTranscriptReader', 'YoutubeTranscriptReader', ([], {}), '()\n', (6249, 6251), False, 'from llama_hub.youtube_transcript import YoutubeTranscriptReader, is_youtube_video\n')] |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.bridge.pydantic import Field, PrivateAttr
from llama_index.callbacks import CallbackManager
from llama_index.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS
from llama_index.llms.base import (
ChatMessage,
ChatResponse,
CompletionResponse,
LLMMetadata,
llm_chat_callback,
llm_completion_callback,
)
from llama_index.llms.custom import CustomLLM
from llama_index.llms.generic_utils import completion_response_to_chat_response
from llama_index.llms.generic_utils import (
messages_to_prompt as generic_messages_to_prompt,
)
from transformers import LlamaTokenizer
import gc
import json
import torch
import numpy as np
from tensorrt_llm.runtime import ModelConfig, SamplingConfig
import tensorrt_llm
from pathlib import Path
import uuid
import time
EOS_TOKEN = 2
PAD_TOKEN = 2
class TrtLlmAPI(CustomLLM):
model_path: Optional[str] = Field(
description="The path to the trt engine."
)
temperature: float = Field(description="The temperature to use for sampling.")
max_new_tokens: int = Field(description="The maximum number of tokens to generate.")
context_window: int = Field(
description="The maximum number of context tokens for the model."
)
messages_to_prompt: Callable = Field(
description="The function to convert messages to a prompt.", exclude=True
)
completion_to_prompt: Callable = Field(
description="The function to convert a completion to a prompt.", exclude=True
)
generate_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Kwargs used for generation."
)
model_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Kwargs used for model initialization."
)
verbose: bool = Field(description="Whether to print verbose output.")
_model: Any = PrivateAttr()
_model_config: Any = PrivateAttr()
_tokenizer: Any = PrivateAttr()
_max_new_tokens = PrivateAttr()
_sampling_config = PrivateAttr()
_verbose = PrivateAttr()
def __init__(
self,
model_path: Optional[str] = None,
engine_name: Optional[str] = None,
tokenizer_dir: Optional[str] = None,
temperature: float = 0.1,
max_new_tokens: int = DEFAULT_NUM_OUTPUTS,
context_window: int = DEFAULT_CONTEXT_WINDOW,
messages_to_prompt: Optional[Callable] = None,
completion_to_prompt: Optional[Callable] = None,
callback_manager: Optional[CallbackManager] = None,
generate_kwargs: Optional[Dict[str, Any]] = None,
model_kwargs: Optional[Dict[str, Any]] = None,
verbose: bool = False
) -> None:
model_kwargs = model_kwargs or {}
model_kwargs.update({"n_ctx": context_window, "verbose": verbose})
self._max_new_tokens = max_new_tokens
self._verbose = verbose
# check if model is cached
if model_path is not None:
if not os.path.exists(model_path):
raise ValueError(
"Provided model path does not exist. "
"Please check the path or provide a model_url to download."
)
else:
engine_dir = model_path
engine_dir_path = Path(engine_dir)
config_path = engine_dir_path / 'config.json'
# config function
with open(config_path, 'r') as f:
config = json.load(f)
use_gpt_attention_plugin = config['plugin_config']['gpt_attention_plugin']
remove_input_padding = config['plugin_config']['remove_input_padding']
tp_size = config['builder_config']['tensor_parallel']
pp_size = config['builder_config']['pipeline_parallel']
world_size = tp_size * pp_size
assert world_size == tensorrt_llm.mpi_world_size(), \
f'Engine world size ({world_size}) != Runtime world size ({tensorrt_llm.mpi_world_size()})'
num_heads = config['builder_config']['num_heads'] // tp_size
hidden_size = config['builder_config']['hidden_size'] // tp_size
vocab_size = config['builder_config']['vocab_size']
num_layers = config['builder_config']['num_layers']
num_kv_heads = config['builder_config'].get('num_kv_heads', num_heads)
paged_kv_cache = config['plugin_config']['paged_kv_cache']
if config['builder_config'].get('multi_query_mode', False):
tensorrt_llm.logger.warning(
"`multi_query_mode` config is deprecated. Please rebuild the engine."
)
num_kv_heads = 1
num_kv_heads = (num_kv_heads + tp_size - 1) // tp_size
self._model_config = ModelConfig(num_heads=num_heads,
num_kv_heads=num_kv_heads,
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
gpt_attention_plugin=use_gpt_attention_plugin,
paged_kv_cache=paged_kv_cache,
remove_input_padding=remove_input_padding)
assert pp_size == 1, 'Python runtime does not support pipeline parallelism'
world_size = tp_size * pp_size
runtime_rank = tensorrt_llm.mpi_rank()
runtime_mapping = tensorrt_llm.Mapping(world_size,
runtime_rank,
tp_size=tp_size,
pp_size=pp_size)
torch.cuda.set_device(runtime_rank % runtime_mapping.gpus_per_node)
self._tokenizer = LlamaTokenizer.from_pretrained(tokenizer_dir, legacy=False)
self._sampling_config = SamplingConfig(end_id=EOS_TOKEN,
pad_id=PAD_TOKEN,
num_beams=1,
temperature=temperature)
serialize_path = engine_dir_path / engine_name
with open(serialize_path, 'rb') as f:
engine_buffer = f.read()
decoder = tensorrt_llm.runtime.GenerationSession(self._model_config,
engine_buffer,
runtime_mapping,
debug_mode=False)
self._model = decoder
messages_to_prompt = messages_to_prompt or generic_messages_to_prompt
completion_to_prompt = completion_to_prompt or (lambda x: x)
generate_kwargs = generate_kwargs or {}
generate_kwargs.update(
{"temperature": temperature, "max_tokens": max_new_tokens}
)
super().__init__(
model_path=model_path,
temperature=temperature,
context_window=context_window,
max_new_tokens=max_new_tokens,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
callback_manager=callback_manager,
generate_kwargs=generate_kwargs,
model_kwargs=model_kwargs,
verbose=verbose,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "TrtLlmAPI"
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
context_window=self.context_window,
num_output=self.max_new_tokens,
model_name=self.model_path,
)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
prompt = self.messages_to_prompt(messages)
completion_response = self.complete(prompt, formatted=True, **kwargs)
return completion_response_to_chat_response(completion_response)
@llm_completion_callback()
def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
self.generate_kwargs.update({"stream": False})
is_formatted = kwargs.pop("formatted", False)
if not is_formatted:
prompt = self.completion_to_prompt(prompt)
input_text = prompt
input_ids, input_lengths = self.parse_input(input_text, self._tokenizer,
EOS_TOKEN,
self._model_config)
max_input_length = torch.max(input_lengths).item()
self._model.setup(input_lengths.size(0), max_input_length, self._max_new_tokens, 1) # beam size is set to 1
if self._verbose:
start_time = time.time()
output_ids = self._model.decode(input_ids, input_lengths, self._sampling_config)
torch.cuda.synchronize()
elapsed_time = None
if self._verbose:
end_time = time.time()
elapsed_time = end_time - start_time
output_txt, output_token_ids = self.get_output(output_ids,
input_lengths,
self._max_new_tokens,
self._tokenizer)
if self._verbose:
print(f"Input context length : {input_ids.shape[1]}")
print(f"Inference time : {elapsed_time:.2f} seconds")
print(f"Output context length : {len(output_token_ids)} ")
print(f"Inference token/sec : {(len(output_token_ids) / elapsed_time):2f}")
# call garbage collected after inference
torch.cuda.empty_cache()
gc.collect()
return CompletionResponse(text=output_txt, raw=self.generate_completion_dict(output_txt))
def parse_input(self, input_text: str, tokenizer, end_id: int,
remove_input_padding: bool):
input_tokens = []
input_tokens.append(
tokenizer.encode(input_text, add_special_tokens=False))
input_lengths = torch.tensor([len(x) for x in input_tokens],
dtype=torch.int32,
device='cuda')
if remove_input_padding:
input_ids = np.concatenate(input_tokens)
input_ids = torch.tensor(input_ids, dtype=torch.int32,
device='cuda').unsqueeze(0)
else:
input_ids = torch.nested.to_padded_tensor(
torch.nested.nested_tensor(input_tokens, dtype=torch.int32),
end_id).cuda()
return input_ids, input_lengths
def remove_extra_eos_ids(self, outputs):
outputs.reverse()
while outputs and outputs[0] == 2:
outputs.pop(0)
outputs.reverse()
outputs.append(2)
return outputs
def get_output(self, output_ids, input_lengths, max_output_len, tokenizer):
num_beams = output_ids.size(1)
output_text = ""
outputs = None
for b in range(input_lengths.size(0)):
for beam in range(num_beams):
output_begin = input_lengths[b]
output_end = input_lengths[b] + max_output_len
outputs = output_ids[b][beam][output_begin:output_end].tolist()
outputs = self.remove_extra_eos_ids(outputs)
output_text = tokenizer.decode(outputs)
return output_text, outputs
def generate_completion_dict(self, text_str):
"""
Generate a dictionary for text completion details.
Returns:
dict: A dictionary containing completion details.
"""
completion_id: str = f"cmpl-{str(uuid.uuid4())}"
created: int = int(time.time())
model_name: str = self._model if self._model is not None else self.model_path
return {
"id": completion_id,
"object": "text_completion",
"created": created,
"model": model_name,
"choices": [
{
"text": text_str,
"index": 0,
"logprobs": None,
"finish_reason": 'stop'
}
],
"usage": {
"prompt_tokens": None,
"completion_tokens": None,
"total_tokens": None
}
}
@llm_completion_callback()
def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
pass
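# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Shows how the TrtLlmAPI class above might be instantiated and queried. All paths and
# the engine file name are hypothetical placeholders; a prebuilt TensorRT-LLM engine and
# a matching tokenizer directory are assumed to exist.
if __name__ == "__main__":
    llm = TrtLlmAPI(
        model_path="./model/trt_engine",                # hypothetical engine directory
        engine_name="llama_float16_tp1_rank0.engine",   # hypothetical engine file name
        tokenizer_dir="./model/tokenizer",              # hypothetical tokenizer directory
        temperature=0.1,
        max_new_tokens=256,
        verbose=True,
    )
    print(llm.complete("What is TensorRT-LLM?").text)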
| [
"llama_index.llms.base.llm_chat_callback",
"llama_index.llms.base.LLMMetadata",
"llama_index.bridge.pydantic.Field",
"llama_index.llms.generic_utils.completion_response_to_chat_response",
"llama_index.bridge.pydantic.PrivateAttr",
"llama_index.llms.base.llm_completion_callback"
] | [((2151, 2199), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The path to the trt engine."""'}), "(description='The path to the trt engine.')\n", (2156, 2199), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2239, 2296), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (2244, 2296), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2323, 2385), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""'}), "(description='The maximum number of tokens to generate.')\n", (2328, 2385), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2412, 2484), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of context tokens for the model."""'}), "(description='The maximum number of context tokens for the model.')\n", (2417, 2484), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2534, 2619), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The function to convert messages to a prompt."""', 'exclude': '(True)'}), "(description='The function to convert messages to a prompt.', exclude=True\n )\n", (2539, 2619), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2666, 2754), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The function to convert a completion to a prompt."""', 'exclude': '(True)'}), "(description='The function to convert a completion to a prompt.',\n exclude=True)\n", (2671, 2754), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2803, 2873), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Kwargs used for generation."""'}), "(default_factory=dict, description='Kwargs used for generation.')\n", (2808, 2873), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2923, 3008), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Kwargs used for model initialization."""'}), "(default_factory=dict, description='Kwargs used for model initialization.'\n )\n", (2928, 3008), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((3038, 3091), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""Whether to print verbose output."""'}), "(description='Whether to print verbose output.')\n", (3043, 3091), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((3111, 3124), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3122, 3124), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((3150, 3163), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3161, 3163), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((3186, 3199), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3197, 3199), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((3222, 3235), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3233, 3235), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((3259, 3272), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3270, 3272), False, 'from llama_index.bridge.pydantic import 
Field, PrivateAttr\n'), ((3288, 3301), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3299, 3301), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((9389, 9408), 'llama_index.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9406, 9408), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, CompletionResponse, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((9701, 9726), 'llama_index.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (9724, 9726), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, CompletionResponse, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((14134, 14159), 'llama_index.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (14157, 14159), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, CompletionResponse, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((9228, 9340), 'llama_index.llms.base.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'self.context_window', 'num_output': 'self.max_new_tokens', 'model_name': 'self.model_path'}), '(context_window=self.context_window, num_output=self.\n max_new_tokens, model_name=self.model_path)\n', (9239, 9340), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, CompletionResponse, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((9637, 9694), 'llama_index.llms.generic_utils.completion_response_to_chat_response', 'completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (9673, 9694), False, 'from llama_index.llms.generic_utils import completion_response_to_chat_response\n'), ((10577, 10601), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (10599, 10601), False, 'import torch\n'), ((11367, 11391), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (11389, 11391), False, 'import torch\n'), ((11400, 11412), 'gc.collect', 'gc.collect', ([], {}), '()\n', (11410, 11412), False, 'import gc\n'), ((10467, 10478), 'time.time', 'time.time', ([], {}), '()\n', (10476, 10478), False, 'import time\n'), ((10680, 10691), 'time.time', 'time.time', ([], {}), '()\n', (10689, 10691), False, 'import time\n'), ((11988, 12016), 'numpy.concatenate', 'np.concatenate', (['input_tokens'], {}), '(input_tokens)\n', (12002, 12016), True, 'import numpy as np\n'), ((13479, 13490), 'time.time', 'time.time', ([], {}), '()\n', (13488, 13490), False, 'import time\n'), ((4271, 4297), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (4285, 4297), False, 'import os\n'), ((4582, 4598), 'pathlib.Path', 'Path', (['engine_dir'], {}), '(engine_dir)\n', (4586, 4598), False, 'from pathlib import Path\n'), ((6180, 6445), 'tensorrt_llm.runtime.ModelConfig', 'ModelConfig', ([], {'num_heads': 'num_heads', 'num_kv_heads': 'num_kv_heads', 'hidden_size': 'hidden_size', 'vocab_size': 'vocab_size', 'num_layers': 'num_layers', 'gpt_attention_plugin': 'use_gpt_attention_plugin', 'paged_kv_cache': 'paged_kv_cache', 'remove_input_padding': 'remove_input_padding'}), '(num_heads=num_heads, num_kv_heads=num_kv_heads, hidden_size=\n hidden_size, vocab_size=vocab_size, num_layers=num_layers,\n gpt_attention_plugin=use_gpt_attention_plugin, paged_kv_cache=\n paged_kv_cache, remove_input_padding=remove_input_padding)\n', (6191, 6445), False, 'from tensorrt_llm.runtime import ModelConfig, SamplingConfig\n'), ((6947, 6970), 
'tensorrt_llm.mpi_rank', 'tensorrt_llm.mpi_rank', ([], {}), '()\n', (6968, 6970), False, 'import tensorrt_llm\n'), ((7005, 7090), 'tensorrt_llm.Mapping', 'tensorrt_llm.Mapping', (['world_size', 'runtime_rank'], {'tp_size': 'tp_size', 'pp_size': 'pp_size'}), '(world_size, runtime_rank, tp_size=tp_size, pp_size=pp_size\n )\n', (7025, 7090), False, 'import tensorrt_llm\n'), ((7267, 7334), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(runtime_rank % runtime_mapping.gpus_per_node)'], {}), '(runtime_rank % runtime_mapping.gpus_per_node)\n', (7288, 7334), False, 'import torch\n'), ((7369, 7428), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['tokenizer_dir'], {'legacy': '(False)'}), '(tokenizer_dir, legacy=False)\n', (7399, 7428), False, 'from transformers import LlamaTokenizer\n'), ((7469, 7562), 'tensorrt_llm.runtime.SamplingConfig', 'SamplingConfig', ([], {'end_id': 'EOS_TOKEN', 'pad_id': 'PAD_TOKEN', 'num_beams': '(1)', 'temperature': 'temperature'}), '(end_id=EOS_TOKEN, pad_id=PAD_TOKEN, num_beams=1, temperature\n =temperature)\n', (7483, 7562), False, 'from tensorrt_llm.runtime import ModelConfig, SamplingConfig\n'), ((7912, 8024), 'tensorrt_llm.runtime.GenerationSession', 'tensorrt_llm.runtime.GenerationSession', (['self._model_config', 'engine_buffer', 'runtime_mapping'], {'debug_mode': '(False)'}), '(self._model_config, engine_buffer,\n runtime_mapping, debug_mode=False)\n', (7950, 8024), False, 'import tensorrt_llm\n'), ((10268, 10292), 'torch.max', 'torch.max', (['input_lengths'], {}), '(input_lengths)\n', (10277, 10292), False, 'import torch\n'), ((4775, 4787), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4784, 4787), False, 'import json\n'), ((5192, 5221), 'tensorrt_llm.mpi_world_size', 'tensorrt_llm.mpi_world_size', ([], {}), '()\n', (5219, 5221), False, 'import tensorrt_llm\n'), ((5889, 5992), 'tensorrt_llm.logger.warning', 'tensorrt_llm.logger.warning', (['"""`multi_query_mode` config is deprecated. Please rebuild the engine."""'], {}), "(\n '`multi_query_mode` config is deprecated. Please rebuild the engine.')\n", (5916, 5992), False, 'import tensorrt_llm\n'), ((12041, 12098), 'torch.tensor', 'torch.tensor', (['input_ids'], {'dtype': 'torch.int32', 'device': '"""cuda"""'}), "(input_ids, dtype=torch.int32, device='cuda')\n", (12053, 12098), False, 'import torch\n'), ((13436, 13448), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (13446, 13448), False, 'import uuid\n'), ((5304, 5333), 'tensorrt_llm.mpi_world_size', 'tensorrt_llm.mpi_world_size', ([], {}), '()\n', (5331, 5333), False, 'import tensorrt_llm\n'), ((12234, 12293), 'torch.nested.nested_tensor', 'torch.nested.nested_tensor', (['input_tokens'], {'dtype': 'torch.int32'}), '(input_tokens, dtype=torch.int32)\n', (12260, 12293), False, 'import torch\n')] |
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.llms import ChatMessage, ChatResponse
from llama_index.core.schema import NodeWithScore, TextNode
import chainlit as cl
@cl.on_chat_start
async def start():
await cl.Message(content="LlamaIndexCb").send()
cb = cl.LlamaIndexCallbackHandler()
cb.on_event_start(CBEventType.RETRIEVE, payload={})
await cl.sleep(0.2)
cb.on_event_end(
CBEventType.RETRIEVE,
payload={
EventPayload.NODES: [
NodeWithScore(node=TextNode(text="This is text1"), score=1)
]
},
)
cb.on_event_start(CBEventType.LLM)
await cl.sleep(0.2)
response = ChatResponse(message=ChatMessage(content="This is the LLM response"))
cb.on_event_end(
CBEventType.LLM,
payload={
EventPayload.RESPONSE: response,
EventPayload.PROMPT: "This is the LLM prompt",
},
)
| [
"llama_index.core.schema.TextNode",
"llama_index.core.llms.ChatMessage"
] | [((316, 346), 'chainlit.LlamaIndexCallbackHandler', 'cl.LlamaIndexCallbackHandler', ([], {}), '()\n', (344, 346), True, 'import chainlit as cl\n'), ((415, 428), 'chainlit.sleep', 'cl.sleep', (['(0.2)'], {}), '(0.2)\n', (423, 428), True, 'import chainlit as cl\n'), ((691, 704), 'chainlit.sleep', 'cl.sleep', (['(0.2)'], {}), '(0.2)\n', (699, 704), True, 'import chainlit as cl\n'), ((742, 789), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'content': '"""This is the LLM response"""'}), "(content='This is the LLM response')\n", (753, 789), False, 'from llama_index.core.llms import ChatMessage, ChatResponse\n'), ((264, 298), 'chainlit.Message', 'cl.Message', ([], {'content': '"""LlamaIndexCb"""'}), "(content='LlamaIndexCb')\n", (274, 298), True, 'import chainlit as cl\n'), ((568, 598), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': '"""This is text1"""'}), "(text='This is text1')\n", (576, 598), False, 'from llama_index.core.schema import NodeWithScore, TextNode\n')] |
import requests
from bs4 import BeautifulSoup
from llama_index import GPTSimpleVectorIndex
from llama_index.readers.database import DatabaseReader
from env import settings
from logger import logger
from .base import BaseToolSet, SessionGetter, ToolScope, tool
class RequestsGet(BaseToolSet):
@tool(
name="Requests Get",
description="A portal to the internet. "
"Use this when you need to get specific content from a website."
"Input should be a url (i.e. https://www.google.com)."
"The output will be the text response of the GET request.",
)
def get(self, url: str) -> str:
"""Run the tool."""
html = requests.get(url).text
soup = BeautifulSoup(html)
non_readable_tags = soup.find_all(
["script", "style", "header", "footer", "form"]
)
for non_readable_tag in non_readable_tags:
non_readable_tag.extract()
content = soup.get_text("\n", strip=True)
if len(content) > 300:
content = content[:300] + "..."
logger.debug(
f"\nProcessed RequestsGet, Input Url: {url} " f"Output Contents: {content}"
)
return content
class WineDB(BaseToolSet):
def __init__(self):
db = DatabaseReader(
scheme="postgresql", # Database Scheme
host=settings["WINEDB_HOST"], # Database Host
port="5432", # Database Port
user="alphadom", # Database User
password=settings["WINEDB_PASSWORD"], # Database Password
dbname="postgres", # Database Name
)
self.columns = ["nameEn", "nameKo", "description"]
concat_columns = str(",'-',".join([f'"{i}"' for i in self.columns]))
query = f"""
SELECT
Concat({concat_columns})
FROM wine
"""
documents = db.load_data(query=query)
self.index = GPTSimpleVectorIndex(documents)
@tool(
name="Wine Recommendation",
description="A tool to recommend wines based on a user's input. "
"Inputs are necessary factors for wine recommendations, such as the user's mood today, side dishes to eat with wine, people to drink wine with, what things you want to do, the scent and taste of their favorite wine."
"The output will be a list of recommended wines."
"The tool is based on a database of wine reviews, which is stored in a database.",
)
def recommend(self, query: str) -> str:
"""Run the tool."""
results = self.index.query(query)
wine = "\n".join(
[
f"{i}:{j}"
for i, j in zip(
self.columns, results.source_nodes[0].source_text.split("-")
)
]
)
output = results.response + "\n\n" + wine
logger.debug(
f"\nProcessed WineDB, Input Query: {query} " f"Output Wine: {wine}"
)
return output
class ExitConversation(BaseToolSet):
@tool(
name="Exit Conversation",
description="A tool to exit the conversation. "
"Use this when you want to exit the conversation. "
"The input should be a message that the conversation is over.",
scope=ToolScope.SESSION,
)
def exit(self, message: str, get_session: SessionGetter) -> str:
"""Run the tool."""
_, executor = get_session()
del executor
logger.debug(f"\nProcessed ExitConversation.")
return message
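# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Exercises the RequestsGet tool above directly. This assumes that BaseToolSet subclasses
# can be constructed without arguments and that the @tool decorator leaves the method
# callable as a plain bound method; adapt to the actual BaseToolSet contract if not.
if __name__ == "__main__":
    requests_tool = RequestsGet()
    print(requests_tool.get("https://www.example.com"))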
| [
"llama_index.GPTSimpleVectorIndex",
"llama_index.readers.database.DatabaseReader"
] | [((713, 732), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html'], {}), '(html)\n', (726, 732), False, 'from bs4 import BeautifulSoup\n'), ((1073, 1166), 'logger.logger.debug', 'logger.debug', (['f"""\nProcessed RequestsGet, Input Url: {url} Output Contents: {content}"""'], {}), '(\n f"""\nProcessed RequestsGet, Input Url: {url} Output Contents: {content}""")\n', (1085, 1166), False, 'from logger import logger\n'), ((1275, 1437), 'llama_index.readers.database.DatabaseReader', 'DatabaseReader', ([], {'scheme': '"""postgresql"""', 'host': "settings['WINEDB_HOST']", 'port': '"""5432"""', 'user': '"""alphadom"""', 'password': "settings['WINEDB_PASSWORD']", 'dbname': '"""postgres"""'}), "(scheme='postgresql', host=settings['WINEDB_HOST'], port=\n '5432', user='alphadom', password=settings['WINEDB_PASSWORD'], dbname=\n 'postgres')\n", (1289, 1437), False, 'from llama_index.readers.database import DatabaseReader\n'), ((1937, 1968), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {}), '(documents)\n', (1957, 1968), False, 'from llama_index import GPTSimpleVectorIndex\n'), ((2867, 2952), 'logger.logger.debug', 'logger.debug', (['f"""\nProcessed WineDB, Input Query: {query} Output Wine: {wine}"""'], {}), '(f"""\nProcessed WineDB, Input Query: {query} Output Wine: {wine}"""\n )\n', (2879, 2952), False, 'from logger import logger\n'), ((3468, 3517), 'logger.logger.debug', 'logger.debug', (['f"""\nProcessed ExitConversation."""'], {}), '(f"""\nProcessed ExitConversation.""")\n', (3480, 3517), False, 'from logger import logger\n'), ((675, 692), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (687, 692), False, 'import requests\n')] |
try:
from llama_index import Document
from llama_index.text_splitter import SentenceSplitter
except ImportError:
from llama_index.core import Document
from llama_index.core.text_splitter import SentenceSplitter
def llama_index_sentence_splitter(
documents: list[str], document_ids: list[str], chunk_size=256
):
chunk_overlap = min(chunk_size / 4, min(chunk_size / 2, 64))
chunks = []
node_parser = SentenceSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
docs = [[Document(text=doc)] for doc in documents]
for doc_id, doc in zip(document_ids, docs):
chunks += [
{"document_id": doc_id, "content": node.text} for node in node_parser(doc)
]
return chunks
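# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Runs the chunker above on two small in-memory documents and prints the resulting
# (document_id, content) pairs; the sample texts and ids below are illustrative only.
if __name__ == "__main__":
    sample_docs = [
        "LlamaIndex splits long documents into sentence-aware chunks.",
        "Each chunk keeps a reference to the id of its source document.",
    ]
    sample_ids = ["doc-1", "doc-2"]
    for chunk in llama_index_sentence_splitter(sample_docs, sample_ids, chunk_size=64):
        print(chunk["document_id"], chunk["content"])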
| [
"llama_index.core.text_splitter.SentenceSplitter",
"llama_index.core.Document"
] | [((432, 500), 'llama_index.core.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (448, 500), False, 'from llama_index.core.text_splitter import SentenceSplitter\n'), ((514, 532), 'llama_index.core.Document', 'Document', ([], {'text': 'doc'}), '(text=doc)\n', (522, 532), False, 'from llama_index.core import Document\n')] |
"""
Creates RAG dataset for tutorial notebooks and persists to disk.
"""
import argparse
import logging
import sys
from typing import List, Optional
import llama_index
import numpy as np
import pandas as pd
from gcsfs import GCSFileSystem
from llama_index import ServiceContext, StorageContext, load_index_from_storage
from llama_index.callbacks import CallbackManager, OpenInferenceCallbackHandler
from llama_index.callbacks.open_inference_callback import as_dataframe
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms import OpenAI
from phoenix.experimental.evals.retrievals import (
classify_relevance,
compute_precisions_at_k,
)
from tqdm import tqdm
def create_user_feedback(
first_document_relevances: List[Optional[bool]],
second_document_relevances: List[Optional[bool]],
) -> List[Optional[bool]]:
"""_summary_
Args:
first_document_relevances (List[Optional[bool]]): _description_
second_document_relevances (List[Optional[bool]]): _description_
Returns:
List[Optional[bool]]: _description_
"""
if len(first_document_relevances) != len(second_document_relevances):
raise ValueError()
first_document_relevances_array = np.array(first_document_relevances)
second_document_relevances_array = np.array(second_document_relevances)
failed_retrieval_mask = ~first_document_relevances_array & ~second_document_relevances_array
num_failed_retrievals = failed_retrieval_mask.sum()
num_thumbs_down = int(0.75 * num_failed_retrievals)
failed_retrieval_indexes = np.where(failed_retrieval_mask)[0]
thumbs_down_mask = np.random.choice(
failed_retrieval_indexes, size=num_thumbs_down, replace=False
)
successful_retrieval_mask = ~failed_retrieval_mask
num_successful_retrievals = successful_retrieval_mask.sum()
num_thumbs_up = int(0.25 * num_successful_retrievals)
successful_retrieval_indexes = np.where(successful_retrieval_mask)[0]
thumbs_up_mask = np.random.choice(
successful_retrieval_indexes, size=num_thumbs_up, replace=False
)
user_feedback_array = np.full(len(first_document_relevances), np.nan, dtype=np.float32)
user_feedback_array[thumbs_down_mask] = -1.0
user_feedback_array[thumbs_up_mask] = 1.0
return [None if np.isnan(value) else value for value in user_feedback_array.tolist()]
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
parser = argparse.ArgumentParser()
parser.add_argument("--index-path", type=str, required=True, help="Path to persisted index.")
parser.add_argument(
"--use-gcs",
action="store_true",
help="If this flag is set, the index will be loaded from GCS.",
)
parser.add_argument(
"--query-path", type=str, required=True, help="Path to CSV file containing queries."
)
parser.add_argument(
"--output-path", type=str, required=True, help="Path to output Parquet file."
)
args = parser.parse_args()
llama_index.prompts.default_prompts.DEFAULT_TEXT_QA_PROMPT_TMPL = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information, "
"answer the question and be as helpful as possible: {query_str}\n"
) # This prompt has been tweaked to make the system less conservative for demo purposes.
queries = pd.read_csv(args.query_path)["Question"].tolist()
file_system = GCSFileSystem(project="public-assets-275721") if args.use_gcs else None
storage_context = StorageContext.from_defaults(
fs=file_system,
persist_dir=args.index_path,
)
callback_handler = OpenInferenceCallbackHandler()
service_context = ServiceContext.from_defaults(
llm=OpenAI(model="text-davinci-003"),
embed_model=OpenAIEmbedding(model="text-embedding-ada-002"),
callback_manager=CallbackManager(handlers=[callback_handler]),
)
index = load_index_from_storage(
storage_context,
service_context=service_context,
)
query_engine = index.as_query_engine()
logging.info("Running queries")
for query in tqdm(queries):
query_engine.query(query)
query_dataframe = as_dataframe(callback_handler.flush_query_data_buffer())
document_dataframe = as_dataframe(callback_handler.flush_node_data_buffer())
query_texts = query_dataframe[":feature.text:prompt"].tolist()
list_of_document_id_lists = query_dataframe[
":feature.[str].retrieved_document_ids:prompt"
].tolist()
document_id_to_text = dict(
zip(document_dataframe["id"].to_list(), document_dataframe["node_text"].to_list())
)
first_document_texts, second_document_texts = [
[
document_id_to_text[document_ids[document_index]]
for document_ids in list_of_document_id_lists
]
for document_index in [0, 1]
]
logging.info("Computing LLM-assisted ranking metrics")
first_document_relevances, second_document_relevances = [
[
classify_relevance(query_text, document_text, model_name="gpt-4")
for query_text, document_text in tqdm(zip(query_texts, first_document_texts))
]
for document_texts in [first_document_texts, second_document_texts]
]
list_of_precisions_at_k_lists = [
compute_precisions_at_k([rel0, rel1])
for rel0, rel1 in zip(first_document_relevances, second_document_relevances)
]
precisions_at_1, precisions_at_2 = [
[precisions_at_k[index] for precisions_at_k in list_of_precisions_at_k_lists]
for index in [0, 1]
]
document_similarity_0, document_similarity_1 = [
[
scores[index]
for scores in query_dataframe[
":feature.[float].retrieved_document_scores:prompt"
].tolist()
]
for index in [0, 1]
]
user_feedback = create_user_feedback(first_document_relevances, second_document_relevances)
logging.info(
f"Thumbs up: {sum([value == 1.0 for value in user_feedback]) / len(user_feedback)}"
)
logging.info(
f"Thumbs down: {sum([value == -1.0 for value in user_feedback]) / len(user_feedback)}"
)
query_dataframe = query_dataframe.assign(
**{
":tag.bool:relevance_0": first_document_relevances,
":tag.bool:relevance_1": second_document_relevances,
":tag.float:precision_at_1": precisions_at_1,
":tag.float:precision_at_2": precisions_at_2,
":tag.float:document_similarity_0": document_similarity_0,
":tag.float:document_similarity_1": document_similarity_1,
":tag.float:user_feedback": user_feedback,
}
)
query_dataframe.to_parquet(args.output_path)
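# --- Hedged usage note (added for illustration, not part of the original script) ---
# Example invocations with hypothetical paths; the index directory must hold a previously
# persisted LlamaIndex index and the query CSV needs a "Question" column:
#   python create_rag_dataset.py --index-path ./index --query-path queries.csv --output-path rag.parquet
#   python create_rag_dataset.py --use-gcs --index-path bucket/index --query-path queries.csv --output-path rag.parquet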
| [
"llama_index.llms.OpenAI",
"llama_index.StorageContext.from_defaults",
"llama_index.callbacks.OpenInferenceCallbackHandler",
"llama_index.load_index_from_storage",
"llama_index.callbacks.CallbackManager",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((1235, 1270), 'numpy.array', 'np.array', (['first_document_relevances'], {}), '(first_document_relevances)\n', (1243, 1270), True, 'import numpy as np\n'), ((1310, 1346), 'numpy.array', 'np.array', (['second_document_relevances'], {}), '(second_document_relevances)\n', (1318, 1346), True, 'import numpy as np\n'), ((1645, 1724), 'numpy.random.choice', 'np.random.choice', (['failed_retrieval_indexes'], {'size': 'num_thumbs_down', 'replace': '(False)'}), '(failed_retrieval_indexes, size=num_thumbs_down, replace=False)\n', (1661, 1724), True, 'import numpy as np\n'), ((2011, 2097), 'numpy.random.choice', 'np.random.choice', (['successful_retrieval_indexes'], {'size': 'num_thumbs_up', 'replace': '(False)'}), '(successful_retrieval_indexes, size=num_thumbs_up, replace=\n False)\n', (2027, 2097), True, 'import numpy as np\n'), ((2417, 2476), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'stream': 'sys.stdout'}), '(level=logging.DEBUG, stream=sys.stdout)\n', (2436, 2476), False, 'import logging\n'), ((2491, 2516), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2514, 2516), False, 'import argparse\n'), ((3637, 3710), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'fs': 'file_system', 'persist_dir': 'args.index_path'}), '(fs=file_system, persist_dir=args.index_path)\n', (3665, 3710), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((3757, 3787), 'llama_index.callbacks.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '()\n', (3785, 3787), False, 'from llama_index.callbacks import CallbackManager, OpenInferenceCallbackHandler\n'), ((4044, 4117), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (4067, 4117), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((4189, 4220), 'logging.info', 'logging.info', (['"""Running queries"""'], {}), "('Running queries')\n", (4201, 4220), False, 'import logging\n'), ((4238, 4251), 'tqdm.tqdm', 'tqdm', (['queries'], {}), '(queries)\n', (4242, 4251), False, 'from tqdm import tqdm\n'), ((5004, 5058), 'logging.info', 'logging.info', (['"""Computing LLM-assisted ranking metrics"""'], {}), "('Computing LLM-assisted ranking metrics')\n", (5016, 5058), False, 'import logging\n'), ((1587, 1618), 'numpy.where', 'np.where', (['failed_retrieval_mask'], {}), '(failed_retrieval_mask)\n', (1595, 1618), True, 'import numpy as np\n'), ((1951, 1986), 'numpy.where', 'np.where', (['successful_retrieval_mask'], {}), '(successful_retrieval_mask)\n', (1959, 1986), True, 'import numpy as np\n'), ((3543, 3588), 'gcsfs.GCSFileSystem', 'GCSFileSystem', ([], {'project': '"""public-assets-275721"""'}), "(project='public-assets-275721')\n", (3556, 3588), False, 'from gcsfs import GCSFileSystem\n'), ((5437, 5474), 'phoenix.experimental.evals.retrievals.compute_precisions_at_k', 'compute_precisions_at_k', (['[rel0, rel1]'], {}), '([rel0, rel1])\n', (5460, 5474), False, 'from phoenix.experimental.evals.retrievals import classify_relevance, compute_precisions_at_k\n'), ((2314, 2329), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (2322, 2329), True, 'import numpy as np\n'), ((3852, 3884), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""text-davinci-003"""'}), "(model='text-davinci-003')\n", (3858, 3884), False, 'from llama_index.llms import 
OpenAI\n'), ((3906, 3953), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""'}), "(model='text-embedding-ada-002')\n", (3921, 3953), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((3980, 4024), 'llama_index.callbacks.CallbackManager', 'CallbackManager', ([], {'handlers': '[callback_handler]'}), '(handlers=[callback_handler])\n', (3995, 4024), False, 'from llama_index.callbacks import CallbackManager, OpenInferenceCallbackHandler\n'), ((5143, 5208), 'phoenix.experimental.evals.retrievals.classify_relevance', 'classify_relevance', (['query_text', 'document_text'], {'model_name': '"""gpt-4"""'}), "(query_text, document_text, model_name='gpt-4')\n", (5161, 5208), False, 'from phoenix.experimental.evals.retrievals import classify_relevance, compute_precisions_at_k\n'), ((3475, 3503), 'pandas.read_csv', 'pd.read_csv', (['args.query_path'], {}), '(args.query_path)\n', (3486, 3503), True, 'import pandas as pd\n')] |
import logging
import os
import time
import typing
import uuid
from typing import TYPE_CHECKING, Any, Iterable, List, Optional
import numpy as np
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.vector_stores.types import (
VectorStore,
VectorStoreQuery,
VectorStoreQueryResult,
)
from llama_index.core.vector_stores.utils import (
legacy_metadata_dict_to_node,
metadata_dict_to_node,
node_to_metadata_dict,
)
if TYPE_CHECKING:
import vearch
logger = logging.getLogger(__name__)
class VearchVectorStore(VectorStore):
"""
    Vearch vector store:
    embeddings are stored within a Vearch table.
    At query time, the index uses Vearch to retrieve the top
    k most similar nodes.
    Args:
        path_or_url (Optional[str]): Cluster URL (flag=1) or local metadata path (flag=0).
        table_name (str): Name of the Vearch table/space used to store embeddings.
        db_name (str): Database name, used in cluster mode only.
        flag (int): 1 for a Vearch cluster deployment, 0 for standalone.
"""
flat_metadata: bool = True
stores_text: bool = True
_DEFAULT_TABLE_NAME = "liama_index_vearch"
_DEFAULT_CLUSTER_DB_NAME = "liama_index_vearch_client_db"
_DEFAULT_VERSION = 1
def __init__(
self,
path_or_url: Optional[str] = None,
table_name: str = _DEFAULT_TABLE_NAME,
db_name: str = _DEFAULT_CLUSTER_DB_NAME,
flag: int = _DEFAULT_VERSION,
**kwargs: Any,
) -> None:
"""
        Initialize the Vearch vector store.
        flag: 1 for cluster mode, 0 for standalone.
"""
try:
if flag:
import vearch_cluster
else:
import vearch
except ImportError:
raise ValueError(
"Could not import suitable python package."
"Please install it with `pip install vearch or vearch_cluster."
)
if flag:
if path_or_url is None:
raise ValueError("Please input url of cluster")
if not db_name:
db_name = self._DEFAULT_CLUSTER_DB_NAME
db_name += "_"
db_name += str(uuid.uuid4()).split("-")[-1]
self.using_db_name = db_name
self.url = path_or_url
self.vearch = vearch_cluster.VearchCluster(path_or_url)
else:
if path_or_url is None:
metadata_path = os.getcwd().replace("\\", "/")
else:
metadata_path = path_or_url
if not os.path.isdir(metadata_path):
os.makedirs(metadata_path)
log_path = os.path.join(metadata_path, "log")
if not os.path.isdir(log_path):
os.makedirs(log_path)
self.vearch = vearch.Engine(metadata_path, log_path)
self.using_metapath = metadata_path
if not table_name:
table_name = self._DEFAULT_TABLE_NAME
table_name += "_"
table_name += str(uuid.uuid4()).split("-")[-1]
self.using_table_name = table_name
self.flag = flag
@property
def client(self) -> Any:
"""Get client."""
return self.vearch
def _get_matadata_field(self, metadatas: Optional[List[dict]] = None) -> None:
field_list = []
if metadatas:
for key, value in metadatas[0].items():
if isinstance(value, int):
field_list.append({"field": key, "type": "int"})
continue
if isinstance(value, str):
field_list.append({"field": key, "type": "str"})
continue
if isinstance(value, float):
field_list.append({"field": key, "type": "float"})
continue
else:
raise ValueError("Please check data type,support int, str, float")
self.field_list = field_list
def _add_texts(
self,
ids: Iterable[str],
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
embeddings: Optional[List[List[float]]] = None,
**kwargs: Any,
) -> List[str]:
"""
Returns:
List of ids from adding the texts into the vectorstore.
"""
if embeddings is None:
raise ValueError("embeddings is None")
self._get_matadata_field(metadatas)
if self.flag:
dbs_list = self.vearch.list_dbs()
if self.using_db_name not in dbs_list:
create_db_code = self.vearch.create_db(self.using_db_name)
if not create_db_code:
raise ValueError("create db failed!!!")
space_list = self.vearch.list_spaces(self.using_db_name)
if self.using_table_name not in space_list:
create_space_code = self._create_space(len(embeddings[0]))
if not create_space_code:
raise ValueError("create space failed!!!")
docid = []
if embeddings is not None and metadatas is not None:
meta_field_list = [i["field"] for i in self.field_list]
for text, metadata, embed, id_d in zip(
texts, metadatas, embeddings, ids
):
profiles: typing.Dict[str, Any] = {}
profiles["text"] = text
for f in meta_field_list:
profiles[f] = metadata[f]
embed_np = np.array(embed)
profiles["text_embedding"] = {
"feature": (embed_np / np.linalg.norm(embed_np)).tolist()
}
insert_res = self.vearch.insert_one(
self.using_db_name, self.using_table_name, profiles, id_d
)
if insert_res["status"] == 200:
docid.append(insert_res["_id"])
continue
else:
retry_insert = self.vearch.insert_one(
self.using_db_name, self.using_table_name, profiles
)
docid.append(retry_insert["_id"])
continue
else:
table_path = os.path.join(
self.using_metapath, self.using_table_name + ".schema"
)
if not os.path.exists(table_path):
dim = len(embeddings[0])
response_code = self._create_table(dim)
if response_code:
raise ValueError("create table failed!!!")
if embeddings is not None and metadatas is not None:
doc_items = []
meta_field_list = [i["field"] for i in self.field_list]
for text, metadata, embed, id_d in zip(
texts, metadatas, embeddings, ids
):
profiles_v: typing.Dict[str, Any] = {}
profiles_v["text"] = text
profiles_v["_id"] = id_d
for f in meta_field_list:
profiles_v[f] = metadata[f]
embed_np = np.array(embed)
profiles_v["text_embedding"] = embed_np / np.linalg.norm(embed_np)
doc_items.append(profiles_v)
docid = self.vearch.add(doc_items)
t_time = 0
while len(docid) != len(embeddings):
time.sleep(0.5)
if t_time > 6:
break
t_time += 1
self.vearch.dump()
return docid
def _create_table(
self,
dim: int = 1024,
) -> int:
"""
Create Standalone VectorStore Table.
Args:
            dim: dimension of the embedding vector; stored fields are taken from self.field_list.
        Return:
            code, 0 for success, 1 for failure.
"""
type_dict = {
"int": vearch.dataType.INT,
"str": vearch.dataType.STRING,
"float": vearch.dataType.FLOAT,
}
engine_info = {
"index_size": 1,
"retrieval_type": "HNSW",
"retrieval_param": {
"metric_type": "InnerProduct",
"nlinks": -1,
"efConstruction": -1,
},
}
        # list.append() returns None, so extend self.field_list in place and
        # build the schema fields from the list itself.
        self.field_list.append({"field": "text", "type": "str"})
        fields = [
            vearch.GammaFieldInfo(fi["field"], type_dict[fi["type"]])
            for fi in self.field_list
        ]
vector_field = vearch.GammaVectorInfo(
name="text_embedding",
type=vearch.dataType.VECTOR,
is_index=True,
dimension=dim,
model_id="",
store_type="MemoryOnly",
store_param={"cache_size": 10000},
)
return self.vearch.create_table(
engine_info,
name=self.using_table_name,
fields=fields,
vector_field=vector_field,
)
def _create_space(
self,
dim: int = 1024,
) -> int:
"""
Create Cluster VectorStore space.
Args:
            dim: dimension of the embedding vector.
        Return:
            code, 0 for failure, 1 for success.
"""
type_dict = {"int": "integer", "str": "string", "float": "float"}
space_config = {
"name": self.using_table_name,
"partition_num": 1,
"replica_num": 1,
"engine": {
"index_size": 1,
"retrieval_type": "HNSW",
"retrieval_param": {
"metric_type": "InnerProduct",
"nlinks": -1,
"efConstruction": -1,
},
},
}
tmp_proer = {
"text": {"type": "string"},
"text_embedding": {
"type": "vector",
"index": True,
"dimension": dim,
"store_type": "MemoryOnly",
},
}
for item in self.field_list:
tmp_proer[item["field"]] = {"type": type_dict[item["type"]]}
space_config["properties"] = tmp_proer
return self.vearch.create_space(self.using_db_name, space_config)
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
if not self.vearch:
raise ValueError("Vearch Engine is not initialized")
embeddings = []
metadatas = []
ids = []
texts = []
for node in nodes:
embeddings.append(node.get_embedding())
metadatas.append(
node_to_metadata_dict(
node, remove_text=True, flat_metadata=self.flat_metadata
)
)
ids.append(node.node_id)
texts.append(node.get_content(metadata_mode=MetadataMode.NONE) or "")
return self._add_texts(
ids=ids,
texts=texts,
metadatas=metadatas,
embeddings=embeddings,
)
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""
Query index for top k most similar nodes.
Args:
query : vector store query.
Returns:
VectorStoreQueryResult: Query results.
"""
meta_filters = {}
if query.filters is not None:
for filter_ in query.filters.legacy_filters():
meta_filters[filter_.key] = filter_.value
if self.flag:
meta_field_list = self.vearch.get_space(
self.using_db_name, self.using_table_name
)
meta_field_list.remove("text_embedding")
embed = query.query_embedding
if embed is None:
raise ValueError("query.query_embedding is None")
k = query.similarity_top_k
if self.flag:
query_data = {
"query": {
"sum": [
{
"field": "text_embedding",
"feature": (embed / np.linalg.norm(embed)).tolist(),
}
],
},
"retrieval_param": {"metric_type": "InnerProduct", "efSearch": 64},
"size": k,
"fields": meta_field_list,
}
query_result = self.vearch.search(
self.using_db_name, self.using_table_name, query_data
)
res = query_result["hits"]["hits"]
else:
query_data = {
"vector": [
{
"field": "text_embedding",
"feature": embed / np.linalg.norm(embed),
}
],
"fields": [],
"retrieval_param": {"metric_type": "InnerProduct", "efSearch": 64},
"topn": k,
}
query_result = self.vearch.search(query_data)
res = query_result[0]["result_items"]
nodes = []
similarities = []
ids = []
for item in res:
content = ""
meta_data = {}
node_id = ""
if self.flag:
score = item["_score"]
item = item["_source"]
for item_key in item:
if item_key == "text":
content = item[item_key]
continue
elif item_key == "_id":
node_id = item[item_key]
ids.append(node_id)
continue
if self.flag != 1 and item_key == "score":
score = item[item_key]
continue
meta_data[item_key] = item[item_key]
similarities.append(score)
try:
node = metadata_dict_to_node(meta_data)
node.set_content(content)
except Exception:
metadata, node_info, relationships = legacy_metadata_dict_to_node(
meta_data
)
node = TextNode(
text=content,
id_=node_id,
metadata=metadata,
start_char_idx=node_info.get("start", None),
end_char_idx=node_info.get("end", None),
relationships=relationships,
)
nodes.append(node)
return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
def _delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Optional[bool]:
"""
Delete the documents which have the specified ids.
Args:
ids: The ids of the embedding vectors.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful.
False otherwise, None if not implemented.
"""
ret: Optional[bool] = None
tmp_res = []
if ids is None or len(ids) == 0:
return ret
for _id in ids:
if self.flag:
ret = self.vearch.delete(self.using_db_name, self.using_table_name, _id)
else:
ret = self.vearch.del_doc(_id)
tmp_res.append(ret)
return all(i == 0 for i in tmp_res)
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
Returns:
None
"""
if len(ref_doc_id) == 0:
return
ids: List[str] = []
ids.append(ref_doc_id)
self._delete(ids)
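# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Adds and queries a single node with the standalone store (flag=0). A locally installed
# `vearch` engine is assumed, and the 4-dimensional embedding is a toy value chosen only
# to keep the example short.
if __name__ == "__main__":
    store = VearchVectorStore(path_or_url="./vearch_meta", flag=0)
    node = TextNode(
        text="hello vearch",
        embedding=[0.1, 0.2, 0.3, 0.4],
        metadata={"source": "demo"},
    )
    store.add([node])
    result = store.query(
        VectorStoreQuery(query_embedding=[0.1, 0.2, 0.3, 0.4], similarity_top_k=1)
    )
    print(result.ids, result.similarities)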
| [
"llama_index.core.vector_stores.utils.metadata_dict_to_node",
"llama_index.core.vector_stores.types.VectorStoreQueryResult",
"llama_index.core.vector_stores.utils.legacy_metadata_dict_to_node",
"llama_index.core.vector_stores.utils.node_to_metadata_dict"
] | [((524, 551), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (541, 551), False, 'import logging\n'), ((8582, 8767), 'vearch.GammaVectorInfo', 'vearch.GammaVectorInfo', ([], {'name': '"""text_embedding"""', 'type': 'vearch.dataType.VECTOR', 'is_index': '(True)', 'dimension': 'dim', 'model_id': '""""""', 'store_type': '"""MemoryOnly"""', 'store_param': "{'cache_size': 10000}"}), "(name='text_embedding', type=vearch.dataType.VECTOR,\n is_index=True, dimension=dim, model_id='', store_type='MemoryOnly',\n store_param={'cache_size': 10000})\n", (8604, 8767), False, 'import vearch\n'), ((14615, 14686), 'llama_index.core.vector_stores.types.VectorStoreQueryResult', 'VectorStoreQueryResult', ([], {'nodes': 'nodes', 'similarities': 'similarities', 'ids': 'ids'}), '(nodes=nodes, similarities=similarities, ids=ids)\n', (14637, 14686), False, 'from llama_index.core.vector_stores.types import VectorStore, VectorStoreQuery, VectorStoreQueryResult\n'), ((2180, 2221), 'vearch_cluster.VearchCluster', 'vearch_cluster.VearchCluster', (['path_or_url'], {}), '(path_or_url)\n', (2208, 2221), False, 'import vearch_cluster\n'), ((2512, 2546), 'os.path.join', 'os.path.join', (['metadata_path', '"""log"""'], {}), "(metadata_path, 'log')\n", (2524, 2546), False, 'import os\n'), ((2655, 2693), 'vearch.Engine', 'vearch.Engine', (['metadata_path', 'log_path'], {}), '(metadata_path, log_path)\n', (2668, 2693), False, 'import vearch\n'), ((6216, 6284), 'os.path.join', 'os.path.join', (['self.using_metapath', "(self.using_table_name + '.schema')"], {}), "(self.using_metapath, self.using_table_name + '.schema')\n", (6228, 6284), False, 'import os\n'), ((8454, 8511), 'vearch.GammaFieldInfo', 'vearch.GammaFieldInfo', (["fi['field']", "type_dict[fi['type']]"], {}), "(fi['field'], type_dict[fi['type']])\n", (8475, 8511), False, 'import vearch\n'), ((2416, 2444), 'os.path.isdir', 'os.path.isdir', (['metadata_path'], {}), '(metadata_path)\n', (2429, 2444), False, 'import os\n'), ((2462, 2488), 'os.makedirs', 'os.makedirs', (['metadata_path'], {}), '(metadata_path)\n', (2473, 2488), False, 'import os\n'), ((2566, 2589), 'os.path.isdir', 'os.path.isdir', (['log_path'], {}), '(log_path)\n', (2579, 2589), False, 'import os\n'), ((2607, 2628), 'os.makedirs', 'os.makedirs', (['log_path'], {}), '(log_path)\n', (2618, 2628), False, 'import os\n'), ((6334, 6360), 'os.path.exists', 'os.path.exists', (['table_path'], {}), '(table_path)\n', (6348, 6360), False, 'import os\n'), ((10709, 10788), 'llama_index.core.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (10730, 10788), False, 'from llama_index.core.vector_stores.utils import legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((14001, 14033), 'llama_index.core.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['meta_data'], {}), '(meta_data)\n', (14022, 14033), False, 'from llama_index.core.vector_stores.utils import legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((5418, 5433), 'numpy.array', 'np.array', (['embed'], {}), '(embed)\n', (5426, 5433), True, 'import numpy as np\n'), ((7132, 7147), 'numpy.array', 'np.array', (['embed'], {}), '(embed)\n', (7140, 7147), True, 'import numpy as np\n'), ((7435, 7450), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (7445, 7450), False, 'import time\n'), ((14159, 
14198), 'llama_index.core.vector_stores.utils.legacy_metadata_dict_to_node', 'legacy_metadata_dict_to_node', (['meta_data'], {}), '(meta_data)\n', (14187, 14198), False, 'from llama_index.core.vector_stores.utils import legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((2304, 2315), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2313, 2315), False, 'import os\n'), ((7210, 7234), 'numpy.linalg.norm', 'np.linalg.norm', (['embed_np'], {}), '(embed_np)\n', (7224, 7234), True, 'import numpy as np\n'), ((2879, 2891), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2889, 2891), False, 'import uuid\n'), ((12842, 12863), 'numpy.linalg.norm', 'np.linalg.norm', (['embed'], {}), '(embed)\n', (12856, 12863), True, 'import numpy as np\n'), ((2049, 2061), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2059, 2061), False, 'import uuid\n'), ((5532, 5556), 'numpy.linalg.norm', 'np.linalg.norm', (['embed_np'], {}), '(embed_np)\n', (5546, 5556), True, 'import numpy as np\n'), ((12210, 12231), 'numpy.linalg.norm', 'np.linalg.norm', (['embed'], {}), '(embed)\n', (12224, 12231), True, 'import numpy as np\n')] |
# ENTER YOUR OPENAI KEY IN OPENAI_API_KEY ENV VAR FIRST
import os
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader
savePath = f'/{os.path.dirname(__file__)}/indexes/index.json'
#
# index = GPTSimpleVectorIndex(documents)#, llm_predictor=llm_predictor)
index = GPTSimpleVectorIndex.load_from_disk(savePath)
response = index.query("Summarize the vulnerability CVE-2021-23406", response_mode="tree_summarize")
print(response)
print('Sources are ', response.get_formatted_sources())
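# --- Hedged sketch (added for illustration, not part of the original snippet) ---
# The index loaded above must first be built and persisted. With the legacy llama_index
# API used here, and assuming the CVE documents live in a local ./data directory, that
# could look roughly like:
#   documents = SimpleDirectoryReader('data').load_data()
#   index = GPTSimpleVectorIndex(documents)
#   index.save_to_disk(savePath)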
| [
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((303, 348), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['savePath'], {}), '(savePath)\n', (338, 348), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader\n')] |
from typing import Optional, Union
from llama_index import ServiceContext
from llama_index.callbacks import CallbackManager
from llama_index.embeddings.utils import EmbedType
from llama_index.extractors import (
EntityExtractor,
KeywordExtractor,
QuestionsAnsweredExtractor,
SummaryExtractor,
TitleExtractor,
)
from llama_index.llms.utils import LLMType
from llama_index.prompts import PromptTemplate
from llama_index.prompts.base import BasePromptTemplate
from llama_index.text_splitter import SentenceSplitter
from autollm.callbacks.cost_calculating import CostCalculatingHandler
from autollm.utils.llm_utils import set_default_prompt_template
class AutoServiceContext:
"""AutoServiceContext extends the functionality of LlamaIndex's ServiceContext to include token
counting.
"""
@staticmethod
def from_defaults(
llm: Optional[LLMType] = "default",
embed_model: Optional[EmbedType] = "default",
system_prompt: str = None,
query_wrapper_prompt: Union[str, BasePromptTemplate] = None,
enable_cost_calculator: bool = False,
chunk_size: Optional[int] = 512,
chunk_overlap: Optional[int] = 100,
context_window: Optional[int] = None,
enable_title_extractor: bool = False,
enable_summary_extractor: bool = False,
enable_qa_extractor: bool = False,
enable_keyword_extractor: bool = False,
enable_entity_extractor: bool = False,
**kwargs) -> ServiceContext:
"""
        Create a ServiceContext with default parameters, extended with cost calculation. If
        enable_cost_calculator is True, the token usage and estimated cost of each LLM call are logged.
Parameters:
llm (LLM): The LLM to use for the query engine. Defaults to gpt-3.5-turbo.
embed_model (BaseEmbedding): The embedding model to use for the query engine. Defaults to OpenAIEmbedding.
system_prompt (str): The system prompt to use for the query engine.
query_wrapper_prompt (Union[str, BasePromptTemplate]): The query wrapper prompt to use for the query engine.
enable_cost_calculator (bool): Flag to enable cost calculator logging.
chunk_size (int): The token chunk size for each chunk.
chunk_overlap (int): The token overlap between each chunk.
context_window (int): The maximum context size that will get sent to the LLM.
enable_title_extractor (bool): Flag to enable title extractor.
enable_summary_extractor (bool): Flag to enable summary extractor.
enable_qa_extractor (bool): Flag to enable question answering extractor.
enable_keyword_extractor (bool): Flag to enable keyword extractor.
enable_entity_extractor (bool): Flag to enable entity extractor.
**kwargs: Arbitrary keyword arguments.
Returns:
ServiceContext: The initialized ServiceContext from default parameters with extra token counting functionality.
"""
if not system_prompt and not query_wrapper_prompt:
system_prompt, query_wrapper_prompt = set_default_prompt_template()
# Convert query_wrapper_prompt to PromptTemplate if it is a string
if isinstance(query_wrapper_prompt, str):
query_wrapper_prompt = PromptTemplate(template=query_wrapper_prompt)
callback_manager: CallbackManager = kwargs.get('callback_manager', CallbackManager())
kwargs.pop(
'callback_manager', None) # Make sure callback_manager is not passed to ServiceContext twice
if enable_cost_calculator:
            llm_model_name = llm.metadata.model_name if llm != "default" else "gpt-3.5-turbo"
callback_manager.add_handler(CostCalculatingHandler(model_name=llm_model_name, verbose=True))
sentence_splitter = SentenceSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
transformations = [sentence_splitter]
if enable_entity_extractor:
transformations.append(EntityExtractor())
if enable_keyword_extractor:
transformations.append(KeywordExtractor(llm=llm, keywords=5))
if enable_summary_extractor:
transformations.append(SummaryExtractor(llm=llm, summaries=["prev", "self"]))
if enable_title_extractor:
transformations.append(TitleExtractor(llm=llm, nodes=5))
if enable_qa_extractor:
transformations.append(QuestionsAnsweredExtractor(llm=llm, questions=5))
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
transformations=transformations,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
context_window=context_window,
callback_manager=callback_manager,
**kwargs)
return service_context
| [
"llama_index.extractors.TitleExtractor",
"llama_index.ServiceContext.from_defaults",
"llama_index.prompts.PromptTemplate",
"llama_index.extractors.KeywordExtractor",
"llama_index.extractors.QuestionsAnsweredExtractor",
"llama_index.callbacks.CallbackManager",
"llama_index.text_splitter.SentenceSplitter",
"llama_index.extractors.EntityExtractor",
"llama_index.extractors.SummaryExtractor"
] | [((3952, 4020), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (3968, 4020), False, 'from llama_index.text_splitter import SentenceSplitter\n'), ((4643, 4954), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'transformations': 'transformations', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'context_window': 'context_window', 'callback_manager': 'callback_manager'}), '(llm=llm, embed_model=embed_model,\n transformations=transformations, system_prompt=system_prompt,\n query_wrapper_prompt=query_wrapper_prompt, chunk_size=chunk_size,\n chunk_overlap=chunk_overlap, context_window=context_window,\n callback_manager=callback_manager, **kwargs)\n', (4671, 4954), False, 'from llama_index import ServiceContext\n'), ((3233, 3262), 'autollm.utils.llm_utils.set_default_prompt_template', 'set_default_prompt_template', ([], {}), '()\n', (3260, 3262), False, 'from autollm.utils.llm_utils import set_default_prompt_template\n'), ((3423, 3468), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'query_wrapper_prompt'}), '(template=query_wrapper_prompt)\n', (3437, 3468), False, 'from llama_index.prompts import PromptTemplate\n'), ((3545, 3562), 'llama_index.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (3560, 3562), False, 'from llama_index.callbacks import CallbackManager\n'), ((3858, 3921), 'autollm.callbacks.cost_calculating.CostCalculatingHandler', 'CostCalculatingHandler', ([], {'model_name': 'llm_model_name', 'verbose': '(True)'}), '(model_name=llm_model_name, verbose=True)\n', (3880, 3921), False, 'from autollm.callbacks.cost_calculating import CostCalculatingHandler\n'), ((4138, 4155), 'llama_index.extractors.EntityExtractor', 'EntityExtractor', ([], {}), '()\n', (4153, 4155), False, 'from llama_index.extractors import EntityExtractor, KeywordExtractor, QuestionsAnsweredExtractor, SummaryExtractor, TitleExtractor\n'), ((4229, 4266), 'llama_index.extractors.KeywordExtractor', 'KeywordExtractor', ([], {'llm': 'llm', 'keywords': '(5)'}), '(llm=llm, keywords=5)\n', (4245, 4266), False, 'from llama_index.extractors import EntityExtractor, KeywordExtractor, QuestionsAnsweredExtractor, SummaryExtractor, TitleExtractor\n'), ((4340, 4393), 'llama_index.extractors.SummaryExtractor', 'SummaryExtractor', ([], {'llm': 'llm', 'summaries': "['prev', 'self']"}), "(llm=llm, summaries=['prev', 'self'])\n", (4356, 4393), False, 'from llama_index.extractors import EntityExtractor, KeywordExtractor, QuestionsAnsweredExtractor, SummaryExtractor, TitleExtractor\n'), ((4465, 4497), 'llama_index.extractors.TitleExtractor', 'TitleExtractor', ([], {'llm': 'llm', 'nodes': '(5)'}), '(llm=llm, nodes=5)\n', (4479, 4497), False, 'from llama_index.extractors import EntityExtractor, KeywordExtractor, QuestionsAnsweredExtractor, SummaryExtractor, TitleExtractor\n'), ((4566, 4614), 'llama_index.extractors.QuestionsAnsweredExtractor', 'QuestionsAnsweredExtractor', ([], {'llm': 'llm', 'questions': '(5)'}), '(llm=llm, questions=5)\n', (4592, 4614), False, 'from llama_index.extractors import EntityExtractor, KeywordExtractor, QuestionsAnsweredExtractor, SummaryExtractor, TitleExtractor\n')] |
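# --- Added usage sketch (not part of the original record) ---
# Shows how AutoServiceContext.from_defaults above would typically be called.
# The flag values are illustrative assumptions, not defaults mandated by the class.
service_context = AutoServiceContext.from_defaults(
    llm="default",                # resolved to gpt-3.5-turbo per the docstring
    embed_model="default",
    enable_cost_calculator=True,  # attaches CostCalculatingHandler to the callback manager
    chunk_size=512,
    chunk_overlap=100,
)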
import torch
from llama_index import WikipediaReader
def divide_string(wiki_page, word_limit=50):
divided_text = []
for each_page in wiki_page:
words = each_page[0].text.split()
for i in range(0, len(words), word_limit):
chunk = ' '.join(words[i:i+word_limit])
divided_text.append(chunk)
return divided_text
def wiki_prompter(generator,tokenizer,question):
fulltext = "A question is provided below. Given the question, extract " +\
"keywords from the text. Focus on extracting the keywords that we can use " +\
"to best lookup answers to the question. \n" +\
"---------------------\n" +\
"{}\n".format(question) +\
"---------------------\n" +\
"Provide keywords in the following comma-separated format.\nKeywords: "
gen_in = tokenizer(fulltext, return_tensors="pt").input_ids.cuda()
with torch.no_grad():
generated_ids = generator(
gen_in,
max_new_tokens=512,
use_cache=True,
pad_token_id=tokenizer.eos_token_id,
num_return_sequences=1,
do_sample=True,
repetition_penalty=1.1, # 1.0 means 'off'. unfortunately if we penalize it it will not output Sphynx:
temperature=0.5, # default: 1.0
top_k=50, # default: 50
top_p=1.0, # default: 1.0
early_stopping=True,
)
generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] # for some reason, batch_decode returns an array of one element?
text_without_prompt = generated_text[len(fulltext):]
response = text_without_prompt
response = response.split("===")[0]
response.strip()
print(response)
keywords = response.split(", ")
print(keywords)
wiki_docs=[]
for keyw in keywords:
try:
wiki_one = WikipediaReader().load_data(pages=[keyw], auto_suggest=False)
wiki_docs.append(wiki_one)
except:
print("No wiki: "+keyw)
divided_text = divide_string(wiki_docs, 250)
answer_llama=""
score_textlist = [0] * len(divided_text)
for i, chunk in enumerate(divided_text):
for t, keyw in enumerate(keywords):
if keyw.lower() in chunk.lower():
score_textlist[i]=score_textlist[i]+1
answer_list=[]
divided_text = [item for _, item in sorted(zip(score_textlist, divided_text), reverse=True)]
divided_text.append("_")
for i, chunk in enumerate(divided_text):
if i<4 and not i==int(len(divided_text)-1):
fulltext = "Context information is below. \n" +\
"---------------------\n" +\
"{}".format(chunk) +\
"\n---------------------\n" +\
"Given the context information and not prior knowledge, " +\
"answer the question: {}\n".format(question) +\
"Response: "
elif i==int(len(divided_text)-1) and len(answer_list)>1 :
fulltext = "The original question is as follows: {}\n".format(question) +\
"We have provided existing answers:\n" +\
"------------\n" +\
"{}\n".format(str("\n\n".join(answer_list))) +\
"------------\n" +\
"The best one answer: "
else:
continue
print(fulltext)
gen_in = tokenizer(fulltext, return_tensors="pt").input_ids.cuda()
with torch.no_grad():
generated_ids = generator(
gen_in,
max_new_tokens=512,
use_cache=True,
pad_token_id=tokenizer.eos_token_id,
num_return_sequences=1,
do_sample=True,
repetition_penalty=1.1, # 1.0 means 'off'. unfortunately if we penalize it it will not output Sphynx:
temperature=0.5, # default: 1.0
top_k=50, # default: 50
top_p=1.0, # default: 1.0
early_stopping=True,
)
generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
text_without_prompt = generated_text[len(fulltext):]
answer_llama = text_without_prompt
print()
print("\nAnswer: " + answer_llama)
print()
answer_list.append(answer_llama)
return answer_llama
| [
"llama_index.WikipediaReader"
] | [((933, 948), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (946, 948), False, 'import torch\n'), ((3638, 3653), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3651, 3653), False, 'import torch\n'), ((1958, 1975), 'llama_index.WikipediaReader', 'WikipediaReader', ([], {}), '()\n', (1973, 1975), False, 'from llama_index import WikipediaReader\n')] |
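# --- Added example (not part of the original record) ---
# Isolates the keyword-overlap ranking used inside wiki_prompter above: each
# Wikipedia chunk is scored by how many extracted keywords it contains, and
# the highest-scoring chunks are handed to the LLM first.
def rank_chunks(chunks, keywords):
    scores = [sum(kw.lower() in chunk.lower() for kw in keywords) for chunk in chunks]
    return [chunk for _, chunk in sorted(zip(scores, chunks), reverse=True)]

# rank_chunks(["alpha beta", "gamma delta"], ["beta"]) -> ["alpha beta", "gamma delta"]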
from rag.agents.interface import Pipeline
from llama_index.core.program import LLMTextCompletionProgram
import json
from llama_index.llms.ollama import Ollama
from typing import List
from pydantic import create_model
from rich.progress import Progress, SpinnerColumn, TextColumn
import requests
import warnings
import box
import yaml
import timeit
from rich import print
from typing import Any
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
# Import config vars
with open('config.yml', 'r', encoding='utf8') as ymlfile:
cfg = box.Box(yaml.safe_load(ymlfile))
class VProcessorPipeline(Pipeline):
def run_pipeline(self,
payload: str,
query_inputs: [str],
query_types: [str],
query: str,
file_path: str,
index_name: str,
debug: bool = False,
local: bool = True) -> Any:
print(f"\nRunning pipeline with {payload}\n")
start = timeit.default_timer()
if file_path is None:
raise ValueError("File path is required for vprocessor pipeline")
with open(file_path, "rb") as file:
files = {'file': (file_path, file, 'image/jpeg')}
data = {
'image_url': ''
}
response = self.invoke_pipeline_step(lambda: requests.post(cfg.VPROCESSOR_OCR_ENDPOINT,
data=data,
files=files,
timeout=180),
"Running OCR...",
local)
if response.status_code != 200:
print('Request failed with status code:', response.status_code)
print('Response:', response.text)
return "Failed to process file. Please try again."
end = timeit.default_timer()
print(f"Time to run OCR: {end - start}")
start = timeit.default_timer()
data = response.json()
ResponseModel = self.invoke_pipeline_step(lambda: self.build_response_class(query_inputs, query_types),
"Building dynamic response class...",
local)
prompt_template_str = """\
""" + query + """\
using this structured data, coming from OCR {document_data}.\
"""
llm_ollama = self.invoke_pipeline_step(lambda: Ollama(model=cfg.LLM_VPROCESSOR,
base_url=cfg.OLLAMA_BASE_URL_VPROCESSOR,
temperature=0,
request_timeout=900),
"Loading Ollama...",
local)
program = LLMTextCompletionProgram.from_defaults(
output_cls=ResponseModel,
prompt_template_str=prompt_template_str,
llm=llm_ollama,
verbose=True,
)
output = self.invoke_pipeline_step(lambda: program(document_data=data),
"Running inference...",
local)
answer = self.beautify_json(output.model_dump_json())
end = timeit.default_timer()
print(f"\nJSON response:\n")
print(answer + '\n')
print('=' * 50)
print(f"Time to retrieve answer: {end - start}")
return answer
def prepare_files(self, file_path, file):
if file_path is not None:
            # Read the bytes eagerly so the payload stays valid after the file handle closes.
            with open(file_path, "rb") as f:
                files = {'file': (file_path, f.read(), 'image/jpeg')}
                data = {
                    'image_url': ''
                }
else:
files = {'file': (file.filename, file.file, file.content_type)}
data = {
'image_url': ''
}
return data, files
# Function to safely evaluate type strings
def safe_eval_type(self, type_str, context):
try:
return eval(type_str, {}, context)
except NameError:
raise ValueError(f"Type '{type_str}' is not recognized")
def build_response_class(self, query_inputs, query_types_as_strings):
# Controlled context for eval
context = {
'List': List,
'str': str,
'int': int,
'float': float
# Include other necessary types or typing constructs here
}
# Convert string representations to actual types
query_types = [self.safe_eval_type(type_str, context) for type_str in query_types_as_strings]
# Create fields dictionary
fields = {name: (type_, ...) for name, type_ in zip(query_inputs, query_types)}
DynamicModel = create_model('DynamicModel', **fields)
return DynamicModel
def invoke_pipeline_step(self, task_call, task_description, local):
if local:
with Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
transient=False,
) as progress:
progress.add_task(description=task_description, total=None)
ret = task_call()
else:
print(task_description)
ret = task_call()
return ret
def beautify_json(self, result):
try:
# Convert and pretty print
data = json.loads(str(result))
data = json.dumps(data, indent=4)
return data
except (json.decoder.JSONDecodeError, TypeError):
print("The response is not in JSON format:\n")
print(result)
return {} | [
"llama_index.core.program.LLMTextCompletionProgram.from_defaults",
"llama_index.llms.ollama.Ollama"
] | [((396, 458), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (419, 458), False, 'import warnings\n'), ((459, 514), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (482, 514), False, 'import warnings\n'), ((614, 637), 'yaml.safe_load', 'yaml.safe_load', (['ymlfile'], {}), '(ymlfile)\n', (628, 637), False, 'import yaml\n'), ((1029, 1076), 'rich.print', 'print', (['f"""\nRunning pipeline with {payload}\n"""'], {}), '(f"""\nRunning pipeline with {payload}\n""")\n', (1034, 1076), False, 'from rich import print\n'), ((1092, 1114), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1112, 1114), False, 'import timeit\n'), ((2116, 2138), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2136, 2138), False, 'import timeit\n'), ((2147, 2187), 'rich.print', 'print', (['f"""Time to run OCR: {end - start}"""'], {}), "(f'Time to run OCR: {end - start}')\n", (2152, 2187), False, 'from rich import print\n'), ((2205, 2227), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2225, 2227), False, 'import timeit\n'), ((3157, 3296), 'llama_index.core.program.LLMTextCompletionProgram.from_defaults', 'LLMTextCompletionProgram.from_defaults', ([], {'output_cls': 'ResponseModel', 'prompt_template_str': 'prompt_template_str', 'llm': 'llm_ollama', 'verbose': '(True)'}), '(output_cls=ResponseModel,\n prompt_template_str=prompt_template_str, llm=llm_ollama, verbose=True)\n', (3195, 3296), False, 'from llama_index.core.program import LLMTextCompletionProgram\n'), ((3628, 3650), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (3648, 3650), False, 'import timeit\n'), ((3660, 3690), 'rich.print', 'print', (['f"""\nJSON response:\n"""'], {}), '(f"""\nJSON response:\n""")\n', (3665, 3690), False, 'from rich import print\n'), ((3697, 3717), 'rich.print', 'print', (["(answer + '\\n')"], {}), "(answer + '\\n')\n", (3702, 3717), False, 'from rich import print\n'), ((3726, 3741), 'rich.print', 'print', (["('=' * 50)"], {}), "('=' * 50)\n", (3731, 3741), False, 'from rich import print\n'), ((3751, 3799), 'rich.print', 'print', (['f"""Time to retrieve answer: {end - start}"""'], {}), "(f'Time to retrieve answer: {end - start}')\n", (3756, 3799), False, 'from rich import print\n'), ((5158, 5196), 'pydantic.create_model', 'create_model', (['"""DynamicModel"""'], {}), "('DynamicModel', **fields)\n", (5170, 5196), False, 'from pydantic import create_model\n'), ((1927, 1990), 'rich.print', 'print', (['"""Request failed with status code:"""', 'response.status_code'], {}), "('Request failed with status code:', response.status_code)\n", (1932, 1990), False, 'from rich import print\n'), ((2003, 2036), 'rich.print', 'print', (['"""Response:"""', 'response.text'], {}), "('Response:', response.text)\n", (2008, 2036), False, 'from rich import print\n'), ((5657, 5680), 'rich.print', 'print', (['task_description'], {}), '(task_description)\n', (5662, 5680), False, 'from rich import print\n'), ((5884, 5910), 'json.dumps', 'json.dumps', (['data'], {'indent': '(4)'}), '(data, indent=4)\n', (5894, 5910), False, 'import json\n'), ((2719, 2832), 'llama_index.llms.ollama.Ollama', 'Ollama', ([], {'model': 'cfg.LLM_VPROCESSOR', 'base_url': 'cfg.OLLAMA_BASE_URL_VPROCESSOR', 'temperature': '(0)', 'request_timeout': '(900)'}), '(model=cfg.LLM_VPROCESSOR, 
base_url=cfg.OLLAMA_BASE_URL_VPROCESSOR,\n temperature=0, request_timeout=900)\n', (2725, 2832), False, 'from llama_index.llms.ollama import Ollama\n'), ((6005, 6051), 'rich.print', 'print', (['"""The response is not in JSON format:\n"""'], {}), "('The response is not in JSON format:\\n')\n", (6010, 6051), False, 'from rich import print\n'), ((6064, 6077), 'rich.print', 'print', (['result'], {}), '(result)\n', (6069, 6077), False, 'from rich import print\n'), ((1457, 1536), 'requests.post', 'requests.post', (['cfg.VPROCESSOR_OCR_ENDPOINT'], {'data': 'data', 'files': 'files', 'timeout': '(180)'}), '(cfg.VPROCESSOR_OCR_ENDPOINT, data=data, files=files, timeout=180)\n', (1470, 1536), False, 'import requests\n'), ((5364, 5379), 'rich.progress.SpinnerColumn', 'SpinnerColumn', ([], {}), '()\n', (5377, 5379), False, 'from rich.progress import Progress, SpinnerColumn, TextColumn\n'), ((5401, 5455), 'rich.progress.TextColumn', 'TextColumn', (['"""[progress.description]{task.description}"""'], {}), "('[progress.description]{task.description}')\n", (5411, 5455), False, 'from rich.progress import Progress, SpinnerColumn, TextColumn\n')] |
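# --- Added example (not part of the original record) ---
# A minimal sketch of the dynamic-model technique used by build_response_class
# above: field names and types arrive as strings at runtime and are turned into
# a pydantic model that LLMTextCompletionProgram can use as output_cls.
# The field names below are hypothetical.
from pydantic import create_model

fields = {"invoice_number": (str, ...), "total": (float, ...)}
InvoiceModel = create_model("InvoiceModel", **fields)
print(InvoiceModel(invoice_number="INV-1", total=42.5))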
import asyncio
import chromadb
import os
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from traceloop.sdk import Traceloop
os.environ["TOKENIZERS_PARALLELISM"] = "false"
Traceloop.init(app_name="llama_index_example")
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")
# define embedding function
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
# load documents
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
# set up ChromaVectorStore and load in data
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, embed_model=embed_model
)
async def main():
# Query Data
query_engine = index.as_query_engine()
response = await query_engine.aquery("What did the author do growing up?")
print(response)
if __name__ == "__main__":
asyncio.run(main())
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.SimpleDirectoryReader",
"llama_index.vector_stores.chroma.ChromaVectorStore"
] | [((344, 390), 'traceloop.sdk.Traceloop.init', 'Traceloop.init', ([], {'app_name': '"""llama_index_example"""'}), "(app_name='llama_index_example')\n", (358, 390), False, 'from traceloop.sdk import Traceloop\n'), ((408, 434), 'chromadb.EphemeralClient', 'chromadb.EphemeralClient', ([], {}), '()\n', (432, 434), False, 'import chromadb\n'), ((544, 600), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-base-en-v1.5"""'}), "(model_name='BAAI/bge-base-en-v1.5')\n", (564, 600), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((748, 802), 'llama_index.vector_stores.chroma.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (765, 802), False, 'from llama_index.vector_stores.chroma import ChromaVectorStore\n'), ((821, 876), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (849, 876), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext\n'), ((885, 989), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'embed_model': 'embed_model'}), '(documents, storage_context=storage_context,\n embed_model=embed_model)\n', (916, 989), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext\n'), ((631, 675), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data/paul_graham/"""'], {}), "('./data/paul_graham/')\n", (652, 675), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext\n')] |
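# --- Added usage sketch (not part of the original record) ---
# The Chroma-backed index built above can also be queried synchronously; this
# assumes the `index` object from the script above is in scope.
def sync_query(question: str) -> str:
    query_engine = index.as_query_engine()
    return str(query_engine.query(question))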
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ================================================== #
# This file is a part of PYGPT package #
# Website: https://pygpt.net #
# GitHub: https://github.com/szczyglis-dev/py-gpt #
# MIT License #
# Created By : Marcin Szczygliński #
# Updated Date: 2024.03.11 01:00:00 #
# ================================================== #
import os.path
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.core.indices.base import BaseIndex
from llama_index.core.indices.service_context import ServiceContext
from .base import BaseStore
class TempProvider(BaseStore):
def __init__(self, *args, **kwargs):
super(TempProvider, self).__init__(*args, **kwargs)
"""
Temporary vector store provider
:param args: args
:param kwargs: kwargs
"""
self.window = kwargs.get('window', None)
self.id = "TempVectorStore"
self.prefix = "" # prefix for index directory
self.indexes = {}
self.persist = False
def count(self) -> int:
"""
Count indexes
:return: number of indexes
"""
return len(self.indexes)
def get_path(self, id: str) -> str:
"""
Get database path
:param id: index name
:return: database path
"""
if not self.persist:
return ""
tmp_dir = os.path.join(
self.window.core.config.get_user_dir('idx'),
"_tmp", # temp directory
)
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir, exist_ok=True)
path = os.path.join(
self.window.core.config.get_user_dir('idx'),
"_tmp", # temp directory
self.prefix + id,
)
return path
def exists(self, id: str = None) -> bool:
"""
Check if index with id exists
:param id: index name
:return: True if exists
"""
if not self.persist:
if id in self.indexes:
return True
return False
path = self.get_path(id)
if os.path.exists(path):
store = os.path.join(path, "docstore.json")
if os.path.exists(store):
return True
return False
def create(self, id: str):
"""
Create empty index
:param id: index name
"""
if self.persist:
path = self.get_path(id)
if not os.path.exists(path):
index = self.index_from_empty() # create empty index
self.store(
id=id,
index=index,
)
else:
self.indexes[id] = self.index_from_empty()
def get(self, id: str, service_context: ServiceContext = None) -> BaseIndex:
"""
Get index
:param id: tmp idx id
:param service_context: Service context
:return: index instance
"""
if not self.exists(id):
self.create(id)
path = self.get_path(id)
if self.persist:
storage_context = StorageContext.from_defaults(
persist_dir=path,
)
self.indexes[id] = load_index_from_storage(
storage_context,
service_context=service_context,
)
return self.indexes[id]
def store(self, id: str, index: BaseIndex = None):
"""
Store index
:param id: index name
:param index: index instance
"""
if not self.persist:
self.indexes[id] = index
return
if index is None:
index = self.indexes[id]
path = self.get_path(id)
index.storage_context.persist(
persist_dir=path,
)
self.indexes[id] = index
def clean(self, id: str):
"""
Clean index
:param id: index name
"""
if not self.persist:
if id in self.indexes:
del self.indexes[id]
return
path = self.get_path(id)
if os.path.exists(path):
os.remove(path)
| [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage"
] | [((3275, 3321), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'path'}), '(persist_dir=path)\n', (3303, 3321), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((3384, 3457), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (3407, 3457), False, 'from llama_index.core import StorageContext, load_index_from_storage\n')] |
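# --- Added usage sketch (not part of the original record) ---
# Illustrates the intended lifecycle of TempProvider above. `window` normally
# comes from the host application, and index_from_empty() is inherited from
# BaseStore; with persist left False everything stays in memory.
provider = TempProvider(window=None)
provider.create("scratch")          # builds an empty in-memory index
idx = provider.get("scratch")       # returns that index
provider.clean("scratch")           # drops it again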
import streamlit as st
from sqlalchemy import create_engine, inspect, text
from typing import Dict, Any
from llama_index import (
VectorStoreIndex,
ServiceContext,
download_loader,
)
from llama_index.llama_pack.base import BaseLlamaPack
from llama_index.llms import OpenAI
import openai
import os
import pandas as pd
from llama_index.llms.palm import PaLM
from llama_index import (
SimpleDirectoryReader,
ServiceContext,
StorageContext,
VectorStoreIndex,
load_index_from_storage,
)
import sqlite3
from llama_index import SQLDatabase, ServiceContext
from llama_index.indices.struct_store import NLSQLTableQueryEngine
os.environ['OPENAI_API_KEY'] = st.secrets['OPENAI_API_KEY']
class StreamlitChatPack(BaseLlamaPack):
def __init__(
self,
page: str = "Natural Language to SQL Query",
run_from_main: bool = False,
**kwargs: Any,
) -> None:
"""Init params."""
self.page = page
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
import streamlit as st
st.set_page_config(
page_title=f"{self.page}",
layout="centered",
initial_sidebar_state="auto",
menu_items=None,
)
if "messages" not in st.session_state: # Initialize the chat messages history
st.session_state["messages"] = [
{"role": "assistant", "content": f"Hello. Ask me anything related to the database."}
]
st.title(
f"{self.page}💬"
)
st.info(
f"Explore Snowflake views with this AI-powered app. Pose any question and receive exact SQL queries.",
icon="ℹ️",
)
def add_to_message_history(role, content):
message = {"role": role, "content": str(content)}
st.session_state["messages"].append(
message
) # Add response to message history
def get_table_data(table_name, conn):
query = f"SELECT * FROM {table_name}"
df = pd.read_sql_query(query, conn)
return df
@st.cache_resource
def load_db_llm():
# Load the SQLite database
#engine = create_engine("sqlite:///ecommerce_platform1.db")
engine = create_engine("sqlite:///ecommerce_platform1.db?mode=ro", connect_args={"uri": True})
sql_database = SQLDatabase(engine) #include all tables
# Initialize LLM
#llm2 = PaLM(api_key=os.environ["GOOGLE_API_KEY"]) # Replace with your API key
llm2 = OpenAI(temperature=0.1, model="gpt-3.5-turbo-1106")
service_context = ServiceContext.from_defaults(llm=llm2, embed_model="local")
return sql_database, service_context, engine
sql_database, service_context, engine = load_db_llm()
# Sidebar for database schema viewer
st.sidebar.markdown("## Database Schema Viewer")
# Create an inspector object
inspector = inspect(engine)
# Get list of tables in the database
table_names = inspector.get_table_names()
# Sidebar selection for tables
selected_table = st.sidebar.selectbox("Select a Table", table_names)
db_file = 'ecommerce_platform1.db'
conn = sqlite3.connect(db_file)
# Display the selected table
if selected_table:
df = get_table_data(selected_table, conn)
st.sidebar.text(f"Data for table '{selected_table}':")
st.sidebar.dataframe(df)
# Close the connection
conn.close()
# Sidebar Intro
st.sidebar.markdown('## App Created By')
st.sidebar.markdown("""
Harshad Suryawanshi:
[Linkedin](https://www.linkedin.com/in/harshadsuryawanshi/), [Medium](https://harshadsuryawanshi.medium.com/), [X](https://twitter.com/HarshadSurya1c)
""")
st.sidebar.markdown('## Other Projects')
st.sidebar.markdown("""
- [Pokemon Go! Inspired AInimal GO! - Multimodal RAG App](https://www.linkedin.com/posts/harshadsuryawanshi_llamaindex-ai-deeplearning-activity-7134632983495327744-M7yy)
- [Building My Own GPT4-V with PaLM and Kosmos](https://lnkd.in/dawgKZBP)
- [AI Equity Research Analyst](https://ai-eqty-rsrch-anlyst.streamlit.app/)
- [Recasting "The Office" Scene](https://blackmirroroffice.streamlit.app/)
- [Story Generator](https://appstorycombined-agaf9j4ceit.streamlit.app/)
""")
st.sidebar.markdown('## Disclaimer')
st.sidebar.markdown("""This application is for demonstration purposes only and may not cover all aspects of real-world data complexities. Please use it as a guide and not as a definitive source for decision-making.""")
if "query_engine" not in st.session_state: # Initialize the query engine
st.session_state["query_engine"] = NLSQLTableQueryEngine(
sql_database=sql_database,
synthesize_response=True,
service_context=service_context
)
for message in st.session_state["messages"]: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
if prompt := st.chat_input(
"Enter your natural language query about the database"
): # Prompt for user input and save to chat history
with st.chat_message("user"):
st.write(prompt)
add_to_message_history("user", prompt)
# If last message is not from assistant, generate a new response
if st.session_state["messages"][-1]["role"] != "assistant":
with st.spinner():
with st.chat_message("assistant"):
response = st.session_state["query_engine"].query("User Question:"+prompt+". ")
sql_query = f"```sql\n{response.metadata['sql_query']}\n```\n**Response:**\n{response.response}\n"
response_container = st.empty()
response_container.write(sql_query)
# st.write(response.response)
add_to_message_history("assistant", sql_query)
if __name__ == "__main__":
StreamlitChatPack(run_from_main=True).run()
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.SQLDatabase",
"llama_index.indices.struct_store.NLSQLTableQueryEngine"
] | [((1194, 1309), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'f"""{self.page}"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=f'{self.page}', layout='centered',\n initial_sidebar_state='auto', menu_items=None)\n", (1212, 1309), True, 'import streamlit as st\n'), ((1622, 1647), 'streamlit.title', 'st.title', (['f"""{self.page}💬"""'], {}), "(f'{self.page}💬')\n", (1630, 1647), True, 'import streamlit as st\n'), ((1678, 1809), 'streamlit.info', 'st.info', (['f"""Explore Snowflake views with this AI-powered app. Pose any question and receive exact SQL queries."""'], {'icon': '"""ℹ️"""'}), "(\n f'Explore Snowflake views with this AI-powered app. Pose any question and receive exact SQL queries.'\n , icon='ℹ️')\n", (1685, 1809), True, 'import streamlit as st\n'), ((3050, 3098), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""## Database Schema Viewer"""'], {}), "('## Database Schema Viewer')\n", (3069, 3098), True, 'import streamlit as st\n'), ((3157, 3172), 'sqlalchemy.inspect', 'inspect', (['engine'], {}), '(engine)\n', (3164, 3172), False, 'from sqlalchemy import create_engine, inspect, text\n'), ((3334, 3385), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Select a Table"""', 'table_names'], {}), "('Select a Table', table_names)\n", (3354, 3385), True, 'import streamlit as st\n'), ((3445, 3469), 'sqlite3.connect', 'sqlite3.connect', (['db_file'], {}), '(db_file)\n', (3460, 3469), False, 'import sqlite3\n'), ((3803, 3843), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""## App Created By"""'], {}), "('## App Created By')\n", (3822, 3843), True, 'import streamlit as st\n'), ((3852, 4087), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""\n Harshad Suryawanshi: \n [Linkedin](https://www.linkedin.com/in/harshadsuryawanshi/), [Medium](https://harshadsuryawanshi.medium.com/), [X](https://twitter.com/HarshadSurya1c)\n """'], {}), '(\n """\n Harshad Suryawanshi: \n [Linkedin](https://www.linkedin.com/in/harshadsuryawanshi/), [Medium](https://harshadsuryawanshi.medium.com/), [X](https://twitter.com/HarshadSurya1c)\n """\n )\n', (3871, 4087), True, 'import streamlit as st\n'), ((4104, 4144), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""## Other Projects"""'], {}), "('## Other Projects')\n", (4123, 4144), True, 'import streamlit as st\n'), ((4153, 4707), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""\n - [Pokemon Go! Inspired AInimal GO! - Multimodal RAG App](https://www.linkedin.com/posts/harshadsuryawanshi_llamaindex-ai-deeplearning-activity-7134632983495327744-M7yy)\n - [Building My Own GPT4-V with PaLM and Kosmos](https://lnkd.in/dawgKZBP)\n - [AI Equity Research Analyst](https://ai-eqty-rsrch-anlyst.streamlit.app/)\n - [Recasting "The Office" Scene](https://blackmirroroffice.streamlit.app/)\n - [Story Generator](https://appstorycombined-agaf9j4ceit.streamlit.app/)\n """'], {}), '(\n """\n - [Pokemon Go! Inspired AInimal GO! 
- Multimodal RAG App](https://www.linkedin.com/posts/harshadsuryawanshi_llamaindex-ai-deeplearning-activity-7134632983495327744-M7yy)\n - [Building My Own GPT4-V with PaLM and Kosmos](https://lnkd.in/dawgKZBP)\n - [AI Equity Research Analyst](https://ai-eqty-rsrch-anlyst.streamlit.app/)\n - [Recasting "The Office" Scene](https://blackmirroroffice.streamlit.app/)\n - [Story Generator](https://appstorycombined-agaf9j4ceit.streamlit.app/)\n """\n )\n', (4172, 4707), True, 'import streamlit as st\n'), ((4715, 4751), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""## Disclaimer"""'], {}), "('## Disclaimer')\n", (4734, 4751), True, 'import streamlit as st\n'), ((4760, 4984), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""This application is for demonstration purposes only and may not cover all aspects of real-world data complexities. Please use it as a guide and not as a definitive source for decision-making."""'], {}), "(\n 'This application is for demonstration purposes only and may not cover all aspects of real-world data complexities. Please use it as a guide and not as a definitive source for decision-making.'\n )\n", (4779, 4984), True, 'import streamlit as st\n'), ((2185, 2215), 'pandas.read_sql_query', 'pd.read_sql_query', (['query', 'conn'], {}), '(query, conn)\n', (2202, 2215), True, 'import pandas as pd\n'), ((2425, 2515), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///ecommerce_platform1.db?mode=ro"""'], {'connect_args': "{'uri': True}"}), "('sqlite:///ecommerce_platform1.db?mode=ro', connect_args={\n 'uri': True})\n", (2438, 2515), False, 'from sqlalchemy import create_engine, inspect, text\n'), ((2539, 2558), 'llama_index.SQLDatabase', 'SQLDatabase', (['engine'], {}), '(engine)\n', (2550, 2558), False, 'from llama_index import SQLDatabase, ServiceContext\n'), ((2720, 2771), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.1)', 'model': '"""gpt-3.5-turbo-1106"""'}), "(temperature=0.1, model='gpt-3.5-turbo-1106')\n", (2726, 2771), False, 'from llama_index.llms import OpenAI\n'), ((2803, 2862), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm2', 'embed_model': '"""local"""'}), "(llm=llm2, embed_model='local')\n", (2831, 2862), False, 'from llama_index import SQLDatabase, ServiceContext\n'), ((3605, 3659), 'streamlit.sidebar.text', 'st.sidebar.text', (['f"""Data for table \'{selected_table}\':"""'], {}), '(f"Data for table \'{selected_table}\':")\n', (3620, 3659), True, 'import streamlit as st\n'), ((3672, 3696), 'streamlit.sidebar.dataframe', 'st.sidebar.dataframe', (['df'], {}), '(df)\n', (3692, 3696), True, 'import streamlit as st\n'), ((5113, 5224), 'llama_index.indices.struct_store.NLSQLTableQueryEngine', 'NLSQLTableQueryEngine', ([], {'sql_database': 'sql_database', 'synthesize_response': '(True)', 'service_context': 'service_context'}), '(sql_database=sql_database, synthesize_response=True,\n service_context=service_context)\n', (5134, 5224), False, 'from llama_index.indices.struct_store import NLSQLTableQueryEngine\n'), ((5491, 5560), 'streamlit.chat_input', 'st.chat_input', (['"""Enter your natural language query about the database"""'], {}), "('Enter your natural language query about the database')\n", (5504, 5560), True, 'import streamlit as st\n'), ((5389, 5421), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (5404, 5421), True, 'import streamlit as st\n'), ((5439, 5467), 'streamlit.write', 'st.write', (["message['content']"], {}), 
"(message['content'])\n", (5447, 5467), True, 'import streamlit as st\n'), ((5651, 5674), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (5666, 5674), True, 'import streamlit as st\n'), ((5692, 5708), 'streamlit.write', 'st.write', (['prompt'], {}), '(prompt)\n', (5700, 5708), True, 'import streamlit as st\n'), ((5919, 5931), 'streamlit.spinner', 'st.spinner', ([], {}), '()\n', (5929, 5931), True, 'import streamlit as st\n'), ((5954, 5982), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (5969, 5982), True, 'import streamlit as st\n'), ((6244, 6254), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (6252, 6254), True, 'import streamlit as st\n')] |
#!/usr/bin/env python3
from flask import Flask, request
from werkzeug.utils import secure_filename
from llama_index import GPTSimpleVectorIndex, download_loader
import json
import secrets
app = Flask(__name__)
@app.route('/index', methods = ['GET', 'POST'])
def upload_and_index():
if request.method == "POST":
f = request.files['file']
filename = f"./uploads/{secure_filename(f.filename)}"
f.save(filename)
RDFReader = download_loader('RDFReader')
document = RDFReader().load_data(file=filename)
# avoid collisions of filenames
data_id = secrets.token_hex(15)
index = GPTSimpleVectorIndex(document)
index.save_to_disk(f"{data_id}.json")
return {'id': data_id}
@app.route('/query')
def query():
args = request.args
data_id = args.get('id')
query_str = args.get('query')
q_index = GPTSimpleVectorIndex.load_from_disk(f"{data_id}.json")
result = q_index.query(f"{query_str} - return the answer and explanation in a JSON object")
try:
json_start = result.response.index('{')
answer = json.loads(result.response[json_start:])
answer.update({'success': True})
except (ValueError, json.JSONDecodeError):
answer = {'success': False, 'answer': result.response, 'explanation': ''}
return json.dumps(answer)
@app.route('/')
def hello():
return 'Hello, World!'
def run_app():
app.run(host='0.0.0.0', port=5050)
if __name__ == '__main__':
run_app()
| [
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTSimpleVectorIndex",
"llama_index.download_loader"
] | [((199, 214), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (204, 214), False, 'from flask import Flask, request\n'), ((893, 947), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['f"""{data_id}.json"""'], {}), "(f'{data_id}.json')\n", (928, 947), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((1342, 1360), 'json.dumps', 'json.dumps', (['answer'], {}), '(answer)\n', (1352, 1360), False, 'import json\n'), ((464, 492), 'llama_index.download_loader', 'download_loader', (['"""RDFReader"""'], {}), "('RDFReader')\n", (479, 492), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((608, 629), 'secrets.token_hex', 'secrets.token_hex', (['(15)'], {}), '(15)\n', (625, 629), False, 'import secrets\n'), ((647, 677), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['document'], {}), '(document)\n', (667, 677), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((1119, 1159), 'json.loads', 'json.loads', (['result.response[json_start:]'], {}), '(result.response[json_start:])\n', (1129, 1159), False, 'import json\n'), ((388, 415), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (403, 415), False, 'from werkzeug.utils import secure_filename\n')] |
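# --- Added usage sketch (not part of the original record) ---
# How a client could exercise the two Flask endpoints defined above; the host,
# port and the example RDF file name are assumptions.
import requests

with open("ontology.rdf", "rb") as fp:
    resp = requests.post("http://localhost:5050/index", files={"file": fp})
data_id = resp.json()["id"]
answer = requests.get(
    "http://localhost:5050/query",
    params={"id": data_id, "query": "Which classes does the ontology define?"},
)
print(answer.json())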
from contextlib import contextmanager
import uuid
import os
import tiktoken
from . import S2_tools as scholar
import csv
import sys
import requests
# pdf loader
from langchain.document_loaders import OnlinePDFLoader
## paper questioning tools
from llama_index import Document
from llama_index.vector_stores import PineconeVectorStore
from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext
from llama_index.embeddings.openai import OpenAIEmbedding
def PaperSearchAndDownload(query):
# make new workspace
if not os.path.exists( os.path.join(os.getcwd(),'workspaces') ): os.mkdir(os.path.join(os.getcwd(),'workspaces'))
workspace_dir_name = os.path.join(os.getcwd(),'workspaces',query.split()[0] + '_'+ str(uuid.uuid4().hex))
os.mkdir(workspace_dir_name)
os.mkdir(os.path.join(workspace_dir_name,'results'))
os.mkdir(os.path.join(workspace_dir_name,'refy_suggestions'))
os.environ['workspace'] = workspace_dir_name
# 1) search papers
print(' 1) Searching base papers')
papers = scholar.find_paper_from_query(query, result_limit=10)
    if len(papers) == 0:
papers = scholar.find_paper_from_query(query, result_limit=50)
scholar.update_dataframe(incomplete=papers, dest=os.path.join(workspace_dir_name, 'results','papers.csv'))
delete_duplicates_from_csv(csv_file=os.path.join(workspace_dir_name, 'results','papers.csv'))
# 2) Cross-reference reccomendation system:
# a paper is reccomended if and only if it's related to more than one paper
print('\n\n 2) Expanding with Scholar reccomendations')
counts = {}
candidates = {}
for paper in papers:
guesses = scholar.find_recommendations(paper)
for guess in guesses:
if not guess['isOpenAccess']: continue
candidates[guess['title']] = guess
if guess['title'] not in counts.keys(): counts[guess['title']] = 1
else: counts[guess['title']] += 1
# reccomend only papers that appeared more than once
reccomends = []
for key in counts:
if counts[key]>1: reccomends.append(candidates[key])
print(f'found {len(reccomends)} additional papers')
# update the csv
scholar.update_dataframe(incomplete= reccomends, dest=os.path.join(workspace_dir_name, 'results','papers.csv'))
delete_duplicates_from_csv(csv_file=os.path.join(workspace_dir_name, 'results','papers.csv'))
# download the papers (1/2)
print('downloading papers (1/2)')
with open(os.path.join(workspace_dir_name,'results','papers.csv'), 'r',encoding='utf-8') as fp:
csvfile = csv.DictReader(fp)
scholar.download_pdf_from_id(" ".join( row['paperId'] for row in csvfile), workspace_dir_name)
scholar.write_bib_file(csv_file=os.path.join(workspace_dir_name,'results','papers.csv'), bib_file=os.path.join(workspace_dir_name,'results','papers.bib'))
# expand further with refy reccomendendation system
print('\n\n 3) Expanding with Refy reccomendendation system')
print('this might take a while...')
scholar.refy_reccomend(bib_path=os.path.join(workspace_dir_name,'results','papers.bib'))
with open(os.path.join(workspace_dir_name, 'refy_suggestions', 'test.csv'), 'r',encoding='utf-8') as fp:
csvfile = csv.DictReader(fp)
for row in csvfile:
title = scholar.replace_non_alphanumeric(row['title'])
title = title.replace(" ","_")
save_path = os.path.join(workspace_dir_name,'refy_suggestions',(title+'.pdf'))
try:
download_paper(url=row['url'], save_path=save_path)
except:
print(f'couldn t download {row}')
return f'{os.path.join(os.getcwd(), workspace_dir_name)}'
import urllib
def download_paper(url, save_path=f"{uuid.uuid4().hex}.pdf"):
success_string = f"paper saved successfully at {os.path.join(os.path.abspath(save_path))}"
if url.endswith('.pdf'):
urllib.request.urlretrieve(url, save_path)
return success_string
if 'doi' in url:
doi = paper_id = "/".join(url.split("/")[-2:])
# Construct the Crossref API URL
print(doi)
doi_url = f"https://doi.org/{doi}"
# Send a GET request to the doi.org URL
response = requests.get(doi_url, allow_redirects=True)
# Check if the request was successful
if response.status_code == 200:
# Extract the final URL after redirection
url = response.url
if 'arxiv' in url:
        # arXiv paper URL
        # Get the paper ID from the URL
        paper_id = url.split("/")[-1]
        # Build the paper's download URL
        pdf_url = f"http://arxiv.org/pdf/{paper_id}.pdf"
        # Download the paper as a PDF
urllib.request.urlretrieve(pdf_url, save_path)
return success_string
else:
if '/full' in url:
            urllib.request.urlretrieve(url.replace('/full','/pdf'), save_path)
return success_string
if 'plos.org' in url:
final_url = url.replace('article?', 'article/file?')
urllib.request.urlretrieve(final_url, save_path)
return success_string
return f'\nfailed to download {url}'
def download_bibtex_library(csv_path):
with open(csv_path, 'r',encoding='utf-8') as fp:
csvfile = csv.DictReader(fp)
for row in csvfile:
title = scholar.replace_non_alphanumeric(row['title'])
title = title.replace(" ","-")
save_path = os.path.join(os.path.join(csv_path, '..', title+'.pdf'))
try:
download_paper(url=row['url'], save_path=save_path)
except:
try:
download_paper(url=row['url']+'.pdf', save_path=save_path)
except:
print(f'couldn t download {row}')
def generate_chunks(text, CHUNK_LENGTH = 4000):
enc = tiktoken.encoding_for_model("gpt-4")
tokens = enc.encode(text)
token_chunks = [tokens[i:i + CHUNK_LENGTH] for i in range(0, len(tokens), CHUNK_LENGTH)]
word_chunks = [enc.decode(chunk) for chunk in token_chunks]
return word_chunks
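# --- Added example (not part of the original file) ---
# Illustrates generate_chunks() above: the text is tokenized with tiktoken and
# split into fixed-size token windows so no chunk exceeds the model budget.
# The chunk length used here is an arbitrary illustration.
def _demo_chunking(text: str, chunk_length: int = 1000) -> int:
    """Return how many token-bounded chunks `text` produces."""
    return len(generate_chunks(text, CHUNK_LENGTH=chunk_length))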
from langchain.vectorstores import Chroma, Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
import pinecone
import langid
import time
# def process_pdf_folder(folder_path):
# if not os.path.exists(folder_path):
# return 'the folder does not exist, check your spelling'
# for item in os.listdir(folder_path):
# if not item.endswith('.pdf'):continue
# with open(os.path.join(folder_path,'SUMMARY.txt'), 'a', encoding='UTF-8') as write_file:
# write_file.write(item)
# write_file.write("\n\n\n")
# txt = summarize_pdf(item, model='Vicuna')
# try:
# write_file.write(txt)
# except:
# print(txt)
# with open(os.path.join(folder_path,'SUMMARY.txt'), 'r', encoding='UTF-8') as read_file:
# return read_file.read()
# # def summarize_pdf(pdf_path, model= None):
# text = readPDF(pdf_path)
# # according to the TLDR Model, consider smaller chunks
# text_chunks = generate_chunks(text, 700)
# if model is not None:
# summarizer = LocalSearchEngine(tldr_model=model)
# summary=''
# for chunk in text_chunks:
# summary += summarizer.tldr(chunk)
# return summary
def get_result_path(path, exclude = []):
for item in os.listdir(path):
if item == 'papers.csv':
return os.path.join(path, item)
if os.path.isdir(os.path.join(path, item)) and item not in exclude:
res = get_result_path(os.path.join(path, item))
if res: return res
return
def get_workspace_titles(workspace_name):
csv_file_path = get_result_path(workspace_name)
papers_available = []
with open(csv_file_path, 'r', encoding='utf-8') as file:
csv_file = csv.DictReader(file)
for row in csv_file:
papers_available.append(row['title'])
return papers_available
import re
def same_title(title1, title2):
try:
title1 = re.sub(r'[^a-zA-Z]', ' ', title1)
title2 = re.sub(r'[^a-zA-Z]', ' ', title2)
except:
return False
words1 = set(title1.lower().split())
words2 = set(title2.lower().split())
return words1 == words2 or words1 <= words2 or words1 >= words2
def glimpse_pdf(title):
# find papers.csv in workspace
for workspace_name in os.listdir('workspaces'):
csv_file_path = get_result_path(workspace_name)
if csv_file_path is None: return 'no paper found'
with open(csv_file_path, 'r', encoding='utf-8') as file:
csv_file = csv.DictReader(file)
for row in csv_file:
if same_title(row['title'], title): return f"{row['title']}, paperId: {row['paperId']}, summary: {row['abstract']}"
return f'\nno paper found with title {title}'
def count_tokens(text):
enc = tiktoken.encoding_for_model("gpt-4")
tokens = enc.encode(text)
return len(tokens)
def readPDF(pdf_path):
loader = OnlinePDFLoader(pdf_path)
data = loader.load()
text_content = ''
for page in data:
formatted_content = page.page_content.replace('\n\n', ' ')
text_content+=formatted_content
return text_content
def get_pdf_path(dir, exclude=[]):
paths = []
for item in os.listdir(dir):
itempath = os.path.join(dir,item)
if item.endswith('.pdf'): paths.append(itempath)
        if os.path.isdir(itempath) and item not in exclude:
subpaths = get_pdf_path(itempath)
for i in subpaths: paths.append(i)
return paths
def delete_duplicates_from_csv(csv_file):
print('verifying duplicates...')
to_delete = []
def delete_csv_row_by_title(csv_file, title):
# Read the CSV file and store rows in a list
with open(csv_file, 'r',encoding='UTF-8') as file:
reader = csv.DictReader(file)
rows = list(reader)
# Find the row index with the matching title
row_index = None
for index, row in enumerate(rows):
if row['title'] == title:
row_index = index
break
# If no matching title is found, return
if row_index is None:
print(f"No row with title '{title}' found.")
return
# Remove the row from the list
del rows[row_index]
# Write the updated rows back to the CSV file
with open(csv_file, 'w', newline='',encoding='UTF-8') as file:
fieldnames = reader.fieldnames
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
writer.writerows(rows)
with open(csv_file, 'r', encoding='UTF-8') as file:
DELETED = 0
reader = csv.DictReader(file)
rows = list(reader)
entries = set()
for row in rows:
if row['title']=='' or row['title'] is None: continue
if row['title'] not in entries:entries.add(row['title'])
else:
DELETED+=1
to_delete.append(row['title'])
for title in to_delete: delete_csv_row_by_title(csv_file, title=title)
print(f"Deleted {DELETED} duplicates")
return
def update_workspace_dataframe(workspace, verbose = True):
ADDED = 0
# find results.csv
csv_path = get_result_path(workspace)
# get titles in csv
titles = get_workspace_titles(workspace)
# get local papers path
paths = get_pdf_path(workspace, exclude='refy_suggestions')
# adding new to csv:
for path in paths:
exists = False
# extract the title from the local paper
title = scholar.extract_title(path)
for t in titles:
if same_title(t,title): exists = True
# add it to dataframe if it was not found on the DF
if not exists:
if verbose: print(f"\nnew paper detected: {title}")
# find it with online
paper = scholar.find_paper_online(path)
if paper :
if verbose: print(f"\t---> best match found online: {paper['title']} " )
for t in titles:
if same_title(paper['title'], title):
if verbose: print(f"\t this paper is already present in the dataframe. skipping")
else:
if verbose: print(path, '-x-> no match found')
continue
with open(csv_path, 'a', encoding='utf-8') as fp:
areYouSure = True
for t in titles:
if same_title(t,paper['title']): areYouSure =False
if not areYouSure:
if verbose: print(f"double check revealed that the paper is already in the dataframe. Skipping")
continue
if verbose: print(f"\t---> adding {paper['title']}")
ADDED +=1
paper_authors = paper.get('authors', [])
journal_data = {}
if 'journal' in paper:
journal_data = paper.get('journal',[])
if journal_data is not None:
if 'name' not in journal_data: journal_data['name'] = ''
if 'pages' not in journal_data: journal_data['pages'] = ''
if paper.get('tldr',[]) != []:tldr = paper['tldr']['text']
elif paper.get('summary',[]) != []:tldr = paper['summary']
elif 'abstract' in paper:tldr = paper['abstract']
else: tldr = 'No summary available'
if 'year' in paper:
year = paper['year']
elif 'updated' in paper:year = paper['updated']
else: year = ''
if 'citationStyles' in paper:
if 'bibtex' in paper['citationStyles']: citStyle = paper['citationStyles']['bibtex']
else: citStyle = paper['citationStyles'][0]
else: citStyle = ''
csvfile = csv.DictWriter(fp, ['paperId', 'title', 'first_author', 'year', 'abstract','tldr','bibtex','influentialCitationCount','venue','journal','pages'])
try:
csvfile.writerow({
'title': paper['title'],
'first_author': paper_authors[0]['name'] if paper_authors else '',
'year': year,
'abstract': paper['abstract'] if 'abstract' in paper else '',
'paperId': paper['paperId'] if 'paperId' in paper else '',
'tldr':tldr,
'bibtex':citStyle,
'influentialCitationCount': paper['influentialCitationCount'] if 'influentialCitationCount' in paper else '0',
'venue':paper['venue'] if 'venue' in paper else '',
'journal':journal_data['name'] if journal_data is not None else '',
'pages':journal_data['pages'] if journal_data is not None else '',
})
except Exception as e:
if verbose: print('could not add ', title, '\n',e)
# delete dupes if present
if verbose: print(f"\n\nCSV UPDATE: Added {ADDED} new papers")
# clean form dupes
delete_duplicates_from_csv(csv_path)
# update bib
scholar.write_bib_file(csv_path)
return
def load_workspace(folderdir):
docs =[]
for item in os.listdir(folderdir):
if item.endswith('.pdf'):
print(f' > loading {item}')
with suppress_stdout():
content = readPDF(os.path.join(folderdir, item))
docs.append(Document(
text = content,
doc_id = uuid.uuid4().hex
))
if item =='.'or item =='..':continue
if os.path.isdir( os.path.join(folderdir,item) ):
sub_docs = load_workspace(os.path.join(folderdir,item))
for doc in sub_docs:
docs.append(doc)
return docs
# List paths of all pdf files in a folder
def list_workspace_elements(folderdir):
docs =[]
for item in os.listdir(folderdir):
if item.endswith('.pdf'):
docs.append(rf"{os.path.join(folderdir,item)}")
if item =='.'or item =='..':continue
if os.path.isdir( os.path.join(folderdir,item) ):
sub_docs = list_workspace_elements(os.path.join(folderdir,item))
for doc in sub_docs:
docs.append(doc)
return docs
def llama_query_engine(docs:list, pinecone_index_name:str):
pinecone.init(
api_key= os.environ['PINECONE_API_KEY'],
environment= os.environ['PINECONE_API_ENV']
)
# Find the pinecone index
if pinecone_index_name not in pinecone.list_indexes():
# we create a new index
pinecone.create_index(
name=pinecone_index_name,
metric='dotproduct',
dimension=1536 # 1536 dim of text-embedding-ada-002
)
index = pinecone.Index(pinecone_index_name)
# init it
vector_store = PineconeVectorStore(pinecone_index=index)
time.sleep(1)
# setup our storage (vector db)
storage_context = StorageContext.from_defaults(
vector_store=vector_store
)
embed_model = OpenAIEmbedding(model='text-embedding-ada-002', embed_batch_size=100)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
# populate the vector store
LamaIndex = GPTVectorStoreIndex.from_documents(
docs, storage_context=storage_context,
service_context=service_context
)
print('PINECONE Vector Index initialized:\n',index.describe_index_stats())
# init the query engine
query_engine = LamaIndex.as_query_engine()
return query_engine, LamaIndex
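# --- Added usage sketch (not part of the original file) ---
# Typical call pattern for llama_query_engine() above; the workspace folder and
# Pinecone index name are hypothetical, and PINECONE_API_KEY / PINECONE_API_ENV
# must already be set in the environment.
#
#   docs = load_workspace('workspaces/my_query_ab12')
#   engine, index = llama_query_engine(docs, pinecone_index_name='papers-index')
#   print(engine.query('What methods do these papers have in common?'))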
@contextmanager
def suppress_stdout():
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout | [
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((768, 796), 'os.mkdir', 'os.mkdir', (['workspace_dir_name'], {}), '(workspace_dir_name)\n', (776, 796), False, 'import os\n'), ((5950, 5986), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-4"""'], {}), "('gpt-4')\n", (5977, 5986), False, 'import tiktoken\n'), ((7532, 7548), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (7542, 7548), False, 'import os\n'), ((8565, 8589), 'os.listdir', 'os.listdir', (['"""workspaces"""'], {}), "('workspaces')\n", (8575, 8589), False, 'import os\n'), ((9082, 9118), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-4"""'], {}), "('gpt-4')\n", (9109, 9118), False, 'import tiktoken\n'), ((9209, 9234), 'langchain.document_loaders.OnlinePDFLoader', 'OnlinePDFLoader', (['pdf_path'], {}), '(pdf_path)\n', (9224, 9234), False, 'from langchain.document_loaders import OnlinePDFLoader\n'), ((9509, 9524), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (9519, 9524), False, 'import os\n'), ((15782, 15803), 'os.listdir', 'os.listdir', (['folderdir'], {}), '(folderdir)\n', (15792, 15803), False, 'import os\n'), ((16505, 16526), 'os.listdir', 'os.listdir', (['folderdir'], {}), '(folderdir)\n', (16515, 16526), False, 'import os\n'), ((16958, 17060), 'pinecone.init', 'pinecone.init', ([], {'api_key': "os.environ['PINECONE_API_KEY']", 'environment': "os.environ['PINECONE_API_ENV']"}), "(api_key=os.environ['PINECONE_API_KEY'], environment=os.\n environ['PINECONE_API_ENV'])\n", (16971, 17060), False, 'import pinecone\n'), ((17396, 17431), 'pinecone.Index', 'pinecone.Index', (['pinecone_index_name'], {}), '(pinecone_index_name)\n', (17410, 17431), False, 'import pinecone\n'), ((17470, 17511), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'index'}), '(pinecone_index=index)\n', (17489, 17511), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((17516, 17529), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (17526, 17529), False, 'import time\n'), ((17589, 17644), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (17617, 17644), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((17678, 17747), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'embed_batch_size': '(100)'}), "(model='text-embedding-ada-002', embed_batch_size=100)\n", (17693, 17747), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((17770, 17823), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (17798, 17823), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((17878, 17988), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(docs, storage_context=storage_context,\n service_context=service_context)\n', (17912, 17988), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((692, 703), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (701, 703), False, 'import os\n'), ((810, 853), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""'], {}), "(workspace_dir_name, 'results')\n", (822, 853), False, 'import os\n'), ((867, 919), 'os.path.join', 
'os.path.join', (['workspace_dir_name', '"""refy_suggestions"""'], {}), "(workspace_dir_name, 'refy_suggestions')\n", (879, 919), False, 'import os\n'), ((2613, 2631), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (2627, 2631), False, 'import csv\n'), ((3285, 3303), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (3299, 3303), False, 'import csv\n'), ((3963, 4005), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'save_path'], {}), '(url, save_path)\n', (3989, 4005), False, 'import urllib\n'), ((4283, 4326), 'requests.get', 'requests.get', (['doi_url'], {'allow_redirects': '(True)'}), '(doi_url, allow_redirects=True)\n', (4295, 4326), False, 'import requests\n'), ((4795, 4841), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['pdf_url', 'save_path'], {}), '(pdf_url, save_path)\n', (4821, 4841), False, 'import urllib\n'), ((5368, 5386), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (5382, 5386), False, 'import csv\n'), ((8008, 8028), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (8022, 8028), False, 'import csv\n'), ((8205, 8237), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'title1'], {}), "('[^a-zA-Z]', ' ', title1)\n", (8211, 8237), False, 'import re\n'), ((8256, 8288), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'title2'], {}), "('[^a-zA-Z]', ' ', title2)\n", (8262, 8288), False, 'import re\n'), ((9545, 9568), 'os.path.join', 'os.path.join', (['dir', 'item'], {}), '(dir, item)\n', (9557, 9568), False, 'import os\n'), ((10975, 10995), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (10989, 10995), False, 'import csv\n'), ((17149, 17172), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (17170, 17172), False, 'import pinecone\n'), ((17214, 17302), 'pinecone.create_index', 'pinecone.create_index', ([], {'name': 'pinecone_index_name', 'metric': '"""dotproduct"""', 'dimension': '(1536)'}), "(name=pinecone_index_name, metric='dotproduct',\n dimension=1536)\n", (17235, 17302), False, 'import pinecone\n'), ((1250, 1307), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (1262, 1307), False, 'import os\n'), ((1348, 1405), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (1360, 1405), False, 'import os\n'), ((2268, 2325), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2280, 2325), False, 'import os\n'), ((2366, 2423), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2378, 2423), False, 'import os\n'), ((2509, 2566), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2521, 2566), False, 'import os\n'), ((2778, 2835), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.csv"""'], {}), "(workspace_dir_name, 'results', 'papers.csv')\n", (2790, 2835), False, 'import os\n'), ((2844, 2901), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""results"""', '"""papers.bib"""'], {}), "(workspace_dir_name, 'results', 'papers.bib')\n", (2856, 2901), False, 'import os\n'), ((3100, 3157), 'os.path.join', 'os.path.join', 
(['workspace_dir_name', '"""results"""', '"""papers.bib"""'], {}), "(workspace_dir_name, 'results', 'papers.bib')\n", (3112, 3157), False, 'import os\n'), ((3172, 3236), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""refy_suggestions"""', '"""test.csv"""'], {}), "(workspace_dir_name, 'refy_suggestions', 'test.csv')\n", (3184, 3236), False, 'import os\n'), ((3468, 3536), 'os.path.join', 'os.path.join', (['workspace_dir_name', '"""refy_suggestions"""', "(title + '.pdf')"], {}), "(workspace_dir_name, 'refy_suggestions', title + '.pdf')\n", (3480, 3536), False, 'import os\n'), ((5119, 5167), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['final_url', 'save_path'], {}), '(final_url, save_path)\n', (5145, 5167), False, 'import urllib\n'), ((7602, 7626), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7614, 7626), False, 'import os\n'), ((8802, 8822), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (8816, 8822), False, 'import csv\n'), ((9636, 9659), 'os.path.isdir', 'os.path.isdir', (['itempath'], {}), '(itempath)\n', (9649, 9659), False, 'import os\n'), ((10078, 10098), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (10092, 10098), False, 'import csv\n'), ((10760, 10803), 'csv.DictWriter', 'csv.DictWriter', (['file'], {'fieldnames': 'fieldnames'}), '(file, fieldnames=fieldnames)\n', (10774, 10803), False, 'import csv\n'), ((16201, 16230), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16213, 16230), False, 'import os\n'), ((16702, 16731), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16714, 16731), False, 'import os\n'), ((576, 587), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (585, 587), False, 'import os\n'), ((627, 638), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (636, 638), False, 'import os\n'), ((3718, 3729), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3727, 3729), False, 'import os\n'), ((3806, 3818), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3816, 3818), False, 'import uuid\n'), ((3896, 3922), 'os.path.abspath', 'os.path.abspath', (['save_path'], {}), '(save_path)\n', (3911, 3922), False, 'import os\n'), ((5564, 5608), 'os.path.join', 'os.path.join', (['csv_path', '""".."""', "(title + '.pdf')"], {}), "(csv_path, '..', title + '.pdf')\n", (5576, 5608), False, 'import os\n'), ((7652, 7676), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7664, 7676), False, 'import os\n'), ((7738, 7762), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (7750, 7762), False, 'import os\n'), ((14294, 14449), 'csv.DictWriter', 'csv.DictWriter', (['fp', "['paperId', 'title', 'first_author', 'year', 'abstract', 'tldr', 'bibtex',\n 'influentialCitationCount', 'venue', 'journal', 'pages']"], {}), "(fp, ['paperId', 'title', 'first_author', 'year', 'abstract',\n 'tldr', 'bibtex', 'influentialCitationCount', 'venue', 'journal', 'pages'])\n", (14308, 14449), False, 'import csv\n'), ((16271, 16300), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16283, 16300), False, 'import os\n'), ((16781, 16810), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16793, 16810), False, 'import os\n'), ((745, 757), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (755, 757), False, 'import uuid\n'), ((15951, 15980), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (15963, 15980), False, 'import os\n'), ((16590, 
16619), 'os.path.join', 'os.path.join', (['folderdir', 'item'], {}), '(folderdir, item)\n', (16602, 16619), False, 'import os\n'), ((16085, 16097), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (16095, 16097), False, 'import uuid\n')] |
import os
import logging
import sys
from llama_index import GPTSimpleVectorIndex
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# Load the index
new_index = GPTSimpleVectorIndex.load_from_disk('index.json')
# Query the index
response = new_index.query("What did the author do in 9th grade?")
# Print the answer
print(response)
| [
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((82, 140), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (101, 140), False, 'import logging\n'), ((234, 283), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['"""index.json"""'], {}), "('index.json')\n", (269, 283), False, 'from llama_index import GPTSimpleVectorIndex\n'), ((172, 212), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (193, 212), False, 'import logging\n'), ((141, 160), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (158, 160), False, 'import logging\n')] |
import os
import openai
from fastapi import FastAPI, HTTPException
from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context
from llama_index.indices.postprocessor import SentenceEmbeddingOptimizer
from llama_index.embeddings import OpenAIEmbedding
from pydantic import BaseModel
openai.api_key = os.environ["OPENAI_API_KEY"]
app = FastAPI()
class QueryRequest(BaseModel):
question: str
class QueryResponse(BaseModel):
answer: str
embed_model = OpenAIEmbedding(embed_batch_size=10)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
set_global_service_context(service_context)
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)
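# SentenceEmbeddingOptimizer(percentile_cutoff=0.5) keeps roughly the top half of
# each retrieved node's sentences, ranked by relevance to the query, before the
# compact response synthesis step.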
query_engine = index.as_query_engine(
node_postprocessors=[SentenceEmbeddingOptimizer(percentile_cutoff=0.5)],
response_mode="compact",
similarity_cutoff=0.7
)
@app.get("/")
def read_root():
return {"Hello": "World"}
@app.post("/chat")
def query_data(request: QueryRequest):
response = query_engine.query(request.question)
if not response:
raise HTTPException(status_code=404, detail="No results found")
return QueryResponse(answer=str(response))
| [
"llama_index.indices.postprocessor.SentenceEmbeddingOptimizer",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.embeddings.OpenAIEmbedding",
"llama_index.set_global_service_context",
"llama_index.load_index_from_storage"
] | [((385, 394), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (392, 394), False, 'from fastapi import FastAPI, HTTPException\n'), ((510, 546), 'llama_index.embeddings.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'embed_batch_size': '(10)'}), '(embed_batch_size=10)\n', (525, 546), False, 'from llama_index.embeddings import OpenAIEmbedding\n'), ((565, 618), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (593, 618), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context\n'), ((619, 662), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (645, 662), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context\n'), ((682, 735), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (710, 735), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context\n'), ((744, 784), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (767, 784), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext, set_global_service_context\n'), ((1169, 1226), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(404)', 'detail': '"""No results found"""'}), "(status_code=404, detail='No results found')\n", (1182, 1226), False, 'from fastapi import FastAPI, HTTPException\n'), ((848, 897), 'llama_index.indices.postprocessor.SentenceEmbeddingOptimizer', 'SentenceEmbeddingOptimizer', ([], {'percentile_cutoff': '(0.5)'}), '(percentile_cutoff=0.5)\n', (874, 897), False, 'from llama_index.indices.postprocessor import SentenceEmbeddingOptimizer\n')] |
"""Example of how to use llamaindex for semantic search.
This example assumes that initially there is a project.DATASETS_DIR_PATH/embeddings.pkl file
that has a list of dictionaries with each dictionary containing "text",
"rule_name" and "section_label" fields.
The first time you run this script, a vector store will be created with
embeddings. This store will be saved to "cache/msrb_index_store".
Subsequent runs will load the vector store from this location.
Each time you run this script you enter a loop where you can ask as
many questions of the data as you'd like. Each time you ask a question
you will be given a response that tells you:
1. The rule names and section labels for the most relevant rules,
2. A brief preview of the text from those sections, and
3. An LLM-generated response to your query given the texts that it found.
You can tweak three parameters at the bottom of this script (after all of
the function definitions):
- model_name: which OpenAI model to use.
- top_k: how many rules to return.
- similarity_cutoff: threshold for relevance (between 0 and 1).
"""
import os
import pickle
from pathlib import Path
# from llama_index import SimpleDirectoryReader
# from llama_index.node_parser import SimpleNodeParser
from llama_index import (
VectorStoreIndex,
StorageContext,
LLMPredictor,
ServiceContext,
get_response_synthesizer,
load_index_from_storage,
)
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.indices.postprocessor import SimilarityPostprocessor
from llama_index.schema import TextNode
from langchain import OpenAI
from examples import project
TEXT_DATA_FILE = Path(os.path.join(project.DATASETS_DIR_PATH, 'embeddings.pkl'))
INDEX_DATA_DIR = Path('cache/msrb_index_store')
def get_vector_store(service_context: ServiceContext) -> VectorStoreIndex:
"""Load a vector index from disk or, if it doesn't exist, create one from raw text data."""
# === Load the data ===========================================================
# Simple example of reading text files from a local directory
# reader = SimpleDirectoryReader('./data')
# documents = reader.load_data() # returns a list of Documents
# parser = SimpleNodeParser()
# nodes = parser.get_nodes_from_documents(documents) # returns a list of nodes
if INDEX_DATA_DIR.exists():
print('Loading vector store from local directory.')
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir=INDEX_DATA_DIR)
# load index
index = load_index_from_storage(storage_context)
else:
print('No local index found.')
print('Loading data.')
        with open(TEXT_DATA_FILE, 'rb') as f:
data = pickle.load(f)
print('Building nodes.')
nodes = []
for example in data:
node = TextNode(text=example['text'])
node.metadata['rule_name'] = example['rule_name']
node.metadata['section_label'] = example['section_label']
nodes.append(node)
print(f'Created {len(nodes)} nodes.')
print('Creating vector store.')
index = VectorStoreIndex(nodes, service_context=service_context)
# index = VectorStoreIndex.from_documents(documents)
print('Saving vector store.')
index.storage_context.persist(INDEX_DATA_DIR)
return index
def get_llm_backend(model_name: str) -> ServiceContext:
"""Get an LLM to provide embedding and text generation service."""
# === Define the LLM backend ==================================================
# define LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name=model_name))
# configure service context
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
return service_context
def get_query_engine(index: VectorStoreIndex, response_mode: str, top_k: int, similarity_cutoff: float) -> RetrieverQueryEngine:
"""Build a query enginge by combining a retriever and response synthesizer."""
# configure retriever
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=top_k,
)
# configure response synthesizer
response_synthesizer = get_response_synthesizer()
# assemble query engine
# query_engine = RetrieverQueryEngine.from_args(
# retriever=retriever,
# response_synthesizer=response_synthesizer,
# response_mode=response_mode
# )
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=[
SimilarityPostprocessor(similarity_cutoff=similarity_cutoff)
]
)
return query_engine
if __name__ == '__main__':
model_name = "text-davinci-003"
top_k = 3
similarity_cutoff = 0.7
service_context = get_llm_backend(model_name)
index = get_vector_store(service_context)
response_mode = 'refine' # response_mode = 'no_text' for no text generation
query_engine = get_query_engine(index, response_mode, top_k, similarity_cutoff)
# query
while (query := input('Ask me a question about the MSRB rule book ("quit" to quit): ')) != 'quit':
print(f'You asked: {query}')
response = query_engine.query(query)
print('Source nodes:')
print(f'There are {len(response.source_nodes)} source nodes from the following rules:')
for source_node in response.source_nodes:
print(source_node.node.metadata['rule_name'], source_node.node.metadata['section_label'])
print(response.get_formatted_sources())
print('Response:')
print(response)
print()
print('='*40)
| [
"llama_index.get_response_synthesizer",
"llama_index.VectorStoreIndex",
"llama_index.ServiceContext.from_defaults",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.schema.TextNode",
"llama_index.StorageContext.from_defaults",
"llama_index.indices.postprocessor.SimilarityPostprocessor",
"llama_index.load_index_from_storage"
] | [((1802, 1832), 'pathlib.Path', 'Path', (['"""cache/msrb_index_store"""'], {}), "('cache/msrb_index_store')\n", (1806, 1832), False, 'from pathlib import Path\n'), ((1726, 1783), 'os.path.join', 'os.path.join', (['project.DATASETS_DIR_PATH', '"""embeddings.pkl"""'], {}), "(project.DATASETS_DIR_PATH, 'embeddings.pkl')\n", (1738, 1783), False, 'import os\n'), ((3834, 3891), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (3862, 3891), False, 'from llama_index import VectorStoreIndex, StorageContext, LLMPredictor, ServiceContext, get_response_synthesizer, load_index_from_storage\n'), ((4176, 4233), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': 'top_k'}), '(index=index, similarity_top_k=top_k)\n', (4196, 4233), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((4322, 4348), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (4346, 4348), False, 'from llama_index import VectorStoreIndex, StorageContext, LLMPredictor, ServiceContext, get_response_synthesizer, load_index_from_storage\n'), ((2542, 2598), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'INDEX_DATA_DIR'}), '(persist_dir=INDEX_DATA_DIR)\n', (2570, 2598), False, 'from llama_index import VectorStoreIndex, StorageContext, LLMPredictor, ServiceContext, get_response_synthesizer, load_index_from_storage\n'), ((2636, 2676), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2659, 2676), False, 'from llama_index import VectorStoreIndex, StorageContext, LLMPredictor, ServiceContext, get_response_synthesizer, load_index_from_storage\n'), ((3237, 3293), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'service_context': 'service_context'}), '(nodes, service_context=service_context)\n', (3253, 3293), False, 'from llama_index import VectorStoreIndex, StorageContext, LLMPredictor, ServiceContext, get_response_synthesizer, load_index_from_storage\n'), ((2824, 2838), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2835, 2838), False, 'import pickle\n'), ((2940, 2970), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': "example['text']"}), "(text=example['text'])\n", (2948, 2970), False, 'from llama_index.schema import TextNode\n'), ((3733, 3777), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': 'model_name'}), '(temperature=0, model_name=model_name)\n', (3739, 3777), False, 'from langchain import OpenAI\n'), ((4724, 4784), 'llama_index.indices.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': 'similarity_cutoff'}), '(similarity_cutoff=similarity_cutoff)\n', (4747, 4784), False, 'from llama_index.indices.postprocessor import SimilarityPostprocessor\n')] |
from dotenv import load_dotenv
load_dotenv()
from llama_index import GPTVectorStoreIndex, TrafilaturaWebReader
import chromadb
def create_embedding_store(name):
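    # chromadb.Client() with no settings is an in-memory (ephemeral) client, so the
    # collection is rebuilt from scratch on every run rather than persisted to disk.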
chroma_client = chromadb.Client()
return chroma_client.create_collection(name)
def query_pages(collection, urls, questions):
docs = TrafilaturaWebReader().load_data(urls)
index = GPTVectorStoreIndex.from_documents(docs, chroma_collection=collection)
query_engine = index.as_query_engine()
for question in questions:
print(f"Question: {question} \n")
print(f"Answer: {query_engine.query(question)}")
if __name__ == "__main__":
url_list = ["https://supertype.ai", "https://supertype.ai/about-us"]
questions = [
"Who are the members of Supertype.ai",
"What problems are they trying to solve?",
"What are the important values at the company?"
]
collection = create_embedding_store("supertype")
query_pages(
collection,
url_list,
questions
)
| [
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.TrafilaturaWebReader"
] | [((32, 45), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (43, 45), False, 'from dotenv import load_dotenv\n'), ((185, 202), 'chromadb.Client', 'chromadb.Client', ([], {}), '()\n', (200, 202), False, 'import chromadb\n'), ((361, 431), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['docs'], {'chroma_collection': 'collection'}), '(docs, chroma_collection=collection)\n', (395, 431), False, 'from llama_index import GPTVectorStoreIndex, TrafilaturaWebReader\n'), ((310, 332), 'llama_index.TrafilaturaWebReader', 'TrafilaturaWebReader', ([], {}), '()\n', (330, 332), False, 'from llama_index import GPTVectorStoreIndex, TrafilaturaWebReader\n')] |
import ast
import logging
import os
import re
from typing import List
import openai
import pandas as pd
import requests
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
TWITTER_USERNAME = "shauryr"
def generate_search_queries_prompt(question):
"""Generates the search queries prompt for the given question.
Args: question (str): The question to generate the search queries prompt for
Returns: str: The search queries prompt for the given question
"""
return (
        f'Please generate four related search queries that align with the initial query: "{question}". '
f'Each variation should be presented as a list of strings, following this format: ["query 1", "query 2", "query 3", "query 4"]'
)
def get_related_questions(query):
research_template = """You are a search engine expert"""
messages = [{
"role": "system",
"content": research_template
}, {
"role": "user",
"content": generate_search_queries_prompt(query),
}]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.5,
max_tokens=256
)
related_questions = get_questions(response.choices[0].message.content)
related_questions.append(query)
return related_questions
def get_questions(response_text):
data = response_text.split("\n")
data = [ast.literal_eval(item)[0] for item in data]
return data
def get_unique_docs(docs):
unique_docs_id = []
unique_docs = []
for doc in docs:
        if doc.extra_info['paperId'] not in unique_docs_id:
unique_docs_id.append(doc.extra_info['paperId'])
unique_docs.append(doc)
return unique_docs
class SemanticScholarReader(BaseReader):
"""
A class to read and process data from Semantic Scholar API
...
Methods
-------
__init__():
Instantiate the SemanticScholar object
load_data(query: str, limit: int = 10, returned_fields: list = ["title", "abstract", "venue", "year", "paperId", "citationCount", "openAccessPdf", "authors"]) -> list:
Loads data from Semantic Scholar based on the query and returned_fields
"""
def __init__(self, timeout=10, api_key=None, base_dir="pdfs"):
"""
Instantiate the SemanticScholar object
"""
from semanticscholar import SemanticScholar
import arxiv
self.arxiv = arxiv
self.base_dir = base_dir
self.s2 = SemanticScholar(timeout=timeout)
# check for base dir
if not os.path.exists(self.base_dir):
os.makedirs(self.base_dir)
def _clear_cache(self):
"""
delete the .citation* folder
"""
        import glob
        import shutil
        # shutil.rmtree does not expand glob patterns, so expand them explicitly
        for path in glob.glob("./.citation*"):
            shutil.rmtree(path)
def _download_pdf(self, paper_id, url: str, base_dir="pdfs"):
logger = logging.getLogger()
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
}
# Making a GET request
response = requests.get(url, headers=headers, stream=True)
content_type = response.headers["Content-Type"]
# As long as the content-type is application/pdf, this will download the file
if "application/pdf" in content_type:
os.makedirs(base_dir, exist_ok=True)
file_path = os.path.join(base_dir, f"{paper_id}.pdf")
# check if the file already exists
if os.path.exists(file_path):
logger.info(f"{file_path} already exists")
return file_path
with open(file_path, "wb") as file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
file.write(chunk)
logger.info(f"Downloaded pdf from {url}")
return file_path
else:
logger.warning(f"{url} was not downloaded: protected")
return None
def _get_full_text_docs(self, documents: List[Document]) -> List[Document]:
from PyPDF2 import PdfReader
"""
Gets the full text of the documents from Semantic Scholar
Parameters
----------
documents: list
The list of Document object that contains the search results
Returns
-------
list
The list of Document object that contains the search results with full text
Raises
------
Exception
If there is an error while getting the full text
"""
full_text_docs = []
for paper in documents:
metadata = paper.extra_info
url = metadata["openAccessPdf"]
externalIds = metadata["externalIds"]
paper_id = metadata["paperId"]
file_path = None
persist_dir = os.path.join(self.base_dir, f"{paper_id}.pdf")
if url and not os.path.exists(persist_dir):
# Download the document first
file_path = self._download_pdf(metadata["paperId"], url, persist_dir)
if (
not url
and externalIds
and "ArXiv" in externalIds
and not os.path.exists(persist_dir)
):
# download the pdf from arxiv
file_path = self._download_pdf_from_arxiv(
paper_id, externalIds["ArXiv"]
)
# Then, check if it's a valid PDF. If it's not, skip to the next document.
if file_path:
try:
pdf = PdfReader(open(file_path, "rb"))
except Exception as e:
logging.error(
f"Failed to read pdf with exception: {e}. Skipping document..."
)
continue
text = ""
for page in pdf.pages:
text += page.extract_text()
full_text_docs.append(Document(text=text, extra_info=metadata))
return full_text_docs
def _download_pdf_from_arxiv(self, paper_id, arxiv_id):
paper = next(self.arxiv.Search(id_list=[arxiv_id], max_results=1).results())
paper.download_pdf(dirpath=self.base_dir, filename=paper_id + ".pdf")
return os.path.join(self.base_dir, f"{paper_id}.pdf")
def load_data(
self,
query,
limit,
full_text=False,
returned_fields=[
"title",
"abstract",
"venue",
"year",
"paperId",
"citationCount",
"openAccessPdf",
"authors",
"externalIds",
],
) -> List[Document]:
"""
Loads data from Semantic Scholar based on the entered query and returned_fields
Parameters
----------
query: str
The search query for the paper
limit: int, optional
The number of maximum results returned (default is 10)
returned_fields: list, optional
The list of fields to be returned from the search
Returns
-------
list
The list of Document object that contains the search results
Raises
------
Exception
If there is an error while performing the search
"""
results = []
# query = get_related_questions(query)
query = [query]
try:
for question in query:
logging.info(f"Searching for {question}")
_results = self.s2.search_paper(question, limit=limit, fields=returned_fields)
results.extend(_results[:limit])
except (requests.HTTPError, requests.ConnectionError, requests.Timeout) as e:
logging.error(
"Failed to fetch data from Semantic Scholar with exception: %s", e
)
raise
except Exception as e:
logging.error("An unexpected error occurred: %s", e)
raise
documents = []
for item in results[:limit*len(query)]:
openAccessPdf = getattr(item, "openAccessPdf", None)
abstract = getattr(item, "abstract", None)
title = getattr(item, "title", None)
text = None
# concat title and abstract
if abstract and title:
text = title + " " + abstract
elif not abstract:
text = title
metadata = {
"title": title,
"venue": getattr(item, "venue", None),
"year": getattr(item, "year", None),
"paperId": getattr(item, "paperId", None),
"citationCount": getattr(item, "citationCount", None),
"openAccessPdf": openAccessPdf.get("url") if openAccessPdf else None,
"authors": [author["name"] for author in getattr(item, "authors", [])],
"externalIds": getattr(item, "externalIds", None),
}
documents.append(Document(text=text, extra_info=metadata))
if full_text:
logging.info("Getting full text documents...")
full_text_documents = self._get_full_text_docs(documents)
documents.extend(full_text_documents)
documents = get_unique_docs(documents)
return documents
def get_twitter_badge():
"""Constructs the Markdown code for the Twitter badge."""
return f'<a href="https://twitter.com/{TWITTER_USERNAME}" target="_blank"><img src="https://img.shields.io/badge/Twitter-1DA1F2?style=for-the-badge&logo=twitter&logoColor=white" /></a>'
def get_link_tree_badge():
return f'<a href="https://linktr.ee/shauryr" target="_blank"><img src="https://img.shields.io/badge/Linktree-39E09B?style=for-the-badge&logo=linktree&logoColor=white" /></a>'
def get_github_badge():
return f'<a href="https://github.com/shauryr/s2qa" target="_blank"><img src="https://img.shields.io/badge/GitHub-100000?style=for-the-badge&logo=github&logoColor=white" /></a>'
def display_questions(sample_questions):
s = "#### 🧐 More questions? \n"
for i in sample_questions:
s += "- " + i + "\n"
return s
def get_citation(metadata):
# Extract details from metadata
title = metadata.get("title", "No Title")
venue = metadata.get("venue", "No Venue")
year = metadata.get("year", "No Year")
authors = metadata.get("authors", [])
# Generate author names in correct format
author_names = []
for author in authors[:5]:
last_name, *first_names = author.split(" ")
first_initials = " ".join(name[0] + "." for name in first_names)
author_names.append(f"{last_name}, {first_initials}")
authors_string = ", & ".join(author_names)
# APA citation format: Author1, Author2, & Author3. (Year). Title. Venue.
citation = f"{authors_string}. ({year}). **{title}**. {venue}."
return citation
def extract_numbers_in_brackets(input_string):
# use regular expressions to find all occurrences of [number]
# numbers_in_brackets = re.findall(r"\[(\d+)\]", input_string)
numbers_in_brackets = re.findall(r"\[(.*?)\]", input_string)
# numbers_in_brackets = [int(i) for num in numbers_in_brackets for i in num.split(",")]
# convert all numbers to int and remove duplicates by converting list to set and then back to list
cleaned_numbers = []
for n in numbers_in_brackets:
# Try to convert the value to an integer
try:
cleaned_numbers.append(int(n))
# If it fails (throws a ValueError), just ignore and continue with the next value
except ValueError:
continue
# Apply the rest of your code on the cleaned list
return sorted(list(set(cleaned_numbers)))
def generate_used_reference_display(source_nodes, used_nodes):
reference_display = "\n #### 📚 References: \n"
# for index in used_nodes get the source node and add it to the reference display
for index in used_nodes:
try:
source_node = source_nodes[index - 1]
except IndexError:
return "\n #### 😞 Couldnt Parse References \n"
metadata = source_node.node.metadata
reference_display += (
"[["
+ str(source_nodes.index(source_node) + 1)
+ "]"
+ "("
+ "https://www.semanticscholar.org/paper/"
+ metadata["paperId"]
+ ")] "
+ "\n `. . ."
+ str(source_node.node.text)[100:290]
+ ". . .`"
+ get_citation(metadata)
+ " \n\n"
)
return reference_display
def documents_to_df(documents):
# convert document objects to dataframe
list_data = []
for i, doc in enumerate(documents):
list_data.append(doc.extra_info.copy())
df = pd.DataFrame(list_data)
return df
def generate_reference_display(source_nodes):
reference_display = "\n ### References: \n"
for source_node in source_nodes:
metadata = source_node.node.metadata
# add number infront of citation to make it easier to reference
# reference_display += (
# "[["
# + str(source_nodes.index(source_node) + 1)
# + "]"
# + "("
# + "https://www.semanticscholar.org/paper/"
# + metadata["paperId"]
# + ")] "
# + '\n "`. . .'
# + str(source_node.node.text)[100:290]
# + ". . .` - **"
# + get_citation(metadata)
# + "** \n\n"
# )
reference_display += (
"[["
+ str(source_nodes.index(source_node) + 1)
+ "]"
+ "("
+ "https://www.semanticscholar.org/paper/"
+ metadata["paperId"]
+ ")] "
+ get_citation(metadata)
+ " \n\n"
)
return reference_display
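# Minimal usage sketch (added illustration, not part of the original module). The
# query string and limit are arbitrary assumptions; the `semanticscholar`/`arxiv`
# packages and network access to Semantic Scholar are assumed to be available.
if __name__ == "__main__":
    reader = SemanticScholarReader()
    papers = reader.load_data("retrieval augmented generation", limit=5)
    print(documents_to_df(papers))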
| [
"llama_index.readers.schema.base.Document"
] | [((1299, 1406), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': 'messages', 'temperature': '(0.5)', 'max_tokens': '(256)'}), "(model='gpt-3.5-turbo', messages=messages,\n temperature=0.5, max_tokens=256)\n", (1327, 1406), False, 'import openai\n'), ((11530, 11569), 're.findall', 're.findall', (['"""\\\\[(.*?)\\\\]"""', 'input_string'], {}), "('\\\\[(.*?)\\\\]', input_string)\n", (11540, 11569), False, 'import re\n'), ((13232, 13255), 'pandas.DataFrame', 'pd.DataFrame', (['list_data'], {}), '(list_data)\n', (13244, 13255), True, 'import pandas as pd\n'), ((2741, 2773), 'semanticscholar.SemanticScholar', 'SemanticScholar', ([], {'timeout': 'timeout'}), '(timeout=timeout)\n', (2756, 2773), False, 'from semanticscholar import SemanticScholar\n'), ((3009, 3038), 'shutil.rmtree', 'shutil.rmtree', (['"""./.citation*"""'], {}), "('./.citation*')\n", (3022, 3038), False, 'import shutil\n'), ((3123, 3142), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3140, 3142), False, 'import logging\n'), ((3366, 3413), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'stream': '(True)'}), '(url, headers=headers, stream=True)\n', (3378, 3413), False, 'import requests\n'), ((6622, 6668), 'os.path.join', 'os.path.join', (['self.base_dir', 'f"""{paper_id}.pdf"""'], {}), "(self.base_dir, f'{paper_id}.pdf')\n", (6634, 6668), False, 'import os\n'), ((1649, 1671), 'ast.literal_eval', 'ast.literal_eval', (['item'], {}), '(item)\n', (1665, 1671), False, 'import ast\n'), ((2818, 2847), 'os.path.exists', 'os.path.exists', (['self.base_dir'], {}), '(self.base_dir)\n', (2832, 2847), False, 'import os\n'), ((2861, 2887), 'os.makedirs', 'os.makedirs', (['self.base_dir'], {}), '(self.base_dir)\n', (2872, 2887), False, 'import os\n'), ((3615, 3651), 'os.makedirs', 'os.makedirs', (['base_dir'], {'exist_ok': '(True)'}), '(base_dir, exist_ok=True)\n', (3626, 3651), False, 'import os\n'), ((3676, 3717), 'os.path.join', 'os.path.join', (['base_dir', 'f"""{paper_id}.pdf"""'], {}), "(base_dir, f'{paper_id}.pdf')\n", (3688, 3717), False, 'import os\n'), ((3780, 3805), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (3794, 3805), False, 'import os\n'), ((5158, 5204), 'os.path.join', 'os.path.join', (['self.base_dir', 'f"""{paper_id}.pdf"""'], {}), "(self.base_dir, f'{paper_id}.pdf')\n", (5170, 5204), False, 'import os\n'), ((9474, 9520), 'logging.info', 'logging.info', (['"""Getting full text documents..."""'], {}), "('Getting full text documents...')\n", (9486, 9520), False, 'import logging\n'), ((7834, 7875), 'logging.info', 'logging.info', (['f"""Searching for {question}"""'], {}), "(f'Searching for {question}')\n", (7846, 7875), False, 'import logging\n'), ((8118, 8203), 'logging.error', 'logging.error', (['"""Failed to fetch data from Semantic Scholar with exception: %s"""', 'e'], {}), "('Failed to fetch data from Semantic Scholar with exception: %s',\n e)\n", (8131, 8203), False, 'import logging\n'), ((8291, 8343), 'logging.error', 'logging.error', (['"""An unexpected error occurred: %s"""', 'e'], {}), "('An unexpected error occurred: %s', e)\n", (8304, 8343), False, 'import logging\n'), ((9397, 9437), 'llama_index.readers.schema.base.Document', 'Document', ([], {'text': 'text', 'extra_info': 'metadata'}), '(text=text, extra_info=metadata)\n', (9405, 9437), False, 'from llama_index.readers.schema.base import Document\n'), ((5232, 5259), 'os.path.exists', 'os.path.exists', (['persist_dir'], {}), 
'(persist_dir)\n', (5246, 5259), False, 'import os\n'), ((5534, 5561), 'os.path.exists', 'os.path.exists', (['persist_dir'], {}), '(persist_dir)\n', (5548, 5561), False, 'import os\n'), ((6310, 6350), 'llama_index.readers.schema.base.Document', 'Document', ([], {'text': 'text', 'extra_info': 'metadata'}), '(text=text, extra_info=metadata)\n', (6318, 6350), False, 'from llama_index.readers.schema.base import Document\n'), ((6004, 6082), 'logging.error', 'logging.error', (['f"""Failed to read pdf with exception: {e}. Skipping document..."""'], {}), "(f'Failed to read pdf with exception: {e}. Skipping document...')\n", (6017, 6082), False, 'import logging\n')] |
from llama_index.embeddings import LinearAdapterEmbeddingModel, resolve_embed_model
from llama_index.finetuning import EmbeddingQAFinetuneDataset
import pickle
from eval_utils import evaluate, display_results
def run_eval(val_data: str) -> None:
val_dataset = EmbeddingQAFinetuneDataset.from_json(val_data)
print("Loading model")
embed_model_name = "local:BAAI/bge-large-en"
base_embed_model = resolve_embed_model(embed_model_name)
print("Loading adapter")
embed_model = LinearAdapterEmbeddingModel(base_embed_model, "model_output_test", device="cuda")
# Top k 10 to match our internal experiments
print("Evaluating fine-tuned model")
ft_val_results = evaluate(val_dataset, embed_model, top_k=10)
print("Fine-Tuned Model Results")
print(ft_val_results)
with open("ft_results.pkl", "wb") as f:
pickle.dump(ft_val_results, f)
display_results(["ft"], [ft_val_results])
print("Evaluating base model")
bge_val_results = evaluate(val_dataset, embed_model_name, top_k=10)
print("Base Model Results:")
print(bge_val_results)
with open("base_model_results.pkl", "wb") as f2:
pickle.dump(bge_val_results, f2)
display_results(["bge"], [bge_val_results])
print("All Results")
display_results(
["bge", "ft"], [bge_val_results, ft_val_results]
)
if __name__ == "__main__":
run_eval("val.json") | [
"llama_index.embeddings.LinearAdapterEmbeddingModel",
"llama_index.finetuning.EmbeddingQAFinetuneDataset.from_json",
"llama_index.embeddings.resolve_embed_model"
] | [((264, 310), 'llama_index.finetuning.EmbeddingQAFinetuneDataset.from_json', 'EmbeddingQAFinetuneDataset.from_json', (['val_data'], {}), '(val_data)\n', (300, 310), False, 'from llama_index.finetuning import EmbeddingQAFinetuneDataset\n'), ((401, 438), 'llama_index.embeddings.resolve_embed_model', 'resolve_embed_model', (['embed_model_name'], {}), '(embed_model_name)\n', (420, 438), False, 'from llama_index.embeddings import LinearAdapterEmbeddingModel, resolve_embed_model\n'), ((480, 566), 'llama_index.embeddings.LinearAdapterEmbeddingModel', 'LinearAdapterEmbeddingModel', (['base_embed_model', '"""model_output_test"""'], {'device': '"""cuda"""'}), "(base_embed_model, 'model_output_test', device=\n 'cuda')\n", (507, 566), False, 'from llama_index.embeddings import LinearAdapterEmbeddingModel, resolve_embed_model\n'), ((664, 708), 'eval_utils.evaluate', 'evaluate', (['val_dataset', 'embed_model'], {'top_k': '(10)'}), '(val_dataset, embed_model, top_k=10)\n', (672, 708), False, 'from eval_utils import evaluate, display_results\n'), ((842, 883), 'eval_utils.display_results', 'display_results', (["['ft']", '[ft_val_results]'], {}), "(['ft'], [ft_val_results])\n", (857, 883), False, 'from eval_utils import evaluate, display_results\n'), ((936, 985), 'eval_utils.evaluate', 'evaluate', (['val_dataset', 'embed_model_name'], {'top_k': '(10)'}), '(val_dataset, embed_model_name, top_k=10)\n', (944, 985), False, 'from eval_utils import evaluate, display_results\n'), ((1126, 1169), 'eval_utils.display_results', 'display_results', (["['bge']", '[bge_val_results]'], {}), "(['bge'], [bge_val_results])\n", (1141, 1169), False, 'from eval_utils import evaluate, display_results\n'), ((1193, 1258), 'eval_utils.display_results', 'display_results', (["['bge', 'ft']", '[bge_val_results, ft_val_results]'], {}), "(['bge', 'ft'], [bge_val_results, ft_val_results])\n", (1208, 1258), False, 'from eval_utils import evaluate, display_results\n'), ((810, 840), 'pickle.dump', 'pickle.dump', (['ft_val_results', 'f'], {}), '(ft_val_results, f)\n', (821, 840), False, 'import pickle\n'), ((1092, 1124), 'pickle.dump', 'pickle.dump', (['bge_val_results', 'f2'], {}), '(bge_val_results, f2)\n', (1103, 1124), False, 'import pickle\n')] |
"""Simple horoscope predictions generator."""
from typing import List, Optional, Dict, Callable
import re
import json
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from vedastro import *
class SimpleBirthTimeReader(BasePydanticReader):
"""Simple birth time prediction reader.
Reads horoscope predictions from vedastro.org
`pip install vedastro` needed
Args:
metadata_fn (Optional[Callable[[str], Dict]]): A function that takes in
a birth time and returns a dictionary of prediction metadata.
Default is None.
"""
is_remote: bool = True
_metadata_fn: Optional[Callable[[str], Dict]] = PrivateAttr()
def __init__(
self,
metadata_fn: Optional[Callable[[str], Dict]] = None,
) -> None:
"""Initialize with parameters."""
self._metadata_fn = metadata_fn
super().__init__()
@classmethod
def class_name(cls) -> str:
return "SimpleBirthTimeReader"
def load_data(self, birth_time: str) -> List[Document]:
"""Load data from the given birth time.
Args:
birth_time (str): birth time in this format : Location/Delhi,India/Time/01:30/14/02/2024/+05:30
Returns:
List[Document]: List of documents.
"""
documents = SimpleBirthTimeReader.birth_time_to_llama_index_nodes(birth_time)
return documents
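    # Illustrative usage (added): SimpleBirthTimeReader().load_data(
    #     "Location/Delhi,India/Time/01:30/14/02/2024/+05:30")
    # returns one Document per horoscope prediction computed for that birth time.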
@staticmethod
# converts vedastro horoscope predictions (JSON) to_llama-index's NodeWithScore
# so that llama index can understand vedastro predictions
def vedastro_predictions_to_llama_index_weight_nodes(
birth_time, predictions_list_json
):
from llama_index.core.schema import NodeWithScore
from llama_index.core.schema import TextNode
# Initialize an empty list
prediction_nodes = []
for prediction in predictions_list_json:
related_bod_json = prediction["RelatedBody"]
# shadbala_score = Calculate.PlanetCombinedShadbala()
rel_planets = related_bod_json["Planets"]
parsed_list = []
for planet in rel_planets:
parsed_list.append(PlanetName.Parse(planet))
# TODO temp use 1st planet, house, zodiac
planet_tags = []
shadbala_score = 0
if parsed_list: # This checks if the list is not empty
for planet in parsed_list:
shadbala_score += Calculate.PlanetShadbalaPinda(
planet, birth_time
).ToDouble()
# planet_tags = Calculate.GetPlanetTags(parsed_list[0])
predict_node = TextNode(
text=prediction["Description"],
metadata={
"name": SimpleBirthTimeReader.split_camel_case(prediction["Name"])
# "related_body": prediction['RelatedBody'],
# "planet_tags": planet_tags,
},
metadata_seperator="::",
metadata_template="{key}=>{value}",
text_template="Metadata: {metadata_str}\n-----\nContent: {content}",
)
# add in shadbala to give each prediction weights
parsed_node = NodeWithScore(
node=predict_node, score=shadbala_score
) # add in shabala score
prediction_nodes.append(parsed_node) # add to main list
return prediction_nodes
@staticmethod
def birth_time_to_llama_index_nodes(birth_time_text):
# 1 : convert raw time text into parsed time (aka time url)
parsed_birth_time = Time.FromUrl(birth_time_text).GetAwaiter().GetResult()
# 2 : do +300 horoscope prediction calculations to find correct predictions for person
all_predictions_raw = Calculate.HoroscopePredictions(parsed_birth_time)
# show the number of horo records found
print(f"Predictions Found : {len(all_predictions_raw)}")
# format list nicely so LLM can swallow (llama_index nodes)
# so that llama index can understand vedastro predictions
all_predictions_json = json.loads(
HoroscopePrediction.ToJsonList(all_predictions_raw).ToString()
)
# do final packing into llama-index formats
prediction_nodes = (
SimpleBirthTimeReader.vedastro_predictions_to_llama_index_documents(
all_predictions_json
)
)
return prediction_nodes
@staticmethod
def vedastro_predictions_to_llama_index_nodes(birth_time, predictions_list_json):
from llama_index.core.schema import NodeWithScore
from llama_index.core.schema import TextNode
# Initialize an empty list
prediction_nodes = []
for prediction in predictions_list_json:
related_bod_json = prediction["RelatedBody"]
# shadbala_score = Calculate.PlanetCombinedShadbala()
rel_planets = related_bod_json["Planets"]
parsed_list = []
for planet in rel_planets:
parsed_list.append(PlanetName.Parse(planet))
# TODO temp use 1st planet, house, zodiac
planet_tags = []
shadbala_score = 0
if parsed_list: # This checks if the list is not empty
shadbala_score = Calculate.PlanetShadbalaPinda(
parsed_list[0], birth_time
).ToDouble()
planet_tags = Calculate.GetPlanetTags(parsed_list[0])
predict_node = TextNode(
text=prediction["Description"],
metadata={
"name": ChatTools.split_camel_case(prediction["Name"]),
"related_body": prediction["RelatedBody"],
"planet_tags": planet_tags,
},
metadata_seperator="::",
metadata_template="{key}=>{value}",
text_template="Metadata: {metadata_str}\n-----\nContent: {content}",
)
# add in shadbala to give each prediction weights
prediction_nodes.append(predict_node) # add to main list
return prediction_nodes
@staticmethod
# given list vedastro lib horoscope predictions will convert to documents
def vedastro_predictions_to_llama_index_documents(predictions_list_json):
from llama_index.core import Document
from llama_index.core.schema import MetadataMode
import copy
# Initialize an empty list
prediction_nodes = []
for prediction in predictions_list_json:
            # take out description (long text) from metadata, because it is already included as "content"
predict_meta = copy.deepcopy(prediction)
del predict_meta["Description"]
predict_node = Document(
text=prediction["Description"],
metadata=predict_meta,
metadata_seperator="::",
metadata_template="{key}=>{value}",
text_template="Metadata: {metadata_str}\n-----\nContent: {content}",
)
# # this is shows difference for understanding output of Documents
# print("#######################################################")
# print(
# "The LLM sees this: \n",
# predict_node.get_content(metadata_mode=MetadataMode.LLM),
# )
# print(
# "The Embedding model sees this: \n",
# predict_node.get_content(metadata_mode=MetadataMode.EMBED),
# )
# print("#######################################################")
# add in shadbala to give each prediction weights
prediction_nodes.append(predict_node) # add to main list
return prediction_nodes
@staticmethod
def split_camel_case(s):
return re.sub("((?<=[a-z])[A-Z]|(?<!\\A)[A-Z](?=[a-z]))", " \\1", s) | [
"llama_index.core.Document",
"llama_index.core.schema.NodeWithScore",
"llama_index.core.bridge.pydantic.PrivateAttr"
] | [((767, 780), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (778, 780), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((8054, 8115), 're.sub', 're.sub', (['"""((?<=[a-z])[A-Z]|(?<!\\\\A)[A-Z](?=[a-z]))"""', '""" \\\\1"""', 's'], {}), "('((?<=[a-z])[A-Z]|(?<!\\\\A)[A-Z](?=[a-z]))', ' \\\\1', s)\n", (8060, 8115), False, 'import re\n'), ((3380, 3434), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'predict_node', 'score': 'shadbala_score'}), '(node=predict_node, score=shadbala_score)\n', (3393, 3434), False, 'from llama_index.core.schema import NodeWithScore\n'), ((6874, 6899), 'copy.deepcopy', 'copy.deepcopy', (['prediction'], {}), '(prediction)\n', (6887, 6899), False, 'import copy\n'), ((6972, 7175), 'llama_index.core.Document', 'Document', ([], {'text': "prediction['Description']", 'metadata': 'predict_meta', 'metadata_seperator': '"""::"""', 'metadata_template': '"""{key}=>{value}"""', 'text_template': '"""Metadata: {metadata_str}\n-----\nContent: {content}"""'}), '(text=prediction[\'Description\'], metadata=predict_meta,\n metadata_seperator=\'::\', metadata_template=\'{key}=>{value}\',\n text_template="""Metadata: {metadata_str}\n-----\nContent: {content}""")\n', (6980, 7175), False, 'from llama_index.core import Document\n')] |
import os
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader
from flask import Flask, render_template, jsonify, request
index = None
# set up the index: either load it from disk or create it on the fly
def initialise_index():
global index
if os.path.exists(os.environ["INDEX_FILE"]):
index = GPTSimpleVectorIndex.load_from_disk(os.environ["INDEX_FILE"])
else:
documents = SimpleDirectoryReader(os.environ["LOAD_DIR"]).load_data()
index = GPTSimpleVectorIndex.from_documents(documents)
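        # Note: the freshly built index is only kept in memory; it is not persisted
        # back to INDEX_FILE, so it will be rebuilt on the next cold start.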
# get path for GUI
gui_dir = os.path.join(os.path.dirname(__file__), 'gui')
if not os.path.exists(gui_dir):
gui_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'gui')
# start server
server = Flask(__name__, static_folder=gui_dir, template_folder=gui_dir)
# initialise index
initialise_index()
@server.route('/')
def landing():
return render_template('index.html')
@server.route('/query', methods=['POST'])
def query():
global index
data = request.json
response = index.query(data["input"])
return jsonify({'query': data["input"],
'response': str(response),
'source': response.get_formatted_sources()}) | [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTSimpleVectorIndex.from_documents"
] | [((756, 819), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': 'gui_dir', 'template_folder': 'gui_dir'}), '(__name__, static_folder=gui_dir, template_folder=gui_dir)\n', (761, 819), False, 'from flask import Flask, render_template, jsonify, request\n'), ((268, 308), 'os.path.exists', 'os.path.exists', (["os.environ['INDEX_FILE']"], {}), "(os.environ['INDEX_FILE'])\n", (282, 308), False, 'import os\n'), ((584, 609), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (599, 609), False, 'import os\n'), ((627, 650), 'os.path.exists', 'os.path.exists', (['gui_dir'], {}), '(gui_dir)\n', (641, 650), False, 'import os\n'), ((905, 934), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (920, 934), False, 'from flask import Flask, render_template, jsonify, request\n'), ((326, 387), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (["os.environ['INDEX_FILE']"], {}), "(os.environ['INDEX_FILE'])\n", (361, 387), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader\n'), ((492, 538), 'llama_index.GPTSimpleVectorIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', (['documents'], {}), '(documents)\n', (527, 538), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader\n'), ((696, 721), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (711, 721), False, 'import os\n'), ((418, 463), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (["os.environ['LOAD_DIR']"], {}), "(os.environ['LOAD_DIR'])\n", (439, 463), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader\n')] |
from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType
from llama_index import ListIndex, ServiceContext, SimpleDirectoryReader, VectorStoreIndex
'''
Title of the page: A simple Python implementation of the ReAct pattern for LLMs
Name of the website: LlamaIndex (GPT Index) is a data framework for your LLM application.
URL: https://github.com/jerryjliu/llama_index
'''
docs = SimpleDirectoryReader("../data/paul_graham/").load_data()
from llama_index import ServiceContext, LLMPredictor, TreeIndex
from langchain.chat_models import ChatOpenAI
llm_predictor = LLMPredictor(llm=ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0))
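# The debug handler records start/end events for every traced step (LLM calls,
# chunking, etc.) and prints the full trace once each operation completes.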
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
callback_manager = CallbackManager([llama_debug])
service_context = ServiceContext.from_defaults(callback_manager=callback_manager, llm_predictor=llm_predictor)
index = VectorStoreIndex.from_documents(docs, service_context=service_context)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
# Print info on the LLM calls made during the query
print(llama_debug.get_event_time_info(CBEventType.LLM))
# Print info on llm inputs/outputs - returns start/end events for each LLM call
event_pairs = llama_debug.get_llm_inputs_outputs()
print(event_pairs[0][0])
print(event_pairs[0][1].payload.keys())
print(event_pairs[0][1].payload['response'])
# Get info on any event type
event_pairs = llama_debug.get_event_pairs(CBEventType.CHUNKING)
print(event_pairs[0][0].payload.keys()) # get first chunking start event
print(event_pairs[0][1].payload.keys()) # get first chunking end event
# Clear the currently cached events
llama_debug.flush_event_logs()
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.callbacks.CallbackManager"
] | [((676, 718), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (693, 718), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType\n'), ((738, 768), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[llama_debug]'], {}), '([llama_debug])\n', (753, 768), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler, CBEventType\n'), ((787, 883), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'callback_manager': 'callback_manager', 'llm_predictor': 'llm_predictor'}), '(callback_manager=callback_manager,\n llm_predictor=llm_predictor)\n', (815, 883), False, 'from llama_index import ServiceContext, LLMPredictor, TreeIndex\n'), ((889, 959), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (920, 959), False, 'from llama_index import ListIndex, ServiceContext, SimpleDirectoryReader, VectorStoreIndex\n'), ((405, 450), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""../data/paul_graham/"""'], {}), "('../data/paul_graham/')\n", (426, 450), False, 'from llama_index import ListIndex, ServiceContext, SimpleDirectoryReader, VectorStoreIndex\n'), ((606, 659), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (616, 659), False, 'from langchain.chat_models import ChatOpenAI\n')] |
import logging
import os
from llama_index import (
StorageContext,
load_index_from_storage,
)
from app.engine.constants import STORAGE_DIR
from app.engine.context import create_service_context
def get_chat_engine():
service_context = create_service_context()
# check if storage already exists
if not os.path.exists(STORAGE_DIR):
raise Exception(
"StorageContext is empty - call 'python app/engine/generate.py' to generate the storage first"
)
logger = logging.getLogger("uvicorn")
# load the existing index
logger.info(f"Loading index from {STORAGE_DIR}...")
storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
index = load_index_from_storage(storage_context, service_context=service_context)
logger.info(f"Finished loading index from {STORAGE_DIR}")
return index.as_chat_engine()
| [
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults"
] | [((249, 273), 'app.engine.context.create_service_context', 'create_service_context', ([], {}), '()\n', (271, 273), False, 'from app.engine.context import create_service_context\n'), ((507, 535), 'logging.getLogger', 'logging.getLogger', (['"""uvicorn"""'], {}), "('uvicorn')\n", (524, 535), False, 'import logging\n'), ((644, 697), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'STORAGE_DIR'}), '(persist_dir=STORAGE_DIR)\n', (672, 697), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((710, 783), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (733, 783), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((323, 350), 'os.path.exists', 'os.path.exists', (['STORAGE_DIR'], {}), '(STORAGE_DIR)\n', (337, 350), False, 'import os\n')] |
"""Module for loading index."""
import logging
from typing import TYPE_CHECKING, Any, Optional
from llama_index import ServiceContext, StorageContext, load_index_from_storage
from llama_index.indices.base import BaseIndex
from ols.app.models.config import ReferenceContent
# This is to avoid importing HuggingFaceBgeEmbeddings in all cases, because in
# runtime it is used only under some conditions. OTOH we need to make Python
# interpreter happy in all circumstances, hence the definition of the Any symbol.
if TYPE_CHECKING:
from langchain_community.embeddings import HuggingFaceBgeEmbeddings # TCH004
else:
HuggingFaceBgeEmbeddings = Any
logger = logging.getLogger(__name__)
class IndexLoader:
"""Load index from local file storage."""
def __init__(self, index_config: Optional[ReferenceContent]) -> None:
"""Initialize loader."""
self._index: Optional[BaseIndex] = None
self._index_config = index_config
logger.debug(f"Config used for index load: {self._index_config}")
if self._index_config is None:
logger.warning("Config for reference content is not set.")
else:
self._index_path = self._index_config.product_docs_index_path
self._index_id = self._index_config.product_docs_index_id
self._embed_model_path = self._index_config.embeddings_model_path
self._embed_model = self._get_embed_model()
self._load_index()
def _get_embed_model(self) -> Optional[str | HuggingFaceBgeEmbeddings]:
"""Get embed model according to configuration."""
if self._embed_model_path is not None:
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
logger.debug(
f"Loading embedding model info from path {self._embed_model_path}"
)
return HuggingFaceBgeEmbeddings(model_name=self._embed_model_path)
logger.warning("Embedding model path is not set.")
logger.warning("Embedding model is set to default")
return "local:BAAI/bge-base-en"
def _set_context(self) -> None:
"""Set storage/service context required for index load."""
logger.debug(f"Using {self._embed_model!s} as embedding model for index.")
logger.info("Setting up service context for index load...")
self._service_context = ServiceContext.from_defaults(
embed_model=self._embed_model, llm=None
)
logger.info("Setting up storage context for index load...")
self._storage_context = StorageContext.from_defaults(
persist_dir=self._index_path
)
def _load_index(self) -> None:
"""Load vector index."""
if self._index_path is None:
logger.warning("Index path is not set.")
else:
try:
self._set_context()
logger.info("Loading vector index...")
self._index = load_index_from_storage(
service_context=self._service_context,
storage_context=self._storage_context,
index_id=self._index_id,
)
logger.info("Vector index is loaded.")
except Exception as err:
logger.exception(f"Error loading vector index:\n{err}")
@property
def vector_index(self) -> Optional[BaseIndex]:
"""Get index."""
if self._index is None:
logger.warning(
"Proceeding without RAG content. "
"Either there is an error or required parameters are not set."
)
return self._index
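# Illustrative usage (sketch; `reference_content` stands for a ReferenceContent
# instance taken from the application's configuration):
#   loader = IndexLoader(reference_content)
#   index = loader.vector_index
#   if index is not None:
#       retriever = index.as_retriever(similarity_top_k=3)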
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults"
] | [((661, 688), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (678, 688), False, 'import logging\n'), ((2376, 2445), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'self._embed_model', 'llm': 'None'}), '(embed_model=self._embed_model, llm=None)\n', (2404, 2445), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((2568, 2626), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'self._index_path'}), '(persist_dir=self._index_path)\n', (2596, 2626), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((1869, 1928), 'langchain_community.embeddings.HuggingFaceBgeEmbeddings', 'HuggingFaceBgeEmbeddings', ([], {'model_name': 'self._embed_model_path'}), '(model_name=self._embed_model_path)\n', (1893, 1928), False, 'from langchain_community.embeddings import HuggingFaceBgeEmbeddings\n'), ((2960, 3090), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'service_context': 'self._service_context', 'storage_context': 'self._storage_context', 'index_id': 'self._index_id'}), '(service_context=self._service_context,\n storage_context=self._storage_context, index_id=self._index_id)\n', (2983, 3090), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n')] |
from llama_index import PromptTemplate
instruction_str = """\
1. Convert the query to executable Python code using Pandas.
2. The final line of code should be a Python expression that can be called with the `eval()` function.
3. The code should represent a solution to the query.
4. PRINT ONLY THE EXPRESSION.
5. Do not quote the expression."""
new_prompt = PromptTemplate(
"""\
You are working with a pandas dataframe in Python.
The name of the dataframe is `df`.
This is the result of `print(df.head())`:
{df_str}
Follow these instructions:
{instruction_str}
Query: {query_str}
Expression: """
)
context = """Purpose: The primary role of this agent is to assist users by providing accurate
information about world population statistics and details about a country. """
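# Illustrative rendering of the prompt (sketch): pandas and the toy dataframe below
# are assumptions used only to show how the placeholders get filled in.
#   import pandas as pd
#   df = pd.DataFrame({"country": ["Canada"], "population": [38_000_000]})
#   prompt = new_prompt.format(
#       df_str=str(df.head()),
#       instruction_str=instruction_str,
#       query_str="What is the population of Canada?",
#   )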
| [
"llama_index.PromptTemplate"
] | [((381, 660), 'llama_index.PromptTemplate', 'PromptTemplate', (['""" You are working with a pandas dataframe in Python.\n The name of the dataframe is `df`.\n This is the result of `print(df.head())`:\n {df_str}\n\n Follow these instructions:\n {instruction_str}\n Query: {query_str}\n\n Expression: """'], {}), '(\n """ You are working with a pandas dataframe in Python.\n The name of the dataframe is `df`.\n This is the result of `print(df.head())`:\n {df_str}\n\n Follow these instructions:\n {instruction_str}\n Query: {query_str}\n\n Expression: """\n )\n', (395, 660), False, 'from llama_index import PromptTemplate\n')] |
import os, shutil, datetime, time, json
import gradio as gr
import sys
import os
from llama_index import GPTSimpleVectorIndex
bank_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../memory_bank')
sys.path.append(bank_path)
from build_memory_index import build_memory_index
memory_bank_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../memory_bank')
sys.path.append(memory_bank_path)
from summarize_memory import summarize_memory
def enter_name(name, memory,local_memory_qa,data_args,update_memory_index=True):
cur_date = datetime.date.today().strftime("%Y-%m-%d")
user_memory_index = None
if isinstance(data_args,gr.State):
data_args = data_args.value
if isinstance(memory,gr.State):
memory = memory.value
if isinstance(local_memory_qa,gr.State):
local_memory_qa=local_memory_qa.value
memory_dir = os.path.join(data_args.memory_basic_dir,data_args.memory_file)
if name in memory.keys():
user_memory = memory[name]
memory_index_path = os.path.join(data_args.memory_basic_dir,f'memory_index/{name}_index')
os.makedirs(os.path.dirname(memory_index_path), exist_ok=True)
if (not os.path.exists(memory_index_path)) or update_memory_index:
print(f'Initializing memory index {memory_index_path}...')
# filepath = input("Input your local knowledge file path 请输入本地知识文件路径:")
if os.path.exists(memory_index_path):
shutil.rmtree(memory_index_path)
memory_index_path, _ = local_memory_qa.init_memory_vector_store(filepath=memory_dir,vs_path=memory_index_path,user_name=name,cur_date=cur_date)
user_memory_index = local_memory_qa.load_memory_index(memory_index_path) if memory_index_path else None
msg = f"欢迎回来,{name}!" if data_args.language=='cn' else f"Wellcome Back, {name}!"
return msg,user_memory,memory, name,user_memory_index
else:
memory[name] = {}
memory[name].update({"name":name})
msg = f"欢迎新用户{name}!我会记住你的名字,下次见面就能叫你的名字啦!" if data_args.language == 'cn' else f'Welcome, new user {name}! I will remember your name, so next time we meet, I\'ll be able to call you by your name!'
return msg,memory[name],memory,name,user_memory_index
def enter_name_llamaindex(name, memory, data_args, update_memory_index=True):
user_memory_index = None
if name in memory.keys():
user_memory = memory[name]
memory_index_path = os.path.join(data_args.memory_basic_dir,f'memory_index/{name}_index.json')
if not os.path.exists(memory_index_path) or update_memory_index:
print(f'Initializing memory index {memory_index_path}...')
build_memory_index(memory,data_args,name=name)
if os.path.exists(memory_index_path):
user_memory_index = GPTSimpleVectorIndex.load_from_disk(memory_index_path)
            print(f'Successfully loaded memory index for user {name}!')
            return f"Welcome Back, {name}!",user_memory,user_memory_index
else:
memory[name] = {}
memory[name].update({"name":name})
return f"Welcome new user{name}!I will remember your name and call you by your name in the next conversation",memory[name],user_memory_index
def summarize_memory_event_personality(data_args, memory, user_name):
if isinstance(data_args,gr.State):
data_args = data_args.value
if isinstance(memory,gr.State):
memory = memory.value
memory_dir = os.path.join(data_args.memory_basic_dir,data_args.memory_file)
memory = summarize_memory(memory_dir,user_name,language=data_args.language)
user_memory = memory[user_name] if user_name in memory.keys() else {}
return user_memory#, user_memory_index
def save_local_memory(memory,b,user_name,data_args):
if isinstance(data_args,gr.State):
data_args = data_args.value
if isinstance(memory,gr.State):
memory = memory.value
memory_dir = os.path.join(data_args.memory_basic_dir,data_args.memory_file)
date = time.strftime("%Y-%m-%d", time.localtime())
if memory[user_name].get("history") is None:
memory[user_name].update({"history":{}})
if memory[user_name]['history'].get(date) is None:
memory[user_name]['history'][date] = []
# date = len(memory[user_name]['history'])
memory[user_name]['history'][date].append({'query':b[-1][0],'response':b[-1][1]})
    with open(memory_dir, "w", encoding="utf-8") as f:
        json.dump(memory, f, ensure_ascii=False)
return memory | [
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((213, 239), 'sys.path.append', 'sys.path.append', (['bank_path'], {}), '(bank_path)\n', (228, 239), False, 'import sys\n'), ((384, 417), 'sys.path.append', 'sys.path.append', (['memory_bank_path'], {}), '(memory_bank_path)\n', (399, 417), False, 'import sys\n'), ((882, 945), 'os.path.join', 'os.path.join', (['data_args.memory_basic_dir', 'data_args.memory_file'], {}), '(data_args.memory_basic_dir, data_args.memory_file)\n', (894, 945), False, 'import os\n'), ((3526, 3589), 'os.path.join', 'os.path.join', (['data_args.memory_basic_dir', 'data_args.memory_file'], {}), '(data_args.memory_basic_dir, data_args.memory_file)\n', (3538, 3589), False, 'import os\n'), ((3602, 3670), 'summarize_memory.summarize_memory', 'summarize_memory', (['memory_dir', 'user_name'], {'language': 'data_args.language'}), '(memory_dir, user_name, language=data_args.language)\n', (3618, 3670), False, 'from summarize_memory import summarize_memory\n'), ((4000, 4063), 'os.path.join', 'os.path.join', (['data_args.memory_basic_dir', 'data_args.memory_file'], {}), '(data_args.memory_basic_dir, data_args.memory_file)\n', (4012, 4063), False, 'import os\n'), ((167, 192), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (182, 192), False, 'import os\n'), ((338, 363), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (353, 363), False, 'import os\n'), ((1038, 1108), 'os.path.join', 'os.path.join', (['data_args.memory_basic_dir', 'f"""memory_index/{name}_index"""'], {}), "(data_args.memory_basic_dir, f'memory_index/{name}_index')\n", (1050, 1108), False, 'import os\n'), ((2502, 2577), 'os.path.join', 'os.path.join', (['data_args.memory_basic_dir', 'f"""memory_index/{name}_index.json"""'], {}), "(data_args.memory_basic_dir, f'memory_index/{name}_index.json')\n", (2514, 2577), False, 'import os\n'), ((2791, 2824), 'os.path.exists', 'os.path.exists', (['memory_index_path'], {}), '(memory_index_path)\n', (2805, 2824), False, 'import os\n'), ((4100, 4116), 'time.localtime', 'time.localtime', ([], {}), '()\n', (4114, 4116), False, 'import os, shutil, datetime, time, json\n'), ((561, 582), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (580, 582), False, 'import os, shutil, datetime, time, json\n'), ((1128, 1162), 'os.path.dirname', 'os.path.dirname', (['memory_index_path'], {}), '(memory_index_path)\n', (1143, 1162), False, 'import os\n'), ((1420, 1453), 'os.path.exists', 'os.path.exists', (['memory_index_path'], {}), '(memory_index_path)\n', (1434, 1453), False, 'import os\n'), ((2733, 2781), 'build_memory_index.build_memory_index', 'build_memory_index', (['memory', 'data_args'], {'name': 'name'}), '(memory, data_args, name=name)\n', (2751, 2781), False, 'from build_memory_index import build_memory_index\n'), ((2858, 2912), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['memory_index_path'], {}), '(memory_index_path)\n', (2893, 2912), False, 'from llama_index import GPTSimpleVectorIndex\n'), ((1195, 1228), 'os.path.exists', 'os.path.exists', (['memory_index_path'], {}), '(memory_index_path)\n', (1209, 1228), False, 'import os\n'), ((1471, 1503), 'shutil.rmtree', 'shutil.rmtree', (['memory_index_path'], {}), '(memory_index_path)\n', (1484, 1503), False, 'import os, shutil, datetime, time, json\n'), ((2592, 2625), 'os.path.exists', 'os.path.exists', (['memory_index_path'], {}), '(memory_index_path)\n', (2606, 2625), False, 'import os\n')] |
from llama_index import (
ServiceContext,
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.vector_stores.qdrant import QdrantVectorStore
from tqdm import tqdm
import arxiv
import os
import argparse
import yaml
import qdrant_client
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.embeddings import LangchainEmbedding
from llama_index import ServiceContext
from llama_index.llms import Ollama
class Data:
def __init__(self, config):
self.config = config
def _create_data_folder(self, download_path):
data_path = download_path
if not os.path.exists(data_path):
os.makedirs(self.config["data_path"])
print("Output folder created")
else:
print("Output folder already exists.")
def download_papers(self, search_query, download_path, max_results):
self._create_data_folder(download_path)
client = arxiv.Client()
search = arxiv.Search(
query=search_query,
max_results=max_results,
sort_by=arxiv.SortCriterion.SubmittedDate,
)
results = list(client.results(search))
for paper in tqdm(results):
if os.path.exists(download_path):
paper_title = (paper.title).replace(" ", "_")
paper.download_pdf(dirpath=download_path, filename=f"{paper_title}.pdf")
print(f"{paper.title} Downloaded.")
def ingest(self, embedder, llm):
print("Indexing data...")
documents = SimpleDirectoryReader(self.config["data_path"]).load_data()
client = qdrant_client.QdrantClient(url=self.config["qdrant_url"])
qdrant_vector_store = QdrantVectorStore(
client=client, collection_name=self.config["collection_name"]
)
storage_context = StorageContext.from_defaults(vector_store=qdrant_vector_store)
# service_context = ServiceContext.from_defaults(
# llm=llm, embed_model=embedder, chunk_size=self.config["chunk_size"]
# )
service_context = ServiceContext.from_defaults(
llm=None, embed_model=embedder, chunk_size=self.config["chunk_size"]
)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, service_context=service_context
)
print(
f"Data indexed successfully to Qdrant. Collection: {self.config['collection_name']}"
)
return index
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-q", "--query",
type=str,
default=False,
help="Download papers from arxiv with this query.",
)
# parser.add_argument(
# "-o", "--output", type=str, default=False, help="Download path."
# )
parser.add_argument(
"-m", "--max", type=int, default=False, help="Max results to download."
)
parser.add_argument(
"-i",
"--ingest",
action=argparse.BooleanOptionalAction,
default=False,
help="Ingest data to Qdrant vector Database.",
)
args = parser.parse_args()
config_file = "config.yml"
with open(config_file, "r") as conf:
config = yaml.safe_load(conf)
data = Data(config)
if args.query:
data.download_papers(
search_query=args.query,
download_path=config["data_path"],
max_results=args.max,
)
if args.ingest:
print("Loading Embedder...")
embed_model = LangchainEmbedding(
HuggingFaceEmbeddings(model_name=config["embedding_model"])
)
llm = Ollama(model=config["llm_name"], base_url=config["llm_url"])
data.ingest(embedder=embed_model, llm=llm)
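    # For reference, config.yml is expected to provide the keys read above; the
    # values shown here are placeholders, not part of the original project:
    #   data_path: ./data
    #   qdrant_url: http://localhost:6333
    #   collection_name: arxiv_papers
    #   chunk_size: 512
    #   embedding_model: BAAI/bge-small-en-v1.5
    #   llm_name: llama2
    #   llm_url: http://localhost:11434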
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.llms.Ollama"
] | [((2566, 2591), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2589, 2591), False, 'import argparse\n'), ((970, 984), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (982, 984), False, 'import arxiv\n'), ((1003, 1108), 'arxiv.Search', 'arxiv.Search', ([], {'query': 'search_query', 'max_results': 'max_results', 'sort_by': 'arxiv.SortCriterion.SubmittedDate'}), '(query=search_query, max_results=max_results, sort_by=arxiv.\n SortCriterion.SubmittedDate)\n', (1015, 1108), False, 'import arxiv\n'), ((1220, 1233), 'tqdm.tqdm', 'tqdm', (['results'], {}), '(results)\n', (1224, 1233), False, 'from tqdm import tqdm\n'), ((1654, 1711), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'url': "self.config['qdrant_url']"}), "(url=self.config['qdrant_url'])\n", (1680, 1711), False, 'import qdrant_client\n'), ((1742, 1827), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': "self.config['collection_name']"}), "(client=client, collection_name=self.config['collection_name']\n )\n", (1759, 1827), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((1871, 1933), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'qdrant_vector_store'}), '(vector_store=qdrant_vector_store)\n', (1899, 1933), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, StorageContext, VectorStoreIndex\n'), ((2112, 2215), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None', 'embed_model': 'embedder', 'chunk_size': "self.config['chunk_size']"}), "(llm=None, embed_model=embedder, chunk_size=\n self.config['chunk_size'])\n", (2140, 2215), False, 'from llama_index import ServiceContext\n'), ((2250, 2362), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(documents, storage_context=storage_context,\n service_context=service_context)\n', (2281, 2362), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, StorageContext, VectorStoreIndex\n'), ((3283, 3303), 'yaml.safe_load', 'yaml.safe_load', (['conf'], {}), '(conf)\n', (3297, 3303), False, 'import yaml\n'), ((3700, 3760), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': "config['llm_name']", 'base_url': "config['llm_url']"}), "(model=config['llm_name'], base_url=config['llm_url'])\n", (3706, 3760), False, 'from llama_index.llms import Ollama\n'), ((646, 671), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (660, 671), False, 'import os\n'), ((685, 722), 'os.makedirs', 'os.makedirs', (["self.config['data_path']"], {}), "(self.config['data_path'])\n", (696, 722), False, 'import os\n'), ((1250, 1279), 'os.path.exists', 'os.path.exists', (['download_path'], {}), '(download_path)\n', (1264, 1279), False, 'import os\n'), ((3616, 3675), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': "config['embedding_model']"}), "(model_name=config['embedding_model'])\n", (3637, 3675), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1576, 1623), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (["self.config['data_path']"], {}), "(self.config['data_path'])\n", (1597, 1623), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, StorageContext, VectorStoreIndex\n')] |
from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage
from llama_index.storage.storage_context import StorageContext
from llama_index.indices.service_context import ServiceContext
from llama_index.llms import OpenAI
from llama_index.node_parser import SimpleNodeParser
from llama_index.node_parser.extractors import (
MetadataExtractor,
SummaryExtractor,
QuestionsAnsweredExtractor,
TitleExtractor,
KeywordExtractor,
)
from llama_index.text_splitter import TokenTextSplitter
from dotenv import load_dotenv
import openai
import gradio as gr
import sys, os
import logging
import json
#loads dotenv lib to retrieve API keys from .env file
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# enable INFO level logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
#define LLM service
llm = OpenAI(temperature=0.1, model_name="gpt-3.5-turbo", max_tokens=512)
service_context = ServiceContext.from_defaults(llm=llm)
#construct text splitter to split texts into chunks for processing
text_splitter = TokenTextSplitter(separator=" ", chunk_size=512, chunk_overlap=128)
#set the global service context object, avoiding passing service_context when building the index
from llama_index import set_global_service_context
set_global_service_context(service_context)
#create metadata extractor
metadata_extractor = MetadataExtractor(
extractors=[
TitleExtractor(nodes=1, llm=llm),
QuestionsAnsweredExtractor(questions=3, llm=llm),
SummaryExtractor(summaries=["prev", "self"], llm=llm),
KeywordExtractor(keywords=10, llm=llm)
],
)
#create node parser to parse nodes from document
node_parser = SimpleNodeParser(
text_splitter=text_splitter,
metadata_extractor=metadata_extractor,
)
#loading documents
documents_2022 = SimpleDirectoryReader(input_files=["data/executive-summary-2022.pdf"], filename_as_id=True).load_data()
print(f"loaded documents_2022 with {len(documents_2022)} pages")
documents_2021 = SimpleDirectoryReader(input_files=["data/executive-summary-2021.pdf"], filename_as_id=True).load_data()
print(f"loaded documents_2021 with {len(documents_2021)} pages")
def load_index():
try:
#load storage context
storage_context = StorageContext.from_defaults(persist_dir="./storage")
#try to load the index from storage
index = load_index_from_storage(storage_context)
logging.info("Index loaded from storage.")
except FileNotFoundError:
#if index not found, create a new one
logging.info("Index not found. Creating a new one...")
nodes_2022 = node_parser.get_nodes_from_documents(documents_2022)
nodes_2021 = node_parser.get_nodes_from_documents(documents_2021)
print(f"loaded nodes_2022 with {len(nodes_2022)} nodes")
print(f"loaded nodes_2021 with {len(nodes_2021)} nodes")
#print metadata in json format
for node in nodes_2022:
metadata_json = json.dumps(node.metadata, indent=4) # Convert metadata to formatted JSON
print(metadata_json)
for node in nodes_2021:
metadata_json = json.dumps(node.metadata, indent=4) # Convert metadata to formatted JSON
print(metadata_json)
#based on the nodes and service_context, create index
index = VectorStoreIndex(nodes=nodes_2022 + nodes_2021, service_context=service_context)
# Persist index to disk
index.storage_context.persist()
logging.info("New index created and persisted to storage.")
return index
def data_querying(input_text):
# Load index
index = load_index()
#queries the index with the input text
response = index.as_query_engine().query(input_text)
return response.response
iface = gr.Interface(fn=data_querying,
inputs=gr.components.Textbox(lines=3, label="Enter your question"),
outputs="text",
title="Analyzing the U.S. Government's Financial Reports for 2022")
iface.launch(share=False) | [
"llama_index.node_parser.extractors.TitleExtractor",
"llama_index.SimpleDirectoryReader",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.node_parser.extractors.SummaryExtractor",
"llama_index.VectorStoreIndex",
"llama_index.indices.service_context.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.node_parser.extractors.QuestionsAnsweredExtractor",
"llama_index.text_splitter.TokenTextSplitter",
"llama_index.set_global_service_context",
"llama_index.node_parser.SimpleNodeParser",
"llama_index.node_parser.extractors.KeywordExtractor",
"llama_index.load_index_from_storage"
] | [((692, 705), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (703, 705), False, 'from dotenv import load_dotenv\n'), ((724, 751), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (733, 751), False, 'import sys, os\n'), ((781, 839), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (800, 839), False, 'import logging\n'), ((940, 1007), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.1)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': '(512)'}), "(temperature=0.1, model_name='gpt-3.5-turbo', max_tokens=512)\n", (946, 1007), False, 'from llama_index.llms import OpenAI\n'), ((1026, 1063), 'llama_index.indices.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (1054, 1063), False, 'from llama_index.indices.service_context import ServiceContext\n'), ((1148, 1215), 'llama_index.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'separator': '""" """', 'chunk_size': '(512)', 'chunk_overlap': '(128)'}), "(separator=' ', chunk_size=512, chunk_overlap=128)\n", (1165, 1215), False, 'from llama_index.text_splitter import TokenTextSplitter\n'), ((1366, 1409), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (1392, 1409), False, 'from llama_index import set_global_service_context\n'), ((1778, 1867), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {'text_splitter': 'text_splitter', 'metadata_extractor': 'metadata_extractor'}), '(text_splitter=text_splitter, metadata_extractor=\n metadata_extractor)\n', (1794, 1867), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((871, 911), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (892, 911), False, 'import logging\n'), ((840, 859), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (857, 859), False, 'import logging\n'), ((1911, 2006), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['data/executive-summary-2022.pdf']", 'filename_as_id': '(True)'}), "(input_files=['data/executive-summary-2022.pdf'],\n filename_as_id=True)\n", (1932, 2006), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((2097, 2192), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['data/executive-summary-2021.pdf']", 'filename_as_id': '(True)'}), "(input_files=['data/executive-summary-2021.pdf'],\n filename_as_id=True)\n", (2118, 2192), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((2356, 2409), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (2384, 2409), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((2470, 2510), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2493, 2510), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((2519, 2561), 'logging.info', 'logging.info', (['"""Index loaded from storage."""'], {}), "('Index loaded from storage.')\n", (2531, 2561), False, 'import logging\n'), ((3960, 4019), 'gradio.components.Textbox', 
'gr.components.Textbox', ([], {'lines': '(3)', 'label': '"""Enter your question"""'}), "(lines=3, label='Enter your question')\n", (3981, 4019), True, 'import gradio as gr\n'), ((1503, 1535), 'llama_index.node_parser.extractors.TitleExtractor', 'TitleExtractor', ([], {'nodes': '(1)', 'llm': 'llm'}), '(nodes=1, llm=llm)\n', (1517, 1535), False, 'from llama_index.node_parser.extractors import MetadataExtractor, SummaryExtractor, QuestionsAnsweredExtractor, TitleExtractor, KeywordExtractor\n'), ((1545, 1593), 'llama_index.node_parser.extractors.QuestionsAnsweredExtractor', 'QuestionsAnsweredExtractor', ([], {'questions': '(3)', 'llm': 'llm'}), '(questions=3, llm=llm)\n', (1571, 1593), False, 'from llama_index.node_parser.extractors import MetadataExtractor, SummaryExtractor, QuestionsAnsweredExtractor, TitleExtractor, KeywordExtractor\n'), ((1603, 1656), 'llama_index.node_parser.extractors.SummaryExtractor', 'SummaryExtractor', ([], {'summaries': "['prev', 'self']", 'llm': 'llm'}), "(summaries=['prev', 'self'], llm=llm)\n", (1619, 1656), False, 'from llama_index.node_parser.extractors import MetadataExtractor, SummaryExtractor, QuestionsAnsweredExtractor, TitleExtractor, KeywordExtractor\n'), ((1666, 1704), 'llama_index.node_parser.extractors.KeywordExtractor', 'KeywordExtractor', ([], {'keywords': '(10)', 'llm': 'llm'}), '(keywords=10, llm=llm)\n', (1682, 1704), False, 'from llama_index.node_parser.extractors import MetadataExtractor, SummaryExtractor, QuestionsAnsweredExtractor, TitleExtractor, KeywordExtractor\n'), ((2655, 2709), 'logging.info', 'logging.info', (['"""Index not found. Creating a new one..."""'], {}), "('Index not found. Creating a new one...')\n", (2667, 2709), False, 'import logging\n'), ((3443, 3528), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {'nodes': '(nodes_2022 + nodes_2021)', 'service_context': 'service_context'}), '(nodes=nodes_2022 + nodes_2021, service_context=service_context\n )\n', (3459, 3528), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((3604, 3663), 'logging.info', 'logging.info', (['"""New index created and persisted to storage."""'], {}), "('New index created and persisted to storage.')\n", (3616, 3663), False, 'import logging\n'), ((3089, 3124), 'json.dumps', 'json.dumps', (['node.metadata'], {'indent': '(4)'}), '(node.metadata, indent=4)\n', (3099, 3124), False, 'import json\n'), ((3257, 3292), 'json.dumps', 'json.dumps', (['node.metadata'], {'indent': '(4)'}), '(node.metadata, indent=4)\n', (3267, 3292), False, 'import json\n')] |
# qa_template.py
from llama_index import QuestionAnswerPrompt
# define custom QuestionAnswerPrompt
QA_PROMPT_TMPL = (
"We have provided context information below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given this context information, please answer the question: {query_str} under a header # 'Based on the notes' \n"
"additionally, create a section under a header ## 'In addition with love from AI' that extends the answer, but does not repeat information from the context. \n"
"Provide the final answer in Markdown compliant presentation \n"
)
QA_PROMPT = QuestionAnswerPrompt(QA_PROMPT_TMPL)
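# Illustrative usage (sketch; `index` is assumed to be an existing llama_index index):
#   query_engine = index.as_query_engine(text_qa_template=QA_PROMPT)
#   response = query_engine.query("What are the key takeaways from my notes?")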
| [
"llama_index.QuestionAnswerPrompt"
] | [((627, 663), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['QA_PROMPT_TMPL'], {}), '(QA_PROMPT_TMPL)\n', (647, 663), False, 'from llama_index import QuestionAnswerPrompt\n')] |
from typing import Any, Optional, Sequence, Type, cast
from llama_index.data_structs.data_structs_v2 import (
IndexDict,
OpensearchIndexDict,
)
from llama_index.data_structs.node_v2 import Node
from llama_index.indices.base import BaseGPTIndex, QueryMap
from llama_index.indices.query.schema import QueryMode
from llama_index.indices.service_context import ServiceContext
from llama_index.indices.vector_store.base import GPTVectorStoreIndex
from llama_index_fix.elasticsearch import ElasticsearchVectorStore, ElasticsearchVectorClient
class GPTElasticsearchIndex(GPTVectorStoreIndex):
index_struct_cls: Type[IndexDict] = OpensearchIndexDict
def __init__(
self,
nodes: Optional[Sequence[Node]] = None,
service_context: Optional[ServiceContext] = None,
client: Optional[ElasticsearchVectorClient] = None,
index_struct: Optional[IndexDict] = None,
**kwargs: Any,
) -> None:
"""Init params."""
if client is None:
raise ValueError("client is required.")
vector_store = ElasticsearchVectorStore(client)
super().__init__(
nodes=nodes,
index_struct=index_struct,
service_context=service_context,
vector_store=vector_store,
**kwargs,
)
@classmethod
    def get_query_map(cls) -> QueryMap:
"""Get query map."""
return {
QueryMode.DEFAULT: GPTOpensearchIndexQuery,
QueryMode.EMBEDDING: GPTOpensearchIndexQuery,
}
def _preprocess_query(self, mode: QueryMode, query_kwargs: Any) -> None:
"""Preprocess query."""
super()._preprocess_query(mode, query_kwargs)
del query_kwargs["vector_store"]
vector_store = cast(ElasticsearchVectorStore, self._vector_store)
query_kwargs["client"] = vector_store._client | [
"llama_index_fix.elasticsearch.ElasticsearchVectorStore"
] | [((1075, 1107), 'llama_index_fix.elasticsearch.ElasticsearchVectorStore', 'ElasticsearchVectorStore', (['client'], {}), '(client)\n', (1099, 1107), False, 'from llama_index_fix.elasticsearch import ElasticsearchVectorStore, ElasticsearchVectorClient\n'), ((1771, 1821), 'typing.cast', 'cast', (['ElasticsearchVectorStore', 'self._vector_store'], {}), '(ElasticsearchVectorStore, self._vector_store)\n', (1775, 1821), False, 'from typing import Any, Optional, Sequence, Type, cast\n')] |
import os
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.bridge.pydantic import Field, PrivateAttr
from llama_index.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.llms.base import llm_chat_callback, llm_completion_callback
from llama_index.llms.custom import CustomLLM
from llama_index.llms.generic_utils import (
completion_response_to_chat_response,
stream_completion_response_to_chat_response,
)
from llama_index.types import BaseOutputParser, PydanticProgramMode
from llama_index.utils import get_cache_dir
from byzerllm.utils.client import ByzerLLM
class ByzerAI(CustomLLM):
"""
ByzerAI is a custom LLM that uses the ByzerLLM API to generate text.
"""
verbose: bool = Field(
default=False,
description="Whether to print verbose output.",
)
_model: ByzerLLM = PrivateAttr()
def __init__(
self,
llm:ByzerLLM
) -> None:
self._model = llm
super().__init__()
@classmethod
def class_name(cls) -> str:
return "ByzerAI_llm"
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
context_window=8024,
num_output=2048,
model_name=self._model.default_model_name,
)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
conversations = [{
"role":message.role,
"content":message.content
} for message in messages]
m = self._model.chat_oai(conversations=conversations)
completion_response = CompletionResponse(text=m[0].output, raw=None)
return completion_response_to_chat_response(completion_response)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
conversations = [{
"role":message.role,
"content":message.content
} for message in messages]
m = self._model.stream_chat_oai(conversations=conversations)
def gen():
v = ""
for response in m:
text:str = response[0]
metadata:Dict[str,Any] = response[1]
completion_response = CompletionResponse(text=text, delta=text[len(v):], raw=None)
v = text
yield completion_response
return stream_completion_response_to_chat_response(gen())
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
m = self._model.chat_oai(conversations=[{"role":"user","content":prompt}])
completion_response = CompletionResponse(text=m[0].output, raw=None)
return completion_response
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
conversations=[{"role":"user","content":prompt}]
m = self._model.stream_chat_oai(conversations=conversations)
def gen():
v = ""
for response in m:
text:str = response[0]
metadata:Dict[str,Any] = response[1]
completion_response = CompletionResponse(text=text, delta=text[len(v):], raw=None)
v = text
yield completion_response
return gen() | [
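# Illustrative usage (sketch; `byzer_client` stands for an already-configured
# byzerllm.utils.client.ByzerLLM instance):
#   llama_llm = ByzerAI(llm=byzer_client)
#   print(llama_llm.complete("Say hello").text)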
"llama_index.llms.base.llm_chat_callback",
"llama_index.bridge.pydantic.Field",
"llama_index.llms.generic_utils.completion_response_to_chat_response",
"llama_index.core.llms.types.LLMMetadata",
"llama_index.bridge.pydantic.PrivateAttr",
"llama_index.llms.base.llm_completion_callback",
"llama_index.core.llms.types.CompletionResponse"
] | [((858, 926), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to print verbose output."""'}), "(default=False, description='Whether to print verbose output.')\n", (863, 926), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((974, 987), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (985, 987), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((1459, 1478), 'llama_index.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (1476, 1478), False, 'from llama_index.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1914, 1933), 'llama_index.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (1931, 1933), False, 'from llama_index.llms.base import llm_chat_callback, llm_completion_callback\n'), ((2667, 2692), 'llama_index.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (2690, 2692), False, 'from llama_index.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3015, 3040), 'llama_index.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3038, 3040), False, 'from llama_index.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1310, 1407), 'llama_index.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': '(8024)', 'num_output': '(2048)', 'model_name': 'self._model.default_model_name'}), '(context_window=8024, num_output=2048, model_name=self._model.\n default_model_name)\n', (1321, 1407), False, 'from llama_index.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((1788, 1834), 'llama_index.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'm[0].output', 'raw': 'None'}), '(text=m[0].output, raw=None)\n', (1806, 1834), False, 'from llama_index.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((1850, 1907), 'llama_index.llms.generic_utils.completion_response_to_chat_response', 'completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (1886, 1907), False, 'from llama_index.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((2927, 2973), 'llama_index.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'm[0].output', 'raw': 'None'}), '(text=m[0].output, raw=None)\n', (2945, 2973), False, 'from llama_index.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n')] |
from byzerllm.utils.client import ByzerLLM
from byzerllm.utils.retrieval import ByzerRetrieval
from byzerllm.apps.llama_index.byzerai import ByzerAI
from byzerllm.apps.llama_index.byzerai_embedding import ByzerAIEmbedding
from byzerllm.apps.llama_index.byzerai_docstore import ByzerAIDocumentStore
from byzerllm.apps.llama_index.byzerai_index_store import ByzerAIIndexStore
from byzerllm.apps.llama_index.byzerai_vectordb import ByzerAIVectorStore
from llama_index.service_context import ServiceContext
from llama_index.storage import StorageContext
from typing import Optional
def get_service_context(llm:ByzerLLM,**kargs):
service_context = ServiceContext.from_defaults(llm=ByzerAI(llm=llm),embed_model=ByzerAIEmbedding(llm=llm),**kargs)
return service_context
def get_storage_context(llm:ByzerLLM,retrieval:ByzerRetrieval,
chunk_collection:Optional[str]="default",
namespace:Optional[str]=None,
**kargs):
vector_store = ByzerAIVectorStore(llm=llm, retrieval=retrieval,chunk_collection=chunk_collection)
docstore = ByzerAIDocumentStore(llm=llm, retrieval=retrieval,namespace=namespace)
index_store = ByzerAIIndexStore(llm=llm, retrieval=retrieval,namespace=namespace)
storage_context = StorageContext.from_defaults(
docstore=docstore,
vector_store=vector_store,
index_store=index_store,
**kargs
)
return storage_context
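# Illustrative wiring (sketch; `llm`, `retrieval`, and `documents` are assumed to be
# created elsewhere in the application):
#   from llama_index import VectorStoreIndex
#   service_context = get_service_context(llm)
#   storage_context = get_storage_context(llm, retrieval, chunk_collection="default")
#   index = VectorStoreIndex.from_documents(
#       documents, storage_context=storage_context, service_context=service_context
#   )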
| [
"llama_index.storage.StorageContext.from_defaults"
] | [((1041, 1129), 'byzerllm.apps.llama_index.byzerai_vectordb.ByzerAIVectorStore', 'ByzerAIVectorStore', ([], {'llm': 'llm', 'retrieval': 'retrieval', 'chunk_collection': 'chunk_collection'}), '(llm=llm, retrieval=retrieval, chunk_collection=\n chunk_collection)\n', (1059, 1129), False, 'from byzerllm.apps.llama_index.byzerai_vectordb import ByzerAIVectorStore\n'), ((1139, 1210), 'byzerllm.apps.llama_index.byzerai_docstore.ByzerAIDocumentStore', 'ByzerAIDocumentStore', ([], {'llm': 'llm', 'retrieval': 'retrieval', 'namespace': 'namespace'}), '(llm=llm, retrieval=retrieval, namespace=namespace)\n', (1159, 1210), False, 'from byzerllm.apps.llama_index.byzerai_docstore import ByzerAIDocumentStore\n'), ((1228, 1296), 'byzerllm.apps.llama_index.byzerai_index_store.ByzerAIIndexStore', 'ByzerAIIndexStore', ([], {'llm': 'llm', 'retrieval': 'retrieval', 'namespace': 'namespace'}), '(llm=llm, retrieval=retrieval, namespace=namespace)\n', (1245, 1296), False, 'from byzerllm.apps.llama_index.byzerai_index_store import ByzerAIIndexStore\n'), ((1318, 1430), 'llama_index.storage.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'docstore': 'docstore', 'vector_store': 'vector_store', 'index_store': 'index_store'}), '(docstore=docstore, vector_store=vector_store,\n index_store=index_store, **kargs)\n', (1346, 1430), False, 'from llama_index.storage import StorageContext\n'), ((689, 705), 'byzerllm.apps.llama_index.byzerai.ByzerAI', 'ByzerAI', ([], {'llm': 'llm'}), '(llm=llm)\n', (696, 705), False, 'from byzerllm.apps.llama_index.byzerai import ByzerAI\n'), ((718, 743), 'byzerllm.apps.llama_index.byzerai_embedding.ByzerAIEmbedding', 'ByzerAIEmbedding', ([], {'llm': 'llm'}), '(llm=llm)\n', (734, 743), False, 'from byzerllm.apps.llama_index.byzerai_embedding import ByzerAIEmbedding\n')] |
#model_settings.py
import streamlit as st
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext
from llama_index.logger import LlamaLogger
from langchain.chat_models import ChatOpenAI
from langchain import OpenAI
from enum import Enum
class sentenceTransformers(Enum):
OPTION1 = "sentence-transformers/all-MiniLM-L6-v2" #default
OPTION2 = "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
OPTION3 = "sentence-transformers/all-mpnet-base-v2"
def get_sentence_transformer_dropdown():
options = [e.value for e in sentenceTransformers]
selected_option = st.selectbox("Sentence transformer:", options)
return selected_option
def get_embed_model(provider='Langchain', model_name=sentenceTransformers.OPTION1.value):
# load in HF embedding model from langchain
embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name=model_name)) if provider=='Langchain' else OpenAIEmbedding()
return embed_model
def get_prompt_helper():
# define prompt helper
max_input_size = 4096
num_output = 2048
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
return prompt_helper
def get_llm_predictor():
# define LLM
num_output = 2048
#llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=num_output))
llm_predictor = LLMPredictor(ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo", max_tokens=num_output))
return llm_predictor
@st.cache_resource
def get_logger():
llama_logger = LlamaLogger()
return llama_logger
def get_service_context(llm_predictor=get_llm_predictor(),
embed_model=get_embed_model(),
prompt_helper=get_prompt_helper(),
chunk_size_limit=512,
llama_logger=get_logger()):
return ServiceContext.from_defaults(llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
chunk_size_limit=chunk_size_limit,
llama_logger=llama_logger)
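# Illustrative Streamlit wiring (sketch): feed the dropdown selection into the
# helpers defined above.
#   model_name = get_sentence_transformer_dropdown()
#   service_context = get_service_context(embed_model=get_embed_model(model_name=model_name))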
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.OpenAIEmbedding",
"llama_index.logger.LlamaLogger",
"llama_index.PromptHelper"
] | [((705, 751), 'streamlit.selectbox', 'st.selectbox', (['"""Sentence transformer:"""', 'options'], {}), "('Sentence transformer:', options)\n", (717, 751), True, 'import streamlit as st\n'), ((1220, 1279), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (1232, 1279), False, 'from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext\n'), ((1684, 1697), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (1695, 1697), False, 'from llama_index.logger import LlamaLogger\n'), ((2009, 2192), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model', 'prompt_helper': 'prompt_helper', 'chunk_size_limit': 'chunk_size_limit', 'llama_logger': 'llama_logger'}), '(llm_predictor=llm_predictor, embed_model=\n embed_model, prompt_helper=prompt_helper, chunk_size_limit=\n chunk_size_limit, llama_logger=llama_logger)\n', (2037, 2192), False, 'from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext\n'), ((1031, 1048), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (1046, 1048), False, 'from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext\n'), ((1522, 1600), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': 'num_output'}), "(temperature=0.1, model_name='gpt-3.5-turbo', max_tokens=num_output)\n", (1532, 1600), False, 'from langchain.chat_models import ChatOpenAI\n'), ((955, 999), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (976, 999), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n')] |
from typing import Any, List, Optional, Sequence
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.base import BaseGPTIndex
from llama_index.core.llms.llm import LLM
from llama_index.core.node_parser import SentenceSplitter, TextSplitter
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts import PromptTemplate
from llama_index.core.prompts.base import BasePromptTemplate
from llama_index.core.prompts.mixin import PromptMixinType
from llama_index.core.response_synthesizers import (
BaseSynthesizer,
ResponseMode,
get_response_synthesizer,
)
from llama_index.core.schema import (
MetadataMode,
NodeWithScore,
QueryBundle,
TextNode,
)
from llama_index.core.settings import (
Settings,
callback_manager_from_settings_or_context,
llm_from_settings_or_context,
)
CITATION_QA_TEMPLATE = PromptTemplate(
"Please provide an answer based solely on the provided sources. "
"When referencing information from a source, "
"cite the appropriate source(s) using their corresponding numbers. "
"Every answer should include at least one source citation. "
"Only cite a source when you are explicitly referencing it. "
"If none of the sources are helpful, you should indicate that. "
"For example:\n"
"Source 1:\n"
"The sky is red in the evening and blue in the morning.\n"
"Source 2:\n"
"Water is wet when the sky is red.\n"
"Query: When is water wet?\n"
"Answer: Water will be wet when the sky is red [2], "
"which occurs in the evening [1].\n"
"Now it's your turn. Below are several numbered sources of information:"
"\n------\n"
"{context_str}"
"\n------\n"
"Query: {query_str}\n"
"Answer: "
)
CITATION_REFINE_TEMPLATE = PromptTemplate(
"Please provide an answer based solely on the provided sources. "
"When referencing information from a source, "
"cite the appropriate source(s) using their corresponding numbers. "
"Every answer should include at least one source citation. "
"Only cite a source when you are explicitly referencing it. "
"If none of the sources are helpful, you should indicate that. "
"For example:\n"
"Source 1:\n"
"The sky is red in the evening and blue in the morning.\n"
"Source 2:\n"
"Water is wet when the sky is red.\n"
"Query: When is water wet?\n"
"Answer: Water will be wet when the sky is red [2], "
"which occurs in the evening [1].\n"
"Now it's your turn. "
"We have provided an existing answer: {existing_answer}"
"Below are several numbered sources of information. "
"Use them to refine the existing answer. "
"If the provided sources are not helpful, you will repeat the existing answer."
"\nBegin refining!"
"\n------\n"
"{context_msg}"
"\n------\n"
"Query: {query_str}\n"
"Answer: "
)
DEFAULT_CITATION_CHUNK_SIZE = 512
DEFAULT_CITATION_CHUNK_OVERLAP = 20
class CitationQueryEngine(BaseQueryEngine):
"""Citation query engine.
Args:
retriever (BaseRetriever): A retriever object.
response_synthesizer (Optional[BaseSynthesizer]):
A BaseSynthesizer object.
citation_chunk_size (int):
Size of citation chunks, default=512. Useful for controlling
granularity of sources.
citation_chunk_overlap (int): Overlap of citation nodes, default=20.
text_splitter (Optional[TextSplitter]):
A text splitter for creating citation source nodes. Default is
a SentenceSplitter.
callback_manager (Optional[CallbackManager]): A callback manager.
metadata_mode (MetadataMode): A MetadataMode object that controls how
metadata is included in the citation prompt.
"""
def __init__(
self,
retriever: BaseRetriever,
llm: Optional[LLM] = None,
response_synthesizer: Optional[BaseSynthesizer] = None,
citation_chunk_size: int = DEFAULT_CITATION_CHUNK_SIZE,
citation_chunk_overlap: int = DEFAULT_CITATION_CHUNK_OVERLAP,
text_splitter: Optional[TextSplitter] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
callback_manager: Optional[CallbackManager] = None,
metadata_mode: MetadataMode = MetadataMode.NONE,
) -> None:
self.text_splitter = text_splitter or SentenceSplitter(
chunk_size=citation_chunk_size, chunk_overlap=citation_chunk_overlap
)
self._retriever = retriever
service_context = retriever.get_service_context()
callback_manager = (
callback_manager
or callback_manager_from_settings_or_context(Settings, service_context)
)
llm = llm or llm_from_settings_or_context(Settings, service_context)
self._response_synthesizer = response_synthesizer or get_response_synthesizer(
llm=llm,
service_context=service_context,
callback_manager=callback_manager,
)
self._node_postprocessors = node_postprocessors or []
self._metadata_mode = metadata_mode
for node_postprocessor in self._node_postprocessors:
node_postprocessor.callback_manager = callback_manager
super().__init__(callback_manager=callback_manager)
@classmethod
def from_args(
cls,
index: BaseGPTIndex,
llm: Optional[LLM] = None,
response_synthesizer: Optional[BaseSynthesizer] = None,
citation_chunk_size: int = DEFAULT_CITATION_CHUNK_SIZE,
citation_chunk_overlap: int = DEFAULT_CITATION_CHUNK_OVERLAP,
text_splitter: Optional[TextSplitter] = None,
citation_qa_template: BasePromptTemplate = CITATION_QA_TEMPLATE,
citation_refine_template: BasePromptTemplate = CITATION_REFINE_TEMPLATE,
retriever: Optional[BaseRetriever] = None,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
# response synthesizer args
response_mode: ResponseMode = ResponseMode.COMPACT,
use_async: bool = False,
streaming: bool = False,
# class-specific args
metadata_mode: MetadataMode = MetadataMode.NONE,
**kwargs: Any,
) -> "CitationQueryEngine":
"""Initialize a CitationQueryEngine object.".
Args:
            index: (BaseGPTIndex): index to use for querying
llm: (Optional[LLM]): LLM object to use for response generation.
citation_chunk_size (int):
Size of citation chunks, default=512. Useful for controlling
granularity of sources.
citation_chunk_overlap (int): Overlap of citation nodes, default=20.
text_splitter (Optional[TextSplitter]):
A text splitter for creating citation source nodes. Default is
a SentenceSplitter.
citation_qa_template (BasePromptTemplate): Template for initial citation QA
citation_refine_template (BasePromptTemplate):
Template for citation refinement.
retriever (BaseRetriever): A retriever object.
service_context (Optional[ServiceContext]): A ServiceContext object.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
verbose (bool): Whether to print out debug info.
response_mode (ResponseMode): A ResponseMode object.
use_async (bool): Whether to use async.
streaming (bool): Whether to use streaming.
optimizer (Optional[BaseTokenUsageOptimizer]): A BaseTokenUsageOptimizer
object.
"""
retriever = retriever or index.as_retriever(**kwargs)
response_synthesizer = response_synthesizer or get_response_synthesizer(
llm=llm,
service_context=index.service_context,
text_qa_template=citation_qa_template,
refine_template=citation_refine_template,
response_mode=response_mode,
use_async=use_async,
streaming=streaming,
)
return cls(
retriever=retriever,
response_synthesizer=response_synthesizer,
callback_manager=callback_manager_from_settings_or_context(
Settings, index.service_context
),
citation_chunk_size=citation_chunk_size,
citation_chunk_overlap=citation_chunk_overlap,
text_splitter=text_splitter,
node_postprocessors=node_postprocessors,
metadata_mode=metadata_mode,
)
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {"response_synthesizer": self._response_synthesizer}
def _create_citation_nodes(self, nodes: List[NodeWithScore]) -> List[NodeWithScore]:
"""Modify retrieved nodes to be granular sources."""
new_nodes: List[NodeWithScore] = []
for node in nodes:
text_chunks = self.text_splitter.split_text(
node.node.get_content(metadata_mode=self._metadata_mode)
)
for text_chunk in text_chunks:
text = f"Source {len(new_nodes)+1}:\n{text_chunk}\n"
new_node = NodeWithScore(
node=TextNode.parse_obj(node.node), score=node.score
)
new_node.node.text = text
new_nodes.append(new_node)
return new_nodes
def retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = self._retriever.retrieve(query_bundle)
for postprocessor in self._node_postprocessors:
nodes = postprocessor.postprocess_nodes(nodes, query_bundle=query_bundle)
return nodes
async def aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
nodes = await self._retriever.aretrieve(query_bundle)
for postprocessor in self._node_postprocessors:
nodes = postprocessor.postprocess_nodes(nodes, query_bundle=query_bundle)
return nodes
@property
def retriever(self) -> BaseRetriever:
"""Get the retriever object."""
return self._retriever
def synthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
nodes = self._create_citation_nodes(nodes)
return self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
additional_source_nodes=additional_source_nodes,
)
async def asynthesize(
self,
query_bundle: QueryBundle,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
) -> RESPONSE_TYPE:
nodes = self._create_citation_nodes(nodes)
return await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
additional_source_nodes=additional_source_nodes,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = self.retrieve(query_bundle)
nodes = self._create_citation_nodes(nodes)
retrieve_event.on_end(payload={EventPayload.NODES: nodes})
response = self._response_synthesizer.synthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Answer a query."""
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = await self.aretrieve(query_bundle)
nodes = self._create_citation_nodes(nodes)
retrieve_event.on_end(payload={EventPayload.NODES: nodes})
response = await self._response_synthesizer.asynthesize(
query=query_bundle,
nodes=nodes,
)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
| [
"llama_index.core.prompts.PromptTemplate",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.response_synthesizers.get_response_synthesizer",
"llama_index.core.schema.TextNode.parse_obj",
"llama_index.core.settings.llm_from_settings_or_context"
] | [((1182, 1924), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only cite a source when you are explicitly referencing it. If none of the sources are helpful, you should indicate that. For example:\nSource 1:\nThe sky is red in the evening and blue in the morning.\nSource 2:\nWater is wet when the sky is red.\nQuery: When is water wet?\nAnswer: Water will be wet when the sky is red [2], which occurs in the evening [1].\nNow it\'s your turn. Below are several numbered sources of information:\n------\n{context_str}\n------\nQuery: {query_str}\nAnswer: """'], {}), '(\n """Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only cite a source when you are explicitly referencing it. If none of the sources are helpful, you should indicate that. For example:\nSource 1:\nThe sky is red in the evening and blue in the morning.\nSource 2:\nWater is wet when the sky is red.\nQuery: When is water wet?\nAnswer: Water will be wet when the sky is red [2], which occurs in the evening [1].\nNow it\'s your turn. Below are several numbered sources of information:\n------\n{context_str}\n------\nQuery: {query_str}\nAnswer: """\n )\n', (1196, 1924), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((2090, 3020), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only cite a source when you are explicitly referencing it. If none of the sources are helpful, you should indicate that. For example:\nSource 1:\nThe sky is red in the evening and blue in the morning.\nSource 2:\nWater is wet when the sky is red.\nQuery: When is water wet?\nAnswer: Water will be wet when the sky is red [2], which occurs in the evening [1].\nNow it\'s your turn. We have provided an existing answer: {existing_answer}Below are several numbered sources of information. Use them to refine the existing answer. If the provided sources are not helpful, you will repeat the existing answer.\nBegin refining!\n------\n{context_msg}\n------\nQuery: {query_str}\nAnswer: """'], {}), '(\n """Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only cite a source when you are explicitly referencing it. If none of the sources are helpful, you should indicate that. For example:\nSource 1:\nThe sky is red in the evening and blue in the morning.\nSource 2:\nWater is wet when the sky is red.\nQuery: When is water wet?\nAnswer: Water will be wet when the sky is red [2], which occurs in the evening [1].\nNow it\'s your turn. We have provided an existing answer: {existing_answer}Below are several numbered sources of information. Use them to refine the existing answer. 
If the provided sources are not helpful, you will repeat the existing answer.\nBegin refining!\n------\n{context_msg}\n------\nQuery: {query_str}\nAnswer: """\n )\n', (2104, 3020), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((4703, 4794), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'citation_chunk_size', 'chunk_overlap': 'citation_chunk_overlap'}), '(chunk_size=citation_chunk_size, chunk_overlap=\n citation_chunk_overlap)\n', (4719, 4794), False, 'from llama_index.core.node_parser import SentenceSplitter, TextSplitter\n'), ((4980, 5048), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (5021, 5048), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((5080, 5135), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (5108, 5135), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((5198, 5303), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'llm': 'llm', 'service_context': 'service_context', 'callback_manager': 'callback_manager'}), '(llm=llm, service_context=service_context,\n callback_manager=callback_manager)\n', (5222, 5303), False, 'from llama_index.core.response_synthesizers import BaseSynthesizer, ResponseMode, get_response_synthesizer\n'), ((8135, 8373), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'llm': 'llm', 'service_context': 'index.service_context', 'text_qa_template': 'citation_qa_template', 'refine_template': 'citation_refine_template', 'response_mode': 'response_mode', 'use_async': 'use_async', 'streaming': 'streaming'}), '(llm=llm, service_context=index.service_context,\n text_qa_template=citation_qa_template, refine_template=\n citation_refine_template, response_mode=response_mode, use_async=\n use_async, streaming=streaming)\n', (8159, 8373), False, 'from llama_index.core.response_synthesizers import BaseSynthesizer, ResponseMode, get_response_synthesizer\n'), ((8593, 8667), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'index.service_context'], {}), '(Settings, index.service_context)\n', (8634, 8667), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context, llm_from_settings_or_context\n'), ((9664, 9693), 'llama_index.core.schema.TextNode.parse_obj', 'TextNode.parse_obj', (['node.node'], {}), '(node.node)\n', (9682, 9693), False, 'from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle, TextNode\n')] |
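# --- Usage sketch (illustrative) ---
# A minimal example of the `from_args` constructor documented above, assuming it is
# llama_index's CitationQueryEngine; the data directory, chunk sizes and question
# below are placeholders, not values taken from the original source.
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.query_engine import CitationQueryEngine
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = CitationQueryEngine.from_args(
    index,
    citation_chunk_size=512,    # size of the granular "Source N" chunks created above
    citation_chunk_overlap=20,  # default overlap, per the docstring above
)
response = query_engine.query("What does the source material say about X?")
print(response)                         # answer text with [N]-style citations
for source in response.source_nodes:    # the granular citation source nodes
    print(source.node.get_content()[:100])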
"""
# HackBot
A Streamlit chat app for exploring Hacker News opinions alongside Stackoverflow survey results, powered by LlamaIndex and ClickHouse.
"""
import logging
import sys
import streamlit as st
from clickhouse_connect import common
from llama_index.core.settings import Settings
from llama_index.embeddings.fastembed import FastEmbedEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex, PromptTemplate
from llama_index.core.indices.struct_store import NLSQLTableQueryEngine
from llama_index.core.indices.vector_store import VectorIndexAutoRetriever
from llama_index.core.indices.vector_store.retrievers.auto_retriever.prompts import PREFIX, EXAMPLES
from llama_index.core.prompts import PromptType
from llama_index.core.query_engine import RetrieverQueryEngine, SQLAutoVectorQueryEngine
from llama_index.core.tools import QueryEngineTool
from llama_index.core.vector_stores.types import VectorStoreInfo, MetadataInfo
from llama_index.vector_stores.clickhouse import ClickHouseVectorStore
import clickhouse_connect
import openai
from sqlalchemy import (
create_engine,
)
from llama_index.core import SQLDatabase
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
host = st.secrets.clickhouse.host
password = st.secrets.clickhouse.password
username = st.secrets.clickhouse.username
secure = st.secrets.clickhouse.secure
http_port = st.secrets.clickhouse.http_port
native_port = st.secrets.clickhouse.native_port
open_ai_model = "gpt-4"
database = st.secrets.clickhouse.database
hackernews_table = st.secrets.clickhouse.hackernews_table
stackoverflow_table = st.secrets.clickhouse.stackoverflow_table
st.set_page_config(
page_title="Get summaries of Hacker News posts enriched with Stackoverflow survey results, powered by LlamaIndex and ClickHouse",
page_icon="🦙🚀", layout="centered", initial_sidebar_state="auto", menu_items=None)
st.title("💬HackBot powered by LlamaIndex 🦙 and ClickHouse 🚀")
st.info(
"Check out the full [blog post](https://clickhouse.com/blog/building-a-hackernews-chat-bot-with-llama-index-with-clickhouse/) for this app",
icon="📃")
st.caption("A Streamlit chatbot 💬 for Hacker News powered by LlamaIndex 🦙 and ClickHouse 🚀")
@st.cache_resource
def load_embedding():
return FastEmbedEmbedding(
model_name="sentence-transformers/all-MiniLM-L6-v2",
max_length=384,
cache_dir="./embeddings/"
)
Settings.embed_model = load_embedding()
CLICKHOUSE_TEXT_TO_SQL_TMPL = (
"Given an input question, first create a syntactically correct ClickHouse SQL "
"query to run, then look at the results of the query and return the answer. "
"You can order the results by a relevant column to return the most "
"interesting examples in the database.\n\n"
"Never query for all the columns from a specific table, only ask for a "
"few relevant columns given the question.\n\n"
"Pay attention to use only the column names that you can see in the schema "
"description. "
"Be careful to not query for columns that do not exist. "
"Pay attention to which column is in which table. "
"Also, qualify column names with the table name when needed. \n"
"If needing to group on Array Columns use the ClickHouse function arrayJoin e.g. arrayJoin(columnName) \n"
"For example, the following query identifies the most popular database:\n"
"SELECT d, count(*) AS count FROM so_surveys GROUP BY "
"arrayJoin(database_want_to_work_with) AS d ORDER BY count DESC LIMIT 1\n"
"You are required to use the following format, each taking one line:\n\n"
"Question: Question here\n"
"SQLQuery: SQL Query to run\n"
"SQLResult: Result of the SQLQuery\n"
"Answer: Final answer here\n\n"
"Only use tables listed below.\n"
"{schema}\n\n"
"Question: {query_str}\n"
"SQLQuery: "
)
CLICKHOUSE_TEXT_TO_SQL_PROMPT = PromptTemplate(
CLICKHOUSE_TEXT_TO_SQL_TMPL,
prompt_type=PromptType.TEXT_TO_SQL,
)
CLICKHOUSE_CUSTOM_SUFFIX = """
The following is the datasource schema to work with.
IMPORTANT: Make sure that filters are only used as needed and only suggest filters for fields in the data source.
Data Source:
```json
{info_str}
```
User Query:
{query_str}
Structured Request:
"""
CLICKHOUSE_VECTOR_STORE_QUERY_PROMPT_TMPL = PREFIX + EXAMPLES + CLICKHOUSE_CUSTOM_SUFFIX
@st.cache_resource
def clickhouse():
common.set_setting('autogenerate_session_id', False)
return clickhouse_connect.get_client(
host=host, port=http_port, username=username, password=password,
secure=secure, settings={"max_parallel_replicas": "3", "use_hedged_requests": "0",
"allow_experimental_parallel_reading_from_replicas": "1"}
)
def sql_auto_vector_query_engine():
with st.spinner(text="Preparing indexes. This should take a few seconds. No time to make 🫖"):
engine = create_engine(
f'clickhouse+native://{username}:{password}@{host}:' +
f'{native_port}/{database}?compression=lz4&secure={secure}'
)
sql_database = SQLDatabase(engine, include_tables=[stackoverflow_table], view_support=True)
vector_store = ClickHouseVectorStore(clickhouse_client=clickhouse(), table=hackernews_table)
vector_index = VectorStoreIndex.from_vector_store(vector_store)
return sql_database, vector_index
def get_engine(min_length, score, min_date):
sql_database, vector_index = sql_auto_vector_query_engine()
nl_sql_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=[stackoverflow_table],
text_to_sql_prompt=CLICKHOUSE_TEXT_TO_SQL_PROMPT,
llm=OpenAI(model=open_ai_model)
)
vector_store_info = VectorStoreInfo(
content_info="Social news posts and comments from users",
metadata_info=[
MetadataInfo(
name="post_score", type="int", description="Score of the comment or post",
),
MetadataInfo(
name="by", type="str", description="the author or person who posted the comment",
),
MetadataInfo(
name="time", type="date", description="the time at which the post or comment was made",
),
]
)
vector_auto_retriever = VectorIndexAutoRetriever(
vector_index, vector_store_info=vector_store_info, similarity_top_k=10,
prompt_template_str=CLICKHOUSE_VECTOR_STORE_QUERY_PROMPT_TMPL, llm=OpenAI(model=open_ai_model),
vector_store_kwargs={"where": f"length >= {min_length} AND post_score >= {score} AND time >= '{min_date}'"}
)
retriever_query_engine = RetrieverQueryEngine.from_args(vector_auto_retriever, llm=OpenAI(model=open_ai_model))
sql_tool = QueryEngineTool.from_defaults(
query_engine=nl_sql_engine,
description=(
"Useful for translating a natural language query into a SQL query over"
f" a table: {stackoverflow_table}, containing the survey responses on"
f" different types of technology users currently use and want to use"
),
)
vector_tool = QueryEngineTool.from_defaults(
query_engine=retriever_query_engine,
description=(
f"Useful for answering semantic questions abouts users comments and posts"
),
)
return SQLAutoVectorQueryEngine(
sql_tool, vector_tool, llm=OpenAI(model=open_ai_model)
)
# identify the value ranges for our score, length and date widgets
if "max_score" not in st.session_state.keys():
client = clickhouse()
st.session_state.max_score = int(
client.query("SELECT max(post_score) FROM default.hackernews_llama").first_row[0])
st.session_state.max_length = int(
client.query("SELECT max(length) FROM default.hackernews_llama").first_row[0])
st.session_state.min_date, st.session_state.max_date = client.query(
"SELECT min(toDate(time)), max(toDate(time)) FROM default.hackernews_llama WHERE time != '1970-01-01 00:00:00'").first_row
# set the initial message on load. Store in the session.
if "messages" not in st.session_state:
st.session_state.messages = [
{"role": "assistant", "content": "Ask me a question about opinions on Hacker News and Stackoverflow!"}]
# build the sidebar with our filters
with st.sidebar:
score = st.slider('Min Score', 0, st.session_state.max_score, value=0)
min_length = st.slider('Min comment Length (tokens)', 0, st.session_state.max_length, value=20)
min_date = st.date_input('Min comment date', value=st.session_state.min_date, min_value=st.session_state.min_date,
max_value=st.session_state.max_date)
openai_api_key = st.text_input("Open API Key", key="chatbot_api_key", type="password")
openai.api_key = openai_api_key
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
"[View the source code](https://github.com/ClickHouse/examples/blob/main/blog-examples/llama-index/hacknernews_app/hacker_insights.py)"
# Grab the user's OpenAI API key. Don't allow questions until it is entered.
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
if prompt := st.chat_input(placeholder="Your question about Hacker News"):
st.session_state.messages.append({"role": "user", "content": prompt})
# Display the prior chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
# Query our engine for the answer and write to the page
response = str(get_engine(min_length, score, min_date).query(prompt))
st.write(response)
st.session_state.messages.append({"role": "assistant", "content": response})
| [
"llama_index.core.SQLDatabase",
"llama_index.llms.openai.OpenAI",
"llama_index.core.VectorStoreIndex.from_vector_store",
"llama_index.core.tools.QueryEngineTool.from_defaults",
"llama_index.core.vector_stores.types.MetadataInfo",
"llama_index.core.PromptTemplate",
"llama_index.embeddings.fastembed.FastEmbedEmbedding"
] | [((1100, 1158), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (1119, 1158), False, 'import logging\n'), ((1713, 1957), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Get summaries of Hacker News posts enriched with Stackoverflow survey results, powered by LlamaIndex and ClickHouse"""', 'page_icon': '"""🦙🚀"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=\n 'Get summaries of Hacker News posts enriched with Stackoverflow survey results, powered by LlamaIndex and ClickHouse'\n , page_icon='🦙🚀', layout='centered', initial_sidebar_state='auto',\n menu_items=None)\n", (1731, 1957), True, 'import streamlit as st\n'), ((1953, 2014), 'streamlit.title', 'st.title', (['"""💬HackBot powered by LlamaIndex 🦙 and ClickHouse 🚀"""'], {}), "('💬HackBot powered by LlamaIndex 🦙 and ClickHouse 🚀')\n", (1961, 2014), True, 'import streamlit as st\n'), ((2015, 2183), 'streamlit.info', 'st.info', (['"""Check out the full [blog post](https://clickhouse.com/blog/building-a-hackernews-chat-bot-with-llama-index-with-clickhouse/) for this app"""'], {'icon': '"""📃"""'}), "(\n 'Check out the full [blog post](https://clickhouse.com/blog/building-a-hackernews-chat-bot-with-llama-index-with-clickhouse/) for this app'\n , icon='📃')\n", (2022, 2183), True, 'import streamlit as st\n'), ((2183, 2285), 'streamlit.caption', 'st.caption', (['"""A Streamlit chatbot 💬 for Hacker News powered by LlamaIndex 🦙 and ClickHouse 🚀"""'], {}), "(\n 'A Streamlit chatbot 💬 for Hacker News powered by LlamaIndex 🦙 and ClickHouse 🚀'\n )\n", (2193, 2285), True, 'import streamlit as st\n'), ((3944, 4023), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['CLICKHOUSE_TEXT_TO_SQL_TMPL'], {'prompt_type': 'PromptType.TEXT_TO_SQL'}), '(CLICKHOUSE_TEXT_TO_SQL_TMPL, prompt_type=PromptType.TEXT_TO_SQL)\n', (3958, 4023), False, 'from llama_index.core import VectorStoreIndex, PromptTemplate\n'), ((1190, 1230), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (1211, 1230), False, 'import logging\n'), ((2330, 2448), 'llama_index.embeddings.fastembed.FastEmbedEmbedding', 'FastEmbedEmbedding', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L6-v2"""', 'max_length': '(384)', 'cache_dir': '"""./embeddings/"""'}), "(model_name='sentence-transformers/all-MiniLM-L6-v2',\n max_length=384, cache_dir='./embeddings/')\n", (2348, 2448), False, 'from llama_index.embeddings.fastembed import FastEmbedEmbedding\n'), ((4455, 4507), 'clickhouse_connect.common.set_setting', 'common.set_setting', (['"""autogenerate_session_id"""', '(False)'], {}), "('autogenerate_session_id', False)\n", (4473, 4507), False, 'from clickhouse_connect import common\n'), ((4519, 4767), 'clickhouse_connect.get_client', 'clickhouse_connect.get_client', ([], {'host': 'host', 'port': 'http_port', 'username': 'username', 'password': 'password', 'secure': 'secure', 'settings': "{'max_parallel_replicas': '3', 'use_hedged_requests': '0',\n 'allow_experimental_parallel_reading_from_replicas': '1'}"}), "(host=host, port=http_port, username=username,\n password=password, secure=secure, settings={'max_parallel_replicas':\n '3', 'use_hedged_requests': '0',\n 'allow_experimental_parallel_reading_from_replicas': '1'})\n", (4548, 4767), False, 'import clickhouse_connect\n'), ((6832, 7118), 'llama_index.core.tools.QueryEngineTool.from_defaults', 
'QueryEngineTool.from_defaults', ([], {'query_engine': 'nl_sql_engine', 'description': 'f"""Useful for translating a natural language query into a SQL query over a table: {stackoverflow_table}, containing the survey responses on different types of technology users currently use and want to use"""'}), "(query_engine=nl_sql_engine, description=\n f'Useful for translating a natural language query into a SQL query over a table: {stackoverflow_table}, containing the survey responses on different types of technology users currently use and want to use'\n )\n", (6861, 7118), False, 'from llama_index.core.tools import QueryEngineTool\n'), ((7205, 7368), 'llama_index.core.tools.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'retriever_query_engine', 'description': 'f"""Useful for answering semantic questions abouts users comments and posts"""'}), "(query_engine=retriever_query_engine,\n description=\n f'Useful for answering semantic questions abouts users comments and posts')\n", (7234, 7368), False, 'from llama_index.core.tools import QueryEngineTool\n'), ((7605, 7628), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (7626, 7628), True, 'import streamlit as st\n'), ((8425, 8487), 'streamlit.slider', 'st.slider', (['"""Min Score"""', '(0)', 'st.session_state.max_score'], {'value': '(0)'}), "('Min Score', 0, st.session_state.max_score, value=0)\n", (8434, 8487), True, 'import streamlit as st\n'), ((8505, 8591), 'streamlit.slider', 'st.slider', (['"""Min comment Length (tokens)"""', '(0)', 'st.session_state.max_length'], {'value': '(20)'}), "('Min comment Length (tokens)', 0, st.session_state.max_length,\n value=20)\n", (8514, 8591), True, 'import streamlit as st\n'), ((8603, 8747), 'streamlit.date_input', 'st.date_input', (['"""Min comment date"""'], {'value': 'st.session_state.min_date', 'min_value': 'st.session_state.min_date', 'max_value': 'st.session_state.max_date'}), "('Min comment date', value=st.session_state.min_date,\n min_value=st.session_state.min_date, max_value=st.session_state.max_date)\n", (8616, 8747), True, 'import streamlit as st\n'), ((8794, 8863), 'streamlit.text_input', 'st.text_input', (['"""Open API Key"""'], {'key': '"""chatbot_api_key"""', 'type': '"""password"""'}), "('Open API Key', key='chatbot_api_key', type='password')\n", (8807, 8863), True, 'import streamlit as st\n'), ((9214, 9268), 'streamlit.info', 'st.info', (['"""Please add your OpenAI API key to continue."""'], {}), "('Please add your OpenAI API key to continue.')\n", (9221, 9268), True, 'import streamlit as st\n'), ((9273, 9282), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (9280, 9282), True, 'import streamlit as st\n'), ((9297, 9357), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Your question about Hacker News"""'}), "(placeholder='Your question about Hacker News')\n", (9310, 9357), True, 'import streamlit as st\n'), ((9363, 9432), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (9395, 9432), True, 'import streamlit as st\n'), ((1159, 1178), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1176, 1178), False, 'import logging\n'), ((4858, 4950), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Preparing indexes. This should take a few seconds. No time to make 🫖"""'}), "(text=\n 'Preparing indexes. This should take a few seconds. 
No time to make 🫖')\n", (4868, 4950), True, 'import streamlit as st\n'), ((4964, 5097), 'sqlalchemy.create_engine', 'create_engine', (["(f'clickhouse+native://{username}:{password}@{host}:' +\n f'{native_port}/{database}?compression=lz4&secure={secure}')"], {}), "(f'clickhouse+native://{username}:{password}@{host}:' +\n f'{native_port}/{database}?compression=lz4&secure={secure}')\n", (4977, 5097), False, 'from sqlalchemy import create_engine\n'), ((5151, 5227), 'llama_index.core.SQLDatabase', 'SQLDatabase', (['engine'], {'include_tables': '[stackoverflow_table]', 'view_support': '(True)'}), '(engine, include_tables=[stackoverflow_table], view_support=True)\n', (5162, 5227), False, 'from llama_index.core import SQLDatabase\n'), ((5352, 5400), 'llama_index.core.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (5386, 5400), False, 'from llama_index.core import VectorStoreIndex, PromptTemplate\n'), ((9553, 9585), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (9568, 9585), True, 'import streamlit as st\n'), ((9595, 9623), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (9603, 9623), True, 'import streamlit as st\n'), ((9756, 9784), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (9771, 9784), True, 'import streamlit as st\n'), ((5741, 5768), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'open_ai_model'}), '(model=open_ai_model)\n', (5747, 5768), False, 'from llama_index.llms.openai import OpenAI\n'), ((6548, 6575), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'open_ai_model'}), '(model=open_ai_model)\n', (6554, 6575), False, 'from llama_index.llms.openai import OpenAI\n'), ((6787, 6814), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'open_ai_model'}), '(model=open_ai_model)\n', (6793, 6814), False, 'from llama_index.llms.openai import OpenAI\n'), ((7480, 7507), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'open_ai_model'}), '(model=open_ai_model)\n', (7486, 7507), False, 'from llama_index.llms.openai import OpenAI\n'), ((9799, 9824), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (9809, 9824), True, 'import streamlit as st\n'), ((9988, 10006), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (9996, 10006), True, 'import streamlit as st\n'), ((10019, 10095), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': response}"], {}), "({'role': 'assistant', 'content': response})\n", (10051, 10095), True, 'import streamlit as st\n'), ((5918, 6010), 'llama_index.core.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""post_score"""', 'type': '"""int"""', 'description': '"""Score of the comment or post"""'}), "(name='post_score', type='int', description=\n 'Score of the comment or post')\n", (5930, 6010), False, 'from llama_index.core.vector_stores.types import VectorStoreInfo, MetadataInfo\n'), ((6050, 6149), 'llama_index.core.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""by"""', 'type': '"""str"""', 'description': '"""the author or person who posted the comment"""'}), "(name='by', type='str', description=\n 'the author or person who posted the comment')\n", (6062, 6149), False, 'from llama_index.core.vector_stores.types import VectorStoreInfo, MetadataInfo\n'), ((6189, 6294), 
'llama_index.core.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""time"""', 'type': '"""date"""', 'description': '"""the time at which the post or comment was made"""'}), "(name='time', type='date', description=\n 'the time at which the post or comment was made')\n", (6201, 6294), False, 'from llama_index.core.vector_stores.types import VectorStoreInfo, MetadataInfo\n')] |
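# --- Configuration sketch (illustrative) ---
# The Streamlit app above reads its connection settings from st.secrets.clickhouse.*,
# so a .streamlit/secrets.toml along the following lines is assumed; every value below
# is a placeholder, not a real endpoint or credential.
# [clickhouse]
# host = "your-clickhouse-host"
# username = "default"
# password = "your-password"
# secure = true
# http_port = 8443
# native_port = 9440
# database = "default"
# hackernews_table = "hackernews_llama"
# stackoverflow_table = "so_surveys"
# Run with: streamlit run hacker_insights.py (file name taken from the source link above).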
import chromadb
import openai
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
load_dotenv()
from llama_index.llms import OpenAI
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.vector_stores import ChromaVectorStore
import os
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
print(OPENAI_API_KEY)
client = chromadb.PersistentClient(path=".chromadb/")
print(client.list_collections())
# get a collection
collection_name = input("Enter the name of the collection to load: ")
chroma_collection = client.get_collection(collection_name)
print(chroma_collection.count())
# Create a ChatOpenAI instance as the underlying language model
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-16k-0613")
service_context = ServiceContext.from_defaults(llm=llm)
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index = VectorStoreIndex.from_vector_store(vector_store, service_context=service_context)
query_engine = index.as_query_engine(service_context=service_context, verbose=True, streaming=True)
while True:
user_input = []
print("请输入您的问题(纯文本格式),换行输入 n 以结束:")
while True:
line = input()
if line != "n":
user_input.append(line)
else:
break
user_input_text = "\n".join(user_input)
    # print(user_input_text)
    print("**** Thinking ****")
try:
r = query_engine.query(user_input_text)
print(r)
except Exception as e:
print("出现异常:", str(e))
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.vector_stores.ChromaVectorStore"
] | [((107, 120), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (118, 120), False, 'from dotenv import load_dotenv\n'), ((298, 325), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (307, 325), False, 'import os\n'), ((390, 434), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '""".chromadb/"""'}), "(path='.chromadb/')\n", (415, 434), False, 'import chromadb\n'), ((665, 722), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-3.5-turbo-16k-0613"""'}), "(temperature=0, model='gpt-3.5-turbo-16k-0613')\n", (675, 722), False, 'from langchain.chat_models import ChatOpenAI\n'), ((741, 778), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (769, 778), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((795, 849), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (812, 849), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((858, 944), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context'}), '(vector_store, service_context=\n service_context)\n', (892, 944), False, 'from llama_index import VectorStoreIndex, ServiceContext\n')] |
import tempfile
import llama_index
from llama_index import SimpleDirectoryReader
import aiohttp
from llama_index.readers.web import DEFAULT_WEBSITE_EXTRACTOR
from models.statics_model import ResponseStatics, g_index, file_extensions_mappings
def upload_doc_handler(knowledgebase_id, file):
if not knowledgebase_id:
return False
# Check if knowledgebase exists
if knowledgebase_id not in g_index:
return False
# Get the content type of the file
content_type = "" if not file.content_type else file.content_type
suffix = file_extensions_mappings[content_type]
with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as fp:
file.save(fp)
doc = SimpleDirectoryReader(input_files=[fp.name]).load_data()
g_index[knowledgebase_id].add_documents(doc)
return True
async def upload_link_handler(knowledgebase_id, url):
if not knowledgebase_id:
return False
# Check if knowledgebase exists
if knowledgebase_id not in g_index:
return False
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
if response.status != 200:
return False
if response.headers["Content-Type"] == "application/pdf":
data = await response.read()
f = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
f.write(data)
f.close()
doc = SimpleDirectoryReader(input_files=[f.name]).load_data()
g_index[knowledgebase_id].add_documents(doc)
else:
documents = llama_index.BeautifulSoupWebReader(website_extractor=DEFAULT_WEBSITE_EXTRACTOR).load_data(urls=[url])
g_index[knowledgebase_id].add_documents(documents)
return True
| [
"llama_index.BeautifulSoupWebReader",
"llama_index.SimpleDirectoryReader"
] | [((615, 671), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': 'suffix'}), '(delete=False, suffix=suffix)\n', (642, 671), False, 'import tempfile\n'), ((1063, 1086), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1084, 1086), False, 'import aiohttp\n'), ((715, 759), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[fp.name]'}), '(input_files=[fp.name])\n', (736, 759), False, 'from llama_index import SimpleDirectoryReader\n'), ((1352, 1408), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".pdf"""', 'delete': '(False)'}), "(suffix='.pdf', delete=False)\n", (1379, 1408), False, 'import tempfile\n'), ((1487, 1530), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[f.name]'}), '(input_files=[f.name])\n', (1508, 1530), False, 'from llama_index import SimpleDirectoryReader\n'), ((1651, 1730), 'llama_index.BeautifulSoupWebReader', 'llama_index.BeautifulSoupWebReader', ([], {'website_extractor': 'DEFAULT_WEBSITE_EXTRACTOR'}), '(website_extractor=DEFAULT_WEBSITE_EXTRACTOR)\n', (1685, 1730), False, 'import llama_index\n')] |
import os
from dotenv import load_dotenv
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor
from langchain.chat_models import ChatOpenAI
load_dotenv()
os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_KEY')
def tune_llm(input_directory="sourcedata", output_file="indexdata/index.json"):
loaded_content = SimpleDirectoryReader(input_directory).load_data()
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.7, model_name='gpt-3.5-turbo'))
output_index = GPTSimpleVectorIndex(loaded_content, llm_predictor=llm_predictor)
# Create the output directory if it doesn't exist
os.makedirs(os.path.dirname(output_file), exist_ok=True)
output_index.save_to_disk(output_file) | [
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader"
] | [((169, 182), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (180, 182), False, 'from dotenv import load_dotenv\n'), ((215, 238), 'os.getenv', 'os.getenv', (['"""OPENAI_KEY"""'], {}), "('OPENAI_KEY')\n", (224, 238), False, 'import os\n'), ((506, 571), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['loaded_content'], {'llm_predictor': 'llm_predictor'}), '(loaded_content, llm_predictor=llm_predictor)\n', (526, 571), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor\n'), ((643, 671), 'os.path.dirname', 'os.path.dirname', (['output_file'], {}), '(output_file)\n', (658, 671), False, 'import os\n'), ((341, 379), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['input_directory'], {}), '(input_directory)\n', (362, 379), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor\n'), ((430, 485), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.7)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0.7, model_name='gpt-3.5-turbo')\n", (440, 485), False, 'from langchain.chat_models import ChatOpenAI\n')] |
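# --- Usage sketch (illustrative) ---
# Building the index above is a one-off step; it can then be reloaded and queried with
# the same legacy llama_index API used in this file (the question text is a placeholder).
if __name__ == "__main__":
    tune_llm()
    index = GPTSimpleVectorIndex.load_from_disk("indexdata/index.json")
    print(index.query("What topics does the source data cover?"))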
from ..conversable_agent import ConversableAgent
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from ....utils.client import ByzerLLM
from byzerllm.utils.retrieval import ByzerRetrieval
from ..agent import Agent
import ray
from ray.util.client.common import ClientActorHandle, ClientObjectRef
from .. import get_agent_name,run_agent_func,ChatResponse
from byzerllm.apps.agent.extensions.simple_retrieval_client import SimpleRetrievalClient
import uuid
import json
from byzerllm.apps.llama_index import get_service_context,get_storage_context
from llama_index import VectorStoreIndex
from llama_index.query_engine import SubQuestionQueryEngine
try:
from termcolor import colored
except ImportError:
def colored(x, *args, **kwargs):
return x
from llama_index.tools import QueryEngineTool, ToolMetadata
class LlamaIndexSubQuestionAgent(ConversableAgent):
PROMPT_DEFAULT = """You're a retrieve augmented chatbot. """
DEFAULT_SYSTEM_MESSAGE = PROMPT_DEFAULT
def __init__(
self,
name: str,
llm: ByzerLLM,
retrieval: ByzerRetrieval,
chat_name:str,
owner:str,
update_context_retry: int = 3,
system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
max_consecutive_auto_reply: Optional[int] = None,
human_input_mode: Optional[str] = "NEVER",
code_execution_config: Optional[Union[Dict, bool]] = False,
**kwargs,
):
super().__init__(
name,
llm,retrieval,
system_message,
is_termination_msg,
max_consecutive_auto_reply,
human_input_mode,
code_execution_config=code_execution_config,
**kwargs,
)
self.chat_name = chat_name
self.owner = owner
self.update_context_retry = update_context_retry
self._reply_func_list = []
# self.register_reply([Agent, ClientActorHandle,str], ConversableAgent.generate_llm_reply)
self.register_reply([Agent, ClientActorHandle,str], LlamaIndexSubQuestionAgent.generate_retrieval_based_reply)
self.register_reply([Agent, ClientActorHandle,str], ConversableAgent.check_termination_and_human_reply)
self.service_context = get_service_context(llm)
self.storage_context = get_storage_context(llm,retrieval)
def generate_retrieval_based_reply(
self,
raw_message: Optional[Union[Dict,str,ChatResponse]] = None,
messages: Optional[List[Dict]] = None,
sender: Optional[Union[ClientActorHandle,Agent,str]] = None,
config: Optional[Any] = None,
) -> Tuple[bool, Union[str, Dict, None,ChatResponse]]:
if messages is None:
messages = self._messages[get_agent_name(sender)]
new_message = messages[-1]
index = VectorStoreIndex.from_vector_store(vector_store = self.storage_context.vector_store,service_context=self.service_context)
vector_query_engine = index.as_query_engine()
query_engine_tools = [
QueryEngineTool(
query_engine=vector_query_engine,
metadata=ToolMetadata(
name="common",
description="common",
),
),
]
query_engine = SubQuestionQueryEngine.from_defaults(
query_engine_tools=query_engine_tools,
service_context=self.service_context,
use_async=True,
)
response = query_engine.query(new_message["content"])
return True, {
"content":response.response,
"metadata":{"agent":self.name,"TERMINATE":True}
}
| [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults",
"llama_index.tools.ToolMetadata"
] | [((2438, 2462), 'byzerllm.apps.llama_index.get_service_context', 'get_service_context', (['llm'], {}), '(llm)\n', (2457, 2462), False, 'from byzerllm.apps.llama_index import get_service_context, get_storage_context\n'), ((2494, 2529), 'byzerllm.apps.llama_index.get_storage_context', 'get_storage_context', (['llm', 'retrieval'], {}), '(llm, retrieval)\n', (2513, 2529), False, 'from byzerllm.apps.llama_index import get_service_context, get_storage_context\n'), ((3092, 3217), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'self.storage_context.vector_store', 'service_context': 'self.service_context'}), '(vector_store=self.storage_context.\n vector_store, service_context=self.service_context)\n', (3126, 3217), False, 'from llama_index import VectorStoreIndex\n'), ((3754, 3887), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': 'query_engine_tools', 'service_context': 'self.service_context', 'use_async': '(True)'}), '(query_engine_tools=query_engine_tools,\n service_context=self.service_context, use_async=True)\n', (3790, 3887), False, 'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((3463, 3512), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""common"""', 'description': '"""common"""'}), "(name='common', description='common')\n", (3475, 3512), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n')] |
# Copyright 2023 osiworx
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import datetime
import os
from llama_index.embeddings import HuggingFaceEmbedding
import qdrant_client
from llama_index import (
VectorStoreIndex,
ServiceContext,
SimpleDirectoryReader,
)
from llama_index.storage.storage_context import StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
client = qdrant_client.QdrantClient(
# you can use :memory: mode for fast and light-weight experiments,
# it does not require to have Qdrant deployed anywhere
# but requires qdrant-client >= 1.1.1
#location=":memory:"
# otherwise set Qdrant instance address with:
url="http://localhost:6333"
# set API KEY for Qdrant Cloud
# api_key="<qdrant-api-key>",
)
sample_files_path = "E:\prompt_sources\lexica_split"
embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L12-v2")
service_context = ServiceContext.from_defaults(llm=None, embed_model=embed_model)
vector_store = QdrantVectorStore(client=client, collection_name="prompts_all")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
for subdir, dirs, files in os.walk(sample_files_path):
if len(files) > 0:
now = datetime.datetime.now()
print(f'{now.strftime("%H:%M:%S")} adding folder: {subdir}')
documents = SimpleDirectoryReader(subdir).load_data()
docs = []
for doc in documents:
doc.excluded_llm_metadata_keys.append("file_path")
doc.excluded_embed_metadata_keys.append("file_path")
if doc.text != '':
docs = docs + [doc]
del documents
index = VectorStoreIndex.from_documents(
docs, storage_context=storage_context, service_context=service_context, show_progress=True
)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.embeddings.HuggingFaceEmbedding"
] | [((905, 960), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'url': '"""http://localhost:6333"""'}), "(url='http://localhost:6333')\n", (931, 960), False, 'import qdrant_client\n'), ((1352, 1426), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L12-v2"""'}), "(model_name='sentence-transformers/all-MiniLM-L12-v2')\n", (1372, 1426), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((1445, 1508), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None', 'embed_model': 'embed_model'}), '(llm=None, embed_model=embed_model)\n', (1473, 1508), False, 'from llama_index import VectorStoreIndex, ServiceContext, SimpleDirectoryReader\n'), ((1523, 1586), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': '"""prompts_all"""'}), "(client=client, collection_name='prompts_all')\n", (1540, 1586), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((1605, 1660), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1633, 1660), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((1691, 1717), 'os.walk', 'os.walk', (['sample_files_path'], {}), '(sample_files_path)\n', (1698, 1717), False, 'import os\n'), ((1756, 1779), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1777, 1779), False, 'import datetime\n'), ((2197, 2324), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context', 'show_progress': '(True)'}), '(docs, storage_context=storage_context,\n service_context=service_context, show_progress=True)\n', (2228, 2324), False, 'from llama_index import VectorStoreIndex, ServiceContext, SimpleDirectoryReader\n'), ((1870, 1899), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['subdir'], {}), '(subdir)\n', (1891, 1899), False, 'from llama_index import VectorStoreIndex, ServiceContext, SimpleDirectoryReader\n')] |
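# --- Retrieval sketch (illustrative) ---
# Once the "prompts_all" collection is populated, it can be searched through the same
# vector store. Because the ServiceContext above is built with llm=None, a plain
# retriever is used here instead of a query engine (the query text is a placeholder).
index = VectorStoreIndex.from_vector_store(
    vector_store=vector_store, service_context=service_context
)
retriever = index.as_retriever(similarity_top_k=5)
for hit in retriever.retrieve("cinematic portrait photography prompt"):
    print(hit.score, hit.node.get_content()[:80])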
from typing import Union, Optional, List
from llama_index.chat_engine.types import BaseChatEngine, ChatMode
from llama_index.embeddings.utils import EmbedType
from llama_index.chat_engine import ContextChatEngine
from llama_index.memory import ChatMemoryBuffer
from lyzr.base.llm import LyzrLLMFactory
from lyzr.base.service import LyzrService
from lyzr.base.vector_store import LyzrVectorStoreIndex
from lyzr.base.retrievers import LyzrRetriever
from lyzr.utils.document_reading import (
read_pdf_as_documents,
read_docx_as_documents,
read_txt_as_documents,
read_website_as_documents,
read_webpage_as_documents,
read_youtube_as_documents,
)
def pdf_chat_(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
retriever_params: dict = None,
) -> BaseChatEngine:
documents = read_pdf_as_documents(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
)
llm_params = (
{
"model": "gpt-4-0125-preview",
"temperature": 0,
}
if llm_params is None
else llm_params
)
vector_store_params = (
{"vector_store_type": "WeaviateVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
retriever_params = (
{"retriever_type": "QueryFusionRetriever"}
if retriever_params is None
else retriever_params
)
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
retriever = LyzrRetriever.from_defaults(
**retriever_params, base_index=vector_store_index
)
memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
chat_engine = ContextChatEngine(
llm=llm,
memory=memory,
retriever=retriever,
prefix_messages=list(),
**chat_engine_params,
)
return chat_engine
def txt_chat_(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
retriever_params: dict = None,
) -> BaseChatEngine:
documents = read_txt_as_documents(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
)
llm_params = (
{
"model": "gpt-4-0125-preview",
"temperature": 0,
}
if llm_params is None
else llm_params
)
vector_store_params = (
{"vector_store_type": "WeaviateVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
retriever_params = (
{"retriever_type": "QueryFusionRetriever"}
if retriever_params is None
else retriever_params
)
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
retriever = LyzrRetriever.from_defaults(
**retriever_params, base_index=vector_store_index
)
memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
chat_engine = ContextChatEngine(
llm=llm,
memory=memory,
retriever=retriever,
prefix_messages=list(),
**chat_engine_params,
)
return chat_engine
def docx_chat_(
input_dir: Optional[str] = None,
input_files: Optional[List] = None,
exclude_hidden: bool = True,
filename_as_id: bool = True,
recursive: bool = True,
required_exts: Optional[List[str]] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
retriever_params: dict = None,
) -> BaseChatEngine:
documents = read_docx_as_documents(
input_dir=input_dir,
input_files=input_files,
exclude_hidden=exclude_hidden,
filename_as_id=filename_as_id,
recursive=recursive,
required_exts=required_exts,
)
llm_params = (
{
"model": "gpt-4-0125-preview",
"temperature": 0,
}
if llm_params is None
else llm_params
)
vector_store_params = (
{"vector_store_type": "WeaviateVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
retriever_params = (
{"retriever_type": "QueryFusionRetriever"}
if retriever_params is None
else retriever_params
)
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
retriever = LyzrRetriever.from_defaults(
**retriever_params, base_index=vector_store_index
)
memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
chat_engine = ContextChatEngine(
llm=llm,
memory=memory,
retriever=retriever,
prefix_messages=list(),
**chat_engine_params,
)
return chat_engine
def webpage_chat_(
url: str = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
retriever_params: dict = None,
) -> BaseChatEngine:
documents = read_webpage_as_documents(
url=url,
)
llm_params = (
{
"model": "gpt-4-0125-preview",
"temperature": 0,
}
if llm_params is None
else llm_params
)
vector_store_params = (
{"vector_store_type": "WeaviateVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
retriever_params = (
{"retriever_type": "QueryFusionRetriever"}
if retriever_params is None
else retriever_params
)
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
retriever = LyzrRetriever.from_defaults(
**retriever_params, base_index=vector_store_index
)
memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
chat_engine = ContextChatEngine(
llm=llm,
memory=memory,
retriever=retriever,
prefix_messages=list(),
**chat_engine_params,
)
return chat_engine
def website_chat_(
url: str = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
retriever_params: dict = None,
) -> BaseChatEngine:
documents = read_website_as_documents(
url=url,
)
llm_params = (
{
"model": "gpt-4-0125-preview",
"temperature": 0,
}
if llm_params is None
else llm_params
)
vector_store_params = (
{"vector_store_type": "WeaviateVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
retriever_params = (
{"retriever_type": "QueryFusionRetriever"}
if retriever_params is None
else retriever_params
)
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
retriever = LyzrRetriever.from_defaults(
**retriever_params, base_index=vector_store_index
)
memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
chat_engine = ContextChatEngine(
llm=llm,
memory=memory,
retriever=retriever,
prefix_messages=list(),
**chat_engine_params,
)
return chat_engine
def youtube_chat_(
urls: List[str] = None,
system_prompt: str = None,
query_wrapper_prompt: str = None,
embed_model: Union[str, EmbedType] = "default",
llm_params: dict = None,
vector_store_params: dict = None,
service_context_params: dict = None,
chat_engine_params: dict = None,
retriever_params: dict = None,
) -> BaseChatEngine:
documents = read_youtube_as_documents(
urls=urls,
)
llm_params = (
{
"model": "gpt-4-0125-preview",
"temperature": 0,
}
if llm_params is None
else llm_params
)
vector_store_params = (
{"vector_store_type": "WeaviateVectorStore"}
if vector_store_params is None
else vector_store_params
)
service_context_params = (
{} if service_context_params is None else service_context_params
)
chat_engine_params = {} if chat_engine_params is None else chat_engine_params
retriever_params = (
{"retriever_type": "QueryFusionRetriever"}
if retriever_params is None
else retriever_params
)
llm = LyzrLLMFactory.from_defaults(**llm_params)
service_context = LyzrService.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
**service_context_params,
)
vector_store_index = LyzrVectorStoreIndex.from_defaults(
**vector_store_params, documents=documents, service_context=service_context
)
retriever = LyzrRetriever.from_defaults(
**retriever_params, base_index=vector_store_index
)
memory = ChatMemoryBuffer.from_defaults(token_limit=4000)
chat_engine = ContextChatEngine(
llm=llm,
memory=memory,
retriever=retriever,
prefix_messages=list(),
**chat_engine_params,
)
return chat_engine
| [
"llama_index.memory.ChatMemoryBuffer.from_defaults"
] | [((1242, 1430), 'lyzr.utils.document_reading.read_pdf_as_documents', 'read_pdf_as_documents', ([], {'input_dir': 'input_dir', 'input_files': 'input_files', 'exclude_hidden': 'exclude_hidden', 'filename_as_id': 'filename_as_id', 'recursive': 'recursive', 'required_exts': 'required_exts'}), '(input_dir=input_dir, input_files=input_files,\n exclude_hidden=exclude_hidden, filename_as_id=filename_as_id, recursive\n =recursive, required_exts=required_exts)\n', (1263, 1430), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((2161, 2203), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (2189, 2203), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((2226, 2393), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (2251, 2393), False, 'from lyzr.base.service import LyzrService\n'), ((2457, 2573), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (2491, 2573), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((2600, 2678), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (2627, 2678), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((2707, 2755), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (2737, 2755), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((3528, 3716), 'lyzr.utils.document_reading.read_txt_as_documents', 'read_txt_as_documents', ([], {'input_dir': 'input_dir', 'input_files': 'input_files', 'exclude_hidden': 'exclude_hidden', 'filename_as_id': 'filename_as_id', 'recursive': 'recursive', 'required_exts': 'required_exts'}), '(input_dir=input_dir, input_files=input_files,\n exclude_hidden=exclude_hidden, filename_as_id=filename_as_id, recursive\n =recursive, required_exts=required_exts)\n', (3549, 3716), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((4447, 4489), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (4475, 4489), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((4512, 4679), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (4537, 4679), False, 'from lyzr.base.service import LyzrService\n'), ((4743, 4859), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 
'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (4777, 4859), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((4886, 4964), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (4913, 4964), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((4993, 5041), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (5023, 5041), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((5815, 6004), 'lyzr.utils.document_reading.read_docx_as_documents', 'read_docx_as_documents', ([], {'input_dir': 'input_dir', 'input_files': 'input_files', 'exclude_hidden': 'exclude_hidden', 'filename_as_id': 'filename_as_id', 'recursive': 'recursive', 'required_exts': 'required_exts'}), '(input_dir=input_dir, input_files=input_files,\n exclude_hidden=exclude_hidden, filename_as_id=filename_as_id, recursive\n =recursive, required_exts=required_exts)\n', (5837, 6004), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((6735, 6777), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (6763, 6777), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((6800, 6967), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (6825, 6967), False, 'from lyzr.base.service import LyzrService\n'), ((7031, 7147), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (7065, 7147), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((7174, 7252), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (7201, 7252), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((7281, 7329), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (7311, 7329), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((7909, 7943), 'lyzr.utils.document_reading.read_webpage_as_documents', 'read_webpage_as_documents', ([], {'url': 'url'}), '(url=url)\n', (7934, 7943), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((8643, 8685), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (8671, 8685), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((8708, 8875), 
'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (8733, 8875), False, 'from lyzr.base.service import LyzrService\n'), ((8939, 9055), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (8973, 9055), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((9082, 9160), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (9109, 9160), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((9189, 9237), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (9219, 9237), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((9817, 9851), 'lyzr.utils.document_reading.read_website_as_documents', 'read_website_as_documents', ([], {'url': 'url'}), '(url=url)\n', (9842, 9851), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((10551, 10593), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (10579, 10593), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((10616, 10783), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (10641, 10783), False, 'from lyzr.base.service import LyzrService\n'), ((10847, 10963), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (10881, 10963), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((10990, 11068), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (11017, 11068), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((11097, 11145), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (11127, 11145), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((11732, 11768), 'lyzr.utils.document_reading.read_youtube_as_documents', 'read_youtube_as_documents', ([], {'urls': 'urls'}), '(urls=urls)\n', (11757, 11768), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((12468, 12510), 
'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (12496, 12510), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((12533, 12700), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (12558, 12700), False, 'from lyzr.base.service import LyzrService\n'), ((12764, 12880), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (12798, 12880), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((12907, 12985), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (12934, 12985), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((13014, 13062), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (13044, 13062), False, 'from llama_index.memory import ChatMemoryBuffer\n')] |
import json
from util import rm_file
from tqdm import tqdm
import argparse
from copy import deepcopy
import os
from util import JSONReader
import openai
from typing import List, Dict
from llama_index import (
ServiceContext,
OpenAIEmbedding,
PromptHelper,
VectorStoreIndex,
set_global_service_context
)
from llama_index.extractors import BaseExtractor
from llama_index.ingestion import IngestionPipeline
from llama_index.embeddings.cohereai import CohereEmbedding
from llama_index.llms import OpenAI
from llama_index.text_splitter import SentenceSplitter
from llama_index.embeddings import HuggingFaceEmbedding,VoyageEmbedding,InstructorEmbedding
from llama_index.postprocessor import FlagEmbeddingReranker
from llama_index.schema import QueryBundle,MetadataMode
class CustomExtractor(BaseExtractor):
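    """Extract the title, source and published_at fields from each node's metadata."""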
async def aextract(self, nodes) -> List[Dict]:
metadata_list = [
            {
                "title": node.metadata["title"],
                "source": node.metadata["source"],
                "published_at": node.metadata["published_at"],
            }
for node in nodes
]
return metadata_list
if __name__ == '__main__':
openai.api_key = os.environ.get("OPENAI_API_KEY", "your_openai_api_key")
openai.base_url = "your_api_base"
voyage_api_key = os.environ.get("VOYAGE_API_KEY", "your_voyage_api_key")
cohere_api_key = os.environ.get("COHERE_API_KEY", "your_cohere_api_key")
parser = argparse.ArgumentParser(description="running script.")
parser.add_argument('--retriever', type=str, required=True, help='retriever name')
parser.add_argument('--llm', type=str, required=False,default="gpt-3.5-turbo-1106", help='LLMs')
parser.add_argument('--rerank', action='store_true',required=False,default=False, help='if rerank')
parser.add_argument('--topk', type=int, required=False,default=10, help='Top K')
parser.add_argument('--chunk_size', type=int, required=False,default=256, help='chunk_size')
parser.add_argument('--context_window', type=int, required=False,default=2048, help='context_window')
parser.add_argument('--num_output', type=int, required=False,default=256, help='num_output')
args = parser.parse_args()
model_name = args.retriever
rerank = args.rerank
top_k = args.topk
save_model_name = model_name.split('/')
llm = OpenAI(model=args.llm, temperature=0, max_tokens=args.context_window)
# define save file
if rerank:
save_file = f'output/{save_model_name[-1]}_rerank_retrieval_test.json'
else:
save_file = f'output/{save_model_name[-1]}_retrieval_test.json'
rm_file(save_file)
print(f'save_file:{save_file}')
if 'text' in model_name:
# "text-embedding-ada-002" “text-search-ada-query-001”
embed_model = OpenAIEmbedding(model = model_name,embed_batch_size=10)
elif 'Cohere' in model_name:
embed_model = CohereEmbedding(
cohere_api_key=cohere_api_key,
model_name="embed-english-v3.0",
input_type="search_query",
)
elif 'voyage-02' in model_name:
embed_model = VoyageEmbedding(
model_name='voyage-02', voyage_api_key=voyage_api_key
)
elif 'instructor' in model_name:
embed_model = InstructorEmbedding(model_name=model_name)
else:
embed_model = HuggingFaceEmbedding(model_name=model_name, trust_remote_code=True)
# service context
text_splitter = SentenceSplitter(chunk_size=args.chunk_size)
prompt_helper = PromptHelper(
context_window=args.context_window,
num_output=args.num_output,
chunk_overlap_ratio=0.1,
chunk_size_limit=None,
)
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
text_splitter=text_splitter,
prompt_helper=prompt_helper,
)
set_global_service_context(service_context)
reader = JSONReader()
data = reader.load_data('dataset/corpus.json')
# print(data[0])
transformations = [text_splitter,CustomExtractor()]
pipeline = IngestionPipeline(transformations=transformations)
nodes = pipeline.run(documents=data)
nodes_see = deepcopy(nodes)
print(
"LLM sees:\n",
        nodes_see[0].get_content(metadata_mode=MetadataMode.LLM),
)
print('Finish Loading...')
index = VectorStoreIndex(nodes, show_progress=True)
print('Finish Indexing...')
with open('dataset/MultiHopRAG.json', 'r') as file:
query_data = json.load(file)
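    # Optionally load a BGE cross-encoder reranker to rescore the retrieved nodes.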
if rerank:
rerank_postprocessors = FlagEmbeddingReranker(model="BAAI/bge-reranker-large", top_n=top_k)
# test retrieval quality
retrieval_save_list = []
print("start to retrieve...")
for data in tqdm(query_data):
query = data['query']
if rerank:
nodes_score = index.as_retriever(similarity_top_k=20).retrieve(query)
nodes_score = rerank_postprocessors.postprocess_nodes(
nodes_score, query_bundle=QueryBundle(query_str=query)
)
else:
nodes_score = index.as_retriever(similarity_top_k=top_k).retrieve(query)
retrieval_list = []
for ns in nodes_score:
dic = {}
dic['text'] = ns.get_content(metadata_mode=MetadataMode.LLM)
dic['score'] = ns.get_score()
retrieval_list.append(dic)
save = {}
save['query'] = data['query']
save['answer'] = data['answer']
save['question_type'] = data['question_type']
save['retrieval_list'] = retrieval_list
save['gold_list'] = data['evidence_list']
retrieval_save_list.append(save)
with open(save_file, 'w') as json_file:
json.dump(retrieval_save_list, json_file)
| [
"llama_index.embeddings.cohereai.CohereEmbedding",
"llama_index.embeddings.VoyageEmbedding",
"llama_index.ServiceContext.from_defaults",
"llama_index.OpenAIEmbedding",
"llama_index.llms.OpenAI",
"llama_index.ingestion.IngestionPipeline",
"llama_index.set_global_service_context",
"llama_index.schema.QueryBundle",
"llama_index.PromptHelper",
"llama_index.VectorStoreIndex",
"llama_index.text_splitter.SentenceSplitter",
"llama_index.embeddings.HuggingFaceEmbedding",
"llama_index.postprocessor.FlagEmbeddingReranker",
"llama_index.embeddings.InstructorEmbedding"
] | [((1340, 1395), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""', '"""your_openai_api_key"""'], {}), "('OPENAI_API_KEY', 'your_openai_api_key')\n", (1354, 1395), False, 'import os\n'), ((1455, 1510), 'os.environ.get', 'os.environ.get', (['"""VOYAGE_API_KEY"""', '"""your_voyage_api_key"""'], {}), "('VOYAGE_API_KEY', 'your_voyage_api_key')\n", (1469, 1510), False, 'import os\n'), ((1532, 1587), 'os.environ.get', 'os.environ.get', (['"""COHERE_API_KEY"""', '"""your_cohere_api_key"""'], {}), "('COHERE_API_KEY', 'your_cohere_api_key')\n", (1546, 1587), False, 'import os\n'), ((1607, 1661), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""running script."""'}), "(description='running script.')\n", (1630, 1661), False, 'import argparse\n'), ((2504, 2573), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': 'args.llm', 'temperature': '(0)', 'max_tokens': 'args.context_window'}), '(model=args.llm, temperature=0, max_tokens=args.context_window)\n', (2510, 2573), False, 'from llama_index.llms import OpenAI\n'), ((2778, 2796), 'util.rm_file', 'rm_file', (['save_file'], {}), '(save_file)\n', (2785, 2796), False, 'from util import rm_file\n'), ((3610, 3654), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'args.chunk_size'}), '(chunk_size=args.chunk_size)\n', (3626, 3654), False, 'from llama_index.text_splitter import SentenceSplitter\n'), ((3675, 3803), 'llama_index.PromptHelper', 'PromptHelper', ([], {'context_window': 'args.context_window', 'num_output': 'args.num_output', 'chunk_overlap_ratio': '(0.1)', 'chunk_size_limit': 'None'}), '(context_window=args.context_window, num_output=args.num_output,\n chunk_overlap_ratio=0.1, chunk_size_limit=None)\n', (3687, 3803), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper, VectorStoreIndex, set_global_service_context\n'), ((3861, 3985), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'text_splitter': 'text_splitter', 'prompt_helper': 'prompt_helper'}), '(llm=llm, embed_model=embed_model,\n text_splitter=text_splitter, prompt_helper=prompt_helper)\n', (3889, 3985), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper, VectorStoreIndex, set_global_service_context\n'), ((4025, 4068), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (4051, 4068), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper, VectorStoreIndex, set_global_service_context\n'), ((4083, 4095), 'util.JSONReader', 'JSONReader', ([], {}), '()\n', (4093, 4095), False, 'from util import JSONReader\n'), ((4250, 4300), 'llama_index.ingestion.IngestionPipeline', 'IngestionPipeline', ([], {'transformations': 'transformations'}), '(transformations=transformations)\n', (4267, 4300), False, 'from llama_index.ingestion import IngestionPipeline\n'), ((4358, 4373), 'copy.deepcopy', 'deepcopy', (['nodes'], {}), '(nodes)\n', (4366, 4373), False, 'from copy import deepcopy\n'), ((4526, 4569), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'show_progress': '(True)'}), '(nodes, show_progress=True)\n', (4542, 4569), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper, VectorStoreIndex, set_global_service_context\n'), ((4921, 4937), 'tqdm.tqdm', 'tqdm', (['query_data'], {}), '(query_data)\n', (4925, 4937), False, 'from tqdm import tqdm\n'), ((2948, 3002), 
'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': 'model_name', 'embed_batch_size': '(10)'}), '(model=model_name, embed_batch_size=10)\n', (2963, 3002), False, 'from llama_index import ServiceContext, OpenAIEmbedding, PromptHelper, VectorStoreIndex, set_global_service_context\n'), ((4680, 4695), 'json.load', 'json.load', (['file'], {}), '(file)\n', (4689, 4695), False, 'import json\n'), ((4744, 4811), 'llama_index.postprocessor.FlagEmbeddingReranker', 'FlagEmbeddingReranker', ([], {'model': '"""BAAI/bge-reranker-large"""', 'top_n': 'top_k'}), "(model='BAAI/bge-reranker-large', top_n=top_k)\n", (4765, 4811), False, 'from llama_index.postprocessor import FlagEmbeddingReranker\n'), ((5926, 5967), 'json.dump', 'json.dump', (['retrieval_save_list', 'json_file'], {}), '(retrieval_save_list, json_file)\n', (5935, 5967), False, 'import json\n'), ((3059, 3170), 'llama_index.embeddings.cohereai.CohereEmbedding', 'CohereEmbedding', ([], {'cohere_api_key': 'cohere_api_key', 'model_name': '"""embed-english-v3.0"""', 'input_type': '"""search_query"""'}), "(cohere_api_key=cohere_api_key, model_name=\n 'embed-english-v3.0', input_type='search_query')\n", (3074, 3170), False, 'from llama_index.embeddings.cohereai import CohereEmbedding\n'), ((3271, 3341), 'llama_index.embeddings.VoyageEmbedding', 'VoyageEmbedding', ([], {'model_name': '"""voyage-02"""', 'voyage_api_key': 'voyage_api_key'}), "(model_name='voyage-02', voyage_api_key=voyage_api_key)\n", (3286, 3341), False, 'from llama_index.embeddings import HuggingFaceEmbedding, VoyageEmbedding, InstructorEmbedding\n'), ((3423, 3465), 'llama_index.embeddings.InstructorEmbedding', 'InstructorEmbedding', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (3442, 3465), False, 'from llama_index.embeddings import HuggingFaceEmbedding, VoyageEmbedding, InstructorEmbedding\n'), ((3498, 3565), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'model_name', 'trust_remote_code': '(True)'}), '(model_name=model_name, trust_remote_code=True)\n', (3518, 3565), False, 'from llama_index.embeddings import HuggingFaceEmbedding, VoyageEmbedding, InstructorEmbedding\n'), ((5190, 5218), 'llama_index.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (5201, 5218), False, 'from llama_index.schema import QueryBundle, MetadataMode\n')] |
import pinecone
import torch
import numpy as np
import torchvision.transforms as T
from PIL import Image
import os
import tqdm
import openai
import hashlib
import io
from gradio_client import Client
from monitor import Monitor, monitoring
from llama_index.vector_stores import PineconeVectorStore
from llama_index import VectorStoreIndex
# from llama_index.storage.storage_context import StorageContext
# from llama_index.vector_stores import PineconeVectorStore
# from llama_index.llms import OpenAI
# from llama_index import (
# VectorStoreIndex,
# SimpleWebPageReader,
# LLMPredictor,
# ServiceContext
# )
# from trulens_eval import TruLlama, Feedback, Tru, feedback
# from trulens_eval.feedback import GroundTruthAgreement, Groundedness
from pathlib import Path
from trulens_eval import Feedback, Tru, TruLlama
from trulens_eval.feedback import Groundedness
from trulens_eval.feedback.provider.openai import OpenAI
tru = Tru()
import numpy as np
# Initialize provider class
openai_tl = OpenAI()
grounded = Groundedness(groundedness_provider=OpenAI())
# Define a groundedness feedback function
f_groundedness = (
    Feedback(grounded.groundedness_measure_with_cot_reasons)
    .on(TruLlama.select_source_nodes().node.text)
    .on_output()
    .aggregate(grounded.grounded_statements_aggregator)
)
# Question/answer relevance between overall question and answer.
f_qa_relevance = Feedback(openai_tl.relevance).on_input_output()
# Question/statement relevance between question and each context chunk.
f_qs_relevance = (
    Feedback(openai_tl.qs_relevance)
    .on_input()
    .on(TruLlama.select_source_nodes().node.text)
    .aggregate(np.mean)
)
index_name = "medical-images"
client = Client("https://42976740ac53ddbe7d.gradio.live/")
PINECONE_API_KEY = os.getenv('PINECONE_API_KEY')
PINECONE_ENVIRONMENT = os.getenv('PINECONE_ENVIRONMENT')
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENVIRONMENT
)
index = pinecone.Index(index_name)
vector_store = PineconeVectorStore(pinecone_index=index)
l_index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
query_engine = l_index.as_query_engine()
tru_query_engine_recorder = TruLlama(query_engine,
app_id='LlamaIndex_App1',
feedbacks=[f_groundedness, f_qa_relevance, f_qs_relevance])
dinov2_vits14 = torch.hub.load("facebookresearch/dinov2", "dinov2_vits14")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dinov2_vits14.to(device)
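# Preprocessing for DINOv2: convert to tensor, resize, center-crop to 224, and normalize.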
transform_image = T.Compose([T.ToTensor(),
T.Resize(224),
T.CenterCrop(224),
T.Normalize([0.5], [0.5])])
@Monitor.monitor
def compute_embedding(file) -> np.ndarray:
    """
    Compute a DINOv2 embedding for the given image file and pad it to the index dimension.
    """
with torch.no_grad():
embedding = dinov2_vits14(load_image(file).to(device))
print(f"embedding shape before: {embedding.shape}")
embeddings_numpy = np.array(embedding[0].cpu().numpy()).reshape(1, -1)
padded_embedding = pad_embedding(embeddings_numpy)
print(f"embedding shape after padding: {padded_embedding.shape}")
return padded_embedding
@Monitor.monitor
def load_image(file) -> torch.Tensor:
"""
    Load an image and return a tensor that can be used as an input to DINOv2.
"""
# Assuming it's PNG or JPEG
img = Image.open(file).convert("RGB")
transformed_img = transform_image(img)[:3].unsqueeze(0)
return transformed_img
@Monitor.monitor
def pad_embedding(embedding: np.ndarray, target_dim: int = 512) -> np.ndarray:
"""
Pad the given embedding with zeros to match the target dimension.
"""
original_dim = embedding.shape[1]
padding_dim = target_dim - original_dim
if padding_dim > 0:
padding = np.zeros((1, padding_dim))
padded_embedding = np.hstack([embedding, padding])
else:
padded_embedding = embedding
return padded_embedding
@Monitor.monitor
def add_embedding_to_index(id: str, embedding):
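    """Upsert a single flattened embedding into the Pinecone index under the given id."""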
single_vector = {
'id': id,
'values': embedding.flatten().tolist(),
'metadata': {'modality': 'mri'}
}
upsert_response = index.upsert(vectors=[single_vector])
print(f"Inserted {single_vector}")
@Monitor.monitor
def img_to_vector_db(img_path, index):
embedding = compute_embedding(img_path)
add_embedding_to_index(id=str(index), embedding=embedding)
def hash_file(image_path: str) -> str:
"""
Hash the filename to create a unique ID.
"""
filename = image_path.split("/")[-1]
unique_id = hashlib.sha256(filename.encode()).hexdigest()
return unique_id
@Monitor.monitor
def retrieve(embedding):
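    """Return the report texts and image URLs of the top-3 most similar entries in the index."""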
response = index.query(
vector=embedding.flatten().tolist(),
top_k=3,
include_values=True,
include_metadata=True
)
    result = [m["metadata"]["report"] for m in response["matches"]]
urls = []
for m in response["matches"]:
if "download_path" in m["metadata"]:
urls.append(m["metadata"]["download_path"])
return result, urls
@Monitor.monitor
def generate_response(result, query, li_response):
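    """Ask GPT-3.5 for a radiologic diagnosis based on features shared across the retrieved reports."""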
result = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system",
"content":
"""
Objective: Generate a concise radiologic diagnosis based on SHARED FEATURES from the provided radiology reports.
Definition of SHARED FEATURES: Features that appear in more than one report. Features unique to a single report are not considered SHARED.
Instructions:
Analyze the provided radiology reports.
Identify any SHARED FEATURES, these should be the diagnosis and not radiologic features.
If SHARED FEATURES are found, provide a radiologic diagnosis in one sentence.
If no SHARED FEATURES are identified, simply state: "Radiologic Diagnosis: Diagnosis not possible."
Return the reports summarized as well.
"""
},
{"role": "assistant", "content": "Reports:"+ "\n-".join(result)},
{"role": "user", "content": query},
]
,
temperature=0)
return result
@Monitor.monitor
def llama_index_response(query, result):
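    """Answer the query over a SummaryIndex built from the retrieved reports, with TruLens recording."""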
from llama_index import SummaryIndex
from llama_index.schema import TextNode
index = SummaryIndex([TextNode(text=r) for r in result])
summary_query_engine = index.as_query_engine()
tru_query_engine_recorder_tmp = TruLlama(summary_query_engine,
app_id='LlamaIndex_App1',
feedbacks=[f_groundedness, f_qa_relevance, f_qs_relevance])
with tru_query_engine_recorder_tmp as recording:
li_response = summary_query_engine.query(query)
return li_response
def predict(file, query):
embedding = compute_embedding(file)
retrieved_result, urls = retrieve(embedding)
li_response = llama_index_response(query, retrieved_result)
result = generate_response(retrieved_result, query, li_response)
result = result['choices'][0]['message']['content']
result = "**Retrieved Reports:** " + ' \n'.join(retrieved_result) + " \n**Images:** " + (' \n').join(urls) + " \n **Final Diagnosis:** " + result
return result
# result = predict(img_path=img_path)
# print(f"ID: {result['matches'][0]['id']} | Similarity score: {round(result['matches'][0]['score'], 2)}")
# new_img
| [
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.schema.TextNode",
"llama_index.vector_stores.PineconeVectorStore"
] | [((945, 950), 'trulens_eval.Tru', 'Tru', ([], {}), '()\n', (948, 950), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((1012, 1020), 'trulens_eval.feedback.provider.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (1018, 1020), False, 'from trulens_eval.feedback.provider.openai import OpenAI\n'), ((1697, 1746), 'gradio_client.Client', 'Client', (['"""https://42976740ac53ddbe7d.gradio.live/"""'], {}), "('https://42976740ac53ddbe7d.gradio.live/')\n", (1703, 1746), False, 'from gradio_client import Client\n'), ((1766, 1795), 'os.getenv', 'os.getenv', (['"""PINECONE_API_KEY"""'], {}), "('PINECONE_API_KEY')\n", (1775, 1795), False, 'import os\n'), ((1819, 1852), 'os.getenv', 'os.getenv', (['"""PINECONE_ENVIRONMENT"""'], {}), "('PINECONE_ENVIRONMENT')\n", (1828, 1852), False, 'import os\n'), ((1854, 1927), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'PINECONE_API_KEY', 'environment': 'PINECONE_ENVIRONMENT'}), '(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)\n', (1867, 1927), False, 'import pinecone\n'), ((1947, 1973), 'pinecone.Index', 'pinecone.Index', (['index_name'], {}), '(index_name)\n', (1961, 1973), False, 'import pinecone\n'), ((1989, 2030), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'index'}), '(pinecone_index=index)\n', (2008, 2030), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((2041, 2102), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2075, 2102), False, 'from llama_index import VectorStoreIndex\n'), ((2173, 2285), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': '"""LlamaIndex_App1"""', 'feedbacks': '[f_groundedness, f_qa_relevance, f_qs_relevance]'}), "(query_engine, app_id='LlamaIndex_App1', feedbacks=[f_groundedness,\n f_qa_relevance, f_qs_relevance])\n", (2181, 2285), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((2308, 2366), 'torch.hub.load', 'torch.hub.load', (['"""facebookresearch/dinov2"""', '"""dinov2_vits14"""'], {}), "('facebookresearch/dinov2', 'dinov2_vits14')\n", (2322, 2366), False, 'import torch\n'), ((6490, 6611), 'trulens_eval.TruLlama', 'TruLlama', (['summary_query_engine'], {'app_id': '"""LlamaIndex_App1"""', 'feedbacks': '[f_groundedness, f_qa_relevance, f_qs_relevance]'}), "(summary_query_engine, app_id='LlamaIndex_App1', feedbacks=[\n f_groundedness, f_qa_relevance, f_qs_relevance])\n", (6498, 6611), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((1068, 1076), 'trulens_eval.feedback.provider.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (1074, 1076), False, 'from trulens_eval.feedback.provider.openai import OpenAI\n'), ((1401, 1430), 'trulens_eval.Feedback', 'Feedback', (['openai_tl.relevance'], {}), '(openai_tl.relevance)\n', (1409, 1430), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((2399, 2424), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2422, 2424), False, 'import torch\n'), ((2492, 2504), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2502, 2504), True, 'import torchvision.transforms as T\n'), ((2535, 2548), 'torchvision.transforms.Resize', 'T.Resize', (['(224)'], {}), '(224)\n', (2543, 2548), True, 'import torchvision.transforms as T\n'), ((2579, 2596), 'torchvision.transforms.CenterCrop', 'T.CenterCrop', (['(224)'], {}), '(224)\n', (2591, 2596), True, 'import torchvision.transforms as T\n'), ((2627, 2652), 
'torchvision.transforms.Normalize', 'T.Normalize', (['[0.5]', '[0.5]'], {}), '([0.5], [0.5])\n', (2638, 2652), True, 'import torchvision.transforms as T\n'), ((2820, 2835), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2833, 2835), False, 'import torch\n'), ((3848, 3874), 'numpy.zeros', 'np.zeros', (['(1, padding_dim)'], {}), '((1, padding_dim))\n', (3856, 3874), True, 'import numpy as np\n'), ((3902, 3933), 'numpy.hstack', 'np.hstack', (['[embedding, padding]'], {}), '([embedding, padding])\n', (3911, 3933), True, 'import numpy as np\n'), ((3415, 3431), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (3425, 3431), False, 'from PIL import Image\n'), ((6367, 6383), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'r'}), '(text=r)\n', (6375, 6383), False, 'from llama_index.schema import TextNode\n'), ((1591, 1621), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1619, 1621), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((1138, 1194), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {}), '(grounded.groundedness_measure_with_cot_reasons)\n', (1146, 1194), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((1539, 1571), 'trulens_eval.Feedback', 'Feedback', (['openai_tl.qs_relevance'], {}), '(openai_tl.qs_relevance)\n', (1547, 1571), False, 'from trulens_eval import Feedback, Tru, TruLlama\n'), ((1203, 1233), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1231, 1233), False, 'from trulens_eval import Feedback, Tru, TruLlama\n')] |
############################################################################################################################
# In this section, we set the user authentication, model URL, and prompt text. Alternatively, set the user and app ID,
# and model name. Change these strings to run your own example.
###########################################################################################################################
PAT = "YOUR_PAT_HERE"
MODEL_URL = "https://clarifai.com/cohere/embed/models/cohere-text-to-embeddings"
PROMPT = "Hello World!"
# Alternatively, you can specify user ID, app ID, and model name
#USER_ID = "cohere"
#APP_ID = "embed"
#MODEL_NAME = "cohere-text-to-embeddings"
############################################################################
# YOU DO NOT NEED TO CHANGE ANYTHING BELOW THIS LINE TO RUN THIS EXAMPLE
############################################################################
# Import the required packages
import os
from llama_index.embeddings.clarifai import ClarifaiEmbedding
# Set Clarifai PAT as environment variable
os.environ["CLARIFAI_PAT"] = PAT
# Initialize the LLM class
embed_model = ClarifaiEmbedding(model_url=MODEL_URL)
# Alternatively
# embed_model = ClarifaiEmbedding(
# user_id=USER_ID,
# app_id=APP_ID,
# model_name=MODEL_NAME
# )
embeddings = embed_model.get_text_embedding(PROMPT)
print(len(embeddings))
# Print the first five elements of embeddings list
print(embeddings[:5])
| [
"llama_index.embeddings.clarifai.ClarifaiEmbedding"
] | [((1158, 1196), 'llama_index.embeddings.clarifai.ClarifaiEmbedding', 'ClarifaiEmbedding', ([], {'model_url': 'MODEL_URL'}), '(model_url=MODEL_URL)\n', (1175, 1196), False, 'from llama_index.embeddings.clarifai import ClarifaiEmbedding\n')] |
# Copyright 2023 Qarik Group, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import threading
from datetime import datetime
from pathlib import Path
from typing import Any, List
from common import admin_dao, constants, gcs_tools, solution
from common.cache import cache
from common.log import Logger, log, log_params
from langchain.llms.openai import OpenAIChat
from llama_index import (Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext,
SimpleDirectoryReader, StorageContext, load_index_from_storage)
from llama_index.indices.composability import ComposableGraph
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.query.query_transform.base import DecomposeQueryTransform
# import google.generativeai as palm
# from llama_index.query_engine.router_query_engine import RouterQueryEngine
from llama_index.query_engine.transform_query_engine import TransformQueryEngine
# from llama_index.selectors.llm_selectors import LLMSingleSelector
# from llama_index.tools.query_engine import QueryEngineTool
logger = Logger(__name__).get_logger()
logger.info('Initializing...')
DATA_LOAD_LOCK = threading.Lock()
"""Block many concurrent data loads at once."""
LLAMA_FILE_LOCK = threading.Lock()
"""Lock to prevent concurrent updates of the same index - needed in case we have more than one request processing."""
INDEX_BUCKET: str = solution.getenv('EMBEDDINGS_BUCKET_NAME')
"""Location to download llama-index embeddings from."""
LAST_LOCAL_INDEX_UPDATE: datetime | None = None
"""Keep track of the most recent local index update to avoid unnecessary refreshes."""
if solution.LOCAL_DEVELOPMENT_MODE:
LLAMA_INDEX_DIR: str = 'dev/tmp/llamaindex-embeddings'
else:
LLAMA_INDEX_DIR = 'tmp/llamaindex-embeddings'
LOCAL_DEV_DATA_DIR: str = 'dev/tmp'
"""Location of the local data directory for development on local machine."""
@log
def _get_llm(provider: constants.LlmProvider) -> LLMPredictor:
"""Return LLM predictor."""
if provider == constants.LlmProvider.OPEN_AI:
llm = LLMPredictor(llm=OpenAIChat(temperature=constants.TEMPERATURE, model_name=constants.GPT_MODEL)) # type: ignore
else:
raise ValueError(f'Unknown LLM provider: {provider}')
return llm
@log_params
def load_resumes(resume_dir: str | None) -> dict[str, List[Document]]:
"""Initialize list of resumes from index storage or from the directory with PDF source files."""
resumes: dict[str, List[Document]] = {}
if resume_dir is None:
resume_dir = ''
resume_path = Path(resume_dir)
index_path = Path(LLAMA_INDEX_DIR)
global DATA_LOAD_LOCK
with DATA_LOAD_LOCK:
if index_path.exists():
logger.info('Loading people names (not resumes) from existing index storage...')
names = glob.glob(f'{index_path}/*',)
if len(names):
for file_name in names:
# We do not care about the contents of the resume because it will be loaded from index
# All we care for here is the name - aka the Key, not Value
resumes[Path(file_name).name] = []
return resumes
else:
logger.warning('No resumes found in the index directory: %s', index_path)
logger.warning('Removing the index storage directory: %s', index_path)
Path.rmdir(index_path)
logger.info('Loading people names from the source dir with resume PDF files...')
Path.mkdir(resume_path, parents=True, exist_ok=True)
# Check if there are any pdf files in the data directory
pdf_files = glob.glob(f'{resume_path}/*.pdf')
if len(pdf_files):
# Each resume shall be named as '<person_name>.pdf' optionally with 'resume' suffix
for resume in pdf_files:
person_name = os.path.basename(resume).replace('.pdf', '').replace(
'Resume', '').replace('resume', '').replace('_', ' ').strip()
logger.debug(f'Loading: {person_name}')
resume_content = SimpleDirectoryReader(input_files=[resume]).load_data()
resumes[person_name] = resume_content
else:
logger.warning('No resume PDF files found in the data directory: %s', resume_path)
return resumes
@log
def _load_resume_indices(resumes: dict[str, List[Document]],
service_context: ServiceContext, embeddings_dir: str) -> dict[str, GPTVectorStoreIndex]:
"""Load or create index storage contexts for each person in the resumes list."""
vector_indices = {}
for person_name, resume_data in resumes.items():
cache_file_path = Path(f'./{embeddings_dir}/{person_name}')
if cache_file_path.exists():
logger.debug('Loading index from storage file: %s', cache_file_path)
storage_context = StorageContext.from_defaults(persist_dir=str(cache_file_path))
vector_indices[person_name] = load_index_from_storage(storage_context=storage_context)
else:
storage_context = StorageContext.from_defaults()
# build vector index
vector_indices[person_name] = GPTVectorStoreIndex.from_documents(
resume_data,
service_context=service_context,
storage_context=storage_context,
)
# set id for vector index
# vector_indices[person_name].index_struct.index_id = person_name
vector_indices[person_name].set_index_id(person_name)
logger.debug('Saving index to storage file: %s', cache_file_path)
storage_context.persist(persist_dir=str(cache_file_path))
# ------------------- Test
# name = 'Roman Kharkovski'
# test_query = f'What are the main skills for {name}?'
# logger.debug('Test query: %s', test_query)
# response = vector_indices[f'{name}'].as_query_engine().query(test_query)
# logger.debug('Response: %s', str(response))
# exit(0)
# ------------------- end of test
return vector_indices # type: ignore
@log
def _load_resume_index_summary(resumes: dict[str, Any]) -> dict[str, str]:
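    """Build a short description of each person's resume index for use in the composable graph."""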
index_summaries = {}
for person_name in resumes.keys():
# index_summaries[person_name] = (f'Use this index if you need to lookup specific facts about {person_name}.')
index_summaries[person_name] = (f'This content contains resume of {person_name}.\n'
f'Use this index if you need to lookup specific facts about {person_name}.\n'
'Do not confuse people with the same lastname, but different first names.'
'If you cant find the answer, respond with the best of your knowledge.'
'Do not use this index if you want to analyze multiple people.')
return index_summaries
@log_params
def generate_embeddings(resume_dir: str, provider: constants.LlmProvider) -> None:
"""Generate embeddings from PDF resumes."""
resumes = load_resumes(resume_dir=resume_dir)
if not resumes:
return None
predictor = _get_llm(provider=provider)
context = ServiceContext.from_defaults(llm_predictor=predictor, chunk_size_limit=constants.CHUNK_SIZE)
_load_resume_indices(resumes=resumes, service_context=context, embeddings_dir=LLAMA_INDEX_DIR)
@log_params
def _get_resume_query_engine(provider: constants.LlmProvider, resume_dir: str | None = None) -> BaseQueryEngine | None:
"""Load the index from disk, or build it if it doesn't exist."""
llm = _get_llm(provider=provider)
service_context = ServiceContext.from_defaults(llm_predictor=llm, chunk_size_limit=constants.CHUNK_SIZE)
resumes: dict[str, List[Document]] = load_resumes(resume_dir=resume_dir)
logger.debug('-------------------------- resumes: %s', resumes.keys())
if not resumes:
return None
# vector_indices = load_resume_indices(resumes, service_context)
vector_indices = _load_resume_indices(resumes=resumes, service_context=service_context,
embeddings_dir=LLAMA_INDEX_DIR)
index_summaries = _load_resume_index_summary(resumes)
graph = ComposableGraph.from_indices(root_index_cls=GPTSimpleKeywordTableIndex,
children_indices=[index for _, index in vector_indices.items()],
index_summaries=[summary for _, summary in index_summaries.items()],
max_keywords_per_chunk=constants.MAX_KEYWORDS_PER_CHUNK)
# root_index = graph.get_index(graph.root_id)
root_index = graph.get_index(index_struct_id=graph.root_id)
root_index.set_index_id('compare_contrast')
graph.index_struct.summary = ('This index contains resumes of multiple people. '
'Do not confuse people with the same lastname, but different first names.'
'Use this index if you want to compare multiple people.')
decompose_transform = DecomposeQueryTransform(llm, verbose=True)
custom_query_engines = {}
for index in vector_indices.values():
query_engine = index.as_query_engine(service_context=service_context,
similarity_top_k=constants.SIMILARITY_TOP_K)
query_engine = TransformQueryEngine(query_engine=query_engine,
query_transform=decompose_transform,
transform_metadata={'index_summary': index.index_struct.summary},
) # type: ignore
custom_query_engines[index.index_id] = query_engine
custom_query_engines[graph.root_id] = graph.root_index.as_query_engine(
retriever_mode='simple',
response_mode='tree_summarize',
service_context=service_context,
verbose=True,
use_async=True,
)
graph_query_engine = graph.as_query_engine(custom_query_engines=custom_query_engines)
# ------------------- Test
# name1 = 'Roman Kharkovski'
# name2 = 'Steven Kim'
# response = graph_query_engine.query(f'Compare and contrast the skills of {name1} and {name2}.')
# logger.debug('Response: %s', str(response))
# ------------------- end of test
return graph_query_engine
# TODO: the query engine tool does not longer work - need to debug
# query_engine_tools = []
# # add vector index tools
# for person_name in resumes.keys():
# index = vector_indices[person_name]
# summary = index_summaries[person_name]
# query_engine = index.as_query_engine(service_context=service_context)
# vector_tool = QueryEngineTool.from_defaults(query_engine=query_engine, description=summary)
# query_engine_tools.append(vector_tool)
# # add graph tool
# graph_tool = QueryEngineTool.from_defaults(graph_query_engine, description=graph.index_struct.summary)
# query_engine_tools.append(graph_tool)
# router_query_engine = RouterQueryEngine.from_defaults(selector=LLMSingleSelector.from_defaults(
# service_context=service_context), query_engine_tools=query_engine_tools)
# return router_query_engine
@cache
@log
def _refresh_llama_index() -> None:
"""Refresh the index of resumes from the database using Llama-Index."""
global LAST_LOCAL_INDEX_UPDATE
if solution.LOCAL_DEVELOPMENT_MODE:
logger.info('Running in local development mode')
index_path = Path(LLAMA_INDEX_DIR)
if not index_path.exists():
# TODO - need to generate proper embeddings for each provider, not hard coded
generate_embeddings(resume_dir=LOCAL_DEV_DATA_DIR, provider=constants.LlmProvider.OPEN_AI)
return
global LLAMA_FILE_LOCK
last_resume_refresh = admin_dao.AdminDAO().get_resumes_timestamp()
if LAST_LOCAL_INDEX_UPDATE is None or LAST_LOCAL_INDEX_UPDATE < last_resume_refresh:
logger.info('Refreshing local index of resumes...')
# Prevent concurrent updates of the same index - needed in case we have more than one request processing
with LLAMA_FILE_LOCK:
# Check for condition again because the index may have been updated while we were waiting for the lock
if LAST_LOCAL_INDEX_UPDATE is None or LAST_LOCAL_INDEX_UPDATE < last_resume_refresh:
gcs_tools.download(bucket_name=INDEX_BUCKET, local_dir=LLAMA_INDEX_DIR)
return last_resume_refresh
logger.info('Skipping refresh of resumes index because no changes in source resumes were detected.')
LAST_LOCAL_INDEX_UPDATE = last_resume_refresh
@log
def query(question: str) -> str:
"""Run LLM query for CHatGPT."""
_refresh_llama_index()
query_engine = _get_resume_query_engine(provider=constants.LlmProvider.OPEN_AI)
if query_engine is None:
raise SystemError('No resumes found in the database. Please upload resumes.')
return str(query_engine.query(question))
| [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.query_engine.transform_query_engine.TransformQueryEngine",
"llama_index.StorageContext.from_defaults",
"llama_index.indices.query.query_transform.base.DecomposeQueryTransform",
"llama_index.load_index_from_storage",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((1710, 1726), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1724, 1726), False, 'import threading\n'), ((1794, 1810), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1808, 1810), False, 'import threading\n'), ((1950, 1991), 'common.solution.getenv', 'solution.getenv', (['"""EMBEDDINGS_BUCKET_NAME"""'], {}), "('EMBEDDINGS_BUCKET_NAME')\n", (1965, 1991), False, 'from common import admin_dao, constants, gcs_tools, solution\n'), ((3114, 3130), 'pathlib.Path', 'Path', (['resume_dir'], {}), '(resume_dir)\n', (3118, 3130), False, 'from pathlib import Path\n'), ((3148, 3169), 'pathlib.Path', 'Path', (['LLAMA_INDEX_DIR'], {}), '(LLAMA_INDEX_DIR)\n', (3152, 3169), False, 'from pathlib import Path\n'), ((7800, 7897), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'predictor', 'chunk_size_limit': 'constants.CHUNK_SIZE'}), '(llm_predictor=predictor, chunk_size_limit=\n constants.CHUNK_SIZE)\n', (7828, 7897), False, 'from llama_index import Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((8255, 8346), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm', 'chunk_size_limit': 'constants.CHUNK_SIZE'}), '(llm_predictor=llm, chunk_size_limit=constants.\n CHUNK_SIZE)\n', (8283, 8346), False, 'from llama_index import Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((9702, 9744), 'llama_index.indices.query.query_transform.base.DecomposeQueryTransform', 'DecomposeQueryTransform', (['llm'], {'verbose': '(True)'}), '(llm, verbose=True)\n', (9725, 9744), False, 'from llama_index.indices.query.query_transform.base import DecomposeQueryTransform\n'), ((1631, 1647), 'common.log.Logger', 'Logger', (['__name__'], {}), '(__name__)\n', (1637, 1647), False, 'from common.log import Logger, log, log_params\n'), ((4069, 4121), 'pathlib.Path.mkdir', 'Path.mkdir', (['resume_path'], {'parents': '(True)', 'exist_ok': '(True)'}), '(resume_path, parents=True, exist_ok=True)\n', (4079, 4121), False, 'from pathlib import Path\n'), ((4208, 4241), 'glob.glob', 'glob.glob', (['f"""{resume_path}/*.pdf"""'], {}), "(f'{resume_path}/*.pdf')\n", (4217, 4241), False, 'import glob\n'), ((5266, 5307), 'pathlib.Path', 'Path', (['f"""./{embeddings_dir}/{person_name}"""'], {}), "(f'./{embeddings_dir}/{person_name}')\n", (5270, 5307), False, 'from pathlib import Path\n'), ((10009, 10169), 'llama_index.query_engine.transform_query_engine.TransformQueryEngine', 'TransformQueryEngine', ([], {'query_engine': 'query_engine', 'query_transform': 'decompose_transform', 'transform_metadata': "{'index_summary': index.index_struct.summary}"}), "(query_engine=query_engine, query_transform=\n decompose_transform, transform_metadata={'index_summary': index.\n index_struct.summary})\n", (10029, 10169), False, 'from llama_index.query_engine.transform_query_engine import TransformQueryEngine\n'), ((12194, 12215), 'pathlib.Path', 'Path', (['LLAMA_INDEX_DIR'], {}), '(LLAMA_INDEX_DIR)\n', (12198, 12215), False, 'from pathlib import Path\n'), ((3366, 3394), 'glob.glob', 'glob.glob', (['f"""{index_path}/*"""'], {}), "(f'{index_path}/*')\n", (3375, 3394), False, 'import glob\n'), ((5561, 5617), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context'}), 
'(storage_context=storage_context)\n', (5584, 5617), False, 'from llama_index import Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((5662, 5692), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (5690, 5692), False, 'from llama_index import Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((5768, 5886), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['resume_data'], {'service_context': 'service_context', 'storage_context': 'storage_context'}), '(resume_data, service_context=\n service_context, storage_context=storage_context)\n', (5802, 5886), False, 'from llama_index import Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((12514, 12534), 'common.admin_dao.AdminDAO', 'admin_dao.AdminDAO', ([], {}), '()\n', (12532, 12534), False, 'from common import admin_dao, constants, gcs_tools, solution\n'), ((2633, 2710), 'langchain.llms.openai.OpenAIChat', 'OpenAIChat', ([], {'temperature': 'constants.TEMPERATURE', 'model_name': 'constants.GPT_MODEL'}), '(temperature=constants.TEMPERATURE, model_name=constants.GPT_MODEL)\n', (2643, 2710), False, 'from langchain.llms.openai import OpenAIChat\n'), ((3948, 3970), 'pathlib.Path.rmdir', 'Path.rmdir', (['index_path'], {}), '(index_path)\n', (3958, 3970), False, 'from pathlib import Path\n'), ((13079, 13150), 'common.gcs_tools.download', 'gcs_tools.download', ([], {'bucket_name': 'INDEX_BUCKET', 'local_dir': 'LLAMA_INDEX_DIR'}), '(bucket_name=INDEX_BUCKET, local_dir=LLAMA_INDEX_DIR)\n', (13097, 13150), False, 'from common import admin_dao, constants, gcs_tools, solution\n'), ((4658, 4701), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[resume]'}), '(input_files=[resume])\n', (4679, 4701), False, 'from llama_index import Document, GPTSimpleKeywordTableIndex, GPTVectorStoreIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((3679, 3694), 'pathlib.Path', 'Path', (['file_name'], {}), '(file_name)\n', (3683, 3694), False, 'from pathlib import Path\n'), ((4433, 4457), 'os.path.basename', 'os.path.basename', (['resume'], {}), '(resume)\n', (4449, 4457), False, 'import os\n')] |
import os
from dotenv import load_dotenv, find_dotenv
import numpy as np
from trulens_eval import (
Feedback,
TruLlama,
OpenAI
)
from trulens_eval.feedback import Groundedness
import nest_asyncio
nest_asyncio.apply()
def get_openai_api_key():
_ = load_dotenv(find_dotenv())
return os.getenv("OPENAI_API_KEY")
def get_hf_api_key():
_ = load_dotenv(find_dotenv())
return os.getenv("HUGGINGFACE_API_KEY")
openai = OpenAI()
qa_relevance = (
Feedback(openai.relevance_with_cot_reasons, name="Answer Relevance")
.on_input_output()
)
qs_relevance = (
Feedback(openai.relevance_with_cot_reasons, name = "Context Relevance")
.on_input()
.on(TruLlama.select_source_nodes().node.text)
.aggregate(np.mean)
)
#grounded = Groundedness(groundedness_provider=openai, summarize_provider=openai)
grounded = Groundedness(groundedness_provider=openai)
groundedness = (
Feedback(grounded.groundedness_measure_with_cot_reasons, name="Groundedness")
.on(TruLlama.select_source_nodes().node.text)
.on_output()
.aggregate(grounded.grounded_statements_aggregator)
)
feedbacks = [qa_relevance, qs_relevance, groundedness]
def get_trulens_recorder(query_engine, feedbacks, app_id):
tru_recorder = TruLlama(
query_engine,
app_id=app_id,
feedbacks=feedbacks
)
return tru_recorder
def get_prebuilt_trulens_recorder(query_engine, app_id):
tru_recorder = TruLlama(
query_engine,
app_id=app_id,
feedbacks=feedbacks
)
return tru_recorder
from llama_index import ServiceContext, VectorStoreIndex, StorageContext
from llama_index.node_parser import SentenceWindowNodeParser
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index import load_index_from_storage
import os
def build_sentence_window_index(
document, llm, embed_model="local:BAAI/bge-small-en-v1.5", save_dir="sentence_index"
):
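    """Build a sentence-window index over the document, persisting to save_dir (or reloading it if present)."""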
# create the sentence window node parser w/ default settings
node_parser = SentenceWindowNodeParser.from_defaults(
window_size=3,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
sentence_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
node_parser=node_parser,
)
if not os.path.exists(save_dir):
sentence_index = VectorStoreIndex.from_documents(
[document], service_context=sentence_context
)
sentence_index.storage_context.persist(persist_dir=save_dir)
else:
sentence_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=save_dir),
service_context=sentence_context,
)
return sentence_index
def get_sentence_window_query_engine(
sentence_index,
similarity_top_k=6,
rerank_top_n=2,
):
# define postprocessors
postproc = MetadataReplacementPostProcessor(target_metadata_key="window")
rerank = SentenceTransformerRerank(
top_n=rerank_top_n, model="BAAI/bge-reranker-base"
)
sentence_window_engine = sentence_index.as_query_engine(
similarity_top_k=similarity_top_k, node_postprocessors=[postproc, rerank]
)
return sentence_window_engine
from llama_index.node_parser import HierarchicalNodeParser
from llama_index.node_parser import get_leaf_nodes
from llama_index import StorageContext
from llama_index.retrievers import AutoMergingRetriever
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index.query_engine import RetrieverQueryEngine
def build_automerging_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
save_dir="merging_index",
chunk_sizes=None,
):
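    """Build an auto-merging index: the full node hierarchy goes into the docstore, only leaf nodes are embedded; persisted to save_dir or reloaded if present."""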
chunk_sizes = chunk_sizes or [2048, 512, 128]
node_parser = HierarchicalNodeParser.from_defaults(chunk_sizes=chunk_sizes)
nodes = node_parser.get_nodes_from_documents(documents)
leaf_nodes = get_leaf_nodes(nodes)
merging_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
if not os.path.exists(save_dir):
automerging_index = VectorStoreIndex(
leaf_nodes, storage_context=storage_context, service_context=merging_context
)
automerging_index.storage_context.persist(persist_dir=save_dir)
else:
automerging_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=save_dir),
service_context=merging_context,
)
return automerging_index
def get_automerging_query_engine(
automerging_index,
similarity_top_k=12,
rerank_top_n=2,
):
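    """Combine an auto-merging retriever with a BGE reranker into a query engine."""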
base_retriever = automerging_index.as_retriever(similarity_top_k=similarity_top_k)
retriever = AutoMergingRetriever(
base_retriever, automerging_index.storage_context, verbose=True
)
rerank = SentenceTransformerRerank(
top_n=rerank_top_n, model="BAAI/bge-reranker-base"
)
auto_merging_engine = RetrieverQueryEngine.from_args(
retriever, node_postprocessors=[rerank]
)
return auto_merging_engine
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.AutoMergingRetriever",
"llama_index.node_parser.HierarchicalNodeParser.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.indices.postprocessor.SentenceTransformerRerank",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.get_leaf_nodes",
"llama_index.StorageContext.from_defaults",
"llama_index.query_engine.RetrieverQueryEngine.from_args",
"llama_index.indices.postprocessor.MetadataReplacementPostProcessor"
] | [((212, 232), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (230, 232), False, 'import nest_asyncio\n'), ((450, 458), 'trulens_eval.OpenAI', 'OpenAI', ([], {}), '()\n', (456, 458), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((855, 897), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'openai'}), '(groundedness_provider=openai)\n', (867, 897), False, 'from trulens_eval.feedback import Groundedness\n'), ((308, 335), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (317, 335), False, 'import os\n'), ((407, 439), 'os.getenv', 'os.getenv', (['"""HUGGINGFACE_API_KEY"""'], {}), "('HUGGINGFACE_API_KEY')\n", (416, 439), False, 'import os\n'), ((1270, 1328), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': 'app_id', 'feedbacks': 'feedbacks'}), '(query_engine, app_id=app_id, feedbacks=feedbacks)\n', (1278, 1328), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((1460, 1518), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': 'app_id', 'feedbacks': 'feedbacks'}), '(query_engine, app_id=app_id, feedbacks=feedbacks)\n', (1468, 1518), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((2131, 2263), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(3)', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=3, window_metadata_key=\n 'window', original_text_metadata_key='original_text')\n", (2169, 2263), False, 'from llama_index.node_parser import SentenceWindowNodeParser\n'), ((2313, 2405), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embed_model, node_parser=\n node_parser)\n', (2341, 2405), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((3020, 3082), 'llama_index.indices.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (3052, 3082), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor\n'), ((3096, 3173), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (3121, 3173), False, 'from llama_index.indices.postprocessor import SentenceTransformerRerank\n'), ((3936, 3997), 'llama_index.node_parser.HierarchicalNodeParser.from_defaults', 'HierarchicalNodeParser.from_defaults', ([], {'chunk_sizes': 'chunk_sizes'}), '(chunk_sizes=chunk_sizes)\n', (3972, 3997), False, 'from llama_index.node_parser import HierarchicalNodeParser\n'), ((4075, 4096), 'llama_index.node_parser.get_leaf_nodes', 'get_leaf_nodes', (['nodes'], {}), '(nodes)\n', (4089, 4096), False, 'from llama_index.node_parser import get_leaf_nodes\n'), ((4119, 4181), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (4147, 4181), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((4227, 4257), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (4255, 4257), False, 
'from llama_index import StorageContext\n'), ((4984, 5073), 'llama_index.retrievers.AutoMergingRetriever', 'AutoMergingRetriever', (['base_retriever', 'automerging_index.storage_context'], {'verbose': '(True)'}), '(base_retriever, automerging_index.storage_context,\n verbose=True)\n', (5004, 5073), False, 'from llama_index.retrievers import AutoMergingRetriever\n'), ((5097, 5174), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (5122, 5174), False, 'from llama_index.indices.postprocessor import SentenceTransformerRerank\n'), ((5215, 5286), 'llama_index.query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', (['retriever'], {'node_postprocessors': '[rerank]'}), '(retriever, node_postprocessors=[rerank])\n', (5245, 5286), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((281, 294), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (292, 294), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((380, 393), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (391, 393), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((481, 549), 'trulens_eval.Feedback', 'Feedback', (['openai.relevance_with_cot_reasons'], {'name': '"""Answer Relevance"""'}), "(openai.relevance_with_cot_reasons, name='Answer Relevance')\n", (489, 549), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((2443, 2467), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (2457, 2467), False, 'import os\n'), ((2494, 2571), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[document]'], {'service_context': 'sentence_context'}), '([document], service_context=sentence_context)\n', (2525, 2571), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((4320, 4344), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4334, 4344), False, 'import os\n'), ((4374, 4472), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['leaf_nodes'], {'storage_context': 'storage_context', 'service_context': 'merging_context'}), '(leaf_nodes, storage_context=storage_context,\n service_context=merging_context)\n', (4390, 4472), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((2735, 2785), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (2763, 2785), False, 'from llama_index import StorageContext\n'), ((4638, 4688), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (4666, 4688), False, 'from llama_index import StorageContext\n'), ((693, 723), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (721, 723), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((597, 666), 'trulens_eval.Feedback', 'Feedback', (['openai.relevance_with_cot_reasons'], {'name': '"""Context Relevance"""'}), "(openai.relevance_with_cot_reasons, name='Context Relevance')\n", (605, 666), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((920, 997), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {'name': '"""Groundedness"""'}), "(grounded.groundedness_measure_with_cot_reasons, 
name='Groundedness')\n", (928, 997), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((1010, 1040), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1038, 1040), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n')] |
import tiktoken
import sys
from llama_index.readers.file import PyMuPDFReader
from llama_index.core.node_parser import TokenTextSplitter
# which chunk to print, passed as the first CLI argument
index = int(sys.argv[1])
# load every page of the PDF; the page texts are joined into one string below
docs = PyMuPDFReader().load("Hamlet.pdf")
combined = ""
for doc in docs:
combined += doc.text
splitter = TokenTextSplitter(
chunk_size=10000,
chunk_overlap=10,
tokenizer=tiktoken.encoding_for_model("gpt-4").encode)
pieces = splitter.split_text(combined)
if index >= len(pieces):
print("No more content")
sys.exit(0)
print(pieces[index])
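# Hedged addition (illustration only, not part of the original script): a helper to check
# how many tokens each piece contains, using the same gpt-4 encoder as the splitter above.
def count_piece_tokens(pieces, model="gpt-4"):
    """Return the token count of each split piece."""
    enc = tiktoken.encoding_for_model(model)
    return [len(enc.encode(p)) for p in pieces]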
| [
"llama_index.readers.file.PyMuPDFReader"
] | [((495, 506), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (503, 506), False, 'import sys\n'), ((170, 185), 'llama_index.readers.file.PyMuPDFReader', 'PyMuPDFReader', ([], {}), '()\n', (183, 185), False, 'from llama_index.readers.file import PyMuPDFReader\n'), ((351, 387), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-4"""'], {}), "('gpt-4')\n", (378, 387), False, 'import tiktoken\n')] |
# The MIT License
# Copyright (c) Jerry Liu
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""OpenDAL file and directory reader.
A loader that fetches a file or iterates through a directory on a object store like AWS S3 or AzureBlob.
"""
import asyncio
import logging as log
import tempfile
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Self, Type, Union, cast
import opendal
from llama_index.readers.base import BaseReader
from llama_index.readers.file.docs_reader import DocxReader, PDFReader
from llama_index.readers.file.epub_reader import EpubReader
from llama_index.readers.file.image_reader import ImageReader
from llama_index.readers.file.ipynb_reader import IPYNBReader
from llama_index.readers.file.markdown_reader import MarkdownReader
from llama_index.readers.file.mbox_reader import MboxReader
from llama_index.readers.file.slides_reader import PptxReader
from llama_index.readers.file.tabular_reader import PandasCSVReader
from llama_index.readers.file.video_audio_reader import VideoAudioReader
from llama_index.schema import Document
from .... import services
from ....domain import DocumentListItem
DEFAULT_FILE_READER_CLS: Dict[str, Type[BaseReader]] = {
".pdf": PDFReader,
".docx": DocxReader,
".pptx": PptxReader,
".jpg": ImageReader,
".png": ImageReader,
".jpeg": ImageReader,
".mp3": VideoAudioReader,
".mp4": VideoAudioReader,
".csv": PandasCSVReader,
".epub": EpubReader,
".md": MarkdownReader,
".mbox": MboxReader,
".ipynb": IPYNBReader,
}
FILE_MIME_EXTENSION_MAP: Dict[str, str] = {
"application/pdf": ".pdf",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx",
"application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx",
"application/vnd.google-apps.document": ".gdoc",
"application/vnd.google-apps.presentation": ".gslides",
"application/vnd.google-apps.spreadsheet": ".gsheet",
"image/jpeg": ".jpg",
"image/png": ".png",
"image/jpg": ".jpg",
"audio/mpeg": ".mp3",
"audio/mp3": ".mp3",
"video/mp4": ".mp4",
"video/mpeg": ".mp4",
"text/csv": ".csv",
"application/epub+zip": ".epub",
"text/markdown": ".md",
"application/x-ipynb+json": ".ipynb",
"application/mbox": ".mbox",
}
class OpendalReader(BaseReader):
"""General reader for any opendal operator."""
def __init__(
self: Self,
scheme: str,
path: str = "/",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
file_metadata: Optional[Callable[[str], Dict]] = None,
**kwargs: Optional[dict[str, Any]],
) -> None:
"""Initialize opendal operator, along with credentials if needed.
Args:
scheme (str): the scheme of the service
path (str): the path of the data. If none is provided,
                this loader will iterate through the entire bucket. If path ends with `/`, this loader will iterate through the entire dir. Otherwise, this loader will load the file.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. NOTE: this isn't implemented yet.
file_metadata (Optional[Callable[[str], Dict]]): A function that takes a source file path and returns a dictionary of metadata to be added to the Document object.
**kwargs (Optional dict[str, any]): Additional arguments to pass to the `opendal.AsyncOperator` constructor. These are the scheme (object store) specific options.
"""
super().__init__()
self.path = path
self.file_metadata = file_metadata
self.supported_suffix = list(DEFAULT_FILE_READER_CLS.keys())
self.async_op = opendal.AsyncOperator(scheme, **kwargs)
if file_extractor is not None:
self.file_extractor = file_extractor
else:
self.file_extractor = {}
        self.documents: List[Document] = []
        # downloaded files are (source path, local path, indexed_on, size) tuples, mirroring
        # FileStorageBaseReader below; needed by load_data and get_document_list
        self.downloaded_files: List[tuple[str, str, int, int]] = []
def load_data(self: Self) -> List[Document]:
"""Load file(s) from OpenDAL."""
# TODO: think about the private and secure aspect of this temp folder.
# NOTE: the following code cleans up the temp folder when existing the context.
with tempfile.TemporaryDirectory() as temp_dir:
if not self.path.endswith("/"):
                filepath, indexed_on, size = asyncio.run(
                    download_file_from_opendal(self.async_op, temp_dir, self.path)
                )
                # download_file_from_opendal returns (local path, indexed_on, size); keep the
                # (source, local path, indexed_on, size) shape used by get_document_list/extract_files
                self.downloaded_files.append((self.path, filepath, indexed_on, size))
else:
self.downloaded_files = asyncio.run(download_dir_from_opendal(self.async_op, temp_dir, self.path))
self.documents = asyncio.run(
extract_files(
self.downloaded_files, file_extractor=self.file_extractor, file_metadata=self.file_metadata
)
)
return self.documents
def get_document_list(self: Self) -> List[DocumentListItem]:
"""Get a list of all documents in the index. A document is a list are 1:1 with a file."""
dl: List[DocumentListItem] = []
try:
for df in self.downloaded_files:
dl.append(DocumentListItem(link=df[0], indexed_on=df[2], size=df[3]))
except Exception as e:
log.exception("Converting Document list to DocumentListItem list failed: %s", e)
return dl
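# Hedged usage sketch (added for illustration): reading a prefix from S3 through the
# reader above. The scheme-specific kwargs (bucket, region) are assumptions about
# opendal's s3 service configuration and are passed straight through to
# opendal.AsyncOperator, as described in __init__.
def demo_load_s3_prefix(bucket: str, region: str) -> List[Document]:
    reader = OpendalReader(scheme="s3", path="docs/", bucket=bucket, region=region)
    return reader.load_data()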
class FileStorageBaseReader(BaseReader):
"""File storage reader."""
def __init__(
self: Self,
access_token: dict,
root: str,
selected_folder_id: Optional[str] = None,
path: str = "/",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
file_metadata: Optional[Callable[[str], Dict]] = None,
**kwargs: Optional[dict[str, Any]],
) -> None:
"""Initialize File storage service reader.
Args:
path (str): the path of the data. If none is provided,
                this loader will iterate through the entire bucket. If path ends with `/`, this loader will iterate through the entire dir. Otherwise, this loader will load the file.
access_token (dict): the access token for the google drive service
root (str): the root folder to start the iteration
selected_folder_id (Optional[str] = None): the selected folder id
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. NOTE: this isn't implemented yet.
file_metadata (Optional[Callable[[str], Dict]]): A function that takes a source file path and returns a dictionary of metadata to be added to the Document object.
kwargs (Optional dict[str, any]): Additional arguments to pass to the specific file storage service.
"""
super().__init__()
self.path = path
self.file_extractor = file_extractor if file_extractor is not None else {}
self.supported_suffix = list(DEFAULT_FILE_READER_CLS.keys())
self.access_token = access_token
self.root = root
self.file_metadata = file_metadata
self.selected_folder_id = selected_folder_id
self.documents: List[Document] = []
self.kwargs = kwargs
self.downloaded_files: List[tuple[str, str, int, int]] = []
def load_data(self: Self) -> List[Document]:
"""Load file(s) from file storage."""
raise NotImplementedError
def get_document_list(self: Self) -> List[DocumentListItem]:
"""Get a list of all documents in the index. A document is a list are 1:1 with a file."""
dl: List[DocumentListItem] = []
try:
for df in self.downloaded_files:
dl.append(DocumentListItem(link=df[0], indexed_on=df[2], size=df[3]))
except Exception as e:
log.exception("Converting Document list to DocumentListItem list failed: %s", e)
return dl
# TODO: Tobe removed once opendal starts supporting Google Drive.
class GoogleDriveReader(FileStorageBaseReader):
"""Google Drive reader."""
def __init__(
self: Self,
access_token: dict,
root: str,
selected_folder_id: Optional[str] = None,
path: str = "/",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
file_metadata: Optional[Callable[[str], Dict]] = None,
) -> None:
"""Initialize Google Drive reader."""
super().__init__(
access_token=access_token,
root=root,
selected_folder_id=selected_folder_id,
path=path,
file_extractor=file_extractor,
file_metadata=file_metadata,
)
def load_data(self: Self) -> List[Document]:
"""Load file(s) from Google Drive."""
service = services.google_drive.get_drive_service(self.access_token)
id_ = self.selected_folder_id if self.selected_folder_id is not None else "root"
folder_content = service.files().list(
q=f"'{id_}' in parents and trashed=false",
fields="files(id, name, parents, mimeType, modifiedTime, webViewLink, webContentLink, size, fullFileExtension)",
).execute()
files = folder_content.get("files", [])
with tempfile.TemporaryDirectory() as temp_dir:
self.downloaded_files = asyncio.run(
download_from_gdrive(files, temp_dir, service)
)
self.documents = asyncio.run(
extract_files(
self.downloaded_files, file_extractor=self.file_extractor, file_metadata=self.file_metadata
)
)
return self.documents
class OneDriveReader(FileStorageBaseReader):
"""OneDrive reader."""
def __init__(
self: Self,
access_token: dict,
root: str,
selected_folder_id: Optional[str] = None,
path: str = "/",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
file_metadata: Optional[Callable[[str], Dict]] = None,
) -> None:
"""Initialize OneDrive reader."""
super().__init__(
access_token=access_token,
root=root,
selected_folder_id=selected_folder_id,
path=path,
file_extractor=file_extractor,
file_metadata=file_metadata,
)
def load_data(self: Self) -> List[Document]:
"""Load file(s) from OneDrive."""
client = services.ms_onedrive.get_client(self.access_token)
id_ = self.selected_folder_id if self.selected_folder_id is not None else "/drive/root:"
if client is not None:
response = client.files.drive_specific_folder(id_, {
"$select": "id,name,file,size,webUrl",
"$filter": "file ne null",
"$top": 100, # Limiting to a maximum of 100 files for now.
})
files = response.data.get("value", [])
with tempfile.TemporaryDirectory() as temp_dir:
self.downloaded_files = asyncio.run(
download_from_onedrive(files, temp_dir, client)
)
self.documents = asyncio.run(
extract_files(
self.downloaded_files, file_extractor=self.file_extractor, file_metadata=self.file_metadata
)
)
return self.documents
async def download_from_onedrive(files: List[dict], temp_dir: str, client: Any,) -> List[tuple[str, str, int, int]]:
"""Download files from OneDrive."""
downloaded_files: List[tuple[str, str, int, int]] = []
for file in files:
suffix = Path(file["name"]).suffix
if suffix not in DEFAULT_FILE_READER_CLS:
log.debug("file suffix not supported: %s", suffix)
continue
file_path = f"{temp_dir}/{file['name']}"
indexed_on = datetime.timestamp(datetime.now().utcnow())
await asyncio.to_thread(
services.ms_onedrive.download_file, client, file["id"], file_path
)
downloaded_files.append(
(file["webUrl"], file_path, int(indexed_on), int(file["size"]))
)
return downloaded_files
async def download_from_gdrive(files: List[dict], temp_dir: str, service: Any,) -> List[tuple[str, str, int, int]]:
"""Download files from Google Drive."""
downloaded_files: List[tuple[str, str, int, int]] = []
for file in files:
if file["mimeType"] == "application/vnd.google-apps.folder":
# TODO: Implement recursive folder download
continue
suffix = FILE_MIME_EXTENSION_MAP.get(file["mimeType"], None)
if suffix not in DEFAULT_FILE_READER_CLS:
continue
file_path = f"{temp_dir}/{file['name']}"
indexed_on = datetime.timestamp(datetime.now().utcnow())
await asyncio.to_thread(
services.google_drive.download_file, service, file["id"], file_path, file["mimeType"]
)
downloaded_files.append(
(file["webViewLink"], file_path, int(indexed_on), int(file["size"]))
)
return downloaded_files
async def download_file_from_opendal(op: Any, temp_dir: str, path: str) -> tuple[str, int, int]:
"""Download file from OpenDAL."""
import opendal
log.debug("downloading file using OpenDAL: %s", path)
op = cast(opendal.AsyncOperator, op)
suffix = Path(path).suffix
filepath = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}" # type: ignore
file_size = 0
indexed_on = datetime.timestamp(datetime.now().utcnow())
async with op.open_reader(path) as r:
with open(filepath, "wb") as w:
b = await r.read()
w.write(b)
file_size = len(b)
return (filepath, int(indexed_on), file_size)
async def download_dir_from_opendal(
op: Any,
temp_dir: str,
download_dir: str,
) -> List[tuple[str, str, int, int]]:
"""Download directory from opendal.
    Args:
        op: opendal operator
        temp_dir: temp directory to store the downloaded files
        download_dir: directory to download
    Returns:
        a list of (source path, local path, indexed_on, size) tuples, one per downloaded file.
"""
import opendal
log.debug("downloading dir using OpenDAL: %s", download_dir)
downloaded_files: List[tuple[str, str, int, int]] = []
op = cast(opendal.AsyncOperator, op)
objs = await op.scan(download_dir)
async for obj in objs:
filepath, indexed_on, size = await download_file_from_opendal(op, temp_dir, obj.path)
downloaded_files.append((obj.path, filepath, indexed_on, size)) # source path, local path
return downloaded_files
async def extract_files(
downloaded_files: List[tuple[str, str, int, int]],
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
file_metadata: Optional[Callable[[str], Dict]] = None,
) -> List[Document]:
"""Extract content of a list of files."""
documents: List[Document] = []
tasks = []
log.debug("number files to extract: %s", len(downloaded_files))
for fe in downloaded_files:
source_path = fe[0]
local_path = fe[1]
metadata = None
if file_metadata is not None:
metadata = file_metadata(source_path)
# TODO: this likely will not scale very much. We'll have to refactor to control the number of tasks.
task = asyncio.create_task(
extract_file(Path(local_path), filename_as_id=True, file_extractor=file_extractor, metadata=metadata)
)
tasks.append(task)
log.debug("extract task created for: %s", local_path)
log.debug("extract file - tasks started: %s", len(tasks))
results = await asyncio.gather(*tasks)
log.debug("extract file - tasks completed: %s", len(results))
for result in results:
# combine into a single Document list
documents.extend(result)
return documents
async def extract_file(
file_path: Path,
filename_as_id: bool = False,
errors: str = "ignore",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
metadata: Optional[Dict] = None,
) -> List[Document]:
"""Extract content of a file on disk.
Args:
file_path (str): path to the file
filename_as_id (bool): whether to use the filename as the document id
errors (str): how to handle errors when reading the file
supported_suffix (Optional[List[str]]): list of supported file suffixes
file_extractor (Optional[Dict[str, Union[str, BaseReader]]] = None): A mapping of file extractors to use for specific file types.
metadata (Optional[Dict] = None): metadata to add to the document. This will be appended to any metadata generated by the file extension specific extractor.
Returns:
List[Document]: list of documents containing the content of the file, one Document object per page.
"""
documents: List[Document] = []
file_suffix = file_path.suffix.lower()
supported_suffix = list(DEFAULT_FILE_READER_CLS.keys())
if file_suffix in supported_suffix:
log.debug("file extractor found for file_suffix: %s", file_suffix)
        # NOTE: pondering if it's worth turning this into a class so reader classes are only instantiated once.
reader = DEFAULT_FILE_READER_CLS[file_suffix]()
docs = reader.load_data(file_path, extra_info=metadata)
# iterate over docs if needed
if filename_as_id:
for i, doc in enumerate(docs):
doc.id_ = f"{str(file_path)}_part_{i}"
documents.extend(docs)
else:
log.debug("file extractor not found for file_suffix: %s", file_suffix)
# do standard read
with open(file_path, "r", errors=errors, encoding="utf8") as f:
data = f.read()
doc = Document(text=data, extra_info=metadata or {})
if filename_as_id:
doc.id_ = str(file_path)
documents.append(doc)
return documents
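# Hedged example (added for illustration): extracting a single local file with the async
# helper above; the default path is an assumption.
def demo_extract_markdown(path: str = "README.md") -> List[Document]:
    """Run extract_file synchronously for one file on disk."""
    return asyncio.run(extract_file(Path(path), filename_as_id=True))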
| [
"llama_index.schema.Document"
] | [((14702, 14755), 'logging.debug', 'log.debug', (['"""downloading file using OpenDAL: %s"""', 'path'], {}), "('downloading file using OpenDAL: %s', path)\n", (14711, 14755), True, 'import logging as log\n'), ((14765, 14796), 'typing.cast', 'cast', (['opendal.AsyncOperator', 'op'], {}), '(opendal.AsyncOperator, op)\n', (14769, 14796), False, 'from typing import Any, Callable, Dict, List, Optional, Self, Type, Union, cast\n'), ((15914, 15974), 'logging.debug', 'log.debug', (['"""downloading dir using OpenDAL: %s"""', 'download_dir'], {}), "('downloading dir using OpenDAL: %s', download_dir)\n", (15923, 15974), True, 'import logging as log\n'), ((16043, 16074), 'typing.cast', 'cast', (['opendal.AsyncOperator', 'op'], {}), '(opendal.AsyncOperator, op)\n', (16047, 16074), False, 'from typing import Any, Callable, Dict, List, Optional, Self, Type, Union, cast\n'), ((4988, 5027), 'opendal.AsyncOperator', 'opendal.AsyncOperator', (['scheme'], {}), '(scheme, **kwargs)\n', (5009, 5027), False, 'import opendal\n'), ((14811, 14821), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (14815, 14821), False, 'from pathlib import Path\n'), ((17265, 17318), 'logging.debug', 'log.debug', (['"""extract task created for: %s"""', 'local_path'], {}), "('extract task created for: %s', local_path)\n", (17274, 17318), True, 'import logging as log\n'), ((17403, 17425), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (17417, 17425), False, 'import asyncio\n'), ((18800, 18866), 'logging.debug', 'log.debug', (['"""file extractor found for file_suffix: %s"""', 'file_suffix'], {}), "('file extractor found for file_suffix: %s', file_suffix)\n", (18809, 18866), True, 'import logging as log\n'), ((19342, 19412), 'logging.debug', 'log.debug', (['"""file extractor not found for file_suffix: %s"""', 'file_suffix'], {}), "('file extractor not found for file_suffix: %s', file_suffix)\n", (19351, 19412), True, 'import logging as log\n'), ((19555, 19601), 'llama_index.schema.Document', 'Document', ([], {'text': 'data', 'extra_info': '(metadata or {})'}), '(text=data, extra_info=metadata or {})\n', (19563, 19601), False, 'from llama_index.schema import Document\n'), ((5485, 5514), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (5512, 5514), False, 'import tempfile\n'), ((10635, 10664), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (10662, 10664), False, 'import tempfile\n'), ((13058, 13076), 'pathlib.Path', 'Path', (["file['name']"], {}), "(file['name'])\n", (13062, 13076), False, 'from pathlib import Path\n'), ((13146, 13196), 'logging.debug', 'log.debug', (['"""file suffix not supported: %s"""', 'suffix'], {}), "('file suffix not supported: %s', suffix)\n", (13155, 13196), True, 'import logging as log\n'), ((13346, 13434), 'asyncio.to_thread', 'asyncio.to_thread', (['services.ms_onedrive.download_file', 'client', "file['id']", 'file_path'], {}), "(services.ms_onedrive.download_file, client, file['id'],\n file_path)\n", (13363, 13434), False, 'import asyncio\n'), ((14261, 14369), 'asyncio.to_thread', 'asyncio.to_thread', (['services.google_drive.download_file', 'service', "file['id']", 'file_path', "file['mimeType']"], {}), "(services.google_drive.download_file, service, file['id'],\n file_path, file['mimeType'])\n", (14278, 14369), False, 'import asyncio\n'), ((6571, 6656), 'logging.exception', 'log.exception', (['"""Converting Document list to DocumentListItem list failed: %s"""', 'e'], {}), "('Converting Document list to 
DocumentListItem list failed: %s', e\n )\n", (6584, 6656), True, 'import logging as log\n'), ((9201, 9286), 'logging.exception', 'log.exception', (['"""Converting Document list to DocumentListItem list failed: %s"""', 'e'], {}), "('Converting Document list to DocumentListItem list failed: %s', e\n )\n", (9214, 9286), True, 'import logging as log\n'), ((12349, 12378), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (12376, 12378), False, 'import tempfile\n'), ((14863, 14894), 'tempfile._get_candidate_names', 'tempfile._get_candidate_names', ([], {}), '()\n', (14892, 14894), False, 'import tempfile\n'), ((14975, 14989), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14987, 14989), False, 'from datetime import datetime\n'), ((17131, 17147), 'pathlib.Path', 'Path', (['local_path'], {}), '(local_path)\n', (17135, 17147), False, 'from pathlib import Path\n'), ((13307, 13321), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13319, 13321), False, 'from datetime import datetime\n'), ((14222, 14236), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14234, 14236), False, 'from datetime import datetime\n')] |
from langchain.agents import (
initialize_agent,
Tool,
AgentType
)
from llama_index.callbacks import (
CallbackManager,
LlamaDebugHandler
)
from llama_index.node_parser.simple import SimpleNodeParser
from llama_index import (
VectorStoreIndex,
SummaryIndex,
SimpleDirectoryReader,
ServiceContext,
StorageContext,
)
import os
import openai
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def init_llm_from_env(temperature=0.1, max_tokens=1024):
llm_type = os.getenv("LLM")
if llm_type == 'openai':
from langchain.chat_models import ChatOpenAI
openai.api_key = os.getenv("OPENAI_API_KEY")
llm = ChatOpenAI(temperature=temperature,
model_name="gpt-3.5-turbo",
max_tokens=max_tokens)
elif llm_type == 'xinference':
from langchain.llms import Xinference
llm = Xinference(
server_url=os.getenv("XINFERENCE_SERVER_ENDPOINT"),
model_uid=os.getenv("XINFERENCE_LLM_MODEL_UID")
)
else:
raise ValueError(f"Unknown LLM type {llm_type}")
return llm
def init_embedding_from_env(temperature=0.1, max_tokens=1024):
embedding_type = os.getenv("EMBEDDING")
if embedding_type == 'openai':
from llama_index.embeddings import OpenAIEmbedding
openai.api_key = os.getenv("OPENAI_API_KEY")
embedding = OpenAIEmbedding()
elif embedding_type == 'xinference':
from langchain.embeddings import XinferenceEmbeddings
from llama_index.embeddings import LangchainEmbedding
embedding = LangchainEmbedding(
XinferenceEmbeddings(
server_url=os.getenv("XINFERENCE_SERVER_ENDPOINT"),
model_uid=os.getenv("XINFERENCE_EMBEDDING_MODEL_UID")
)
)
else:
raise ValueError(f"Unknown EMBEDDING type {embedding_type}")
return embedding
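# Hedged note (added for illustration): the two init_* helpers above are driven purely by
# environment variables. A typical .env for the OpenAI path would contain, e.g.:
#   LLM=openai
#   EMBEDDING=openai
#   OPENAI_API_KEY=sk-...
# and for the Xinference path:
#   LLM=xinference
#   EMBEDDING=xinference
#   XINFERENCE_SERVER_ENDPOINT=http://127.0.0.1:9997
#   XINFERENCE_LLM_MODEL_UID=<model-uid>
#   XINFERENCE_EMBEDDING_MODEL_UID=<model-uid>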
def get_service_context(callback_handlers):
callback_manager = CallbackManager(callback_handlers)
node_parser = SimpleNodeParser.from_defaults(
chunk_size=512,
chunk_overlap=128,
callback_manager=callback_manager,
)
return ServiceContext.from_defaults(
embed_model=init_embedding_from_env(),
callback_manager=callback_manager,
llm=init_llm_from_env(),
chunk_size=512,
node_parser=node_parser
)
def get_storage_context():
return StorageContext.from_defaults()
def get_langchain_agent_from_index(summary_index, vector_index):
list_query_engine = summary_index.as_query_engine(
response_mode="tree_summarize",
use_async=True,
)
vector_query_engine = vector_index.as_query_engine(
similarity_top_k=3
)
tools = [
Tool(
name="Summary Tool",
func=lambda q: str(list_query_engine.query(q)),
description="useful for when you want to get summarizations",
return_direct=True,
),
Tool(
name="Lookup Tool",
func=lambda q: str(vector_query_engine.query(q)),
description="useful for when you want to lookup detailed information",
return_direct=True,
),
]
agent_chain = initialize_agent(
tools,
init_llm_from_env(),
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True
)
return agent_chain
def get_query_engine_from_index(index):
return index.as_query_engine(
similarity_top_k=3
)
def get_chat_engine_from_index(index):
return index.as_chat_engine(chat_mode="condense_question", verbose=True)
class ChatEngine:
def __init__(self, file_path):
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
service_context = get_service_context([llama_debug])
storage_context = get_storage_context()
documents = SimpleDirectoryReader(input_files=[file_path], filename_as_id=True).load_data()
logging.info(f"Loaded {len(documents)} documents from {file_path}")
nodes = service_context.node_parser.get_nodes_from_documents(documents)
storage_context.docstore.add_documents(nodes)
logging.info(f"Adding {len(nodes)} nodes to storage")
self.summary_index = SummaryIndex(nodes, storage_context=storage_context,
service_context=service_context)
self.vector_index = VectorStoreIndex(nodes, storage_context=storage_context,
service_context=service_context)
# def conversational_chat(self, query, callback_handler):
# """
# Start a conversational chat with a agent
# """
# response = self.agent_chain.run(input=query, callbacks=[callback_handler])
# return response
def conversational_chat(self, query, callback_handler):
"""
        Start a conversational chat with an agent.
"""
return get_chat_engine_from_index(self.vector_index).chat(query).response | [
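# Hedged usage sketch (added for illustration): the file path and question are assumptions;
# callback_handler is ignored by conversational_chat above, so None is passed.
def demo_chat_engine(file_path: str, question: str) -> str:
    engine = ChatEngine(file_path)
    return engine.conversational_chat(question, None)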
"llama_index.SimpleDirectoryReader",
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.StorageContext.from_defaults",
"llama_index.embeddings.OpenAIEmbedding",
"llama_index.VectorStoreIndex",
"llama_index.callbacks.CallbackManager",
"llama_index.SummaryIndex",
"llama_index.node_parser.simple.SimpleNodeParser.from_defaults"
] | [((398, 456), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (417, 456), False, 'import logging\n'), ((529, 545), 'os.getenv', 'os.getenv', (['"""LLM"""'], {}), "('LLM')\n", (538, 545), False, 'import os\n'), ((1217, 1239), 'os.getenv', 'os.getenv', (['"""EMBEDDING"""'], {}), "('EMBEDDING')\n", (1226, 1239), False, 'import os\n'), ((1961, 1995), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['callback_handlers'], {}), '(callback_handlers)\n', (1976, 1995), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler\n'), ((2014, 2118), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': '(512)', 'chunk_overlap': '(128)', 'callback_manager': 'callback_manager'}), '(chunk_size=512, chunk_overlap=128,\n callback_manager=callback_manager)\n', (2044, 2118), False, 'from llama_index.node_parser.simple import SimpleNodeParser\n'), ((2411, 2441), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (2439, 2441), False, 'from llama_index import VectorStoreIndex, SummaryIndex, SimpleDirectoryReader, ServiceContext, StorageContext\n'), ((649, 676), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (658, 676), False, 'import os\n'), ((689, 780), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'temperature', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': 'max_tokens'}), "(temperature=temperature, model_name='gpt-3.5-turbo', max_tokens=\n max_tokens)\n", (699, 780), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1355, 1382), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1364, 1382), False, 'import os\n'), ((1401, 1418), 'llama_index.embeddings.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (1416, 1418), False, 'from llama_index.embeddings import OpenAIEmbedding\n'), ((3697, 3739), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (3714, 3739), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler\n'), ((4266, 4356), 'llama_index.SummaryIndex', 'SummaryIndex', (['nodes'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(nodes, storage_context=storage_context, service_context=\n service_context)\n', (4278, 4356), False, 'from llama_index import VectorStoreIndex, SummaryIndex, SimpleDirectoryReader, ServiceContext, StorageContext\n'), ((4423, 4517), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(nodes, storage_context=storage_context, service_context=\n service_context)\n', (4439, 4517), False, 'from llama_index import VectorStoreIndex, SummaryIndex, SimpleDirectoryReader, ServiceContext, StorageContext\n'), ((3871, 3938), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file_path]', 'filename_as_id': '(True)'}), '(input_files=[file_path], filename_as_id=True)\n', (3892, 3938), False, 'from llama_index import VectorStoreIndex, SummaryIndex, SimpleDirectoryReader, ServiceContext, StorageContext\n'), ((947, 986), 'os.getenv', 'os.getenv', (['"""XINFERENCE_SERVER_ENDPOINT"""'], {}), "('XINFERENCE_SERVER_ENDPOINT')\n", (956, 986), False, 'import os\n'), ((1009, 1046), 'os.getenv', 
'os.getenv', (['"""XINFERENCE_LLM_MODEL_UID"""'], {}), "('XINFERENCE_LLM_MODEL_UID')\n", (1018, 1046), False, 'import os\n'), ((1672, 1711), 'os.getenv', 'os.getenv', (['"""XINFERENCE_SERVER_ENDPOINT"""'], {}), "('XINFERENCE_SERVER_ENDPOINT')\n", (1681, 1711), False, 'import os\n'), ((1735, 1778), 'os.getenv', 'os.getenv', (['"""XINFERENCE_EMBEDDING_MODEL_UID"""'], {}), "('XINFERENCE_EMBEDDING_MODEL_UID')\n", (1744, 1778), False, 'import os\n')] |
from llama_index import DiscordReader
from llama_index import download_loader
import os
import nest_asyncio
nest_asyncio.apply()
from llama_index import ServiceContext
import openai
import re
import csv
import time
import random
from dotenv import load_dotenv
import os
from llama_index import Document
load_dotenv()
openai_api_key = os.environ.get("OPENAI_API")
discord_key = os.environ.get("DISCORD_TOKEN")
os.environ["OPENAI_API_KEY"] = openai_api_key
openai.api_key = openai_api_key
def hit_discord():
DiscordReader = download_loader('DiscordReader')
discord_token = discord_key
    channel_ids = [1088751449271447552] # Replace with your channel_id
#channel_ids = [1057178784895348746] # Replace with your channel_id
reader = DiscordReader(discord_token=discord_token)
documents = reader.load_data(channel_ids=channel_ids)
print("docs length", len(documents))
#discord_token = os.getenv("MTA4MjQyOTk4NTQ5Njc3MjYyOA.G8r0S7.MURmKr2iUaZf6AbDot5E_Gad_10oGbrMFxFVy4")
#documents = DiscordReader(discord_token="MTA4MjQyOTk4NTQ5Njc3MjYyOA.G8r0S7.MURmKr2iUaZf6AbDot5E_Gad_10oGbrMFxFVy4").load_data(channel_ids=channel_ids, limit=[10])
service_context = ServiceContext.from_defaults(chunk_size_limit=3000)
nodes = service_context.node_parser.get_nodes_from_documents(documents)
print("nodes length:", len(nodes))
questions = {}
array_of_docs = []
for n in nodes:
print(n)
prompt = f"""You are tasked with parsing out only the text from Discord messages (including who wrote it and their role). Here is the Discord data: {n}"""
MAX_RETRIES = 3
SLEEP_TIME = 0.75 # in seconds
for _ in range(MAX_RETRIES):
try:
time.sleep(round(random.uniform(0, SLEEP_TIME), 2))
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt}
],
temperature=0
)
break # If the API call works leave loop
            except Exception as e:
                print(f"Error calling OpenAI API: {e}")
                time.sleep(SLEEP_TIME)
        else:
            # every retry failed for this node; skip it so `completion` is never referenced unbound
            print("Skipping node after repeated OpenAI API errors")
            continue
        #print(completion.choices[0].message['content'])
        text = completion.choices[0].message['content']
document = Document(text=text)
array_of_docs.append(document)
print(array_of_docs)
return array_of_docs
__all__ = ['hit_discord']
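if __name__ == "__main__":
    # Hedged demo (added for illustration): run the loader directly and report how many
    # cleaned Document objects were produced; requires DISCORD_TOKEN and OPENAI_API in .env.
    documents = hit_discord()
    print(f"Parsed {len(documents)} Discord chunks into Document objects")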
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.DiscordReader",
"llama_index.download_loader",
"llama_index.Document"
] | [((108, 128), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (126, 128), False, 'import nest_asyncio\n'), ((304, 317), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (315, 317), False, 'from dotenv import load_dotenv\n'), ((337, 365), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API"""'], {}), "('OPENAI_API')\n", (351, 365), False, 'import os\n'), ((380, 411), 'os.environ.get', 'os.environ.get', (['"""DISCORD_TOKEN"""'], {}), "('DISCORD_TOKEN')\n", (394, 411), False, 'import os\n'), ((532, 564), 'llama_index.download_loader', 'download_loader', (['"""DiscordReader"""'], {}), "('DiscordReader')\n", (547, 564), False, 'from llama_index import download_loader\n'), ((755, 797), 'llama_index.DiscordReader', 'DiscordReader', ([], {'discord_token': 'discord_token'}), '(discord_token=discord_token)\n', (768, 797), False, 'from llama_index import DiscordReader\n'), ((1195, 1246), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size_limit': '(3000)'}), '(chunk_size_limit=3000)\n', (1223, 1246), False, 'from llama_index import ServiceContext\n'), ((2389, 2408), 'llama_index.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2397, 2408), False, 'from llama_index import Document\n'), ((1823, 1941), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': "[{'role': 'user', 'content': prompt}]", 'temperature': '(0)'}), "(model='gpt-3.5-turbo', messages=[{'role':\n 'user', 'content': prompt}], temperature=0)\n", (1851, 1941), False, 'import openai\n'), ((2232, 2254), 'time.sleep', 'time.sleep', (['SLEEP_TIME'], {}), '(SLEEP_TIME)\n', (2242, 2254), False, 'import time\n'), ((1759, 1788), 'random.uniform', 'random.uniform', (['(0)', 'SLEEP_TIME'], {}), '(0, SLEEP_TIME)\n', (1773, 1788), False, 'import random\n')] |
from typing import Union
from llama_index.core import Prompt
from llama_index.core.response_synthesizers import get_response_synthesizer, ResponseMode
from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
from app.data.messages.qa import DocumentRequest
from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer
from app.data.models.mongodb import (
LlamaIndexDocumentMeta,
LlamaIndexDocumentMetaReadable,
Message,
)
from app.utils.log_util import logger
from app.utils import data_util
from app.llama_index_server.chat_message_dao import ChatMessageDao
from app.llama_index_server.index_storage import index_storage
from app.llama_index_server.my_query_engine_tool import MyQueryEngineTool, MATCHED_MARK
SIMILARITY_CUTOFF = 0.85
PROMPT_TEMPLATE_FOR_QUERY_ENGINE = (
"We have provided context information below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given this information, assume you are an experienced golf coach, if the question has anything to do with golf, "
"please give short, simple, accurate, precise answer to the question, "
"limited to 80 words maximum. If the question has nothing to do with golf at all, please answer "
f"'{get_default_answer_id()}'.\n"
"The question is: {query_str}\n"
)
SYSTEM_PROMPT_TEMPLATE_FOR_CHAT_ENGINE = (
"Your are an expert Q&A system that can find relevant information using the tools at your disposal.\n"
"The tools can access a set of typical questions a golf beginner might ask.\n"
"If the user's query matches one of those typical questions, stop and return the matched question immediately.\n"
"If the user's query doesn't match any of those typical questions, "
"then you should act as an experienced golf coach, and firstly evaluate whether the question is relevant to golf.\n"
f"if it is not golf relevant at all, please answer '{get_default_answer_id()},"
"otherwise, please give short, simple, accurate, precise answer to the question, limited to 80 words maximum.\n"
"You may need to combine the chat history to fully understand the query of the user.\n"
"Remember you are only allowed to answer questions related to golf.\n"
)
chat_message_dao = ChatMessageDao()
def get_local_query_engine():
"""
strictly limited to local knowledge base. our local knowledge base is a list of standard questions which are indexed in vector store,
while the standard answers are stored in mongodb through DocumentMetaDao.
there is a one-to-one mapping between each standard question and a standard answer.
we may update or optimize the standard answers in mongodb frequently, but usually we don't update the standard questions.
if a query matches one of the standard questions, we can find the respective standard answer from mongodb.
"""
index = index_storage.index()
return index.as_query_engine(
response_synthesizer=get_response_synthesizer(
response_mode=ResponseMode.NO_TEXT
),
node_postprocessors=[SimilarityPostprocessor(similarity_cutoff=SIMILARITY_CUTOFF)],
)
def get_matched_question_from_local_query_engine(query_text):
local_query_engine = get_local_query_engine()
local_query_response = local_query_engine.query(query_text)
if len(local_query_response.source_nodes) > 0:
matched_node = local_query_response.source_nodes[0]
matched_question = matched_node.text
logger.debug(f"Found matched question from index: {matched_question}")
return matched_question
else:
return None
def get_doc_meta(text):
matched_doc_id = data_util.get_doc_id(text)
mongo = index_storage.mongo()
doc_meta = mongo.find_one({"doc_id": matched_doc_id})
doc_meta = LlamaIndexDocumentMeta(**doc_meta) if doc_meta else None
return matched_doc_id, doc_meta
def get_llm_query_engine():
index = index_storage.index()
qa_template = Prompt(PROMPT_TEMPLATE_FOR_QUERY_ENGINE)
return index.as_query_engine(text_qa_template=qa_template)
def query_index(query_text, only_for_meta=False) -> Union[Answer, LlamaIndexDocumentMeta, None]:
data_util.assert_not_none(query_text, "query cannot be none")
logger.info(f"Query test: {query_text}")
# first search locally
matched_question = get_matched_question_from_local_query_engine(query_text)
if matched_question:
matched_doc_id, doc_meta = get_doc_meta(matched_question)
if doc_meta:
logger.debug(f"An matched doc meta found from mongodb: {doc_meta}")
doc_meta.query_timestamps.append(data_util.get_current_milliseconds())
index_storage.mongo().upsert_one({"doc_id": matched_doc_id}, doc_meta)
if only_for_meta:
return doc_meta
else:
return Answer(
category=doc_meta.category,
question=query_text,
matched_question=matched_question,
source=Source.KNOWLEDGE_BASE if doc_meta.source == Source.KNOWLEDGE_BASE else Source.USER_ASKED,
answer=doc_meta.answer,
)
else:
# means the document meta has been removed from mongodb. for example by pruning
logger.warning(f"'{matched_doc_id}' is not found in mongodb")
if only_for_meta:
return None
# if not found, turn to LLM
llm_query_engine = get_llm_query_engine()
response = llm_query_engine.query(query_text)
# save the question-answer pair to index
answer = Answer(
category=None,
question=query_text,
source=index_storage.current_model,
answer=str(response),
)
index_storage.add_doc(answer)
return answer
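# Hedged example (added for illustration): the question text is an assumption.
def demo_query_index() -> Answer:
    """Run one query through the two-stage lookup above (local knowledge base first, then LLM)."""
    return query_index("How do I fix a slice?")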
def delete_doc(doc_id):
data_util.assert_not_none(doc_id, "doc_id cannot be none")
logger.info(f"Delete document with doc id: {doc_id}")
return index_storage.delete_doc(doc_id)
def get_document(req: DocumentRequest):
doc_meta = index_storage.mongo().find_one({"doc_id": req.doc_id})
if doc_meta:
return LlamaIndexDocumentMetaReadable(**doc_meta)
elif req.fuzzy:
doc_meta = query_index(req.doc_id, only_for_meta=True)
if doc_meta:
doc_meta.matched_question = doc_meta.question
doc_meta.question = doc_meta.doc_id = req.doc_id
return LlamaIndexDocumentMetaReadable(**doc_meta.model_dump())
return None
def cleanup_for_test():
return index_storage.mongo().cleanup_for_test()
def get_chat_engine(conversation_id: str, streaming: bool = False):
local_query_engine = get_local_query_engine()
query_engine_tools = [
MyQueryEngineTool.from_defaults(
query_engine=local_query_engine,
name="local_query_engine",
description="Queries from a knowledge base consists of typical questions that a golf beginner might ask",
)
]
chat_llm = OpenAI(
temperature=0,
model=index_storage.current_model,
streaming=streaming,
max_tokens=100,
)
chat_history = chat_message_dao.get_chat_history(conversation_id)
chat_history = [ChatMessage(role=c.role, content=c.content) for c in chat_history]
return OpenAIAgent.from_tools(
tools=query_engine_tools,
llm=chat_llm,
chat_history=chat_history,
verbose=True,
system_prompt=SYSTEM_PROMPT_TEMPLATE_FOR_CHAT_ENGINE,
)
def get_response_text_from_chat(agent_chat_response):
sources = agent_chat_response.sources
if len(sources) > 0:
source_content = sources[0].content
if MATCHED_MARK in source_content:
return source_content.replace(MATCHED_MARK, "").strip()
return agent_chat_response.response
def chat(query_text: str, conversation_id: str) -> Message:
# we will not index chat messages in vector store, but will save them in mongodb
data_util.assert_not_none(query_text, "query content cannot be none")
user_message = ChatMessage(role=MessageRole.USER, content=query_text)
# save immediately, since the following steps may take a while and throw exceptions
chat_message_dao.save_chat_history(conversation_id, user_message)
chat_engine = get_chat_engine(conversation_id)
agent_chat_response = chat_engine.chat(query_text)
response_text = get_response_text_from_chat(agent_chat_response)
# todo: change the if condition to: response_text == get_default_answer_id()
response_text = get_default_answer() if get_default_answer_id() in response_text else response_text
matched_doc_id, doc_meta = get_doc_meta(response_text)
if doc_meta:
logger.debug(f"An matched doc meta found from mongodb: {doc_meta}")
doc_meta.query_timestamps.append(data_util.get_current_milliseconds())
index_storage.mongo().upsert_one({"doc_id": matched_doc_id}, doc_meta)
bot_message = ChatMessage(role=MessageRole.ASSISTANT, content=doc_meta.answer)
else:
# means the chat engine cannot find a matched doc meta from mongodb
logger.warning(f"'{matched_doc_id}' is not found in mongodb")
bot_message = ChatMessage(role=MessageRole.ASSISTANT, content=response_text)
chat_message_dao.save_chat_history(conversation_id, bot_message)
return Message.from_chat_message(conversation_id, bot_message)
async def stream_chat(content: str, conversation_id: str):
# todo: need to use chat engine based on index. otherwise, the local database is not utilized
    # We only support using OpenAI's API here; this needs the official openai SDK client
    # (which exposes chat.completions.create), not the LlamaIndex `OpenAI` LLM wrapper
    # imported at the top of this module.
    from openai import OpenAI as OpenAIClient
    client = OpenAIClient()
user_message = ChatMessage(role=MessageRole.USER, content=content)
chat_message_dao.save_chat_history(conversation_id, user_message)
history = chat_message_dao.get_chat_history(conversation_id)
messages = [dict(content=c.content, role=c.role) for c in history]
messages = [
dict(
role=MessageRole.SYSTEM,
content=(
"assume you are an experienced golf coach, if the question has anything to do with golf, "
"please give short, simple, accurate, precise answer to the question, "
"limited to 80 words maximum. If the question has nothing to do with golf at all, please answer "
f"'{get_default_answer()}'."
)
),
] + messages
completion = client.chat.completions.create(
model=index_storage.current_model,
messages=messages,
temperature=0,
stream=True # again, we set stream=True
)
chunks = []
for chunk in completion:
finish_reason = chunk.choices[0].finish_reason
content = chunk.choices[0].delta.content
        if finish_reason == "stop" or finish_reason == "length":
            # reached the end: persist the full streamed answer, not just the final delta
            if content is not None:
                chunks.append(content)
            if chunks:
                bot_message = ChatMessage(role=MessageRole.ASSISTANT, content="".join(chunks))
                chat_message_dao.save_chat_history(conversation_id, bot_message)
            break
if content is None:
break
chunks.append(content)
logger.debug("Chunk message: %s", content)
yield content
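# Hedged example (added for illustration): stream_chat is an async generator, so a caller
# (e.g. a streaming HTTP endpoint) iterates it with `async for`. The conversation id below
# is an assumption.
async def demo_stream_chat() -> str:
    parts = []
    async for token in stream_chat("What is a handicap?", "demo-conversation"):
        parts.append(token)
    return "".join(parts)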
| [
"llama_index.llms.openai.OpenAI",
"llama_index.core.llms.ChatMessage",
"llama_index.core.response_synthesizers.get_response_synthesizer",
"llama_index.core.Prompt",
"llama_index.agent.openai.OpenAIAgent.from_tools",
"llama_index.core.postprocessor.SimilarityPostprocessor"
] | [((2418, 2434), 'app.llama_index_server.chat_message_dao.ChatMessageDao', 'ChatMessageDao', ([], {}), '()\n', (2432, 2434), False, 'from app.llama_index_server.chat_message_dao import ChatMessageDao\n'), ((3036, 3057), 'app.llama_index_server.index_storage.index_storage.index', 'index_storage.index', ([], {}), '()\n', (3055, 3057), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((3825, 3851), 'app.utils.data_util.get_doc_id', 'data_util.get_doc_id', (['text'], {}), '(text)\n', (3845, 3851), False, 'from app.utils import data_util\n'), ((3864, 3885), 'app.llama_index_server.index_storage.index_storage.mongo', 'index_storage.mongo', ([], {}), '()\n', (3883, 3885), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((4094, 4115), 'app.llama_index_server.index_storage.index_storage.index', 'index_storage.index', ([], {}), '()\n', (4113, 4115), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((4134, 4174), 'llama_index.core.Prompt', 'Prompt', (['PROMPT_TEMPLATE_FOR_QUERY_ENGINE'], {}), '(PROMPT_TEMPLATE_FOR_QUERY_ENGINE)\n', (4140, 4174), False, 'from llama_index.core import Prompt\n'), ((4341, 4402), 'app.utils.data_util.assert_not_none', 'data_util.assert_not_none', (['query_text', '"""query cannot be none"""'], {}), "(query_text, 'query cannot be none')\n", (4366, 4402), False, 'from app.utils import data_util\n'), ((4407, 4447), 'app.utils.log_util.logger.info', 'logger.info', (['f"""Query test: {query_text}"""'], {}), "(f'Query test: {query_text}')\n", (4418, 4447), False, 'from app.utils.log_util import logger\n'), ((5915, 5944), 'app.llama_index_server.index_storage.index_storage.add_doc', 'index_storage.add_doc', (['answer'], {}), '(answer)\n', (5936, 5944), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((5993, 6051), 'app.utils.data_util.assert_not_none', 'data_util.assert_not_none', (['doc_id', '"""doc_id cannot be none"""'], {}), "(doc_id, 'doc_id cannot be none')\n", (6018, 6051), False, 'from app.utils import data_util\n'), ((6056, 6109), 'app.utils.log_util.logger.info', 'logger.info', (['f"""Delete document with doc id: {doc_id}"""'], {}), "(f'Delete document with doc id: {doc_id}')\n", (6067, 6109), False, 'from app.utils.log_util import logger\n'), ((6121, 6153), 'app.llama_index_server.index_storage.index_storage.delete_doc', 'index_storage.delete_doc', (['doc_id'], {}), '(doc_id)\n', (6145, 6153), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((7158, 7256), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': 'index_storage.current_model', 'streaming': 'streaming', 'max_tokens': '(100)'}), '(temperature=0, model=index_storage.current_model, streaming=\n streaming, max_tokens=100)\n', (7164, 7256), False, 'from llama_index.llms.openai import OpenAI\n'), ((7459, 7626), 'llama_index.agent.openai.OpenAIAgent.from_tools', 'OpenAIAgent.from_tools', ([], {'tools': 'query_engine_tools', 'llm': 'chat_llm', 'chat_history': 'chat_history', 'verbose': '(True)', 'system_prompt': 'SYSTEM_PROMPT_TEMPLATE_FOR_CHAT_ENGINE'}), '(tools=query_engine_tools, llm=chat_llm, chat_history\n =chat_history, verbose=True, system_prompt=\n SYSTEM_PROMPT_TEMPLATE_FOR_CHAT_ENGINE)\n', (7481, 7626), False, 'from llama_index.agent.openai import OpenAIAgent\n'), ((8133, 8202), 'app.utils.data_util.assert_not_none', 'data_util.assert_not_none', (['query_text', '"""query content cannot be none"""'], {}), "(query_text, 'query content cannot 
be none')\n", (8158, 8202), False, 'from app.utils import data_util\n'), ((8222, 8276), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'query_text'}), '(role=MessageRole.USER, content=query_text)\n', (8233, 8276), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((9513, 9568), 'app.data.models.mongodb.Message.from_chat_message', 'Message.from_chat_message', (['conversation_id', 'bot_message'], {}), '(conversation_id, bot_message)\n', (9538, 9568), False, 'from app.data.models.mongodb import LlamaIndexDocumentMeta, LlamaIndexDocumentMetaReadable, Message\n'), ((9782, 9790), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (9788, 9790), False, 'from llama_index.llms.openai import OpenAI\n'), ((9810, 9861), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'content'}), '(role=MessageRole.USER, content=content)\n', (9821, 9861), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((1415, 1438), 'app.data.models.qa.get_default_answer_id', 'get_default_answer_id', ([], {}), '()\n', (1436, 1438), False, 'from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer\n'), ((2086, 2109), 'app.data.models.qa.get_default_answer_id', 'get_default_answer_id', ([], {}), '()\n', (2107, 2109), False, 'from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer\n'), ((3645, 3715), 'app.utils.log_util.logger.debug', 'logger.debug', (['f"""Found matched question from index: {matched_question}"""'], {}), "(f'Found matched question from index: {matched_question}')\n", (3657, 3715), False, 'from app.utils.log_util import logger\n'), ((3959, 3993), 'app.data.models.mongodb.LlamaIndexDocumentMeta', 'LlamaIndexDocumentMeta', ([], {}), '(**doc_meta)\n', (3981, 3993), False, 'from app.data.models.mongodb import LlamaIndexDocumentMeta, LlamaIndexDocumentMetaReadable, Message\n'), ((6298, 6340), 'app.data.models.mongodb.LlamaIndexDocumentMetaReadable', 'LlamaIndexDocumentMetaReadable', ([], {}), '(**doc_meta)\n', (6328, 6340), False, 'from app.data.models.mongodb import LlamaIndexDocumentMeta, LlamaIndexDocumentMetaReadable, Message\n'), ((6892, 7104), 'app.llama_index_server.my_query_engine_tool.MyQueryEngineTool.from_defaults', 'MyQueryEngineTool.from_defaults', ([], {'query_engine': 'local_query_engine', 'name': '"""local_query_engine"""', 'description': '"""Queries from a knowledge base consists of typical questions that a golf beginner might ask"""'}), "(query_engine=local_query_engine, name=\n 'local_query_engine', description=\n 'Queries from a knowledge base consists of typical questions that a golf beginner might ask'\n )\n", (6923, 7104), False, 'from app.llama_index_server.my_query_engine_tool import MyQueryEngineTool, MATCHED_MARK\n'), ((7381, 7424), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'c.role', 'content': 'c.content'}), '(role=c.role, content=c.content)\n', (7392, 7424), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((8711, 8731), 'app.data.models.qa.get_default_answer', 'get_default_answer', ([], {}), '()\n', (8729, 8731), False, 'from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer\n'), ((8879, 8946), 'app.utils.log_util.logger.debug', 'logger.debug', (['f"""An matched doc meta found from mongodb: {doc_meta}"""'], {}), "(f'An matched doc meta found from mongodb: {doc_meta}')\n", (8891, 8946), False, 'from 
app.utils.log_util import logger\n'), ((9127, 9191), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'doc_meta.answer'}), '(role=MessageRole.ASSISTANT, content=doc_meta.answer)\n', (9138, 9191), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((9286, 9347), 'app.utils.log_util.logger.warning', 'logger.warning', (['f"""\'{matched_doc_id}\' is not found in mongodb"""'], {}), '(f"\'{matched_doc_id}\' is not found in mongodb")\n', (9300, 9347), False, 'from app.utils.log_util import logger\n'), ((9370, 9432), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'response_text'}), '(role=MessageRole.ASSISTANT, content=response_text)\n', (9381, 9432), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((11412, 11454), 'app.utils.log_util.logger.debug', 'logger.debug', (['"""Chunk message: %s"""', 'content'], {}), "('Chunk message: %s', content)\n", (11424, 11454), False, 'from app.utils.log_util import logger\n'), ((3121, 3181), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {'response_mode': 'ResponseMode.NO_TEXT'}), '(response_mode=ResponseMode.NO_TEXT)\n', (3145, 3181), False, 'from llama_index.core.response_synthesizers import get_response_synthesizer, ResponseMode\n'), ((4679, 4746), 'app.utils.log_util.logger.debug', 'logger.debug', (['f"""An matched doc meta found from mongodb: {doc_meta}"""'], {}), "(f'An matched doc meta found from mongodb: {doc_meta}')\n", (4691, 4746), False, 'from app.utils.log_util import logger\n'), ((5465, 5526), 'app.utils.log_util.logger.warning', 'logger.warning', (['f"""\'{matched_doc_id}\' is not found in mongodb"""'], {}), '(f"\'{matched_doc_id}\' is not found in mongodb")\n', (5479, 5526), False, 'from app.utils.log_util import logger\n'), ((6211, 6232), 'app.llama_index_server.index_storage.index_storage.mongo', 'index_storage.mongo', ([], {}), '()\n', (6230, 6232), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((6696, 6717), 'app.llama_index_server.index_storage.index_storage.mongo', 'index_storage.mongo', ([], {}), '()\n', (6715, 6717), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((8735, 8758), 'app.data.models.qa.get_default_answer_id', 'get_default_answer_id', ([], {}), '()\n', (8756, 8758), False, 'from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer\n'), ((8988, 9024), 'app.utils.data_util.get_current_milliseconds', 'data_util.get_current_milliseconds', ([], {}), '()\n', (9022, 9024), False, 'from app.utils import data_util\n'), ((3234, 3294), 'llama_index.core.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': 'SIMILARITY_CUTOFF'}), '(similarity_cutoff=SIMILARITY_CUTOFF)\n', (3257, 3294), False, 'from llama_index.core.postprocessor import SimilarityPostprocessor\n'), ((4792, 4828), 'app.utils.data_util.get_current_milliseconds', 'data_util.get_current_milliseconds', ([], {}), '()\n', (4826, 4828), False, 'from app.utils import data_util\n'), ((5016, 5236), 'app.data.models.qa.Answer', 'Answer', ([], {'category': 'doc_meta.category', 'question': 'query_text', 'matched_question': 'matched_question', 'source': '(Source.KNOWLEDGE_BASE if doc_meta.source == Source.KNOWLEDGE_BASE else\n Source.USER_ASKED)', 'answer': 'doc_meta.answer'}), '(category=doc_meta.category, question=query_text, matched_question=\n 
matched_question, source=Source.KNOWLEDGE_BASE if doc_meta.source ==\n Source.KNOWLEDGE_BASE else Source.USER_ASKED, answer=doc_meta.answer)\n', (5022, 5236), False, 'from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer\n'), ((9034, 9055), 'app.llama_index_server.index_storage.index_storage.mongo', 'index_storage.mongo', ([], {}), '()\n', (9053, 9055), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((11171, 11227), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'content'}), '(role=MessageRole.ASSISTANT, content=content)\n', (11182, 11227), False, 'from llama_index.core.llms import ChatMessage, MessageRole\n'), ((4842, 4863), 'app.llama_index_server.index_storage.index_storage.mongo', 'index_storage.mongo', ([], {}), '()\n', (4861, 4863), False, 'from app.llama_index_server.index_storage import index_storage\n'), ((10564, 10584), 'app.data.models.qa.get_default_answer', 'get_default_answer', ([], {}), '()\n', (10582, 10584), False, 'from app.data.models.qa import Source, Answer, get_default_answer_id, get_default_answer\n')] |
from typing import List
from fastapi.responses import StreamingResponse
from app.utils.json import json_to_model
from app.utils.index import get_agent
from fastapi import APIRouter, Depends, HTTPException, Request, status
from llama_index.llms.base import MessageRole, ChatMessage
from llama_index.agent import OpenAIAgent
from pydantic import BaseModel
import logging
chat_router = r = APIRouter()
class _Message(BaseModel):
role: MessageRole
content: str
class _ChatData(BaseModel):
messages: List[_Message]
@r.post("")
async def chat(
request: Request,
# Note: To support clients sending a JSON object using content-type "text/plain",
# we need to use Depends(json_to_model(_ChatData)) here
data: _ChatData = Depends(json_to_model(_ChatData)),
agent: OpenAIAgent = Depends(get_agent),
):
logger = logging.getLogger("uvicorn")
# check preconditions and get last message
if len(data.messages) == 0:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="No messages provided",
)
lastMessage = data.messages.pop()
if lastMessage.role != MessageRole.USER:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Last message must be from user",
)
# convert messages coming from the request to type ChatMessage
messages = [
ChatMessage(
role=m.role,
content=m.content,
)
for m in data.messages
]
    # query the chat engine with the latest user message and the prior history
logger.info("Querying chat engine")
response = agent.stream_chat(lastMessage.content, messages)
# stream response
async def event_generator():
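        # first flush any events already queued by the agent's callback handler, then stream new tokens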
queue = agent.callback_manager.handlers[0].queue
while len(queue) > 0:
item = queue.pop(0)
yield item
for token in response.response_gen:
# If client closes connection, stop sending events
if await request.is_disconnected():
break
yield token
return StreamingResponse(event_generator(), media_type="text/plain")
| [
"llama_index.llms.base.ChatMessage"
] | [((390, 401), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (399, 401), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((809, 827), 'fastapi.Depends', 'Depends', (['get_agent'], {}), '(get_agent)\n', (816, 827), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((845, 873), 'logging.getLogger', 'logging.getLogger', (['"""uvicorn"""'], {}), "('uvicorn')\n", (862, 873), False, 'import logging\n'), ((757, 781), 'app.utils.json.json_to_model', 'json_to_model', (['_ChatData'], {}), '(_ChatData)\n', (770, 781), False, 'from app.utils.json import json_to_model\n'), ((967, 1057), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""No messages provided"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'No messages provided')\n", (980, 1057), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((1185, 1285), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""Last message must be from user"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'Last message must be from user')\n", (1198, 1285), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((1408, 1451), 'llama_index.llms.base.ChatMessage', 'ChatMessage', ([], {'role': 'm.role', 'content': 'm.content'}), '(role=m.role, content=m.content)\n', (1419, 1451), False, 'from llama_index.llms.base import MessageRole, ChatMessage\n')] |
import streamlit as st
from llama_index import VectorStoreIndex, ServiceContext, Document
from llama_index.llms import OpenAI
import openai
from llama_index import SimpleDirectoryReader
st.set_page_config(page_title="Converse com Resoluções do Bacen, powered by LlamaIndex", page_icon="🦙", layout="centered", initial_sidebar_state="auto", menu_items=None)
############### reduce top margin ################
st.markdown(
"""
<style>
.css-1y4p8pa {
padding-top: 0px;
}
</style>
""",
unsafe_allow_html=True,
)
############### hide hamburger menu ################
st.markdown(""" <style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style> """, unsafe_allow_html=True)
openai.api_key = st.secrets.openai_key
st.header("Converse 💬 com as Resoluções 4.966 e 352 do Banco Central e outras relacionadas, powered by LlamaIndex 🦙")
st.info("Código disponível neste [repositório Github](https://github.com/mvpalheta/4966_LLM)", icon="💡")
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{"role": "assistant", "content": "Me pergunte algo relacionado às Resoluções 4.966 e 352 do Banco Central!"}
]
@st.cache_resource(show_spinner=False, ttl="30min")
def load_data():
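    # read every file under ./data (recursively) and build a vector index; cached for 30 minutes by st.cache_resource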
with st.spinner(text="Loading and indexing the docs – hang tight! This should take 1-2 minutes."):
reader = SimpleDirectoryReader(input_dir="./data", recursive=True)
docs = reader.load_data()
service_context = ServiceContext.from_defaults(llm=OpenAI(model="gpt-3.5-turbo", temperature=0.5))
index = VectorStoreIndex.from_documents(docs, service_context=service_context)
return index
index = load_data()
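# "condense_question" mode rewrites each follow-up into a standalone question before querying the index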
chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
if prompt := st.chat_input("Sua pergunta"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Pensando..."):
response = chat_engine.chat(prompt)
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.llms.OpenAI"
] | [((187, 366), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Converse com Resoluções do Bacen, powered by LlamaIndex"""', 'page_icon': '"""🦙"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=\n 'Converse com Resoluções do Bacen, powered by LlamaIndex', page_icon=\n '🦙', layout='centered', initial_sidebar_state='auto', menu_items=None)\n", (205, 366), True, 'import streamlit as st\n'), ((409, 531), 'streamlit.markdown', 'st.markdown', (['"""\n<style>\n .css-1y4p8pa {\n padding-top: 0px;\n }\n</style>\n"""'], {'unsafe_allow_html': '(True)'}), '(\n """\n<style>\n .css-1y4p8pa {\n padding-top: 0px;\n }\n</style>\n"""\n , unsafe_allow_html=True)\n', (420, 531), True, 'import streamlit as st\n'), ((601, 733), 'streamlit.markdown', 'st.markdown', (['""" <style>\n#MainMenu {visibility: hidden;}\nfooter {visibility: hidden;}\n</style> """'], {'unsafe_allow_html': '(True)'}), '(\n """ <style>\n#MainMenu {visibility: hidden;}\nfooter {visibility: hidden;}\n</style> """\n , unsafe_allow_html=True)\n', (612, 733), True, 'import streamlit as st\n'), ((764, 891), 'streamlit.header', 'st.header', (['"""Converse 💬 com as Resoluções 4.966 e 352 do Banco Central e outras relacionadas, powered by LlamaIndex 🦙"""'], {}), "(\n 'Converse 💬 com as Resoluções 4.966 e 352 do Banco Central e outras relacionadas, powered by LlamaIndex 🦙'\n )\n", (773, 891), True, 'import streamlit as st\n'), ((882, 996), 'streamlit.info', 'st.info', (['"""Código disponível neste [repositório Github](https://github.com/mvpalheta/4966_LLM)"""'], {'icon': '"""💡"""'}), "(\n 'Código disponível neste [repositório Github](https://github.com/mvpalheta/4966_LLM)'\n , icon='💡')\n", (889, 996), True, 'import streamlit as st\n'), ((1241, 1291), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)', 'ttl': '"""30min"""'}), "(show_spinner=False, ttl='30min')\n", (1258, 1291), True, 'import streamlit as st\n'), ((1018, 1041), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1039, 1041), True, 'import streamlit as st\n'), ((1851, 1880), 'streamlit.chat_input', 'st.chat_input', (['"""Sua pergunta"""'], {}), "('Sua pergunta')\n", (1864, 1880), True, 'import streamlit as st\n'), ((1935, 2004), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (1967, 2004), True, 'import streamlit as st\n'), ((1318, 1420), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Loading and indexing the docs – hang tight! This should take 1-2 minutes."""'}), "(text=\n 'Loading and indexing the docs – hang tight! 
This should take 1-2 minutes.'\n )\n", (1328, 1420), True, 'import streamlit as st\n'), ((1429, 1486), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""./data"""', 'recursive': '(True)'}), "(input_dir='./data', recursive=True)\n", (1450, 1486), False, 'from llama_index import SimpleDirectoryReader\n'), ((1644, 1714), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (1675, 1714), False, 'from llama_index import VectorStoreIndex, ServiceContext, Document\n'), ((2091, 2123), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2106, 2123), True, 'import streamlit as st\n'), ((2133, 2161), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (2141, 2161), True, 'import streamlit as st\n'), ((2302, 2330), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2317, 2330), True, 'import streamlit as st\n'), ((2345, 2370), 'streamlit.spinner', 'st.spinner', (['"""Pensando..."""'], {}), "('Pensando...')\n", (2355, 2370), True, 'import streamlit as st\n'), ((2432, 2459), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2440, 2459), True, 'import streamlit as st\n'), ((2546, 2587), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (2578, 2587), True, 'import streamlit as st\n'), ((1580, 1626), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.5)'}), "(model='gpt-3.5-turbo', temperature=0.5)\n", (1586, 1626), False, 'from llama_index.llms import OpenAI\n')] |
"""Agent utils."""
from llama_index.core.agent.types import TaskStep
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.memory import BaseMemory
def add_user_step_to_memory(
step: TaskStep, memory: BaseMemory, verbose: bool = False
) -> None:
"""Add user step to memory."""
user_message = ChatMessage(content=step.input, role=MessageRole.USER)
memory.put(user_message)
if verbose:
print(f"Added user message to memory: {step.input}")
| [
"llama_index.core.base.llms.types.ChatMessage"
] | [((345, 399), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'content': 'step.input', 'role': 'MessageRole.USER'}), '(content=step.input, role=MessageRole.USER)\n', (356, 399), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n')] |
from llama_index.core.tools import FunctionTool
def calculate_average(*values):
"""
Calculates the average of the provided values.
"""
return sum(values) / len(values)
average_tool = FunctionTool.from_defaults(
fn=calculate_average
)
| [
"llama_index.core.tools.FunctionTool.from_defaults"
] | [((200, 248), 'llama_index.core.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'calculate_average'}), '(fn=calculate_average)\n', (226, 248), False, 'from llama_index.core.tools import FunctionTool\n')] |
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, ServiceContext, Document
def load_knowledge() -> list[Document]:
# Load data from directory
documents = SimpleDirectoryReader('knowledge').load_data()
return documents
def create_index() -> GPTVectorStoreIndex:
print('Creating new index')
# Load data
documents = load_knowledge()
# Create index from documents
service_context = ServiceContext.from_defaults(chunk_size_limit=3000)
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
# save_index(index)
return index
def save_index(index: GPTVectorStoreIndex):
# Save index to file
index.save_to_disk('knowledge/index.json')
def load_index() -> GPTVectorStoreIndex:
# Load index from file
try:
index = GPTVectorStoreIndex.load_from_disk('knowledge/index.json')
except FileNotFoundError:
index = create_index()
return index
def query_index(index: GPTVectorStoreIndex):
# Query index
query_engine = index.as_query_engine()
while True:
prompt = input("Type prompt...")
response = query_engine.query(prompt)
print(response)
def main():
# Ask user if they want to refresh the index
refresh_index = input("Do you want to refresh the index? (y/n) [n]: ")
refresh_index = refresh_index.lower() == 'y'
# If refreshing the index, create new index and save to file
if refresh_index:
index = create_index()
# Otherwise, load index from file
else:
index = load_index()
# Query index
query_index(index)
if __name__ == '__main__':
main()
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.GPTVectorStoreIndex.load_from_disk"
] | [((432, 483), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size_limit': '(3000)'}), '(chunk_size_limit=3000)\n', (460, 483), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, ServiceContext, Document\n'), ((496, 574), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (530, 574), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, ServiceContext, Document\n'), ((829, 887), 'llama_index.GPTVectorStoreIndex.load_from_disk', 'GPTVectorStoreIndex.load_from_disk', (['"""knowledge/index.json"""'], {}), "('knowledge/index.json')\n", (863, 887), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, ServiceContext, Document\n'), ((182, 216), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""knowledge"""'], {}), "('knowledge')\n", (203, 216), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, ServiceContext, Document\n')] |
import logging
import streamlit as st
from llama_index import (
OpenAIEmbedding,
ServiceContext,
SimpleDirectoryReader,
VectorStoreIndex,
)
from llama_index.llms import OpenAI
from streamlit_examples.utils.theme import initPage
from streamlit_examples.utils.streamlit import cache_file, upload_files
initPage("QueryPDFs")
st.write(
"Ask questions or create summaries or explanations on PDFs using [LlamaIndex](https://www.llamaindex.ai/)"
)
@st.cache_resource()
def get_service_context():
llm = OpenAI(
temperature=0.1, model="gpt-3.5-turbo", api_key=st.secrets["OPENAI_API_KEY"]
)
embed_model = OpenAIEmbedding()
return ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
@st.cache_data(show_spinner=False)
def query(filename, question):
logging.info(f"Asking '{question}' on '{filename}'")
documents = SimpleDirectoryReader(input_files=[filename]).load_data()
index = VectorStoreIndex.from_documents(
documents, service_context=get_service_context()
)
query_engine = index.as_query_engine()
return query_engine.query(question)
def get_question():
QUESTIONS = {
"Summarize": "What is a summary of this document?",
"Explain": "Explain this document",
}
mode = st.sidebar.selectbox("Select Mode", ("Summarize", "Explain", "Ask"))
if mode == "Ask":
question = st.sidebar.text_input("What's your question")
if not question:
st.sidebar.info("Please ask a question or select another mode.")
st.stop()
else:
question = QUESTIONS[mode]
return mode, question
mode, question = get_question()
# Upload PDFs
pdfs = upload_files(type="pdf", accept_multiple_files=True)
# Run the selected mode (summarize, explain or ask) on each PDF
for pdf in pdfs:
filename = cache_file(pdf, type="pdf")
with st.spinner(f"{mode} '{pdf.name}'..."):
summary = query(filename, question)
with st.expander(f"'{pdf.name}'", expanded=True):
st.markdown(summary)
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.OpenAIEmbedding",
"llama_index.llms.OpenAI",
"llama_index.SimpleDirectoryReader"
] | [((318, 339), 'streamlit_examples.utils.theme.initPage', 'initPage', (['"""QueryPDFs"""'], {}), "('QueryPDFs')\n", (326, 339), False, 'from streamlit_examples.utils.theme import initPage\n'), ((340, 466), 'streamlit.write', 'st.write', (['"""Ask questions or create summaries or explanations on PDFs using [LlamaIndex](https://www.llamaindex.ai/)"""'], {}), "(\n 'Ask questions or create summaries or explanations on PDFs using [LlamaIndex](https://www.llamaindex.ai/)'\n )\n", (348, 466), True, 'import streamlit as st\n'), ((466, 485), 'streamlit.cache_resource', 'st.cache_resource', ([], {}), '()\n', (483, 485), True, 'import streamlit as st\n'), ((735, 768), 'streamlit.cache_data', 'st.cache_data', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (748, 768), True, 'import streamlit as st\n'), ((1691, 1743), 'streamlit_examples.utils.streamlit.upload_files', 'upload_files', ([], {'type': '"""pdf"""', 'accept_multiple_files': '(True)'}), "(type='pdf', accept_multiple_files=True)\n", (1703, 1743), False, 'from streamlit_examples.utils.streamlit import cache_file, upload_files\n'), ((523, 612), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.1)', 'model': '"""gpt-3.5-turbo"""', 'api_key': "st.secrets['OPENAI_API_KEY']"}), "(temperature=0.1, model='gpt-3.5-turbo', api_key=st.secrets[\n 'OPENAI_API_KEY'])\n", (529, 612), False, 'from llama_index.llms import OpenAI\n'), ((640, 657), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (655, 657), False, 'from llama_index import OpenAIEmbedding, ServiceContext, SimpleDirectoryReader, VectorStoreIndex\n'), ((669, 731), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (697, 731), False, 'from llama_index import OpenAIEmbedding, ServiceContext, SimpleDirectoryReader, VectorStoreIndex\n'), ((804, 856), 'logging.info', 'logging.info', (['f"""Asking \'{question}\' on \'{filename}\'"""'], {}), '(f"Asking \'{question}\' on \'{filename}\'")\n', (816, 856), False, 'import logging\n'), ((1284, 1352), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Select Mode"""', "('Summarize', 'Explain', 'Ask')"], {}), "('Select Mode', ('Summarize', 'Explain', 'Ask'))\n", (1304, 1352), True, 'import streamlit as st\n'), ((1798, 1825), 'streamlit_examples.utils.streamlit.cache_file', 'cache_file', (['pdf'], {'type': '"""pdf"""'}), "(pdf, type='pdf')\n", (1808, 1825), False, 'from streamlit_examples.utils.streamlit import cache_file, upload_files\n'), ((1394, 1439), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""What\'s your question"""'], {}), '("What\'s your question")\n', (1415, 1439), True, 'import streamlit as st\n'), ((1835, 1872), 'streamlit.spinner', 'st.spinner', (['f"""{mode} \'{pdf.name}\'..."""'], {}), '(f"{mode} \'{pdf.name}\'...")\n', (1845, 1872), True, 'import streamlit as st\n'), ((1927, 1970), 'streamlit.expander', 'st.expander', (['f"""\'{pdf.name}\'"""'], {'expanded': '(True)'}), '(f"\'{pdf.name}\'", expanded=True)\n', (1938, 1970), True, 'import streamlit as st\n'), ((1980, 2000), 'streamlit.markdown', 'st.markdown', (['summary'], {}), '(summary)\n', (1991, 2000), True, 'import streamlit as st\n'), ((873, 918), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[filename]'}), '(input_files=[filename])\n', (894, 918), False, 'from llama_index import OpenAIEmbedding, ServiceContext, SimpleDirectoryReader, VectorStoreIndex\n'), ((1477, 
1541), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""Please ask a question or select another mode."""'], {}), "('Please ask a question or select another mode.')\n", (1492, 1541), True, 'import streamlit as st\n'), ((1554, 1563), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (1561, 1563), True, 'import streamlit as st\n')] |
# Ingest uploaded documents
from global_settings import STORAGE_PATH, INDEX_STORAGE, CACHE_FILE
from logging_functions import log_action
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.ingestion import IngestionPipeline, IngestionCache
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.core.extractors import SummaryExtractor
from llama_index.embeddings.openai import OpenAIEmbedding
def ingest_documents():
documents = SimpleDirectoryReader(
STORAGE_PATH,
filename_as_id = True
).load_data()
for doc in documents:
print(doc.id_)
log_action(
f"File '{doc.id_}' uploaded user",
action_type="UPLOAD"
)
try:
cached_hashes = IngestionCache.from_persist_path(
CACHE_FILE
)
print("Cache file found. Running using cache...")
    except Exception:
cached_hashes = ""
print("No cache file found. Running without cache...")
pipeline = IngestionPipeline(
transformations=[
TokenTextSplitter(
chunk_size=1024,
chunk_overlap=20
),
SummaryExtractor(summaries=['self']),
OpenAIEmbedding()
],
cache=cached_hashes
)
nodes = pipeline.run(documents=documents)
pipeline.cache.persist(CACHE_FILE)
return nodes
if __name__ == "__main__":
embedded_nodes = ingest_documents() | [
"llama_index.core.ingestion.IngestionCache.from_persist_path",
"llama_index.core.node_parser.TokenTextSplitter",
"llama_index.core.extractors.SummaryExtractor",
"llama_index.core.SimpleDirectoryReader",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((644, 711), 'logging_functions.log_action', 'log_action', (['f"""File \'{doc.id_}\' uploaded user"""'], {'action_type': '"""UPLOAD"""'}), '(f"File \'{doc.id_}\' uploaded user", action_type=\'UPLOAD\')\n', (654, 711), False, 'from logging_functions import log_action\n'), ((786, 830), 'llama_index.core.ingestion.IngestionCache.from_persist_path', 'IngestionCache.from_persist_path', (['CACHE_FILE'], {}), '(CACHE_FILE)\n', (818, 830), False, 'from llama_index.core.ingestion import IngestionPipeline, IngestionCache\n'), ((493, 549), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['STORAGE_PATH'], {'filename_as_id': '(True)'}), '(STORAGE_PATH, filename_as_id=True)\n', (514, 549), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((1089, 1141), 'llama_index.core.node_parser.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(1024)', 'chunk_overlap': '(20)'}), '(chunk_size=1024, chunk_overlap=20)\n', (1106, 1141), False, 'from llama_index.core.node_parser import TokenTextSplitter\n'), ((1202, 1238), 'llama_index.core.extractors.SummaryExtractor', 'SummaryExtractor', ([], {'summaries': "['self']"}), "(summaries=['self'])\n", (1218, 1238), False, 'from llama_index.core.extractors import SummaryExtractor\n'), ((1252, 1269), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (1267, 1269), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n')] |
import tiktoken
from llama_index.core import TreeIndex, SimpleDirectoryReader, Settings
from llama_index.core.llms.mock import MockLLM
from llama_index.core.callbacks import CallbackManager, TokenCountingHandler
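# MockLLM produces placeholder output, so token usage for index construction can be counted without calling a real model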
llm = MockLLM(max_tokens=256)
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode
)
callback_manager = CallbackManager([token_counter])
Settings.callback_manager = callback_manager
Settings.llm = llm
documents = SimpleDirectoryReader("cost_prediction_samples").load_data()
index = TreeIndex.from_documents(
documents=documents,
num_children=2,
show_progress=True)
print("Total LLM Token Count:", token_counter.total_llm_token_count)
| [
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.TreeIndex.from_documents",
"llama_index.core.llms.mock.MockLLM"
] | [((219, 242), 'llama_index.core.llms.mock.MockLLM', 'MockLLM', ([], {'max_tokens': '(256)'}), '(max_tokens=256)\n', (226, 242), False, 'from llama_index.core.llms.mock import MockLLM\n'), ((368, 400), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (383, 400), False, 'from llama_index.core.callbacks import CallbackManager, TokenCountingHandler\n'), ((545, 631), 'llama_index.core.TreeIndex.from_documents', 'TreeIndex.from_documents', ([], {'documents': 'documents', 'num_children': '(2)', 'show_progress': '(True)'}), '(documents=documents, num_children=2, show_progress\n =True)\n', (569, 631), False, 'from llama_index.core import TreeIndex, SimpleDirectoryReader, Settings\n'), ((475, 523), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""cost_prediction_samples"""'], {}), "('cost_prediction_samples')\n", (496, 523), False, 'from llama_index.core import TreeIndex, SimpleDirectoryReader, Settings\n'), ((295, 339), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-3.5-turbo"""'], {}), "('gpt-3.5-turbo')\n", (322, 339), False, 'import tiktoken\n')] |
import torch
from langchain.llms.base import LLM
from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, PromptHelper
from llama_index import LLMPredictor, ServiceContext
from transformers import pipeline
from typing import Optional, List, Mapping, Any
"""
To use a custom LLM model, you only need to implement the LLM class from Langchain. You are responsible for passing the prompt text to the model and returning the newly generated tokens.
facebook/opt-iml-max-30b
https://huggingface.co/facebook/opt-iml-max-30b/tree/main
"""
# define prompt helper
# set maximum input size
max_input_size = 2048
# set number of output tokens
num_output = 256
# set maximum chunk overlap
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
class CustomLLM(LLM):
model_name = "facebook/opt-iml-max-30b"
pipeline = pipeline("text-generation", model=model_name, device="cuda:0", model_kwargs={"torch_dtype":torch.bfloat16})
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
prompt_length = len(prompt)
response = self.pipeline(prompt, max_new_tokens=num_output)[0]["generated_text"]
# only return newly generated tokens
return response[prompt_length:]
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {"name_of_model": self.model_name}
@property
def _llm_type(self) -> str:
return "custom"
# define our LLM
llm_predictor = LLMPredictor(llm=CustomLLM())
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
# Load the your data
documents = SimpleDirectoryReader('./data').load_data()
index = GPTListIndex.from_documents(documents, service_context=service_context)
# Query and print response
query_engine = index.as_query_engine()
response = query_engine.query("<query_text>")
print(response) | [
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.GPTListIndex.from_documents",
"llama_index.PromptHelper"
] | [((616, 675), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (628, 675), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, PromptHelper\n'), ((1429, 1520), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (1457, 1520), False, 'from llama_index import LLMPredictor, ServiceContext\n'), ((1602, 1673), 'llama_index.GPTListIndex.from_documents', 'GPTListIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1629, 1673), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, PromptHelper\n'), ((759, 872), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model_name', 'device': '"""cuda:0"""', 'model_kwargs': "{'torch_dtype': torch.bfloat16}"}), "('text-generation', model=model_name, device='cuda:0', model_kwargs\n ={'torch_dtype': torch.bfloat16})\n", (767, 872), False, 'from transformers import pipeline\n'), ((1550, 1581), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data"""'], {}), "('./data')\n", (1571, 1581), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, PromptHelper\n')] |