code | apis | extract_api
---|---|---
import asyncio
import json
from typing import Any, Tuple, List
from langchain.base_language import BaseLanguageModel
from langchain.tools import DuckDuckGoSearchResults, BaseTool
from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext
from llama_index.response_synthesizers import TreeSummarize
class WebSearchTool(DuckDuckGoSearchResults):
name: str = "web_search"
description: str = \
"Useful for when you need to search answer in the internet. " \
"Input should be a search query (like you would google it). " \
"If relevant, include location and date to get more accurate results. " \
"You will get a list of urls and a short snippet of the page. "
async def _arun(self, *args: Any, **kwargs: Any) -> Any:
return self._run(*args, **kwargs)
class AskPagesTool(BaseTool):
llm: BaseLanguageModel
_page_loader = download_loader("SimpleWebPageReader")(html_to_text=True) # noqa
name: str = "ask_urls"
description: str = \
"You can ask a question about a URL. " \
"That smart tool will parse URL content and answer your question. " \
"Provide provide urls and questions in json format. " \
"urls is a list of urls to ask corresponding question from questions list" \
'Example: {"urls": ["https://en.wikipedia.org/wiki/Cat", "https://en.wikipedia.org/wiki/Dog"], ' \
'"questions": ["How many cats in the world?", "How many dogs in the world?"]}'
def _get_page_index(self, page: Document) -> GPTListIndex:
llm_predictor_chatgpt = LLMPredictor(self.llm)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor_chatgpt, chunk_size=1024)
doc_summary_index = GPTListIndex.from_documents(
[page],
service_context=service_context,
response_synthesizer=TreeSummarize(service_context=service_context)
)
return doc_summary_index
def _get_url_index(self, url: str) -> GPTListIndex:
page = self._page_loader.load_data(urls=[url])[0]
return self._get_page_index(page)
@staticmethod
def _parse_args(*args, **kwargs) -> List[Tuple[str, str]]:
if len(args) == 1:
urls_and_questions_dict = json.loads(args[0])
urls = urls_and_questions_dict["urls"]
questions = urls_and_questions_dict["questions"]
else:
urls = kwargs["urls"]
questions = kwargs["questions"]
if len(urls) > 1 and len(questions) == 1:
questions = questions * len(urls)
if len(questions) > 1 and len(urls) == 1:
urls = urls * len(questions)
if len(urls) != len(questions):
raise ValueError("Number of urls and questions should be equal")
return list(zip(urls, questions))
def _run_single(self, url: str, question: str) -> str:
page_index = self._get_url_index(url)
llm_predictor_chatgpt = LLMPredictor(self.llm)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor_chatgpt, chunk_size=1024)
query_engine = page_index.as_query_engine(
response_synthesizer=TreeSummarize(service_context=service_context), use_async=False)
response = query_engine.query(question)
return response.response
async def _arun_single(self, url: str, question: str) -> str:
page_index = self._get_url_index(url)
llm_predictor_chatgpt = LLMPredictor(self.llm)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor_chatgpt, chunk_size=1024)
query_engine = page_index.as_query_engine(
response_synthesizer=TreeSummarize(service_context=service_context), use_async=False)
response = await query_engine.aquery(question)
return response.response
def _run(self, *args, **kwargs) -> Any:
try:
urls_with_questions = self._parse_args(*args, **kwargs)
full_response = ""
for url, question in urls_with_questions:
answer = self._run_single(url, question)
full_response += f"Question: {question} to {url}\nAnswer: {answer}\n"
except Exception as e:
full_response = f"Error: {e}"
return full_response
async def _arun(self, *args, **kwargs) -> Any:
try:
urls_with_questions = self._parse_args(*args, **kwargs)
tasks = []
for url, question in urls_with_questions:
tasks.append(self._arun_single(url, question))
answers = await asyncio.gather(*tasks)
full_response = ""
for i in range(len(urls_with_questions)):
url, question = urls_with_questions[i]
answer = answers[i]
full_response += f"Question: {question} to {url}\nAnswer: {answer}\n"
except Exception as e:
full_response = f"Error: {e}"
return full_response
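# --- Illustrative usage sketch (added; not part of the original snippet) ----
# A quick check of the _parse_args broadcast rules defined above: a single
# question is repeated for every url, so each page gets asked the same thing.
# _parse_args is a staticmethod, so no LLM is needed to try it.
if __name__ == "__main__":
    pairs = AskPagesTool._parse_args(
        '{"urls": ["https://en.wikipedia.org/wiki/Cat", '
        '"https://en.wikipedia.org/wiki/Dog"], '
        '"questions": ["What is the average lifespan?"]}'
    )
    print(pairs)
    # [('https://en.wikipedia.org/wiki/Cat', 'What is the average lifespan?'),
    #  ('https://en.wikipedia.org/wiki/Dog', 'What is the average lifespan?')]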
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.response_synthesizers.TreeSummarize",
"llama_index.download_loader",
"llama_index.LLMPredictor"
] | [((916, 954), 'llama_index.download_loader', 'download_loader', (['"""SimpleWebPageReader"""'], {}), "('SimpleWebPageReader')\n", (931, 954), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((1600, 1622), 'llama_index.LLMPredictor', 'LLMPredictor', (['self.llm'], {}), '(self.llm)\n', (1612, 1622), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((1649, 1735), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor_chatgpt', 'chunk_size': '(1024)'}), '(llm_predictor=llm_predictor_chatgpt,\n chunk_size=1024)\n', (1677, 1735), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((2989, 3011), 'llama_index.LLMPredictor', 'LLMPredictor', (['self.llm'], {}), '(self.llm)\n', (3001, 3011), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((3038, 3124), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor_chatgpt', 'chunk_size': '(1024)'}), '(llm_predictor=llm_predictor_chatgpt,\n chunk_size=1024)\n', (3066, 3124), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((3496, 3518), 'llama_index.LLMPredictor', 'LLMPredictor', (['self.llm'], {}), '(self.llm)\n', (3508, 3518), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((3545, 3631), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor_chatgpt', 'chunk_size': '(1024)'}), '(llm_predictor=llm_predictor_chatgpt,\n chunk_size=1024)\n', (3573, 3631), False, 'from llama_index import download_loader, GPTListIndex, Document, LLMPredictor, ServiceContext\n'), ((2281, 2300), 'json.loads', 'json.loads', (['args[0]'], {}), '(args[0])\n', (2291, 2300), False, 'import json\n'), ((1887, 1933), 'llama_index.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context'}), '(service_context=service_context)\n', (1900, 1933), False, 'from llama_index.response_synthesizers import TreeSummarize\n'), ((3205, 3251), 'llama_index.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context'}), '(service_context=service_context)\n', (3218, 3251), False, 'from llama_index.response_synthesizers import TreeSummarize\n'), ((3712, 3758), 'llama_index.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context'}), '(service_context=service_context)\n', (3725, 3758), False, 'from llama_index.response_synthesizers import TreeSummarize\n'), ((4622, 4644), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (4636, 4644), False, 'import asyncio\n')] |
"""
This script demonstrates how to use the llama_index library to create and query a vector store index.
It loads documents from a directory, creates an index, and allows querying the index.
usage: python hello_persist.py --query "What is the author's name and job now?"
"""
import os
import sys
import argparse
import logging
from dotenv import load_dotenv
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage,
Settings,
)
from llama_index.embeddings.openai import OpenAIEmbedding
def main(query):
try:
# Load environment variables
load_dotenv()
# Configure logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# Configure embedding model
Settings.embed_model = OpenAIEmbedding(model_name="text-embedding-3-small")
# Set up storage directory
storage_directory = "./storage"
if not os.path.exists(storage_directory):
logging.info("Creating new index...")
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
index.storage_context.persist(persist_dir=storage_directory)
else:
logging.info("Loading existing index...")
storage_context = StorageContext.from_defaults(persist_dir=storage_directory)
index = load_index_from_storage(storage_context)
# Query the index
query_engine = index.as_query_engine()
response = query_engine.query(query)
print(response)
except Exception as e:
logging.error(f"An error occurred: {str(e)}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Query a vector store index.")
parser.add_argument("--query", default="What is the author's name and job now?", help="The query to ask the index.")
args = parser.parse_args()
    main(args.query)
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.SimpleDirectoryReader",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((1804, 1870), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Query a vector store index."""'}), "(description='Query a vector store index.')\n", (1827, 1870), False, 'import argparse\n'), ((628, 641), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (639, 641), False, 'from dotenv import load_dotenv\n'), ((679, 737), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (698, 737), False, 'import logging\n'), ((887, 939), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model_name': '"""text-embedding-3-small"""'}), "(model_name='text-embedding-3-small')\n", (902, 939), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((777, 817), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (798, 817), False, 'import logging\n'), ((1032, 1065), 'os.path.exists', 'os.path.exists', (['storage_directory'], {}), '(storage_directory)\n', (1046, 1065), False, 'import os\n'), ((1079, 1116), 'logging.info', 'logging.info', (['"""Creating new index..."""'], {}), "('Creating new index...')\n", (1091, 1116), False, 'import logging\n'), ((1203, 1245), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1234, 1245), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage, Settings\n'), ((1345, 1386), 'logging.info', 'logging.info', (['"""Loading existing index..."""'], {}), "('Loading existing index...')\n", (1357, 1386), False, 'import logging\n'), ((1417, 1476), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'storage_directory'}), '(persist_dir=storage_directory)\n', (1445, 1476), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage, Settings\n'), ((1497, 1537), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1520, 1537), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage, Settings\n'), ((746, 765), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (763, 765), False, 'import logging\n'), ((1141, 1170), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (1162, 1170), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage, Settings\n')] |
import streamlit as st
from llama_hub.youtube_transcript import YoutubeTranscriptReader
from llama_hub.youtube_transcript import is_youtube_video
from llama_index import (
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.prompts import ChatMessage, MessageRole
from llama_index.tools import QueryEngineTool, ToolMetadata
import os
# import openai
from llama_hub.tools.wikipedia import WikipediaToolSpec
from llama_index.agent import OpenAIAgent
from fetch_yt_metadata import fetch_youtube_metadata
video_url = None
with st.sidebar:
openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
if openai_api_key:
os.environ["OPENAI_API_KEY"] = openai_api_key
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
    video_url = st.text_input("Enter your video URL here:", key="video_url")
if video_url:
st.video(video_url)
if is_youtube_video(video_url):
metadata = fetch_youtube_metadata(video_url)
st.session_state["metadata"] = metadata
st.header("Metadata:")
for k, v in metadata.items():
if k == "video_description":
st.text_area("Description:", height=200, value=v, disabled=True)
else:
st.write(f"{k}: {v}")
st.text_area("Transcript:", height=200, value=st.session_state.get("transcript", ""))
if st.session_state.get("video_url"):
url = st.session_state.get("video_url")
st.write(f"Chat with {url}")
if "counter" not in st.session_state:
st.session_state.counter = 0
st.session_state.counter += 1
st.header(f"This page has run {st.session_state.counter} times.")
st.button("Run it again")
query_engine = None
transcript = None
if video_url:
video_id = video_url.split('=')[1].split('&')[0]
# check if storage already exists
PERSIST_DIR = f"./storage/{video_id}"
if not os.path.exists(PERSIST_DIR):
# load the documents and create the index
# documents = SimpleDirectoryReader("data").load_data()
loader = YoutubeTranscriptReader()
        documents = loader.load_data(ytlinks=[video_url])
        # build the index from the transcript documents
index = VectorStoreIndex.from_documents(documents)
# store it for later
index.storage_context.persist(persist_dir=PERSIST_DIR)
with open(f"{PERSIST_DIR}/transcript.txt", "w") as f:
for doc in documents:
f.write(doc.text)
else:
# load the existing index
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)
# either way we can now query the index
query_engine = index.as_query_engine()
if not st.session_state.get("summary"):
summary = query_engine.query("What's the video about?").response
st.session_state["summary"] = summary
if not st.session_state.get("transcript"):
transcript = open(f"{PERSIST_DIR}/transcript.txt").read()
st.session_state["transcript"] = transcript
st.title('💬 Talk2YouTube')
st.write(st.session_state.get("summary",'Load a youtube video and chat with it'))
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input():
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
vector_tool = QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(
name=f"VideoTranscript",
description=f"useful for when you want to answer queries about the content of the video.",
),
)
wiki_tool_spec = WikipediaToolSpec()
tools = wiki_tool_spec.to_tool_list() #+ query_engine_tools
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
agent = OpenAIAgent.from_tools([vector_tool], verbose=True, openai_api_key=st.session_state.get("chatbot_api_key"))
chat_history = [ChatMessage(role=MessageRole.USER if x.get("role","assistant") == "user" else "assistant", content=x.get("content","")) for x in st.session_state.messages]
response = agent.chat(prompt, chat_history=chat_history)
msg = {"role":"assistant", "content":response.response}
st.session_state.messages.append(msg)
st.chat_message("assistant").write(msg.get("content"))
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.tools.ToolMetadata",
"llama_index.StorageContext.from_defaults"
] | [((1466, 1499), 'streamlit.session_state.get', 'st.session_state.get', (['"""video_url"""'], {}), "('video_url')\n", (1486, 1499), True, 'import streamlit as st\n'), ((1684, 1749), 'streamlit.header', 'st.header', (['f"""This page has run {st.session_state.counter} times."""'], {}), "(f'This page has run {st.session_state.counter} times.')\n", (1693, 1749), True, 'import streamlit as st\n'), ((1750, 1775), 'streamlit.button', 'st.button', (['"""Run it again"""'], {}), "('Run it again')\n", (1759, 1775), True, 'import streamlit as st\n'), ((3161, 3187), 'streamlit.title', 'st.title', (['"""💬 Talk2YouTube"""'], {}), "('💬 Talk2YouTube')\n", (3169, 3187), True, 'import streamlit as st\n'), ((599, 670), 'streamlit.text_input', 'st.text_input', (['"""OpenAI API Key"""'], {'key': '"""chatbot_api_key"""', 'type': '"""password"""'}), "('OpenAI API Key', key='chatbot_api_key', type='password')\n", (612, 670), True, 'import streamlit as st\n'), ((840, 901), 'streamlit.text_input', 'st.text_input', (['"""\'Enter your video url here:"""'], {'key': '"""video_url"""'}), '("\'Enter your video url here:", key=\'video_url\')\n', (853, 901), True, 'import streamlit as st\n'), ((1511, 1544), 'streamlit.session_state.get', 'st.session_state.get', (['"""video_url"""'], {}), "('video_url')\n", (1531, 1544), True, 'import streamlit as st\n'), ((1549, 1577), 'streamlit.write', 'st.write', (['f"""Chat with {url}"""'], {}), "(f'Chat with {url}')\n", (1557, 1577), True, 'import streamlit as st\n'), ((3197, 3269), 'streamlit.session_state.get', 'st.session_state.get', (['"""summary"""', '"""Load a youtube video and chat with it"""'], {}), "('summary', 'Load a youtube video and chat with it')\n", (3217, 3269), True, 'import streamlit as st\n'), ((3511, 3526), 'streamlit.chat_input', 'st.chat_input', ([], {}), '()\n', (3524, 3526), True, 'import streamlit as st\n'), ((3926, 3945), 'llama_hub.tools.wikipedia.WikipediaToolSpec', 'WikipediaToolSpec', ([], {}), '()\n', (3943, 3945), False, 'from llama_hub.tools.wikipedia import WikipediaToolSpec\n'), ((4014, 4083), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (4046, 4083), True, 'import streamlit as st\n'), ((4547, 4584), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['msg'], {}), '(msg)\n', (4579, 4584), True, 'import streamlit as st\n'), ((928, 947), 'streamlit.video', 'st.video', (['video_url'], {}), '(video_url)\n', (936, 947), True, 'import streamlit as st\n'), ((959, 986), 'llama_hub.youtube_transcript.is_youtube_video', 'is_youtube_video', (['video_url'], {}), '(video_url)\n', (975, 986), False, 'from llama_hub.youtube_transcript import is_youtube_video\n'), ((1973, 2000), 'os.path.exists', 'os.path.exists', (['PERSIST_DIR'], {}), '(PERSIST_DIR)\n', (1987, 2000), False, 'import os\n'), ((2133, 2158), 'llama_hub.youtube_transcript.YoutubeTranscriptReader', 'YoutubeTranscriptReader', ([], {}), '()\n', (2156, 2158), False, 'from llama_hub.youtube_transcript import YoutubeTranscriptReader\n'), ((2288, 2330), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2319, 2330), False, 'from llama_index import VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2627, 2680), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIR'}), '(persist_dir=PERSIST_DIR)\n', (2655, 2680), False, 
'from llama_index import VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2697, 2737), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2720, 2737), False, 'from llama_index import VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2836, 2867), 'streamlit.session_state.get', 'st.session_state.get', (['"""summary"""'], {}), "('summary')\n", (2856, 2867), True, 'import streamlit as st\n'), ((2999, 3033), 'streamlit.session_state.get', 'st.session_state.get', (['"""transcript"""'], {}), "('transcript')\n", (3019, 3033), True, 'import streamlit as st\n'), ((3563, 3617), 'streamlit.info', 'st.info', (['"""Please add your OpenAI API key to continue."""'], {}), "('Please add your OpenAI API key to continue.')\n", (3570, 3617), True, 'import streamlit as st\n'), ((3626, 3635), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (3633, 3635), True, 'import streamlit as st\n'), ((1011, 1044), 'fetch_yt_metadata.fetch_youtube_metadata', 'fetch_youtube_metadata', (['video_url'], {}), '(video_url)\n', (1033, 1044), False, 'from fetch_yt_metadata import fetch_youtube_metadata\n'), ((1109, 1131), 'streamlit.header', 'st.header', (['"""Metadata:"""'], {}), "('Metadata:')\n", (1118, 1131), True, 'import streamlit as st\n'), ((3446, 3474), 'streamlit.chat_message', 'st.chat_message', (["msg['role']"], {}), "(msg['role'])\n", (3461, 3474), True, 'import streamlit as st\n'), ((3729, 3867), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': 'f"""VideoTranscript"""', 'description': 'f"""useful for when you want to answer queries about the content of the video."""'}), "(name=f'VideoTranscript', description=\n f'useful for when you want to answer queries about the content of the video.'\n )\n", (3741, 3867), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n'), ((4088, 4111), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (4103, 4111), True, 'import streamlit as st\n'), ((4205, 4244), 'streamlit.session_state.get', 'st.session_state.get', (['"""chatbot_api_key"""'], {}), "('chatbot_api_key')\n", (4225, 4244), True, 'import streamlit as st\n'), ((4589, 4617), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (4604, 4617), True, 'import streamlit as st\n'), ((1422, 1460), 'streamlit.session_state.get', 'st.session_state.get', (['"""transcript"""', '""""""'], {}), "('transcript', '')\n", (1442, 1460), True, 'import streamlit as st\n'), ((1239, 1303), 'streamlit.text_area', 'st.text_area', (['"""Description:"""'], {'height': '(200)', 'value': 'v', 'disabled': '(True)'}), "('Description:', height=200, value=v, disabled=True)\n", (1251, 1303), True, 'import streamlit as st\n'), ((1346, 1367), 'streamlit.write', 'st.write', (['f"""{k}: {v}"""'], {}), "(f'{k}: {v}')\n", (1354, 1367), True, 'import streamlit as st\n')] |
import os
from typing import Any, List, Optional
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
DEFAULT_TOKEN_JSON_PATH = 'token.json'
DEFAULT_SERVICE_ACCOUNT_JSON_PATH = 'service_account.json'
DEFAULT_CREDENTIALS_JSON_PATH = 'credentials.json'
HEADING_STYLE_TEMPLATE = 'HEADING_{}'
DEFAULT_QUESTION_HEADING_STYLE_NUM = 3
EXCLUDED_LLM_METADATA_KEYS = ['source', 'title', 'section_name']
EXCLUDED_EMBED_METADATA_KEYS = ['source', 'title']
SCOPES = ["https://www.googleapis.com/auth/documents.readonly"]
class FAQGoogleDocsReader(BasePydanticReader):
token_json_path: str = DEFAULT_TOKEN_JSON_PATH
service_account_json_path: str = DEFAULT_SERVICE_ACCOUNT_JSON_PATH
credentials_json_path: str = DEFAULT_CREDENTIALS_JSON_PATH
question_heading_style_num: int = DEFAULT_QUESTION_HEADING_STYLE_NUM
is_remote: bool = True
def __init__(self,
token_json_path: Optional[str] = DEFAULT_TOKEN_JSON_PATH,
service_account_json_path: Optional[str] = DEFAULT_SERVICE_ACCOUNT_JSON_PATH,
credentials_json_path: Optional[str] = DEFAULT_CREDENTIALS_JSON_PATH,
question_heading_style_num: Optional[int] = DEFAULT_QUESTION_HEADING_STYLE_NUM
) -> None:
"""Initialize with parameters."""
try:
import google # noqa
import google_auth_oauthlib # noqa
import googleapiclient # noqa
except ImportError as e:
raise ImportError(
'`google_auth_oauthlib`, `googleapiclient` and `google` '
'must be installed to use the GoogleDocsReader.\n'
'Please run `pip install --upgrade google-api-python-client '
'google-auth-httplib2 google-auth-oauthlib`.'
) from e
super().__init__(token_json_path=token_json_path,
service_account_json_path=service_account_json_path,
credentials_json_path=credentials_json_path,
question_heading_style_num=question_heading_style_num)
@classmethod
def class_name(cls) -> str:
return 'CustomGoogleDocsReader'
    def load_data(self, document_ids: List[str]) -> List[Document]:
        """Load data from the given Google Docs.
Args:
document_ids (List[str]): a list of document ids.
"""
if document_ids is None:
raise ValueError('Must specify a "document_ids" in `load_kwargs`.')
results = []
for document_id in document_ids:
docs = self._load_docs(document_id)
results.extend(docs)
return results
    def _load_docs(self, document_id: str) -> List[Document]:
"""Load a document from Google Docs.
Args:
document_id: the document id.
Returns:
            A list of Documents parsed from the Google Doc.
"""
import googleapiclient.discovery as discovery
credentials = self._get_credentials()
docs_service = discovery.build('docs', 'v1', credentials=credentials)
doc = docs_service.documents().get(documentId=document_id).execute()
doc_content = doc.get('body').get('content')
doc_source = f'https://docs.google.com/document/d/{document_id}/edit#heading='
return self._structural_elements_to_docs(doc_content, doc_source)
def _get_credentials(self) -> Any:
"""Get valid user credentials from storage.
The file token.json stores the user's access and refresh tokens, and is
created automatically when the authorization flow completes for the first
time.
Returns:
Credentials, the obtained credential.
"""
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
creds = None
if os.path.exists(self.token_json_path):
creds = Credentials.from_authorized_user_file(self.token_json_path, SCOPES)
elif os.path.exists(self.service_account_json_path):
return service_account.Credentials.from_service_account_file(
self.service_account_json_path, scopes=SCOPES
)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
self.credentials_json_path, SCOPES
)
creds = flow.run_local_server(port=8080)
# Save the credentials for the next run
with open(self.token_json_path, 'w') as token:
token.write(creds.to_json())
return creds
@staticmethod
def _read_paragraph_element(element: Any) -> Any:
"""Return the text in the given ParagraphElement.
Args:
element: a ParagraphElement from a Google Doc.
"""
text_run = element.get('textRun')
return text_run.get('content') if text_run else ''
@staticmethod
    def _get_text_from_paragraph_elements(elements: List[Any]) -> str:
return ''.join(FAQGoogleDocsReader._read_paragraph_element(elem) for elem in elements)
def _structural_elements_to_docs(self,
                                    doc_elements: List[Any],
                                    doc_source: str) -> List[Document]:
"""Recurse through a list of Structural Elements.
Read a document's text where text may be in nested elements.
Args:
doc_elements: a list of Structural Elements.
"""
docs = []
text = ''
heading_id = ''
section_name = ''
question_heading_style = HEADING_STYLE_TEMPLATE.format(self.question_heading_style_num)
section_heading_style = HEADING_STYLE_TEMPLATE.format(self.question_heading_style_num - 1)
for value in doc_elements:
if 'paragraph' in value:
paragraph = value['paragraph']
elements = paragraph.get('elements')
paragraph_text = FAQGoogleDocsReader._get_text_from_paragraph_elements(elements)
if 'paragraphStyle' in paragraph and 'headingId' in paragraph['paragraphStyle']:
named_style_type = paragraph['paragraphStyle']['namedStyleType']
if named_style_type in [
question_heading_style,
section_heading_style,
]:
# create previous document checking if it's not empty
if text != '':
node_metadata = {
'source': doc_source + heading_id,
'section_name': section_name,
'title': 'FAQ'
}
prev_doc = Document(text=text,
metadata=node_metadata,
excluded_embed_metadata_keys=EXCLUDED_EMBED_METADATA_KEYS,
excluded_llm_metadata_keys=EXCLUDED_LLM_METADATA_KEYS)
docs.append(prev_doc)
if named_style_type == question_heading_style:
heading_id = paragraph['paragraphStyle']['headingId']
text = paragraph_text
else:
section_name = paragraph_text
text = ''
else:
text += paragraph_text
        # flush the text accumulated after the last heading, otherwise the
        # final question/answer pair would be silently dropped
        if text != '':
            node_metadata = {
                'source': doc_source + heading_id,
                'section_name': section_name,
                'title': 'FAQ'
            }
            docs.append(Document(text=text,
                                 metadata=node_metadata,
                                 excluded_embed_metadata_keys=EXCLUDED_EMBED_METADATA_KEYS,
                                 excluded_llm_metadata_keys=EXCLUDED_LLM_METADATA_KEYS))
        return docs
if __name__ == '__main__':
reader = FAQGoogleDocsReader(service_account_json_path='../keys/service_account_key.json')
docs = reader.load_data(['1LpPanc33QJJ6BSsyxVg-pWNMplal84TdZtq10naIhD8'])
print(docs)
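    # Illustrative follow-up (added sketch, not in the original file): the FAQ
    # documents produced above can be dropped straight into a llama_index
    # vector index. Assumes an OpenAI API key is configured; the import mirrors
    # the llama_index.core layout already used in this module.
    from llama_index.core import VectorStoreIndex
    faq_index = VectorStoreIndex.from_documents(docs)
    print(faq_index.as_query_engine().query('What topics does this FAQ cover?'))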
| [
"llama_index.core.schema.Document"
] | [((3044, 3098), 'googleapiclient.discovery.build', 'discovery.build', (['"""docs"""', '"""v1"""'], {'credentials': 'credentials'}), "('docs', 'v1', credentials=credentials)\n", (3059, 3098), True, 'import googleapiclient.discovery as discovery\n'), ((4002, 4038), 'os.path.exists', 'os.path.exists', (['self.token_json_path'], {}), '(self.token_json_path)\n', (4016, 4038), False, 'import os\n'), ((4060, 4127), 'google.oauth2.credentials.Credentials.from_authorized_user_file', 'Credentials.from_authorized_user_file', (['self.token_json_path', 'SCOPES'], {}), '(self.token_json_path, SCOPES)\n', (4097, 4127), False, 'from google.oauth2.credentials import Credentials\n'), ((4141, 4187), 'os.path.exists', 'os.path.exists', (['self.service_account_json_path'], {}), '(self.service_account_json_path)\n', (4155, 4187), False, 'import os\n'), ((4208, 4313), 'google.oauth2.service_account.Credentials.from_service_account_file', 'service_account.Credentials.from_service_account_file', (['self.service_account_json_path'], {'scopes': 'SCOPES'}), '(self.\n service_account_json_path, scopes=SCOPES)\n', (4261, 4313), False, 'from google.oauth2 import service_account\n'), ((4604, 4681), 'google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file', 'InstalledAppFlow.from_client_secrets_file', (['self.credentials_json_path', 'SCOPES'], {}), '(self.credentials_json_path, SCOPES)\n', (4645, 4681), False, 'from google_auth_oauthlib.flow import InstalledAppFlow\n'), ((4552, 4561), 'google.auth.transport.requests.Request', 'Request', ([], {}), '()\n', (4559, 4561), False, 'from google.auth.transport.requests import Request\n'), ((7140, 7307), 'llama_index.core.schema.Document', 'Document', ([], {'text': 'text', 'metadata': 'node_metadata', 'excluded_embed_metadata_keys': 'EXCLUDED_EMBED_METADATA_KEYS', 'excluded_llm_metadata_keys': 'EXCLUDED_LLM_METADATA_KEYS'}), '(text=text, metadata=node_metadata, excluded_embed_metadata_keys=\n EXCLUDED_EMBED_METADATA_KEYS, excluded_llm_metadata_keys=\n EXCLUDED_LLM_METADATA_KEYS)\n', (7148, 7307), False, 'from llama_index.core.schema import Document\n')] |
import utils
import os
import openai
import sys
from dotenv import load_dotenv
load_dotenv()
api_key = os.getenv("API_KEY")
openai.api_key = api_key
os.environ['OPENAI_API_KEY'] = api_key
#
# examples
# https://github.com/kevintsai/Building-and-Evaluating-Advanced-RAG-Applications
#
# SimpleDirectoryReader is a class that reads all the files in a directory and returns a list of documents
# It will select the best file reader based on the file extensions
# https://docs.llamaindex.ai/en/stable/examples/data_connectors/simple_directory_reader.html
#
# Load all (top-level) files from directory
# ,input_dir="/"
# ,input=files="/asdf.pdf"
# ,required_exts=[".pdf", ".txt", ".md"] <- extensions to read
# ,recursive=True
# docs = reader.load_data()
# print(f"Loaded {len(docs)} docs")
#
# llamaindex
from llama_index import SimpleDirectoryReader,VectorStoreIndex,ServiceContext,Document
from llama_index.llms import OpenAI
#from langchain_community.llms import OpenAI
documents = SimpleDirectoryReader(
input_files=["data/Analisis_Decreto_de_Necesidad_y_Urgencia_Bases_para_la_Reconstrucción.pdf"],
).load_data()
print(type(documents), "\n")
print(len(documents), "\n")
print(type(documents[0]))
print(documents[0])
print(f"Loaded {len(documents)} pages docs") # pages
# basic RAG pipeline
# Document is a class that represents a document
document = Document(text="\n\n".join([doc.text for doc in documents]))
# llm declare
# bge-small-en-v1.5 is a model that was trained on the BGE dataset
# https://huggingface.co/BAAI/bge-small-en-v1.5
# FlagEmbedding can map any text to a low-dimensional dense vector which can be used for tasks like retrieval, classification, clustering, or semantic search. And it also can be used in vector databases for LLMs.
llm = OpenAI(model="gpt-4-1106-preview", temperature=0.0)
service_context = ServiceContext.from_defaults(
llm=llm, embed_model="local:BAAI/bge-small-en-v1.5"
)
index = VectorStoreIndex.from_documents([document],
service_context=service_context)
query_engine = index.as_query_engine()
# query
response = query_engine.query(
"""
Contexto: Eres el mejor analista de documentos de leyes con un IQ de 150. Tienes que ser minucioso y necesito que revises la totalidad de las paginas del documento, sin dejar nada por fuera. Se experto en el tema y no me falles.
Pregunta: devolver en forma de items la totalidad de los temas que trata el documento presentado de forma minuciosa.
Forma de respuesta: El texto suministrado es en español y la respuesta la necesito en español.
"""
)
print(str(response))
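# Illustrative extension (added sketch, not in the original script): persist the
# index so the PDF does not have to be re-embedded on every run. The directory
# name "./storage" is an assumption.
index.storage_context.persist(persist_dir="./storage")
# Later runs could then reload it instead of rebuilding (sketch):
# from llama_index import StorageContext, load_index_from_storage
# index = load_index_from_storage(
#     StorageContext.from_defaults(persist_dir="./storage"),
#     service_context=service_context,
# )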
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.SimpleDirectoryReader"
] | [((82, 95), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (93, 95), False, 'from dotenv import load_dotenv\n'), ((106, 126), 'os.getenv', 'os.getenv', (['"""API_KEY"""'], {}), "('API_KEY')\n", (115, 126), False, 'import os\n'), ((1774, 1825), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""', 'temperature': '(0.0)'}), "(model='gpt-4-1106-preview', temperature=0.0)\n", (1780, 1825), False, 'from llama_index.llms import OpenAI\n'), ((1844, 1930), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local:BAAI/bge-small-en-v1.5"""'}), "(llm=llm, embed_model=\n 'local:BAAI/bge-small-en-v1.5')\n", (1872, 1930), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, Document\n'), ((1941, 2017), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[document]'], {'service_context': 'service_context'}), '([document], service_context=service_context)\n', (1972, 2017), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, Document\n'), ((977, 1105), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['data/Analisis_Decreto_de_Necesidad_y_Urgencia_Bases_para_la_Reconstrucción.pdf'\n ]"}), "(input_files=[\n 'data/Analisis_Decreto_de_Necesidad_y_Urgencia_Bases_para_la_Reconstrucción.pdf'\n ])\n", (998, 1105), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, Document\n')] |
# Import the necessary libraries
import random
import time
from llama_index.llms import OpenAI
import streamlit as st
from llama_index import VectorStoreIndex, ServiceContext, StorageContext, set_global_service_context
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.embeddings import LangchainEmbedding
import chromadb
from llama_index.vector_stores import ChromaVectorStore
from llama_index.storage.storage_context import StorageContext
from llama_index.node_parser import SentenceSplitter
from llama_index.indices.prompt_helper import PromptHelper
import re
from llama_index.chat_engine import CondensePlusContextChatEngine
from llama_index.indices.vector_store.retrievers import VectorIndexRetriever
from langchain_openai import ChatOpenAI
from llama_index.postprocessor import RankGPTRerank
# Streamlit interface
st.title('🦜🔗 Tourism Assistant Chatbot')
#First run, initialize the context and the chat engine
if "init" not in st.session_state:
st.session_state.init = True
system_prompt = (
'''
#### Task Instructions:
You are a friendly and knowledgeable tourism assistant, helping users with their queries related to tourism, travel, dining, events, and any related questions. Your goal is to provide accurate and useful information. If there's information you don't know, respond truthfully. Add a touch of personality and humor to engage users.
    Always end your responses by asking the user if there's anything else you can help with.
#### Personalization & Tone:
Maintain an upbeat and helpful tone, embodying the role of a helpful travel assistant. Inject personality and humor into responses to make interactions more enjoyable.
#### Context for User Input:
Always consider the user's input in the context of tourism, travel, and related topics. If a question is outside this scope, respond with a friendly reminder of your expertise and limitations.
    If a question is outside the travel domain or anything related to it, kindly remind the user that the question is not in your scope of expertise (cf. "Tell me a joke!" example below).
#### Creativity & Style Guidance:
Craft responses that are not only informative but also creative. Avoid short and plain answers; instead, provide engaging and well-elaborated responses.
#### External Knowledge & Data:
Base your responses on the dataset of events and places, ensuring accuracy in facts. If the dataset doesn't have information, clearly state that you don't have the specific data.
#### Handling Non-Travel Related Questions:
If a user asks a question outside the scope of travel, respond creatively but firmly, reminding the user of the bot's expertise in the travel domain. Redirect the conversation back to travel-related topics or provide a gentle refusal.
#### Rules & Guardrails:
Adhere to ethical standards. If a user request involves prohibited content or actions, respond appropriately and within the bounds of ethical guidelines.
#### Output Verification Standards:
Maintain a commitment to accuracy. If there's uncertainty in information, it's better to express that you're not sure rather than providing potentially inaccurate details.
#### Benefits of System Prompts:
1. **Character Maintenance:** Engage users with a consistent and friendly persona for longer conversations.
2. **Creativity:** Exhibit creative and natural behavior to enhance user experience.
3. **Rule Adherence:** Follow instructions carefully to avoid prohibited tasks or text.
### Example User Interactions:
**User: Recommend a trendy restaurant in Paris.**
> "Ah, Paris - the city of love and incredible cuisine! 🥖 How about checking out 'La Mode Bistro'? It's not just a restaurant; it's a fashion show for your taste buds! 😋"
**User: What's the best way to explore Tokyo on a budget?**
> "Exploring Tokyo without breaking the bank? 🏮 How about hopping on the efficient and cost-friendly metro, grabbing some street food in Harajuku, and exploring the free admission areas of beautiful parks like Ueno! 🌸"
**User: Any upcoming events in New York City?**
> "NYC, the city that never sleeps! 🗽 Let me check my event database for you. One moment... 🕵️♂️ Ah, there's a fantastic art festival in Chelsea this weekend! 🎨"
**User: Tell me a joke!**
> "While I'm better at recommending travel spots, here's a quick one for you: Why don't scientists trust atoms? Because they make up everything! 😄 Now, anything travel-related you'd like to know?"
**User: What's the capital of France?**
> "Ah, testing my geography knowledge, are we? 😄 The capital of France is Paris! 🇫🇷 Now, if you have any travel-related questions, I'm your go-to guide!"
**User: Can you help me with my math homework?**
> "Ah, numbers are a bit outside my travel-savvy brain! 😅 If you have any questions about amazing destinations or travel tips, though, I'm all ears!"
''')
#temperature adjustable at will
st.session_state.service_context = ServiceContext.from_defaults(llm=ChatOpenAI(model="gpt-3.5-turbo", temperature=0.9),
prompt_helper = PromptHelper(),
embed_model= LangchainEmbedding(HuggingFaceEmbeddings(model_name='dangvantuan/sentence-camembert-large')), #in case of new embeddings, possibility to add "model_kwargs = {'device': 'cuda:0'}" to the HuggingFaceEmbeddings call to use GPU
node_parser=SentenceSplitter(),
system_prompt=system_prompt,
)
set_global_service_context(st.session_state.service_context)
# create or get a chroma collection
st.session_state.chroma_collection = chromadb.PersistentClient(path="./chroma_db").get_or_create_collection("tourism_db")
# assign chroma as the vector_store to the context
st.session_state.storage_context = StorageContext.from_defaults(vector_store=ChromaVectorStore(chroma_collection=st.session_state.chroma_collection))
#get the index
st.session_state.index = VectorStoreIndex.from_vector_store(ChromaVectorStore(chroma_collection=st.session_state.chroma_collection),
storage_context=st.session_state.storage_context, service_context=st.session_state.service_context)
#example of context and condense prompt adjustability
#context_prompt= "Base the reply to the user question mainly on the Description field of the context "
#condense_prompt = " "
st.session_state.retriever=VectorIndexRetriever(st.session_state.index, similarity_top_k=10) #or index.as_retriever(service_context=service_context, search_kwargs={"k": 10})
#I chose to use the RankGPTRerank postprocessor to rerank the top 4 results from the retriever over other rerankers like LLMRerank that wasn't working as expected
reranker = RankGPTRerank(
llm=OpenAI(
model="gpt-3.5-turbo",
temperature=0.0),
top_n=4,
verbose=True,
)
st.session_state.chat_engine = CondensePlusContextChatEngine.from_defaults(
retriever=st.session_state.retriever,
query_engine=st.session_state.index.as_query_engine(service_context=st.session_state.service_context,
retriever=st.session_state.retriever),
service_context=st.session_state.service_context,
system_prompt=system_prompt,
node_postprocessors=[reranker],
#condense_prompt=DEFAULT_CONDENSE_PROMPT_TEMPLATE,
#context_prompt=DEFAULT_CONTEXT_PROMPT_TEMPLATE,
verbose=True,
)
#initialize the chat history
st.session_state.messages = []
#initialize the assistant with a random greeting
assistant_response = random.choice(
[
"Hello there! How can I assist you today?",
"Good day human! I'm here to answer questions about travel. What do you need help with?",
"Hello! My name is Minotour2.0. Please feel free to ask me any questions about trips, destinations or planning.",
"Welcome! I'm an AI assistant focused on travel. How may I assist you in finding your next adventure?",
"Greetings! What are your travel plans or questions? I'm happy to provide any information I can.",
"Hi there, traveler! I'm your virtual travel guide - where would you like to go or what do you need help planning?",
"What brings you here today? I'm your assistant for all things related to getting away - what destination interests you?",
"Salutations! Let me know if you need advice on flights, hotels or activities for an upcoming journey.",
"Hello friend, I'm here to help with travel queries. What questions can I answer for you?",
"Welcome, I'm your assistant available to help with transportation, lodging or other travel logistics. How can I assist you?",
]
)
st.session_state.messages.append({"role": "assistant", "content": assistant_response})
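    # --- Illustrative note (added sketch, not part of the original app): this
    # assumes the "tourism_db" Chroma collection was already populated. A
    # one-off ingestion script could look roughly like this (the reader and the
    # data path are assumptions):
    #   from llama_index import SimpleDirectoryReader, VectorStoreIndex
    #   documents = SimpleDirectoryReader("./tourism_data").load_data()
    #   VectorStoreIndex.from_documents(documents,
    #                                   storage_context=storage_context,
    #                                   service_context=service_context)
    # with storage_context/service_context built exactly as above.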
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
def handle_chat(question):
if question.lower() == "reset":
st.session_state.chat_engine.reset()
st.session_state.messages = []
return "The conversation has been reset."
else:
response = st.session_state.chat_engine.chat(question)
cleaned_response = re.sub(r"(AI: |AI Assistant: |assistant: )", "", re.sub(r"^user: .*$", "", str(response), flags=re.MULTILINE))
return cleaned_response
if user_input := st.chat_input("Please enter your question:"):
if user_input.lower() == "exit":
st.warning('Goodbye')
st.stop()
else:
with st.chat_message("user"):
st.markdown(user_input)
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": user_input})
# Handle chat and get the response
response = handle_chat(user_input)
# Display assistant response in chat message container
with st.chat_message("assistant"):
full_response = ""
message_placeholder = st.empty()
for chunk in response.split():
full_response += chunk + " "
time.sleep(0.05)
# Add a blinking cursor to simulate typing
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
# Add assistant response to chat history
            st.session_state.messages.append({"role": "assistant", "content": full_response})
| [
"llama_index.indices.vector_store.retrievers.VectorIndexRetriever",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.llms.OpenAI",
"llama_index.indices.prompt_helper.PromptHelper",
"llama_index.set_global_service_context",
"llama_index.node_parser.SentenceSplitter"
] | [((855, 895), 'streamlit.title', 'st.title', (['"""🦜🔗 Tourism Assistant Chatbot"""'], {}), "('🦜🔗 Tourism Assistant Chatbot')\n", (863, 895), True, 'import streamlit as st\n'), ((5721, 5781), 'llama_index.set_global_service_context', 'set_global_service_context', (['st.session_state.service_context'], {}), '(st.session_state.service_context)\n', (5747, 5781), False, 'from llama_index import VectorStoreIndex, ServiceContext, StorageContext, set_global_service_context\n'), ((6706, 6771), 'llama_index.indices.vector_store.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', (['st.session_state.index'], {'similarity_top_k': '(10)'}), '(st.session_state.index, similarity_top_k=10)\n', (6726, 6771), False, 'from llama_index.indices.vector_store.retrievers import VectorIndexRetriever\n'), ((8706, 9817), 'random.choice', 'random.choice', (['[\'Hello there! How can I assist you today?\',\n "Good day human! I\'m here to answer questions about travel. What do you need help with?"\n ,\n \'Hello! My name is Minotour2.0. Please feel free to ask me any questions about trips, destinations or planning.\'\n ,\n "Welcome! I\'m an AI assistant focused on travel. How may I assist you in finding your next adventure?"\n ,\n "Greetings! What are your travel plans or questions? I\'m happy to provide any information I can."\n ,\n "Hi there, traveler! I\'m your virtual travel guide - where would you like to go or what do you need help planning?"\n ,\n "What brings you here today? I\'m your assistant for all things related to getting away - what destination interests you?"\n ,\n \'Salutations! Let me know if you need advice on flights, hotels or activities for an upcoming journey.\'\n ,\n "Hello friend, I\'m here to help with travel queries. What questions can I answer for you?"\n ,\n "Welcome, I\'m your assistant available to help with transportation, lodging or other travel logistics. How can I assist you?"\n ]'], {}), '([\'Hello there! How can I assist you today?\',\n "Good day human! I\'m here to answer questions about travel. What do you need help with?"\n ,\n \'Hello! My name is Minotour2.0. Please feel free to ask me any questions about trips, destinations or planning.\'\n ,\n "Welcome! I\'m an AI assistant focused on travel. How may I assist you in finding your next adventure?"\n ,\n "Greetings! What are your travel plans or questions? I\'m happy to provide any information I can."\n ,\n "Hi there, traveler! I\'m your virtual travel guide - where would you like to go or what do you need help planning?"\n ,\n "What brings you here today? I\'m your assistant for all things related to getting away - what destination interests you?"\n ,\n \'Salutations! Let me know if you need advice on flights, hotels or activities for an upcoming journey.\'\n ,\n "Hello friend, I\'m here to help with travel queries. What questions can I answer for you?"\n ,\n "Welcome, I\'m your assistant available to help with transportation, lodging or other travel logistics. 
How can I assist you?"\n ])\n', (8719, 9817), False, 'import random\n'), ((9979, 10069), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': assistant_response}"], {}), "({'role': 'assistant', 'content':\n assistant_response})\n", (10011, 10069), True, 'import streamlit as st\n'), ((10705, 10749), 'streamlit.chat_input', 'st.chat_input', (['"""Please enter your question:"""'], {}), "('Please enter your question:')\n", (10718, 10749), True, 'import streamlit as st\n'), ((6243, 6314), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'st.session_state.chroma_collection'}), '(chroma_collection=st.session_state.chroma_collection)\n', (6260, 6314), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((10169, 10201), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (10184, 10201), True, 'import streamlit as st\n'), ((10211, 10242), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (10222, 10242), True, 'import streamlit as st\n'), ((10315, 10351), 'streamlit.session_state.chat_engine.reset', 'st.session_state.chat_engine.reset', ([], {}), '()\n', (10349, 10351), True, 'import streamlit as st\n'), ((10470, 10513), 'streamlit.session_state.chat_engine.chat', 'st.session_state.chat_engine.chat', (['question'], {}), '(question)\n', (10503, 10513), True, 'import streamlit as st\n'), ((10796, 10817), 'streamlit.warning', 'st.warning', (['"""Goodbye"""'], {}), "('Goodbye')\n", (10806, 10817), True, 'import streamlit as st\n'), ((10826, 10835), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (10833, 10835), True, 'import streamlit as st\n'), ((10984, 11057), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': user_input}"], {}), "({'role': 'user', 'content': user_input})\n", (11016, 11057), True, 'import streamlit as st\n'), ((11715, 11800), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (11747, 11800), True, 'import streamlit as st\n'), ((4991, 5041), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.9)'}), "(model='gpt-3.5-turbo', temperature=0.9)\n", (5001, 5041), False, 'from langchain_openai import ChatOpenAI\n'), ((5128, 5142), 'llama_index.indices.prompt_helper.PromptHelper', 'PromptHelper', ([], {}), '()\n', (5140, 5142), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((5529, 5547), 'llama_index.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (5545, 5547), False, 'from llama_index.node_parser import SentenceSplitter\n'), ((5864, 5909), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (5889, 5909), False, 'import chromadb\n'), ((6086, 6157), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'st.session_state.chroma_collection'}), '(chroma_collection=st.session_state.chroma_collection)\n', (6103, 6157), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((7071, 7117), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.0)'}), "(model='gpt-3.5-turbo', temperature=0.0)\n", (7077, 7117), False, 'from llama_index.llms 
import OpenAI\n'), ((7506, 7637), 'streamlit.session_state.index.as_query_engine', 'st.session_state.index.as_query_engine', ([], {'service_context': 'st.session_state.service_context', 'retriever': 'st.session_state.retriever'}), '(service_context=st.session_state.\n service_context, retriever=st.session_state.retriever)\n', (7544, 7637), True, 'import streamlit as st\n'), ((10859, 10882), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (10874, 10882), True, 'import streamlit as st\n'), ((10896, 10919), 'streamlit.markdown', 'st.markdown', (['user_input'], {}), '(user_input)\n', (10907, 10919), True, 'import streamlit as st\n'), ((11250, 11278), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (11265, 11278), True, 'import streamlit as st\n'), ((11345, 11355), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (11353, 11355), True, 'import streamlit as st\n'), ((5244, 5316), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""dangvantuan/sentence-camembert-large"""'}), "(model_name='dangvantuan/sentence-camembert-large')\n", (5265, 5316), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((11460, 11476), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (11470, 11476), False, 'import time\n')] |
import os
import json
import logging
import sys
import requests
from dotenv import load_dotenv
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from llama_index.core import VectorStoreIndex, Document
from llama_index.tools.brave_search import BraveSearchToolSpec
from llama_index.readers.web import SimpleWebPageReader
# Constants
USER_AGENT = 'Mozilla/5.0 (compatible; YourBot/1.0; +http://yourwebsite.com/bot.html)'
HEADERS = {'User-Agent': USER_AGENT}
RETRIES = Retry(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])
def setup_logging():
"""
Initialize logging configuration to output logs to stdout.
"""
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
def load_environment_variables():
"""
Load environment variables from the .env file.
:return: The Brave API key.
"""
load_dotenv()
return os.getenv('BRAVE_API_KEY')
def perform_search(query, api_key):
"""
Perform a search using the Brave Search API.
:param query: The search query.
:param api_key: The Brave API key.
:return: The search response.
"""
tool_spec = BraveSearchToolSpec(api_key=api_key)
return tool_spec.brave_search(query=query)
def extract_search_results(response):
"""
Extract search results from the Brave Search API response.
:param response: The search response.
:return: A list of search results.
"""
documents = [doc.text for doc in response]
search_results = []
for document in documents:
response_data = json.loads(document)
search_results.extend(response_data.get('web', {}).get('results', []))
return search_results
def scrape_web_pages(search_results):
"""
Scrape web pages from the URLs obtained from the search results.
:param search_results: The list of search results.
:return: A list of scraped documents.
"""
session = requests.Session()
session.mount('http://', HTTPAdapter(max_retries=RETRIES))
session.mount('https://', HTTPAdapter(max_retries=RETRIES))
all_documents = []
for result in search_results:
url = result.get('url')
try:
response = session.get(url, headers=HEADERS, timeout=10)
response.raise_for_status()
doc = Document(text=response.text, url=url)
all_documents.append(doc)
except requests.exceptions.RequestException as e:
logging.error(f"Failed to scrape {url}: {e}")
return all_documents
def main():
"""
Main function to orchestrate the search, scraping, and querying process.
"""
setup_logging()
api_key = load_environment_variables()
my_query = "What is the latest news about llamaindex?"
response = perform_search(my_query, api_key)
search_results = extract_search_results(response)
all_documents = scrape_web_pages(search_results)
# Load all the scraped documents into the vector store
index = VectorStoreIndex.from_documents(all_documents)
# Use the index to query with the language model
query_engine = index.as_query_engine()
response = query_engine.query(my_query)
print(response)
if __name__ == "__main__":
    main()
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.Document",
"llama_index.tools.brave_search.BraveSearchToolSpec"
] | [((496, 569), 'urllib3.util.retry.Retry', 'Retry', ([], {'total': '(5)', 'backoff_factor': '(0.1)', 'status_forcelist': '[500, 502, 503, 504]'}), '(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])\n', (501, 569), False, 'from urllib3.util.retry import Retry\n'), ((675, 733), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (694, 733), False, 'import logging\n'), ((949, 962), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (960, 962), False, 'from dotenv import load_dotenv\n'), ((974, 1000), 'os.getenv', 'os.getenv', (['"""BRAVE_API_KEY"""'], {}), "('BRAVE_API_KEY')\n", (983, 1000), False, 'import os\n'), ((1228, 1264), 'llama_index.tools.brave_search.BraveSearchToolSpec', 'BraveSearchToolSpec', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (1247, 1264), False, 'from llama_index.tools.brave_search import BraveSearchToolSpec\n'), ((1998, 2016), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2014, 2016), False, 'import requests\n'), ((3052, 3098), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['all_documents'], {}), '(all_documents)\n', (3083, 3098), False, 'from llama_index.core import VectorStoreIndex, Document\n'), ((769, 809), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (790, 809), False, 'import logging\n'), ((1637, 1657), 'json.loads', 'json.loads', (['document'], {}), '(document)\n', (1647, 1657), False, 'import json\n'), ((2046, 2078), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'RETRIES'}), '(max_retries=RETRIES)\n', (2057, 2078), False, 'from requests.adapters import HTTPAdapter\n'), ((2110, 2142), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'RETRIES'}), '(max_retries=RETRIES)\n', (2121, 2142), False, 'from requests.adapters import HTTPAdapter\n'), ((738, 757), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (755, 757), False, 'import logging\n'), ((2374, 2411), 'llama_index.core.Document', 'Document', ([], {'text': 'response.text', 'url': 'url'}), '(text=response.text, url=url)\n', (2382, 2411), False, 'from llama_index.core import VectorStoreIndex, Document\n'), ((2520, 2565), 'logging.error', 'logging.error', (['f"""Failed to scrape {url}: {e}"""'], {}), "(f'Failed to scrape {url}: {e}')\n", (2533, 2565), False, 'import logging\n')] |
import qdrant_client
from llama_index.llms import Ollama
from llama_index import (
VectorStoreIndex,
ServiceContext,
)
from llama_index.vector_stores.qdrant import QdrantVectorStore
# re-initialize the vector store
client = qdrant_client.QdrantClient(
path="./qdrant_data"
)
vector_store = QdrantVectorStore(client=client, collection_name="tweets")
# get the LLM again
llm = Ollama(model="mistral")
service_context = ServiceContext.from_defaults(llm=llm,embed_model="local")
# load the index from the vector store
index = VectorStoreIndex.from_vector_store(vector_store=vector_store,service_context=service_context)
def rag_pipeline(query):
if query is not None:
query_engine = index.as_query_engine(similarity_top_k=20)
response = query_engine.query(query)
return response
else:
return "i am sorry. i cannot answer you for this due to some error in data" | [
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.llms.Ollama"
] | [((233, 281), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""./qdrant_data"""'}), "(path='./qdrant_data')\n", (259, 281), False, 'import qdrant_client\n'), ((303, 361), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': '"""tweets"""'}), "(client=client, collection_name='tweets')\n", (320, 361), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((389, 412), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': '"""mistral"""'}), "(model='mistral')\n", (395, 412), False, 'from llama_index.llms import Ollama\n'), ((431, 489), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (459, 489), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((536, 634), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (570, 634), False, 'from llama_index import VectorStoreIndex, ServiceContext\n')] |
import os
from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage
from llama_index.readers.file import PDFReader
# def get_index(data, index_name):
# index = None
# if not os.path.exists(index_name):
# print('Building index', index_name)
# index = VectorStoreIndex.from_documents(data, show_progress=True)
# index.storage_context.persist(persist_dir=index_name)
# else :
# index = load_index_from_storage(
# StorageContext.from_defaults(persist_dir=index_name)
# )
# return index
# pdf_path = os.path.join('data', 'Malaysia.pdf')
# malaysia_pdf = PDFReader().load_data(file=pdf_path)
# malaysia_index = get_index(malaysia_pdf, 'malaysia')
# malaysia_engine = malaysia_index.as_query_engine()
# malaysia_engine.query()
def get_index(data_files, index_name):
index = None
data = []
for file_path in data_files:
reader = PDFReader()
data.extend(reader.load_data(file=file_path))
if not os.path.exists(index_name):
print(f'Building index {index_name}')
index = VectorStoreIndex.from_documents(data, show_progress=True)
index.storage_context.persist(persist_dir=index_name)
else:
index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=index_name)
)
return index
file_paths = [
os.path.join('data', 'Malaysia.pdf'),
# Add more file paths here
]
combined_index = get_index(file_paths, 'combined_index')
combined_engine = combined_index.as_query_engine() | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.StorageContext.from_defaults",
"llama_index.readers.file.PDFReader"
] | [((1404, 1440), 'os.path.join', 'os.path.join', (['"""data"""', '"""Malaysia.pdf"""'], {}), "('data', 'Malaysia.pdf')\n", (1416, 1440), False, 'import os\n'), ((952, 963), 'llama_index.readers.file.PDFReader', 'PDFReader', ([], {}), '()\n', (961, 963), False, 'from llama_index.readers.file import PDFReader\n'), ((1030, 1056), 'os.path.exists', 'os.path.exists', (['index_name'], {}), '(index_name)\n', (1044, 1056), False, 'import os\n'), ((1120, 1177), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['data'], {'show_progress': '(True)'}), '(data, show_progress=True)\n', (1151, 1177), False, 'from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((1303, 1355), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'index_name'}), '(persist_dir=index_name)\n', (1331, 1355), False, 'from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage\n')] |
from llama_index.retrievers import BaseRetriever
from llama_index import QueryBundle
from llama_index.schema import NodeWithScore
from llama_index.vector_stores import VectorStoreQuery
from typing import List, Sequence, Any
from llama_index.tools import BaseTool, adapt_to_async_tool
from llama_index import Document, VectorStoreIndex
class ToolRetriever(BaseRetriever):
def __init__(
self,
tools: Sequence[BaseTool],
sql_tools: Sequence[BaseTool],
embed_model: Any,
index: VectorStoreIndex = None,
message: str = "",
append_sql: bool = True,
similarity_top_k: int = 8,
logger=None,
) -> None:
self._message = message
self._tools = tools
self._index = index
self._sql_tools = sql_tools
self._append_sql = append_sql
self._similarity_top_k = similarity_top_k
self._embed_model = embed_model
self._logger = logger
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve."""
from llama_index.retrievers import VectorIndexRetriever
retriever = VectorIndexRetriever(
index=self._index,
similarity_top_k=self._similarity_top_k,
)
response = retriever.retrieve(query_bundle)
tools_ = []
for n in response:
tools_.append(self._tools[n.metadata["idx"]])
if self._append_sql:
tools_.append(self._sql_tools)
# tools_.append(self._tools[-1]) # add SQL tool
self._logger.debug(f"Tools before: {self._tools}")
_tmp = set(adapt_to_async_tool(t) for t in tools_)
self._logger.debug(f"Tools after: {list(_tmp)}")
return list(_tmp)
# return [adapt_to_async_tool(t) for t in tools_]
def create_vector_index_from_tools(self):
from llama_index.tools import adapt_to_async_tool
get_tools = lambda _: self._tools
tools = [adapt_to_async_tool(t) for t in get_tools("")]
docs = [
str(
"idx: "
+ str(idx)
+ ", name: "
+ str(t.metadata.name)
+ ", description: "
+ str(t.metadata.description)
)
for idx, t in enumerate(tools)
]
documents = [
Document(text=t, metadata={"idx": idx}) for idx, t in enumerate(docs)
]
self._index = VectorStoreIndex.from_documents(
documents, embed_model=self._embed_model
)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.tools.adapt_to_async_tool",
"llama_index.Document"
] | [((1143, 1228), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'self._index', 'similarity_top_k': 'self._similarity_top_k'}), '(index=self._index, similarity_top_k=self._similarity_top_k\n )\n', (1163, 1228), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((2469, 2542), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'embed_model': 'self._embed_model'}), '(documents, embed_model=self._embed_model)\n', (2500, 2542), False, 'from llama_index import Document, VectorStoreIndex\n'), ((1982, 2004), 'llama_index.tools.adapt_to_async_tool', 'adapt_to_async_tool', (['t'], {}), '(t)\n', (2001, 2004), False, 'from llama_index.tools import adapt_to_async_tool\n'), ((2366, 2405), 'llama_index.Document', 'Document', ([], {'text': 't', 'metadata': "{'idx': idx}"}), "(text=t, metadata={'idx': idx})\n", (2374, 2405), False, 'from llama_index import Document, VectorStoreIndex\n'), ((1635, 1657), 'llama_index.tools.adapt_to_async_tool', 'adapt_to_async_tool', (['t'], {}), '(t)\n', (1654, 1657), False, 'from llama_index.tools import adapt_to_async_tool\n')] |
from typing import List
from fastapi.responses import StreamingResponse
from app.utils.json import json_to_model
from app.utils.index import get_index
from fastapi import APIRouter, Depends, HTTPException, Request, status
from llama_index import VectorStoreIndex
from llama_index.llms.base import MessageRole, ChatMessage
from pydantic import BaseModel
chat_router = r = APIRouter()
class _Message(BaseModel):
role: MessageRole
content: str
class _ChatData(BaseModel):
messages: List[_Message]
@r.post("")
async def chat(
request: Request,
# Note: To support clients sending a JSON object using content-type "text/plain",
# we need to use Depends(json_to_model(_ChatData)) here
data: _ChatData = Depends(json_to_model(_ChatData)),
index: VectorStoreIndex = Depends(get_index),
):
# check preconditions and get last message
if len(data.messages) == 0:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="No messages provided",
)
lastMessage = data.messages.pop()
if lastMessage.role != MessageRole.USER:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Last message must be from user",
)
# convert messages coming from the request to type ChatMessage
messages = [
ChatMessage(
role=m.role,
content=m.content,
)
for m in data.messages
]
# query chat engine
chat_engine = index.as_chat_engine(
chat_mode="context",
sparse_top_k=12,
vector_store_query_mode="hybrid",
similarity_top_k=2,
system_prompt=(
"You are a chatbot, able to have normal interactions, as well as talk"
" about an Grade 3 Unit Tests, Holidays and Dairy of the School."
),
verbose=False,
)
response = chat_engine.stream_chat(lastMessage.content, messages)
# stream response
async def event_generator():
for token in response.response_gen:
# If client closes connection, stop sending events
if await request.is_disconnected():
break
yield token
return StreamingResponse(event_generator(), media_type="text/plain") | [
"llama_index.llms.base.ChatMessage"
] | [((374, 385), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (383, 385), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((798, 816), 'fastapi.Depends', 'Depends', (['get_index'], {}), '(get_index)\n', (805, 816), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((741, 765), 'app.utils.json.json_to_model', 'json_to_model', (['_ChatData'], {}), '(_ChatData)\n', (754, 765), False, 'from app.utils.json import json_to_model\n'), ((914, 1004), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""No messages provided"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'No messages provided')\n", (927, 1004), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((1132, 1232), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""Last message must be from user"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'Last message must be from user')\n", (1145, 1232), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((1355, 1398), 'llama_index.llms.base.ChatMessage', 'ChatMessage', ([], {'role': 'm.role', 'content': 'm.content'}), '(role=m.role, content=m.content)\n', (1366, 1398), False, 'from llama_index.llms.base import MessageRole, ChatMessage\n')] |
import os
from llama_index import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage,
)
BOT_NAME = os.environ["BOT_NAME"]
def construct_index(directory_data, directory_index, force_reload=False):
# check if storage already exists
if not os.path.exists(directory_index) or force_reload:
print(f'Creating new index using {directory_data}')
# load the documents and create the index
documents = SimpleDirectoryReader(directory_data).load_data()
index = VectorStoreIndex.from_documents(documents)
# store it for later
index.storage_context.persist(persist_dir=directory_index)
print(f'Storing new index to {directory_index}')
else:
# load the existing index
print(f'Loading existing index from {directory_index}')
storage_context = StorageContext.from_defaults(persist_dir=directory_index)
index = load_index_from_storage(storage_context)
return index
def query(question, index):
query_engine = index.as_query_engine()
response = query_engine.query(question)
return response
def ask(bot_name):
index = construct_index(directory_data=f'data/{bot_name}', directory_index=f'storage/{bot_name}')
while True:
question = input("What do you want to know?")
response = query(question=question, index=index)
print(f"{bot_name} says: {response}")
if __name__ == '__main__':
ask(BOT_NAME) | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults"
] | [((541, 583), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (572, 583), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((871, 928), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'directory_index'}), '(persist_dir=directory_index)\n', (899, 928), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((945, 985), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (968, 985), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((296, 327), 'os.path.exists', 'os.path.exists', (['directory_index'], {}), '(directory_index)\n', (310, 327), False, 'import os\n'), ((475, 512), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_data'], {}), '(directory_data)\n', (496, 512), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage\n')] |
from llama_index import SimpleDirectoryReader, ServiceContext, VectorStoreIndex
from llama_index.llms import OpenAI, ChatMessage, MessageRole
from llama_index.chat_engine.condense_plus_context import CondensePlusContextChatEngine
from dotenv import load_dotenv
import os
load_dotenv()
vector_index = None
history = []
def initializeService():
global vector_index
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.5)
promptFile = open('./data/prompt.txt')
prompt = promptFile.read()
#print("Using the following system prompt: ", prompt, sep='\n')
service_context = ServiceContext.from_defaults(
llm=llm, system_prompt=prompt)
try:
reader = SimpleDirectoryReader(
input_dir='./data/context', recursive=False)
docs = reader.load_data()
except ValueError:
print(
f"Context directory is empty, using only prompt")
docs = []
vector_index = VectorStoreIndex.from_documents(
docs, service_context=service_context)
def loadChat():
global vector_index
global history
query_engine = vector_index.as_query_engine()
chat_history = list(map(lambda item: ChatMessage(
role=item['source'], content=item['message']),
history
))
chat_engine = CondensePlusContextChatEngine.from_defaults(
query_engine,
chat_history=chat_history
)
return chat_engine
def chat(message):
global history
history.append({'source': MessageRole.USER, 'message': message})
chat_engine = loadChat()
response = chat_engine.chat(message)
history.append({'source': MessageRole.SYSTEM, 'message': response.response})
return response.response
if __name__ == "__main__":
initializeService()
question = input("Ask me anything: ")
while question != "exit":
print(chat(question))
question = input("Ask me anything: ") | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.llms.ChatMessage",
"llama_index.chat_engine.condense_plus_context.CondensePlusContextChatEngine.from_defaults"
] | [((271, 284), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (282, 284), False, 'from dotenv import load_dotenv\n'), ((379, 425), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.5)'}), "(model='gpt-3.5-turbo', temperature=0.5)\n", (385, 425), False, 'from llama_index.llms import OpenAI, ChatMessage, MessageRole\n'), ((592, 651), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'system_prompt': 'prompt'}), '(llm=llm, system_prompt=prompt)\n', (620, 651), False, 'from llama_index import SimpleDirectoryReader, ServiceContext, VectorStoreIndex\n'), ((939, 1009), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (970, 1009), False, 'from llama_index import SimpleDirectoryReader, ServiceContext, VectorStoreIndex\n'), ((1282, 1371), 'llama_index.chat_engine.condense_plus_context.CondensePlusContextChatEngine.from_defaults', 'CondensePlusContextChatEngine.from_defaults', (['query_engine'], {'chat_history': 'chat_history'}), '(query_engine, chat_history=\n chat_history)\n', (1325, 1371), False, 'from llama_index.chat_engine.condense_plus_context import CondensePlusContextChatEngine\n'), ((687, 753), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""./data/context"""', 'recursive': '(False)'}), "(input_dir='./data/context', recursive=False)\n", (708, 753), False, 'from llama_index import SimpleDirectoryReader, ServiceContext, VectorStoreIndex\n'), ((1172, 1229), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': "item['source']", 'content': "item['message']"}), "(role=item['source'], content=item['message'])\n", (1183, 1229), False, 'from llama_index.llms import OpenAI, ChatMessage, MessageRole\n')] |
import streamlit as st
import openai
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores import FaissVectorStore
from llama_index.storage.index_store import SimpleIndexStore
from llama_index import load_index_from_storage
from llama_index.storage.storage_context import StorageContext
from llama_index.query_engine import CitationQueryEngine
@st.cache_resource
def preprocess_prelimnary():
storage_context = StorageContext.from_defaults(docstore = SimpleDocumentStore.from_persist_dir(persist_dir = "persist_new"),
vector_store = FaissVectorStore.from_persist_dir(persist_dir = "persist_new"),
index_store = SimpleIndexStore.from_persist_dir(persist_dir = "persist_new"))
index = load_index_from_storage(storage_context = storage_context)
query_engine = CitationQueryEngine.from_args(index, similarity_top_k = 3, citation_chunk_size = 1024)
return query_engine
openai.api_key = st.secrets['OPENAI_API_KEY']
st.set_page_config(layout = 'wide', page_title = 'Precedents Database')
st.title('Query Precedents')
q_e = preprocess_prelimnary()
query = st.text_area(label = 'Enter your query involving Indian Legal Precedents.')
# model = st.selectbox(label = 'Select a model', options = ['gpt-3.5-turbo', 'gpt-4'])
start = st.button(label = 'Start')
base_append = ""
if start:
st.subheader('Query Response -')
database_answer = q_e.query(query + base_append)
st.write(database_answer.response)
st.subheader('Actual Sources -')
for i in range(len(database_answer.source_nodes)):
st.write(database_answer.source_nodes[i].node.get_text())
st.write(f'Case Name - {database_answer.source_nodes[i].node.extra_info["file_name"]}') | [
"llama_index.storage.index_store.SimpleIndexStore.from_persist_dir",
"llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir",
"llama_index.query_engine.CitationQueryEngine.from_args",
"llama_index.vector_stores.FaissVectorStore.from_persist_dir",
"llama_index.load_index_from_storage"
] | [((983, 1050), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""', 'page_title': '"""Precedents Database"""'}), "(layout='wide', page_title='Precedents Database')\n", (1001, 1050), True, 'import streamlit as st\n'), ((1056, 1084), 'streamlit.title', 'st.title', (['"""Query Precedents"""'], {}), "('Query Precedents')\n", (1064, 1084), True, 'import streamlit as st\n'), ((1125, 1198), 'streamlit.text_area', 'st.text_area', ([], {'label': '"""Enter your query involving Indian Legal Precedents."""'}), "(label='Enter your query involving Indian Legal Precedents.')\n", (1137, 1198), True, 'import streamlit as st\n'), ((1297, 1321), 'streamlit.button', 'st.button', ([], {'label': '"""Start"""'}), "(label='Start')\n", (1306, 1321), True, 'import streamlit as st\n'), ((746, 802), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context'}), '(storage_context=storage_context)\n', (769, 802), False, 'from llama_index import load_index_from_storage\n'), ((824, 910), 'llama_index.query_engine.CitationQueryEngine.from_args', 'CitationQueryEngine.from_args', (['index'], {'similarity_top_k': '(3)', 'citation_chunk_size': '(1024)'}), '(index, similarity_top_k=3,\n citation_chunk_size=1024)\n', (853, 910), False, 'from llama_index.query_engine import CitationQueryEngine\n'), ((1357, 1389), 'streamlit.subheader', 'st.subheader', (['"""Query Response -"""'], {}), "('Query Response -')\n", (1369, 1389), True, 'import streamlit as st\n'), ((1447, 1481), 'streamlit.write', 'st.write', (['database_answer.response'], {}), '(database_answer.response)\n', (1455, 1481), True, 'import streamlit as st\n'), ((1486, 1518), 'streamlit.subheader', 'st.subheader', (['"""Actual Sources -"""'], {}), "('Actual Sources -')\n", (1498, 1518), True, 'import streamlit as st\n'), ((1648, 1745), 'streamlit.write', 'st.write', (['f"""Case Name - {database_answer.source_nodes[i].node.extra_info[\'file_name\']}"""'], {}), '(\n f"Case Name - {database_answer.source_nodes[i].node.extra_info[\'file_name\']}"\n )\n', (1656, 1745), True, 'import streamlit as st\n'), ((494, 557), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': '"""persist_new"""'}), "(persist_dir='persist_new')\n", (530, 557), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((584, 644), 'llama_index.vector_stores.FaissVectorStore.from_persist_dir', 'FaissVectorStore.from_persist_dir', ([], {'persist_dir': '"""persist_new"""'}), "(persist_dir='persist_new')\n", (617, 644), False, 'from llama_index.vector_stores import FaissVectorStore\n'), ((670, 730), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': '"""persist_new"""'}), "(persist_dir='persist_new')\n", (703, 730), False, 'from llama_index.storage.index_store import SimpleIndexStore\n')] |
import streamlit as st
from dotenv import load_dotenv
load_dotenv()
import os
import tempfile
from llama_index import SimpleDirectoryReader, StorageContext, LLMPredictor
from llama_index import VectorStoreIndex
from llama_index import ServiceContext
from llama_index.embeddings.langchain import LangchainEmbedding
from langchain.chat_models import ChatOpenAI
import tiktoken
from langchain.embeddings import CohereEmbeddings
import openai
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
openai.api_key = st.secrets["OPENAI_API_KEY"]
os.environ["COHERE_API_KEY"] = st.secrets["COHERE_API_KEY"]
llm_predictor = LLMPredictor(llm = ChatOpenAI(temperature = 0, model_name = 'gpt-3.5-turbo', max_tokens = -1, openai_api_key = openai.api_key))
embed_model = LangchainEmbedding(CohereEmbeddings(model = "embed-english-light-v2.0"))
storage_context = StorageContext.from_defaults()
service_context = ServiceContext.from_defaults(llm_predictor = llm_predictor, embed_model = embed_model)
def num_tokens_from_string(string: str, encoding_name: str) -> int:
encoding = tiktoken.encoding_for_model(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
@st.cache_resource
def preprocessing(uploaded_file):
if uploaded_file:
temp_dir = tempfile.TemporaryDirectory()
file_path = os.path.join(temp_dir.name, uploaded_file.name)
with open(file_path, "wb") as f:
f.write(uploaded_file.read())
document = SimpleDirectoryReader(input_files = [file_path]).load_data()
tokens = num_tokens_from_string(document[0].text, 'gpt-3.5-turbo')
global context
context = document[0].text
if tokens <= 4000:
print('Case - A')
return context
else:
print('Case - B')
index = VectorStoreIndex.from_documents(document, service_context = service_context, storage_context = storage_context)
global engine
engine = index.as_query_engine(similarity_top_k = 3)
return engine
@st.cache_resource
def run(_query_engine, query):
if type(_query_engine) == str:
print('Executing Case - A')
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant who answers questions given context."},
{"role": "user", "content": f"The question is - {query}\nThe provided context is - {_query_engine}\nAnswer the question to the best of your abilities."},
]
)
st.write(response['choices'][0]['message']['content'])
else:
print('Executing Case - B')
st.write(query_engine.query(query).response)
return True
st.set_page_config(layout = "wide")
st.title("Document Querying")
uploaded_file = st.file_uploader('Upload your file')
query_engine = preprocessing(uploaded_file)
if query_engine:
query = st.text_input('Enter your Query.', key = 'query_input')
if query:
run(query_engine, query) | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults"
] | [((55, 68), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (66, 68), False, 'from dotenv import load_dotenv\n'), ((860, 890), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (888, 890), False, 'from llama_index import SimpleDirectoryReader, StorageContext, LLMPredictor\n'), ((909, 996), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model'}), '(llm_predictor=llm_predictor, embed_model=\n embed_model)\n', (937, 996), False, 'from llama_index import ServiceContext\n'), ((2820, 2853), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""'}), "(layout='wide')\n", (2838, 2853), True, 'import streamlit as st\n'), ((2857, 2886), 'streamlit.title', 'st.title', (['"""Document Querying"""'], {}), "('Document Querying')\n", (2865, 2886), True, 'import streamlit as st\n'), ((2904, 2940), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your file"""'], {}), "('Upload your file')\n", (2920, 2940), True, 'import streamlit as st\n'), ((787, 837), 'langchain.embeddings.CohereEmbeddings', 'CohereEmbeddings', ([], {'model': '"""embed-english-light-v2.0"""'}), "(model='embed-english-light-v2.0')\n", (803, 837), False, 'from langchain.embeddings import CohereEmbeddings\n'), ((1080, 1122), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['encoding_name'], {}), '(encoding_name)\n', (1107, 1122), False, 'import tiktoken\n'), ((3016, 3069), 'streamlit.text_input', 'st.text_input', (['"""Enter your Query."""'], {'key': '"""query_input"""'}), "('Enter your Query.', key='query_input')\n", (3029, 3069), True, 'import streamlit as st\n'), ((644, 747), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': '(-1)', 'openai_api_key': 'openai.api_key'}), "(temperature=0, model_name='gpt-3.5-turbo', max_tokens=-1,\n openai_api_key=openai.api_key)\n", (654, 747), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1286, 1315), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1313, 1315), False, 'import tempfile\n'), ((1336, 1383), 'os.path.join', 'os.path.join', (['temp_dir.name', 'uploaded_file.name'], {}), '(temp_dir.name, uploaded_file.name)\n', (1348, 1383), False, 'import os\n'), ((2198, 2537), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': '[{\'role\': \'system\', \'content\':\n \'You are a helpful assistant who answers questions given context.\'}, {\n \'role\': \'user\', \'content\':\n f"""The question is - {query}\nThe provided context is - {_query_engine}\nAnswer the question to the best of your abilities."""\n }]'}), '(model=\'gpt-3.5-turbo\', messages=[{\'role\':\n \'system\', \'content\':\n \'You are a helpful assistant who answers questions given context.\'}, {\n \'role\': \'user\', \'content\':\n f"""The question is - {query}\nThe provided context is - {_query_engine}\nAnswer the question to the best of your abilities."""\n }])\n', (2226, 2537), False, 'import openai\n'), ((2645, 2699), 'streamlit.write', 'st.write', (["response['choices'][0]['message']['content']"], {}), "(response['choices'][0]['message']['content'])\n", (2653, 2699), True, 'import streamlit as st\n'), ((1828, 1939), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['document'], {'service_context': 'service_context', 'storage_context': 'storage_context'}), '(document, service_context=service_context,\n storage_context=storage_context)\n', (1859, 1939), False, 'from llama_index import VectorStoreIndex\n'), ((1486, 1532), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file_path]'}), '(input_files=[file_path])\n', (1507, 1532), False, 'from llama_index import SimpleDirectoryReader, StorageContext, LLMPredictor\n')]
from dotenv import load_dotenv
import os
import streamlit as st
import pandas as pd
from llama_index.core.query_engine import PandasQueryEngine
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from prompts import new_prompt, instruction_str, context
from note_engine import note_engine
from pdf import combined_engine
from pdf import get_index as pdf_get_index
load_dotenv()
population_path = os.path.join("data", "population.csv")
population_df = pd.read_csv(population_path)
population_query_engine = PandasQueryEngine(
df=population_df, verbose=True, instruction_str=instruction_str
)
population_query_engine.update_prompts({"pandas_prompt" : new_prompt})
tools = [
note_engine,
QueryEngineTool(
query_engine=population_query_engine,
metadata=ToolMetadata(
name="population_query_engine",
description="This gives information at the world population and demographic",
),
),
# QueryEngineTool(
# query_engine=malaysia_engine,
# metadata=ToolMetadata(
# name="malaysia_data",
# description="This gives details information about Malaysia country",
# ),
# ),
QueryEngineTool(
query_engine=combined_engine,
metadata=ToolMetadata(
name="combined_data",
description="This gives information from multiple files",
),
),
]
llm = OpenAI(model="gpt-3.5-turbo-0613")
agent = ReActAgent.from_tools(tools, llm=llm, verbose=True, context=context)
# while (prompt := input("Enter a prompt (q to quit): ")) != "q":
# result = agent.query(prompt)
# print(result)
file_paths = []
# File uploader in the sidebar
with st.sidebar:
st.header("Upload PDF file")
file = st.file_uploader("", type=["pdf"])
if file:
file_path = os.path.join("data", file.name)
# Save the uploaded file
with open(file_path, "wb") as f:
f.write(file.getvalue())
# Display confirmation message
st.success(f"File uploaded successfully: {file.name}")
# Add the uploaded file path to the list of file paths
file_paths.append(file_path)
# Check if there are any uploaded files
if file_paths:
# Get combined index for uploaded files
combined_index = pdf_get_index(file_paths, 'combined_index')
combined_engine = combined_index.as_query_engine()
# Add QueryEngineTool for combined data
tools.append(
QueryEngineTool(
query_engine=combined_engine,
metadata=ToolMetadata(
name="combined_data",
description="This gives information from multiple files",
),
)
)
st.title("AgentAI - RAG")
user_input = st.text_input("Enter a prompt:")
if user_input:
if user_input.lower() == 'q':
st.stop()
else:
result = agent.query(user_input)
st.text_area("Response:", value=result, height=100, disabled=False)
| [
"llama_index.core.tools.ToolMetadata",
"llama_index.llms.openai.OpenAI",
"llama_index.core.query_engine.PandasQueryEngine",
"llama_index.core.agent.ReActAgent.from_tools"
] | [((468, 481), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (479, 481), False, 'from dotenv import load_dotenv\n'), ((501, 539), 'os.path.join', 'os.path.join', (['"""data"""', '"""population.csv"""'], {}), "('data', 'population.csv')\n", (513, 539), False, 'import os\n'), ((556, 584), 'pandas.read_csv', 'pd.read_csv', (['population_path'], {}), '(population_path)\n', (567, 584), True, 'import pandas as pd\n'), ((612, 699), 'llama_index.core.query_engine.PandasQueryEngine', 'PandasQueryEngine', ([], {'df': 'population_df', 'verbose': '(True)', 'instruction_str': 'instruction_str'}), '(df=population_df, verbose=True, instruction_str=\n instruction_str)\n', (629, 699), False, 'from llama_index.core.query_engine import PandasQueryEngine\n'), ((1517, 1551), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo-0613"""'}), "(model='gpt-3.5-turbo-0613')\n", (1523, 1551), False, 'from llama_index.llms.openai import OpenAI\n'), ((1560, 1628), 'llama_index.core.agent.ReActAgent.from_tools', 'ReActAgent.from_tools', (['tools'], {'llm': 'llm', 'verbose': '(True)', 'context': 'context'}), '(tools, llm=llm, verbose=True, context=context)\n', (1581, 1628), False, 'from llama_index.core.agent import ReActAgent\n'), ((2824, 2849), 'streamlit.title', 'st.title', (['"""AgentAI - RAG"""'], {}), "('AgentAI - RAG')\n", (2832, 2849), True, 'import streamlit as st\n'), ((2864, 2896), 'streamlit.text_input', 'st.text_input', (['"""Enter a prompt:"""'], {}), "('Enter a prompt:')\n", (2877, 2896), True, 'import streamlit as st\n'), ((1821, 1849), 'streamlit.header', 'st.header', (['"""Upload PDF file"""'], {}), "('Upload PDF file')\n", (1830, 1849), True, 'import streamlit as st\n'), ((1861, 1895), 'streamlit.file_uploader', 'st.file_uploader', (['""""""'], {'type': "['pdf']"}), "('', type=['pdf'])\n", (1877, 1895), True, 'import streamlit as st\n'), ((2415, 2458), 'pdf.get_index', 'pdf_get_index', (['file_paths', '"""combined_index"""'], {}), "(file_paths, 'combined_index')\n", (2428, 2458), True, 'from pdf import get_index as pdf_get_index\n'), ((1930, 1961), 'os.path.join', 'os.path.join', (['"""data"""', 'file.name'], {}), "('data', file.name)\n", (1942, 1961), False, 'import os\n'), ((2138, 2192), 'streamlit.success', 'st.success', (['f"""File uploaded successfully: {file.name}"""'], {}), "(f'File uploaded successfully: {file.name}')\n", (2148, 2192), True, 'import streamlit as st\n'), ((2954, 2963), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (2961, 2963), True, 'import streamlit as st\n'), ((3023, 3090), 'streamlit.text_area', 'st.text_area', (['"""Response:"""'], {'value': 'result', 'height': '(100)', 'disabled': '(False)'}), "('Response:', value=result, height=100, disabled=False)\n", (3035, 3090), True, 'import streamlit as st\n'), ((888, 1015), 'llama_index.core.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""population_query_engine"""', 'description': '"""This gives information at the world population and demographic"""'}), "(name='population_query_engine', description=\n 'This gives information at the world population and demographic')\n", (900, 1015), False, 'from llama_index.core.tools import QueryEngineTool, ToolMetadata\n'), ((1371, 1468), 'llama_index.core.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""combined_data"""', 'description': '"""This gives information from multiple files"""'}), "(name='combined_data', description=\n 'This gives information from multiple files')\n", (1383, 1468), False, 'from llama_index.core.tools import QueryEngineTool, ToolMetadata\n'), ((2665, 2762), 'llama_index.core.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""combined_data"""', 'description': '"""This gives information from multiple files"""'}), "(name='combined_data', description=\n 'This gives information from multiple files')\n", (2677, 2762), False, 'from llama_index.core.tools import QueryEngineTool, ToolMetadata\n')]
# general imports
from constants import *
# streamlit imports
import streamlit as st
from utils import *
from streamlit_lottie import st_lottie
# llama index imports
import openai
from llama_index import (
VectorStoreIndex,
download_loader,
ServiceContext,
set_global_service_context,
)
from llama_index.llms import OpenAI
from llama_index.embeddings import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
openai.api_key = OpenAI_key # from constants.py
system_prompt = """
[INST] <>
You are a helpful bank loan officer. You are going to be given a bank statement
to analyse and you must provide accurate insights about its contents.
If a question doesn't make any sense, or is not factually coherent, explain what is wrong with
the question instead of answering something incorrect. If you don't know the answer, don't share
inaccurate information.
Your goal is to provide insightful answers about the financial background of an individual.
<>
"""
llm = OpenAI(model="gpt-4-1106-preview", system_prompt=system_prompt)
embeddings = LangchainEmbedding(HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2"))
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embeddings)
set_global_service_context(service_context)
# import lottie
lottie_file = load_lottieurl() # animation url
st.set_page_config(page_title="loan_gpt")
st_lottie(lottie_file, height=175, quality="medium")
st.title("**Loan Check: Business Loan Analysis**")
if "uploaded" not in st.session_state:
st.session_state["uploaded"] = False
st.session_state["filename"] = None
st.session_state["initial_response"] = None
if "query_engine" not in st.session_state:
st.session_state["query_engine"] = None
def reset():
st.session_state["uploaded"] = False
st.session_state["filename"] = None
st.session_state["initial_response"] = None
st.session_state["query_engine"] = None
if not st.session_state["uploaded"]:
st.write("Upload a bank statement and analyze loan worthiness.")
input_file = st.file_uploader("Choose a file")
if input_file and does_file_have_pdf_extension(input_file):
path = store_pdf_file(input_file, dir) # default dir is ./statements/
scs = st.success("File successfully uploaded")
filename = input_file.name
with st.spinner("Analyzing document..."):
PyMuPDFReader = download_loader("PyMuPDFReader")
loader = PyMuPDFReader()
documents = loader.load(file_path=path, metadata=True)
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
st.session_state["query_engine"] = query_engine
scs.empty()
st.session_state["uploaded"] = True
st.session_state["filename"] = filename
st.rerun()
if st.session_state["uploaded"]:
st.write(
f"Here is a financial summary of the account holder for the uploaded statement:"
)
st.button("Upload New PDF", on_click=reset)
initial_prompt = """
I want to analyze the financial health of the individual based solely on the given statement. Here are some details I want information on:
1. Total monthly deposits (with months and amounts)
2. Total monthly withdrawals (with months and amounts)
3. Any recurring payments (such as rent, utilities, loan repayments - with descriptions, dates, and amounts)
4. Any other noticeable spending habits (with amounts)
Make sure your output is well formatted and is plain-text.
I want to determine if this individual should be awarded a business loan based on the above.
Give me a potential yes, potential no or cannot say answer and evidence your response from details from above. Be sure to highlight any noticeable red-flags or positive signs.
"""
query_engine = st.session_state["query_engine"]
if not st.session_state["initial_response"]:
with st.spinner("Generating initial analysis..."):
response = query_engine.query(initial_prompt)
st.session_state["initial_response"] = response.response
st.write(st.session_state["initial_response"])
prompt = st.text_input("Type any additional queries query")
if prompt:
with st.spinner("Generating response..."):
response = query_engine.query(prompt)
st.write(response.response)
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.set_global_service_context"
] | [((1017, 1080), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""', 'system_prompt': 'system_prompt'}), "(model='gpt-4-1106-preview', system_prompt=system_prompt)\n", (1023, 1080), False, 'from llama_index.llms import OpenAI\n'), ((1187, 1248), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embeddings'}), '(llm=llm, embed_model=embeddings)\n', (1215, 1248), False, 'from llama_index import VectorStoreIndex, download_loader, ServiceContext, set_global_service_context\n'), ((1249, 1292), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (1275, 1292), False, 'from llama_index import VectorStoreIndex, download_loader, ServiceContext, set_global_service_context\n'), ((1359, 1400), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""loan_gpt"""'}), "(page_title='loan_gpt')\n", (1377, 1400), True, 'import streamlit as st\n'), ((1401, 1453), 'streamlit_lottie.st_lottie', 'st_lottie', (['lottie_file'], {'height': '(175)', 'quality': '"""medium"""'}), "(lottie_file, height=175, quality='medium')\n", (1410, 1453), False, 'from streamlit_lottie import st_lottie\n'), ((1455, 1505), 'streamlit.title', 'st.title', (['"""**Loan Check: Business Loan Analysis**"""'], {}), "('**Loan Check: Business Loan Analysis**')\n", (1463, 1505), True, 'import streamlit as st\n'), ((1114, 1166), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""'}), "(model_name='all-MiniLM-L6-v2')\n", (1135, 1166), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1994, 2058), 'streamlit.write', 'st.write', (['"""Upload a bank statement and analyze loan worthiness."""'], {}), "('Upload a bank statement and analyze loan worthiness.')\n", (2002, 2058), True, 'import streamlit as st\n'), ((2076, 2109), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a file"""'], {}), "('Choose a file')\n", (2092, 2109), True, 'import streamlit as st\n'), ((2905, 3005), 'streamlit.write', 'st.write', (['f"""Here is a financial summary of the account holder for the uploaded statement:"""'], {}), "(\n f'Here is a financial summary of the account holder for the uploaded statement:'\n )\n", (2913, 3005), True, 'import streamlit as st\n'), ((3014, 3057), 'streamlit.button', 'st.button', (['"""Upload New PDF"""'], {'on_click': 'reset'}), "('Upload New PDF', on_click=reset)\n", (3023, 3057), True, 'import streamlit as st\n'), ((4160, 4206), 'streamlit.write', 'st.write', (["st.session_state['initial_response']"], {}), "(st.session_state['initial_response'])\n", (4168, 4206), True, 'import streamlit as st\n'), ((4220, 4270), 'streamlit.text_input', 'st.text_input', (['"""Type any additional queries query"""'], {}), "('Type any additional queries query')\n", (4233, 4270), True, 'import streamlit as st\n'), ((2268, 2308), 'streamlit.success', 'st.success', (['"""File successfully uploaded"""'], {}), "('File successfully uploaded')\n", (2278, 2308), True, 'import streamlit as st\n'), ((2856, 2866), 'streamlit.rerun', 'st.rerun', ([], {}), '()\n', (2864, 2866), True, 'import streamlit as st\n'), ((2358, 2393), 'streamlit.spinner', 'st.spinner', (['"""Analyzing document..."""'], {}), "('Analyzing document...')\n", (2368, 2393), True, 'import streamlit as st\n'), ((2423, 2455), 'llama_index.download_loader', 'download_loader', (['"""PyMuPDFReader"""'], {}), "('PyMuPDFReader')\n", (2438, 2455), False, 'from llama_index import VectorStoreIndex, download_loader, ServiceContext, set_global_service_context\n'), ((2580, 2622), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2611, 2622), False, 'from llama_index import VectorStoreIndex, download_loader, ServiceContext, set_global_service_context\n'), ((3983, 4027), 'streamlit.spinner', 'st.spinner', (['"""Generating initial analysis..."""'], {}), "('Generating initial analysis...')\n", (3993, 4027), True, 'import streamlit as st\n'), ((4299, 4335), 'streamlit.spinner', 'st.spinner', (['"""Generating response..."""'], {}), "('Generating response...')\n", (4309, 4335), True, 'import streamlit as st\n'), ((4399, 4426), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (4407, 4426), True, 'import streamlit as st\n')]
import pathlib
import tempfile
from io import BytesIO
import openai
import streamlit as st
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.chat_engine import ContextChatEngine
from llama_index.llms.openai import OpenAI
from sidebar import sidebar_params
st.set_page_config(page_title="Chat with Documents", layout="wide", page_icon="🔥")
st.title("Chat with Documents")
@st.cache_resource(show_spinner=False)
def build_chat_engine(file: BytesIO, temperature: float) -> ContextChatEngine:
with st.spinner("Loading and indexing the document..."):
with tempfile.TemporaryDirectory() as temp_dir:
temp_file_path = pathlib.Path(temp_dir) / file.name
with open(temp_file_path, "wb") as f:
f.write(file.getbuffer())
reader = SimpleDirectoryReader(input_files=[temp_file_path])
documents = reader.load_data()
llm = OpenAI(model="gpt-3.5-turbo", temperature=temperature)
index = VectorStoreIndex.from_documents(documents)
return index.as_chat_engine(chat_mode="context", llm=llm, verbose=True)
def add_message(role: str, content: str):
st.session_state.messages.append({"role": role, "content": content})
openai_api_key, temperature = sidebar_params()
uploaded_file = st.file_uploader(
"Upload a pdf, docx, or txt file",
type=["pdf", "docx", "txt"],
)
if not openai_api_key or not uploaded_file:
st.stop()
openai.api_key = openai_api_key
chat_engine = build_chat_engine(uploaded_file, temperature)
if "messages" not in st.session_state or st.sidebar.button("Clear message history"):
st.session_state.messages = [
{
"role": "assistant",
"content": "Ask me questions about the uploaded document!",
},
]
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
if user_query := st.chat_input("Ask questions about the document..."):
with st.chat_message("user"):
st.write(user_query)
add_message("user", user_query)
with st.chat_message("assistant"), st.spinner("Generating response..."):
response = chat_engine.chat(user_query).response
st.write(response)
add_message("assistant", response)
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader",
"llama_index.llms.openai.OpenAI"
] | [((300, 386), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with Documents"""', 'layout': '"""wide"""', 'page_icon': '"""🔥"""'}), "(page_title='Chat with Documents', layout='wide',\n page_icon='🔥')\n", (318, 386), True, 'import streamlit as st\n'), ((383, 414), 'streamlit.title', 'st.title', (['"""Chat with Documents"""'], {}), "('Chat with Documents')\n", (391, 414), True, 'import streamlit as st\n'), ((418, 455), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (435, 455), True, 'import streamlit as st\n'), ((1282, 1298), 'sidebar.sidebar_params', 'sidebar_params', ([], {}), '()\n', (1296, 1298), False, 'from sidebar import sidebar_params\n'), ((1316, 1401), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload a pdf, docx, or txt file"""'], {'type': "['pdf', 'docx', 'txt']"}), "('Upload a pdf, docx, or txt file', type=['pdf', 'docx', 'txt']\n )\n", (1332, 1401), True, 'import streamlit as st\n'), ((1181, 1249), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': role, 'content': content}"], {}), "({'role': role, 'content': content})\n", (1213, 1249), True, 'import streamlit as st\n'), ((1457, 1466), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (1464, 1466), True, 'import streamlit as st\n'), ((1602, 1644), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Clear message history"""'], {}), "('Clear message history')\n", (1619, 1644), True, 'import streamlit as st\n'), ((1953, 2005), 'streamlit.chat_input', 'st.chat_input', (['"""Ask questions about the document..."""'], {}), "('Ask questions about the document...')\n", (1966, 2005), True, 'import streamlit as st\n'), ((544, 594), 'streamlit.spinner', 'st.spinner', (['"""Loading and indexing the document..."""'], {}), "('Loading and indexing the document...')\n", (554, 594), True, 'import streamlit as st\n'), ((939, 993), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': 'temperature'}), "(model='gpt-3.5-turbo', temperature=temperature)\n", (945, 993), False, 'from llama_index.llms.openai import OpenAI\n'), ((1010, 1052), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1041, 1052), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((1864, 1896), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (1879, 1896), True, 'import streamlit as st\n'), ((1906, 1934), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (1914, 1934), True, 'import streamlit as st\n'), ((2016, 2039), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (2031, 2039), True, 'import streamlit as st\n'), ((2049, 2069), 'streamlit.write', 'st.write', (['user_query'], {}), '(user_query)\n', (2057, 2069), True, 'import streamlit as st\n'), ((2116, 2144), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2131, 2144), True, 'import streamlit as st\n'), ((2146, 2182), 'streamlit.spinner', 'st.spinner', (['"""Generating response..."""'], {}), "('Generating response...')\n", (2156, 2182), True, 'import streamlit as st\n'), ((2249, 2267), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (2257, 2267), True, 'import streamlit as st\n'), ((609, 638), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (636, 638), False, 'import tempfile\n'), ((829, 880), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[temp_file_path]'}), '(input_files=[temp_file_path])\n', (850, 880), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((681, 703), 'pathlib.Path', 'pathlib.Path', (['temp_dir'], {}), '(temp_dir)\n', (693, 703), False, 'import pathlib\n')]
import logging
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.vector_stores import ChromaVectorStore
from llama_index.storage.storage_context import StorageContext
# from IPython.display import Markdown, display
from llama_index.node_parser import SentenceSplitter
from embedding_manager import Embeddings
import chromadb
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DataLoader:
def __init__(self, file_paths):
self.file_paths = file_paths
def read_data(self):
logger.info("Reading data from files: %s", self.file_paths)
# automatically selects the best file reader based on the file extensions
data_loader = SimpleDirectoryReader(input_files=self.file_paths)
return data_loader.load_data()
def chunk_data(self, data, chunk_size=500, chunk_overlap=50):
logger.info("Parsing data")
node_parser = SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separator=" ",
paragraph_separator="\n\n\n",
secondary_chunking_regex="[^,.;。]+[,.;。]?"
)
return node_parser.get_nodes_from_documents(data)
class DatabaseManager:
def __init__(self, db_path, collection_name):
self.db_path = db_path
self.collection_name = collection_name
def get_db(self):
logger.info("Initializing the database at path: %s", self.db_path)
db = chromadb.PersistentClient(path=self.db_path)
collection = db.get_or_create_collection(self.collection_name)
return collection
# class VectorIndexer:
# def __init__(self, nodes, vector_store, embedding_model, llm_model, indexid, index_path):
# self.nodes = nodes
# self.vector_store = vector_store
# self.embedding_model = embedding_model
# self.llm_model = llm_model
# self.indexid = indexid
# self.index_path = index_path
# # self.service_context
# def get_index(self):
# try:
# logger.info(f"Load {self.indexid} from local path {self.index_path}")
# storage_context = StorageContext.from_defaults(vector_store=self.vector_store,
# persist_dir=self.index_path)
# index = load_index_from_storage(storage_context=storage_context, index_id=indexid)
# except Exception as e:
# logger.info("Creating the vector index")
# storage_context = StorageContext.from_defaults(vector_store=self.vector_store)
# service_context = ServiceContext.from_defaults(embed_model=self.embedding_model, llm=self.llm_model)
# index = VectorStoreIndex(
# self.nodes, storage_context=storage_context, service_context=service_context
# )
# index.set_index_id(self.indexid)
# index.storage_context.persist(persist_dir=self.index_path)
# return index
| [
"llama_index.SimpleDirectoryReader",
"llama_index.node_parser.SentenceSplitter"
] | [((373, 412), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (392, 412), False, 'import logging\n'), ((422, 449), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (439, 449), False, 'import logging\n'), ((740, 790), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'self.file_paths'}), '(input_files=self.file_paths)\n', (761, 790), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((955, 1121), 'llama_index.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'separator': '""" """', 'paragraph_separator': '"""\n\n\n"""', 'secondary_chunking_regex': '"""[^,.;。]+[,.;。]?"""'}), "(chunk_size=chunk_size, chunk_overlap=chunk_overlap,\n separator=' ', paragraph_separator='\\n\\n\\n', secondary_chunking_regex=\n '[^,.;。]+[,.;。]?')\n", (971, 1121), False, 'from llama_index.node_parser import SentenceSplitter\n'), ((1505, 1549), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'self.db_path'}), '(path=self.db_path)\n', (1530, 1549), False, 'import chromadb\n')] |
from dotenv import load_dotenv
import os
from typing import List
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core.settings import Settings
from llama_index.llms.openai import OpenAI
from llama_index.core.embeddings import resolve_embed_model
from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.response.notebook_utils import display_source_node
os.environ["TOKENIZERS_PARALLELISM"] = "false"
class RAGCreator():
def __init__(self):
self.documents = []
self.nodes = None
self.retriever = None
self.query_engine = None
self.rag_info = {}
def _update_rag_info(self, params:dict):
params.__delitem__("self")
callable_obj_keys = [k for k,v in params.items() if callable(v)]
for k in callable_obj_keys:
params[k] = params[k].__name__
self.rag_info.update(params)
def load_documents(self, data_loader, data_loader_kwargs:dict):
self._update_rag_info(locals())
try:
docs = data_loader().load_data(**data_loader_kwargs)
except Exception as e:
raise TypeError(f"Error loading documents: {e}.")
self.documents = docs
def parse_docs_to_nodes(self, node_parser=SimpleNodeParser, chunk_size=1024):
self._update_rag_info(locals())
node_parser = node_parser.from_defaults(chunk_size=chunk_size)
nodes = node_parser.get_nodes_from_documents(self.documents)
for idx, node in enumerate(nodes):
node.id_ = f"node-{idx}"
self.nodes = nodes
def set_model_settings(self, open_ai_model="gpt-3.5-turbo", embed_model="local:BAAI/bge-small-en"):
self._update_rag_info(locals())
load_dotenv()
Settings.llm = OpenAI(model=open_ai_model)
Settings.embed_model = resolve_embed_model(embed_model)
def create_retriever(self, vector_store_impl=VectorStoreIndex, similarity_top_k=2):
self._update_rag_info(locals())
index = vector_store_impl(self.nodes)
self.retriever = index.as_retriever(similarity_top_k=similarity_top_k)
def create_query_engine(self, query_engine=RetrieverQueryEngine):
self._update_rag_info(locals())
self.query_engine = query_engine.from_args(self.retriever)
def setup_and_deploy_RAG(self, data_loader, data_loader_kwargs,
node_parser=SimpleNodeParser, chunk_size=1024,
open_ai_model="gpt-3.5-turbo", embed_model="local:BAAI/bge-small-en",
vector_store_impl=VectorStoreIndex, similarity_top_k=2,
query_engine=RetrieverQueryEngine):
self.load_documents(data_loader, data_loader_kwargs)
self.parse_docs_to_nodes(node_parser, chunk_size)
self.set_model_settings(open_ai_model, embed_model)
self.create_retriever(vector_store_impl, similarity_top_k)
self.create_query_engine(query_engine)
return self
def query(self, query:str) -> str:
if self.query_engine is not None:
response = self.query_engine.query(query)
return str(response)
else:
raise ValueError("You must set up your RAG and its query engine before submitting queries.")
def query_multiple(self, queries:List[str]) -> List[str]:
if self.query_engine is not None:
responses = []
for query in queries:
response = self.query_engine.query(query)
responses.append(str(response))
return responses
else:
raise ValueError("You must set up your RAG and its query engine before submitting queries.")
def fetch_relevant_info(self, query:str) -> List[str]:
if self.retriever is not None:
retrievals = self.retriever.retrieve(query)
return retrievals
else:
raise ValueError("You must set up your RAG and its retriever before fetching relevant information.")
def display_relevant_info(self, query:str, source_length=1500):
retrievals = self.fetch_relevant_info(query=query)
for retrieval in retrievals:
display_source_node(retrieval, source_length=source_length)
def get_rag_info(self):
return self.rag_info | [
"llama_index.llms.openai.OpenAI",
"llama_index.core.embeddings.resolve_embed_model",
"llama_index.core.response.notebook_utils.display_source_node"
] | [((1816, 1829), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1827, 1829), False, 'from dotenv import load_dotenv\n'), ((1862, 1889), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'open_ai_model'}), '(model=open_ai_model)\n', (1868, 1889), False, 'from llama_index.llms.openai import OpenAI\n'), ((1921, 1953), 'llama_index.core.embeddings.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (1940, 1953), False, 'from llama_index.core.embeddings import resolve_embed_model\n'), ((4320, 4379), 'llama_index.core.response.notebook_utils.display_source_node', 'display_source_node', (['retrieval'], {'source_length': 'source_length'}), '(retrieval, source_length=source_length)\n', (4339, 4379), False, 'from llama_index.core.response.notebook_utils import display_source_node\n')] |
from fastapi import FastAPI, File, UploadFile, HTTPException
import openai
from dotenv import load_dotenv
import os
import json
from llama_index.core import SimpleDirectoryReader
from llama_index.core.node_parser import SimpleFileNodeParser
from llama_index.vector_stores.weaviate import WeaviateVectorStore
from llama_index.core import VectorStoreIndex, StorageContext
import weaviate
import uvicorn
load_dotenv()
app = FastAPI()
api_key = os.environ.get('OPENAI_API_KEY')
openai.api_key = api_key
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
WEAVIATE_API_KEY = os.getenv("WEAVIATE_API_KEY")
WEAVIATE_URL = os.getenv("WEAVIATE_URL")
auth_config = weaviate.AuthApiKey(api_key=WEAVIATE_API_KEY)
client = weaviate.Client(
url=WEAVIATE_URL,
auth_client_secret=auth_config
)
def search_and_query():
blogs = SimpleDirectoryReader("./Data").load_data()
vector_store = WeaviateVectorStore(weaviate_client=client, index_name="DCPR")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
VectorStoreIndex.from_documents(blogs, storage_context=storage_context)
return "Done Embeddings"
def Quert(ask):
vector_store = WeaviateVectorStore(weaviate_client=client, index_name="DCPR")
loaded_index = VectorStoreIndex.from_vector_store(vector_store)
query_engine = loaded_index.as_query_engine()
response = query_engine.query(ask)
return response
def contract_analysis_w_fact_checking(text):
if not text:
raise HTTPException(
status_code=400, detail="Text field is required in the input data.")
print("done 1")
# Perform contract analysis using Quert (assuming Quert is a class or function)
quert_instance = Quert(text)
# Extract relevant information from the Quert response
if quert_instance.response:
contract_results = [{
"LLM Response": quert_instance.response,
"Source_node": [{
"Page_number": key_point.node.metadata.get('page_label', ''),
"File_Name": key_point.node.metadata.get('file_name', ''),
"Text": key_point.node.text,
"Start_Char": key_point.node.start_char_idx,
"End_Char": key_point.node.end_char_idx,
"Score_Matching": key_point.score
} for key_point in quert_instance.source_nodes]
}]
else:
contract_results = []
# Return a standardized response
return {"status": "success", "message": "Contract analysis successful", "model_response": contract_results}
@app.post("/embedd")
async def embedd():
try:
dor = search_and_query()
return {"user_content": dor}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.post("/predict")
async def predict(data: dict):
try:
messages = data.get("messages", [])
user_message = next((msg["content"] for msg in messages if msg["role"] == "user"), None)
out = contract_analysis_w_fact_checking(user_message)
if user_message:
return {"user_content": out}
else:
raise HTTPException(
status_code=400, detail="User message not found in input.")
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.get("/")
def read_root():
return {"Hello": "World"}
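# Run the API locally (illustrative sketch; the host and port below are assumptions).
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)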
| [
"llama_index.vector_stores.weaviate.WeaviateVectorStore",
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.VectorStoreIndex.from_vector_store",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.SimpleDirectoryReader"
] | [((402, 415), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (413, 415), False, 'from dotenv import load_dotenv\n'), ((423, 432), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (430, 432), False, 'from fastapi import FastAPI, File, UploadFile, HTTPException\n'), ((444, 476), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (458, 476), False, 'import os\n'), ((519, 546), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (528, 546), False, 'import os\n'), ((612, 641), 'os.getenv', 'os.getenv', (['"""WEAVIATE_API_KEY"""'], {}), "('WEAVIATE_API_KEY')\n", (621, 641), False, 'import os\n'), ((657, 682), 'os.getenv', 'os.getenv', (['"""WEAVIATE_URL"""'], {}), "('WEAVIATE_URL')\n", (666, 682), False, 'import os\n'), ((697, 742), 'weaviate.AuthApiKey', 'weaviate.AuthApiKey', ([], {'api_key': 'WEAVIATE_API_KEY'}), '(api_key=WEAVIATE_API_KEY)\n', (716, 742), False, 'import weaviate\n'), ((753, 818), 'weaviate.Client', 'weaviate.Client', ([], {'url': 'WEAVIATE_URL', 'auth_client_secret': 'auth_config'}), '(url=WEAVIATE_URL, auth_client_secret=auth_config)\n', (768, 818), False, 'import weaviate\n'), ((926, 988), 'llama_index.vector_stores.weaviate.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'client', 'index_name': '"""DCPR"""'}), "(weaviate_client=client, index_name='DCPR')\n", (945, 988), False, 'from llama_index.vector_stores.weaviate import WeaviateVectorStore\n'), ((1011, 1066), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1039, 1066), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((1071, 1142), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['blogs'], {'storage_context': 'storage_context'}), '(blogs, storage_context=storage_context)\n', (1102, 1142), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((1208, 1270), 'llama_index.vector_stores.weaviate.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'client', 'index_name': '"""DCPR"""'}), "(weaviate_client=client, index_name='DCPR')\n", (1227, 1270), False, 'from llama_index.vector_stores.weaviate import WeaviateVectorStore\n'), ((1290, 1338), 'llama_index.core.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (1324, 1338), False, 'from llama_index.core import VectorStoreIndex, StorageContext\n'), ((1527, 1614), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""Text field is required in the input data."""'}), "(status_code=400, detail=\n 'Text field is required in the input data.')\n", (1540, 1614), False, 'from fastapi import FastAPI, File, UploadFile, HTTPException\n'), ((863, 894), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./Data"""'], {}), "('./Data')\n", (884, 894), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((3176, 3249), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(400)', 'detail': '"""User message not found in input."""'}), "(status_code=400, detail='User message not found in input.')\n", (3189, 3249), False, 'from fastapi import FastAPI, File, UploadFile, HTTPException\n')] |
from init import *
from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext
from llama_index.node_parser import SimpleNodeParser
from llama_index import VectorStoreIndex
from llama_index.llms import OpenAI
from llama_index import download_loader
class Index:
def __init__(self, dir="data"):
"""Initialize the index."""
self.loader = download_loader("UnstructuredReader")()
self.docs = self.load(dir)
self.nodes = SimpleNodeParser.from_defaults().get_nodes_from_documents(self.docs)
llm_predictor = LLMPredictor(llm=OpenAI(model="gpt-3.5-turbo", temperature=0))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size=1000)
self.index = VectorStoreIndex(self.nodes, service_context=service_context)
self.query_engine = self.index.as_query_engine(streaming=True)
self.retriever = self.index.as_retriever()
def load(self, dir="data"):
"""Load all documents from a directory."""
print(f"Loading directory: {dir}")
doc_files = []
for path, subdirs, files in os.walk(dir):
for name in files:
doc_files.append(os.path.join(path, name))
docs = []
for f in doc_files:
print(f"Loading file: {f}")
try:
docs += self.loader.load_data(f, split_documents=False)
except Exception as e:
print(e, "Skipping.")
return docs
def query(self, query):
"""Query the index."""
print("Query:", query)
response = self.query_engine.query(query)
print("Response:", response)
return response
if __name__ == "__main__":
index = Index(dir="data")
response = index.query("What is the meaning of life?")
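    # Illustrative extra step (reuses the same example question): inspect the raw
    # retrieval results through the retriever built in __init__.
    nodes = index.retriever.retrieve("What is the meaning of life?")
    print(f"Retrieved {len(nodes)} source nodes")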
| [
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.node_parser.SimpleNodeParser.from_defaults",
"llama_index.VectorStoreIndex"
] | [((653, 727), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(1000)'}), '(llm_predictor=llm_predictor, chunk_size=1000)\n', (681, 727), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((749, 810), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['self.nodes'], {'service_context': 'service_context'}), '(self.nodes, service_context=service_context)\n', (765, 810), False, 'from llama_index import VectorStoreIndex\n'), ((374, 411), 'llama_index.download_loader', 'download_loader', (['"""UnstructuredReader"""'], {}), "('UnstructuredReader')\n", (389, 411), False, 'from llama_index import download_loader\n'), ((470, 502), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {}), '()\n', (500, 502), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((581, 625), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model='gpt-3.5-turbo', temperature=0)\n", (587, 625), False, 'from llama_index.llms import OpenAI\n')] |
def get_agent(list_filters,openai_key,pinecone_key):
import logging
import sys
import os
import pandas as pd
import pinecone
import openai
from llama_index import VectorStoreIndex
from llama_index.vector_stores import PineconeVectorStore
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine
from llama_index.agent import OpenAIAgent
from llama_index.llms import OpenAI
from llama_index.tools import BaseTool, FunctionTool
from agent_utils import get_rebate,get_tax
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index.llms import ChatMessage
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
#Openai and Pinecone private key
openai.api_key = openai_key
api_key = pinecone_key
#Instantiate pinecone vector store
pinecone.init(api_key=api_key, environment="gcp-starter")
pinecone_index = pinecone.Index("quickstart-index")
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index
)
index = VectorStoreIndex.from_vector_store(vector_store)
if not list_filters:
list_filters = ['rates','claim']
else:
list_filters += ['rates','claim']
#Define retriever
retriever = index.as_retriever(
vector_store_kwargs={"filter": {"category": {"$in":list_filters}}},streaming=True)
# assemble query engine
query_engine = RetrieverQueryEngine(retriever=retriever)
#Get agent tools from agent_utils file and instantiate tools
tax_tool = FunctionTool.from_defaults(fn=get_tax)
relief_tool = FunctionTool.from_defaults(fn=get_rebate)
#Create list of tool for agent
tools = [
QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(
name="tax_relief_retriever",
description=(
"Provides information on reliefs for a given item category and information on how to claim tax reliefs"
"Use a detailed plain text question as input to the tool."
),
),
),
tax_tool,relief_tool]
#Define chat agent
llm = OpenAI(model="gpt-3.5-turbo-0613")
#Set a default chat history to handle cases where information is not provided
str_cat = ','.join(list_filters)
    chat_history = [ChatMessage(role='user', content=f"Assume I earn an income of RM90,000. If I state my income in chat, update it to the stated income. I want to buy items in the categories: {str_cat}")]
    system_prompt = "You are a tax advisory agent and you must only use information from the tools to answer queries. Do not answer topics that are unrelated to personal income tax or that do not use one of the tools."
agent = OpenAIAgent.from_tools(tools,system_prompt = system_prompt, chat_history = chat_history, verbose=True)
# chat_engine = CondenseQuestionChatEngine.from_defaults(
# query_engine=query_engine,
# # condense_question_prompt=custom_prompt,
# # chat_history=custom_chat_history,
# verbose=True,
# )
return agent
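# Example usage (illustrative; the filter list and keys below are placeholders):
# agent = get_agent(["books"], openai_key="sk-...", pinecone_key="...")
# print(agent.chat("How much tax relief can I claim for books?"))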
| [
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.llms.OpenAI",
"llama_index.tools.ToolMetadata",
"llama_index.llms.ChatMessage",
"llama_index.tools.FunctionTool.from_defaults",
"llama_index.agent.OpenAIAgent.from_tools",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.query_engine.RetrieverQueryEngine"
] | [((721, 779), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (740, 779), False, 'import logging\n'), ((1006, 1063), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'api_key', 'environment': '"""gcp-starter"""'}), "(api_key=api_key, environment='gcp-starter')\n", (1019, 1063), False, 'import pinecone\n'), ((1085, 1119), 'pinecone.Index', 'pinecone.Index', (['"""quickstart-index"""'], {}), "('quickstart-index')\n", (1099, 1119), False, 'import pinecone\n'), ((1139, 1189), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'pinecone_index'}), '(pinecone_index=pinecone_index)\n', (1158, 1189), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((1217, 1265), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (1251, 1265), False, 'from llama_index import VectorStoreIndex\n'), ((1583, 1624), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever'}), '(retriever=retriever)\n', (1603, 1624), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((1706, 1744), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'get_tax'}), '(fn=get_tax)\n', (1732, 1744), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((1763, 1804), 'llama_index.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'get_rebate'}), '(fn=get_rebate)\n', (1789, 1804), False, 'from llama_index.tools import BaseTool, FunctionTool\n'), ((2346, 2380), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo-0613"""'}), "(model='gpt-3.5-turbo-0613')\n", (2352, 2380), False, 'from llama_index.llms import OpenAI\n'), ((2929, 3033), 'llama_index.agent.OpenAIAgent.from_tools', 'OpenAIAgent.from_tools', (['tools'], {'system_prompt': 'system_prompt', 'chat_history': 'chat_history', 'verbose': '(True)'}), '(tools, system_prompt=system_prompt, chat_history=\n chat_history, verbose=True)\n', (2951, 3033), False, 'from llama_index.agent import OpenAIAgent\n'), ((815, 855), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (836, 855), False, 'import logging\n'), ((2520, 2707), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': '"""user"""', 'content': 'f"""Assume I earn an income of RM90,000. If I state my income chat, update it to the stated income. I want to buy an items in category {str_cat}"""'}), "(role='user', content=\n f'Assume I earn an income of RM90,000. If I state my income chat, update it to the stated income. 
I want to buy an items in category {str_cat}'\n )\n", (2531, 2707), False, 'from llama_index.llms import ChatMessage\n'), ((784, 803), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (801, 803), False, 'import logging\n'), ((1945, 2169), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""tax_relief_retriever"""', 'description': '"""Provides information on reliefs for a given item category and information on how to claim tax reliefsUse a detailed plain text question as input to the tool."""'}), "(name='tax_relief_retriever', description=\n 'Provides information on reliefs for a given item category and information on how to claim tax reliefsUse a detailed plain text question as input to the tool.'\n )\n", (1957, 2169), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This script allows you to ask questions to the Alice in Wonderland book.
It uses the GPT-3 model to create a vector index of the book, and then
allows you to ask questions to the index.
'''
import os
import yaml
import openai
from llama_index import (
GPTVectorStoreIndex,
StorageContext,
SimpleDirectoryReader,
download_loader,
load_index_from_storage
)
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores import SimpleVectorStore
from llama_index.storage.index_store import SimpleIndexStore
from argparse import ArgumentParser
# script configuration
persist_dir = "./indices/alice/"
pdf_file = "alice.pdf"
openai_config = "projects/infrastructure/charts/secrets/values/integration/openai-configuration/openai.yml"
credentials_path = os.path.join(os.path.expanduser('~'), openai_config)
credentials = yaml.safe_load(open(credentials_path, "r"))
os.environ["OPENAI_API_KEY"] = credentials["access_token"]
os.environ["OPENAI_ORGANIZATION"] = credentials["organization_id"]
# Save the index in .JSON file for repeated use. Saves money on ADA API calls
def create_index_from_pdf(persist_dir):
# This example uses PDF reader, there are many options at https://llamahub.ai/
# Use SimpleDirectoryReader to read all the txt files in a folder
PDFReader = download_loader("PDFReader")
loader = PDFReader()
documents = loader.load_data(file=pdf_file)
# Chunking and Embedding of the chunks.
index = GPTVectorStoreIndex.from_documents(documents)
index.storage_context.persist(persist_dir=persist_dir)
return index
def load_index(persist_dir):
storage_context = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir=persist_dir),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir=persist_dir),
index_store=SimpleIndexStore.from_persist_dir(persist_dir=persist_dir),
)
index = load_index_from_storage(storage_context)
return index
def main(args):
print(args.question)
if args.create_index:
index = create_index_from_pdf(persist_dir)
else:
index = load_index(persist_dir)
    # Retrieval, node postprocessing, response synthesis.
query_engine = index.as_query_engine()
# Run the query engine on a user question.
response = query_engine.query(args.question)
print(response)
if __name__ == '__main__':
parser = ArgumentParser(description=__doc__, prog='alice.py', epilog='Have fun!')
parser.add_argument('-c', '--create-index', help='(re)create the index', action='store_true')
parser.add_argument('question', help='question string to ask the index')
args = parser.parse_args()
main(args)
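# Example invocations (illustrative):
#   python alice.py -c "Who does Alice follow down the rabbit hole?"   # (re)create the index first
#   python alice.py "Who does Alice meet at the tea party?"            # reuse the persisted index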
| [
"llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir",
"llama_index.storage.index_store.SimpleIndexStore.from_persist_dir",
"llama_index.download_loader",
"llama_index.vector_stores.SimpleVectorStore.from_persist_dir",
"llama_index.load_index_from_storage",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((862, 885), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (880, 885), False, 'import os\n'), ((1376, 1404), 'llama_index.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (1391, 1404), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, SimpleDirectoryReader, download_loader, load_index_from_storage\n'), ((1535, 1580), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1569, 1580), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, SimpleDirectoryReader, download_loader, load_index_from_storage\n'), ((2000, 2040), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2023, 2040), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, SimpleDirectoryReader, download_loader, load_index_from_storage\n'), ((2491, 2563), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'prog': '"""alice.py"""', 'epilog': '"""Have fun!"""'}), "(description=__doc__, prog='alice.py', epilog='Have fun!')\n", (2505, 2563), False, 'from argparse import ArgumentParser\n'), ((1756, 1817), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (1792, 1817), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((1840, 1899), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (1874, 1899), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((1921, 1979), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (1954, 1979), False, 'from llama_index.storage.index_store import SimpleIndexStore\n')] |
import os
from dotenv import load_dotenv
from IPython.display import Markdown, display
from llama_index.legacy import VectorStoreIndex, ServiceContext
from llama_index.legacy.vector_stores import ChromaVectorStore
from llama_index.legacy.storage.storage_context import StorageContext
from llama_index.legacy.embeddings import HuggingFaceEmbedding
from llama_index.legacy.llms import Gemini
from llama_index.legacy.node_parser import SentenceWindowNodeParser, SimpleNodeParser
from llama_index.legacy import GPTVectorStoreIndex
from llama_index.legacy.readers.web import BeautifulSoupWebReader
import chromadb
import streamlit as st
# Enable Logging
import logging
import sys
#You can set the logging level to DEBUG for more verbose output,
# or use level=logging.INFO for less detailed information.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# Load environment variables from the .env file
load_dotenv()
loader = BeautifulSoupWebReader()
urls = [
'https://www.hsph.harvard.edu/nutritionsource/kids-healthy-eating-plate/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-eating-plate/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/whole-grains/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/protein/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/vegetables-and-fruits/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/fats-and-cholesterol/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/fats-and-cholesterol/types-of-fat/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/fats-and-cholesterol/cholesterol/',
'https://www.hsph.harvard.edu/nutritionsource/what-should-you-eat/fats-and-cholesterol/dietary-fat-and-disease/',
'https://www.hsph.harvard.edu/nutritionsource/vitamins/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-drinks/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-drinks/other-healthy-beverage-options/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-drinks/drinks-to-consume-in-moderation/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-drinks/sugary-drinks/',
'https://www.hsph.harvard.edu/nutritionsource/sports-drinks/',
'https://www.hsph.harvard.edu/nutritionsource/energy-drinks/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-drinks/beverages-public-health-concerns/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-drinks/artificial-sweeteners/',
'https://www.hsph.harvard.edu/nutritionsource/salt-and-sodium/',
'https://www.hsph.harvard.edu/nutritionsource/salt-and-sodium/take-action-on-salt/',
'https://www.hsph.harvard.edu/nutritionsource/salt-and-sodium/sodium-public-health-concerns/',
'https://www.hsph.harvard.edu/nutritionsource/carbohydrates/',
'https://www.hsph.harvard.edu/nutritionsource/carbohydrates/carbohydrates-and-blood-sugar/',
'https://www.hsph.harvard.edu/nutritionsource/carbohydrates/fiber/',
'https://www.hsph.harvard.edu/nutritionsource/carbohydrates/added-sugar-in-the-diet/',
'https://www.hsph.harvard.edu/nutritionsource/sustainability/',
'https://www.hsph.harvard.edu/nutritionsource/sustainability/plate-and-planet/',
'https://www.hsph.harvard.edu/nutritionsource/sustainability/food-waste/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-weight/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-weight/measuring-fat/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-weight/best-diet-quality-counts/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-weight/healthy-dietary-styles/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-weight/diet-reviews/',
'https://www.hsph.harvard.edu/nutritionsource/staying-active/',
'https://www.hsph.harvard.edu/nutritionsource/staying-active/active-communities/',
'https://www.hsph.harvard.edu/nutritionsource/stress-and-health/',
'https://www.hsph.harvard.edu/nutritionsource/sleep/',
'https://www.hsph.harvard.edu/nutritionsource/healthy-longevity/',
'https://www.hsph.harvard.edu/nutritionsource/disease-prevention/',
'https://www.hsph.harvard.edu/nutritionsource/disease-prevention/cardiovascular-disease/',
'https://www.hsph.harvard.edu/nutritionsource/disease-prevention/cardiovascular-disease/preventing-cvd/',
'https://www.hsph.harvard.edu/nutritionsource/disease-prevention/diabetes-prevention/',
'https://www.hsph.harvard.edu/nutritionsource/disease-prevention/diabetes-prevention/preventing-diabetes-full-story/',
'https://www.hsph.harvard.edu/nutritionsource/cancer/',
'https://www.hsph.harvard.edu/nutritionsource/cancer/preventing-cancer/',
'https://www.hsph.harvard.edu/nutritionsource/oral-health/',
'https://www.hsph.harvard.edu/nutritionsource/precision-nutrition/',
'https://www.hsph.harvard.edu/nutritionsource/nutrition-and-immunity/',
'https://www.hsph.harvard.edu/nutritionsource/recipes-2/',
'https://www.hsph.harvard.edu/nutritionsource/asparagus-with-warm-tarragon-pecan-vinaigrette/',
'https://www.hsph.harvard.edu/nutritionsource/asparagus-spears-with-mandarin-orange/',
'https://www.hsph.harvard.edu/nutritionsource/baby-arugula-and-shaved-fennel-with-lemon-vinaigrette/',
'https://www.hsph.harvard.edu/nutritionsource/braised-cabbage-with-leeks-and-sesame-seeds/',
'https://www.hsph.harvard.edu/nutritionsource/braised-oyster-mushrooms-coconut-macadamia/',
'https://www.hsph.harvard.edu/nutritionsource/butternut-squash-soup-recipe/',
'https://www.hsph.harvard.edu/nutritionsource/caesar-salad/',
'https://www.hsph.harvard.edu/nutritionsource/cardamom-roasted-cauliflower/',
'https://www.hsph.harvard.edu/nutritionsource/carrot-and-coriander-soup/',
'https://www.hsph.harvard.edu/nutritionsource/cauliflower-tomato-soup/',
'https://www.hsph.harvard.edu/nutritionsource/cauliflower-walnut-soup/',
'https://www.hsph.harvard.edu/nutritionsource/endive-salad-with-citrus-walnut-dressing/',
'https://www.hsph.harvard.edu/nutritionsource/customizable-stuffed-peppers/',
'https://www.hsph.harvard.edu/nutritionsource/fresh-spinach-with-sesame-seeds/',
'https://www.hsph.harvard.edu/nutritionsource/garlic-braised-greens/',
'https://www.hsph.harvard.edu/nutritionsource/green-beans-with-dried-cherries/',
'https://www.hsph.harvard.edu/nutritionsource/green-beans-with-chili-garlic-sauce/',
'https://www.hsph.harvard.edu/nutritionsource/green-chutney/',
'https://www.hsph.harvard.edu/nutritionsource/grilled-eggplant-cutlets/',
'https://www.hsph.harvard.edu/nutritionsource/kale-with-caramelized-onions/',
'https://www.hsph.harvard.edu/nutritionsource/marinated-shiitake-mushroom-and-cucumber-salad/',
'https://www.hsph.harvard.edu/nutritionsource/mashed-cauliflower/',
'https://www.hsph.harvard.edu/nutritionsource/mushroom-stroganoff/',
'https://www.hsph.harvard.edu/nutritionsource/pan-roasted-wild-mushrooms-with-coffee-and-hazelnuts/',
'https://www.hsph.harvard.edu/nutritionsource/portabella-steak-sandwich/',
'https://www.hsph.harvard.edu/nutritionsource/provencal-vegetables/',
'https://www.hsph.harvard.edu/nutritionsource/vegetable-stock/',
'https://www.hsph.harvard.edu/nutritionsource/roasted-brussels-sprouts/',
'https://www.hsph.harvard.edu/nutritionsource/brussels-sprouts-with-shallots/',
'https://www.hsph.harvard.edu/nutritionsource/roasted-beets-with-balsamic-vinegar/',
'https://www.hsph.harvard.edu/nutritionsource/roasted-balsamic-vegetables/',
'https://www.hsph.harvard.edu/nutritionsource/roasted-squash-with-pomegranate/',
'https://www.hsph.harvard.edu/nutritionsource/sweet-potatoes-with-pecans/',
'https://www.hsph.harvard.edu/nutritionsource/ruby-chard/',
'https://www.hsph.harvard.edu/nutritionsource/sauted-rainbow-swiss-chard/',
'https://www.hsph.harvard.edu/nutritionsource/simple-celery-date-salad/',
'https://www.hsph.harvard.edu/nutritionsource/southwestern-corn-hash/',
'https://www.hsph.harvard.edu/nutritionsource/spicy-broccolini/',
'https://www.hsph.harvard.edu/nutritionsource/spicy-indian-slaw/',
'https://www.hsph.harvard.edu/nutritionsource/stir-fried-vegetables-tomato-curry/',
'https://www.hsph.harvard.edu/nutritionsource/sugar-snap-peas-with-fresh-mint/',
'https://www.hsph.harvard.edu/nutritionsource/tarragon-succotash/',
'https://www.hsph.harvard.edu/nutritionsource/tunisian-carrot-salad/',
'https://www.hsph.harvard.edu/nutritionsource/vegetable-stock-recipe/',
'https://www.hsph.harvard.edu/nutritionsource/vegetarian-shepherds-pie-recipe/',
'https://www.hsph.harvard.edu/nutritionsource/wild-mushroom-soup-with-soba/',
'https://www.hsph.harvard.edu/nutritionsource/yellow-squash-with-sage/',
'https://www.hsph.harvard.edu/nutritionsource/arugula-watermelon-feta-and-mint-salad-with-balsamic-vinaigrette/',
'https://www.hsph.harvard.edu/nutritionsource/citrus-salad/',
'https://www.hsph.harvard.edu/nutritionsource/almond-coconut-macaroons/',
'https://www.hsph.harvard.edu/nutritionsource/dried-fruit-and-nuts/',
'https://www.hsph.harvard.edu/nutritionsource/watermelon-salad/',
'https://www.hsph.harvard.edu/nutritionsource/fruit-compote-spiced-nuts/',
'https://www.hsph.harvard.edu/nutritionsource/strawberry-rhubarb-crisp/',
'https://www.hsph.harvard.edu/nutritionsource/barley-roasted-portobello-and-fennel-salad/',
'https://www.hsph.harvard.edu/nutritionsource/blueberry-muffins/',
'https://www.hsph.harvard.edu/nutritionsource/brown-rice-pancakes/',
'https://www.hsph.harvard.edu/nutritionsource/bulgur-pilaf/',
'https://www.hsph.harvard.edu/nutritionsource/couscous-minted-with-pine-nuts/',
'https://www.hsph.harvard.edu/nutritionsource/couscous-quinoa-tabouli/',
'https://www.hsph.harvard.edu/nutritionsource/cranberry-orange-muffin/',
'https://www.hsph.harvard.edu/nutritionsource/fantastic-bulgur-dish/',
'https://www.hsph.harvard.edu/nutritionsource/farro-risotto-walnut-pesto/',
'https://www.hsph.harvard.edu/nutritionsource/farro-roasted-confetti-vegetables/',
'https://www.hsph.harvard.edu/nutritionsource/hearty-whole-grain-bread/',
'https://www.hsph.harvard.edu/nutritionsource/irish-brown-bread/',
'https://www.hsph.harvard.edu/nutritionsource/jalapeno-cheddar-corn-muffins/',
'https://www.hsph.harvard.edu/nutritionsource/lemon-chickpea-breakfast-muffins/',
'https://www.hsph.harvard.edu/nutritionsource/mediterranean-rice/',
'https://www.hsph.harvard.edu/nutritionsource/mixed-up-grains/',
'https://www.hsph.harvard.edu/nutritionsource/mushroom-barley-risotto/',
'https://www.hsph.harvard.edu/nutritionsource/oatmeal-roti/',
'https://www.hsph.harvard.edu/nutritionsource/pasta-in-zemino/',
'https://www.hsph.harvard.edu/nutritionsource/rigatoni-fresh-basil-pesto-corn-zucchini/',
'https://www.hsph.harvard.edu/nutritionsource/quinoa-chia-edamame-veggie-burger/',
'https://www.hsph.harvard.edu/nutritionsource/quinoa-enchilada-casserole/',
'https://www.hsph.harvard.edu/nutritionsource/spicy-coconut-rice-with-limes/',
'https://www.hsph.harvard.edu/nutritionsource/three-green-wheat-berry-salad-with-mushroom-bacon-recipe/',
'https://www.hsph.harvard.edu/nutritionsource/wheatberries-and-chives/',
'https://www.hsph.harvard.edu/nutritionsource/whole-wheat-banana-nut-muffins/',
'https://www.hsph.harvard.edu/nutritionsource/whole-wheat-penne-with-pistachio-pesto-and-cherry-tomatoes/',
'https://www.hsph.harvard.edu/nutritionsource/wild-rice-with-cranberries/',
'https://www.hsph.harvard.edu/nutritionsource/greek-skordalia/',
'https://www.hsph.harvard.edu/nutritionsource/green-lentil-hummus-herbs-olives/',
'https://www.hsph.harvard.edu/nutritionsource/guacamole/',
'https://www.hsph.harvard.edu/nutritionsource/hot-pepper-vinaigrette/',
'https://www.hsph.harvard.edu/nutritionsource/hummus/',
'https://www.hsph.harvard.edu/nutritionsource/italian-pesto-alla-trapanese/',
'https://www.hsph.harvard.edu/nutritionsource/mint-vinaigrette/',
'https://www.hsph.harvard.edu/nutritionsource/oregano-garlic-vinaigrette/',
'https://www.hsph.harvard.edu/nutritionsource/spanish-romesco-sauce/',
'https://www.hsph.harvard.edu/nutritionsource/turkish-muhammara/',
'https://www.hsph.harvard.edu/nutritionsource/turkish-tarator/',
'https://www.hsph.harvard.edu/nutritionsource/walnut-pesto/',
'https://www.hsph.harvard.edu/nutritionsource/white-bean-and-kale-hummus/',
'https://www.hsph.harvard.edu/nutritionsource/asian-trail-mix/',
'https://www.hsph.harvard.edu/nutritionsource/cozy-red-lentil-mash/',
'https://www.hsph.harvard.edu/nutritionsource/crunchy-roasted-chickpeas/',
'https://www.hsph.harvard.edu/nutritionsource/curried-red-lentil-soup/',
'https://www.hsph.harvard.edu/nutritionsource/dukkah/',
'https://www.hsph.harvard.edu/nutritionsource/french-style-lentils/',
'https://www.hsph.harvard.edu/nutritionsource/garbanzo-beans-with-spinach-and-tomatoes/',
'https://www.hsph.harvard.edu/nutritionsource/green-beans-with-tofu-and-crushed-peanuts/',
'https://www.hsph.harvard.edu/nutritionsource/mushroom-tofu-veggie-burger/',
'https://www.hsph.harvard.edu/nutritionsource/spicy-lemongrass-tofu-with-asian-basil/',
'https://www.hsph.harvard.edu/nutritionsource/sprouted-lentil-cabbage-celery-slaw/',
'https://www.hsph.harvard.edu/nutritionsource/thai-eggplant-salad-with-coconut-tofu-strips/',
'https://www.hsph.harvard.edu/nutritionsource/tomato-and-white-bean-salad/',
'https://www.hsph.harvard.edu/nutritionsource/whole-wheat-penne-with-pistachio-pesto-and-cherry-tomatoes/',
'https://www.hsph.harvard.edu/nutritionsource/white-beans-wild-rice-and-mushrooms/',
'https://www.hsph.harvard.edu/nutritionsource/vegetarian-refried-beans/',
'https://www.hsph.harvard.edu/nutritionsource/cod-and-littleneck-clams/',
'https://www.hsph.harvard.edu/nutritionsource/crawfish-touffe/',
'https://www.hsph.harvard.edu/nutritionsource/crispy-pan-seared-white-fish-walnut-romesco-pea-shoot-salad/',
'https://www.hsph.harvard.edu/nutritionsource/fish-creole/',
'https://www.hsph.harvard.edu/nutritionsource/miso-marinated-salmon-grilled-alder-wood/',
'https://www.hsph.harvard.edu/nutritionsource/pan-roasted-salmon-with-dill-olive-oil-capers/',
'https://www.hsph.harvard.edu/nutritionsource/pan-roasted-salmon/',
'https://www.hsph.harvard.edu/nutritionsource/shaved-fennel-salad-coriander-crusted-hamachi/',
'https://www.hsph.harvard.edu/nutritionsource/shrimp-and-chicken-gumbo/',
'https://www.hsph.harvard.edu/nutritionsource/shrimp-red-curry-crispy-sprouted-lentils/',
'https://www.hsph.harvard.edu/nutritionsource/wild-salmon-salad/',
'https://www.hsph.harvard.edu/nutritionsource/fish-tacos-with-cilantro-slaw/',
'https://www.hsph.harvard.edu/nutritionsource/chicken-shrimp-and-fruit-salad/',
'https://www.hsph.harvard.edu/nutritionsource/lemongrass-marinated-chicken-breast/',
'https://www.hsph.harvard.edu/nutritionsource/olive-oil-dressing-with-chicken-walnuts-recipe/',
'https://www.hsph.harvard.edu/nutritionsource/rosemary-and-lemon-grilled-chicken-breast/',
'https://www.hsph.harvard.edu/nutritionsource/spicy-chicken-kebabs-with-moorish-flavors/',
'https://www.hsph.harvard.edu/nutritionsource/stir-fried-chicken/',
'https://www.hsph.harvard.edu/nutritionsource/moroccan-chicken-stew-with-apricots/',
'https://www.hsph.harvard.edu/nutritionsource/stir-fried-chicken/',
'https://www.hsph.harvard.edu/nutritionsource/baked-ricotta/',
'https://www.hsph.harvard.edu/nutritionsource/roasted-tomatoes-stuffed-goat-cheese-garlic-basil/',
'https://www.hsph.harvard.edu/nutritionsource/fruit-cooler/',
'https://www.hsph.harvard.edu/nutritionsource/iced-tea-with-lemon-and-mint/'
# Add the rest of the URLs here
]
documents = loader.load_data(urls=urls)
# base Query Engine LLM
llm = Gemini(api_key=os.getenv("google_api_key"),model='gemini-pro')
# fine-tuned Embeddings model
embed_model = HuggingFaceEmbedding(
model_name='Revankumar/fine_tuned_embeddings_for_healthy_recipes'
)
# fine-tuned ServiceContext
ctx = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
)
parser = SimpleNodeParser()
nodes = parser.get_nodes_from_documents(documents)
db = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
service_context = ServiceContext.from_defaults(embed_model=embed_model,llm=llm)
VectorStoreIndex.from_documents(
documents, storage_context=storage_context, service_context=service_context
)
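# Illustrative query sketch (assumes the index built above is kept in a variable):
# index = VectorStoreIndex.from_documents(
#     documents, storage_context=storage_context, service_context=service_context
# )
# print(index.as_query_engine().query("What belongs on a healthy eating plate?"))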
| [
"llama_index.legacy.embeddings.HuggingFaceEmbedding",
"llama_index.legacy.VectorStoreIndex.from_documents",
"llama_index.legacy.storage.storage_context.StorageContext.from_defaults",
"llama_index.legacy.vector_stores.ChromaVectorStore",
"llama_index.legacy.node_parser.SimpleNodeParser",
"llama_index.legacy.readers.web.BeautifulSoupWebReader",
"llama_index.legacy.ServiceContext.from_defaults"
] | [((846, 905), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (865, 905), False, 'import logging\n'), ((1029, 1042), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1040, 1042), False, 'from dotenv import load_dotenv\n'), ((1053, 1077), 'llama_index.legacy.readers.web.BeautifulSoupWebReader', 'BeautifulSoupWebReader', ([], {}), '()\n', (1075, 1077), False, 'from llama_index.legacy.readers.web import BeautifulSoupWebReader\n'), ((16483, 16575), 'llama_index.legacy.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""Revankumar/fine_tuned_embeddings_for_healthy_recipes"""'}), "(model_name=\n 'Revankumar/fine_tuned_embeddings_for_healthy_recipes')\n", (16503, 16575), False, 'from llama_index.legacy.embeddings import HuggingFaceEmbedding\n'), ((16613, 16675), 'llama_index.legacy.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (16641, 16675), False, 'from llama_index.legacy import VectorStoreIndex, ServiceContext\n'), ((16697, 16715), 'llama_index.legacy.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {}), '()\n', (16713, 16715), False, 'from llama_index.legacy.node_parser import SentenceWindowNodeParser, SimpleNodeParser\n'), ((16775, 16820), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (16800, 16820), False, 'import chromadb\n'), ((16901, 16955), 'llama_index.legacy.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (16918, 16955), False, 'from llama_index.legacy.vector_stores import ChromaVectorStore\n'), ((16975, 17030), 'llama_index.legacy.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (17003, 17030), False, 'from llama_index.legacy.storage.storage_context import StorageContext\n'), ((17050, 17112), 'llama_index.legacy.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model', 'llm': 'llm'}), '(embed_model=embed_model, llm=llm)\n', (17078, 17112), False, 'from llama_index.legacy import VectorStoreIndex, ServiceContext\n'), ((17113, 17225), 'llama_index.legacy.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(documents, storage_context=storage_context,\n service_context=service_context)\n', (17144, 17225), False, 'from llama_index.legacy import VectorStoreIndex, ServiceContext\n'), ((937, 977), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (958, 977), False, 'import logging\n'), ((906, 925), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (923, 925), False, 'import logging\n'), ((16390, 16417), 'os.getenv', 'os.getenv', (['"""google_api_key"""'], {}), "('google_api_key')\n", (16399, 16417), False, 'import os\n')] |
# Wraps an Ollama-served LLM and a Qdrant-backed llama_index index behind a simple query interface.
from llama_index.llms import Ollama
from pathlib import Path
import qdrant_client
from llama_index import (
VectorStoreIndex,
ServiceContext,
download_loader,
)
from llama_index.storage.storage_context import StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
JSONReader = download_loader("JSONReader")
loader = JSONReader()
class Ollama_model:
def __init__(self, model="mistral"):
self.llm = Ollama(model=model)
self.documents = loader.load_data(Path('./data/tinytweets.json'))
self.client = qdrant_client.QdrantClient(
path="./qdrant_data"
)
self.vector_store = QdrantVectorStore(client=self.client, collection_name="tweets")
self.storage_context = StorageContext.from_defaults(vector_store=self.vector_store)
self.service_context = ServiceContext.from_defaults(llm=self.llm,embed_model="local")
self.index = VectorStoreIndex.from_documents(self.documents,service_context=self.service_context,storage_context=self.storage_context)
self.query_engine = self.index.as_query_engine()
def get_answer(self, input):
response = self.query_engine.query(input)
return response
def change_dataset(self, name, filename):
print(filename)
self.documents = loader.load_data(Path('./uploads/'+filename))
self.vector_store = QdrantVectorStore(client=self.client, collection_name=name)
self.storage_context = StorageContext.from_defaults(vector_store=self.vector_store)
self.index = VectorStoreIndex.from_documents(self.documents,service_context=self.service_context,storage_context=self.storage_context)
self.query_engine = self.index.as_query_engine()
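# Example usage (illustrative; assumes a running local Ollama server and the
# ./data/tinytweets.json file referenced above):
# model = Ollama_model(model="mistral")
# print(model.get_answer("What are the tweets about?"))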
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.llms.Ollama"
] | [((406, 435), 'llama_index.download_loader', 'download_loader', (['"""JSONReader"""'], {}), "('JSONReader')\n", (421, 435), False, 'from llama_index import VectorStoreIndex, ServiceContext, download_loader\n'), ((539, 558), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': 'model'}), '(model=model)\n', (545, 558), False, 'from llama_index.llms import Ollama\n'), ((655, 703), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""./qdrant_data"""'}), "(path='./qdrant_data')\n", (681, 703), False, 'import qdrant_client\n'), ((754, 817), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'self.client', 'collection_name': '"""tweets"""'}), "(client=self.client, collection_name='tweets')\n", (771, 817), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((849, 909), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'self.vector_store'}), '(vector_store=self.vector_store)\n', (877, 909), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((941, 1004), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'self.llm', 'embed_model': '"""local"""'}), "(llm=self.llm, embed_model='local')\n", (969, 1004), False, 'from llama_index import VectorStoreIndex, ServiceContext, download_loader\n'), ((1025, 1153), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['self.documents'], {'service_context': 'self.service_context', 'storage_context': 'self.storage_context'}), '(self.documents, service_context=self.\n service_context, storage_context=self.storage_context)\n', (1056, 1153), False, 'from llama_index import VectorStoreIndex, ServiceContext, download_loader\n'), ((1490, 1549), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'self.client', 'collection_name': 'name'}), '(client=self.client, collection_name=name)\n', (1507, 1549), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((1581, 1641), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'self.vector_store'}), '(vector_store=self.vector_store)\n', (1609, 1641), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((1663, 1791), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['self.documents'], {'service_context': 'self.service_context', 'storage_context': 'self.storage_context'}), '(self.documents, service_context=self.\n service_context, storage_context=self.storage_context)\n', (1694, 1791), False, 'from llama_index import VectorStoreIndex, ServiceContext, download_loader\n'), ((601, 631), 'pathlib.Path', 'Path', (['"""./data/tinytweets.json"""'], {}), "('./data/tinytweets.json')\n", (605, 631), False, 'from pathlib import Path\n'), ((1433, 1462), 'pathlib.Path', 'Path', (["('./uploads/' + filename)"], {}), "('./uploads/' + filename)\n", (1437, 1462), False, 'from pathlib import Path\n')] |
from pathlib import Path
from llama_hub.file.unstructured import UnstructuredReader
from llama_index import download_loader
from llama_index import SimpleDirectoryReader, VectorStoreIndex
from dotenv import load_dotenv
import os
from llama_index.node_parser import SimpleNodeParser
import pinecone
from llama_index.vector_stores import PineconeVectorStore
from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext
from llama_index.embeddings.openai import OpenAIEmbedding
import openai
####################################################
# #
# This file upserts documents in data to pinecone. #
# #
####################################################
load_dotenv()
openai.api_key = os.getenv('api_key')
# find API key in console at app.pinecone.io
os.environ['PINECONE_API_KEY'] = os.getenv('pinecone_api_key')
# environment is found next to API key in the console
os.environ['PINECONE_ENVIRONMENT'] = os.getenv('pinecone_env')
# loader = UnstructuredReader()
# initialize connection to pinecone
pinecone.init(
api_key=os.environ['PINECONE_API_KEY'],
environment=os.environ['PINECONE_ENVIRONMENT']
)
# setup the index/query process, ie the embedding model (and completion if used)
embed_model = OpenAIEmbedding(model='text-embedding-ada-002', embed_batch_size=100)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
# Readers
PDFReader = download_loader("PDFReader")
MarkdownReader = download_loader("MarkdownReader")
# Load docs
def upsert_docs(input_dir: str, index_name: str):
print(f"Building from {input_dir} under index {index_name}...\n")
documents = SimpleDirectoryReader(input_dir=input_dir).load_data()
# create the index if it does not exist already
if index_name not in pinecone.list_indexes():
pinecone.create_index(
name=index_name,
dimension=1536,
metric='cosine'
)
# connect to the index
pineconeIndex = pinecone.Index(index_name)
vectorStore = PineconeVectorStore(
pinecone_index=pineconeIndex
)
# setup our storage (vector db)
storageContext = StorageContext.from_defaults(
vector_store=vectorStore
)
index = GPTVectorStoreIndex.from_documents(
documents=documents,
storage_context=storageContext,
service_context=service_context
)
    print("Done building!\n")
upsert_docs(input_dir="upsert_doc/docs", index_name="ruikang-guo-knowledge-base")
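# Illustrative follow-up (assumes the index was just populated above): connect to the
# same Pinecone index and query it. GPTVectorStoreIndex is a legacy alias of
# VectorStoreIndex, so from_vector_store should be available.
# query_index = GPTVectorStoreIndex.from_vector_store(
#     PineconeVectorStore(pinecone_index=pinecone.Index("ruikang-guo-knowledge-base")),
#     service_context=service_context,
# )
# print(query_index.as_query_engine().query("What topics does the knowledge base cover?"))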
| [
"llama_index.SimpleDirectoryReader",
"llama_index.vector_stores.PineconeVectorStore",
"llama_index.download_loader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((796, 809), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (807, 809), False, 'from dotenv import load_dotenv\n'), ((827, 847), 'os.getenv', 'os.getenv', (['"""api_key"""'], {}), "('api_key')\n", (836, 847), False, 'import os\n'), ((926, 955), 'os.getenv', 'os.getenv', (['"""pinecone_api_key"""'], {}), "('pinecone_api_key')\n", (935, 955), False, 'import os\n'), ((1047, 1072), 'os.getenv', 'os.getenv', (['"""pinecone_env"""'], {}), "('pinecone_env')\n", (1056, 1072), False, 'import os\n'), ((1143, 1249), 'pinecone.init', 'pinecone.init', ([], {'api_key': "os.environ['PINECONE_API_KEY']", 'environment': "os.environ['PINECONE_ENVIRONMENT']"}), "(api_key=os.environ['PINECONE_API_KEY'], environment=os.\n environ['PINECONE_ENVIRONMENT'])\n", (1156, 1249), False, 'import pinecone\n'), ((1351, 1420), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'embed_batch_size': '(100)'}), "(model='text-embedding-ada-002', embed_batch_size=100)\n", (1366, 1420), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((1439, 1492), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (1467, 1492), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((1516, 1544), 'llama_index.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (1531, 1544), False, 'from llama_index import download_loader\n'), ((1562, 1595), 'llama_index.download_loader', 'download_loader', (['"""MarkdownReader"""'], {}), "('MarkdownReader')\n", (1577, 1595), False, 'from llama_index import download_loader\n'), ((2081, 2107), 'pinecone.Index', 'pinecone.Index', (['index_name'], {}), '(index_name)\n', (2095, 2107), False, 'import pinecone\n'), ((2127, 2176), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'pineconeIndex'}), '(pinecone_index=pineconeIndex)\n', (2146, 2176), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((2249, 2303), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vectorStore'}), '(vector_store=vectorStore)\n', (2277, 2303), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((2331, 2456), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', ([], {'documents': 'documents', 'storage_context': 'storageContext', 'service_context': 'service_context'}), '(documents=documents, storage_context=\n storageContext, service_context=service_context)\n', (2365, 2456), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((1882, 1905), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (1903, 1905), False, 'import pinecone\n'), ((1915, 1986), 'pinecone.create_index', 'pinecone.create_index', ([], {'name': 'index_name', 'dimension': '(1536)', 'metric': '"""cosine"""'}), "(name=index_name, dimension=1536, metric='cosine')\n", (1936, 1986), False, 'import pinecone\n'), ((1745, 1787), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'input_dir'}), '(input_dir=input_dir)\n', (1766, 1787), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n')] |
"""Read PDF files."""
import shutil
from pathlib import Path
from typing import Any, List
from llama_index.langchain_helpers.text_splitter import SentenceSplitter
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
# https://github.com/emptycrown/llama-hub/blob/main/loader_hub/file/cjk_pdf/base.py
staticPath = "static"
class CJKPDFReader(BaseReader):
"""CJK PDF reader.
Extract text from PDF including CJK (Chinese, Japanese and Korean) languages using pdfminer.six.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def load_data(self, filepath: Path, filename) -> List[Document]:
"""Parse file."""
# Import pdfminer
from io import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
# Create a resource manager
rsrcmgr = PDFResourceManager()
# Create an object to store the text
retstr = StringIO()
# Create a text converter
codec = "utf-8"
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
# Create a PDF interpreter
interpreter = PDFPageInterpreter(rsrcmgr, device)
# Open the PDF file
fp = open(filepath, "rb")
# Create a list to store the text of each page
document_list = []
# Extract text from each page
for i, page in enumerate(PDFPage.get_pages(fp)):
interpreter.process_page(page)
# Get the text
text = retstr.getvalue()
sentence_splitter = SentenceSplitter(chunk_size=400)
text_chunks = sentence_splitter.split_text(text)
document_list += [
Document(t, extra_info={"page_no": i + 1}) for t in text_chunks
]
# Clear the text
retstr.truncate(0)
retstr.seek(0)
# Close the file
fp.close()
# Close the device
device.close()
shutil.copy2(filepath, f"{staticPath}/file/{filename}")
return document_list
| [
"llama_index.readers.schema.base.Document",
"llama_index.langchain_helpers.text_splitter.SentenceSplitter"
] | [((1102, 1122), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (1120, 1122), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((1185, 1195), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1193, 1195), False, 'from io import StringIO\n'), ((1273, 1283), 'pdfminer.layout.LAParams', 'LAParams', ([], {}), '()\n', (1281, 1283), False, 'from pdfminer.layout import LAParams\n'), ((1301, 1363), 'pdfminer.converter.TextConverter', 'TextConverter', (['rsrcmgr', 'retstr'], {'codec': 'codec', 'laparams': 'laparams'}), '(rsrcmgr, retstr, codec=codec, laparams=laparams)\n', (1314, 1363), False, 'from pdfminer.converter import TextConverter\n'), ((1421, 1456), 'pdfminer.pdfinterp.PDFPageInterpreter', 'PDFPageInterpreter', (['rsrcmgr', 'device'], {}), '(rsrcmgr, device)\n', (1439, 1456), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((2247, 2302), 'shutil.copy2', 'shutil.copy2', (['filepath', 'f"""{staticPath}/file/{filename}"""'], {}), "(filepath, f'{staticPath}/file/{filename}')\n", (2259, 2302), False, 'import shutil\n'), ((1672, 1693), 'pdfminer.pdfpage.PDFPage.get_pages', 'PDFPage.get_pages', (['fp'], {}), '(fp)\n', (1689, 1693), False, 'from pdfminer.pdfpage import PDFPage\n'), ((1837, 1869), 'llama_index.langchain_helpers.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(400)'}), '(chunk_size=400)\n', (1853, 1869), False, 'from llama_index.langchain_helpers.text_splitter import SentenceSplitter\n'), ((1979, 2021), 'llama_index.readers.schema.base.Document', 'Document', (['t'], {'extra_info': "{'page_no': i + 1}"}), "(t, extra_info={'page_no': i + 1})\n", (1987, 2021), False, 'from llama_index.readers.schema.base import Document\n')] |
import chromadb
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.indices.service_context import ServiceContext
class Vector:
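    """Index documents from doc_location into an in-memory Chroma collection and answer queries over them."""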
def __init__(self, doc_location):
self.client = chromadb.Client()
self.doc_location = doc_location
self.collection = self.client.create_collection("papers")
self.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
def process_document(self):
"""
Process the document by performing the following steps:
1. Read the document.
2. Set up ChromaVectorStore and load in data.
3. Create a VectorStoreIndex from the documents using the specified storage context, embed model, and service context.
"""
service_context = ServiceContext.from_defaults(chunk_size=100, chunk_overlap=10)
documents = SimpleDirectoryReader(self.doc_location).load_data()
vector_store = ChromaVectorStore(chroma_collection=self.collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
self.index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, embed_model=self.embed_model, service_context=service_context
)
def query_document(self, query):
query_engine = self.index.as_query_engine()
response = query_engine.query(query)
return response | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.core.indices.service_context.ServiceContext.from_defaults",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.SimpleDirectoryReader",
"llama_index.vector_stores.chroma.ChromaVectorStore"
] | [((403, 420), 'chromadb.Client', 'chromadb.Client', ([], {}), '()\n', (418, 420), False, 'import chromadb\n'), ((555, 611), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-base-en-v1.5"""'}), "(model_name='BAAI/bge-base-en-v1.5')\n", (575, 611), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((1002, 1064), 'llama_index.core.indices.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(100)', 'chunk_overlap': '(10)'}), '(chunk_size=100, chunk_overlap=10)\n', (1030, 1064), False, 'from llama_index.core.indices.service_context import ServiceContext\n'), ((1170, 1222), 'llama_index.vector_stores.chroma.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'self.collection'}), '(chroma_collection=self.collection)\n', (1187, 1222), False, 'from llama_index.vector_stores.chroma import ChromaVectorStore\n'), ((1253, 1308), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1281, 1308), False, 'from llama_index.core import StorageContext\n'), ((1334, 1476), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'embed_model': 'self.embed_model', 'service_context': 'service_context'}), '(documents, storage_context=storage_context,\n embed_model=self.embed_model, service_context=service_context)\n', (1365, 1476), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n'), ((1089, 1129), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['self.doc_location'], {}), '(self.doc_location)\n', (1110, 1129), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n')] |
from llama_index.llms import OpenAI
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
import os
documents = SimpleDirectoryReader("./competition").load_data()
os.environ['OPENAI_API_KEY'] = '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'
embed_model = HuggingFaceEmbedding(model_name='BAAI/bge-large-en-v1.5')
llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo")
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model, chunk_size=800, chunk_overlap=20)
index = VectorStoreIndex.from_documents(documents, service_context=service_context, show_progress=True)
index.storage_context.persist()
query_engine = index.as_query_engine(similarity_top_k=2, response_mode='tree_summarize')
# response = query_engine.query(
# "what are the benefits that I can have regarding risk management and portfolio monitoring? What are the charges?"
# )
def answer(question):
return query_engine.query(question)
if __name__ == "__main__":
while True:
question = input("Ask a question: ")
print(answer(question)) | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.embeddings.HuggingFaceEmbedding"
] | [((346, 403), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-large-en-v1.5"""'}), "(model_name='BAAI/bge-large-en-v1.5')\n", (366, 403), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((411, 457), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.1)', 'model': '"""gpt-3.5-turbo"""'}), "(temperature=0.1, model='gpt-3.5-turbo')\n", (417, 457), False, 'from llama_index.llms import OpenAI\n'), ((476, 577), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'chunk_size': '(800)', 'chunk_overlap': '(20)'}), '(llm=llm, embed_model=embed_model, chunk_size=\n 800, chunk_overlap=20)\n', (504, 577), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((581, 680), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context', 'show_progress': '(True)'}), '(documents, service_context=service_context,\n show_progress=True)\n', (612, 680), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((195, 233), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./competition"""'], {}), "('./competition')\n", (216, 233), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n')] |
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response) | [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader"
] | [((134, 176), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (165, 176), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n'), ((82, 113), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data"""'], {}), "('./data')\n", (103, 113), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n')] |
from typing import List
from llama_index import Document, TwitterTweetReader
from social_gpt.ingestion.scraper.social_scraper import SocialScraper
class TwitterScraper(SocialScraper):
def scrape(self) -> List[Document]:
TwitterTweetReader() | [
"llama_index.TwitterTweetReader"
] | [((237, 257), 'llama_index.TwitterTweetReader', 'TwitterTweetReader', ([], {}), '()\n', (255, 257), False, 'from llama_index import Document, TwitterTweetReader\n')] |
import os
from typing import List
import googleapiclient
from dotenv import load_dotenv
from llama_index import Document
from progress.bar import IncrementalBar
from youtube_transcript_api import YouTubeTranscriptApi
from social_gpt.ingestion.scraper.social_scraper import SocialScraper
load_dotenv()
YOUTUBE_API_SERVICE_NAME = 'youtube'
YOUTUBE_API_VERSION = 'v3'
class YoutubeScraper(SocialScraper):
def scrape(self) -> List[Document]:
print(f"scraping youtube channel ${self.username}")
return self.get_channel_video_docs()
@staticmethod
def get_transcript(video_id):
try:
transcript = YouTubeTranscriptApi.get_transcript(video_id)
return " ".join(list(map(lambda trans: trans['text'], transcript)))
except Exception:
return None
def get_channel_video_docs(self) -> List[Document]:
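        # Fetch up to 200 videos from the channel via the YouTube Data API and
        # build one Document per video that has a retrievable transcript.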
youtube = googleapiclient.discovery.build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=os.getenv('YOUTUBE_DEVELOPER_KEY'))
request = youtube.search().list(
part="snippet",
channelId=self.username,
maxResults=200, # Change if needed
type="video"
)
response = request.execute()
transcripts = []
bar = IncrementalBar('Transcribing', max=len(response['items']))
for item in response['items']:
transcript = YoutubeScraper.get_transcript(item['id']['videoId'])
if transcript:
transcripts.append(transcript)
bar.next()
bar.finish()
return list(map(lambda transcript: Document(transcript), transcripts))
| [
"llama_index.Document"
] | [((290, 303), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (301, 303), False, 'from dotenv import load_dotenv\n'), ((644, 689), 'youtube_transcript_api.YouTubeTranscriptApi.get_transcript', 'YouTubeTranscriptApi.get_transcript', (['video_id'], {}), '(video_id)\n', (679, 689), False, 'from youtube_transcript_api import YouTubeTranscriptApi\n'), ((1037, 1071), 'os.getenv', 'os.getenv', (['"""YOUTUBE_DEVELOPER_KEY"""'], {}), "('YOUTUBE_DEVELOPER_KEY')\n", (1046, 1071), False, 'import os\n'), ((1678, 1698), 'llama_index.Document', 'Document', (['transcript'], {}), '(transcript)\n', (1686, 1698), False, 'from llama_index import Document\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import logging
import sys
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# read the documents from the data dir
documents = SimpleDirectoryReader("data").load_data()
# split the documents into chunks (max token size 500) and convert each chunk to a vector
index = GPTSimpleVectorIndex(documents)
# save index
index.save_to_disk("index.json") | [
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader"
] | [((153, 211), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (172, 211), False, 'import logging\n'), ((457, 488), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {}), '(documents)\n', (477, 488), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex\n'), ((243, 283), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (264, 283), False, 'import logging\n'), ((212, 231), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (229, 231), False, 'import logging\n'), ((330, 359), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (351, 359), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex\n')] |
import dotenv
import os
from llama_index.readers.github import GithubRepositoryReader, GithubClient
from llama_index.core import (VectorStoreIndex, StorageContext, PromptTemplate, load_index_from_storage, Settings)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import Ollama
from llama_index.embeddings.openai import OpenAIEmbedding
def load_environ_vars():
dotenv.load_dotenv()
github_token = os.environ['GITHUB_TOKEN']
# open_api = os.environ['OPEN_API_KEY']
if github_token is None:
print("Add the GITHUB_TOKEN environment variable in the .env file")
exit()
"""if open_api is None:
print("Add the OPEN_API_KEY environment variable. Read instrucitons in the readme")
exit()"""
return github_token
def load_data(github_token: str, owner: str, repo: str):
github_client = GithubClient(github_token)
loader = GithubRepositoryReader(
github_client,
owner=owner,
repo=repo,
filter_file_extensions=(
[".py", ".ipynb", ".js", ".ts", ".md"],
GithubRepositoryReader.FilterType.INCLUDE,
),
verbose=False,
concurrent_requests=5,
)
docs = loader.load_data(branch="main")
return docs
def load_embedding_model():
embedding_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
print("embedding model loaded")
return embedding_model
def main():
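    # Index the requested GitHub repo on the first run (OpenAI or local Ollama embeddings),
    # otherwise reload the persisted index, then answer queries in a REPL loop.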
github_token = load_environ_vars()
PERSIST_DIR = "./basic/storage"
choice = input("Enter 1 to use OPEN API enter 0 to use loally setup llama2 model using Ollama:")
if not os.path.exists(PERSIST_DIR):
owner = input("Enter the username of the owner of the repo: ")
repo = input("Enter the name of the repo: ")
documents = load_data(github_token, owner, repo)
try:
if choice == '1':
print("Open API is being used")
embedding_model = OpenAIEmbedding()
index = VectorStoreIndex.from_documents(documents)
else:
print("Ollama is being used")
embedding_model = load_embedding_model()
Settings.embed_model = embedding_model
index = VectorStoreIndex.from_documents(
documents,
embed_model=embedding_model
)
except Exception as e:
print(e)
exit()
print("Documents Indexed")
else:
# load the existing index
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)
print("Already indexed data loaded")
llama = Ollama(model="llama2", request_timeout=200.0)
Settings.llm = llama
query_engine = index.as_query_engine(llm=llama)
qa_prompt_tmpl_str = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information above I want you to think step by step to answer the query in a crisp manner, incase case you don't know the answer say 'I don't know!'.\n"
"Query: {query_str}\n"
"Answer: "
)
qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)
query_engine.update_prompts({"response_synthesizer:text_qa_template": qa_prompt_tmpl})
print("Press ctr + c to exit")
while True:
query = input("Enter your query: ")
response = query_engine.query(query)
print(response)
if __name__ == "__main__":
main()
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.llms.ollama.Ollama",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.PromptTemplate",
"llama_index.readers.github.GithubRepositoryReader",
"llama_index.readers.github.GithubClient",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((415, 435), 'dotenv.load_dotenv', 'dotenv.load_dotenv', ([], {}), '()\n', (433, 435), False, 'import dotenv\n'), ((886, 912), 'llama_index.readers.github.GithubClient', 'GithubClient', (['github_token'], {}), '(github_token)\n', (898, 912), False, 'from llama_index.readers.github import GithubRepositoryReader, GithubClient\n'), ((931, 1150), 'llama_index.readers.github.GithubRepositoryReader', 'GithubRepositoryReader', (['github_client'], {'owner': 'owner', 'repo': 'repo', 'filter_file_extensions': "(['.py', '.ipynb', '.js', '.ts', '.md'], GithubRepositoryReader.FilterType.\n INCLUDE)", 'verbose': '(False)', 'concurrent_requests': '(5)'}), "(github_client, owner=owner, repo=repo,\n filter_file_extensions=(['.py', '.ipynb', '.js', '.ts', '.md'],\n GithubRepositoryReader.FilterType.INCLUDE), verbose=False,\n concurrent_requests=5)\n", (953, 1150), False, 'from llama_index.readers.github import GithubRepositoryReader, GithubClient\n'), ((1363, 1420), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""BAAI/bge-small-en-v1.5"""'}), "(model_name='BAAI/bge-small-en-v1.5')\n", (1383, 1420), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((2826, 2871), 'llama_index.llms.ollama.Ollama', 'Ollama', ([], {'model': '"""llama2"""', 'request_timeout': '(200.0)'}), "(model='llama2', request_timeout=200.0)\n", (2832, 2871), False, 'from llama_index.llms.ollama import Ollama\n'), ((3405, 3439), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['qa_prompt_tmpl_str'], {}), '(qa_prompt_tmpl_str)\n', (3419, 3439), False, 'from llama_index.core import VectorStoreIndex, StorageContext, PromptTemplate, load_index_from_storage, Settings\n'), ((1699, 1726), 'os.path.exists', 'os.path.exists', (['PERSIST_DIR'], {}), '(PERSIST_DIR)\n', (1713, 1726), False, 'import os\n'), ((2649, 2702), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIR'}), '(persist_dir=PERSIST_DIR)\n', (2677, 2702), False, 'from llama_index.core import VectorStoreIndex, StorageContext, PromptTemplate, load_index_from_storage, Settings\n'), ((2719, 2759), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2742, 2759), False, 'from llama_index.core import VectorStoreIndex, StorageContext, PromptTemplate, load_index_from_storage, Settings\n'), ((2034, 2051), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (2049, 2051), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((2076, 2118), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2107, 2118), False, 'from llama_index.core import VectorStoreIndex, StorageContext, PromptTemplate, load_index_from_storage, Settings\n'), ((2336, 2407), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'embed_model': 'embedding_model'}), '(documents, embed_model=embedding_model)\n', (2367, 2407), False, 'from llama_index.core import VectorStoreIndex, StorageContext, PromptTemplate, load_index_from_storage, Settings\n')] |
import os
from dotenv import load_dotenv
load_dotenv()
import s3fs
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage
)
# load documents
documents = SimpleDirectoryReader('../../../examples/paul_graham_essay/data/').load_data()
print(len(documents))
index = VectorStoreIndex.from_documents(documents)
# set up s3fs
AWS_KEY = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET = os.environ['AWS_SECRET_ACCESS_KEY']
R2_ACCOUNT_ID = os.environ['R2_ACCOUNT_ID']
assert AWS_KEY is not None and AWS_KEY != ""
s3 = s3fs.S3FileSystem(
key=AWS_KEY,
secret=AWS_SECRET,
endpoint_url=f'https://{R2_ACCOUNT_ID}.r2.cloudflarestorage.com',
s3_additional_kwargs={'ACL': 'public-read'}
)
# save index to remote blob storage
index.set_index_id("vector_index")
# this is {bucket_name}/{index_name}
index.storage_context.persist('llama-index/storage_demo', fs=s3)
# load index from s3
sc = StorageContext.from_defaults(persist_dir='llama-index/storage_demo', fs=s3)
index2 = load_index_from_storage(sc, 'vector_index') | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.load_index_from_storage",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults"
] | [((41, 54), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (52, 54), False, 'from dotenv import load_dotenv\n'), ((329, 371), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (360, 371), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((575, 744), 's3fs.S3FileSystem', 's3fs.S3FileSystem', ([], {'key': 'AWS_KEY', 'secret': 'AWS_SECRET', 'endpoint_url': 'f"""https://{R2_ACCOUNT_ID}.r2.cloudflarestorage.com"""', 's3_additional_kwargs': "{'ACL': 'public-read'}"}), "(key=AWS_KEY, secret=AWS_SECRET, endpoint_url=\n f'https://{R2_ACCOUNT_ID}.r2.cloudflarestorage.com',\n s3_additional_kwargs={'ACL': 'public-read'})\n", (592, 744), False, 'import s3fs\n'), ((951, 1026), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""llama-index/storage_demo"""', 'fs': 's3'}), "(persist_dir='llama-index/storage_demo', fs=s3)\n", (979, 1026), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((1036, 1079), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['sc', '"""vector_index"""'], {}), "(sc, 'vector_index')\n", (1059, 1079), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((220, 286), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""../../../examples/paul_graham_essay/data/"""'], {}), "('../../../examples/paul_graham_essay/data/')\n", (241, 286), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage\n')] |
import sys
import logging
import chromadb
import streamlit as st
from llama_index.llms import OpenAI
from llama_index import SimpleDirectoryReader, VectorStoreIndex
from llama_index.vector_stores import ChromaVectorStore
from llama_index.storage.storage_context import StorageContext
from llama_index import ServiceContext
from llama_index.node_parser.file.markdown import MarkdownNodeParser
from llama_index.chat_engine.types import ChatMode
from MarkdownReader import MarkdownReader
from sources import sources, get_file_metadata, Source
logger = logging.getLogger()
# logger.setLevel(logging.DEBUG)
# stream_handler = logging.StreamHandler(stream=sys.stdout)
# stream_handler.setLevel(logging.DEBUG)
# file_handler = logging.FileHandler("logs.log")
# file_handler.setLevel(logging.DEBUG)
# logger.addHandler(file_handler)
# logger.addHandler(stream_handler)
def get_filename_metadata(source, filename):
metadata = {
"source": source.get("description", source.get("title")),
**source.get("file_metadata", get_file_metadata)(filename),
}
# print(filename, metadata)
return metadata
def get_all_metadata(source):
return lambda filename: get_filename_metadata(source, filename)
def get_documents(source):
"""return Document for given source(path, file_metadata)"""
reader = SimpleDirectoryReader(
input_dir=source.get("path"),
required_exts=[".md"],
recursive=True,
exclude=source.get("exclude", []),
file_extractor={".md": MarkdownReader(source.get("include_metas", []))},
file_metadata=get_all_metadata(source),
)
# use MarkdownReader
docs = reader.load_data()
return docs
def index_source(chroma_client, source: Source):
"""index given source in chromadb"""
docs = get_documents(source)
chroma_collection = None
try:
chroma_collection = chroma_client.get_collection(source.get("id"))
logger.info("==> Collection {} already exist\n\n".format(source.get("id")))
except ValueError:
nodes = node_parser.get_nodes_from_documents(docs, show_progress=True)
chroma_collection = chroma_client.create_collection(source.get("id"))
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# todo: show nodes content length
logger.info(
"index {} documents and {} nodes in {}".format(
len(docs), len(nodes), source.get("id")
)
)
index = VectorStoreIndex.from_documents(
docs,
storage_context=storage_context,
service_context=service_context,
show_progress=True,
)
logger.info(f"==> Loaded {len(docs)} docs\n\n")
if source.get("on_finish"):
source.get("on_finish", lambda a, b: None)(
docs, index
) # lambda for typings
finally:
if chroma_collection:
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index = VectorStoreIndex.from_vector_store(vector_store)
return index
def debug_source(index, source):
query_engine = index.as_query_engine()
for query in source.get("examples", []):
response = query_engine.query(query)
print("\n", source.get("id"), ":", query, "\n")
print(str(response))
# print((response.get_formatted_sources()))
# print((response.source_nodes))
print("\n-------------")
# @st.cache_resource(show_spinner=False)
def index_sources1(sources):
logger.info("Indexing sources...")
indices = []
for source in sources:
logger.info("Indexing {}".format(source.get("id")))
index = index_source(chroma_client, source)
# debug_source(index, source)
indices.append(index)
return list(zip(indices, sources))
def index_sources(sources):
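    # Merge the documents from every source into a single "all_docs" Chroma collection.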
logger.info("Indexing sources...")
docs = []
index_id = "all_docs"
chroma_collection = None
for source in sources:
sourceDocs = get_documents(source)
docs += sourceDocs
if source.get("additional_documents"):
docs += source.get("additional_documents")(sourceDocs)
try:
chroma_collection = chroma_client.get_collection(index_id)
logger.info(f"==> Collection {index_id} already exist\n\n")
except ValueError:
# nodes = node_parser.get_nodes_from_documents(docs, show_progress=True)
chroma_collection = chroma_client.create_collection(index_id)
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
# todo: show nodes content length
logger.info("index {} documents in {}".format(len(docs), index_id))
index = VectorStoreIndex.from_documents(
docs,
storage_context=storage_context,
service_context=service_context,
show_progress=True,
)
logger.info(f"==> Loaded {len(docs)} docs\n\n")
# if source.get("on_finish"):
# source.get("on_finish", lambda a, b: None)(docs, index) # lambda for typings
finally:
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index = VectorStoreIndex.from_vector_store(
vector_store, service_context=service_context
)
return index
node_parser = MarkdownNodeParser.from_defaults()
chroma_client = chromadb.PersistentClient(path="./chroma_db")
# llm = OpenAI(
# model="gpt-3.5-turbo",
# temperature=0.0,
# )
# use OpenAI by default
service_context = ServiceContext.from_defaults(
chunk_size=512,
# embed_model=embed_model,
node_parser=node_parser,
# llm=llm,
# prompt_helper=
)
index = index_sources(sources)
if __name__ == "__main__":
# query
chat = index.as_chat_engine(
chat_mode=ChatMode.CONTEXT,
verbose=True,
similarity_top_k=5,
)
chat.chat_repl()
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.vector_stores.ChromaVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.file.markdown.MarkdownNodeParser.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store"
] | [((553, 572), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (570, 572), False, 'import logging\n'), ((5510, 5544), 'llama_index.node_parser.file.markdown.MarkdownNodeParser.from_defaults', 'MarkdownNodeParser.from_defaults', ([], {}), '()\n', (5542, 5544), False, 'from llama_index.node_parser.file.markdown import MarkdownNodeParser\n'), ((5562, 5607), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (5587, 5607), False, 'import chromadb\n'), ((5725, 5794), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(512)', 'node_parser': 'node_parser'}), '(chunk_size=512, node_parser=node_parser)\n', (5753, 5794), False, 'from llama_index import ServiceContext\n'), ((5302, 5356), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (5319, 5356), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((5373, 5459), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context'}), '(vector_store, service_context=\n service_context)\n', (5407, 5459), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n'), ((2220, 2274), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (2237, 2274), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((2301, 2356), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2329, 2356), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((2576, 2703), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context', 'show_progress': '(True)'}), '(docs, storage_context=storage_context,\n service_context=service_context, show_progress=True)\n', (2607, 2703), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n'), ((3041, 3095), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (3058, 3095), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((3116, 3164), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (3150, 3164), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n'), ((4627, 4681), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (4644, 4681), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((4708, 4763), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (4736, 4763), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((4898, 5025), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context', 'show_progress': 
'(True)'}), '(docs, storage_context=storage_context,\n service_context=service_context, show_progress=True)\n', (4929, 5025), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n')] |
from flask_restx import Resource
from flask import request, render_template, Response
import openai
import os
import json
from llama_index import GPTSimpleVectorIndex
from llama_index import Document
from furl import furl
from PyPDF2 import PdfReader
os.environ["OPENAI_API_KEY"] = "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"
openai.organization = "org-Ddi6ZSgWKe8kPZlpwd6M6WVe"
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_domain(link):
print("link", link)
f = furl(link)
host = f.host
tld = host.split(".")
if len(tld) > 2:
return tld[1]
else:
return tld[0]
def get_title(title):
f = furl(title)
host = f.host
if host != "":
return host
else:
return title
class Upload(Resource):
def post(self):
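        # Save the uploaded PDF, extract its text, record the document per user in
        # userData.json, and upsert it into the vector index persisted as database.json.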
data = {}
userid = data.get('userid', 'cibi')
print(request.files)
file = request.files['userfile']
file.save(userid + file.filename)
print(file)
reader = PdfReader(userid + file.filename)
data = ""
for page in reader.pages:
data += page.extract_text()
unique_doc = file.filename
file_name = str(hash(userid + unique_doc)) + ".txt"
#dict_obj = {"userid":userid,"pageTitle":pageTitle}
alreadyPresentList = []
userDataJson = {}
if os.path.exists("./userData.json"):
with open('./userData.json', 'r') as userDataJsonFile:
userDataJson = json.loads(userDataJsonFile.read())
if userid in userDataJson:
alreadyPresentList = userDataJson[userid]
if unique_doc not in alreadyPresentList:
alreadyPresentList.append(unique_doc)
else:
alreadyPresentList.append(unique_doc)
userDataJson[userid] = alreadyPresentList
print("New data : ", str(userDataJson))
userDataJsonFileWrite = open('./userData.json', "w")
userDataJsonFileWrite.write(json.dumps(userDataJson))
userDataJsonFileWrite.close()
with open(str(file_name), 'w') as fl:
fl.write(data)
llama_doc = Document(data, doc_id=userid + "<sep>" + unique_doc)
if os.path.exists("database.json"):
existing_index = GPTSimpleVectorIndex.load_from_disk('database.json')
existing_index.update(llama_doc)
existing_index.save_to_disk("database.json")
else:
index = GPTSimpleVectorIndex.from_documents(documents=[llama_doc])
index.update(llama_doc)
index.save_to_disk("database.json")
response = ""
return response, 200 | [
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTSimpleVectorIndex.from_documents",
"llama_index.Document"
] | [((407, 434), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (416, 434), False, 'import os\n'), ((491, 501), 'furl.furl', 'furl', (['link'], {}), '(link)\n', (495, 501), False, 'from furl import furl\n'), ((653, 664), 'furl.furl', 'furl', (['title'], {}), '(title)\n', (657, 664), False, 'from furl import furl\n'), ((1010, 1043), 'PyPDF2.PdfReader', 'PdfReader', (['(userid + file.filename)'], {}), '(userid + file.filename)\n', (1019, 1043), False, 'from PyPDF2 import PdfReader\n'), ((1360, 1393), 'os.path.exists', 'os.path.exists', (['"""./userData.json"""'], {}), "('./userData.json')\n", (1374, 1393), False, 'import os\n'), ((2173, 2225), 'llama_index.Document', 'Document', (['data'], {'doc_id': "(userid + '<sep>' + unique_doc)"}), "(data, doc_id=userid + '<sep>' + unique_doc)\n", (2181, 2225), False, 'from llama_index import Document\n'), ((2237, 2268), 'os.path.exists', 'os.path.exists', (['"""database.json"""'], {}), "('database.json')\n", (2251, 2268), False, 'import os\n'), ((2016, 2040), 'json.dumps', 'json.dumps', (['userDataJson'], {}), '(userDataJson)\n', (2026, 2040), False, 'import json\n'), ((2299, 2351), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['"""database.json"""'], {}), "('database.json')\n", (2334, 2351), False, 'from llama_index import GPTSimpleVectorIndex\n'), ((2488, 2546), 'llama_index.GPTSimpleVectorIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', ([], {'documents': '[llama_doc]'}), '(documents=[llama_doc])\n', (2523, 2546), False, 'from llama_index import GPTSimpleVectorIndex\n')] |
from __future__ import annotations
import os
import dataclasses
from typing import TYPE_CHECKING, ClassVar
import time
import httpx
from rich import print
from xiaogpt.bot.base_bot import BaseBot, ChatHistoryMixin
from xiaogpt.utils import split_sentences
if TYPE_CHECKING:
import openai
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.azure_openai import AzureOpenAI, AsyncAzureOpenAI
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import (
Settings,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
PromptTemplate,
SimpleDirectoryReader
)
@dataclasses.dataclass
class RagBot(ChatHistoryMixin, BaseBot):
name: ClassVar[str] = "RAG"
default_options: ClassVar[dict[str, str]] = {"model": "gpt4-1106-prevision"}
openai_key: str
api_base: str | None = None
proxy: str | None = None
history: list[tuple[str, str]] = dataclasses.field(default_factory=list, init=False)
def _make_query_engine(self, sess: httpx.AsyncClient, stream=False):
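        # Build a RAG query engine: Azure OpenAI for both the LLM and embeddings, over a
        # vector index of xiaogpt/rag/data that is persisted to and reloaded from disk.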
llm = AzureOpenAI(
engine="gpt4-1106-prevision",
api_key=self.openai_key,
azure_endpoint=self.api_base,
api_version="2023-12-01-preview",
)
embed_model = AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="embedding-ada-002-v2",
api_key=self.openai_key,
azure_endpoint="http://192.168.12.232:8880",
api_version="2023-05-15",
)
Settings.embed_model = embed_model
Settings.llm = llm
# check if storage already exists
PERSIST_DIR = "xiaogpt/rag/storage"
if not os.path.exists(PERSIST_DIR):
# load the documents and create the index
documents = SimpleDirectoryReader("xiaogpt/rag/data").load_data()
index = VectorStoreIndex.from_documents(documents)
# store it for later
index.storage_context.persist(persist_dir=PERSIST_DIR)
else:
# load the existing index
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)
# set Logging to DEBUG for more detailed outputs
text_qa_template_str = (
"Context information is"
" below.\n---------------------\n{context_str}\n---------------------\nUsing"
" both the context information and also using your own knowledge, answer"
" the question with less that 100 words: {query_str}\nIf the context isn't helpful, you can also"
" answer the question on your own.\n"
)
text_qa_template = PromptTemplate(text_qa_template_str)
refine_template_str = (
"The original question is as follows: {query_str}\nWe have provided an"
" existing answer: {existing_answer}\nWe have the opportunity to refine"
" the existing answer (only if needed) with some more context"
" below.\n------------\n{context_msg}\n------------\nUsing both the new"
" context and your own knowledge, update existing answer with less than 100 words. \n"
)
refine_template = PromptTemplate(refine_template_str)
query_engine = index.as_query_engine(
text_qa_template=text_qa_template,
refine_template=refine_template,
llm=llm,
streaming=stream
)
return query_engine
@classmethod
def from_config(cls, config):
return cls(
openai_key=config.openai_key,
api_base=config.api_base,
proxy=config.proxy
)
async def ask(self, query, **options):
ms = self.get_messages()
ms.append({"role": "user", "content": f"{query}"})
kwargs = {**self.default_options, **options}
httpx_kwargs = {}
if self.proxy:
httpx_kwargs["proxies"] = self.proxy
async with httpx.AsyncClient(trust_env=True, **httpx_kwargs) as sess:
query_engine = self._make_query_engine(sess)
try:
completion = query_engine.query(query)
except Exception as e:
print(str(e))
return ""
message = completion.response
# print(completion.source_nodes[0].get_text())
self.add_message(query, message)
print(message)
return message
async def ask_stream(self, query, **options):
ms = self.get_messages()
ms.append({"role": "user", "content": f"{query}"})
kwargs = {**self.default_options, **options}
httpx_kwargs = {}
if self.proxy:
httpx_kwargs["proxies"] = self.proxy
async with httpx.AsyncClient(trust_env=True, **httpx_kwargs) as sess:
query_engine = self._make_query_engine(sess, stream=True)
try:
completion = query_engine.query(query)
except Exception as e:
print(str(e))
return
async def text_gen():
async for event in completion:
if not event.response:
continue
chunk_message = event.response
if chunk_message.response is None:
continue
print(chunk_message.response, end="")
yield chunk_message.response
message = ""
try:
async for sentence in split_sentences(text_gen()):
message += sentence
yield sentence
finally:
print()
self.add_message(query, message)
import functools
import dataclasses
from typing import Any, AsyncIterator, Literal, Optional
@dataclasses.dataclass
class Config:
openai_key: str = "voxelcloud"
proxy: str | None = None
api_base: str = "http://192.168.12.232:8881"
stream: bool = False
bot: str = "chatgptapi"
gpt_options: dict[str, Any] = dataclasses.field(default_factory=dict)
import asyncio
async def main():
    config = Config()  # assumes the Config class defined above accepts these defaults
bot = RagBot.from_config(config)
    # ask a question
response = await bot.ask("什么是光疗?")
print(response)
# run the async main function
if __name__ == "__main__":
asyncio.run(main())
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.azure_openai.AzureOpenAIEmbedding",
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.PromptTemplate",
"llama_index.llms.azure_openai.AzureOpenAI",
"llama_index.core.SimpleDirectoryReader"
] | [((1030, 1081), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list', 'init': '(False)'}), '(default_factory=list, init=False)\n', (1047, 1081), False, 'import dataclasses\n'), ((6247, 6286), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (6264, 6286), False, 'import dataclasses\n'), ((6470, 6485), 'rich.print', 'print', (['response'], {}), '(response)\n', (6475, 6485), False, 'from rich import print\n'), ((1172, 1306), 'llama_index.llms.azure_openai.AzureOpenAI', 'AzureOpenAI', ([], {'engine': '"""gpt4-1106-prevision"""', 'api_key': 'self.openai_key', 'azure_endpoint': 'self.api_base', 'api_version': '"""2023-12-01-preview"""'}), "(engine='gpt4-1106-prevision', api_key=self.openai_key,\n azure_endpoint=self.api_base, api_version='2023-12-01-preview')\n", (1183, 1306), False, 'from llama_index.llms.azure_openai import AzureOpenAI, AsyncAzureOpenAI\n'), ((1384, 1582), 'llama_index.embeddings.azure_openai.AzureOpenAIEmbedding', 'AzureOpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'deployment_name': '"""embedding-ada-002-v2"""', 'api_key': 'self.openai_key', 'azure_endpoint': '"""http://192.168.12.232:8880"""', 'api_version': '"""2023-05-15"""'}), "(model='text-embedding-ada-002', deployment_name=\n 'embedding-ada-002-v2', api_key=self.openai_key, azure_endpoint=\n 'http://192.168.12.232:8880', api_version='2023-05-15')\n", (1404, 1582), False, 'from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n'), ((2838, 2874), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['text_qa_template_str'], {}), '(text_qa_template_str)\n', (2852, 2874), False, 'from llama_index.core import Settings, VectorStoreIndex, StorageContext, load_index_from_storage, PromptTemplate, SimpleDirectoryReader\n'), ((3372, 3407), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['refine_template_str'], {}), '(refine_template_str)\n', (3386, 3407), False, 'from llama_index.core import Settings, VectorStoreIndex, StorageContext, load_index_from_storage, PromptTemplate, SimpleDirectoryReader\n'), ((1815, 1842), 'os.path.exists', 'os.path.exists', (['PERSIST_DIR'], {}), '(PERSIST_DIR)\n', (1829, 1842), False, 'import os\n'), ((1996, 2038), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2027, 2038), False, 'from llama_index.core import Settings, VectorStoreIndex, StorageContext, load_index_from_storage, PromptTemplate, SimpleDirectoryReader\n'), ((2221, 2274), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIR'}), '(persist_dir=PERSIST_DIR)\n', (2249, 2274), False, 'from llama_index.core import Settings, VectorStoreIndex, StorageContext, load_index_from_storage, PromptTemplate, SimpleDirectoryReader\n'), ((2295, 2335), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2318, 2335), False, 'from llama_index.core import Settings, VectorStoreIndex, StorageContext, load_index_from_storage, PromptTemplate, SimpleDirectoryReader\n'), ((4149, 4198), 'httpx.AsyncClient', 'httpx.AsyncClient', ([], {'trust_env': '(True)'}), '(trust_env=True, **httpx_kwargs)\n', (4166, 4198), False, 'import httpx\n'), ((4586, 4600), 'rich.print', 'print', (['message'], {}), '(message)\n', (4591, 4600), False, 'from rich import print\n'), ((4941, 4990), 'httpx.AsyncClient', 'httpx.AsyncClient', ([], {'trust_env': 
'(True)'}), '(trust_env=True, **httpx_kwargs)\n', (4958, 4990), False, 'import httpx\n'), ((5856, 5863), 'rich.print', 'print', ([], {}), '()\n', (5861, 5863), False, 'from rich import print\n'), ((1922, 1963), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""xiaogpt/rag/data"""'], {}), "('xiaogpt/rag/data')\n", (1943, 1963), False, 'from llama_index.core import Settings, VectorStoreIndex, StorageContext, load_index_from_storage, PromptTemplate, SimpleDirectoryReader\n'), ((5547, 5584), 'rich.print', 'print', (['chunk_message.response'], {'end': '""""""'}), "(chunk_message.response, end='')\n", (5552, 5584), False, 'from rich import print\n')] |
import logging
import sys
import requests
import os
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
import torch
from llama_index.llms import LlamaCPP
from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding
#!CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir
#un comment this to use GPU engine- CUBLAS
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
url = 'https://firebasestorage.googleapis.com/v0/b/ichiropractic.appspot.com/o/test.pdf?alt=media&token=c7b685c1-712d-4b0e-bbfd-3d80198c6584'
if not os.path.exists('Data'):
os.makedirs('Data')
file_path = os.path.join('Data', 'test.pdf')
response = requests.get(url)
if response.status_code == 200:
with open(file_path, 'wb') as file:
file.write(response.content)
else:
print(f'Failed to download the file: {response.status_code}')
# Setup LlamaCPP
llm = LlamaCPP(
    model_url='',  # only GGUF models are compatible
    model_path='./dolphin-2.1-mistral-7b.Q4_K_M.gguf',  # Here I have used the dolphin model from my local machine; replace this with your own model path
temperature=0.1,
max_new_tokens=3024,
context_window=3900,
generate_kwargs={},
model_kwargs={"n_gpu_layers": 128},
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
verbose=True,
)
print('LlamaCPP is ready to use.')
| [
"llama_index.llms.LlamaCPP"
] | [((511, 570), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (530, 570), False, 'import logging\n'), ((854, 886), 'os.path.join', 'os.path.join', (['"""Data"""', '"""test.pdf"""'], {}), "('Data', 'test.pdf')\n", (866, 886), False, 'import os\n'), ((898, 915), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (910, 915), False, 'import requests\n'), ((1122, 1423), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_url': '""""""', 'model_path': '"""./dolphin-2.1-mistral-7b.Q4_K_M.gguf"""', 'temperature': '(0.1)', 'max_new_tokens': '(3024)', 'context_window': '(3900)', 'generate_kwargs': '{}', 'model_kwargs': "{'n_gpu_layers': 128}", 'messages_to_prompt': 'messages_to_prompt', 'completion_to_prompt': 'completion_to_prompt', 'verbose': '(True)'}), "(model_url='', model_path='./dolphin-2.1-mistral-7b.Q4_K_M.gguf',\n temperature=0.1, max_new_tokens=3024, context_window=3900,\n generate_kwargs={}, model_kwargs={'n_gpu_layers': 128},\n messages_to_prompt=messages_to_prompt, completion_to_prompt=\n completion_to_prompt, verbose=True)\n", (1130, 1423), False, 'from llama_index.llms import LlamaCPP\n'), ((602, 642), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (623, 642), False, 'import logging\n'), ((794, 816), 'os.path.exists', 'os.path.exists', (['"""Data"""'], {}), "('Data')\n", (808, 816), False, 'import os\n'), ((822, 841), 'os.makedirs', 'os.makedirs', (['"""Data"""'], {}), "('Data')\n", (833, 841), False, 'import os\n'), ((571, 590), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (588, 590), False, 'import logging\n')] |
from llama_index import SimpleDirectoryReader, VectorStoreIndex, LLMPredictor, PromptHelper
from langchain.chat_models import ChatOpenAI
import gradio as gr
from pprint import pprint; import IPython
import sys
import os
from pathlib import Path
# Check if the environment variable exists
if "OPENAIKEY" in os.environ:
# If it exists, get its value into a Python variable
api_key = os.environ["OPENAIKEY"]
else:
raise ValueError("Please set the OPENAIKEY environment variable")
os.environ["OPENAI_API_KEY"] = api_key
from llama_index import VectorStoreIndex, download_loader
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader('/Users/despiegk1/Downloads/ai').load_data()
index = GPTVectorStoreIndex.from_documents(documents)
index.storage_context.persist()
query_engine = index.as_query_engine()
query_engine.query("what is ourworld?")
# ImageReader = download_loader("ImageReader")
# imageLoader = ImageReader(text_type="plain_text")
# FlatPdfReader = download_loader("FlatPdfReader")
# pdfLoader = FlatPdfReader(image_loader=imageLoader)
# document = pdfLoader.load_data(file=Path('~/Downloads/its not about what we have, its about what we believe in. (5).pdf'))
# index = VectorStoreIndex.from_documents([document])
# query_engine = index.as_query_engine()
# query_engine.query('how vulnerable are security protocols?')
IPython.embed()
| [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((745, 790), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (779, 790), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader\n'), ((1396, 1411), 'IPython.embed', 'IPython.embed', ([], {}), '()\n', (1409, 1411), False, 'import IPython\n'), ((670, 724), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""/Users/despiegk1/Downloads/ai"""'], {}), "('/Users/despiegk1/Downloads/ai')\n", (691, 724), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader\n')] |
import argparse
import copy
import logging
import os
import sys
import warnings
from typing import Optional, List, Callable
from langchain.llms import OpenAI
import faiss
import gradio as gr
import torch
import torch.distributed as dist
import transformers
from accelerate import dispatch_model, infer_auto_device_map
from accelerate.hooks import (
AlignDevicesHook,
add_hook_to_module,
remove_hook_from_submodules,
)
from accelerate.utils import get_balanced_memory
from huggingface_hub import hf_hub_download
from llama_index import LLMPredictor
from llama_index import PromptHelper, SimpleDirectoryReader
from llama_index import ServiceContext
from llama_index import GPTKeywordTableIndex, GPTSimpleVectorIndex, GPTListIndex, GPTTreeIndex, GPTFaissIndex
from peft import PeftModelForCausalLM, LoraConfig
from peft.utils import PeftType, set_peft_model_state_dict
from torch import nn
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.generation.beam_search import BeamSearchScorer
from transformers.generation.utils import (
LogitsProcessorList,
StoppingCriteriaList,
GenerationMixin,
)
from model import CustomLLM, Llama7bHFLLM
assert (
"LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
class SteamGenerationMixin(PeftModelForCausalLM, GenerationMixin):
    # support for streaming beam search
@torch.no_grad()
def stream_generate(
self,
input_ids: Optional[torch.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
prefix_allowed_tokens_fn: Optional[
Callable[[int, torch.Tensor], List[int]]
] = None,
**kwargs,
):
self._reorder_cache = self.base_model._reorder_cache
        if is_deepspeed_zero3_enabled() and dist.get_world_size() > 1:
synced_gpus = True
else:
synced_gpus = False
if kwargs.get("attention_mask", None) is not None:
# concat prompt attention mask
prefix_attention_mask = torch.ones(
kwargs["input_ids"].shape[0], self.peft_config.num_virtual_tokens
).to(kwargs["input_ids"].device)
kwargs["attention_mask"] = torch.cat(
(prefix_attention_mask, kwargs["attention_mask"]), dim=1
)
if kwargs.get("position_ids", None) is not None:
warnings.warn(
"Position ids are not supported for parameter efficient tuning. Ignoring position ids."
)
kwargs["position_ids"] = None
if kwargs.get("token_type_ids", None) is not None:
warnings.warn(
"Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"
)
kwargs["token_type_ids"] = None
batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
if generation_config is None:
generation_config = self.generation_config
generation_config = copy.deepcopy(generation_config)
model_kwargs = generation_config.update(**kwargs)
bos_token_id, eos_token_id, pad_token_id = (
generation_config.bos_token_id,
generation_config.eos_token_id,
generation_config.pad_token_id,
)
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
has_default_max_length = (
kwargs.get("max_length") is None
and generation_config.max_length is not None
)
if has_default_max_length and generation_config.max_new_tokens is None:
warnings.warn(
f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
" recommend using `max_new_tokens` to control the maximum length of the generation.",
UserWarning,
)
elif generation_config.max_new_tokens is not None:
generation_config.max_length = (
generation_config.max_new_tokens + input_ids_seq_length
)
if generation_config.min_new_tokens is not None:
generation_config.min_length = (
generation_config.min_new_tokens + input_ids_seq_length
)
if input_ids_seq_length >= generation_config.max_length:
input_ids_string = (
"decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
)
# 2. Set generation parameters if not already defined
logits_processor = (
logits_processor if logits_processor is not None else LogitsProcessorList()
)
stopping_criteria = (
stopping_criteria
if stopping_criteria is not None
else StoppingCriteriaList()
)
# 8. prepare distribution pre_processing samplers
logits_processor = self._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_seq_length,
encoder_input_ids=input_ids,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
logits_processor=logits_processor,
)
# 9. prepare stopping criteria
stopping_criteria = self._get_stopping_criteria(
generation_config=generation_config, stopping_criteria=stopping_criteria
)
logits_warper = self._get_logits_warper(generation_config)
# 10. go into beam search generation modes
# 11. prepare beam search scorer
num_beams = generation_config.num_beams
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=generation_config.num_beams,
device=input_ids.device,
length_penalty=generation_config.length_penalty,
do_early_stopping=generation_config.early_stopping,
num_beam_hyps_to_keep=generation_config.num_return_sequences,
max_length=generation_config.max_length,
)
# 12. interleave input_ids with `num_beams` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# beam_search logits
batch_beam_size, cur_len = input_ids.shape
if num_beams * batch_size != batch_beam_size:
raise ValueError(
f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
)
beam_scores = torch.zeros(
(batch_size, num_beams), dtype=torch.float, device=input_ids.device
)
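        # Only the first beam of each batch starts "alive"; the others get a large negative score so the first top-k step does not pick duplicate continuations.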
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view((batch_size * num_beams,))
this_peer_finished = False # used by synced_gpus only
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(
0.0 if this_peer_finished else 1.0
).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=False,
output_hidden_states=False,
)
if synced_gpus and this_peer_finished:
cur_len = cur_len + 1
continue # don't waste resources running the code we don't need
next_token_logits = outputs.logits[:, -1, :]
# next_token_logits = self.adjust_logits_during_generation(next_token_logits, cur_len=cur_len) hack: adjust tokens for Marian.
next_token_scores = nn.functional.log_softmax(
next_token_logits, dim=-1
) # (batch_size * num_beams, vocab_size)
next_token_scores_processed = logits_processor(input_ids, next_token_scores)
next_token_scores = next_token_scores_processed + beam_scores[
:, None
].expand_as(next_token_scores)
# reshape for beam search
vocab_size = next_token_scores.shape[-1]
next_token_scores = next_token_scores.view(
batch_size, num_beams * vocab_size
)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search)
next_token_scores, next_tokens = torch.topk(
next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True
)
next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor")
next_tokens = next_tokens % vocab_size
# stateless
beam_outputs = beam_scorer.process(
input_ids,
next_token_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
beam_indices=None,
)
beam_scores = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
input_ids = torch.cat(
[input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1
)
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
if model_kwargs["past_key_values"] is not None:
model_kwargs["past_key_values"] = self._reorder_cache(
model_kwargs["past_key_values"], beam_idx
)
# increase cur_len
cur_len = cur_len + 1
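            # Stream the partially generated sequences back to the caller after every decoding step.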
yield input_ids
if beam_scorer.is_done or stopping_criteria(input_ids, None):
if not synced_gpus:
break
else:
this_peer_finished = True
final_result = beam_scorer.finalize(
input_ids,
beam_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
max_length=stopping_criteria.max_length,
beam_indices=None,
)
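        # Finally yield the completed beam-search sequences once generation has stopped.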
yield final_result["sequences"]
    # By default, PeftModel.from_pretrained calls `model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config)` rather than cls, so simply inheriting from PeftModelForCausalLM would not pick up this class -- we override from_pretrained instead.
@classmethod
def from_pretrained(cls, model, model_id, **kwargs):
# load the config
config = LoraConfig.from_pretrained(model_id)
if getattr(model, "hf_device_map", None) is not None:
remove_hook_from_submodules(model)
        # here is the hack: construct the subclass (cls) directly instead of the mapped PEFT model class
model = cls(model, config)
# load weights if any
if os.path.exists(os.path.join(model_id, "adapter_model.bin")):
filename = os.path.join(model_id, "adapter_model.bin")
else:
try:
filename = hf_hub_download(model_id, "adapter_model.bin")
except: # noqa
raise ValueError(
f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. "
f"Please check that the file {'adapter_model.bin'} is present at {model_id}."
)
adapters_weights = torch.load(
filename,
map_location=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
)
# load the weights into the model
model = set_peft_model_state_dict(model, adapters_weights)
if getattr(model, "hf_device_map", None) is not None:
device_map = kwargs.get("device_map", "auto")
max_memory = kwargs.get("max_memory", None)
no_split_module_classes = model._no_split_modules
if device_map != "sequential":
max_memory = get_balanced_memory(
model,
max_memory=max_memory,
no_split_module_classes=no_split_module_classes,
low_zero=(device_map == "balanced_low_0"),
)
if isinstance(device_map, str):
device_map = infer_auto_device_map(
model,
max_memory=max_memory,
no_split_module_classes=no_split_module_classes,
)
model = dispatch_model(model, device_map=device_map)
hook = AlignDevicesHook(io_same_device=True)
if model.peft_config.peft_type == PeftType.LORA:
add_hook_to_module(model.base_model.model, hook)
else:
remove_hook_from_submodules(model.prompt_encoder)
add_hook_to_module(model.base_model, hook)
return model
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default="decapoda-research/llama-7b-hf")
parser.add_argument("--lora_path", type=str, default="./lora-Vicuna/checkpoint-3000")
parser.add_argument("--use_local", type=int, default=1)
args = parser.parse_args()
tokenizer = LlamaTokenizer.from_pretrained(args.model_path)
LOAD_8BIT = True
BASE_MODEL = args.model_path
LORA_WEIGHTS = args.lora_path
# fix the path for local checkpoint
lora_bin_path = os.path.join(args.lora_path, "adapter_model.bin")
print(lora_bin_path)
if not os.path.exists(lora_bin_path) and args.use_local:
pytorch_bin_path = os.path.join(args.lora_path, "pytorch_model.bin")
print(pytorch_bin_path)
if os.path.exists(pytorch_bin_path):
os.rename(pytorch_bin_path, lora_bin_path)
        warnings.warn(
            "The LoRA checkpoint file 'pytorch_model.bin' has been renamed to 'adapter_model.bin'."
        )
    else:
        raise FileNotFoundError(f"LoRA checkpoint not found at {args.lora_path}!")
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
try:
if torch.backends.mps.is_available():
device = "mps"
except:
pass
if device == "cuda":
model = LlamaForCausalLM.from_pretrained(
BASE_MODEL,
load_in_8bit=LOAD_8BIT,
torch_dtype=torch.float16,
device_map={"": 0},
)
model = SteamGenerationMixin.from_pretrained(
model, LORA_WEIGHTS, torch_dtype=torch.float16, device_map={"": 0}
)
elif device == "mps":
model = LlamaForCausalLM.from_pretrained(
BASE_MODEL,
device_map={"": device},
torch_dtype=torch.float16,
)
model = SteamGenerationMixin.from_pretrained(
model,
LORA_WEIGHTS,
device_map={"": device},
torch_dtype=torch.float16,
)
else:
model = LlamaForCausalLM.from_pretrained(
BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True
)
model = SteamGenerationMixin.from_pretrained(
model,
LORA_WEIGHTS,
device_map={"": device},
)
def generate_prompt(instruction, input=None):
if input:
return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Input:
{input}
### Response:"""
else:
return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Response:"""
if not LOAD_8BIT:
model.half() # seems to fix bugs for some users.
model.eval()
if torch.__version__ >= "2" and sys.platform != "win32":
model = torch.compile(model)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import openai
openai.api_key = "YOUR_OPENAI_API_KEY"  # supply your own key; do not hard-code real keys
os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"
def evaluate(
input,
temperature=0.1,
top_p=0.75,
top_k=40,
num_beams=4,
max_new_tokens=2500,
min_new_tokens=1,
repetition_penalty=2.0,
**kwargs,
):
print('start text llama-index')
# TEST
#
# set maximum input size
max_input_size = 2048
# set number of output tokens
num_output = 1024
# set maximum chunk overlap
max_chunk_overlap = 20
gen_config = GenerationConfig(
temperature=temperature,
top_p=top_p,
top_k=top_k,
num_beams=num_beams,
bos_token_id=1,
eos_token_id=2,
pad_token_id=0,
max_new_tokens=max_new_tokens,
# max_length=max_new_tokens+input_sequence
min_new_tokens=min_new_tokens,
# min_length=min_new_tokens+input_sequence
repetition_penalty=repetition_penalty
)
# service_context = ServiceContext.from_defaults(
# llm_predictor=LLMPredictor(llm=CustomLLM(mod=model, token=tokenizer, gen_config=gen_config, device=device)),
# prompt_helper=PromptHelper(max_input_size, num_output, max_chunk_overlap))
service_context = ServiceContext.from_defaults(
llm_predictor=LLMPredictor(llm=model),
prompt_helper=PromptHelper(max_input_size, num_output, max_chunk_overlap))
documents = SimpleDirectoryReader('Chinese-Vicuna/index-docs').load_data()
print(documents)
print('start init index')
# llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003"))
# default_service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
# index = GPTFaissIndex.from_documents(documents, service_context=service_context)
# index = GPTFaissIndex.from_documents(documents, faiss_index=faiss.IndexFlatL2(1536), service_context=default_service_context)
print('end init index done')
print('start save to disk')
# index.save_to_disk("clash-index.json")
    # the file suffix does not matter
faiss_index_save_path = 'faiss_index.faiss'
faiss_index = faiss.IndexFlatL2(1536)
faiss.write_index(faiss_index, faiss_index_save_path)
index = GPTFaissIndex.load_from_disk(save_path='clash-index.json',
faiss_index=faiss_index_save_path,
service_context=service_context)
print('end save to disk')
# Query and print response
print('start query')
response = index.query(input)
print('end query')
print(response)
return response
# with torch.no_grad():
# # immOutPut = model.generate(input_ids=input_ids, generation_config=generation_config,
# # return_dict_in_generate=True, output_scores=False,
# # repetition_penalty=float(repetition_penalty), )
# # outputs = tokenizer.batch_decode(immOutPut)
# last_show_text = ''
# for generation_output in model.stream_generate(
# input_ids=input_ids,
# generation_config=generation_config,
# return_dict_in_generate=True,
# output_scores=False,
# repetition_penalty=float(repetition_penalty),
# ):
# outputs = tokenizer.batch_decode(generation_output)
# show_text = "\n--------------------------------------------\n".join(
# [output.split("### Response:")[1].strip().replace('�', '') for output in outputs]
# )
# # if show_text== '':
# # yield last_show_text
# # else:
# yield show_text
# last_show_text = outputs[0].split("### Response:")[1].strip().replace('�', '')
gr.Interface(
fn=evaluate,
inputs=[
gr.components.Textbox(
lines=2, label="Input", placeholder="Tell me about alpacas."
),
gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
gr.components.Slider(minimum=1, maximum=10, step=1, value=4, label="Beams Number"),
gr.components.Slider(
minimum=1, maximum=2000, step=1, value=256, label="Max New Tokens"
),
gr.components.Slider(
minimum=1, maximum=100, step=1, value=1, label="Min New Tokens"
),
gr.components.Slider(
minimum=0.1, maximum=10.0, step=0.1, value=1.0, label="Repetition Penalty"
),
],
outputs=[
gr.inputs.Textbox(
lines=15,
label="Output",
)
],
title="Chinese-Vicuna 中文小羊驼",
description="结合 llama-index prompt 搜索优化的 中文小羊驼",
).queue().launch(share=True)
| [
"llama_index.PromptHelper",
"llama_index.GPTFaissIndex.load_from_disk",
"llama_index.SimpleDirectoryReader",
"llama_index.LLMPredictor"
] | [((13996, 14021), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14019, 14021), False, 'import argparse\n'), ((14291, 14338), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['args.model_path'], {}), '(args.model_path)\n', (14321, 14338), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((14471, 14520), 'os.path.join', 'os.path.join', (['args.lora_path', '"""adapter_model.bin"""'], {}), "(args.lora_path, 'adapter_model.bin')\n", (14483, 14520), False, 'import os\n'), ((14990, 15015), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15013, 15015), False, 'import torch\n'), ((16720, 16779), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (16739, 16779), False, 'import logging\n'), ((1625, 1640), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1638, 1640), False, 'import torch\n'), ((14622, 14671), 'os.path.join', 'os.path.join', (['args.lora_path', '"""pytorch_model.bin"""'], {}), "(args.lora_path, 'pytorch_model.bin')\n", (14634, 14671), False, 'import os\n'), ((14707, 14739), 'os.path.exists', 'os.path.exists', (['pytorch_bin_path'], {}), '(pytorch_bin_path)\n', (14721, 14739), False, 'import os\n'), ((15075, 15108), 'torch.backends.mps.is_available', 'torch.backends.mps.is_available', ([], {}), '()\n', (15106, 15108), False, 'import torch\n'), ((15184, 15303), 'transformers.LlamaForCausalLM.from_pretrained', 'LlamaForCausalLM.from_pretrained', (['BASE_MODEL'], {'load_in_8bit': 'LOAD_8BIT', 'torch_dtype': 'torch.float16', 'device_map': "{'': 0}"}), "(BASE_MODEL, load_in_8bit=LOAD_8BIT,\n torch_dtype=torch.float16, device_map={'': 0})\n", (15216, 15303), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((16696, 16716), 'torch.compile', 'torch.compile', (['model'], {}), '(model)\n', (16709, 16716), False, 'import torch\n'), ((16811, 16851), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (16832, 16851), False, 'import logging\n'), ((17490, 17739), 'transformers.GenerationConfig', 'GenerationConfig', ([], {'temperature': 'temperature', 'top_p': 'top_p', 'top_k': 'top_k', 'num_beams': 'num_beams', 'bos_token_id': '(1)', 'eos_token_id': '(2)', 'pad_token_id': '(0)', 'max_new_tokens': 'max_new_tokens', 'min_new_tokens': 'min_new_tokens', 'repetition_penalty': 'repetition_penalty'}), '(temperature=temperature, top_p=top_p, top_k=top_k,\n num_beams=num_beams, bos_token_id=1, eos_token_id=2, pad_token_id=0,\n max_new_tokens=max_new_tokens, min_new_tokens=min_new_tokens,\n repetition_penalty=repetition_penalty)\n', (17506, 17739), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((19097, 19120), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['(1536)'], {}), '(1536)\n', (19114, 19120), False, 'import faiss\n'), ((19126, 19179), 'faiss.write_index', 'faiss.write_index', (['faiss_index', 'faiss_index_save_path'], {}), '(faiss_index, faiss_index_save_path)\n', (19143, 19179), False, 'import faiss\n'), ((19193, 19324), 'llama_index.GPTFaissIndex.load_from_disk', 'GPTFaissIndex.load_from_disk', ([], {'save_path': '"""clash-index.json"""', 'faiss_index': 'faiss_index_save_path', 'service_context': 'service_context'}), "(save_path='clash-index.json', faiss_index=\n faiss_index_save_path, service_context=service_context)\n", (19221, 19324), 
False, 'from llama_index import GPTKeywordTableIndex, GPTSimpleVectorIndex, GPTListIndex, GPTTreeIndex, GPTFaissIndex\n'), ((3383, 3415), 'copy.deepcopy', 'copy.deepcopy', (['generation_config'], {}), '(generation_config)\n', (3396, 3415), False, 'import copy\n'), ((6104, 6429), 'transformers.generation.beam_search.BeamSearchScorer', 'BeamSearchScorer', ([], {'batch_size': 'batch_size', 'num_beams': 'generation_config.num_beams', 'device': 'input_ids.device', 'length_penalty': 'generation_config.length_penalty', 'do_early_stopping': 'generation_config.early_stopping', 'num_beam_hyps_to_keep': 'generation_config.num_return_sequences', 'max_length': 'generation_config.max_length'}), '(batch_size=batch_size, num_beams=generation_config.\n num_beams, device=input_ids.device, length_penalty=generation_config.\n length_penalty, do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length)\n', (6120, 6429), False, 'from transformers.generation.beam_search import BeamSearchScorer\n'), ((7160, 7245), 'torch.zeros', 'torch.zeros', (['(batch_size, num_beams)'], {'dtype': 'torch.float', 'device': 'input_ids.device'}), '((batch_size, num_beams), dtype=torch.float, device=input_ids.device\n )\n', (7171, 7245), False, 'import torch\n'), ((11738, 11774), 'peft.LoraConfig.from_pretrained', 'LoraConfig.from_pretrained', (['model_id'], {}), '(model_id)\n', (11764, 11774), False, 'from peft import PeftModelForCausalLM, LoraConfig\n'), ((12718, 12768), 'peft.utils.set_peft_model_state_dict', 'set_peft_model_state_dict', (['model', 'adapters_weights'], {}), '(model, adapters_weights)\n', (12743, 12768), False, 'from peft.utils import PeftType, set_peft_model_state_dict\n'), ((14549, 14578), 'os.path.exists', 'os.path.exists', (['lora_bin_path'], {}), '(lora_bin_path)\n', (14563, 14578), False, 'import os\n'), ((14749, 14791), 'os.rename', 'os.rename', (['pytorch_bin_path', 'lora_bin_path'], {}), '(pytorch_bin_path, lora_bin_path)\n', (14758, 14791), False, 'import os\n'), ((14800, 14919), 'warnings.warn', 'warnings.warn', (['"""The file name of the lora checkpoint\'pytorch_model.bin\' is replaced with \'adapter_model.bin\'"""'], {}), '(\n "The file name of the lora checkpoint\'pytorch_model.bin\' is replaced with \'adapter_model.bin\'"\n )\n', (14813, 14919), False, 'import warnings\n'), ((15504, 15604), 'transformers.LlamaForCausalLM.from_pretrained', 'LlamaForCausalLM.from_pretrained', (['BASE_MODEL'], {'device_map': "{'': device}", 'torch_dtype': 'torch.float16'}), "(BASE_MODEL, device_map={'': device},\n torch_dtype=torch.float16)\n", (15536, 15604), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((15811, 15908), 'transformers.LlamaForCausalLM.from_pretrained', 'LlamaForCausalLM.from_pretrained', (['BASE_MODEL'], {'device_map': "{'': device}", 'low_cpu_mem_usage': '(True)'}), "(BASE_MODEL, device_map={'': device},\n low_cpu_mem_usage=True)\n", (15843, 15908), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig\n'), ((16780, 16799), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (16797, 16799), False, 'import logging\n'), ((2134, 2162), 'transformers.deepspeed.is_deepspeed_zero3_enabled', 'is_deepspeed_zero3_enabled', ([], {}), '()\n', (2160, 2162), False, 'from transformers.deepspeed import is_deepspeed_zero3_enabled\n'), ((2584, 2651), 'torch.cat', 'torch.cat', (["(prefix_attention_mask, kwargs['attention_mask'])"], {'dim': 
'(1)'}), "((prefix_attention_mask, kwargs['attention_mask']), dim=1)\n", (2593, 2651), False, 'import torch\n'), ((2751, 2863), 'warnings.warn', 'warnings.warn', (['"""Position ids are not supported for parameter efficient tuning. Ignoring position ids."""'], {}), "(\n 'Position ids are not supported for parameter efficient tuning. Ignoring position ids.'\n )\n", (2764, 2863), False, 'import warnings\n'), ((2997, 3112), 'warnings.warn', 'warnings.warn', (['"""Token type ids are not supported for parameter efficient tuning. Ignoring token type ids"""'], {}), "(\n 'Token type ids are not supported for parameter efficient tuning. Ignoring token type ids'\n )\n", (3010, 3112), False, 'import warnings\n'), ((3995, 4306), 'warnings.warn', 'warnings.warn', (['f"""Using `max_length`\'s default ({generation_config.max_length}) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation."""', 'UserWarning'], {}), '(\n f"Using `max_length`\'s default ({generation_config.max_length}) to control the generation length. This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we recommend using `max_new_tokens` to control the maximum length of the generation."\n , UserWarning)\n', (4008, 4306), False, 'import warnings\n'), ((5118, 5139), 'transformers.generation.utils.LogitsProcessorList', 'LogitsProcessorList', ([], {}), '()\n', (5137, 5139), False, 'from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationMixin\n'), ((5272, 5294), 'transformers.generation.utils.StoppingCriteriaList', 'StoppingCriteriaList', ([], {}), '()\n', (5292, 5294), False, 'from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationMixin\n'), ((8817, 8869), 'torch.nn.functional.log_softmax', 'nn.functional.log_softmax', (['next_token_logits'], {'dim': '(-1)'}), '(next_token_logits, dim=-1)\n', (8842, 8869), False, 'from torch import nn\n'), ((9542, 9620), 'torch.topk', 'torch.topk', (['next_token_scores', '(2 * num_beams)'], {'dim': '(1)', 'largest': '(True)', 'sorted': '(True)'}), '(next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True)\n', (9552, 9620), False, 'import torch\n'), ((9678, 9735), 'torch.div', 'torch.div', (['next_tokens', 'vocab_size'], {'rounding_mode': '"""floor"""'}), "(next_tokens, vocab_size, rounding_mode='floor')\n", (9687, 9735), False, 'import torch\n'), ((11850, 11884), 'accelerate.hooks.remove_hook_from_submodules', 'remove_hook_from_submodules', (['model'], {}), '(model)\n', (11877, 11884), False, 'from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules\n'), ((12005, 12048), 'os.path.join', 'os.path.join', (['model_id', '"""adapter_model.bin"""'], {}), "(model_id, 'adapter_model.bin')\n", (12017, 12048), False, 'import os\n'), ((12074, 12117), 'os.path.join', 'os.path.join', (['model_id', '"""adapter_model.bin"""'], {}), "(model_id, 'adapter_model.bin')\n", (12086, 12117), False, 'import os\n'), ((13593, 13637), 'accelerate.dispatch_model', 'dispatch_model', (['model'], {'device_map': 'device_map'}), '(model, device_map=device_map)\n', (13607, 13637), False, 'from accelerate import dispatch_model, infer_auto_device_map\n'), ((13657, 13694), 'accelerate.hooks.AlignDevicesHook', 'AlignDevicesHook', ([], {'io_same_device': '(True)'}), '(io_same_device=True)\n', (13673, 13694), False, 'from 
accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules\n'), ((18251, 18274), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'model'}), '(llm=model)\n', (18263, 18274), False, 'from llama_index import LLMPredictor\n'), ((18298, 18357), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (18310, 18357), False, 'from llama_index import PromptHelper, SimpleDirectoryReader\n'), ((18376, 18426), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""Chinese-Vicuna/index-docs"""'], {}), "('Chinese-Vicuna/index-docs')\n", (18397, 18426), False, 'from llama_index import PromptHelper, SimpleDirectoryReader\n'), ((2167, 2184), 'torch.distributed.world_size', 'dist.world_size', ([], {}), '()\n', (2182, 2184), True, 'import torch.distributed as dist\n'), ((7914, 7976), 'torch.distributed.all_reduce', 'dist.all_reduce', (['this_peer_finished_flag'], {'op': 'dist.ReduceOp.SUM'}), '(this_peer_finished_flag, op=dist.ReduceOp.SUM)\n', (7929, 7976), True, 'import torch.distributed as dist\n'), ((12176, 12222), 'huggingface_hub.hf_hub_download', 'hf_hub_download', (['model_id', '"""adapter_model.bin"""'], {}), "(model_id, 'adapter_model.bin')\n", (12191, 12222), False, 'from huggingface_hub import hf_hub_download\n'), ((13079, 13223), 'accelerate.utils.get_balanced_memory', 'get_balanced_memory', (['model'], {'max_memory': 'max_memory', 'no_split_module_classes': 'no_split_module_classes', 'low_zero': "(device_map == 'balanced_low_0')"}), "(model, max_memory=max_memory, no_split_module_classes=\n no_split_module_classes, low_zero=device_map == 'balanced_low_0')\n", (13098, 13223), False, 'from accelerate.utils import get_balanced_memory\n'), ((13393, 13498), 'accelerate.infer_auto_device_map', 'infer_auto_device_map', (['model'], {'max_memory': 'max_memory', 'no_split_module_classes': 'no_split_module_classes'}), '(model, max_memory=max_memory, no_split_module_classes\n =no_split_module_classes)\n', (13414, 13498), False, 'from accelerate import dispatch_model, infer_auto_device_map\n'), ((13772, 13820), 'accelerate.hooks.add_hook_to_module', 'add_hook_to_module', (['model.base_model.model', 'hook'], {}), '(model.base_model.model, hook)\n', (13790, 13820), False, 'from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules\n'), ((13855, 13904), 'accelerate.hooks.remove_hook_from_submodules', 'remove_hook_from_submodules', (['model.prompt_encoder'], {}), '(model.prompt_encoder)\n', (13882, 13904), False, 'from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules\n'), ((13921, 13963), 'accelerate.hooks.add_hook_to_module', 'add_hook_to_module', (['model.base_model', 'hook'], {}), '(model.base_model, hook)\n', (13939, 13963), False, 'from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules\n'), ((2406, 2483), 'torch.ones', 'torch.ones', (["kwargs['input_ids'].shape[0]", 'self.peft_config.num_virtual_tokens'], {}), "(kwargs['input_ids'].shape[0], self.peft_config.num_virtual_tokens)\n", (2416, 2483), False, 'import torch\n'), ((7733, 7781), 'torch.tensor', 'torch.tensor', (['(0.0 if this_peer_finished else 1.0)'], {}), '(0.0 if this_peer_finished else 1.0)\n', (7745, 7781), False, 'import torch\n'), ((12611, 12636), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12634, 12636), False, 'import torch\n'), ((20831, 20919), 
'gradio.components.Textbox', 'gr.components.Textbox', ([], {'lines': '(2)', 'label': '"""Input"""', 'placeholder': '"""Tell me about alpacas."""'}), "(lines=2, label='Input', placeholder=\n 'Tell me about alpacas.')\n", (20852, 20919), True, 'import gradio as gr\n'), ((20946, 21020), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(0)', 'maximum': '(1)', 'value': '(0.1)', 'label': '"""Temperature"""'}), "(minimum=0, maximum=1, value=0.1, label='Temperature')\n", (20966, 21020), True, 'import gradio as gr\n'), ((21030, 21099), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(0)', 'maximum': '(1)', 'value': '(0.75)', 'label': '"""Top p"""'}), "(minimum=0, maximum=1, value=0.75, label='Top p')\n", (21050, 21099), True, 'import gradio as gr\n'), ((21109, 21186), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(0)', 'maximum': '(100)', 'step': '(1)', 'value': '(40)', 'label': '"""Top k"""'}), "(minimum=0, maximum=100, step=1, value=40, label='Top k')\n", (21129, 21186), True, 'import gradio as gr\n'), ((21196, 21283), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(1)', 'maximum': '(10)', 'step': '(1)', 'value': '(4)', 'label': '"""Beams Number"""'}), "(minimum=1, maximum=10, step=1, value=4, label=\n 'Beams Number')\n", (21216, 21283), True, 'import gradio as gr\n'), ((21288, 21381), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(1)', 'maximum': '(2000)', 'step': '(1)', 'value': '(256)', 'label': '"""Max New Tokens"""'}), "(minimum=1, maximum=2000, step=1, value=256, label=\n 'Max New Tokens')\n", (21308, 21381), True, 'import gradio as gr\n'), ((21408, 21498), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(1)', 'maximum': '(100)', 'step': '(1)', 'value': '(1)', 'label': '"""Min New Tokens"""'}), "(minimum=1, maximum=100, step=1, value=1, label=\n 'Min New Tokens')\n", (21428, 21498), True, 'import gradio as gr\n'), ((21525, 21626), 'gradio.components.Slider', 'gr.components.Slider', ([], {'minimum': '(0.1)', 'maximum': '(10.0)', 'step': '(0.1)', 'value': '(1.0)', 'label': '"""Repetition Penalty"""'}), "(minimum=0.1, maximum=10.0, step=0.1, value=1.0, label=\n 'Repetition Penalty')\n", (21545, 21626), True, 'import gradio as gr\n'), ((21674, 21717), 'gradio.inputs.Textbox', 'gr.inputs.Textbox', ([], {'lines': '(15)', 'label': '"""Output"""'}), "(lines=15, label='Output')\n", (21691, 21717), True, 'import gradio as gr\n')] |
from typing import List, Set
from llama_index.core import Document, KnowledgeGraphIndex, StorageContext
from llama_index.core.query_engine import BaseQueryEngine
from llama_index.core import load_index_from_storage
import os
def load_kg_graph_index_storage_context(kg_graph_storage_dir: str) -> StorageContext:
return StorageContext.from_defaults(persist_dir=kg_graph_storage_dir)
def persist_kg_graph_index(idx: KnowledgeGraphIndex, kg_graph_storage_dir: str):
doc_count = len(idx.docstore.docs)
print(f"Persisting {doc_count} docs for kg_graph to {kg_graph_storage_dir} ...")
idx.storage_context.persist(persist_dir=kg_graph_storage_dir)
def delete_kg_graph_index(kg_graph_storage_dir: str):
print(f"Deleting kg_graph at {kg_graph_storage_dir} ...")
if os.path.exists(kg_graph_storage_dir):
import shutil
shutil.rmtree(kg_graph_storage_dir)
def load_kg_graph_index(kg_graph_storage_dir: str) -> KnowledgeGraphIndex:
if not os.path.exists(kg_graph_storage_dir):
print(f"About to initialize an empty kg-graph ...")
kg_graph = KnowledgeGraphIndex.from_documents(
[]
)
persist_kg_graph_index(kg_graph, kg_graph_storage_dir)
return load_index_from_storage(
storage_context=load_kg_graph_index_storage_context(kg_graph_storage_dir)
)
def get_kg_graph_doc_source_ids(graph_storage_dir: str, extract_key_from_doc=lambda doc: str(doc)) -> Set[str]:
s = set()
for doc in load_kg_graph_index(graph_storage_dir).docstore.docs.values():
s.add(extract_key_from_doc(doc))
return s
def get_kg_graph_index(graph_storage_dir: str) -> KnowledgeGraphIndex:
return load_kg_graph_index(graph_storage_dir)
def operate_on_kg_graph_index(kg_graph_index_dir: str, operation=lambda idx: None) -> KnowledgeGraphIndex:
import atexit
idx = get_kg_graph_index(kg_graph_index_dir)
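    # Register an atexit hook so the index is still persisted if the operation aborts the process; it is unregistered again after the explicit persist below.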
atexist_reg_callable = atexit.register(persist_kg_graph_index, idx, kg_graph_index_dir)
try:
operation(idx)
finally:
persist_kg_graph_index(idx, kg_graph_index_dir)
atexit.unregister(atexist_reg_callable)
return idx
def add_to_or_update_in_kg_graph(graph_storage_dir: str, documents: List[Document]):
operate_on_kg_graph_index(
graph_storage_dir,
lambda graph_index: graph_index.refresh_ref_docs(documents)
)
def get_kg_graph_query_engine(graph_storage_dir: str) -> BaseQueryEngine:
return load_kg_graph_index(graph_storage_dir).as_query_engine()
| [
"llama_index.core.KnowledgeGraphIndex.from_documents",
"llama_index.core.StorageContext.from_defaults"
] | [((323, 385), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'kg_graph_storage_dir'}), '(persist_dir=kg_graph_storage_dir)\n', (351, 385), False, 'from llama_index.core import Document, KnowledgeGraphIndex, StorageContext\n'), ((782, 818), 'os.path.exists', 'os.path.exists', (['kg_graph_storage_dir'], {}), '(kg_graph_storage_dir)\n', (796, 818), False, 'import os\n'), ((1908, 1972), 'atexit.register', 'atexit.register', (['persist_kg_graph_index', 'idx', 'kg_graph_index_dir'], {}), '(persist_kg_graph_index, idx, kg_graph_index_dir)\n', (1923, 1972), False, 'import atexit\n'), ((850, 885), 'shutil.rmtree', 'shutil.rmtree', (['kg_graph_storage_dir'], {}), '(kg_graph_storage_dir)\n', (863, 885), False, 'import shutil\n'), ((973, 1009), 'os.path.exists', 'os.path.exists', (['kg_graph_storage_dir'], {}), '(kg_graph_storage_dir)\n', (987, 1009), False, 'import os\n'), ((1090, 1128), 'llama_index.core.KnowledgeGraphIndex.from_documents', 'KnowledgeGraphIndex.from_documents', (['[]'], {}), '([])\n', (1124, 1128), False, 'from llama_index.core import Document, KnowledgeGraphIndex, StorageContext\n'), ((2082, 2121), 'atexit.unregister', 'atexit.unregister', (['atexist_reg_callable'], {}), '(atexist_reg_callable)\n', (2099, 2121), False, 'import atexit\n')] |
from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
import sys
import os
def construct_index(src_path, out_path):
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_outputs = 512
# set maximum chunk overlap
max_chunk_overlap = 20
# set chunk size limit
chunk_size_limit = 600
# define LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=num_outputs))
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
documents = SimpleDirectoryReader(src_path).load_data()
index = GPTSimpleVectorIndex(
documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
)
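    # Persist the vector index so later runs can reload it without re-embedding the documents.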
index.save_to_disk(f"{out_path}/index.json")
return index
if __name__ == "__main__":
import os
src_path = os.getcwd()
dir_path = src_path + "/clean"
out_path = src_path
os.environ["OPENAI_API_KEY"] = "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"
index = construct_index(src_path, out_path) | [
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader",
"llama_index.PromptHelper"
] | [((565, 664), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (577, 664), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((740, 834), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(documents, llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (760, 834), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((970, 981), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (979, 981), False, 'import os\n'), ((467, 543), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-davinci-003"""', 'max_tokens': 'num_outputs'}), "(temperature=0, model_name='text-davinci-003', max_tokens=num_outputs)\n", (473, 543), False, 'from langchain import OpenAI\n'), ((679, 710), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['src_path'], {}), '(src_path)\n', (700, 710), False, 'from llama_index import SimpleDirectoryReader, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n')] |
import streamlit as st
from llama_index import VectorStoreIndex, ServiceContext, Document
from llama_index.llms import OpenAI
import openai
from llama_index import SimpleDirectoryReader
st.set_page_config(page_title="Chat with the docs, powered by LlamaIndex")
openai.api_key = st.secrets.openai_key
st.title("Chat with the custom docs, using LlamaIndex")
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{"role": "assistant", "content": "Ask me a question about Streamlit's open-source Python library!"}
]
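# Cache the loaded index so the documents are read and embedded only once per server process.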
@st.cache_resource(show_spinner=False)
def load_data():
with st.spinner(text="Loading and indexing the Streamlit docs"):
reader = SimpleDirectoryReader(input_dir="./data", recursive=True)
docs = reader.load_data()
service_context = ServiceContext.from_defaults(llm=OpenAI(model="gpt-3.5-turbo", temperature=0.5, system_prompt="You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts – do not hallucinate features."))
index = VectorStoreIndex.from_documents(docs, service_context=service_context)
return index
index = load_data()
# chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True, system_prompt="You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts – do not hallucinate features.")
chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = chat_engine.chat(prompt)
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.llms.OpenAI"
] | [((193, 267), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with the docs, powered by LlamaIndex"""'}), "(page_title='Chat with the docs, powered by LlamaIndex')\n", (211, 267), True, 'import streamlit as st\n'), ((309, 365), 'streamlit.title', 'st.title', (['"""Chat with the custom docs, using LlamaIndex"""'], {}), "('Chat with the custom docs, using LlamaIndex')\n", (317, 365), True, 'import streamlit as st\n'), ((618, 655), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (635, 655), True, 'import streamlit as st\n'), ((399, 422), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (420, 422), True, 'import streamlit as st\n'), ((1802, 1832), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (1815, 1832), True, 'import streamlit as st\n'), ((1888, 1957), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (1920, 1957), True, 'import streamlit as st\n'), ((684, 742), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Loading and indexing the Streamlit docs"""'}), "(text='Loading and indexing the Streamlit docs')\n", (694, 742), True, 'import streamlit as st\n'), ((762, 819), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""./data"""', 'recursive': '(True)'}), "(input_dir='./data', recursive=True)\n", (783, 819), False, 'from llama_index import SimpleDirectoryReader\n'), ((1243, 1313), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (1274, 1313), False, 'from llama_index import VectorStoreIndex, ServiceContext, Document\n'), ((2047, 2079), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2062, 2079), True, 'import streamlit as st\n'), ((2090, 2118), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (2098, 2118), True, 'import streamlit as st\n'), ((2255, 2283), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2270, 2283), True, 'import streamlit as st\n'), ((2299, 2324), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (2309, 2324), True, 'import streamlit as st\n'), ((2388, 2415), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2396, 2415), True, 'import streamlit as st\n'), ((2504, 2545), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (2536, 2545), True, 'import streamlit as st\n'), ((915, 1234), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.5)', 'system_prompt': '"""You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts – do not hallucinate features."""'}), "(model='gpt-3.5-turbo', temperature=0.5, system_prompt=\n 'You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. 
Keep your answers technical and based on facts – do not hallucinate features.'\n )\n", (921, 1234), False, 'from llama_index.llms import OpenAI\n')] |
from llama_index import StorageContext, load_index_from_storage, ServiceContext
import gradio as gr
import sys
import os
import logging
from utils import get_automerging_query_engine
from utils import get_sentence_window_query_engine
import configparser
from TTS.api import TTS
from gtts import gTTS
import simpleaudio as sa
import threading
from datetime import datetime
import json
import subprocess
from llama_index.prompts.base import PromptTemplate
from inference import main as generateVideo
import pyttsx3
def run_inference(checkpoint_path, face_video, audio_file, resize_factor, outfile):
# Construct the command with dynamic parameters
command = [
"--checkpoint_path", checkpoint_path,
"--face", face_video,
"--audio", audio_file,
"--resize_factor", str(resize_factor),
"--outfile", outfile
]
print(command)
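    # Run the Wav2Lip inference entry point in-process, passing CLI-style arguments.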
generateVideo(command)
def play_sound_then_delete(path_to_wav):
def play_and_delete():
try:
wave_obj = sa.WaveObject.from_wave_file(path_to_wav)
play_obj = wave_obj.play()
play_obj.wait_done() # Wait until the sound has finished playing
except Exception as e:
print(f"Error during playback: {e}")
finally:
try:
                # os.remove(path_to_wav)  # deletion is currently disabled
                print(f"Finished playing {path_to_wav}.")
except Exception as e:
print(f"Error deleting file: {e}")
# Start playback in a new thread
threading.Thread(target=play_and_delete, daemon=True).start()
config = configparser.ConfigParser()
config.read('config.ini')
os.environ["GRADIO_ANALYTICS_ENABLED"]='False'
indextype=config['api']['indextype']
embed_modelname = config['api']['embedmodel']
basic_idx_dir = config['index']['basic_idx_dir']
sent_win_idx_dir = config['index']['sent_win_idx_dir']
auto_mrg_idx_dir = config['index']['auto_mrg_idx_dir']
serverip = config['api']['host']
serverport = config['api']['port']
sslcert = config['api']['sslcert']
sslkey = config['api']['sslkey']
useopenai = config.getboolean('api', 'useopenai')
ttsengine = config['api']['ttsengine']
# Get the logging level
log_level_str = config.get('api', 'loglevel', fallback='WARNING').upper()
# Convert the log level string to a logging level
log_level = getattr(logging, log_level_str, logging.WARNING)
def chatbot(input_text):
global tts
print("User Text:" + input_text)
response =query_engine.query(input_text)
# Save the output
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
output_audfile=f"output_{timestamp}.wav"
output_vidfile=f"output_{timestamp}.mp4"
output_path = "../web/public/audio/output/"+output_audfile
if ttsengine == 'coqui':
tts.tts_to_file(text=response.response, file_path=output_path ) # , speaker_wav=["bruce.wav"], language="en",split_sentences=True)
elif ttsengine == 'gtts':
tts = gTTS(text=response.response, lang='en')
tts.save(output_path)
else:
tts.save_to_file(response.response , output_path)
tts.runAndWait()
checkpoint_path = "./checkpoints/wav2lip_gan.pth"
face_video = "media/Avatar.mp4"
audio_file = "../web/public/audio/output/"+output_audfile
outfile="../web/public/video/output/"+output_vidfile
resize_factor = 2
run_inference(checkpoint_path, face_video, audio_file, resize_factor, outfile)
#play_sound_then_delete(output_path)
#construct response object
# Building the citation list from source_nodes
citation = [
{
"filename": node.metadata["file_name"],
"text": node.get_text()
} for node in response.source_nodes
]
# Creating the JSON object structure
jsonResponse = {
"response": response.response,
"video": output_vidfile,
"audio": output_audfile,
"citation": citation
}
# Convert to JSON string
jsonResponseStr = json.dumps(jsonResponse, indent=4)
return jsonResponseStr
logging.basicConfig(stream=sys.stdout, level=log_level)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
iface = gr.Interface(fn=chatbot,
inputs=gr.components.Textbox(lines=7, label="Enter your text"),
outputs="text",
title="Email data query")
from langchain.llms import LlamaCpp
from langchain.globals import set_llm_cache
from langchain.cache import InMemoryCache
#from langchain.globals import set_debug
#set_debug(True)
if useopenai:
from langchain.chat_models import ChatOpenAI
modelname = config['api']['openai_modelname']
llm =ChatOpenAI(temperature=0.1, model_name=modelname)
else:
modelname = config['api']['local_modelname']
n_gpu_layers = -1 # Change this value based on your model and your GPU VRAM pool.
n_batch = 2048 # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU.
#cache prompt/response pairs for faster retrieval next time.
set_llm_cache(InMemoryCache())
llm = LlamaCpp(
model_path="./models/"+ modelname,
cache=True,
n_gpu_layers=n_gpu_layers,
n_batch=n_batch,
n_ctx=2048,
n_threads=8,
temperature=0.01,
max_tokens=512,
f16_kv=True,
repeat_penalty=1.1,
min_p=0.05,
top_p=0.95,
top_k=40,
stop=["<|end_of_turn|>"]
)
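    # The "<|end_of_turn|>" stop token matches the chat template used in qa_prompt_tmpl_str below.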
service_context = ServiceContext.from_defaults(
llm=llm, embed_model=embed_modelname
)
index_directory=''
if indextype == 'basic':
index_directory = basic_idx_dir
elif indextype == 'sentence' :
index_directory = sent_win_idx_dir
elif indextype == 'automerge':
index_directory = auto_mrg_idx_dir
print(config['api']['indextype'] )
print(index_directory)
if ttsengine == 'coqui':
tts = TTS(model_name="tts_models/en/ljspeech/vits--neon", progress_bar=False).to("cuda")
#tts = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", progress_bar=False).to("cuda")
elif ttsengine == 'gtts':
tts = gTTS(text='', lang='en')
else:
tts = pyttsx3.init()
voices = tts.getProperty('voices')
    tts.setProperty('voice', voices[1].id)  # voices[1] is typically a female voice
rate = tts.getProperty('rate')
tts.setProperty('rate', rate-50)
# load index
storage_context = StorageContext.from_defaults(persist_dir=index_directory)
index = load_index_from_storage(storage_context=storage_context, service_context=service_context)
if indextype == 'basic':
query_engine = index.as_query_engine()
elif indextype == 'sentence' :
query_engine =get_sentence_window_query_engine(index)
elif indextype == 'automerge':
query_engine = get_automerging_query_engine(automerging_index=index, service_context=service_context)
#prompts_dict = query_engine.get_prompts()
#print(list(prompts_dict.keys()))
# Optional: Adjust prompts to suit the llms.
qa_prompt_tmpl_str = (
"GPT4 User: You are an assistant named Maggie. You assist with any questions regarding the organization kwaai.\n"
"Context information is below\n"
"----------------------\n"
"{context_str}\n"
"----------------------\n"
"Given the context information and not prior knowledge respond to user: {query_str}\n"
"<|end_of_turn|>GPT4 Assistant:"
)
qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)
query_engine.update_prompts(
{"response_synthesizer:text_qa_template": qa_prompt_tmpl}
)
iface.launch( share=False, server_name=serverip, server_port=int(serverport), ssl_verify=False, ssl_keyfile=sslkey, ssl_certfile=sslcert) | [
"llama_index.ServiceContext.from_defaults",
"llama_index.prompts.base.PromptTemplate",
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults"
] | [((1610, 1637), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1635, 1637), False, 'import configparser\n'), ((4094, 4149), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'log_level'}), '(stream=sys.stdout, level=log_level)\n', (4113, 4149), False, 'import logging\n'), ((5478, 5544), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_modelname'}), '(llm=llm, embed_model=embed_modelname)\n', (5506, 5544), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((6367, 6424), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'index_directory'}), '(persist_dir=index_directory)\n', (6395, 6424), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((6433, 6527), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(storage_context=storage_context, service_context=\n service_context)\n', (6456, 6527), False, 'from llama_index import StorageContext, load_index_from_storage, ServiceContext\n'), ((7358, 7392), 'llama_index.prompts.base.PromptTemplate', 'PromptTemplate', (['qa_prompt_tmpl_str'], {}), '(qa_prompt_tmpl_str)\n', (7372, 7392), False, 'from llama_index.prompts.base import PromptTemplate\n'), ((895, 917), 'inference.main', 'generateVideo', (['command'], {}), '(command)\n', (908, 917), True, 'from inference import main as generateVideo\n'), ((4018, 4052), 'json.dumps', 'json.dumps', (['jsonResponse'], {'indent': '(4)'}), '(jsonResponse, indent=4)\n', (4028, 4052), False, 'import json\n'), ((4181, 4221), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (4202, 4221), False, 'import logging\n'), ((4733, 4782), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'model_name': 'modelname'}), '(temperature=0.1, model_name=modelname)\n', (4743, 4782), False, 'from langchain.chat_models import ChatOpenAI\n'), ((5139, 5405), 'langchain.llms.LlamaCpp', 'LlamaCpp', ([], {'model_path': "('./models/' + modelname)", 'cache': '(True)', 'n_gpu_layers': 'n_gpu_layers', 'n_batch': 'n_batch', 'n_ctx': '(2048)', 'n_threads': '(8)', 'temperature': '(0.01)', 'max_tokens': '(512)', 'f16_kv': '(True)', 'repeat_penalty': '(1.1)', 'min_p': '(0.05)', 'top_p': '(0.95)', 'top_k': '(40)', 'stop': "['<|end_of_turn|>']"}), "(model_path='./models/' + modelname, cache=True, n_gpu_layers=\n n_gpu_layers, n_batch=n_batch, n_ctx=2048, n_threads=8, temperature=\n 0.01, max_tokens=512, f16_kv=True, repeat_penalty=1.1, min_p=0.05,\n top_p=0.95, top_k=40, stop=['<|end_of_turn|>'])\n", (5147, 5405), False, 'from langchain.llms import LlamaCpp\n'), ((4150, 4169), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4167, 4169), False, 'import logging\n'), ((4284, 4339), 'gradio.components.Textbox', 'gr.components.Textbox', ([], {'lines': '(7)', 'label': '"""Enter your text"""'}), "(lines=7, label='Enter your text')\n", (4305, 4339), True, 'import gradio as gr\n'), ((5107, 5122), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (5120, 5122), False, 'from langchain.cache import InMemoryCache\n'), ((6091, 6115), 'gtts.gTTS', 'gTTS', ([], {'text': '""""""', 'lang': '"""en"""'}), "(text='', lang='en')\n", (6095, 6115), False, 'from gtts import gTTS\n'), ((6141, 6155), 
'pyttsx3.init', 'pyttsx3.init', ([], {}), '()\n', (6153, 6155), False, 'import pyttsx3\n'), ((6643, 6682), 'utils.get_sentence_window_query_engine', 'get_sentence_window_query_engine', (['index'], {}), '(index)\n', (6675, 6682), False, 'from utils import get_sentence_window_query_engine\n'), ((1029, 1070), 'simpleaudio.WaveObject.from_wave_file', 'sa.WaveObject.from_wave_file', (['path_to_wav'], {}), '(path_to_wav)\n', (1057, 1070), True, 'import simpleaudio as sa\n'), ((1537, 1590), 'threading.Thread', 'threading.Thread', ([], {'target': 'play_and_delete', 'daemon': '(True)'}), '(target=play_and_delete, daemon=True)\n', (1553, 1590), False, 'import threading\n'), ((2572, 2586), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2584, 2586), False, 'from datetime import datetime\n'), ((2988, 3027), 'gtts.gTTS', 'gTTS', ([], {'text': 'response.response', 'lang': '"""en"""'}), "(text=response.response, lang='en')\n", (2992, 3027), False, 'from gtts import gTTS\n'), ((5866, 5937), 'TTS.api.TTS', 'TTS', ([], {'model_name': '"""tts_models/en/ljspeech/vits--neon"""', 'progress_bar': '(False)'}), "(model_name='tts_models/en/ljspeech/vits--neon', progress_bar=False)\n", (5869, 5937), False, 'from TTS.api import TTS\n'), ((6733, 6824), 'utils.get_automerging_query_engine', 'get_automerging_query_engine', ([], {'automerging_index': 'index', 'service_context': 'service_context'}), '(automerging_index=index, service_context=\n service_context)\n', (6761, 6824), False, 'from utils import get_automerging_query_engine\n')] |
from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
import os
def construct_index(directory_path):
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_outputs = 2000
# set maximum chunk overlap
max_chunk_overlap = 20
# set chunk size limit
chunk_size_limit = 600
# define LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="text-davinci-003", max_tokens=num_outputs))
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
documents = SimpleDirectoryReader(directory_path).load_data()
index = GPTSimpleVectorIndex(
documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper
)
index.save_to_disk('index.json')
return index
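# Load the previously persisted index from disk and answer a query in compact response mode.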
def ask_ai(query):
index = GPTSimpleVectorIndex.load_from_disk('./Talking_Buddy/index.json')
response = index.query(query, response_mode="compact")
return response.response
os.environ["OPENAI_API_KEY"] = "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"
construct_index("./Talking_Buddy/data")
| [
"llama_index.GPTSimpleVectorIndex.load_from_disk",
"llama_index.GPTSimpleVectorIndex",
"llama_index.SimpleDirectoryReader",
"llama_index.PromptHelper"
] | [((577, 676), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (589, 676), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((758, 852), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['documents'], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(documents, llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (778, 852), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((950, 1015), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['"""./Talking_Buddy/index.json"""'], {}), "('./Talking_Buddy/index.json')\n", (985, 1015), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n'), ((477, 555), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0.5)', 'model_name': '"""text-davinci-003"""', 'max_tokens': 'num_outputs'}), "(temperature=0.5, model_name='text-davinci-003', max_tokens=num_outputs)\n", (483, 555), False, 'from langchain import OpenAI\n'), ((691, 728), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_path'], {}), '(directory_path)\n', (712, 728), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper\n')] |
import os
import streamlit as st
from dotenv import load_dotenv
from llama_index import GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext
from langchain.llms.openai import OpenAI
from biorxiv_manager import BioRxivManager
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
st.title("Ask BioRxiv")
query = st.text_input("What would you like to ask? (source: BioRxiv files)", "")
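# Cache the fetched and parsed BioRxiv documents so the download and parsing run only once across reruns.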
@st.cache_data
def fetch_and_parse():
    # instantiate the BioRxivManager runtime and fetch the parsed nodes
manager = BioRxivManager()
return manager.fetch_and_parse(interval="2023-07-01/2023-07-30")
embedded_documents = fetch_and_parse()
if st.button("Submit"):
if not query.strip():
st.error(f"Please provide the search query.")
else:
try:
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-4-32k", openai_api_key=openai_api_key))
max_input_size = 32767
num_output = 400
chunk_overlap_ratio = 0.2 # Adjust this value according to your need.
prompt_helper = PromptHelper(max_input_size, num_output, chunk_overlap_ratio)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index = GPTVectorStoreIndex.from_documents(embedded_documents, service_context=service_context)
response = index.query(query)
st.success(response)
except Exception as e:
st.error(f"An error occurred: {e}")
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.PromptHelper"
] | [((238, 251), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (249, 251), False, 'from dotenv import load_dotenv\n'), ((269, 296), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (278, 296), False, 'import os\n'), ((298, 321), 'streamlit.title', 'st.title', (['"""Ask BioRxiv"""'], {}), "('Ask BioRxiv')\n", (306, 321), True, 'import streamlit as st\n'), ((330, 402), 'streamlit.text_input', 'st.text_input', (['"""What would you like to ask? (source: BioRxiv files)"""', '""""""'], {}), "('What would you like to ask? (source: BioRxiv files)', '')\n", (343, 402), True, 'import streamlit as st\n'), ((656, 675), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (665, 675), True, 'import streamlit as st\n'), ((526, 542), 'biorxiv_manager.BioRxivManager', 'BioRxivManager', ([], {}), '()\n', (540, 542), False, 'from biorxiv_manager import BioRxivManager\n'), ((711, 756), 'streamlit.error', 'st.error', (['f"""Please provide the search query."""'], {}), "(f'Please provide the search query.')\n", (719, 756), True, 'import streamlit as st\n'), ((1079, 1140), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'chunk_overlap_ratio'], {}), '(max_input_size, num_output, chunk_overlap_ratio)\n', (1091, 1140), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext\n'), ((1172, 1263), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (1200, 1263), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext\n'), ((1279, 1371), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['embedded_documents'], {'service_context': 'service_context'}), '(embedded_documents, service_context=\n service_context)\n', (1313, 1371), False, 'from llama_index import GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext\n'), ((1434, 1454), 'streamlit.success', 'st.success', (['response'], {}), '(response)\n', (1444, 1454), True, 'import streamlit as st\n'), ((1498, 1533), 'streamlit.error', 'st.error', (['f"""An error occurred: {e}"""'], {}), "(f'An error occurred: {e}')\n", (1506, 1533), True, 'import streamlit as st\n'), ((825, 901), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-4-32k"""', 'openai_api_key': 'openai_api_key'}), "(temperature=0, model_name='gpt-4-32k', openai_api_key=openai_api_key)\n", (831, 901), False, 'from langchain.llms.openai import OpenAI\n')] |
import logging
from llama_index.langchain_helpers.agents.tools import LlamaIndexTool
from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters
from app.llama_index.index import setup_index
from app.llama_index.query_engine import setup_query_engine
from app.database.crud import get_vectorized_election_programs_from_db
from app.database.database import Session
def setup_agent_tools():
session = Session()
vectorized_election_programs = get_vectorized_election_programs_from_db(session)
logging.info(f"Loaded {len(vectorized_election_programs)} vectorized programs.")
vector_tools = []
for program in vectorized_election_programs:
meta_data_filters = MetadataFilters(
filters=[
ExactMatchFilter(key="group_id", value=program.id),
ExactMatchFilter(key="election_id", value=program.election_id),
ExactMatchFilter(key="party_id", value=program.party_id),
]
)
# define query engines
vector_index = setup_index()
vector_query_engine = setup_query_engine(
vector_index, filters=meta_data_filters
)
# define tools
query_engine_tool = LlamaIndexTool(
name="vector_tool",
description=(
f"Nützlich für Fragen zu spezifischen Aspekten des Wahlprogramms der {program.full_name} für die {program.label}."
),
query_engine=vector_query_engine,
)
logging.info(f"Loaded query engine tool for {program.full_name}.")
vector_tools.append(query_engine_tool)
return vector_tools
| [
"llama_index.langchain_helpers.agents.tools.LlamaIndexTool",
"llama_index.vector_stores.types.ExactMatchFilter"
] | [((424, 433), 'app.database.database.Session', 'Session', ([], {}), '()\n', (431, 433), False, 'from app.database.database import Session\n'), ((469, 518), 'app.database.crud.get_vectorized_election_programs_from_db', 'get_vectorized_election_programs_from_db', (['session'], {}), '(session)\n', (509, 518), False, 'from app.database.crud import get_vectorized_election_programs_from_db\n'), ((1044, 1057), 'app.llama_index.index.setup_index', 'setup_index', ([], {}), '()\n', (1055, 1057), False, 'from app.llama_index.index import setup_index\n'), ((1088, 1147), 'app.llama_index.query_engine.setup_query_engine', 'setup_query_engine', (['vector_index'], {'filters': 'meta_data_filters'}), '(vector_index, filters=meta_data_filters)\n', (1106, 1147), False, 'from app.llama_index.query_engine import setup_query_engine\n'), ((1221, 1427), 'llama_index.langchain_helpers.agents.tools.LlamaIndexTool', 'LlamaIndexTool', ([], {'name': '"""vector_tool"""', 'description': 'f"""Nützlich für Fragen zu spezifischen Aspekten des Wahlprogramms der {program.full_name} für die {program.label}."""', 'query_engine': 'vector_query_engine'}), "(name='vector_tool', description=\n f'Nützlich für Fragen zu spezifischen Aspekten des Wahlprogramms der {program.full_name} für die {program.label}.'\n , query_engine=vector_query_engine)\n", (1235, 1427), False, 'from llama_index.langchain_helpers.agents.tools import LlamaIndexTool\n'), ((1505, 1571), 'logging.info', 'logging.info', (['f"""Loaded query engine tool for {program.full_name}."""'], {}), "(f'Loaded query engine tool for {program.full_name}.')\n", (1517, 1571), False, 'import logging\n'), ((759, 809), 'llama_index.vector_stores.types.ExactMatchFilter', 'ExactMatchFilter', ([], {'key': '"""group_id"""', 'value': 'program.id'}), "(key='group_id', value=program.id)\n", (775, 809), False, 'from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n'), ((827, 889), 'llama_index.vector_stores.types.ExactMatchFilter', 'ExactMatchFilter', ([], {'key': '"""election_id"""', 'value': 'program.election_id'}), "(key='election_id', value=program.election_id)\n", (843, 889), False, 'from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n'), ((907, 963), 'llama_index.vector_stores.types.ExactMatchFilter', 'ExactMatchFilter', ([], {'key': '"""party_id"""', 'value': 'program.party_id'}), "(key='party_id', value=program.party_id)\n", (923, 963), False, 'from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters\n')] |
import os
from configparser import ConfigParser, SectionProxy
from typing import Any, Type
from llama_index import (
LLMPredictor,
ServiceContext,
VectorStoreIndex,
)
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices import SimpleKeywordTableIndex
from llama_index.indices.base import BaseIndex
from llama_index.indices.loading import load_index_from_storage
from llama_index.llm_predictor import StructuredLLMPredictor
from llama_index.llms.llm import LLM
from llama_index.llms.openai import OpenAI
from llama_index.storage.storage_context import StorageContext
CONFIG_FILE_NAME = "config.ini"
DEFAULT_PERSIST_DIR = "./storage"
DEFAULT_CONFIG = {
"store": {"persist_dir": DEFAULT_PERSIST_DIR},
"index": {"type": "default"},
"embed_model": {"type": "default"},
"llm_predictor": {"type": "default"},
}
def load_config(root: str = ".") -> ConfigParser:
"""Load configuration from file."""
config = ConfigParser()
config.read_dict(DEFAULT_CONFIG)
config.read(os.path.join(root, CONFIG_FILE_NAME))
return config
def save_config(config: ConfigParser, root: str = ".") -> None:
"""Load configuration to file."""
with open(os.path.join(root, CONFIG_FILE_NAME), "w") as fd:
config.write(fd)
def load_index(root: str = ".") -> BaseIndex[Any]:
"""Load existing index file."""
config = load_config(root)
service_context = _load_service_context(config)
# Index type
index_type: Type
if config["index"]["type"] == "default" or config["index"]["type"] == "vector":
index_type = VectorStoreIndex
elif config["index"]["type"] == "keyword":
index_type = SimpleKeywordTableIndex
else:
raise KeyError(f"Unknown index.type {config['index']['type']}")
try:
# try loading index
storage_context = _load_storage_context(config)
index = load_index_from_storage(storage_context)
except ValueError:
# build index
storage_context = StorageContext.from_defaults()
index = index_type(
nodes=[], service_context=service_context, storage_context=storage_context
)
return index
def save_index(index: BaseIndex[Any], root: str = ".") -> None:
"""Save index to file."""
config = load_config(root)
persist_dir = config["store"]["persist_dir"]
index.storage_context.persist(persist_dir=persist_dir)
def _load_service_context(config: ConfigParser) -> ServiceContext:
"""Internal function to load service context based on configuration."""
embed_model = _load_embed_model(config)
llm_predictor = _load_llm_predictor(config)
return ServiceContext.from_defaults(
llm_predictor=llm_predictor, embed_model=embed_model
)
def _load_storage_context(config: ConfigParser) -> StorageContext:
persist_dir = config["store"]["persist_dir"]
return StorageContext.from_defaults(persist_dir=persist_dir)
def _load_llm_predictor(config: ConfigParser) -> LLMPredictor:
"""Internal function to load LLM predictor based on configuration."""
model_type = config["llm_predictor"]["type"].lower()
if model_type == "default":
llm = _load_llm(config["llm_predictor"])
return LLMPredictor(llm=llm)
elif model_type == "structured":
llm = _load_llm(config["llm_predictor"])
return StructuredLLMPredictor(llm=llm)
else:
raise KeyError("llm_predictor.type")
def _load_llm(section: SectionProxy) -> LLM:
if "engine" in section:
return OpenAI(engine=section["engine"])
else:
return OpenAI()
def _load_embed_model(config: ConfigParser) -> BaseEmbedding:
"""Internal function to load embedding model based on configuration."""
model_type = config["embed_model"]["type"]
if model_type == "default":
return OpenAIEmbedding()
else:
raise KeyError("embed_model.type")
| [
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.llm_predictor.StructuredLLMPredictor",
"llama_index.llms.openai.OpenAI",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.indices.loading.load_index_from_storage",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((1023, 1037), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1035, 1037), False, 'from configparser import ConfigParser, SectionProxy\n'), ((2725, 2812), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model'}), '(llm_predictor=llm_predictor, embed_model=\n embed_model)\n', (2753, 2812), False, 'from llama_index import LLMPredictor, ServiceContext, VectorStoreIndex\n'), ((2951, 3004), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (2979, 3004), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((1091, 1127), 'os.path.join', 'os.path.join', (['root', 'CONFIG_FILE_NAME'], {}), '(root, CONFIG_FILE_NAME)\n', (1103, 1127), False, 'import os\n'), ((1957, 1997), 'llama_index.indices.loading.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1980, 1997), False, 'from llama_index.indices.loading import load_index_from_storage\n'), ((3297, 3318), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (3309, 3318), False, 'from llama_index import LLMPredictor, ServiceContext, VectorStoreIndex\n'), ((3597, 3629), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'engine': "section['engine']"}), "(engine=section['engine'])\n", (3603, 3629), False, 'from llama_index.llms.openai import OpenAI\n'), ((3655, 3663), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (3661, 3663), False, 'from llama_index.llms.openai import OpenAI\n'), ((3898, 3915), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (3913, 3915), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((1265, 1301), 'os.path.join', 'os.path.join', (['root', 'CONFIG_FILE_NAME'], {}), '(root, CONFIG_FILE_NAME)\n', (1277, 1301), False, 'import os\n'), ((2069, 2099), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (2097, 2099), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((3420, 3451), 'llama_index.llm_predictor.StructuredLLMPredictor', 'StructuredLLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (3442, 3451), False, 'from llama_index.llm_predictor import StructuredLLMPredictor\n')] |
from components.store import get_storage_context
from llama_index import VectorStoreIndex
from llama_index.retrievers import (
VectorIndexRetriever,
)
from models.gpts import get_gpts_by_uuids
def search_gpts(question):
storage_context = get_storage_context()
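    # Build an index over the existing vector store; no new documents are inserted here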
index = VectorStoreIndex.from_documents([], storage_context=storage_context)
retriever = VectorIndexRetriever(index=index, similarity_top_k=10)
nodes = retriever.retrieve(question)
uuids = []
uuids_with_scores = {}
gpts = []
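    # Keep only strong matches: nodes with a similarity score above 0.80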
for node in nodes:
print("node metadata", node.metadata)
if node.score > 0.80:
uuid = node.metadata['uuid']
uuids.append(uuid)
uuids_with_scores[uuid] = node.score
if len(uuids) == 0:
return gpts
rows = get_gpts_by_uuids(uuids)
for row in rows:
gpts.append({
"uuid": row.uuid,
"name": row.name,
"description": row.description,
"avatar_url": row.avatar_url,
"author_name": row.author_name,
"created_at": row.created_at,
"updated_at": row.updated_at,
"visit_url": "https://chat.openai.com/g/" + row.short_url,
"score": uuids_with_scores[row.uuid],
})
sorted_gpts = sorted(gpts, key=lambda x: x['score'], reverse=True)
return sorted_gpts
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.VectorIndexRetriever"
] | [((248, 269), 'components.store.get_storage_context', 'get_storage_context', ([], {}), '()\n', (267, 269), False, 'from components.store import get_storage_context\n'), ((282, 350), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[]'], {'storage_context': 'storage_context'}), '([], storage_context=storage_context)\n', (313, 350), False, 'from llama_index import VectorStoreIndex\n'), ((368, 422), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(10)'}), '(index=index, similarity_top_k=10)\n', (388, 422), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((799, 823), 'models.gpts.get_gpts_by_uuids', 'get_gpts_by_uuids', (['uuids'], {}), '(uuids)\n', (816, 823), False, 'from models.gpts import get_gpts_by_uuids\n')] |
"""LanceDB vector store with cloud storage support."""
import os
from typing import Any, Optional
from dotenv import load_dotenv
from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores import LanceDBVectorStore as LanceDBVectorStoreBase
from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities
from llama_index.vector_stores.types import VectorStoreQuery, VectorStoreQueryResult
from pandas import DataFrame
load_dotenv()
class LanceDBVectorStore(LanceDBVectorStoreBase):
"""Advanced LanceDB Vector Store supporting cloud storage and prefiltering."""
from lancedb.query import LanceQueryBuilder
from lancedb.table import Table
def __init__(
self,
uri: str,
table_name: str = "vectors",
nprobes: int = 20,
refine_factor: Optional[int] = None,
api_key: Optional[str] = None,
region: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Init params."""
self._setup_connection(uri, api_key, region)
self.uri = uri
self.table_name = table_name
self.nprobes = nprobes
self.refine_factor = refine_factor
self.api_key = api_key
self.region = region
def _setup_connection(self, uri: str, api_key: Optional[str] = None, region: Optional[str] = None):
"""Establishes a robust connection to LanceDB."""
api_key = api_key or os.getenv('LANCEDB_API_KEY')
region = region or os.getenv('LANCEDB_REGION')
import_err_msg = "`lancedb` package not found, please run `pip install lancedb`"
try:
import lancedb
except ImportError:
raise ImportError(import_err_msg)
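        # With an API key and region, connect to LanceDB Cloud; otherwise open a local connection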
if api_key and region:
self.connection = lancedb.connect(uri, api_key=api_key, region=region)
else:
self.connection = lancedb.connect(uri)
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Enhanced query method to support prefiltering in LanceDB queries."""
table = self.connection.open_table(self.table_name)
lance_query = self._prepare_lance_query(query, table, **kwargs)
results = lance_query.to_df()
return self._construct_query_result(results)
def _prepare_lance_query(self, query: VectorStoreQuery, table: Table, **kwargs) -> LanceQueryBuilder:
"""Prepares the LanceDB query considering prefiltering and additional parameters."""
if query.filters is not None:
if "where" in kwargs:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for lancedb specific items that are "
"not supported via the generic query interface.")
where = _to_lance_filter(query.filters)
else:
where = kwargs.pop("where", None)
prefilter = kwargs.pop("prefilter", False)
lance_query = (
table.search(query.query_embedding).limit(query.similarity_top_k).where(
where, prefilter=prefilter).nprobes(self.nprobes))
if self.refine_factor is not None:
lance_query.refine_factor(self.refine_factor)
return lance_query
def _construct_query_result(self, results: DataFrame) -> VectorStoreQueryResult:
"""Constructs a VectorStoreQueryResult from a LanceDB query result."""
nodes = []
for _, row in results.iterrows():
node = TextNode(
text=row.get('text', ''), # ensure text is a string
id_=row['id'],
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id=row['doc_id']),
})
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=_to_llama_similarities(results),
ids=results["id"].tolist(),
)
| [
"llama_index.vector_stores.lancedb._to_llama_similarities",
"llama_index.schema.RelatedNodeInfo",
"llama_index.vector_stores.lancedb._to_lance_filter"
] | [((490, 503), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (501, 503), False, 'from dotenv import load_dotenv\n'), ((1464, 1492), 'os.getenv', 'os.getenv', (['"""LANCEDB_API_KEY"""'], {}), "('LANCEDB_API_KEY')\n", (1473, 1492), False, 'import os\n'), ((1520, 1547), 'os.getenv', 'os.getenv', (['"""LANCEDB_REGION"""'], {}), "('LANCEDB_REGION')\n", (1529, 1547), False, 'import os\n'), ((1814, 1866), 'lancedb.connect', 'lancedb.connect', (['uri'], {'api_key': 'api_key', 'region': 'region'}), '(uri, api_key=api_key, region=region)\n', (1829, 1866), False, 'import lancedb\n'), ((1911, 1931), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1926, 1931), False, 'import lancedb\n'), ((2898, 2929), 'llama_index.vector_stores.lancedb._to_lance_filter', '_to_lance_filter', (['query.filters'], {}), '(query.filters)\n', (2914, 2929), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((4021, 4052), 'llama_index.vector_stores.lancedb._to_llama_similarities', '_to_llama_similarities', (['results'], {}), '(results)\n', (4043, 4052), False, 'from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities\n'), ((3841, 3879), 'llama_index.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': "row['doc_id']"}), "(node_id=row['doc_id'])\n", (3856, 3879), False, 'from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode\n')] |
from typing import List
from fastapi.responses import StreamingResponse
from llama_index.chat_engine.types import BaseChatEngine
from app.engine.index import get_chat_engine
from fastapi import APIRouter, Depends, HTTPException, Request, status
from llama_index.llms.base import ChatMessage
from llama_index.llms.types import MessageRole
from pydantic import BaseModel
chat_router = r = APIRouter()
class _Message(BaseModel):
role: MessageRole
content: str
class _ChatData(BaseModel):
messages: List[_Message]
@r.post("")
async def chat(
request: Request,
data: _ChatData,
chat_engine: BaseChatEngine = Depends(get_chat_engine),
):
# check preconditions and get last message
if len(data.messages) == 0:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="No messages provided",
)
lastMessage = data.messages.pop()
if lastMessage.role != MessageRole.USER:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Last message must be from user",
)
# convert messages coming from the request to type ChatMessage
messages = [
ChatMessage(
role=m.role,
content=m.content,
)
for m in data.messages
]
# query chat engine
response = await chat_engine.astream_chat(lastMessage.content, messages)
# stream response
async def event_generator():
async for token in response.async_response_gen():
# If client closes connection, stop sending events
if await request.is_disconnected():
break
yield token
return StreamingResponse(event_generator(), media_type="text/plain")
| [
"llama_index.llms.base.ChatMessage"
] | [((390, 401), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (399, 401), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((636, 660), 'fastapi.Depends', 'Depends', (['get_chat_engine'], {}), '(get_chat_engine)\n', (643, 660), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((758, 848), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""No messages provided"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'No messages provided')\n", (771, 848), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((976, 1076), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""Last message must be from user"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'Last message must be from user')\n", (989, 1076), False, 'from fastapi import APIRouter, Depends, HTTPException, Request, status\n'), ((1199, 1242), 'llama_index.llms.base.ChatMessage', 'ChatMessage', ([], {'role': 'm.role', 'content': 'm.content'}), '(role=m.role, content=m.content)\n', (1210, 1242), False, 'from llama_index.llms.base import ChatMessage\n')] |
# Copyright (c) Timescale, Inc. (2023)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import streamlit as st
from streamlit.hello.utils import show_code
from llama_index.vector_stores import TimescaleVectorStore
from llama_index import ServiceContext, StorageContext
from llama_index.indices.vector_store import VectorStoreIndex
from llama_index.llms import OpenAI
from llama_index import set_global_service_context
import pandas as pd
from pathlib import Path
from datetime import datetime, timedelta
from timescale_vector import client
from typing import List, Tuple
from llama_index.schema import TextNode
from llama_index.embeddings import OpenAIEmbedding
import psycopg2
def get_repos():
with psycopg2.connect(dsn=st.secrets["TIMESCALE_SERVICE_URL"]) as connection:
# Create a cursor within the context manager
with connection.cursor() as cursor:
try:
select_data_sql = "SELECT * FROM time_machine_catalog;"
cursor.execute(select_data_sql)
except psycopg2.errors.UndefinedTable as e:
return {}
catalog_entries = cursor.fetchall()
catalog_dict = {}
for entry in catalog_entries:
repo_url, table_name = entry
catalog_dict[repo_url] = table_name
return catalog_dict
def get_auto_retriever(index, retriever_args):
from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo
vector_store_info = VectorStoreInfo(
content_info="Description of the commits to PostgreSQL. Describes changes made to Postgres",
metadata_info=[
MetadataInfo(
name="commit_hash",
type="str",
description="Commit Hash",
),
MetadataInfo(
name="author",
type="str",
description="Author of the commit",
),
MetadataInfo(
name="__start_date",
type="datetime in iso format",
description="All results will be after this datetime",
),
MetadataInfo(
name="__end_date",
type="datetime in iso format",
description="All results will be before this datetime",
)
],
)
from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever
retriever = VectorIndexAutoRetriever(index,
vector_store_info=vector_store_info,
service_context=index.service_context,
**retriever_args)
# build query engine
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine.from_args(
retriever=retriever, service_context=index.service_context
)
from llama_index.tools.query_engine import QueryEngineTool
# convert query engine to tool
query_engine_tool = QueryEngineTool.from_defaults(query_engine=query_engine)
from llama_index.agent import OpenAIAgent
chat_engine = OpenAIAgent.from_tools(
tools=[query_engine_tool],
llm=index.service_context.llm,
verbose=True
#service_context=index.service_context
)
return chat_engine
def tm_demo():
repos = get_repos()
months = st.sidebar.slider('How many months back to search (0=no limit)?', 0, 130, 0)
if "config_months" not in st.session_state.keys() or months != st.session_state.config_months:
st.session_state.clear()
topk = st.sidebar.slider('How many commits to retrieve', 1, 150, 20)
if "config_topk" not in st.session_state.keys() or topk != st.session_state.config_topk:
st.session_state.clear()
if len(repos) > 0:
repo = st.sidebar.selectbox("Choose a repo", repos.keys())
else:
st.error("No repositiories found, please [load some data first](/LoadData)")
return
if "config_repo" not in st.session_state.keys() or repo != st.session_state.config_repo:
st.session_state.clear()
st.session_state.config_months = months
st.session_state.config_topk = topk
st.session_state.config_repo = repo
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{"role": "assistant", "content": "Please choose a repo and time filter on the sidebar and then ask me a question about the git history"}
]
vector_store = TimescaleVectorStore.from_params(
service_url=st.secrets["TIMESCALE_SERVICE_URL"],
table_name=repos[repo],
time_partition_interval=timedelta(days=7),
);
service_context = ServiceContext.from_defaults(llm=OpenAI(model="gpt-4", temperature=0.1))
set_global_service_context(service_context)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
#chat engine goes into the session to retain history
if "chat_engine" not in st.session_state.keys(): # Initialize the chat engine
retriever_args = {"similarity_top_k" : int(topk)}
if months > 0:
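            # Restrict retrieval to the selected time window via start/end dates on the vector store query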
end_dt = datetime.now()
start_dt = end_dt - timedelta(weeks=4*months)
retriever_args["vector_store_kwargs"] = ({"start_date": start_dt, "end_date":end_dt})
st.session_state.chat_engine = get_auto_retriever(index, retriever_args)
#st.session_state.chat_engine = index.as_chat_engine(chat_mode="best", similarity_top_k=20, verbose=True)
if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = st.session_state.chat_engine.chat(prompt, function_call="query_engine_tool")
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history
st.set_page_config(page_title="Time machine demo", page_icon="🧑💼")
st.markdown("# Time Machine")
st.sidebar.header("Welcome to the Time Machine")
debug_llamaindex = False
if debug_llamaindex:
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
tm_demo()
#show_code(tm_demo)
| [
"llama_index.tools.query_engine.QueryEngineTool.from_defaults",
"llama_index.llms.OpenAI",
"llama_index.vector_stores.types.MetadataInfo",
"llama_index.set_global_service_context",
"llama_index.indices.vector_store.retrievers.VectorIndexAutoRetriever",
"llama_index.agent.OpenAIAgent.from_tools",
"llama_index.indices.vector_store.VectorStoreIndex.from_vector_store",
"llama_index.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args"
] | [((7098, 7170), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Time machine demo"""', 'page_icon': '"""🧑\u200d💼"""'}), "(page_title='Time machine demo', page_icon='🧑\\u200d💼')\n", (7116, 7170), True, 'import streamlit as st\n'), ((7166, 7195), 'streamlit.markdown', 'st.markdown', (['"""# Time Machine"""'], {}), "('# Time Machine')\n", (7177, 7195), True, 'import streamlit as st\n'), ((7196, 7244), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""Welcome to the Time Machine"""'], {}), "('Welcome to the Time Machine')\n", (7213, 7244), True, 'import streamlit as st\n'), ((2991, 3120), 'llama_index.indices.vector_store.retrievers.VectorIndexAutoRetriever', 'VectorIndexAutoRetriever', (['index'], {'vector_store_info': 'vector_store_info', 'service_context': 'index.service_context'}), '(index, vector_store_info=vector_store_info,\n service_context=index.service_context, **retriever_args)\n', (3015, 3120), False, 'from llama_index.indices.vector_store.retrievers import VectorIndexAutoRetriever\n'), ((3376, 3471), 'llama_index.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', ([], {'retriever': 'retriever', 'service_context': 'index.service_context'}), '(retriever=retriever, service_context=index.\n service_context)\n', (3406, 3471), False, 'from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine\n'), ((3604, 3660), 'llama_index.tools.query_engine.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'query_engine'}), '(query_engine=query_engine)\n', (3633, 3660), False, 'from llama_index.tools.query_engine import QueryEngineTool\n'), ((3726, 3825), 'llama_index.agent.OpenAIAgent.from_tools', 'OpenAIAgent.from_tools', ([], {'tools': '[query_engine_tool]', 'llm': 'index.service_context.llm', 'verbose': '(True)'}), '(tools=[query_engine_tool], llm=index.service_context\n .llm, verbose=True)\n', (3748, 3825), False, 'from llama_index.agent import OpenAIAgent\n'), ((3975, 4051), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""How many months back to search (0=no limit)?"""', '(0)', '(130)', '(0)'], {}), "('How many months back to search (0=no limit)?', 0, 130, 0)\n", (3992, 4051), True, 'import streamlit as st\n'), ((4197, 4258), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""How many commits to retrieve"""', '(1)', '(150)', '(20)'], {}), "('How many commits to retrieve', 1, 150, 20)\n", (4214, 4258), True, 'import streamlit as st\n'), ((5443, 5486), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (5469, 5486), False, 'from llama_index import set_global_service_context\n'), ((5499, 5597), 'llama_index.indices.vector_store.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (5533, 5597), False, 'from llama_index.indices.vector_store import VectorStoreIndex\n'), ((7331, 7389), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (7350, 7389), False, 'import logging\n'), ((1240, 1297), 'psycopg2.connect', 'psycopg2.connect', ([], {'dsn': "st.secrets['TIMESCALE_SERVICE_URL']"}), "(dsn=st.secrets['TIMESCALE_SERVICE_URL'])\n", (1256, 1297), False, 'import psycopg2\n'), ((4160, 4184), 
'streamlit.session_state.clear', 'st.session_state.clear', ([], {}), '()\n', (4182, 4184), True, 'import streamlit as st\n'), ((4360, 4384), 'streamlit.session_state.clear', 'st.session_state.clear', ([], {}), '()\n', (4382, 4384), True, 'import streamlit as st\n'), ((4502, 4578), 'streamlit.error', 'st.error', (['"""No repositiories found, please [load some data first](/LoadData)"""'], {}), "('No repositiories found, please [load some data first](/LoadData)')\n", (4510, 4578), True, 'import streamlit as st\n'), ((4700, 4724), 'streamlit.session_state.clear', 'st.session_state.clear', ([], {}), '()\n', (4722, 4724), True, 'import streamlit as st\n'), ((4881, 4904), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4902, 4904), True, 'import streamlit as st\n'), ((5693, 5716), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (5714, 5716), True, 'import streamlit as st\n'), ((6233, 6263), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (6246, 6263), True, 'import streamlit as st\n'), ((6322, 6391), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (6354, 6391), True, 'import streamlit as st\n'), ((7425, 7465), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (7446, 7465), False, 'import logging\n'), ((4083, 4106), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4104, 4106), True, 'import streamlit as st\n'), ((4287, 4310), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4308, 4310), True, 'import streamlit as st\n'), ((4627, 4650), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (4648, 4650), True, 'import streamlit as st\n'), ((5317, 5334), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (5326, 5334), False, 'from datetime import datetime, timedelta\n'), ((5399, 5437), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""', 'temperature': '(0.1)'}), "(model='gpt-4', temperature=0.1)\n", (5405, 5437), False, 'from llama_index.llms import OpenAI\n'), ((5849, 5863), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5861, 5863), False, 'from datetime import datetime, timedelta\n'), ((6486, 6518), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (6501, 6518), True, 'import streamlit as st\n'), ((6532, 6560), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (6540, 6560), True, 'import streamlit as st\n'), ((6705, 6733), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (6720, 6733), True, 'import streamlit as st\n'), ((7394, 7413), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7411, 7413), False, 'import logging\n'), ((2185, 2256), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""commit_hash"""', 'type': '"""str"""', 'description': '"""Commit Hash"""'}), "(name='commit_hash', type='str', description='Commit Hash')\n", (2197, 2256), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((2333, 2408), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""author"""', 'type': '"""str"""', 'description': '"""Author of the commit"""'}), "(name='author', type='str', 
description='Author of the commit')\n", (2345, 2408), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((2485, 2608), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""__start_date"""', 'type': '"""datetime in iso format"""', 'description': '"""All results will be after this datetime"""'}), "(name='__start_date', type='datetime in iso format',\n description='All results will be after this datetime')\n", (2497, 2608), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((2686, 2809), 'llama_index.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""__end_date"""', 'type': '"""datetime in iso format"""', 'description': '"""All results will be before this datetime"""'}), "(name='__end_date', type='datetime in iso format', description=\n 'All results will be before this datetime')\n", (2698, 2809), False, 'from llama_index.vector_stores.types import MetadataInfo, VectorStoreInfo\n'), ((5896, 5923), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(4 * months)'}), '(weeks=4 * months)\n', (5905, 5923), False, 'from datetime import datetime, timedelta\n'), ((6752, 6777), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (6762, 6777), True, 'import streamlit as st\n'), ((6806, 6882), 'streamlit.session_state.chat_engine.chat', 'st.session_state.chat_engine.chat', (['prompt'], {'function_call': '"""query_engine_tool"""'}), "(prompt, function_call='query_engine_tool')\n", (6839, 6882), True, 'import streamlit as st\n'), ((6899, 6926), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (6907, 6926), True, 'import streamlit as st\n'), ((7021, 7062), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (7053, 7062), True, 'import streamlit as st\n')] |
import logging
from threading import Thread
from typing import Any, List, Optional, Type
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.base.response.schema import RESPONSE_TYPE, StreamingResponse
from llama_index.core.callbacks import CallbackManager, trace_method
from llama_index.core.chat_engine.types import (
AgentChatResponse,
BaseChatEngine,
StreamingAgentChatResponse,
)
from llama_index.core.chat_engine.utils import response_gen_from_query_engine
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
from llama_index.core.base.llms.generic_utils import messages_to_history_str
from llama_index.core.llms.llm import LLM
from llama_index.core.memory import BaseMemory, ChatMemoryBuffer
from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate
from llama_index.core.service_context import ServiceContext
from llama_index.core.service_context_elements.llm_predictor import LLMPredictorType
from llama_index.core.settings import (
Settings,
callback_manager_from_settings_or_context,
)
from llama_index.core.tools import ToolOutput
logger = logging.getLogger(__name__)
DEFAULT_TEMPLATE = """\
Given a conversation (between Human and Assistant) and a follow up message from Human, \
rewrite the message to be a standalone question that captures all relevant context \
from the conversation.
<Chat History>
{chat_history}
<Follow Up Message>
{question}
<Standalone question>
"""
DEFAULT_PROMPT = PromptTemplate(DEFAULT_TEMPLATE)
class CondenseQuestionChatEngine(BaseChatEngine):
"""Condense Question Chat Engine.
First generate a standalone question from conversation context and last message,
then query the query engine for a response.
"""
def __init__(
self,
query_engine: BaseQueryEngine,
condense_question_prompt: BasePromptTemplate,
memory: BaseMemory,
llm: LLMPredictorType,
verbose: bool = False,
callback_manager: Optional[CallbackManager] = None,
) -> None:
self._query_engine = query_engine
self._condense_question_prompt = condense_question_prompt
self._memory = memory
self._llm = llm
self._verbose = verbose
self.callback_manager = callback_manager or CallbackManager([])
@classmethod
def from_defaults(
cls,
query_engine: BaseQueryEngine,
condense_question_prompt: Optional[BasePromptTemplate] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
service_context: Optional[ServiceContext] = None,
verbose: bool = False,
system_prompt: Optional[str] = None,
prefix_messages: Optional[List[ChatMessage]] = None,
llm: Optional[LLM] = None,
**kwargs: Any,
) -> "CondenseQuestionChatEngine":
"""Initialize a CondenseQuestionChatEngine from default parameters."""
condense_question_prompt = condense_question_prompt or DEFAULT_PROMPT
if llm is None:
service_context = service_context or ServiceContext.from_defaults(
embed_model=MockEmbedding(embed_dim=2)
)
llm = service_context.llm
else:
service_context = service_context or ServiceContext.from_defaults(
llm=llm, embed_model=MockEmbedding(embed_dim=2)
)
chat_history = chat_history or []
memory = memory or memory_cls.from_defaults(chat_history=chat_history, llm=llm)
if system_prompt is not None:
raise NotImplementedError(
"system_prompt is not supported for CondenseQuestionChatEngine."
)
if prefix_messages is not None:
raise NotImplementedError(
"prefix_messages is not supported for CondenseQuestionChatEngine."
)
return cls(
query_engine,
condense_question_prompt,
memory,
llm,
verbose=verbose,
callback_manager=callback_manager_from_settings_or_context(
Settings, service_context
),
)
def _condense_question(
self, chat_history: List[ChatMessage], last_message: str
) -> str:
"""
Generate standalone question from conversation context and last message.
"""
chat_history_str = messages_to_history_str(chat_history)
logger.debug(chat_history_str)
return self._llm.predict(
self._condense_question_prompt,
question=last_message,
chat_history=chat_history_str,
)
async def _acondense_question(
self, chat_history: List[ChatMessage], last_message: str
) -> str:
"""
Generate standalone question from conversation context and last message.
"""
chat_history_str = messages_to_history_str(chat_history)
logger.debug(chat_history_str)
return await self._llm.apredict(
self._condense_question_prompt,
question=last_message,
chat_history=chat_history_str,
)
def _get_tool_output_from_response(
self, query: str, response: RESPONSE_TYPE
) -> ToolOutput:
if isinstance(response, StreamingResponse):
return ToolOutput(
content="",
tool_name="query_engine",
raw_input={"query": query},
raw_output=response,
)
else:
return ToolOutput(
content=str(response),
tool_name="query_engine",
raw_input={"query": query},
raw_output=response,
)
@trace_method("chat")
def chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AgentChatResponse:
chat_history = chat_history or self._memory.get()
# Generate standalone question from conversation context and last message
condensed_question = self._condense_question(chat_history, message)
log_str = f"Querying with: {condensed_question}"
logger.info(log_str)
if self._verbose:
print(log_str)
# TODO: right now, query engine uses class attribute to configure streaming,
# we are moving towards separate streaming and non-streaming methods.
# In the meanwhile, use this hack to toggle streaming.
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
if isinstance(self._query_engine, RetrieverQueryEngine):
is_streaming = self._query_engine._response_synthesizer._streaming
self._query_engine._response_synthesizer._streaming = False
# Query with standalone question
query_response = self._query_engine.query(condensed_question)
# NOTE: reset streaming flag
if isinstance(self._query_engine, RetrieverQueryEngine):
self._query_engine._response_synthesizer._streaming = is_streaming
tool_output = self._get_tool_output_from_response(
condensed_question, query_response
)
# Record response
self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
self._memory.put(
ChatMessage(role=MessageRole.ASSISTANT, content=str(query_response))
)
return AgentChatResponse(response=str(query_response), sources=[tool_output])
@trace_method("chat")
def stream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
chat_history = chat_history or self._memory.get()
# Generate standalone question from conversation context and last message
condensed_question = self._condense_question(chat_history, message)
log_str = f"Querying with: {condensed_question}"
logger.info(log_str)
if self._verbose:
print(log_str)
# TODO: right now, query engine uses class attribute to configure streaming,
# we are moving towards separate streaming and non-streaming methods.
# In the meanwhile, use this hack to toggle streaming.
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
if isinstance(self._query_engine, RetrieverQueryEngine):
is_streaming = self._query_engine._response_synthesizer._streaming
self._query_engine._response_synthesizer._streaming = True
# Query with standalone question
query_response = self._query_engine.query(condensed_question)
# NOTE: reset streaming flag
if isinstance(self._query_engine, RetrieverQueryEngine):
self._query_engine._response_synthesizer._streaming = is_streaming
tool_output = self._get_tool_output_from_response(
condensed_question, query_response
)
# Record response
if (
isinstance(query_response, StreamingResponse)
and query_response.response_gen is not None
):
# override the generator to include writing to chat history
self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
response = StreamingAgentChatResponse(
chat_stream=response_gen_from_query_engine(query_response.response_gen),
sources=[tool_output],
)
thread = Thread(
target=response.write_response_to_history, args=(self._memory, True)
)
thread.start()
else:
raise ValueError("Streaming is not enabled. Please use chat() instead.")
return response
@trace_method("chat")
async def achat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AgentChatResponse:
chat_history = chat_history or self._memory.get()
# Generate standalone question from conversation context and last message
condensed_question = await self._acondense_question(chat_history, message)
log_str = f"Querying with: {condensed_question}"
logger.info(log_str)
if self._verbose:
print(log_str)
# TODO: right now, query engine uses class attribute to configure streaming,
# we are moving towards separate streaming and non-streaming methods.
# In the meanwhile, use this hack to toggle streaming.
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
if isinstance(self._query_engine, RetrieverQueryEngine):
is_streaming = self._query_engine._response_synthesizer._streaming
self._query_engine._response_synthesizer._streaming = False
# Query with standalone question
query_response = await self._query_engine.aquery(condensed_question)
# NOTE: reset streaming flag
if isinstance(self._query_engine, RetrieverQueryEngine):
self._query_engine._response_synthesizer._streaming = is_streaming
tool_output = self._get_tool_output_from_response(
condensed_question, query_response
)
# Record response
self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
self._memory.put(
ChatMessage(role=MessageRole.ASSISTANT, content=str(query_response))
)
return AgentChatResponse(response=str(query_response), sources=[tool_output])
@trace_method("chat")
async def astream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
chat_history = chat_history or self._memory.get()
# Generate standalone question from conversation context and last message
condensed_question = await self._acondense_question(chat_history, message)
log_str = f"Querying with: {condensed_question}"
logger.info(log_str)
if self._verbose:
print(log_str)
# TODO: right now, query engine uses class attribute to configure streaming,
# we are moving towards separate streaming and non-streaming methods.
# In the meanwhile, use this hack to toggle streaming.
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
if isinstance(self._query_engine, RetrieverQueryEngine):
is_streaming = self._query_engine._response_synthesizer._streaming
self._query_engine._response_synthesizer._streaming = True
# Query with standalone question
query_response = await self._query_engine.aquery(condensed_question)
# NOTE: reset streaming flag
if isinstance(self._query_engine, RetrieverQueryEngine):
self._query_engine._response_synthesizer._streaming = is_streaming
tool_output = self._get_tool_output_from_response(
condensed_question, query_response
)
# Record response
if (
isinstance(query_response, StreamingResponse)
and query_response.response_gen is not None
):
# override the generator to include writing to chat history
# TODO: query engine does not support async generator yet
self._memory.put(ChatMessage(role=MessageRole.USER, content=message))
response = StreamingAgentChatResponse(
chat_stream=response_gen_from_query_engine(query_response.response_gen),
sources=[tool_output],
)
thread = Thread(
target=response.write_response_to_history, args=(self._memory,)
)
thread.start()
else:
raise ValueError("Streaming is not enabled. Please use achat() instead.")
return response
def reset(self) -> None:
# Clear chat history
self._memory.reset()
@property
def chat_history(self) -> List[ChatMessage]:
"""Get chat history."""
return self._memory.get_all()
| [
"llama_index.core.tools.ToolOutput",
"llama_index.core.chat_engine.utils.response_gen_from_query_engine",
"llama_index.core.prompts.base.PromptTemplate",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.base.llms.types.ChatMessage",
"llama_index.core.embeddings.mock_embed_model.MockEmbedding",
"llama_index.core.callbacks.trace_method",
"llama_index.core.base.llms.generic_utils.messages_to_history_str"
] | [((1220, 1247), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1237, 1247), False, 'import logging\n'), ((1579, 1611), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['DEFAULT_TEMPLATE'], {}), '(DEFAULT_TEMPLATE)\n', (1593, 1611), False, 'from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate\n'), ((5895, 5915), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (5907, 5915), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((7693, 7713), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (7705, 7713), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((9987, 10007), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (9999, 10007), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((11806, 11826), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (11818, 11826), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((4566, 4603), 'llama_index.core.base.llms.generic_utils.messages_to_history_str', 'messages_to_history_str', (['chat_history'], {}), '(chat_history)\n', (4589, 4603), False, 'from llama_index.core.base.llms.generic_utils import messages_to_history_str\n'), ((5057, 5094), 'llama_index.core.base.llms.generic_utils.messages_to_history_str', 'messages_to_history_str', (['chat_history'], {}), '(chat_history)\n', (5080, 5094), False, 'from llama_index.core.base.llms.generic_utils import messages_to_history_str\n'), ((2381, 2400), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2396, 2400), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((5491, 5592), 'llama_index.core.tools.ToolOutput', 'ToolOutput', ([], {'content': '""""""', 'tool_name': '"""query_engine"""', 'raw_input': "{'query': query}", 'raw_output': 'response'}), "(content='', tool_name='query_engine', raw_input={'query': query},\n raw_output=response)\n", (5501, 5592), False, 'from llama_index.core.tools import ToolOutput\n'), ((7430, 7481), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'message'}), '(role=MessageRole.USER, content=message)\n', (7441, 7481), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n'), ((9724, 9800), 'threading.Thread', 'Thread', ([], {'target': 'response.write_response_to_history', 'args': '(self._memory, True)'}), '(target=response.write_response_to_history, args=(self._memory, True))\n', (9730, 9800), False, 'from threading import Thread\n'), ((11543, 11594), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'message'}), '(role=MessageRole.USER, content=message)\n', (11554, 11594), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n'), ((13928, 13999), 'threading.Thread', 'Thread', ([], {'target': 'response.write_response_to_history', 'args': '(self._memory,)'}), '(target=response.write_response_to_history, args=(self._memory,))\n', (13934, 13999), False, 'from threading import Thread\n'), ((4216, 4284), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, 
service_context)\n', (4257, 4284), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context\n'), ((9457, 9508), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'message'}), '(role=MessageRole.USER, content=message)\n', (9468, 9508), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n'), ((13661, 13712), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'message'}), '(role=MessageRole.USER, content=message)\n', (13672, 13712), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n'), ((9589, 9648), 'llama_index.core.chat_engine.utils.response_gen_from_query_engine', 'response_gen_from_query_engine', (['query_response.response_gen'], {}), '(query_response.response_gen)\n', (9619, 9648), False, 'from llama_index.core.chat_engine.utils import response_gen_from_query_engine\n'), ((13793, 13852), 'llama_index.core.chat_engine.utils.response_gen_from_query_engine', 'response_gen_from_query_engine', (['query_response.response_gen'], {}), '(query_response.response_gen)\n', (13823, 13852), False, 'from llama_index.core.chat_engine.utils import response_gen_from_query_engine\n'), ((3306, 3332), 'llama_index.core.embeddings.mock_embed_model.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(2)'}), '(embed_dim=2)\n', (3319, 3332), False, 'from llama_index.core.embeddings.mock_embed_model import MockEmbedding\n'), ((3515, 3541), 'llama_index.core.embeddings.mock_embed_model.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(2)'}), '(embed_dim=2)\n', (3528, 3541), False, 'from llama_index.core.embeddings.mock_embed_model import MockEmbedding\n')] |
from typing import List
from llama_index.readers.base import BaseReader
from llama_index.readers.youtube_transcript import YoutubeTranscriptReader
from llama_index.schema import Document
class LyzrYoutubeReader(BaseReader):
def __init__(self) -> None:
try:
from youtube_transcript_api import YouTubeTranscriptApi
except ImportError:
            raise ImportError(
                "`youtube_transcript_api` package not found, "
                "please run `pip install youtube-transcript-api`"
)
def load_data(self, urls: List[str]) -> List[Document]:
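        # Fetch transcripts for the given YouTube URLs and return them as Documents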
loader = YoutubeTranscriptReader()
documents = loader.load_data(ytlinks=urls)
return documents
| [
"llama_index.readers.youtube_transcript.YoutubeTranscriptReader"
] | [((623, 648), 'llama_index.readers.youtube_transcript.YoutubeTranscriptReader', 'YoutubeTranscriptReader', ([], {}), '()\n', (646, 648), False, 'from llama_index.readers.youtube_transcript import YoutubeTranscriptReader\n')] |
import sys
import asyncio
import logging
import warnings
import nest_asyncio
from typing import List, Set
from bs4 import BeautifulSoup, Tag
from llama_index.schema import Document
IS_IPYKERNEL = "ipykernel_launcher" in sys.argv[0]
if IS_IPYKERNEL:
nest_asyncio.apply()
logger = logging.getLogger(__name__)
CONTENT_TAGS = [
"p",
"div",
"span",
"a",
"td",
"tr",
"li",
"article",
"section",
"pre",
"code",
"blockquote",
"em",
"strong",
"b",
"i",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"title",
]
def scrape(html: str) -> str:
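    # Collect de-duplicated visible text from content-bearing tags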
soup: BeautifulSoup = BeautifulSoup(html, "html.parser")
content: List[Tag] = soup.find_all(CONTENT_TAGS)
text_set: Set[str] = set()
for p in content:
for text in p.stripped_strings:
text_set.add(text)
return " ".join(text_set)
async def async_load_content_using_playwright(url: str) -> str:
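    # Render the page in headless Chromium so JavaScript-generated content is included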
try:
from playwright.async_api import async_playwright
async with async_playwright() as p:
browser = await p.chromium.launch()
page = await browser.new_page()
await page.goto(url)
html = await page.content()
await browser.close()
return html
except ImportError:
raise ImportError(
"`playwright` package not found, please install it with "
"`pip install playwright && playwright install`"
)
def load_content_using_playwright(url: str) -> str:
return asyncio.get_event_loop().run_until_complete(
async_load_content_using_playwright(url)
)
class LyzrWebPageReader:
def __init__(self) -> None:
pass
@staticmethod
def load_data(url: str) -> List[Document]:
if IS_IPYKERNEL:
warning_msg = "Running in Google Colab or a Jupyter notebook. Consider using nest_asyncio.apply() to avoid event loop conflicts."
warnings.warn(warning_msg, RuntimeWarning)
html = load_content_using_playwright(url)
content = scrape(html)
document = Document(text=content, metadata={"url": url})
return [document]
| [
"llama_index.schema.Document"
] | [((312, 339), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (329, 339), False, 'import logging\n'), ((281, 301), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (299, 301), False, 'import nest_asyncio\n'), ((676, 710), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (689, 710), False, 'from bs4 import BeautifulSoup, Tag\n'), ((2182, 2227), 'llama_index.schema.Document', 'Document', ([], {'text': 'content', 'metadata': "{'url': url}"}), "(text=content, metadata={'url': url})\n", (2190, 2227), False, 'from llama_index.schema import Document\n'), ((1088, 1106), 'playwright.async_api.async_playwright', 'async_playwright', ([], {}), '()\n', (1104, 1106), False, 'from playwright.async_api import async_playwright\n'), ((1609, 1633), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1631, 1633), False, 'import asyncio\n'), ((2030, 2072), 'warnings.warn', 'warnings.warn', (['warning_msg', 'RuntimeWarning'], {}), '(warning_msg, RuntimeWarning)\n', (2043, 2072), False, 'import warnings\n')] |
import logging
from typing import Optional, Union
from llama_index import ServiceContext
from llama_index.callbacks import CallbackManager
from llama_index.embeddings.utils import EmbedType
from llama_index.llms.utils import LLMType
from llama_index.prompts import PromptTemplate
from llama_index.prompts.base import BasePromptTemplate
from llama_index.node_parser import (
SimpleNodeParser,
)
logger = logging.getLogger(__name__)
class LyzrService:
@staticmethod
def from_defaults(
llm: Optional[LLMType] = "default",
embed_model: Optional[EmbedType] = "default",
system_prompt: str = None,
query_wrapper_prompt: Union[str, BasePromptTemplate] = None,
**kwargs,
) -> ServiceContext:
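        # Bundle the LLM, embeddings, prompts, and node parser into a single ServiceContext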
if isinstance(query_wrapper_prompt, str):
query_wrapper_prompt = PromptTemplate(template=query_wrapper_prompt)
        # Pop here so callback_manager is not passed twice through **kwargs below
        callback_manager: CallbackManager = kwargs.pop(
            "callback_manager", CallbackManager()
        )
node_parser = SimpleNodeParser.from_defaults(
chunk_size=750,
chunk_overlap=100,
callback_manager=callback_manager,
)
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
callback_manager=callback_manager,
node_parser=node_parser,
**kwargs,
)
return service_context
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.CallbackManager",
"llama_index.node_parser.SimpleNodeParser.from_defaults",
"llama_index.prompts.PromptTemplate"
] | [((409, 436), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (426, 436), False, 'import logging\n'), ((1016, 1120), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': '(750)', 'chunk_overlap': '(100)', 'callback_manager': 'callback_manager'}), '(chunk_size=750, chunk_overlap=100,\n callback_manager=callback_manager)\n', (1046, 1120), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1191, 1403), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt', 'callback_manager': 'callback_manager', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embed_model,\n system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt,\n callback_manager=callback_manager, node_parser=node_parser, **kwargs)\n', (1219, 1403), False, 'from llama_index import ServiceContext\n'), ((830, 875), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'query_wrapper_prompt'}), '(template=query_wrapper_prompt)\n', (844, 875), False, 'from llama_index.prompts import PromptTemplate\n'), ((965, 982), 'llama_index.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (980, 982), False, 'from llama_index.callbacks import CallbackManager\n')] |
from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex
from llama_index.response.pprint_utils import pprint_response
from langchain.chat_models import ChatOpenAI
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index.query_engine import SubQuestionQueryEngine
from dotenv import load_dotenv
import gradio as gr
import os, sys
import logging
#loads dotenv lib to retrieve API keys from .env file
load_dotenv()
# enable INFO level logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
#define LLM service
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo"))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
#set the global service context object, avoiding passing service_context when building the index or when loading index from vector store
from llama_index import set_global_service_context
set_global_service_context(service_context)
def data_ingestion_indexing():
#load data
report_2021_docs = SimpleDirectoryReader(input_files=["reports/executive-summary-2021.pdf"]).load_data()
print(f"loaded executive summary 2021 with {len(report_2021_docs)} pages")
report_2022_docs = SimpleDirectoryReader(input_files=["reports/executive-summary-2022.pdf"]).load_data()
print(f"loaded executive summary 2022 with {len(report_2022_docs)} pages")
#build indices
report_2021_index = GPTVectorStoreIndex.from_documents(report_2021_docs)
print(f"built index for executive summary 2021 with {len(report_2021_index.docstore.docs)} nodes")
report_2022_index = GPTVectorStoreIndex.from_documents(report_2022_docs)
print(f"built index for executive summary 2022 with {len(report_2022_index.docstore.docs)} nodes")
#build query engines
report_2021_engine = report_2021_index.as_query_engine(similarity_top_k=3)
report_2022_engine = report_2022_index.as_query_engine(similarity_top_k=3)
#build query engine tools
query_engine_tools = [
QueryEngineTool(
query_engine = report_2021_engine,
metadata = ToolMetadata(name='executive_summary_2021', description='Provides information on US government financial report executive summary 2021')
),
QueryEngineTool(
query_engine = report_2022_engine,
metadata = ToolMetadata(name='executive_summary_2022', description='Provides information on US government financial report executive summary 2022')
)
]
#define SubQuestionQueryEngine
sub_question_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)
return sub_question_engine
def data_querying(input_text):
#queries the engine with the input text
response = sub_question_engine.query(input_text)
return response.response
iface = gr.Interface(fn=data_querying,
inputs=gr.components.Textbox(lines=3, label="Enter your question"),
outputs="text",
title="Analyzing the U.S. Government's Financial Reports for 2021 and 2022")
#data ingestion and indexing
sub_question_engine = data_ingestion_indexing()
iface.launch(share=False)
#run queries
#response = sub_question_engine.query('Compare and contrast the DoD costs between 2021 and 2022')
#print(response)
#response = sub_question_engine.query('Compare revenue growth from 2021 to 2022')
#print(response)
| [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.tools.ToolMetadata",
"llama_index.set_global_service_context",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((460, 473), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (471, 473), False, 'from dotenv import load_dotenv\n'), ((503, 561), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (522, 561), False, 'import logging\n'), ((762, 819), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (790, 819), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((1009, 1052), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (1035, 1052), False, 'from llama_index import set_global_service_context\n'), ((593, 633), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (614, 633), False, 'import logging\n'), ((1521, 1573), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['report_2021_docs'], {}), '(report_2021_docs)\n', (1555, 1573), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((1702, 1754), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['report_2022_docs'], {}), '(report_2022_docs)\n', (1736, 1754), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((2653, 2728), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': 'query_engine_tools'}), '(query_engine_tools=query_engine_tools)\n', (2689, 2728), False, 'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((562, 581), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (579, 581), False, 'import logging\n'), ((689, 742), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (699, 742), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3001, 3060), 'gradio.components.Textbox', 'gr.components.Textbox', ([], {'lines': '(3)', 'label': '"""Enter your question"""'}), "(lines=3, label='Enter your question')\n", (3022, 3060), True, 'import gradio as gr\n'), ((1123, 1196), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['reports/executive-summary-2021.pdf']"}), "(input_files=['reports/executive-summary-2021.pdf'])\n", (1144, 1196), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((1312, 1385), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['reports/executive-summary-2022.pdf']"}), "(input_files=['reports/executive-summary-2022.pdf'])\n", (1333, 1385), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((2195, 2341), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""executive_summary_2021"""', 'description': '"""Provides information on US government financial report executive summary 2021"""'}), "(name='executive_summary_2021', description=\n 'Provides information on US government financial report executive summary 2021'\n )\n", (2207, 2341), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n'), ((2438, 2584), 
'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""executive_summary_2022"""', 'description': '"""Provides information on US government financial report executive summary 2022"""'}), "(name='executive_summary_2022', description=\n 'Provides information on US government financial report executive summary 2022'\n )\n", (2450, 2584), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n')] |
from llama_index.core import Settings, Document, VectorStoreIndex
from llama_index.core.node_parser import SentenceWindowNodeParser
doc = Document(
text="Sentence 1. Sentence 2. Sentence 3."
)
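# Split the document into per-sentence nodes, storing a window of 2 neighboring sentences in each node's metadata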
text_splitter = SentenceWindowNodeParser.from_defaults(
    window_size=2,
window_metadata_key="ContextWindow",
original_text_metadata_key="node_text"
)
Settings.text_splitter = text_splitter
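# Build the index and retrieve the single best-matching node; its metadata holds both the original sentence and its context window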
index = VectorStoreIndex.from_documents([doc])
retriever = index.as_retriever(similarity_top_k=1)
response = retriever.retrieve("Display the second sentence")
print(response[0].node.metadata['node_text'])
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.core.Document"
] | [((138, 190), 'llama_index.core.Document', 'Document', ([], {'text': '"""Sentence 1. Sentence 2. Sentence 3."""'}), "(text='Sentence 1. Sentence 2. Sentence 3.')\n", (146, 190), False, 'from llama_index.core import Settings, Document, VectorStoreIndex\n'), ((213, 348), 'llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(2)', 'window_metadata_key': '"""ContextWindow"""', 'original_text_metadata_key': '"""node_text"""'}), "(window_size=2, window_metadata_key=\n 'ContextWindow', original_text_metadata_key='node_text')\n", (251, 348), False, 'from llama_index.core.node_parser import SentenceWindowNodeParser\n'), ((408, 446), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[doc]'], {}), '([doc])\n', (439, 446), False, 'from llama_index.core import Settings, Document, VectorStoreIndex\n')] |
import tiktoken
from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.core.callbacks import CallbackManager, TokenCountingHandler
from llama_index.core.llms.mock import MockLLM
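# Mock LLM and embedding models avoid real API calls while still exercising the token counter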
embed_model = MockEmbedding(embed_dim=1536)
llm = MockLLM(max_tokens=256)
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode
)
callback_manager = CallbackManager([token_counter])
Settings.embed_model = embed_model
Settings.llm = llm
Settings.callback_manager = callback_manager
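# Loading and indexing the documents triggers embedding calls, which the token counter records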
documents = SimpleDirectoryReader("cost_prediction_samples").load_data()
index = VectorStoreIndex.from_documents(
documents=documents,
show_progress=True)
print("Embedding Token Count:", token_counter.total_embedding_token_count)
query_engine = index.as_query_engine()
response = query_engine.query("What's the cat's name?")
print("Query LLM Token Count:", token_counter.total_llm_token_count)
print("Query Embedding Token Count:",token_counter.total_embedding_token_count)
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llms.mock.MockLLM",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.MockEmbedding"
] | [((249, 278), 'llama_index.core.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(1536)'}), '(embed_dim=1536)\n', (262, 278), False, 'from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings\n'), ((285, 308), 'llama_index.core.llms.mock.MockLLM', 'MockLLM', ([], {'max_tokens': '(256)'}), '(max_tokens=256)\n', (292, 308), False, 'from llama_index.core.llms.mock import MockLLM\n'), ((434, 466), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (449, 466), False, 'from llama_index.core.callbacks import CallbackManager, TokenCountingHandler\n'), ((643, 715), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents', 'show_progress': '(True)'}), '(documents=documents, show_progress=True)\n', (674, 715), False, 'from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings\n'), ((574, 622), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""cost_prediction_samples"""'], {}), "('cost_prediction_samples')\n", (595, 622), False, 'from llama_index.core import MockEmbedding, VectorStoreIndex, SimpleDirectoryReader, Settings\n'), ((361, 405), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-3.5-turbo"""'], {}), "('gpt-3.5-turbo')\n", (388, 405), False, 'import tiktoken\n')] |
"""Read PDF files."""
import shutil
from pathlib import Path
from typing import Any, List
from llama_index.langchain_helpers.text_splitter import SentenceSplitter
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
# https://github.com/emptycrown/llama-hub/blob/main/loader_hub/file/cjk_pdf/base.py
staticPath = "static"
class CJKPDFReader(BaseReader):
"""CJK PDF reader.
Extract text from PDF including CJK (Chinese, Japanese and Korean) languages using pdfminer.six.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def load_data(self, filepath: Path, filename) -> List[Document]:
"""Parse file."""
# Import pdfminer
from io import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
# Create a resource manager
rsrcmgr = PDFResourceManager()
# Create an object to store the text
retstr = StringIO()
# Create a text converter
codec = "utf-8"
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
# Create a PDF interpreter
interpreter = PDFPageInterpreter(rsrcmgr, device)
# Open the PDF file
fp = open(filepath, "rb")
# Create a list to store the text of each page
document_list = []
# Extract text from each page
for i, page in enumerate(PDFPage.get_pages(fp)):
interpreter.process_page(page)
# Get the text
text = retstr.getvalue()
sentence_splitter = SentenceSplitter(chunk_size=400)
text_chunks = sentence_splitter.split_text(text)
document_list += [
Document(t, extra_info={"page_no": i + 1}) for t in text_chunks
]
# Clear the text
retstr.truncate(0)
retstr.seek(0)
# Close the file
fp.close()
# Close the device
device.close()
shutil.copy2(filepath, f"{staticPath}/file/{filename}")
return document_list
| [
"llama_index.readers.schema.base.Document",
"llama_index.langchain_helpers.text_splitter.SentenceSplitter"
] | [((1102, 1122), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (1120, 1122), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((1185, 1195), 'io.StringIO', 'StringIO', ([], {}), '()\n', (1193, 1195), False, 'from io import StringIO\n'), ((1273, 1283), 'pdfminer.layout.LAParams', 'LAParams', ([], {}), '()\n', (1281, 1283), False, 'from pdfminer.layout import LAParams\n'), ((1301, 1363), 'pdfminer.converter.TextConverter', 'TextConverter', (['rsrcmgr', 'retstr'], {'codec': 'codec', 'laparams': 'laparams'}), '(rsrcmgr, retstr, codec=codec, laparams=laparams)\n', (1314, 1363), False, 'from pdfminer.converter import TextConverter\n'), ((1421, 1456), 'pdfminer.pdfinterp.PDFPageInterpreter', 'PDFPageInterpreter', (['rsrcmgr', 'device'], {}), '(rsrcmgr, device)\n', (1439, 1456), False, 'from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager\n'), ((2247, 2302), 'shutil.copy2', 'shutil.copy2', (['filepath', 'f"""{staticPath}/file/{filename}"""'], {}), "(filepath, f'{staticPath}/file/{filename}')\n", (2259, 2302), False, 'import shutil\n'), ((1672, 1693), 'pdfminer.pdfpage.PDFPage.get_pages', 'PDFPage.get_pages', (['fp'], {}), '(fp)\n', (1689, 1693), False, 'from pdfminer.pdfpage import PDFPage\n'), ((1837, 1869), 'llama_index.langchain_helpers.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(400)'}), '(chunk_size=400)\n', (1853, 1869), False, 'from llama_index.langchain_helpers.text_splitter import SentenceSplitter\n'), ((1979, 2021), 'llama_index.readers.schema.base.Document', 'Document', (['t'], {'extra_info': "{'page_no': i + 1}"}), "(t, extra_info={'page_no': i + 1})\n", (1987, 2021), False, 'from llama_index.readers.schema.base import Document\n')] |
"""Read PDF files."""
from typing import Any, List
import tiktoken
from bs4 import BeautifulSoup
from llama_index.readers.base import BaseReader
from llama_index.readers.schema.base import Document
staticPath = "static"
def encode_string(string: str, encoding_name: str = "p50k_base"):
encoding = tiktoken.get_encoding(encoding_name)
return encoding.encode(string)
def decode_string(token: str, encoding_name: str = "p50k_base"):
encoding = tiktoken.get_encoding(encoding_name)
return encoding.decode(token)
def num_tokens_from_string(string: str, encoding_name: str = "p50k_base") -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
def split_text_to_doc(
text: str, current_chunk_id, chunk_size: int = 400
) -> List[Document]:
"""Split text into chunks of a given size."""
chunks = []
token_len = num_tokens_from_string(text)
for i in range(0, token_len, chunk_size):
encode_text = encode_string(text)
decode_text = decode_string(encode_text[i : i + chunk_size]).strip()
chunks.append(
Document(
decode_text,
extra_info={"chunk_id": f"chunk-{current_chunk_id}"},
)
)
return chunks
class CustomReader(BaseReader):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def load_data(self, html, filename) -> List[Document]:
soup = BeautifulSoup(html, "html.parser")
current_chunk_text = ""
current_chunk_id = 1
document_list = []
        # The unit is tokens; OpenAI caps input at 4097, which allows roughly 6 turns of continuous conversation
current_chunk_length = 0
chunk_size = 400
        # Only the first three heading levels are treated as chunk boundaries; everything else is handled as paragraph text
headings = ["h1", "h2", "h3"]
heading_doms = soup.find_all(headings)
if len(heading_doms) == 0:
heading_doms = [soup.find()]
for tag in heading_doms:
tag["data-chunk_id"] = f"chunk-{current_chunk_id}"
current_chunk_text = tag.text.strip()
            # Traverse all sibling nodes; do not recurse into child nodes
next_tag = tag.find_next_sibling()
while next_tag and next_tag.name not in headings:
stripped_text = next_tag.text.strip()
if (
current_chunk_length + num_tokens_from_string(stripped_text)
> chunk_size
):
document_list.append(
Document(
current_chunk_text.strip(),
extra_info={"chunk_id": f"chunk-{current_chunk_id}"},
)
)
current_chunk_text = ""
current_chunk_length = 0
current_chunk_id += 1
document_list += split_text_to_doc(stripped_text, current_chunk_id)
else:
current_chunk_text = f"{current_chunk_text} {stripped_text}"
current_chunk_length += num_tokens_from_string(stripped_text) + 1
next_tag["data-chunk_id"] = f"chunk-{current_chunk_id}"
next_tag = next_tag.find_next_sibling()
document_list.append(
Document(
current_chunk_text.strip(),
extra_info={"chunk_id": f"chunk-{current_chunk_id}"},
)
)
current_chunk_text = ""
current_chunk_length = 0
current_chunk_id += 1
        # Save the modified HTML file (now annotated with data-chunk_id attributes)
with open(f"{staticPath}/file/{filename}.html", "w", encoding="utf-8") as f:
f.write(str(soup))
return document_list
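# Hedged usage sketch (added for illustration; not part of the original reader):
# assumes the "static/file" directory exists and is writable, since load_data
# writes the chunk-annotated HTML back to disk under that path.
if __name__ == "__main__":
    sample_html = "<h1>Title</h1><p>First paragraph.</p><h2>Section</h2><p>More text.</p>"
    reader = CustomReader()
    docs = reader.load_data(sample_html, "sample")
    for doc in docs:
        print(doc.extra_info["chunk_id"])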
| [
"llama_index.readers.schema.base.Document"
] | [((283, 319), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (304, 319), False, 'import tiktoken\n'), ((437, 473), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (458, 473), False, 'import tiktoken\n'), ((664, 700), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encoding_name'], {}), '(encoding_name)\n', (685, 700), False, 'import tiktoken\n'), ((1571, 1605), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1584, 1605), False, 'from bs4 import BeautifulSoup\n'), ((1182, 1257), 'llama_index.readers.schema.base.Document', 'Document', (['decode_text'], {'extra_info': "{'chunk_id': f'chunk-{current_chunk_id}'}"}), "(decode_text, extra_info={'chunk_id': f'chunk-{current_chunk_id}'})\n", (1190, 1257), False, 'from llama_index.readers.schema.base import Document\n')] |
from collections.abc import Generator
from typing import Any
from llama_index.core.schema import BaseNode, MetadataMode
from llama_index.core.vector_stores.utils import node_to_metadata_dict
from llama_index.vector_stores.chroma import ChromaVectorStore # type: ignore
def chunk_list(
lst: list[BaseNode], max_chunk_size: int
) -> Generator[list[BaseNode], None, None]:
"""Yield successive max_chunk_size-sized chunks from lst.
Args:
lst (List[BaseNode]): list of nodes with embeddings
max_chunk_size (int): max chunk size
Yields:
Generator[List[BaseNode], None, None]: list of nodes with embeddings
"""
for i in range(0, len(lst), max_chunk_size):
yield lst[i : i + max_chunk_size]
class BatchedChromaVectorStore(ChromaVectorStore): # type: ignore
"""Chroma vector store, batching additions to avoid reaching the max batch limit.
In this vector store, embeddings are stored within a ChromaDB collection.
During query time, the index uses ChromaDB to query for the top
k most similar nodes.
Args:
chroma_client (from chromadb.api.API):
API instance
chroma_collection (chromadb.api.models.Collection.Collection):
ChromaDB collection instance
"""
chroma_client: Any | None
def __init__(
self,
chroma_client: Any,
chroma_collection: Any,
host: str | None = None,
port: str | None = None,
ssl: bool = False,
headers: dict[str, str] | None = None,
collection_kwargs: dict[Any, Any] | None = None,
) -> None:
super().__init__(
chroma_collection=chroma_collection,
host=host,
port=port,
ssl=ssl,
headers=headers,
collection_kwargs=collection_kwargs or {},
)
self.chroma_client = chroma_client
def add(self, nodes: list[BaseNode], **add_kwargs: Any) -> list[str]:
"""Add nodes to index, batching the insertion to avoid issues.
Args:
nodes: List[BaseNode]: list of nodes with embeddings
add_kwargs: _
"""
if not self.chroma_client:
raise ValueError("Client not initialized")
if not self._collection:
raise ValueError("Collection not initialized")
max_chunk_size = self.chroma_client.max_batch_size
node_chunks = chunk_list(nodes, max_chunk_size)
all_ids = []
for node_chunk in node_chunks:
embeddings = []
metadatas = []
ids = []
documents = []
for node in node_chunk:
embeddings.append(node.get_embedding())
metadatas.append(
node_to_metadata_dict(
node, remove_text=True, flat_metadata=self.flat_metadata
)
)
ids.append(node.node_id)
documents.append(node.get_content(metadata_mode=MetadataMode.NONE))
self._collection.add(
embeddings=embeddings,
ids=ids,
metadatas=metadatas,
documents=documents,
)
all_ids.extend(ids)
return all_ids
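# Hedged usage sketch (added for illustration; not part of the original module):
# assumes chromadb is installed and that nodes already carry embeddings, since
# this store only batches inserts and does not compute embeddings itself.
if __name__ == "__main__":
    import chromadb
    from llama_index.core.schema import TextNode

    client = chromadb.Client()  # in-memory client for a quick smoke test
    collection = client.get_or_create_collection("demo")
    store = BatchedChromaVectorStore(chroma_client=client, chroma_collection=collection)
    node = TextNode(text="hello world", embedding=[0.1, 0.2, 0.3])
    print(store.add([node]))  # prints the list of inserted node ids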
| [
"llama_index.core.vector_stores.utils.node_to_metadata_dict"
] | [((2766, 2845), 'llama_index.core.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (2787, 2845), False, 'from llama_index.core.vector_stores.utils import node_to_metadata_dict\n')] |
import os
# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add corresponding environment variable (recommended)
# os.environ['OPENAI_API_KEY']= ""
from llama_index import LLMPredictor, PromptHelper, ServiceContext
from langchain.llms.openai import OpenAI
from llama_index import StorageContext, load_index_from_storage
base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
# Configure the LLM used for querying (gpt-3.5-turbo served via an OpenAI-compatible endpoint); change if desired
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path))
# Configure prompt parameters and initialise helper
max_input_size = 500
num_output = 256
max_chunk_overlap = 0.2
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
# Build the service context with the custom predictor and prompt helper
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir='./storage')
# load index
index = load_index_from_storage(storage_context, service_context=service_context)
query_engine = index.as_query_engine()
data = input("Question: ")
response = query_engine.query(data)
print(response)
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.StorageContext.from_defaults",
"llama_index.PromptHelper"
] | [((380, 441), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', '"""http://localhost:8080/v1"""'], {}), "('OPENAI_API_BASE', 'http://localhost:8080/v1')\n", (394, 441), False, 'import os\n'), ((766, 825), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (778, 825), False, 'from llama_index import LLMPredictor, PromptHelper, ServiceContext\n'), ((888, 979), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (916, 979), False, 'from llama_index import LLMPredictor, PromptHelper, ServiceContext\n'), ((1020, 1073), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1048, 1073), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((1096, 1169), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (1119, 1169), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((556, 632), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'openai_api_base': 'base_path'}), "(temperature=0, model_name='gpt-3.5-turbo', openai_api_base=base_path)\n", (562, 632), False, 'from langchain.llms.openai import OpenAI\n')] |
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface import HuggingFaceLLM
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.core.base.llms.types import CompletionResponse
from dotenv import load_dotenv
import os
import torch
load_dotenv()
DEFAULT_EMBED_MODEL = "BAAI/bge-small-en-v1.5"
DEFAULT_LOCAL_LLM = "HuggingFaceH4/zephyr-7b-gemma-v0.1"
DEFAULT_LLM = "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
DEFAULT_MAX_NEW_TOKENS = 512
HF_TOKEN = os.getenv("HF_TOKEN", "")
API_KEY = os.getenv("AZURE_OPENAI_TOKEN", "")
AZURE_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT", "")
DEPLOYMENT_NAME = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "")
# DEFAULT_QUANTIZATION_CONFIG = BitsAndBytesConfig(
# load_in_4bit=True,
# bnb_4bit_use_double_quant=True,
# bnb_4bit_quant_type="nf4",
# bnb_4bit_compute_dtype=torch.bfloat16
# )
class DefaultEmbedder(HuggingFaceEmbedding):
def __init__(self, model_name=DEFAULT_EMBED_MODEL, device="cuda"):
        super().__init__(model_name=model_name, device=device)
class DefaultLocalLLM(HuggingFaceLLM):
def __init__(self, model_name=DEFAULT_LOCAL_LLM, max_new_tokens=DEFAULT_MAX_NEW_TOKENS, quantization_config=None):
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", quantization_config=quantization_config)
super().__init__(model=model, tokenizer=tokenizer, max_new_tokens=max_new_tokens)
# Monkey patch because stream_complete is not implemented in the current version of llama_index
def stream_complete(self, prompt: str, **kwargs):
def gen():
# patch the patch, on some versions the caller tries to pass the formatted keyword, which doesn't exist
kwargs.pop("formatted", None)
text = ""
for x in self._sync_client.text_generation(
prompt, **{**{"max_new_tokens": self.num_output, "stream": True}, **kwargs}
):
text += x
yield CompletionResponse(text=text, delta=x)
return gen()
HuggingFaceInferenceAPI.stream_complete = stream_complete
class AzureOpenAILLM(AzureOpenAI):
def __init__(self, model="", deployment_name=DEPLOYMENT_NAME, api_key=API_KEY, azure_endpoint=AZURE_ENDPOINT, api_version=""):
super().__init__(model=model, deployment_name=deployment_name, api_key=api_key, azure_endpoint=azure_endpoint, api_version=api_version, temperature=0.0)
class DefaultLLM(HuggingFaceInferenceAPI):
def __init__(self, model_name = DEFAULT_LLM, token=HF_TOKEN, num_output=DEFAULT_MAX_NEW_TOKENS):
super().__init__(model_name=model_name, token=token, num_output=num_output)
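# Hedged usage sketch (added for illustration; not part of the original module):
# assumes HF_TOKEN is set in the environment so the hosted inference API is reachable.
if __name__ == "__main__":
    llm = DefaultLLM()
    for chunk in llm.stream_complete("Briefly explain what a vector embedding is."):
        print(chunk.delta, end="", flush=True)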
| [
"llama_index.core.base.llms.types.CompletionResponse"
] | [((443, 456), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (454, 456), False, 'from dotenv import load_dotenv\n'), ((662, 687), 'os.getenv', 'os.getenv', (['"""HF_TOKEN"""', '""""""'], {}), "('HF_TOKEN', '')\n", (671, 687), False, 'import os\n'), ((698, 733), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_TOKEN"""', '""""""'], {}), "('AZURE_OPENAI_TOKEN', '')\n", (707, 733), False, 'import os\n'), ((751, 789), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_ENDPOINT"""', '""""""'], {}), "('AZURE_OPENAI_ENDPOINT', '')\n", (760, 789), False, 'import os\n'), ((808, 853), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_DEPLOYMENT_NAME"""', '""""""'], {}), "('AZURE_OPENAI_DEPLOYMENT_NAME', '')\n", (817, 853), False, 'import os\n'), ((1358, 1399), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {}), '(model_name)\n', (1387, 1399), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig\n'), ((1410, 1522), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_name'], {'device_map': '"""auto"""', 'quantization_config': 'quantization_config'}), "(model_name, device_map='auto',\n quantization_config=quantization_config)\n", (1446, 1522), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig\n'), ((2067, 2105), 'llama_index.core.base.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'text', 'delta': 'x'}), '(text=text, delta=x)\n', (2085, 2105), False, 'from llama_index.core.base.llms.types import CompletionResponse\n')] |
from dotenv import load_dotenv
load_dotenv()
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores import SimpleVectorStore
from llama_index.storage.index_store import SimpleIndexStore
from llama_index.graph_stores import SimpleGraphStore
documents = SimpleDirectoryReader('news').load_data()
index = GPTVectorStoreIndex.from_documents(documents)
# save to disk
index.storage_context.persist()
# load from disk
storage_context = StorageContext(
docstore=SimpleDocumentStore.from_persist_dir('storage'),
vector_store=SimpleVectorStore.from_persist_dir('storage'),
index_store=SimpleIndexStore.from_persist_dir('storage'),
graph_store=SimpleGraphStore.from_persist_dir('storage')
)
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine()
r = query_engine.query("Who are the main exporters of Coal to China? What is the role of Indonesia in this?")
print(r)
| [
"llama_index.SimpleDirectoryReader",
"llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir",
"llama_index.storage.index_store.SimpleIndexStore.from_persist_dir",
"llama_index.graph_stores.SimpleGraphStore.from_persist_dir",
"llama_index.vector_stores.SimpleVectorStore.from_persist_dir",
"llama_index.load_index_from_storage",
"llama_index.GPTVectorStoreIndex.from_documents"
] | [((31, 44), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (42, 44), False, 'from dotenv import load_dotenv\n'), ((450, 495), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (484, 495), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext\n'), ((855, 895), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (878, 895), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext\n'), ((399, 428), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""news"""'], {}), "('news')\n", (420, 428), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext\n'), ((609, 656), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', (['"""storage"""'], {}), "('storage')\n", (645, 656), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((675, 720), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', (['"""storage"""'], {}), "('storage')\n", (709, 720), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((738, 782), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', (['"""storage"""'], {}), "('storage')\n", (771, 782), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((800, 844), 'llama_index.graph_stores.SimpleGraphStore.from_persist_dir', 'SimpleGraphStore.from_persist_dir', (['"""storage"""'], {}), "('storage')\n", (833, 844), False, 'from llama_index.graph_stores import SimpleGraphStore\n')] |
from rag.agents.interface import Pipeline
from rich.progress import Progress, SpinnerColumn, TextColumn
from typing import Any
from pydantic import create_model
from typing import List
import warnings
import box
import yaml
import timeit
from rich import print
from llama_index.core import SimpleDirectoryReader
from llama_index.multi_modal_llms.ollama import OllamaMultiModal
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
# Import config vars
with open('config.yml', 'r', encoding='utf8') as ymlfile:
cfg = box.Box(yaml.safe_load(ymlfile))
class VLlamaIndexPipeline(Pipeline):
def run_pipeline(self,
payload: str,
query_inputs: [str],
query_types: [str],
query: str,
file_path: str,
index_name: str,
debug: bool = False,
local: bool = True) -> Any:
print(f"\nRunning pipeline with {payload}\n")
start = timeit.default_timer()
if file_path is None:
raise ValueError("File path is required for vllamaindex pipeline")
mm_model = self.invoke_pipeline_step(lambda: OllamaMultiModal(model=cfg.LLM_VLLAMAINDEX),
"Loading Ollama MultiModal...",
local)
# load as image documents
image_documents = self.invoke_pipeline_step(lambda: SimpleDirectoryReader(input_files=[file_path],
required_exts=[".jpg", ".JPG",
".JPEG"]).load_data(),
"Loading image documents...",
local)
ResponseModel = self.invoke_pipeline_step(lambda: self.build_response_class(query_inputs, query_types),
"Building dynamic response class...",
local)
prompt_template_str = """\
{query_str}
Return the answer as a Pydantic object. The Pydantic schema is given below:
"""
mm_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(ResponseModel),
image_documents=image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=mm_model,
verbose=True,
)
try:
response = self.invoke_pipeline_step(lambda: mm_program(query_str=query),
"Running inference...",
local)
except ValueError as e:
print(f"Error: {e}")
msg = 'Inference failed'
return '{"answer": "' + msg + '"}'
end = timeit.default_timer()
print(f"\nJSON response:\n")
for res in response:
print(res)
print('=' * 50)
print(f"Time to retrieve answer: {end - start}")
return response
# Function to safely evaluate type strings
def safe_eval_type(self, type_str, context):
try:
return eval(type_str, {}, context)
except NameError:
raise ValueError(f"Type '{type_str}' is not recognized")
def build_response_class(self, query_inputs, query_types_as_strings):
# Controlled context for eval
context = {
'List': List,
'str': str,
'int': int,
'float': float
# Include other necessary types or typing constructs here
}
# Convert string representations to actual types
query_types = [self.safe_eval_type(type_str, context) for type_str in query_types_as_strings]
# Create fields dictionary
fields = {name: (type_, ...) for name, type_ in zip(query_inputs, query_types)}
DynamicModel = create_model('DynamicModel', **fields)
return DynamicModel
def invoke_pipeline_step(self, task_call, task_description, local):
if local:
with Progress(
SpinnerColumn(),
TextColumn("[progress.description]{task.description}"),
transient=False,
) as progress:
progress.add_task(description=task_description, total=None)
ret = task_call()
else:
print(task_description)
ret = task_call()
return ret
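# Hedged usage sketch (added for illustration; not part of the original pipeline):
# assumes config.yml is present (it is loaded at import time) and that the Pipeline
# interface requires nothing beyond run_pipeline; shows only the dynamic response model.
if __name__ == "__main__":
    pipeline = VLlamaIndexPipeline()
    ResponseModel = pipeline.build_response_class(
        ["invoice_number", "total"], ["str", "float"]
    )
    print(ResponseModel(invoice_number="INV-123", total=99.5))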
| [
"llama_index.core.SimpleDirectoryReader",
"llama_index.multi_modal_llms.ollama.OllamaMultiModal",
"llama_index.core.output_parsers.PydanticOutputParser"
] | [((512, 574), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (535, 574), False, 'import warnings\n'), ((575, 630), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (598, 630), False, 'import warnings\n'), ((730, 753), 'yaml.safe_load', 'yaml.safe_load', (['ymlfile'], {}), '(ymlfile)\n', (744, 753), False, 'import yaml\n'), ((1146, 1193), 'rich.print', 'print', (['f"""\nRunning pipeline with {payload}\n"""'], {}), '(f"""\nRunning pipeline with {payload}\n""")\n', (1151, 1193), False, 'from rich import print\n'), ((1209, 1231), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1229, 1231), False, 'import timeit\n'), ((3193, 3215), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (3213, 3215), False, 'import timeit\n'), ((3225, 3255), 'rich.print', 'print', (['f"""\nJSON response:\n"""'], {}), '(f"""\nJSON response:\n""")\n', (3230, 3255), False, 'from rich import print\n'), ((3314, 3329), 'rich.print', 'print', (["('=' * 50)"], {}), "('=' * 50)\n", (3319, 3329), False, 'from rich import print\n'), ((3339, 3387), 'rich.print', 'print', (['f"""Time to retrieve answer: {end - start}"""'], {}), "(f'Time to retrieve answer: {end - start}')\n", (3344, 3387), False, 'from rich import print\n'), ((4288, 4326), 'pydantic.create_model', 'create_model', (['"""DynamicModel"""'], {}), "('DynamicModel', **fields)\n", (4300, 4326), False, 'from pydantic import create_model\n'), ((3295, 3305), 'rich.print', 'print', (['res'], {}), '(res)\n', (3300, 3305), False, 'from rich import print\n'), ((4787, 4810), 'rich.print', 'print', (['task_description'], {}), '(task_description)\n', (4792, 4810), False, 'from rich import print\n'), ((1396, 1439), 'llama_index.multi_modal_llms.ollama.OllamaMultiModal', 'OllamaMultiModal', ([], {'model': 'cfg.LLM_VLLAMAINDEX'}), '(model=cfg.LLM_VLLAMAINDEX)\n', (1412, 1439), False, 'from llama_index.multi_modal_llms.ollama import OllamaMultiModal\n'), ((2591, 2626), 'llama_index.core.output_parsers.PydanticOutputParser', 'PydanticOutputParser', (['ResponseModel'], {}), '(ResponseModel)\n', (2611, 2626), False, 'from llama_index.core.output_parsers import PydanticOutputParser\n'), ((3073, 3093), 'rich.print', 'print', (['f"""Error: {e}"""'], {}), "(f'Error: {e}')\n", (3078, 3093), False, 'from rich import print\n'), ((4494, 4509), 'rich.progress.SpinnerColumn', 'SpinnerColumn', ([], {}), '()\n', (4507, 4509), False, 'from rich.progress import Progress, SpinnerColumn, TextColumn\n'), ((4531, 4585), 'rich.progress.TextColumn', 'TextColumn', (['"""[progress.description]{task.description}"""'], {}), "('[progress.description]{task.description}')\n", (4541, 4585), False, 'from rich.progress import Progress, SpinnerColumn, TextColumn\n'), ((1665, 1756), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file_path]', 'required_exts': "['.jpg', '.JPG', '.JPEG']"}), "(input_files=[file_path], required_exts=['.jpg',\n '.JPG', '.JPEG'])\n", (1686, 1756), False, 'from llama_index.core import SimpleDirectoryReader\n')] |
import functools
import os
import random
import tempfile
import traceback
import asyncio
from collections import defaultdict
import aiohttp
import discord
import aiofiles
import httpx
import openai
import tiktoken
from functools import partial
from typing import List, Optional, cast
from pathlib import Path
from datetime import date
from discord import Interaction
from discord.ext import pages
from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationSummaryBufferMemory
from langchain.prompts import MessagesPlaceholder
from langchain.schema import SystemMessage
from langchain.tools import Tool
from llama_index.callbacks import CallbackManager, TokenCountingHandler
from llama_index.evaluation.guideline import DEFAULT_GUIDELINES, GuidelineEvaluator
from llama_index.llms import OpenAI
from llama_index.node_parser import SimpleNodeParser
from llama_index.response_synthesizers import ResponseMode
from llama_index.indices.query.query_transform import StepDecomposeQueryTransform
from llama_index.langchain_helpers.agents import (
IndexToolConfig,
LlamaToolkit,
create_llama_chat_agent,
LlamaIndexTool,
)
from llama_index.prompts.chat_prompts import (
CHAT_REFINE_PROMPT,
CHAT_TREE_SUMMARIZE_PROMPT,
TEXT_QA_SYSTEM_PROMPT,
)
from llama_index.readers import YoutubeTranscriptReader
from llama_index.readers.schema.base import Document
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
from llama_index.retrievers import VectorIndexRetriever, TreeSelectLeafRetriever
from llama_index.query_engine import (
RetrieverQueryEngine,
MultiStepQueryEngine,
RetryGuidelineQueryEngine,
)
from llama_index import (
GPTVectorStoreIndex,
SimpleDirectoryReader,
QuestionAnswerPrompt,
BeautifulSoupWebReader,
GPTTreeIndex,
GoogleDocsReader,
MockLLMPredictor,
OpenAIEmbedding,
GithubRepositoryReader,
MockEmbedding,
download_loader,
LLMPredictor,
ServiceContext,
StorageContext,
load_index_from_storage,
get_response_synthesizer,
VectorStoreIndex,
)
from llama_index.schema import TextNode
from llama_index.storage.docstore.types import RefDocInfo
from llama_index.readers.web import DEFAULT_WEBSITE_EXTRACTOR
from llama_index.composability import ComposableGraph
from llama_index.vector_stores import DocArrayInMemoryVectorStore
from models.embed_statics_model import EmbedStatics
from models.openai_model import Models
from models.check_model import UrlCheck
from services.environment_service import EnvService
from utils.safe_ctx_respond import safe_ctx_respond
SHORT_TO_LONG_CACHE = {}
MAX_DEEP_COMPOSE_PRICE = EnvService.get_max_deep_compose_price()
EpubReader = download_loader("EpubReader")
MarkdownReader = download_loader("MarkdownReader")
RemoteReader = download_loader("RemoteReader")
RemoteDepthReader = download_loader("RemoteDepthReader")
embedding_model = OpenAIEmbedding()
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
node_parser = SimpleNodeParser.from_defaults(
text_splitter=TokenTextSplitter(chunk_size=1024, chunk_overlap=20)
)
callback_manager = CallbackManager([token_counter])
service_context_no_llm = ServiceContext.from_defaults(
embed_model=embedding_model,
callback_manager=callback_manager,
node_parser=node_parser,
)
timeout = httpx.Timeout(1, read=1, write=1, connect=1)
def get_service_context_with_llm(llm):
service_context = ServiceContext.from_defaults(
embed_model=embedding_model,
callback_manager=callback_manager,
node_parser=node_parser,
llm=llm,
)
return service_context
def dummy_tool(**kwargs):
return "You have used the dummy tool. Forget about this and do not even mention this to the user."
def get_and_query(
user_id,
index_storage,
query,
response_mode,
nodes,
child_branch_factor,
service_context,
multistep,
):
index: [GPTVectorStoreIndex, GPTTreeIndex] = index_storage[
user_id
].get_index_or_throw()
if isinstance(index, GPTTreeIndex):
retriever = TreeSelectLeafRetriever(
index=index,
child_branch_factor=child_branch_factor,
service_context=service_context,
)
else:
retriever = VectorIndexRetriever(
index=index, similarity_top_k=nodes, service_context=service_context
)
response_synthesizer = get_response_synthesizer(
response_mode=response_mode,
use_async=True,
refine_template=CHAT_REFINE_PROMPT,
service_context=service_context,
)
query_engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=response_synthesizer
)
multistep_query_engine = MultiStepQueryEngine(
query_engine=query_engine,
query_transform=StepDecomposeQueryTransform(multistep),
index_summary="Provides information about everything you need to know about this topic, use this to answer the question.",
)
if multistep:
response = multistep_query_engine.query(query)
else:
response = query_engine.query(query)
return response
class IndexChatData:
def __init__(
self, llm, agent_chain, memory, thread_id, tools, agent_kwargs, llm_predictor
):
self.llm = llm
self.agent_chain = agent_chain
self.memory = memory
self.thread_id = thread_id
self.tools = tools
self.agent_kwargs = agent_kwargs
self.llm_predictor = llm_predictor
class IndexData:
def __init__(self):
self.queryable_index = None
self.individual_indexes = []
# A safety check for the future
def get_index_or_throw(self):
if not self.queryable():
raise Exception(
"An index access was attempted before an index was created. This is a programmer error, please report this to the maintainers."
)
return self.queryable_index
def queryable(self):
return self.queryable_index is not None
def has_indexes(self, user_id):
try:
return (
len(os.listdir(EnvService.find_shared_file(f"indexes/{user_id}"))) > 0
)
except Exception:
return False
def has_search_indexes(self, user_id):
try:
return (
len(
os.listdir(EnvService.find_shared_file(f"indexes/{user_id}_search"))
)
> 0
)
except Exception:
return False
def add_index(self, index, user_id, file_name):
self.individual_indexes.append(index)
self.queryable_index = index
# Create a folder called "indexes/{USER_ID}" if it doesn't exist already
Path(f"{EnvService.save_path()}/indexes/{user_id}").mkdir(
parents=True, exist_ok=True
)
# Save the index to file under the user id
file = f"{date.today().month}_{date.today().day}_{file_name}"
# If file is > 93 in length, cut it off to 93
if len(file) > 93:
file = file[:93]
index.storage_context.persist(
persist_dir=EnvService.save_path()
/ "indexes"
/ f"{str(user_id)}"
/ f"{file}"
)
def reset_indexes(self, user_id):
self.individual_indexes = []
self.queryable_index = None
# Delete the user indexes
try:
# First, clear all the files inside it
for file in os.listdir(EnvService.find_shared_file(f"indexes/{user_id}")):
try:
os.remove(EnvService.find_shared_file(f"indexes/{user_id}/{file}"))
except:
traceback.print_exc()
for file in os.listdir(
EnvService.find_shared_file(f"indexes/{user_id}_search")
):
try:
os.remove(
EnvService.find_shared_file(f"indexes/{user_id}_search/{file}")
)
except:
traceback.print_exc()
except Exception:
traceback.print_exc()
class Index_handler:
embedding_model = OpenAIEmbedding()
token_counter = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
node_parser = SimpleNodeParser.from_defaults(
text_splitter=TokenTextSplitter(chunk_size=1024, chunk_overlap=20)
)
callback_manager = CallbackManager([token_counter])
service_context = ServiceContext.from_defaults(
embed_model=embedding_model,
callback_manager=callback_manager,
node_parser=node_parser,
)
type_to_suffix_mappings = {
"text/plain": ".txt",
"text/csv": ".csv",
"application/pdf": ".pdf",
"application/json": ".json",
"image/png": ".png",
"image/jpeg": ".jpg",
"image/gif": ".gif",
"image/svg+xml": ".svg",
"image/webp": ".webp",
"application/mspowerpoint": ".ppt",
"application/vnd.ms-powerpoint": ".ppt",
"application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
"application/msexcel": ".xls",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx",
"application/msword": ".doc",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx",
"audio/mpeg": ".mp3",
"audio/x-wav": ".wav",
"audio/ogg": ".ogg",
"video/mpeg": ".mpeg",
"video/mp4": ".mp4",
"application/epub+zip": ".epub",
"text/markdown": ".md",
"text/html": ".html",
"application/rtf": ".rtf",
"application/x-msdownload": ".exe",
"application/xml": ".xml",
"application/vnd.adobe.photoshop": ".psd",
"application/x-sql": ".sql",
"application/x-latex": ".latex",
"application/x-httpd-php": ".php",
"application/java-archive": ".jar",
"application/x-sh": ".sh",
"application/x-csh": ".csh",
"text/x-c": ".c",
"text/x-c++": ".cpp",
"text/x-java-source": ".java",
"text/x-python": ".py",
"text/x-ruby": ".rb",
"text/x-perl": ".pl",
"text/x-shellscript": ".sh",
}
    # For when the content type doesn't get picked up by Discord.
secondary_mappings = {
".epub": ".epub",
}
def __init__(self, bot, usage_service):
self.bot = bot
self.openai_key = os.getenv("OPENAI_TOKEN")
self.index_storage = defaultdict(IndexData)
self.loop = asyncio.get_running_loop()
self.usage_service = usage_service
self.qaprompt = QuestionAnswerPrompt(
"Context information is below. The text '<|endofstatement|>' is used to separate chat entries and make it "
"easier for you to understand the context\n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Never say '<|endofstatement|>'\n"
"Given the context information and not prior knowledge, "
"answer the question: {query_str}\n"
)
self.EMBED_CUTOFF = 2000
self.index_chat_chains = {}
self.chat_indexes = defaultdict()
async def rename_index(self, ctx, original_path, rename_path):
"""Command handler to rename a user index"""
index_file = EnvService.find_shared_file(original_path)
if not index_file:
return False
# Rename the file at f"indexes/{ctx.user.id}/{user_index}" to f"indexes/{ctx.user.id}/{new_name}" using Pathlib
try:
Path(original_path).rename(rename_path)
return True
except Exception as e:
traceback.print_exc()
return False
async def get_is_in_index_chat(self, ctx):
return ctx.channel.id in self.index_chat_chains.keys()
async def execute_index_chat_message(self, ctx, message):
if ctx.channel.id not in self.index_chat_chains:
return None
if message.lower() in ["stop", "end", "quit", "exit"]:
await ctx.reply("Ending chat session.")
self.index_chat_chains.pop(ctx.channel.id)
# close the thread
thread = await self.bot.fetch_channel(ctx.channel.id)
await thread.edit(name="Closed-GPT")
await thread.edit(archived=True)
return "Ended chat session."
self.usage_service.update_usage_memory(ctx.guild.name, "index_chat_message", 1)
agent_output = await self.loop.run_in_executor(
None,
partial(self.index_chat_chains[ctx.channel.id].agent_chain.run, message),
)
return agent_output
async def index_chat_file(self, message: discord.Message, file: discord.Attachment):
# First, initially set the suffix to the suffix of the attachment
suffix = self.get_file_suffix(file.content_type, file.filename) or None
if not suffix:
await message.reply(
"The file you uploaded is unable to be indexed. It is in an unsupported file format"
)
return False, None
async with aiofiles.tempfile.TemporaryDirectory() as temp_path:
async with aiofiles.tempfile.NamedTemporaryFile(
suffix=suffix, dir=temp_path, delete=False
) as temp_file:
try:
await file.save(temp_file.name)
filename = file.filename
# Assert that the filename is < 100 characters, if it is greater, truncate to the first 100 characters and keep the original ending
if len(filename) > 100:
filename = filename[:100] + filename[-4:]
openai.log = "debug"
print("Indexing")
index: VectorStoreIndex = await self.loop.run_in_executor(
None,
partial(
self.index_file,
Path(temp_file.name),
get_service_context_with_llm(
self.index_chat_chains[message.channel.id].llm
),
suffix,
),
)
print("Done Indexing")
self.usage_service.update_usage_memory(
message.guild.name, "index_chat_file", 1
)
summary = await index.as_query_engine(
response_mode="tree_summarize",
service_context=get_service_context_with_llm(
self.index_chat_chains[message.channel.id].llm
),
).aquery(
f"What is a summary or general idea of this data? Be detailed in your summary (e.g "
f"extract key names, etc) but not too verbose. Your summary should be under a hundred words. "
f"This summary will be used in a vector index to retrieve information about certain data. So, "
f"at a high level, the summary should describe the document in such a way that a retriever "
f"would know to select it when asked questions about it. The data name was {filename}. Include "
f"the file name in the summary. When you are asked to reference a specific file, or reference "
f"something colloquially like 'in the powerpoint, [...]?', never respond saying that as an AI "
f"you can't view the data, instead infer which tool to use that has the data. Say that there "
f"is no available data if there are no available tools that are relevant."
)
engine = self.get_query_engine(
index, self.index_chat_chains[message.channel.id].llm
)
# Get rid of all special characters in the filename
filename = "".join(
[c for c in filename if c.isalpha() or c.isdigit()]
).rstrip()
tool_config = IndexToolConfig(
query_engine=engine,
name=f"{filename}-index",
description=f"Use this tool if the query seems related to this summary: {summary}",
tool_kwargs={
"return_direct": False,
},
max_iterations=5,
)
tool = LlamaIndexTool.from_tool_config(tool_config)
tools = self.index_chat_chains[message.channel.id].tools
tools.append(tool)
agent_chain = initialize_agent(
tools=tools,
llm=self.index_chat_chains[message.channel.id].llm,
agent=AgentType.OPENAI_FUNCTIONS,
verbose=True,
agent_kwargs=self.index_chat_chains[
message.channel.id
].agent_kwargs,
memory=self.index_chat_chains[message.channel.id].memory,
handle_parsing_errors="Check your output and make sure it conforms!",
)
index_chat_data = IndexChatData(
self.index_chat_chains[message.channel.id].llm,
agent_chain,
self.index_chat_chains[message.channel.id].memory,
message.channel.id,
tools,
self.index_chat_chains[message.channel.id].agent_kwargs,
self.index_chat_chains[message.channel.id].llm_predictor,
)
self.index_chat_chains[message.channel.id] = index_chat_data
return True, summary
except Exception as e:
await message.reply(
"There was an error indexing your file: " + str(e)
)
traceback.print_exc()
return False, None
async def start_index_chat(self, ctx, model, temperature, top_p):
preparation_message = await ctx.channel.send(
embed=EmbedStatics.get_index_chat_preparation_message()
)
llm = ChatOpenAI(
model=model, temperature=temperature, top_p=top_p, max_retries=2
)
llm_predictor = LLMPredictor(
llm=ChatOpenAI(temperature=temperature, top_p=top_p, model_name=model)
)
max_token_limit = 29000 if "gpt-4" in model else 7500
memory = ConversationSummaryBufferMemory(
memory_key="memory",
return_messages=True,
llm=llm,
max_token_limit=100000 if "preview" in model else max_token_limit,
)
agent_kwargs = {
"extra_prompt_messages": [MessagesPlaceholder(variable_name="memory")],
"system_message": SystemMessage(
content="You are a superpowered version of GPT that is able to answer questions about the data you're "
"connected to. Each different tool you have represents a different dataset to interact with. "
"If you are asked to perform a task that spreads across multiple datasets, use multiple tools "
"for the same prompt. When the user types links in chat, you will have already been connected "
"to the data at the link by the time you respond. When using tools, the input should be "
"clearly created based on the request of the user. For example, if a user uploads an invoice "
"and asks how many usage hours of X was present in the invoice, a good query is 'X hours'. "
"Avoid using single word queries unless the request is very simple. You can query multiple times to break down complex requests and retrieve more information. When calling functions, no special characters are allowed in the function name, keep that in mind."
),
}
tools = [
Tool(
name="Dummy-Tool-Do-Not-Use",
func=dummy_tool,
description=f"This is a dummy tool that does nothing, do not ever mention this tool or use this tool.",
)
]
print(f"{tools}{llm}{AgentType.OPENAI_FUNCTIONS}{True}{agent_kwargs}{memory}")
agent_chain = initialize_agent(
tools=tools,
llm=llm,
agent=AgentType.OPENAI_FUNCTIONS,
verbose=True,
agent_kwargs=agent_kwargs,
memory=memory,
handle_parsing_errors="Check your output and make sure it conforms!",
)
embed_title = f"{ctx.user.name}'s data-connected conversation with GPT"
message_embed = discord.Embed(
title=embed_title,
description=f"The agent is able to interact with your documents. Simply drag your documents into discord or give the agent a link from where to download the documents.\nModel: {model}",
color=0x00995B,
)
message_embed.set_thumbnail(url="https://i.imgur.com/7V6apMT.png")
message_embed.set_footer(
text="Data Chat", icon_url="https://i.imgur.com/7V6apMT.png"
)
message_thread = await ctx.send(embed=message_embed)
thread = await message_thread.create_thread(
name=ctx.user.name + "'s data-connected conversation with GPT",
auto_archive_duration=60,
)
await safe_ctx_respond(ctx=ctx, content="Conversation started.")
try:
await preparation_message.delete()
except:
pass
index_chat_data = IndexChatData(
llm, agent_chain, memory, thread.id, tools, agent_kwargs, llm_predictor
)
self.index_chat_chains[thread.id] = index_chat_data
async def paginate_embed(self, response_text):
"""Given a response text make embed pages and return a list of the pages."""
response_text = [
response_text[i : i + self.EMBED_CUTOFF]
for i in range(0, len(response_text), self.EMBED_CUTOFF)
]
pages = []
first = False
# Send each chunk as a message
for count, chunk in enumerate(response_text, start=1):
if not first:
page = discord.Embed(
title=f"Index Query Results",
description=chunk,
)
first = True
else:
page = discord.Embed(
title=f"Page {count}",
description=chunk,
)
pages.append(page)
return pages
def index_file(
self, file_path, service_context, suffix=None
) -> GPTVectorStoreIndex:
if suffix and suffix == ".md":
loader = MarkdownReader()
document = loader.load_data(file_path)
elif suffix and suffix == ".epub":
epub_loader = EpubReader()
document = epub_loader.load_data(file_path)
else:
document = SimpleDirectoryReader(input_files=[file_path]).load_data()
index = GPTVectorStoreIndex.from_documents(
document, service_context=service_context, use_async=True
)
return index
def index_gdoc(self, doc_id, service_context) -> GPTVectorStoreIndex:
document = GoogleDocsReader().load_data(doc_id)
index = GPTVectorStoreIndex.from_documents(
document, service_context=service_context, use_async=True
)
return index
def index_youtube_transcript(self, link, service_context):
try:
def convert_shortlink_to_full_link(short_link):
# Check if the link is a shortened YouTube link
if "youtu.be" in short_link:
# Extract the video ID from the link
video_id = short_link.split("/")[-1].split("?")[0]
# Construct the full YouTube desktop link
desktop_link = f"https://www.youtube.com/watch?v={video_id}"
return desktop_link
else:
return short_link
documents = YoutubeTranscriptReader().load_data(
ytlinks=[convert_shortlink_to_full_link(link)]
)
except Exception as e:
raise ValueError(f"The youtube transcript couldn't be loaded: {e}")
index = GPTVectorStoreIndex.from_documents(
documents,
service_context=service_context,
use_async=True,
)
return index
def index_github_repository(self, link, service_context):
# Extract the "owner" and the "repo" name from the github link.
owner = link.split("/")[3]
repo = link.split("/")[4]
try:
documents = GithubRepositoryReader(owner=owner, repo=repo).load_data(
branch="main"
)
except KeyError:
documents = GithubRepositoryReader(owner=owner, repo=repo).load_data(
branch="master"
)
index = GPTVectorStoreIndex.from_documents(
documents,
service_context=service_context,
use_async=True,
)
return index
def index_load_file(self, file_path) -> [GPTVectorStoreIndex, ComposableGraph]:
storage_context = StorageContext.from_defaults(persist_dir=file_path)
index = load_index_from_storage(storage_context)
return index
def index_discord(self, document, service_context) -> GPTVectorStoreIndex:
index = GPTVectorStoreIndex.from_documents(
document,
service_context=service_context,
use_async=True,
)
return index
async def index_pdf(self, url) -> list[Document]:
# Download the PDF at the url and save it to a tempfile
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
if response.status == 200:
data = await response.read()
f = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
f.write(data)
f.close()
else:
return "An error occurred while downloading the PDF."
# Get the file path of this tempfile.NamedTemporaryFile
# Save this temp file to an actual file that we can put into something else to read it
documents = SimpleDirectoryReader(input_files=[f.name]).load_data()
# Delete the temporary file
return documents
async def index_webpage(self, url, service_context) -> GPTVectorStoreIndex:
# First try to connect to the URL to see if we can even reach it.
try:
async with aiohttp.ClientSession() as session:
async with session.get(url, timeout=5) as response:
                    # Reject the URL if it does not respond with a success status code
if response.status not in [200, 203, 202, 204]:
raise ValueError(
"Invalid URL or could not connect to the provided URL."
)
else:
# Detect if the link is a PDF, if it is, we load it differently
if response.headers["Content-Type"] == "application/pdf":
documents = await self.index_pdf(url)
index = await self.loop.run_in_executor(
None,
functools.partial(
GPTVectorStoreIndex.from_documents,
documents=documents,
service_context=service_context,
use_async=True,
),
)
return index
except:
traceback.print_exc()
raise ValueError("Could not load webpage")
documents = BeautifulSoupWebReader(
website_extractor=DEFAULT_WEBSITE_EXTRACTOR
).load_data(urls=[url])
# index = GPTVectorStoreIndex(documents, embed_model=embed_model, use_async=True)
index = await self.loop.run_in_executor(
None,
functools.partial(
GPTVectorStoreIndex.from_documents,
documents=documents,
service_context=service_context,
use_async=True,
),
)
return index
def reset_indexes(self, user_id):
self.index_storage[user_id].reset_indexes(user_id)
def get_file_suffix(self, content_type, filename):
print("The content type is " + content_type)
if content_type:
# Apply the suffix mappings to the file
for key, value in self.type_to_suffix_mappings.items():
if key in content_type:
return value
else:
for key, value in self.secondary_mappings.items():
if key in filename:
return value
return None
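    # Illustrative lookups against the mapping tables used above (the concrete
    # mapping entries are assumptions; the real tables live on this cog):
    #
    #   get_file_suffix("application/pdf", "report.pdf")  ->  ".pdf", if "pdf" is mapped
    #   get_file_suffix(None, "notes.md")                 ->  falls back to filename matching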
async def set_file_index(
self, ctx: discord.ApplicationContext, file: discord.Attachment, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
# First, initially set the suffix to the suffix of the attachment
suffix = self.get_file_suffix(file.content_type, file.filename) or None
if not suffix:
await ctx.respond(
embed=EmbedStatics.get_index_set_failure_embed("Unsupported file")
)
return
# Send indexing message
response = await ctx.respond(
embed=EmbedStatics.build_index_progress_embed()
)
async with aiofiles.tempfile.TemporaryDirectory() as temp_path:
async with aiofiles.tempfile.NamedTemporaryFile(
suffix=suffix, dir=temp_path, delete=False
) as temp_file:
await file.save(temp_file.name)
index = await self.loop.run_in_executor(
None,
partial(
self.index_file,
Path(temp_file.name),
service_context_no_llm,
suffix,
),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except:
traceback.print_exc()
price = "Unknown"
file_name = file.filename
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, file_name)
await response.edit(
embed=EmbedStatics.get_index_set_success_embed(str(price))
)
except Exception as e:
await ctx.channel.send(
embed=EmbedStatics.get_index_set_failure_embed(str(e))
)
traceback.print_exc()
async def set_link_index_recurse(
self, ctx: discord.ApplicationContext, link: str, depth, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
response = await ctx.respond(embed=EmbedStatics.build_index_progress_embed())
try:
# Pre-emptively connect and get the content-type of the response
try:
async with aiohttp.ClientSession() as session:
async with session.get(link, timeout=2) as _response:
print(_response.status)
if _response.status == 200:
content_type = _response.headers.get("content-type")
else:
await response.edit(
embed=EmbedStatics.get_index_set_failure_embed(
"Invalid URL or could not connect to the provided URL."
)
)
return
except Exception as e:
traceback.print_exc()
await response.edit(
embed=EmbedStatics.get_index_set_failure_embed(
"Invalid URL or could not connect to the provided URL. "
+ str(e)
)
)
return
# Check if the link contains youtube in it
loader = RemoteDepthReader(depth=depth)
documents = await self.loop.run_in_executor(
None, partial(loader.load_data, [link])
)
index = await self.loop.run_in_executor(
None,
functools.partial(
                    GPTVectorStoreIndex.from_documents,
documents=documents,
service_context=service_context_no_llm,
use_async=True,
),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except:
traceback.print_exc()
price = "Unknown"
# Make the url look nice, remove https, useless stuff, random characters
file_name = (
link.replace("https://", "")
.replace("http://", "")
.replace("www.", "")
.replace("/", "_")
.replace("?", "_")
.replace("&", "_")
.replace("=", "_")
.replace("-", "_")
.replace(".", "_")
)
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, file_name)
except ValueError as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
except Exception as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
await response.edit(embed=EmbedStatics.get_index_set_success_embed(price))
def get_query_engine(self, index, llm):
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=6,
service_context=get_service_context_with_llm(llm),
)
response_synthesizer = get_response_synthesizer(
response_mode=ResponseMode.COMPACT_ACCUMULATE,
use_async=True,
refine_template=TEXT_QA_SYSTEM_PROMPT,
service_context=get_service_context_with_llm(llm),
verbose=True,
)
engine = RetrieverQueryEngine(
retriever=retriever, response_synthesizer=response_synthesizer
)
return engine
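    # Hedged sketch of how the engine built above is used (the question is made
    # up). With ResponseMode.COMPACT_ACCUMULATE the synthesizer accumulates an
    # answer per retrieved chunk batch and concatenates them rather than fusing
    # a single reply.
    #
    #   engine = cog.get_query_engine(index, llm)
    #   response = engine.query("Summarise the refund policy")
    #   print(response.response)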
async def index_link(self, link, summarize=False, index_chat_ctx=None):
try:
if await UrlCheck.check_youtube_link(link):
print("Indexing youtube transcript")
index = await self.loop.run_in_executor(
None,
partial(
self.index_youtube_transcript, link, service_context_no_llm
),
)
print("Indexed youtube transcript")
elif "github" in link:
index = await self.loop.run_in_executor(
None,
partial(self.index_github_repository, link, service_context_no_llm),
)
else:
index = await self.index_webpage(link, service_context_no_llm)
except Exception as e:
if index_chat_ctx:
await index_chat_ctx.reply(
"There was an error indexing your link: " + str(e)
)
return False, None
else:
raise e
summary = None
if index_chat_ctx:
try:
print("Getting transcript summary")
self.usage_service.update_usage_memory(
index_chat_ctx.guild.name, "index_chat_link", 1
)
summary = await index.as_query_engine(
response_mode="tree_summarize",
service_context=get_service_context_with_llm(
self.index_chat_chains[index_chat_ctx.channel.id].llm
),
).aquery(
"What is a summary or general idea of this document? Be detailed in your summary but not too verbose. Your summary should be under 50 words. This summary will be used in a vector index to retrieve information about certain data. So, at a high level, the summary should describe the document in such a way that a retriever would know to select it when asked questions about it. The link was {link}. Include the an easy identifier derived from the link at the end of the summary."
)
print("Got transcript summary")
engine = self.get_query_engine(
index, self.index_chat_chains[index_chat_ctx.channel.id].llm
)
# Get rid of all special characters in the link, replace periods with _
link_cleaned = "".join(
[c for c in link if c.isalpha() or c.isdigit() or c == "."]
).rstrip()
# replace .
link_cleaned = link_cleaned.replace(".", "_")
                # Shorten the cleaned link to the first 50 characters
link_cleaned = link_cleaned[:50]
tool_config = IndexToolConfig(
query_engine=engine,
name=f"{link_cleaned}-index",
description=f"Use this tool if the query seems related to this summary: {summary}",
tool_kwargs={
"return_direct": False,
},
max_iterations=5,
)
tool = LlamaIndexTool.from_tool_config(tool_config)
tools = self.index_chat_chains[index_chat_ctx.channel.id].tools
tools.append(tool)
agent_chain = initialize_agent(
tools=tools,
llm=self.index_chat_chains[index_chat_ctx.channel.id].llm,
agent=AgentType.OPENAI_FUNCTIONS,
verbose=True,
agent_kwargs=self.index_chat_chains[
index_chat_ctx.channel.id
].agent_kwargs,
memory=self.index_chat_chains[index_chat_ctx.channel.id].memory,
handle_parsing_errors="Check your output and make sure it conforms!",
max_iterations=5,
)
index_chat_data = IndexChatData(
self.index_chat_chains[index_chat_ctx.channel.id].llm,
agent_chain,
self.index_chat_chains[index_chat_ctx.channel.id].memory,
index_chat_ctx.channel.id,
tools,
self.index_chat_chains[index_chat_ctx.channel.id].agent_kwargs,
self.index_chat_chains[index_chat_ctx.channel.id].llm_predictor,
)
self.index_chat_chains[index_chat_ctx.channel.id] = index_chat_data
return True, summary
except Exception as e:
await index_chat_ctx.reply(
"There was an error indexing your link: " + str(e)
)
return False, None
return index, summary
async def set_link_index(
self, ctx: discord.ApplicationContext, link: str, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
response = await ctx.respond(embed=EmbedStatics.build_index_progress_embed())
try:
# Check if the link contains youtube in it
index, _ = await self.index_link(link)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
                    token_counter.total_embedding_token_count, "embedding"
)
except:
traceback.print_exc()
price = "Unknown"
# Make the url look nice, remove https, useless stuff, random characters
file_name = (
link.replace("https://", "")
.replace("http://", "")
.replace("www.", "")
.replace("/", "_")
.replace("?", "_")
.replace("&", "_")
.replace("=", "_")
.replace("-", "_")
.replace(".", "_")
)
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, file_name)
except Exception as e:
await response.edit(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
return
await response.edit(embed=EmbedStatics.get_index_set_success_embed(price))
async def set_discord_index(
self,
ctx: discord.ApplicationContext,
channel: discord.TextChannel,
user_api_key,
message_limit: int = 2500,
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
document = await self.load_data(
channel_ids=[channel.id], limit=message_limit, oldest_first=False
)
index = await self.loop.run_in_executor(
None, partial(self.index_discord, document, service_context_no_llm)
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except Exception:
traceback.print_exc()
price = "Unknown"
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
self.index_storage[ctx.user.id].add_index(index, ctx.user.id, channel.name)
await ctx.respond(embed=EmbedStatics.get_index_set_success_embed(price))
except Exception as e:
await ctx.respond(embed=EmbedStatics.get_index_set_failure_embed(str(e)))
traceback.print_exc()
async def load_index(
self, ctx: discord.ApplicationContext, index, server, search, user_api_key
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
if server:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.guild.id}/{index}"
)
elif search:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.user.id}_search/{index}"
)
else:
index_file = EnvService.find_shared_file(
f"indexes/{ctx.user.id}/{index}"
)
index = await self.loop.run_in_executor(
None, partial(self.index_load_file, index_file)
)
self.index_storage[ctx.user.id].queryable_index = index
await ctx.respond(embed=EmbedStatics.get_index_load_success_embed())
except Exception as e:
traceback.print_exc()
await ctx.respond(embed=EmbedStatics.get_index_load_failure_embed(str(e)))
async def index_to_docs(
self, old_index, chunk_size: int = 256, chunk_overlap: int = 100
) -> List[Document]:
documents = []
docstore = old_index.docstore
ref_docs = old_index.ref_doc_info
for document in ref_docs.values():
text = ""
for node in document.node_ids:
node = docstore.get_node(node)
text += f"{node.text} "
text_splitter = TokenTextSplitter(
separator=" ", chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
text_chunks = text_splitter.split_text(text)
for chunk_text in text_chunks:
new_doc = Document(text=chunk_text, extra_info=document.metadata)
documents.append(new_doc)
return documents
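    # Hedged illustration of the re-chunking above: with chunk_size=256 and
    # chunk_overlap=100 the splitter advances roughly 156 tokens per chunk, so a
    # ~600-token node becomes overlapping Documents covering about tokens
    # [0, 256), [156, 412), [312, 568), ... (exact boundaries depend on the tokenizer).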
async def compose_indexes(self, user_id, indexes, name, deep_compose):
# Load all the indexes first
index_objects = []
for _index in indexes:
try:
index_file = EnvService.find_shared_file(f"indexes/{user_id}/{_index}")
except ValueError:
index_file = EnvService.find_shared_file(
f"indexes/{user_id}_search/{_index}"
)
index = await self.loop.run_in_executor(
None, partial(self.index_load_file, index_file)
)
index_objects.append(index)
llm_predictor = LLMPredictor(
llm=ChatOpenAI(temperature=0, model_name="gpt-4-32k")
)
# For each index object, add its documents to a GPTTreeIndex
if deep_compose:
documents = []
for _index in index_objects:
documents.extend(await self.index_to_docs(_index, 256, 20))
embedding_model = OpenAIEmbedding()
llm_predictor_mock = MockLLMPredictor()
embedding_model_mock = MockEmbedding(1536)
token_counter_mock = TokenCountingHandler(
tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
verbose=False,
)
callback_manager_mock = CallbackManager([token_counter_mock])
service_context_mock = ServiceContext.from_defaults(
llm_predictor=llm_predictor_mock,
embed_model=embedding_model_mock,
callback_manager=callback_manager_mock,
)
# Run the mock call first
await self.loop.run_in_executor(
None,
partial(
GPTTreeIndex.from_documents,
documents=documents,
service_context=service_context_mock,
),
)
total_usage_price = await self.usage_service.get_price(
token_counter_mock.total_llm_token_count,
"turbo", # TODO Enable again when tree indexes are fixed
) + await self.usage_service.get_price(
token_counter_mock.total_embedding_token_count, "embedding"
)
print("The total composition price is: ", total_usage_price)
if total_usage_price > MAX_DEEP_COMPOSE_PRICE:
raise ValueError(
"Doing this deep search would be prohibitively expensive. Please try a narrower search scope."
)
tree_index = await self.loop.run_in_executor(
None,
partial(
GPTTreeIndex.from_documents,
documents=documents,
service_context=self.service_context,
use_async=True,
),
)
await self.usage_service.update_usage(
self.token_counter.total_llm_token_count, "turbo"
)
await self.usage_service.update_usage(
self.token_counter.total_embedding_token_count, "embedding"
)
            # The deep composition yields a single tree index built over all the documents
if not name:
name = f"{date.today().month}_{date.today().day}_composed_deep_index"
# Save the composed index
tree_index.storage_context.persist(
persist_dir=EnvService.save_path() / "indexes" / str(user_id) / name
)
self.index_storage[user_id].queryable_index = tree_index
return total_usage_price
else:
documents = []
for _index in index_objects:
documents.extend(await self.index_to_docs(_index))
simple_index = await self.loop.run_in_executor(
None,
partial(
GPTVectorStoreIndex.from_documents,
documents=documents,
service_context=service_context_no_llm,
use_async=True,
),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
if not name:
name = f"{date.today().month}_{date.today().day}_composed_index"
# Save the composed index
simple_index.storage_context.persist(
persist_dir=EnvService.save_path() / "indexes" / str(user_id) / name
)
self.index_storage[user_id].queryable_index = simple_index
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except:
price = "Unknown"
return price
async def backup_discord(
self, ctx: discord.ApplicationContext, user_api_key, message_limit
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
try:
channel_ids: List[int] = []
for c in ctx.guild.text_channels:
channel_ids.append(c.id)
document = await self.load_data(
channel_ids=channel_ids, limit=message_limit, oldest_first=False
)
index = await self.loop.run_in_executor(
None, partial(self.index_discord, document, service_context_no_llm)
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
price = await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
)
except Exception:
traceback.print_exc()
price = "Unknown"
Path(EnvService.save_path() / "indexes" / str(ctx.guild.id)).mkdir(
parents=True, exist_ok=True
)
index.storage_context.persist(
persist_dir=EnvService.save_path()
/ "indexes"
/ str(ctx.guild.id)
/ f"{ctx.guild.name.replace(' ', '-')}_{date.today().month}_{date.today().day}"
)
await ctx.respond(embed=EmbedStatics.get_index_set_success_embed(price))
except Exception as e:
await ctx.respond(embed=EmbedStatics.get_index_set_failure_embed((str(e))))
traceback.print_exc()
async def query(
self,
ctx: discord.ApplicationContext,
query: str,
response_mode,
nodes,
user_api_key,
child_branch_factor,
model="gpt-4-32k",
multistep=False,
):
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name=model))
ctx_response = await ctx.respond(
embed=EmbedStatics.build_index_query_progress_embed(query)
)
try:
token_counter.reset_counts()
response = await self.loop.run_in_executor(
None,
partial(
get_and_query,
ctx.user.id,
self.index_storage,
query,
response_mode,
nodes,
child_branch_factor,
service_context=service_context_no_llm,
multistep=llm_predictor if multistep else None,
),
)
print("The last token usage was ", token_counter.total_llm_token_count)
await self.usage_service.update_usage(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
)
await self.usage_service.update_usage(
token_counter.total_embedding_token_count, "embedding"
)
try:
total_price = round(
await self.usage_service.get_price(
token_counter.total_llm_token_count,
await self.usage_service.get_cost_name(model),
)
+ await self.usage_service.get_price(
token_counter.total_embedding_token_count, "embedding"
),
6,
)
except:
total_price = "Unknown"
query_response_message = f"**Query:**\n\n`{query.strip()}`\n\n**Query response:**\n\n{response.response.strip()}"
query_response_message = query_response_message.replace(
"<|endofstatement|>", ""
)
embed_pages = await self.paginate_embed(query_response_message)
paginator = pages.Paginator(
pages=embed_pages,
timeout=None,
author_check=False,
)
await ctx_response.edit(
embed=EmbedStatics.build_index_query_success_embed(query, total_price)
)
await paginator.respond(ctx.interaction)
except Exception:
traceback.print_exc()
await ctx_response.edit(
embed=EmbedStatics.get_index_query_failure_embed(
"Failed to send query. You may not have an index set, load an index with /index load"
)
)
# Extracted functions from DiscordReader
async def read_channel(
self, channel_id: int, limit: Optional[int], oldest_first: bool
    ) -> tuple[str, str]:
"""Async read channel."""
messages: List[discord.Message] = []
try:
channel = self.bot.get_channel(channel_id)
print(f"Added {channel.name} from {channel.guild.name}")
            # Only text channels are supported for now
if not isinstance(channel, discord.TextChannel):
raise ValueError(
f"Channel {channel_id} is not a text channel. "
"Only text channels are supported for now."
)
# thread_dict maps thread_id to thread
thread_dict = {}
for thread in channel.threads:
thread_dict[thread.id] = thread
async for msg in channel.history(limit=limit, oldest_first=oldest_first):
                if msg.author.bot:
                    continue
                messages.append(msg)
                if msg.id in thread_dict:
                    thread = thread_dict[msg.id]
                    async for thread_msg in thread.history(
                        limit=limit, oldest_first=oldest_first
                    ):
                        messages.append(thread_msg)
except Exception as e:
print("Encountered error: " + str(e))
channel = self.bot.get_channel(channel_id)
msg_txt_list = [
f"user:{m.author.display_name}, content:{m.content}" for m in messages
]
return ("<|endofstatement|>\n\n".join(msg_txt_list), channel.name)
async def load_data(
self,
channel_ids: List[int],
limit: Optional[int] = None,
oldest_first: bool = True,
) -> List[Document]:
"""Load data from the input directory.
Args:
channel_ids (List[int]): List of channel ids to read.
limit (Optional[int]): Maximum number of messages to read.
oldest_first (bool): Whether to read oldest messages first.
Defaults to `True`.
Returns:
List[Document]: List of documents.
"""
results: List[Document] = []
for channel_id in channel_ids:
if not isinstance(channel_id, int):
raise ValueError(
f"Channel id {channel_id} must be an integer, "
f"not {type(channel_id)}."
)
(channel_content, channel_name) = await self.read_channel(
channel_id, limit=limit, oldest_first=oldest_first
)
results.append(
Document(
text=channel_content, extra_info={"channel_name": channel_name}
)
)
return results
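    # Hedged usage sketch (channel ids are made up): pull the newest 100 messages
    # of two channels into Documents whose extra_info carries the channel name.
    #
    #   docs = await cog.load_data(channel_ids=[111, 222], limit=100, oldest_first=False)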
async def compose(self, ctx: discord.ApplicationContext, name, user_api_key):
# Send the ComposeModal
if not user_api_key:
os.environ["OPENAI_API_KEY"] = self.openai_key
else:
os.environ["OPENAI_API_KEY"] = user_api_key
openai.api_key = os.environ["OPENAI_API_KEY"]
if not self.index_storage[ctx.user.id].has_indexes(ctx.user.id):
await ctx.respond(
embed=EmbedStatics.get_index_compose_failure_embed(
"You must have at least one index to compose."
)
)
return
await ctx.respond(
"Select the index(es) to compose. You can compose multiple indexes together, you can also Deep Compose a single index.",
view=ComposeModal(self, ctx.user.id, name),
ephemeral=True,
)
class ComposeModal(discord.ui.View):
def __init__(self, index_cog, user_id, name=None, deep=None) -> None:
super().__init__()
        # Keep references to the index cog, the composing user, and the deep flag
self.index_cog = index_cog
self.user_id = user_id
self.deep = deep
# Get all the indexes for the user
self.indexes = [
file
for file in os.listdir(
EnvService.find_shared_file(f"indexes/{str(user_id)}/")
)
]
if index_cog.index_storage[user_id].has_search_indexes(user_id):
self.indexes.extend(
[
file
for file in os.listdir(
EnvService.find_shared_file(f"indexes/{str(user_id)}_search/")
)
]
)
print("Found the indexes, they are ", self.indexes)
# Map everything into the short to long cache
for index in self.indexes:
if len(index) > 93:
                index_name = index[:93] + "-" + str(random.randint(0, 9999))
SHORT_TO_LONG_CACHE[index_name] = index
else:
SHORT_TO_LONG_CACHE[index[:99]] = index
# Reverse the SHORT_TO_LONG_CACHE index
LONG_TO_SHORT_CACHE = {v: k for k, v in SHORT_TO_LONG_CACHE.items()}
# A text entry field for the name of the composed index
self.name = name
# A discord UI select menu with all the indexes. Limited to 25 entries. For the label field in the SelectOption,
# cut it off at 100 characters to prevent the message from being too long
self.index_select = discord.ui.Select(
placeholder="Select index(es) to compose",
options=[
discord.SelectOption(
label=LONG_TO_SHORT_CACHE[index], value=LONG_TO_SHORT_CACHE[index]
)
for index in self.indexes
][0:25],
max_values=len(self.indexes) if len(self.indexes) < 25 else 25,
min_values=1,
)
# Add the select menu to the modal
self.add_item(self.index_select)
        # If we have more than 25 entries, add more Select fields as necessary
self.extra_index_selects = []
if len(self.indexes) > 25:
for i in range(25, len(self.indexes), 25):
self.extra_index_selects.append(
discord.ui.Select(
placeholder="Select index(es) to compose",
options=[
discord.SelectOption(
label=LONG_TO_SHORT_CACHE[index],
value=LONG_TO_SHORT_CACHE[index],
)
for index in self.indexes
][i : i + 25],
max_values=len(self.indexes[i : i + 25]),
min_values=1,
)
)
self.add_item(self.extra_index_selects[-1])
# Add an input field for "Deep", a "yes" or "no" option, default no
self.deep_select = discord.ui.Select(
placeholder="Deep Compose",
options=[
discord.SelectOption(label="Yes", value="yes"),
discord.SelectOption(label="No", value="no"),
],
max_values=1,
min_values=1,
)
self.add_item(self.deep_select)
# Add a button to the modal called "Compose"
self.add_item(
discord.ui.Button(
label="Compose", style=discord.ButtonStyle.green, custom_id="compose"
)
)
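    # Illustrative note on the name shortening above (the example name is made
    # up): a 120-character index name is cut to its first 93 characters plus a
    # random numeric suffix so it fits Discord's SelectOption label limit, and
    # SHORT_TO_LONG_CACHE maps that short label back to the real file name.
    #
    #   long_name = "x" * 120
    #   short_name = long_name[:93] + "-" + str(random.randint(0, 9999))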
# The callback for the button
async def interaction_check(self, interaction: discord.Interaction) -> bool:
# Check that the interaction was for custom_id "compose"
if interaction.data["custom_id"] == "compose":
# Check that the user selected at least one index
# The total list of indexes is the union of the values of all the select menus
indexes = self.index_select.values + [
select.values[0] for select in self.extra_index_selects
]
# Remap them from the SHORT_TO_LONG_CACHE
indexes = [SHORT_TO_LONG_CACHE[index] for index in indexes]
if len(indexes) < 1:
await interaction.response.send_message(
embed=EmbedStatics.get_index_compose_failure_embed(
"You must select at least 1 index"
),
ephemeral=True,
)
else:
composing_message = await interaction.response.send_message(
embed=EmbedStatics.get_index_compose_progress_embed(),
ephemeral=True,
)
# Compose the indexes
try:
price = await self.index_cog.compose_indexes(
self.user_id,
indexes,
self.name,
(
False
if not self.deep_select.values
or self.deep_select.values[0] == "no"
else True
),
)
except ValueError as e:
await interaction.followup.send(
str(e), ephemeral=True, delete_after=180
)
return False
except Exception as e:
traceback.print_exc()
await interaction.followup.send(
embed=EmbedStatics.get_index_compose_failure_embed(
"An error occurred while composing the indexes: " + str(e)
),
ephemeral=True,
delete_after=180,
)
return False
await interaction.followup.send(
embed=EmbedStatics.get_index_compose_success_embed(price),
ephemeral=True,
delete_after=180,
)
# Try to direct message the user that their composed index is ready
try:
await self.index_cog.bot.get_user(self.user_id).send(
f"Your composed index is ready! You can load it with /index load now in the server."
)
except discord.Forbidden:
pass
try:
composing_message: Interaction
await composing_message.delete_original_response()
except:
traceback.print_exc()
else:
await interaction.response.defer(ephemeral=True)
| [
"llama_index.langchain_helpers.agents.IndexToolConfig",
"llama_index.download_loader",
"llama_index.retrievers.TreeSelectLeafRetriever",
"llama_index.GithubRepositoryReader",
"llama_index.langchain_helpers.text_splitter.TokenTextSplitter",
"llama_index.BeautifulSoupWebReader",
"llama_index.langchain_helpers.agents.LlamaIndexTool.from_tool_config",
"llama_index.callbacks.CallbackManager",
"llama_index.readers.schema.base.Document",
"llama_index.OpenAIEmbedding",
"llama_index.retrievers.VectorIndexRetriever",
"llama_index.StorageContext.from_defaults",
"llama_index.MockEmbedding",
"llama_index.GoogleDocsReader",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.indices.query.query_transform.StepDecomposeQueryTransform",
"llama_index.query_engine.RetrieverQueryEngine",
"llama_index.SimpleDirectoryReader",
"llama_index.get_response_synthesizer",
"llama_index.ServiceContext.from_defaults",
"llama_index.QuestionAnswerPrompt",
"llama_index.load_index_from_storage",
"llama_index.MockLLMPredictor",
"llama_index.readers.YoutubeTranscriptReader"
] | [((2731, 2770), 'services.environment_service.EnvService.get_max_deep_compose_price', 'EnvService.get_max_deep_compose_price', ([], {}), '()\n', (2768, 2770), False, 'from services.environment_service import EnvService\n'), ((2784, 2813), 'llama_index.download_loader', 'download_loader', (['"""EpubReader"""'], {}), "('EpubReader')\n", (2799, 2813), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((2831, 2864), 'llama_index.download_loader', 'download_loader', (['"""MarkdownReader"""'], {}), "('MarkdownReader')\n", (2846, 2864), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((2880, 2911), 'llama_index.download_loader', 'download_loader', (['"""RemoteReader"""'], {}), "('RemoteReader')\n", (2895, 2911), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((2932, 2968), 'llama_index.download_loader', 'download_loader', (['"""RemoteDepthReader"""'], {}), "('RemoteDepthReader')\n", (2947, 2968), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((2988, 3005), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (3003, 3005), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((3273, 3305), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (3288, 3305), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((3331, 3453), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embedding_model', 'callback_manager': 'callback_manager', 'node_parser': 'node_parser'}), '(embed_model=embedding_model, callback_manager=\n callback_manager, node_parser=node_parser)\n', (3359, 3453), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, 
get_response_synthesizer, VectorStoreIndex\n'), ((3474, 3518), 'httpx.Timeout', 'httpx.Timeout', (['(1)'], {'read': '(1)', 'write': '(1)', 'connect': '(1)'}), '(1, read=1, write=1, connect=1)\n', (3487, 3518), False, 'import httpx\n'), ((3582, 3713), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embedding_model', 'callback_manager': 'callback_manager', 'node_parser': 'node_parser', 'llm': 'llm'}), '(embed_model=embedding_model, callback_manager=\n callback_manager, node_parser=node_parser, llm=llm)\n', (3610, 3713), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((4561, 4703), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {'response_mode': 'response_mode', 'use_async': '(True)', 'refine_template': 'CHAT_REFINE_PROMPT', 'service_context': 'service_context'}), '(response_mode=response_mode, use_async=True,\n refine_template=CHAT_REFINE_PROMPT, service_context=service_context)\n', (4585, 4703), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((4759, 4848), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (4779, 4848), False, 'from llama_index.query_engine import RetrieverQueryEngine, MultiStepQueryEngine, RetryGuidelineQueryEngine\n'), ((8371, 8388), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (8386, 8388), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((8688, 8720), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (8703, 8720), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((8743, 8865), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embedding_model', 'callback_manager': 'callback_manager', 'node_parser': 'node_parser'}), '(embed_model=embedding_model, callback_manager=\n callback_manager, node_parser=node_parser)\n', (8771, 8865), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((3199, 3251), 'llama_index.langchain_helpers.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(1024)', 'chunk_overlap': 
'(20)'}), '(chunk_size=1024, chunk_overlap=20)\n', (3216, 3251), False, 'from llama_index.langchain_helpers.text_splitter import TokenTextSplitter\n'), ((4232, 4347), 'llama_index.retrievers.TreeSelectLeafRetriever', 'TreeSelectLeafRetriever', ([], {'index': 'index', 'child_branch_factor': 'child_branch_factor', 'service_context': 'service_context'}), '(index=index, child_branch_factor=\n child_branch_factor, service_context=service_context)\n', (4255, 4347), False, 'from llama_index.retrievers import VectorIndexRetriever, TreeSelectLeafRetriever\n'), ((4420, 4515), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': 'nodes', 'service_context': 'service_context'}), '(index=index, similarity_top_k=nodes, service_context=\n service_context)\n', (4440, 4515), False, 'from llama_index.retrievers import VectorIndexRetriever, TreeSelectLeafRetriever\n'), ((10748, 10773), 'os.getenv', 'os.getenv', (['"""OPENAI_TOKEN"""'], {}), "('OPENAI_TOKEN')\n", (10757, 10773), False, 'import os\n'), ((10803, 10825), 'collections.defaultdict', 'defaultdict', (['IndexData'], {}), '(IndexData)\n', (10814, 10825), False, 'from collections import defaultdict\n'), ((10846, 10872), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (10870, 10872), False, 'import asyncio\n'), ((10940, 11301), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['"""Context information is below. The text \'<|endofstatement|>\' is used to separate chat entries and make it easier for you to understand the context\n---------------------\n{context_str}\n---------------------\nNever say \'<|endofstatement|>\'\nGiven the context information and not prior knowledge, answer the question: {query_str}\n"""'], {}), '(\n """Context information is below. 
The text \'<|endofstatement|>\' is used to separate chat entries and make it easier for you to understand the context\n---------------------\n{context_str}\n---------------------\nNever say \'<|endofstatement|>\'\nGiven the context information and not prior knowledge, answer the question: {query_str}\n"""\n )\n', (10960, 11301), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((11518, 11531), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (11529, 11531), False, 'from collections import defaultdict\n'), ((11675, 11717), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['original_path'], {}), '(original_path)\n', (11702, 11717), False, 'from services.environment_service import EnvService\n'), ((18915, 18991), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': 'model', 'temperature': 'temperature', 'top_p': 'top_p', 'max_retries': '(2)'}), '(model=model, temperature=temperature, top_p=top_p, max_retries=2)\n', (18925, 18991), False, 'from langchain.chat_models import ChatOpenAI\n'), ((19226, 19380), 'langchain.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'memory_key': '"""memory"""', 'return_messages': '(True)', 'llm': 'llm', 'max_token_limit': "(100000 if 'preview' in model else max_token_limit)"}), "(memory_key='memory', return_messages=True,\n llm=llm, max_token_limit=100000 if 'preview' in model else max_token_limit)\n", (19257, 19380), False, 'from langchain.memory import ConversationSummaryBufferMemory\n'), ((21043, 21249), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'llm', 'agent': 'AgentType.OPENAI_FUNCTIONS', 'verbose': '(True)', 'agent_kwargs': 'agent_kwargs', 'memory': 'memory', 'handle_parsing_errors': '"""Check your output and make sure it conforms!"""'}), "(tools=tools, llm=llm, agent=AgentType.OPENAI_FUNCTIONS,\n verbose=True, agent_kwargs=agent_kwargs, memory=memory,\n handle_parsing_errors='Check your output and make sure it conforms!')\n", (21059, 21249), False, 'from langchain.agents import initialize_agent, AgentType\n'), ((21443, 21687), 'discord.Embed', 'discord.Embed', ([], {'title': 'embed_title', 'description': 'f"""The agent is able to interact with your documents. Simply drag your documents into discord or give the agent a link from where to download the documents.\nModel: {model}"""', 'color': '(39259)'}), '(title=embed_title, description=\n f"""The agent is able to interact with your documents. 
Simply drag your documents into discord or give the agent a link from where to download the documents.\nModel: {model}"""\n , color=39259)\n', (21456, 21687), False, 'import discord\n'), ((23850, 23948), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['document'], {'service_context': 'service_context', 'use_async': '(True)'}), '(document, service_context=\n service_context, use_async=True)\n', (23884, 23948), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((24134, 24232), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['document'], {'service_context': 'service_context', 'use_async': '(True)'}), '(document, service_context=\n service_context, use_async=True)\n', (24168, 24232), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((25156, 25255), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context', 'use_async': '(True)'}), '(documents, service_context=\n service_context, use_async=True)\n', (25190, 25255), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((25833, 25932), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context', 'use_async': '(True)'}), '(documents, service_context=\n service_context, use_async=True)\n', (25867, 25932), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((26107, 26158), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'file_path'}), '(persist_dir=file_path)\n', (26135, 26158), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((26175, 26215), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (26198, 26215), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, 
GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((26333, 26431), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['document'], {'service_context': 'service_context', 'use_async': '(True)'}), '(document, service_context=\n service_context, use_async=True)\n', (26367, 26431), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((36388, 36477), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (36408, 36477), False, 'from llama_index.query_engine import RetrieverQueryEngine, MultiStepQueryEngine, RetryGuidelineQueryEngine\n'), ((3058, 3105), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""text-davinci-003"""'], {}), "('text-davinci-003')\n", (3085, 3105), False, 'import tiktoken\n'), ((4969, 5007), 'llama_index.indices.query.query_transform.StepDecomposeQueryTransform', 'StepDecomposeQueryTransform', (['multistep'], {}), '(multistep)\n', (4996, 5007), False, 'from llama_index.indices.query.query_transform import StepDecomposeQueryTransform\n'), ((8606, 8658), 'llama_index.langchain_helpers.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(1024)', 'chunk_overlap': '(20)'}), '(chunk_size=1024, chunk_overlap=20)\n', (8623, 8658), False, 'from llama_index.langchain_helpers.text_splitter import TokenTextSplitter\n'), ((13484, 13522), 'aiofiles.tempfile.TemporaryDirectory', 'aiofiles.tempfile.TemporaryDirectory', ([], {}), '()\n', (13520, 13522), False, 'import aiofiles\n'), ((19576, 20507), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a superpowered version of GPT that is able to answer questions about the data you\'re connected to. Each different tool you have represents a different dataset to interact with. If you are asked to perform a task that spreads across multiple datasets, use multiple tools for the same prompt. When the user types links in chat, you will have already been connected to the data at the link by the time you respond. When using tools, the input should be clearly created based on the request of the user. For example, if a user uploads an invoice and asks how many usage hours of X was present in the invoice, a good query is \'X hours\'. Avoid using single word queries unless the request is very simple. You can query multiple times to break down complex requests and retrieve more information. When calling functions, no special characters are allowed in the function name, keep that in mind."""'}), '(content=\n "You are a superpowered version of GPT that is able to answer questions about the data you\'re connected to. Each different tool you have represents a different dataset to interact with. If you are asked to perform a task that spreads across multiple datasets, use multiple tools for the same prompt. 
When the user types links in chat, you will have already been connected to the data at the link by the time you respond. When using tools, the input should be clearly created based on the request of the user. For example, if a user uploads an invoice and asks how many usage hours of X was present in the invoice, a good query is \'X hours\'. Avoid using single word queries unless the request is very simple. You can query multiple times to break down complex requests and retrieve more information. When calling functions, no special characters are allowed in the function name, keep that in mind."\n )\n', (19589, 20507), False, 'from langchain.schema import SystemMessage\n'), ((20703, 20868), 'langchain.tools.Tool', 'Tool', ([], {'name': '"""Dummy-Tool-Do-Not-Use"""', 'func': 'dummy_tool', 'description': 'f"""This is a dummy tool that does nothing, do not ever mention this tool or use this tool."""'}), "(name='Dummy-Tool-Do-Not-Use', func=dummy_tool, description=\n f'This is a dummy tool that does nothing, do not ever mention this tool or use this tool.'\n )\n", (20707, 20868), False, 'from langchain.tools import Tool\n'), ((22169, 22227), 'utils.safe_ctx_respond.safe_ctx_respond', 'safe_ctx_respond', ([], {'ctx': 'ctx', 'content': '"""Conversation started."""'}), "(ctx=ctx, content='Conversation started.')\n", (22185, 22227), False, 'from utils.safe_ctx_respond import safe_ctx_respond\n'), ((23326, 23344), 'discord.ext.pages.append', 'pages.append', (['page'], {}), '(page)\n', (23338, 23344), False, 'from discord.ext import pages\n'), ((26633, 26656), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (26654, 26656), False, 'import aiohttp\n'), ((46236, 46325), 'llama_index.langchain_helpers.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'separator': '""" """', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(separator=' ', chunk_size=chunk_size, chunk_overlap=\n chunk_overlap)\n", (46253, 46325), False, 'from llama_index.langchain_helpers.text_splitter import TokenTextSplitter\n'), ((47599, 47616), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (47614, 47616), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((47651, 47669), 'llama_index.MockLLMPredictor', 'MockLLMPredictor', ([], {}), '()\n', (47667, 47669), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((47705, 47724), 'llama_index.MockEmbedding', 'MockEmbedding', (['(1536)'], {}), '(1536)\n', (47718, 47724), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((47945, 47982), 'llama_index.callbacks.CallbackManager', 'CallbackManager', 
(['[token_counter_mock]'], {}), '([token_counter_mock])\n', (47960, 47982), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((48019, 48160), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor_mock', 'embed_model': 'embedding_model_mock', 'callback_manager': 'callback_manager_mock'}), '(llm_predictor=llm_predictor_mock, embed_model=\n embedding_model_mock, callback_manager=callback_manager_mock)\n', (48047, 48160), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((55795, 55863), 'discord.ext.pages.Paginator', 'pages.Paginator', ([], {'pages': 'embed_pages', 'timeout': 'None', 'author_check': '(False)'}), '(pages=embed_pages, timeout=None, author_check=False)\n', (55810, 55863), False, 'from discord.ext import pages\n'), ((63797, 63889), 'discord.ui.Button', 'discord.ui.Button', ([], {'label': '"""Compose"""', 'style': 'discord.ButtonStyle.green', 'custom_id': '"""compose"""'}), "(label='Compose', style=discord.ButtonStyle.green,\n custom_id='compose')\n", (63814, 63889), False, 'import discord\n'), ((7687, 7736), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}"""'], {}), "(f'indexes/{user_id}')\n", (7714, 7736), False, 'from services.environment_service import EnvService\n'), ((7966, 8022), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}_search"""'], {}), "(f'indexes/{user_id}_search')\n", (7993, 8022), False, 'from services.environment_service import EnvService\n'), ((8304, 8325), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8323, 8325), False, 'import traceback\n'), ((8449, 8496), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""text-davinci-003"""'], {}), "('text-davinci-003')\n", (8476, 8496), False, 'import tiktoken\n'), ((12023, 12044), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (12042, 12044), False, 'import traceback\n'), ((12905, 12977), 'functools.partial', 'partial', (['self.index_chat_chains[ctx.channel.id].agent_chain.run', 'message'], {}), '(self.index_chat_chains[ctx.channel.id].agent_chain.run, message)\n', (12912, 12977), False, 'from functools import partial\n'), ((13560, 13645), 'aiofiles.tempfile.NamedTemporaryFile', 'aiofiles.tempfile.NamedTemporaryFile', ([], {'suffix': 'suffix', 'dir': 'temp_path', 'delete': '(False)'}), '(suffix=suffix, dir=temp_path, delete=False\n )\n', (13596, 13645), False, 'import aiofiles\n'), ((19068, 19134), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'temperature', 'top_p': 'top_p', 'model_name': 'model'}), '(temperature=temperature, top_p=top_p, model_name=model)\n', (19078, 19134), False, 'from langchain.chat_models import ChatOpenAI\n'), ((19500, 19543), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""memory"""'}), "(variable_name='memory')\n", (19519, 19543), False, 'from langchain.prompts import MessagesPlaceholder\n'), ((23007, 23069), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Index Query Results"""', 'description': 'chunk'}), "(title=f'Index Query 
Results', description=chunk)\n", (23020, 23069), False, 'import discord\n'), ((23199, 23254), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Page {count}"""', 'description': 'chunk'}), "(title=f'Page {count}', description=chunk)\n", (23212, 23254), False, 'import discord\n'), ((24081, 24099), 'llama_index.GoogleDocsReader', 'GoogleDocsReader', ([], {}), '()\n', (24097, 24099), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((27234, 27277), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[f.name]'}), '(input_files=[f.name])\n', (27255, 27277), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((27543, 27566), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (27564, 27566), False, 'import aiohttp\n'), ((28806, 28827), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (28825, 28827), False, 'import traceback\n'), ((28904, 28971), 'llama_index.BeautifulSoupWebReader', 'BeautifulSoupWebReader', ([], {'website_extractor': 'DEFAULT_WEBSITE_EXTRACTOR'}), '(website_extractor=DEFAULT_WEBSITE_EXTRACTOR)\n', (28926, 28971), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((29186, 29313), 'functools.partial', 'functools.partial', (['GPTVectorStoreIndex.from_documents'], {'documents': 'documents', 'service_context': 'service_context', 'use_async': '(True)'}), '(GPTVectorStoreIndex.from_documents, documents=documents,\n service_context=service_context, use_async=True)\n', (29203, 29313), False, 'import functools\n'), ((30898, 30936), 'aiofiles.tempfile.TemporaryDirectory', 'aiofiles.tempfile.TemporaryDirectory', ([], {}), '()\n', (30934, 30936), False, 'import aiofiles\n'), ((32352, 32373), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (32371, 32373), False, 'import traceback\n'), ((35560, 35581), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (35579, 35581), False, 'import traceback\n'), ((35733, 35754), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (35752, 35754), False, 'import traceback\n'), ((36629, 36662), 'models.check_model.UrlCheck.check_youtube_link', 'UrlCheck.check_youtube_link', (['link'], {}), '(link)\n', (36656, 36662), False, 'from models.check_model import UrlCheck\n'), ((39345, 39564), 'llama_index.langchain_helpers.agents.IndexToolConfig', 'IndexToolConfig', ([], {'query_engine': 'engine', 'name': 'f"""{link_cleaned}-index"""', 'description': 'f"""Use this tool if the query seems related to this summary: {summary}"""', 'tool_kwargs': "{'return_direct': False}", 'max_iterations': '(5)'}), "(query_engine=engine, 
name=f'{link_cleaned}-index',\n description=\n f'Use this tool if the query seems related to this summary: {summary}',\n tool_kwargs={'return_direct': False}, max_iterations=5)\n", (39360, 39564), False, 'from llama_index.langchain_helpers.agents import IndexToolConfig, LlamaToolkit, create_llama_chat_agent, LlamaIndexTool\n'), ((39742, 39786), 'llama_index.langchain_helpers.agents.LlamaIndexTool.from_tool_config', 'LlamaIndexTool.from_tool_config', (['tool_config'], {}), '(tool_config)\n', (39773, 39786), False, 'from llama_index.langchain_helpers.agents import IndexToolConfig, LlamaToolkit, create_llama_chat_agent, LlamaIndexTool\n'), ((39934, 40324), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'self.index_chat_chains[index_chat_ctx.channel.id].llm', 'agent': 'AgentType.OPENAI_FUNCTIONS', 'verbose': '(True)', 'agent_kwargs': 'self.index_chat_chains[index_chat_ctx.channel.id].agent_kwargs', 'memory': 'self.index_chat_chains[index_chat_ctx.channel.id].memory', 'handle_parsing_errors': '"""Check your output and make sure it conforms!"""', 'max_iterations': '(5)'}), "(tools=tools, llm=self.index_chat_chains[index_chat_ctx.\n channel.id].llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True,\n agent_kwargs=self.index_chat_chains[index_chat_ctx.channel.id].\n agent_kwargs, memory=self.index_chat_chains[index_chat_ctx.channel.id].\n memory, handle_parsing_errors=\n 'Check your output and make sure it conforms!', max_iterations=5)\n", (39950, 40324), False, 'from langchain.agents import initialize_agent, AgentType\n'), ((42978, 42999), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (42997, 42999), False, 'import traceback\n'), ((44522, 44543), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (44541, 44543), False, 'import traceback\n'), ((44939, 45001), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{ctx.guild.id}/{index}"""'], {}), "(f'indexes/{ctx.guild.id}/{index}')\n", (44966, 45001), False, 'from services.environment_service import EnvService\n'), ((45671, 45692), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (45690, 45692), False, 'import traceback\n'), ((46478, 46533), 'llama_index.readers.schema.base.Document', 'Document', ([], {'text': 'chunk_text', 'extra_info': 'document.metadata'}), '(text=chunk_text, extra_info=document.metadata)\n', (46486, 46533), False, 'from llama_index.readers.schema.base import Document\n'), ((46819, 46877), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}/{_index}"""'], {}), "(f'indexes/{user_id}/{_index}')\n", (46846, 46877), False, 'from services.environment_service import EnvService\n'), ((47269, 47318), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-4-32k"""'}), "(temperature=0, model_name='gpt-4-32k')\n", (47279, 47318), False, 'from langchain.chat_models import ChatOpenAI\n'), ((53270, 53291), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (53289, 53291), False, 'import traceback\n'), ((53791, 53834), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': 'model'}), '(temperature=0, model_name=model)\n', (53801, 53834), False, 'from langchain.chat_models import ChatOpenAI\n'), ((56156, 56177), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (56175, 56177), False, 'import traceback\n'), ((59155, 59228), 
'llama_index.readers.schema.base.Document', 'Document', ([], {'text': 'channel_content', 'extra_info': "{'channel_name': channel_name}"}), "(text=channel_content, extra_info={'channel_name': channel_name})\n", (59163, 59228), False, 'from llama_index.readers.schema.base import Document\n'), ((7102, 7114), 'datetime.date.today', 'date.today', ([], {}), '()\n', (7112, 7114), False, 'from datetime import date\n'), ((7123, 7135), 'datetime.date.today', 'date.today', ([], {}), '()\n', (7133, 7135), False, 'from datetime import date\n'), ((11916, 11935), 'pathlib.Path', 'Path', (['original_path'], {}), '(original_path)\n', (11920, 11935), False, 'from pathlib import Path\n'), ((16619, 16830), 'llama_index.langchain_helpers.agents.IndexToolConfig', 'IndexToolConfig', ([], {'query_engine': 'engine', 'name': 'f"""{filename}-index"""', 'description': 'f"""Use this tool if the query seems related to this summary: {summary}"""', 'tool_kwargs': "{'return_direct': False}", 'max_iterations': '(5)'}), "(query_engine=engine, name=f'{filename}-index', description=\n f'Use this tool if the query seems related to this summary: {summary}',\n tool_kwargs={'return_direct': False}, max_iterations=5)\n", (16634, 16830), False, 'from llama_index.langchain_helpers.agents import IndexToolConfig, LlamaToolkit, create_llama_chat_agent, LlamaIndexTool\n'), ((17048, 17092), 'llama_index.langchain_helpers.agents.LlamaIndexTool.from_tool_config', 'LlamaIndexTool.from_tool_config', (['tool_config'], {}), '(tool_config)\n', (17079, 17092), False, 'from llama_index.langchain_helpers.agents import IndexToolConfig, LlamaToolkit, create_llama_chat_agent, LlamaIndexTool\n'), ((17245, 17592), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'self.index_chat_chains[message.channel.id].llm', 'agent': 'AgentType.OPENAI_FUNCTIONS', 'verbose': '(True)', 'agent_kwargs': 'self.index_chat_chains[message.channel.id].agent_kwargs', 'memory': 'self.index_chat_chains[message.channel.id].memory', 'handle_parsing_errors': '"""Check your output and make sure it conforms!"""'}), "(tools=tools, llm=self.index_chat_chains[message.channel.id\n ].llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True, agent_kwargs=\n self.index_chat_chains[message.channel.id].agent_kwargs, memory=self.\n index_chat_chains[message.channel.id].memory, handle_parsing_errors=\n 'Check your output and make sure it conforms!')\n", (17261, 17592), False, 'from langchain.agents import initialize_agent, AgentType\n'), ((18841, 18890), 'models.embed_statics_model.EmbedStatics.get_index_chat_preparation_message', 'EmbedStatics.get_index_chat_preparation_message', ([], {}), '()\n', (18888, 18890), False, 'from models.embed_statics_model import EmbedStatics\n'), ((24914, 24939), 'llama_index.readers.YoutubeTranscriptReader', 'YoutubeTranscriptReader', ([], {}), '()\n', (24937, 24939), False, 'from llama_index.readers import YoutubeTranscriptReader\n'), ((25561, 25607), 'llama_index.GithubRepositoryReader', 'GithubRepositoryReader', ([], {'owner': 'owner', 'repo': 'repo'}), '(owner=owner, repo=repo)\n', (25583, 25607), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((26838, 26894), 'tempfile.NamedTemporaryFile', 
'tempfile.NamedTemporaryFile', ([], {'suffix': '""".pdf"""', 'delete': '(False)'}), "(suffix='.pdf', delete=False)\n", (26865, 26894), False, 'import tempfile\n'), ((30978, 31063), 'aiofiles.tempfile.NamedTemporaryFile', 'aiofiles.tempfile.NamedTemporaryFile', ([], {'suffix': 'suffix', 'dir': 'temp_path', 'delete': '(False)'}), '(suffix=suffix, dir=temp_path, delete=False\n )\n', (31014, 31063), False, 'import aiofiles\n'), ((31886, 31907), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (31905, 31907), False, 'import traceback\n'), ((32754, 32795), 'models.embed_statics_model.EmbedStatics.build_index_progress_embed', 'EmbedStatics.build_index_progress_embed', ([], {}), '()\n', (32793, 32795), False, 'from models.embed_statics_model import EmbedStatics\n'), ((32931, 32954), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (32952, 32954), False, 'import aiohttp\n'), ((33623, 33644), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (33642, 33644), False, 'import traceback\n'), ((34114, 34147), 'functools.partial', 'partial', (['loader.load_data', '[link]'], {}), '(loader.load_data, [link])\n', (34121, 34147), False, 'from functools import partial\n'), ((34253, 34373), 'functools.partial', 'functools.partial', (['GPTVectorStoreIndex'], {'documents': 'documents', 'service_context': 'service_context_no_llm', 'use_async': '(True)'}), '(GPTVectorStoreIndex, documents=documents, service_context\n =service_context_no_llm, use_async=True)\n', (34270, 34373), False, 'import functools\n'), ((34827, 34848), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (34846, 34848), False, 'import traceback\n'), ((35809, 35856), 'models.embed_statics_model.EmbedStatics.get_index_set_success_embed', 'EmbedStatics.get_index_set_success_embed', (['price'], {}), '(price)\n', (35849, 35856), False, 'from models.embed_statics_model import EmbedStatics\n'), ((41745, 41786), 'models.embed_statics_model.EmbedStatics.build_index_progress_embed', 'EmbedStatics.build_index_progress_embed', ([], {}), '()\n', (41784, 41786), False, 'from models.embed_statics_model import EmbedStatics\n'), ((42246, 42267), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (42265, 42267), False, 'import traceback\n'), ((43054, 43101), 'models.embed_statics_model.EmbedStatics.get_index_set_success_embed', 'EmbedStatics.get_index_set_success_embed', (['price'], {}), '(price)\n', (43094, 43101), False, 'from models.embed_statics_model import EmbedStatics\n'), ((43736, 43797), 'functools.partial', 'partial', (['self.index_discord', 'document', 'service_context_no_llm'], {}), '(self.index_discord, document, service_context_no_llm)\n', (43743, 43797), False, 'from functools import partial\n'), ((44028, 44049), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (44047, 44049), False, 'import traceback\n'), ((45094, 45162), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{ctx.user.id}_search/{index}"""'], {}), "(f'indexes/{ctx.user.id}_search/{index}')\n", (45121, 45162), False, 'from services.environment_service import EnvService\n'), ((45248, 45309), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{ctx.user.id}/{index}"""'], {}), "(f'indexes/{ctx.user.id}/{index}')\n", (45275, 45309), False, 'from services.environment_service import EnvService\n'), ((45423, 45464), 'functools.partial', 'partial', (['self.index_load_file', 'index_file'], {}), 
'(self.index_load_file, index_file)\n', (45430, 45464), False, 'from functools import partial\n'), ((46938, 47003), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}_search/{_index}"""'], {}), "(f'indexes/{user_id}_search/{_index}')\n", (46965, 47003), False, 'from services.environment_service import EnvService\n'), ((47118, 47159), 'functools.partial', 'partial', (['self.index_load_file', 'index_file'], {}), '(self.index_load_file, index_file)\n', (47125, 47159), False, 'from functools import partial\n'), ((48341, 48441), 'functools.partial', 'partial', (['GPTTreeIndex.from_documents'], {'documents': 'documents', 'service_context': 'service_context_mock'}), '(GPTTreeIndex.from_documents, documents=documents, service_context=\n service_context_mock)\n', (48348, 48441), False, 'from functools import partial\n'), ((49269, 49385), 'functools.partial', 'partial', (['GPTTreeIndex.from_documents'], {'documents': 'documents', 'service_context': 'self.service_context', 'use_async': '(True)'}), '(GPTTreeIndex.from_documents, documents=documents, service_context=\n self.service_context, use_async=True)\n', (49276, 49385), False, 'from functools import partial\n'), ((50492, 50616), 'functools.partial', 'partial', (['GPTVectorStoreIndex.from_documents'], {'documents': 'documents', 'service_context': 'service_context_no_llm', 'use_async': '(True)'}), '(GPTVectorStoreIndex.from_documents, documents=documents,\n service_context=service_context_no_llm, use_async=True)\n', (50499, 50616), False, 'from functools import partial\n'), ((52163, 52224), 'functools.partial', 'partial', (['self.index_discord', 'document', 'service_context_no_llm'], {}), '(self.index_discord, document, service_context_no_llm)\n', (52170, 52224), False, 'from functools import partial\n'), ((52591, 52612), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (52610, 52612), False, 'import traceback\n'), ((53897, 53949), 'models.embed_statics_model.EmbedStatics.build_index_query_progress_embed', 'EmbedStatics.build_index_query_progress_embed', (['query'], {}), '(query)\n', (53942, 53949), False, 'from models.embed_statics_model import EmbedStatics\n'), ((54109, 54311), 'functools.partial', 'partial', (['get_and_query', 'ctx.user.id', 'self.index_storage', 'query', 'response_mode', 'nodes', 'child_branch_factor'], {'service_context': 'service_context_no_llm', 'multistep': '(llm_predictor if multistep else None)'}), '(get_and_query, ctx.user.id, self.index_storage, query,\n response_mode, nodes, child_branch_factor, service_context=\n service_context_no_llm, multistep=llm_predictor if multistep else None)\n', (54116, 54311), False, 'from functools import partial\n'), ((63481, 63527), 'discord.SelectOption', 'discord.SelectOption', ([], {'label': '"""Yes"""', 'value': '"""yes"""'}), "(label='Yes', value='yes')\n", (63501, 63527), False, 'import discord\n'), ((63545, 63589), 'discord.SelectOption', 'discord.SelectOption', ([], {'label': '"""No"""', 'value': '"""no"""'}), "(label='No', value='no')\n", (63565, 63589), False, 'import discord\n'), ((6286, 6335), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}"""'], {}), "(f'indexes/{user_id}')\n", (6313, 6335), False, 'from services.environment_service import EnvService\n'), ((6537, 6593), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}_search"""'], {}), 
"(f'indexes/{user_id}_search')\n", (6564, 6593), False, 'from services.environment_service import EnvService\n'), ((7790, 7846), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}/{file}"""'], {}), "(f'indexes/{user_id}/{file}')\n", (7817, 7846), False, 'from services.environment_service import EnvService\n'), ((7892, 7913), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7911, 7913), False, 'import traceback\n'), ((8114, 8177), 'services.environment_service.EnvService.find_shared_file', 'EnvService.find_shared_file', (['f"""indexes/{user_id}_search/{file}"""'], {}), "(f'indexes/{user_id}_search/{file}')\n", (8141, 8177), False, 'from services.environment_service import EnvService\n'), ((8244, 8265), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8263, 8265), False, 'import traceback\n'), ((18637, 18658), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (18656, 18658), False, 'import traceback\n'), ((23775, 23821), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file_path]'}), '(input_files=[file_path])\n', (23796, 23821), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((25712, 25758), 'llama_index.GithubRepositoryReader', 'GithubRepositoryReader', ([], {'owner': 'owner', 'repo': 'repo'}), '(owner=owner, repo=repo)\n', (25734, 25758), False, 'from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, QuestionAnswerPrompt, BeautifulSoupWebReader, GPTTreeIndex, GoogleDocsReader, MockLLMPredictor, OpenAIEmbedding, GithubRepositoryReader, MockEmbedding, download_loader, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage, get_response_synthesizer, VectorStoreIndex\n'), ((30818, 30859), 'models.embed_statics_model.EmbedStatics.build_index_progress_embed', 'EmbedStatics.build_index_progress_embed', ([], {}), '()\n', (30857, 30859), False, 'from models.embed_statics_model import EmbedStatics\n'), ((36820, 36888), 'functools.partial', 'partial', (['self.index_youtube_transcript', 'link', 'service_context_no_llm'], {}), '(self.index_youtube_transcript, link, service_context_no_llm)\n', (36827, 36888), False, 'from functools import partial\n'), ((44344, 44391), 'models.embed_statics_model.EmbedStatics.get_index_set_success_embed', 'EmbedStatics.get_index_set_success_embed', (['price'], {}), '(price)\n', (44384, 44391), False, 'from models.embed_statics_model import EmbedStatics\n'), ((45583, 45626), 'models.embed_statics_model.EmbedStatics.get_index_load_success_embed', 'EmbedStatics.get_index_load_success_embed', ([], {}), '()\n', (45624, 45626), False, 'from models.embed_statics_model import EmbedStatics\n'), ((47807, 47854), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""text-davinci-003"""'], {}), "('text-davinci-003')\n", (47834, 47854), False, 'import tiktoken\n'), ((53090, 53137), 'models.embed_statics_model.EmbedStatics.get_index_set_success_embed', 'EmbedStatics.get_index_set_success_embed', (['price'], {}), '(price)\n', (53130, 53137), False, 'from models.embed_statics_model import EmbedStatics\n'), ((55986, 56050), 
'models.embed_statics_model.EmbedStatics.build_index_query_success_embed', 'EmbedStatics.build_index_query_success_embed', (['query', 'total_price'], {}), '(query, total_price)\n', (56030, 56050), False, 'from models.embed_statics_model import EmbedStatics\n'), ((59758, 59855), 'models.embed_statics_model.EmbedStatics.get_index_compose_failure_embed', 'EmbedStatics.get_index_compose_failure_embed', (['"""You must have at least one index to compose."""'], {}), "(\n 'You must have at least one index to compose.')\n", (59802, 59855), False, 'from models.embed_statics_model import EmbedStatics\n'), ((61269, 61292), 'random.randint', 'random.randint', (['(0)', '(9999)'], {}), '(0, 9999)\n', (61283, 61292), False, 'import random\n'), ((61987, 62080), 'discord.SelectOption', 'discord.SelectOption', ([], {'label': 'LONG_TO_SHORT_CACHE[index]', 'value': 'LONG_TO_SHORT_CACHE[index]'}), '(label=LONG_TO_SHORT_CACHE[index], value=\n LONG_TO_SHORT_CACHE[index])\n', (62007, 62080), False, 'import discord\n'), ((65878, 65899), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (65897, 65899), False, 'import traceback\n'), ((67068, 67089), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (67087, 67089), False, 'import traceback\n'), ((6932, 6954), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (6952, 6954), False, 'from services.environment_service import EnvService\n'), ((7328, 7350), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (7348, 7350), False, 'from services.environment_service import EnvService\n'), ((30615, 30675), 'models.embed_statics_model.EmbedStatics.get_index_set_failure_embed', 'EmbedStatics.get_index_set_failure_embed', (['"""Unsupported file"""'], {}), "('Unsupported file')\n", (30655, 30675), False, 'from models.embed_statics_model import EmbedStatics\n'), ((37144, 37211), 'functools.partial', 'partial', (['self.index_github_repository', 'link', 'service_context_no_llm'], {}), '(self.index_github_repository, link, service_context_no_llm)\n', (37151, 37211), False, 'from functools import partial\n'), ((49890, 49902), 'datetime.date.today', 'date.today', ([], {}), '()\n', (49900, 49902), False, 'from datetime import date\n'), ((49911, 49923), 'datetime.date.today', 'date.today', ([], {}), '()\n', (49921, 49923), False, 'from datetime import date\n'), ((50916, 50928), 'datetime.date.today', 'date.today', ([], {}), '()\n', (50926, 50928), False, 'from datetime import date\n'), ((50937, 50949), 'datetime.date.today', 'date.today', ([], {}), '()\n', (50947, 50949), False, 'from datetime import date\n'), ((56237, 56376), 'models.embed_statics_model.EmbedStatics.get_index_query_failure_embed', 'EmbedStatics.get_index_query_failure_embed', (['"""Failed to send query. You may not have an index set, load an index with /index load"""'], {}), "(\n 'Failed to send query. 
You may not have an index set, load an index with /index load'\n )\n", (56279, 56376), False, 'from models.embed_statics_model import EmbedStatics\n'), ((64697, 64782), 'models.embed_statics_model.EmbedStatics.get_index_compose_failure_embed', 'EmbedStatics.get_index_compose_failure_embed', (['"""You must select at least 1 index"""'], {}), "('You must select at least 1 index'\n )\n", (64741, 64782), False, 'from models.embed_statics_model import EmbedStatics\n'), ((65000, 65047), 'models.embed_statics_model.EmbedStatics.get_index_compose_progress_embed', 'EmbedStatics.get_index_compose_progress_embed', ([], {}), '()\n', (65045, 65047), False, 'from models.embed_statics_model import EmbedStatics\n'), ((66356, 66407), 'models.embed_statics_model.EmbedStatics.get_index_compose_success_embed', 'EmbedStatics.get_index_compose_success_embed', (['price'], {}), '(price)\n', (66400, 66407), False, 'from models.embed_statics_model import EmbedStatics\n'), ((14362, 14382), 'pathlib.Path', 'Path', (['temp_file.name'], {}), '(temp_file.name)\n', (14366, 14382), False, 'from pathlib import Path\n'), ((31360, 31380), 'pathlib.Path', 'Path', (['temp_file.name'], {}), '(temp_file.name)\n', (31364, 31380), False, 'from pathlib import Path\n'), ((50065, 50087), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (50085, 50087), False, 'from services.environment_service import EnvService\n'), ((51088, 51110), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (51108, 51110), False, 'from services.environment_service import EnvService\n'), ((52664, 52686), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (52684, 52686), False, 'from services.environment_service import EnvService\n'), ((52856, 52878), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (52876, 52878), False, 'from services.environment_service import EnvService\n'), ((28402, 28529), 'functools.partial', 'functools.partial', (['GPTVectorStoreIndex.from_documents'], {'documents': 'documents', 'service_context': 'service_context', 'use_async': '(True)'}), '(GPTVectorStoreIndex.from_documents, documents=documents,\n service_context=service_context, use_async=True)\n', (28419, 28529), False, 'import functools\n'), ((52999, 53011), 'datetime.date.today', 'date.today', ([], {}), '()\n', (53009, 53011), False, 'from datetime import date\n'), ((53020, 53032), 'datetime.date.today', 'date.today', ([], {}), '()\n', (53030, 53032), False, 'from datetime import date\n'), ((62799, 62892), 'discord.SelectOption', 'discord.SelectOption', ([], {'label': 'LONG_TO_SHORT_CACHE[index]', 'value': 'LONG_TO_SHORT_CACHE[index]'}), '(label=LONG_TO_SHORT_CACHE[index], value=\n LONG_TO_SHORT_CACHE[index])\n', (62819, 62892), False, 'import discord\n'), ((33339, 33441), 'models.embed_statics_model.EmbedStatics.get_index_set_failure_embed', 'EmbedStatics.get_index_set_failure_embed', (['"""Invalid URL or could not connect to the provided URL."""'], {}), "(\n 'Invalid URL or could not connect to the provided URL.')\n", (33379, 33441), False, 'from models.embed_statics_model import EmbedStatics\n')] |
import os
from langchain import OpenAI
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader, SQLDatabase, GPTSQLStructStoreIndex
import sqlalchemy
import time
DatabaseReader = download_loader('DatabaseReader')
databasePath = f'sqlite:///{os.path.dirname(__file__)}/vulns.db'
print('Reading database: ' + databasePath)
dbEngine = sqlalchemy.create_engine(databasePath)
sql_database = SQLDatabase(dbEngine, include_tables=["processed_references"])
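# SQLDatabase wraps the SQLAlchemy engine and exposes only the listed tables,
# so the structured index below is scoped to the `processed_references` table.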
# NOTE: the table_name specified here is the table that you
# want to extract into from unstructured documents.
index = GPTSQLStructStoreIndex(
[],
sql_database=sql_database,
table_name="processed_references",
)
response = index.query('Tell me what would be required to exploit GHSA-9j49-mfvp-vmhm in practice')
print(response)
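# Minimal follow-up sketch (assumption: this legacy llama_index API): the structured
# index turns the natural-language question into SQL, and depending on the installed
# version the generated statement is exposed on the response, e.g.:
# sql_used = response.extra_info.get("sql_query") if response.extra_info else None
# print(sql_used)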
# sqliteReader = DatabaseReader(
# engine=dbEngine
# )
#
# query = f"""
# SELECT normalized_content FROM processed_references WHERE vulnerability_id = 'GHSA-9j49-mfvp-vmhm' UNION SELECT normalized_content FROM processed_references LIMIT 100;
# """
# documents = sqliteReader.load_data(query=query)
# documents = SimpleDirectoryReader('data').load_data()
# llm_predictor = LLMPredictor(llm=OpenAI(model_name="davinci-instruct-beta:2.0.0"))
#
# savePath = f'/{os.path.dirname(__file__)}/../indexes/index.json'
# #
# # index = GPTSimpleVectorIndex(documents)#, llm_predictor=llm_predictor)
# # index.save_to_disk(savePath)
#
# index = GPTSimpleVectorIndex.load_from_disk(savePath)
#
#
# response = index.query("Summarize the vulnerability CVE-2021-23406", response_mode="tree_summarize")
# print(response)
| [
"llama_index.GPTSQLStructStoreIndex",
"llama_index.SQLDatabase",
"llama_index.download_loader"
] | [((223, 256), 'llama_index.download_loader', 'download_loader', (['"""DatabaseReader"""'], {}), "('DatabaseReader')\n", (238, 256), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader, SQLDatabase, GPTSQLStructStoreIndex\n'), ((372, 410), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['databasePath'], {}), '(databasePath)\n', (396, 410), False, 'import sqlalchemy\n'), ((427, 489), 'llama_index.SQLDatabase', 'SQLDatabase', (['dbEngine'], {'include_tables': "['processed_references']"}), "(dbEngine, include_tables=['processed_references'])\n", (438, 489), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader, SQLDatabase, GPTSQLStructStoreIndex\n'), ((611, 704), 'llama_index.GPTSQLStructStoreIndex', 'GPTSQLStructStoreIndex', (['[]'], {'sql_database': 'sql_database', 'table_name': '"""processed_references"""'}), "([], sql_database=sql_database, table_name=\n 'processed_references')\n", (633, 704), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, download_loader, SQLDatabase, GPTSQLStructStoreIndex\n'), ((285, 310), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (300, 310), False, 'import os\n')] |
import argparse
import logging
import sys
import re
import os
import requests
from pathlib import Path
from urllib.parse import urlparse
from llama_index import ServiceContext, StorageContext
from llama_index import set_global_service_context
from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document
from llama_index.llms import OpenAI
from llama_index.readers.file.flat_reader import FlatReader
from llama_index.vector_stores import MilvusVectorStore
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index.node_parser.text import SentenceWindowNodeParser
from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate
from llama_index.postprocessor import MetadataReplacementPostProcessor
from llama_index.postprocessor import SentenceTransformerRerank
#from llama_index.indices import ZillizCloudPipelineIndex
from custom.zilliz.base import ZillizCloudPipelineIndex
from llama_index.indices.query.schema import QueryBundle
from llama_index.schema import BaseNode, ImageNode, MetadataMode
from custom.history_sentence_window import HistorySentenceWindowNodeParser
from custom.llms.QwenLLM import QwenUnofficial
from custom.llms.GeminiLLM import Gemini
from custom.llms.proxy_model import ProxyModel
from pymilvus import MilvusClient
QA_PROMPT_TMPL_STR = (
"请你仔细阅读相关内容,结合历史资料进行回答,每一条史资料使用'出处:《书名》原文内容'的形式标注 (如果回答请清晰无误地引用原文,先给出回答,再贴上对应的原文,使用《书名》[]对原文进行标识),,如果发现资料无法得到答案,就回答不知道 \n"
"搜索的相关历史资料如下所示.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"问题: {query_str}\n"
"答案: "
)
QA_SYSTEM_PROMPT = "你是一个严谨的历史知识问答智能体,你会仔细阅读历史材料并给出准确的回答,你的回答都会非常准确,因为你在回答的之后,使用在《书名》[]内给出原文用来支撑你回答的证据.并且你会在开头说明原文是否有回答所需的知识"
REFINE_PROMPT_TMPL_STR = (
"你是一个历史知识回答修正机器人,你严格按以下方式工作"
"1.只有原答案为不知道时才进行修正,否则输出原答案的内容\n"
"2.修正的时候为了体现你的精准和客观,你非常喜欢使用《书名》[]将原文展示出来.\n"
"3.如果感到疑惑的时候,就用原答案的内容回答。"
"新的知识: {context_msg}\n"
"问题: {query_str}\n"
"原答案: {existing_answer}\n"
"新答案: "
)
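# The templates above are kept in Chinese because the corpus and target LLMs are
# Chinese. Roughly: the QA prompt instructs the model to answer only from the
# retrieved historical passages, cite originals in the form 出处:《书名》原文, and reply
# "不知道" (don't know) when the sources are insufficient; the refine prompt only
# revises an answer when the original answer was "不知道", again quoting sources
# inside 《书名》[] brackets.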
def is_valid_url(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
except ValueError:
return False
def is_github_folder_url(url):
return url.startswith('https://raw.githubusercontent.com/') and '.' not in os.path.basename(url)
def get_branch_head_sha(owner, repo, branch):
url = f"https://api.github.com/repos/{owner}/{repo}/git/ref/heads/{branch}"
response = requests.get(url)
data = response.json()
sha = data['object']['sha']
return sha
def get_github_repo_contents(repo_url):
# repo_url example: https://raw.githubusercontent.com/wxywb/history_rag/master/data/history_24/
repo_owner = repo_url.split('/')[3]
repo_name = repo_url.split('/')[4]
branch = repo_url.split('/')[5]
folder_path = '/'.join(repo_url.split('/')[6:])
sha = get_branch_head_sha(repo_owner, repo_name, branch)
url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/git/trees/{sha}?recursive=1"
try:
response = requests.get(url)
if response.status_code == 200:
data = response.json()
raw_urls = []
for file in data['tree']:
if file['path'].startswith(folder_path) and file['path'].endswith('.txt'):
raw_url = f"https://raw.githubusercontent.com/{repo_owner}/{repo_name}/{branch}/{file['path']}"
raw_urls.append(raw_url)
return raw_urls
else:
print(f"Failed to fetch contents. Status code: {response.status_code}")
except Exception as e:
print(f"Failed to fetch contents. Error: {str(e)}")
return []
class Executor:
def __init__(self, model):
pass
def build_index(self, path, overwrite):
pass
def build_query_engine(self):
pass
def delete_file(self, path):
pass
def query(self, question):
pass
class MilvusExecutor(Executor):
def __init__(self, config):
self.index = None
self.query_engine = None
self.config = config
self.node_parser = HistorySentenceWindowNodeParser.from_defaults(
sentence_splitter=lambda text: re.findall("[^,.;。?!]+[,.;。?!]?", text),
window_size=config.milvus.window_size,
window_metadata_key="window",
original_text_metadata_key="original_text",)
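        # Sentence-window parsing: each node's text is a single sentence, while the
        # surrounding `window_size` sentences are stored in the "window" metadata key;
        # MetadataReplacementPostProcessor in build_query_engine later swaps the lone
        # sentence for that wider window before answer synthesis.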
embed_model = HuggingFaceEmbedding(model_name=config.embedding.name)
        # Use the Qwen (Tongyi Qianwen) model
if config.llm.name.find("qwen") != -1:
llm = QwenUnofficial(temperature=config.llm.temperature, model=config.llm.name, max_tokens=2048)
elif config.llm.name.find("gemini") != -1:
llm = Gemini(temperature=config.llm.temperature, model_name=config.llm.name, max_tokens=2048)
elif 'proxy_model' in config.llm:
llm = ProxyModel(model_name=config.llm.name, api_base=config.llm.api_base, api_key=config.llm.api_key,
temperature=config.llm.temperature, max_tokens=2048)
print(f"使用{config.llm.name},PROXY_SERVER_URL为{config.llm.api_base},PROXY_API_KEY为{config.llm.api_key}")
else:
api_base = None
if 'api_base' in config.llm:
api_base = config.llm.api_base
llm = OpenAI(api_base = api_base, temperature=config.llm.temperature, model=config.llm.name, max_tokens=2048)
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
set_global_service_context(service_context)
rerank_k = config.milvus.rerank_topk
self.rerank_postprocessor = SentenceTransformerRerank(
model=config.rerank.name, top_n=rerank_k)
self._milvus_client = None
self._debug = False
def set_debug(self, mode):
self._debug = mode
def build_index(self, path, overwrite):
config = self.config
vector_store = MilvusVectorStore(
uri = f"http://{config.milvus.host}:{config.milvus.port}",
collection_name = config.milvus.collection_name,
overwrite=overwrite,
dim=config.embedding.dim)
self._milvus_client = vector_store.milvusclient
if path.endswith('.txt'):
if os.path.exists(path) is False:
print(f'(rag) 没有找到文件{path}')
return
else:
documents = FlatReader().load_data(Path(path))
documents[0].metadata['file_name'] = documents[0].metadata['filename']
elif os.path.isfile(path):
print('(rag) 目前仅支持txt文件')
elif os.path.isdir(path):
if os.path.exists(path) is False:
print(f'(rag) 没有找到目录{path}')
return
else:
documents = SimpleDirectoryReader(path).load_data()
else:
return
storage_context = StorageContext.from_defaults(vector_store=vector_store)
nodes = self.node_parser.get_nodes_from_documents(documents)
self.index = VectorStoreIndex(nodes, storage_context=storage_context, show_progress=True)
def _get_index(self):
config = self.config
vector_store = MilvusVectorStore(
uri = f"http://{config.milvus.host}:{config.milvus.port}",
collection_name = config.milvus.collection_name,
dim=config.embedding.dim)
self.index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
self._milvus_client = vector_store.milvusclient
def build_query_engine(self):
config = self.config
if self.index is None:
self._get_index()
self.query_engine = self.index.as_query_engine(node_postprocessors=[
self.rerank_postprocessor,
MetadataReplacementPostProcessor(target_metadata_key="window")
])
self.query_engine._retriever.similarity_top_k=config.milvus.retrieve_topk
message_templates = [
ChatMessage(content=QA_SYSTEM_PROMPT, role=MessageRole.SYSTEM),
ChatMessage(
content=QA_PROMPT_TMPL_STR,
role=MessageRole.USER,
),
]
chat_template = ChatPromptTemplate(message_templates=message_templates)
self.query_engine.update_prompts(
{"response_synthesizer:text_qa_template": chat_template}
)
self.query_engine._response_synthesizer._refine_template.conditionals[0][1].message_templates[0].content = REFINE_PROMPT_TMPL_STR
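        # End-to-end query path: retrieve `retrieve_topk` sentence nodes from Milvus,
        # rerank them down to `rerank_topk`, replace each sentence with its stored
        # window, then synthesize with the custom QA/refine templates defined above.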
def delete_file(self, path):
config = self.config
if self._milvus_client is None:
self._get_index()
num_entities_prev = self._milvus_client.query(collection_name='history_rag',filter="",output_fields=["count(*)"])[0]["count(*)"]
res = self._milvus_client.delete(collection_name=config.milvus.collection_name, filter=f"file_name=='{path}'")
num_entities = self._milvus_client.query(collection_name='history_rag',filter="",output_fields=["count(*)"])[0]["count(*)"]
print(f'(rag) 现有{num_entities}条,删除{num_entities_prev - num_entities}条数据')
def query(self, question):
if self.index is None:
self._get_index()
if question.endswith('?') or question.endswith('?'):
question = question[:-1]
if self._debug is True:
contexts = self.query_engine.retrieve(QueryBundle(question))
for i, context in enumerate(contexts):
print(f'{question}', i)
content = context.node.get_content(metadata_mode=MetadataMode.LLM)
print(content)
print('-------------------------------------------------------参考资料---------------------------------------------------------')
response = self.query_engine.query(question)
return response
class PipelineExecutor(Executor):
def __init__(self, config):
self.ZILLIZ_CLUSTER_ID = os.getenv("ZILLIZ_CLUSTER_ID")
self.ZILLIZ_TOKEN = os.getenv("ZILLIZ_TOKEN")
self.ZILLIZ_PROJECT_ID = os.getenv("ZILLIZ_PROJECT_ID")
self.ZILLIZ_CLUSTER_ENDPOINT = f"https://{self.ZILLIZ_CLUSTER_ID}.api.gcp-us-west1.zillizcloud.com"
self.config = config
        if not self.ZILLIZ_CLUSTER_ID:
print('ZILLIZ_CLUSTER_ID 参数为空')
exit()
        if not self.ZILLIZ_TOKEN:
print('ZILLIZ_TOKEN 参数为空')
exit()
self._debug = False
if config.llm.name.find("qwen") != -1:
llm = QwenUnofficial(temperature=config.llm.temperature, model=config.llm.name, max_tokens=2048)
elif config.llm.name.find("gemini") != -1:
llm = Gemini(model_name=config.llm.name, temperature=config.llm.temperature, max_tokens=2048)
else:
api_base = None
if 'api_base' in config.llm:
api_base = config.llm.api_base
llm = OpenAI(api_base = api_base, temperature=config.llm.temperature, model=config.llm.name, max_tokens=2048)
service_context = ServiceContext.from_defaults(llm=llm, embed_model=None)
self.service_context = service_context
set_global_service_context(service_context)
self._initialize_pipeline(service_context)
#rerank_k = config.rerankl
#self.rerank_postprocessor = SentenceTransformerRerank(
# model="BAAI/bge-reranker-large", top_n=rerank_k)
def set_debug(self, mode):
self._debug = mode
def _initialize_pipeline(self, service_context: ServiceContext):
config = self.config
try:
self.index = ZillizCloudPipelineIndex(
project_id = self.ZILLIZ_PROJECT_ID,
cluster_id=self.ZILLIZ_CLUSTER_ID,
token=self.ZILLIZ_TOKEN,
collection_name=config.pipeline.collection_name,
service_context=service_context,
)
if len(self._list_pipeline_ids()) == 0:
self.index.create_pipelines(
metadata_schema={"digest_from":"VarChar"}, chunk_size=self.config.pipeline.chunk_size
)
except Exception as e:
print('(rag) zilliz pipeline 连接异常', str(e))
exit()
try:
self._milvus_client = MilvusClient(
uri=self.ZILLIZ_CLUSTER_ENDPOINT,
token=self.ZILLIZ_TOKEN
)
except Exception as e:
print('(rag) zilliz cloud 连接异常', str(e))
def build_index(self, path, overwrite):
config = self.config
if not is_valid_url(path) or 'github' not in path:
print('(rag) 不是一个合法的url,请尝试`https://raw.githubusercontent.com/wxywb/history_rag/master/data/history_24/baihuasanguozhi.txt`')
return
if overwrite == True:
self._milvus_client.drop_collection(config.pipeline.collection_name)
pipeline_ids = self._list_pipeline_ids()
self._delete_pipeline_ids(pipeline_ids)
self._initialize_pipeline(self.service_context)
if is_github_folder_url(path):
urls = get_github_repo_contents(path)
for url in urls:
print(f'(rag) 正在构建索引 {url}')
self.build_index(url, False) # already deleted original collection
elif path.endswith('.txt'):
self.index.insert_doc_url(
url=path,
metadata={"digest_from": HistorySentenceWindowNodeParser.book_name(os.path.basename(path))},
)
else:
print('(rag) 只有github上以txt结尾或文件夹可以被支持。')
def build_query_engine(self):
config = self.config
self.query_engine = self.index.as_query_engine(
search_top_k=config.pipeline.retrieve_topk)
message_templates = [
ChatMessage(content=QA_SYSTEM_PROMPT, role=MessageRole.SYSTEM),
ChatMessage(
content=QA_PROMPT_TMPL_STR,
role=MessageRole.USER,
),
]
chat_template = ChatPromptTemplate(message_templates=message_templates)
self.query_engine.update_prompts(
{"response_synthesizer:text_qa_template": chat_template}
)
self.query_engine._response_synthesizer._refine_template.conditionals[0][1].message_templates[0].content = REFINE_PROMPT_TMPL_STR
def delete_file(self, path):
config = self.config
if self._milvus_client is None:
            self._initialize_pipeline(self.service_context)
num_entities_prev = self._milvus_client.query(collection_name='history_rag',filter="",output_fields=["count(*)"])[0]["count(*)"]
res = self._milvus_client.delete(collection_name=config.milvus.collection_name, filter=f"doc_name=='{path}'")
num_entities = self._milvus_client.query(collection_name='history_rag',filter="",output_fields=["count(*)"])[0]["count(*)"]
print(f'(rag) 现有{num_entities}条,删除{num_entities_prev - num_entities}条数据')
def query(self, question):
if self.index is None:
            self._initialize_pipeline(self.service_context)
if question.endswith("?") or question.endswith("?"):
question = question[:-1]
if self._debug is True:
contexts = self.query_engine.retrieve(QueryBundle(question))
for i, context in enumerate(contexts):
print(f'{question}', i)
content = context.node.get_content(metadata_mode=MetadataMode.LLM)
print(content)
print('-------------------------------------------------------参考资料---------------------------------------------------------')
response = self.query_engine.query(question)
return response
def _list_pipeline_ids(self):
url = f"https://controller.api.gcp-us-west1.zillizcloud.com/v1/pipelines?projectId={self.ZILLIZ_PROJECT_ID}"
headers = {
"Authorization": f"Bearer {self.ZILLIZ_TOKEN}",
"Accept": "application/json",
"Content-Type": "application/json",
}
collection_name = self.config.milvus.collection_name
response = requests.get(url, headers=headers)
if response.status_code != 200:
raise RuntimeError(response.text)
response_dict = response.json()
if response_dict["code"] != 200:
raise RuntimeError(response_dict)
pipeline_ids = []
for pipeline in response_dict['data']:
if collection_name in pipeline['name']:
pipeline_ids.append(pipeline['pipelineId'])
return pipeline_ids
def _delete_pipeline_ids(self, pipeline_ids):
for pipeline_id in pipeline_ids:
url = f"https://controller.api.gcp-us-west1.zillizcloud.com/v1/pipelines/{pipeline_id}/"
headers = {
"Authorization": f"Bearer {self.ZILLIZ_TOKEN}",
"Accept": "application/json",
"Content-Type": "application/json",
}
response = requests.delete(url, headers=headers)
if response.status_code != 200:
raise RuntimeError(response.text)
| [
"llama_index.SimpleDirectoryReader",
"llama_index.postprocessor.SentenceTransformerRerank",
"llama_index.ServiceContext.from_defaults",
"llama_index.prompts.ChatMessage",
"llama_index.vector_stores.MilvusVectorStore",
"llama_index.llms.OpenAI",
"llama_index.readers.file.flat_reader.FlatReader",
"llama_index.StorageContext.from_defaults",
"llama_index.indices.query.schema.QueryBundle",
"llama_index.set_global_service_context",
"llama_index.postprocessor.MetadataReplacementPostProcessor",
"llama_index.prompts.ChatPromptTemplate",
"llama_index.VectorStoreIndex",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.embeddings.HuggingFaceEmbedding"
] | [((2448, 2465), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2460, 2465), False, 'import requests\n'), ((2063, 2076), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (2071, 2076), False, 'from urllib.parse import urlparse\n'), ((3032, 3049), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3044, 3049), False, 'import requests\n'), ((4423, 4477), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'config.embedding.name'}), '(model_name=config.embedding.name)\n', (4443, 4477), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((5451, 5513), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (5479, 5513), False, 'from llama_index import ServiceContext, StorageContext\n'), ((5522, 5565), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (5548, 5565), False, 'from llama_index import set_global_service_context\n'), ((5647, 5714), 'llama_index.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'model': 'config.rerank.name', 'top_n': 'rerank_k'}), '(model=config.rerank.name, top_n=rerank_k)\n', (5672, 5714), False, 'from llama_index.postprocessor import SentenceTransformerRerank\n'), ((5955, 6132), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'uri': 'f"""http://{config.milvus.host}:{config.milvus.port}"""', 'collection_name': 'config.milvus.collection_name', 'overwrite': 'overwrite', 'dim': 'config.embedding.dim'}), "(uri=f'http://{config.milvus.host}:{config.milvus.port}',\n collection_name=config.milvus.collection_name, overwrite=overwrite, dim\n =config.embedding.dim)\n", (5972, 6132), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((6938, 6993), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (6966, 6993), False, 'from llama_index import ServiceContext, StorageContext\n'), ((7084, 7160), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'storage_context': 'storage_context', 'show_progress': '(True)'}), '(nodes, storage_context=storage_context, show_progress=True)\n', (7100, 7160), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document\n'), ((7240, 7391), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'uri': 'f"""http://{config.milvus.host}:{config.milvus.port}"""', 'collection_name': 'config.milvus.collection_name', 'dim': 'config.embedding.dim'}), "(uri=f'http://{config.milvus.host}:{config.milvus.port}',\n collection_name=config.milvus.collection_name, dim=config.embedding.dim)\n", (7257, 7391), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((7450, 7511), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (7484, 7511), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document\n'), ((8241, 8296), 'llama_index.prompts.ChatPromptTemplate', 'ChatPromptTemplate', ([], {'message_templates': 'message_templates'}), '(message_templates=message_templates)\n', (8259, 8296), False, 'from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate\n'), ((9980, 10010), 'os.getenv', 'os.getenv', 
(['"""ZILLIZ_CLUSTER_ID"""'], {}), "('ZILLIZ_CLUSTER_ID')\n", (9989, 10010), False, 'import os\n'), ((10039, 10064), 'os.getenv', 'os.getenv', (['"""ZILLIZ_TOKEN"""'], {}), "('ZILLIZ_TOKEN')\n", (10048, 10064), False, 'import os\n'), ((10098, 10128), 'os.getenv', 'os.getenv', (['"""ZILLIZ_PROJECT_ID"""'], {}), "('ZILLIZ_PROJECT_ID')\n", (10107, 10128), False, 'import os\n'), ((11138, 11193), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'None'}), '(llm=llm, embed_model=None)\n', (11166, 11193), False, 'from llama_index import ServiceContext, StorageContext\n'), ((11249, 11292), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (11275, 11292), False, 'from llama_index import set_global_service_context\n'), ((14123, 14178), 'llama_index.prompts.ChatPromptTemplate', 'ChatPromptTemplate', ([], {'message_templates': 'message_templates'}), '(message_templates=message_templates)\n', (14141, 14178), False, 'from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate\n'), ((16170, 16204), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (16182, 16204), False, 'import requests\n'), ((2283, 2304), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (2299, 2304), False, 'import os\n'), ((4568, 4662), 'custom.llms.QwenLLM.QwenUnofficial', 'QwenUnofficial', ([], {'temperature': 'config.llm.temperature', 'model': 'config.llm.name', 'max_tokens': '(2048)'}), '(temperature=config.llm.temperature, model=config.llm.name,\n max_tokens=2048)\n', (4582, 4662), False, 'from custom.llms.QwenLLM import QwenUnofficial\n'), ((6573, 6593), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (6587, 6593), False, 'import os\n'), ((8020, 8082), 'llama_index.prompts.ChatMessage', 'ChatMessage', ([], {'content': 'QA_SYSTEM_PROMPT', 'role': 'MessageRole.SYSTEM'}), '(content=QA_SYSTEM_PROMPT, role=MessageRole.SYSTEM)\n', (8031, 8082), False, 'from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate\n'), ((8096, 8158), 'llama_index.prompts.ChatMessage', 'ChatMessage', ([], {'content': 'QA_PROMPT_TMPL_STR', 'role': 'MessageRole.USER'}), '(content=QA_PROMPT_TMPL_STR, role=MessageRole.USER)\n', (8107, 8158), False, 'from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate\n'), ((10611, 10705), 'custom.llms.QwenLLM.QwenUnofficial', 'QwenUnofficial', ([], {'temperature': 'config.llm.temperature', 'model': 'config.llm.name', 'max_tokens': '(2048)'}), '(temperature=config.llm.temperature, model=config.llm.name,\n max_tokens=2048)\n', (10625, 10705), False, 'from custom.llms.QwenLLM import QwenUnofficial\n'), ((11702, 11913), 'custom.zilliz.base.ZillizCloudPipelineIndex', 'ZillizCloudPipelineIndex', ([], {'project_id': 'self.ZILLIZ_PROJECT_ID', 'cluster_id': 'self.ZILLIZ_CLUSTER_ID', 'token': 'self.ZILLIZ_TOKEN', 'collection_name': 'config.pipeline.collection_name', 'service_context': 'service_context'}), '(project_id=self.ZILLIZ_PROJECT_ID, cluster_id=self\n .ZILLIZ_CLUSTER_ID, token=self.ZILLIZ_TOKEN, collection_name=config.\n pipeline.collection_name, service_context=service_context)\n', (11726, 11913), False, 'from custom.zilliz.base import ZillizCloudPipelineIndex\n'), ((12376, 12447), 'pymilvus.MilvusClient', 'MilvusClient', ([], {'uri': 'self.ZILLIZ_CLUSTER_ENDPOINT', 'token': 'self.ZILLIZ_TOKEN'}), 
'(uri=self.ZILLIZ_CLUSTER_ENDPOINT, token=self.ZILLIZ_TOKEN)\n', (12388, 12447), False, 'from pymilvus import MilvusClient\n'), ((13902, 13964), 'llama_index.prompts.ChatMessage', 'ChatMessage', ([], {'content': 'QA_SYSTEM_PROMPT', 'role': 'MessageRole.SYSTEM'}), '(content=QA_SYSTEM_PROMPT, role=MessageRole.SYSTEM)\n', (13913, 13964), False, 'from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate\n'), ((13978, 14040), 'llama_index.prompts.ChatMessage', 'ChatMessage', ([], {'content': 'QA_PROMPT_TMPL_STR', 'role': 'MessageRole.USER'}), '(content=QA_PROMPT_TMPL_STR, role=MessageRole.USER)\n', (13989, 14040), False, 'from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate\n'), ((17063, 17100), 'requests.delete', 'requests.delete', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (17078, 17100), False, 'import requests\n'), ((4728, 4819), 'custom.llms.GeminiLLM.Gemini', 'Gemini', ([], {'temperature': 'config.llm.temperature', 'model_name': 'config.llm.name', 'max_tokens': '(2048)'}), '(temperature=config.llm.temperature, model_name=config.llm.name,\n max_tokens=2048)\n', (4734, 4819), False, 'from custom.llms.GeminiLLM import Gemini\n'), ((6292, 6312), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6306, 6312), False, 'import os\n'), ((6657, 6676), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (6670, 6676), False, 'import os\n'), ((9436, 9457), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', (['question'], {}), '(question)\n', (9447, 9457), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((10771, 10862), 'custom.llms.GeminiLLM.Gemini', 'Gemini', ([], {'model_name': 'config.llm.name', 'temperature': 'config.llm.temperature', 'max_tokens': '(2048)'}), '(model_name=config.llm.name, temperature=config.llm.temperature,\n max_tokens=2048)\n', (10777, 10862), False, 'from custom.llms.GeminiLLM import Gemini\n'), ((11007, 11113), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'api_base': 'api_base', 'temperature': 'config.llm.temperature', 'model': 'config.llm.name', 'max_tokens': '(2048)'}), '(api_base=api_base, temperature=config.llm.temperature, model=config.\n llm.name, max_tokens=2048)\n', (11013, 11113), False, 'from llama_index.llms import OpenAI\n'), ((15313, 15334), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', (['question'], {}), '(question)\n', (15324, 15334), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((4209, 4248), 're.findall', 're.findall', (['"""[^,.;。?!]+[,.;。?!]?"""', 'text'], {}), "('[^,.;。?!]+[,.;。?!]?', text)\n", (4219, 4248), False, 'import re\n'), ((4876, 5033), 'custom.llms.proxy_model.ProxyModel', 'ProxyModel', ([], {'model_name': 'config.llm.name', 'api_base': 'config.llm.api_base', 'api_key': 'config.llm.api_key', 'temperature': 'config.llm.temperature', 'max_tokens': '(2048)'}), '(model_name=config.llm.name, api_base=config.llm.api_base,\n api_key=config.llm.api_key, temperature=config.llm.temperature,\n max_tokens=2048)\n', (4886, 5033), False, 'from custom.llms.proxy_model import ProxyModel\n'), ((5320, 5426), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'api_base': 'api_base', 'temperature': 'config.llm.temperature', 'model': 'config.llm.name', 'max_tokens': '(2048)'}), '(api_base=api_base, temperature=config.llm.temperature, model=config.\n llm.name, max_tokens=2048)\n', (5326, 5426), False, 'from llama_index.llms import OpenAI\n'), ((6460, 6470), 'pathlib.Path', 'Path', 
(['path'], {}), '(path)\n', (6464, 6470), False, 'from pathlib import Path\n'), ((7821, 7883), 'llama_index.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (7853, 7883), False, 'from llama_index.postprocessor import MetadataReplacementPostProcessor\n'), ((6437, 6449), 'llama_index.readers.file.flat_reader.FlatReader', 'FlatReader', ([], {}), '()\n', (6447, 6449), False, 'from llama_index.readers.file.flat_reader import FlatReader\n'), ((6693, 6713), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6707, 6713), False, 'import os\n'), ((6838, 6865), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {}), '(path)\n', (6859, 6865), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document\n'), ((13579, 13601), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (13595, 13601), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ================================================== #
# This file is a part of PYGPT package #
# Website: https://pygpt.net #
# GitHub: https://github.com/szczyglis-dev/py-gpt #
# MIT License #
# Created By : Marcin Szczygliński #
# Updated Date: 2024.02.28 02:00:00 #
# ================================================== #
import os.path
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.core.indices.base import BaseIndex
from llama_index.core.indices.service_context import ServiceContext
from llama_index.core.indices.vector_store.base import VectorStoreIndex
from pygpt_net.provider.vector_stores.base import BaseStore # <--- vector store must inherit from BaseStore
class ExampleVectorStore(BaseStore):
def __init__(self, *args, **kwargs):
super(ExampleVectorStore, self).__init__(*args, **kwargs)
"""
Example vector store provider.
This example is based on the `SimpleProvider` (SimpleVectorStore) from the `pygpt_net.provider.vector_stores.simple`.
See `pygpt_net.provider.vector_stores` for more examples.
The rest of the shared methods (like `exists`, `delete`, `truncate`, etc.) are declared in the base class: `BaseStore`.
:param args: args
:param kwargs: kwargs
"""
self.window = kwargs.get('window', None)
self.id = "example_store" # identifier must be unique
self.prefix = "example_" # prefix for index config files subdirectory in "idx" directory in %workdir%
self.indexes = {} # indexes cache dictionary (in-memory)
def create(self, id: str):
"""
Create the empty index with the provided `id` (`base` is default)
In this example, we create an empty index with the name `id` and store it in the `self.indexes` dictionary.
Example is a simple copy of the `SimpleVectorStore` provider.
The `create` method is called when the index does not exist.
See `pygpt_net.core.idx` for more details how it is handled internally.
:param id: index name
"""
path = self.get_path(id) # get path for the index configuration, declared in the `BaseStore` class
# check if index does not exist on disk and create it if not exists
if not os.path.exists(path):
index = VectorStoreIndex([]) # create empty index
# store the index on disk
self.store(
id=id,
index=index,
)
def get(self, id: str, service_context: ServiceContext = None) -> BaseIndex:
"""
Get the index instance with the provided `id` (`base` is default)
In this example, we get the index with the name `id` from the `self.indexes` dictionary.
The `get` method is called when getting the index instance.
It must return the `BaseIndex` index instance.
See `pygpt_net.core.idx` for more details how it is handled internally.
:param id: index name
:param service_context: Service context
:return: index instance
"""
# check if index exists on disk and load it
if not self.exists(id):
# if index does not exist, then create it
self.create(id)
# get path for the index configuration on disk (in "%workdir%/idx" directory)
path = self.get_path(id)
# get the storage context
storage_context = StorageContext.from_defaults(
persist_dir=path,
)
# load index from storage and update it in the `self.indexes` dictionary
self.indexes[id] = load_index_from_storage(
storage_context,
service_context=service_context,
)
# return the index instance
return self.indexes[id]
def store(self, id: str, index: BaseIndex = None):
"""
Store (persist) the index instance with the provided `id` (`base` is default)
In this example, we store the index with the name `id` in the `self.indexes` dictionary.
The `store` method is called when storing (persisting) index to disk/db.
It must provide logic to store the index in the storage.
See `pygpt_net.core.idx` for more details how it is handled internally.
:param id: index name
:param index: index instance
"""
# prepare the index instance
if index is None:
index = self.indexes[id]
# get path for the index configuration on disk (in "%workdir%/idx" directory)
path = self.get_path(id)
# persist the index on disk
index.storage_context.persist(
persist_dir=path,
)
# update the index in the `self.indexes` dictionary
self.indexes[id] = index
| [
"llama_index.core.StorageContext.from_defaults",
"llama_index.core.load_index_from_storage",
"llama_index.core.indices.vector_store.base.VectorStoreIndex"
] | [((3613, 3659), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'path'}), '(persist_dir=path)\n', (3641, 3659), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((3792, 3865), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (3815, 3865), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((2500, 2520), 'llama_index.core.indices.vector_store.base.VectorStoreIndex', 'VectorStoreIndex', (['[]'], {}), '([])\n', (2516, 2520), False, 'from llama_index.core.indices.vector_store.base import VectorStoreIndex\n')] |
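A minimal round-trip sketch of what the provider above wraps, using only the llama_index.core calls already imported in the example; the directory name and document text are placeholders, and a default embedding model (e.g. an OpenAI key) is assumed to be configured.

from llama_index.core import Document, StorageContext, VectorStoreIndex, load_index_from_storage

persist_dir = "./idx/example_base"  # assumed path, analogous to get_path(id)

# build and persist an index, as ExampleVectorStore.create()/store() do
index = VectorStoreIndex.from_documents([Document(text="hello vector store")])
index.storage_context.persist(persist_dir=persist_dir)

# reload it later, as ExampleVectorStore.get() does
storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
index = load_index_from_storage(storage_context)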
from __future__ import annotations
from typing import Optional
import os
from llama_index.core import ServiceContext
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.core.llms import OpenAI as LlamaIndexOpenAI
from llama_index.core.llms.llm import LLM # noqa: TCH002
from llama_index.core.llms.openai_utils import ALL_AVAILABLE_MODELS, CHAT_MODELS
from openssa.utils.config import Config
# import sys
# import logging
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# Add the extended models to the list of available models in LlamaIndex
_EXTENDED_CHAT_MODELS = {
"01-ai/Yi-34B-Chat": 4096,
"Intel/neural-chat-7b-v3-1": 4096,
"llama2-70b": 4096,
"llama2-13b": 4096,
"llama2-7b": 4096,
}
ALL_AVAILABLE_MODELS.update(_EXTENDED_CHAT_MODELS)
CHAT_MODELS.update(_EXTENDED_CHAT_MODELS)
# TODO: there should be a single Aitomatic api_base and api_key
Config.AITOMATIC_API_KEY: Optional[str] = os.environ.get("AITOMATIC_API_KEY")
Config.AITOMATIC_API_URL: Optional[str] = (
os.environ.get("AITOMATIC_API_URL")
or "https://aimo-api-mvp.platform.aitomatic.com/api/v1"
)
Config.AITOMATIC_API_URL_7B: Optional[str] = (
os.environ.get("AITOMATIC_API_URL_7B") or "https://llama2-7b.lepton.run/api/v1"
)
Config.AITOMATIC_API_URL_70B: Optional[str] = (
os.environ.get("AITOMATIC_API_URL_70B") or "https://llama2-70b.lepton.run/api/v1"
)
Config.OPENAI_API_KEY: Optional[str] = os.environ.get("OPENAI_API_KEY")
Config.OPENAI_API_URL: Optional[str] = (
os.environ.get("OPENAI_API_URL") or "https://api.openai.com/v1"
)
Config.AZURE_OPENAI_API_KEY: Optional[str] = os.environ.get("AZURE_OPENAI_API_KEY")
Config.AZURE_OPENAI_API_URL: Optional[str] = (
os.environ.get("AZURE_OPENAI_API_URL") or "https://aiva-japan.openai.azure.com"
)
Config.LEPTON_API_KEY: Optional[str] = os.environ.get("LEPTON_API_KEY")
Config.LEPTON_API_URL: Optional[str] = (
os.environ.get("LEPTON_API_URL") or "https://llama2-7b.lepton.run/api/v1"
)
class LlamaIndexApi: # no-pylint: disable=too-many-public-methods
class LLMs:
"""
This class represents the LLMs from different services
"""
class _AnOpenAIAPIProvider:
"""
This class represents an OpenAI-API provider
"""
@classmethod
def _get(cls, model=None, api_base=None, api_key=None, additional_kwargs=None) -> LLM:
if model is None:
if api_base is None:
llm = LlamaIndexOpenAI(api_key=api_key, additional_kwargs=additional_kwargs)
else:
llm = LlamaIndexOpenAI(api_key=api_key, additional_kwargs=additional_kwargs)
elif api_base is None:
llm = LlamaIndexOpenAI(api_key=api_key, additional_kwargs=additional_kwargs)
else:
llm = LlamaIndexOpenAI(model=model, api_base=api_base, api_key=api_key)
# Forcibly set the get_openai method to the _get_client method
llm.__dict__['get_openai'] = llm._get_client # pylint: disable=protected-access
return llm
class Aitomatic(_AnOpenAIAPIProvider):
"""
This class represents the Aitomatic-hosted LLMs
"""
@classmethod
def get(cls, model=None, api_base=None, api_key=None, additional_kwargs=None) -> LLM:
if model is None:
model = "llama2-7b"
if api_key is None:
api_key = Config.AITOMATIC_API_KEY
return super()._get(model=model, api_base=api_base, api_key=api_key, additional_kwargs=additional_kwargs)
@classmethod
def get_llama2_70b(cls) -> LLM:
# TODO: there should be a single Aitomatic api_base and api_key
llm = cls.get(
model="llama2-70b",
api_base=Config.AITOMATIC_API_URL_70B,
api_key=Config.LEPTON_API_KEY,
)
return llm
@classmethod
def get_llama2_7b(cls) -> LLM:
# TODO: there should be a single Aitomatic api_base and api_key
llm = cls.get(
model="llama2-7b",
api_base=Config.AITOMATIC_API_URL,
api_key=Config.LEPTON_API_KEY,
)
return llm
@classmethod
def get_13b(cls) -> LLM:
# TODO: there should be a single Aitomatic api_base and api_key
# not running
llm = cls.get(
model="gpt-3.5-turbo-0613",
api_base="http://35.199.34.91:8000/v1",
additional_kwargs={"stop": "\n"},
)
return llm
@classmethod
def get_yi_34b(cls) -> LLM: # running
llm = cls.get(
model="01-ai/Yi-34B-Chat",
api_base="http://35.230.174.89:8000/v1",
additional_kwargs={"stop": "\n###"},
)
return llm
@classmethod
def get_intel_neural_chat_7b(cls) -> LLM: # running
llm = cls.get(
model="Intel/neural-chat-7b-v3-1",
api_base="http://34.145.174.152:8000/v1",
)
return llm
@classmethod
def get_aimo(cls):
llm = cls.get(api_base=os.environ.get("AIMO_STANDARD_URL_BASE"))
return llm
class OpenAI(_AnOpenAIAPIProvider):
"""
This class represents the OpenAI-hosted LLMs
"""
@classmethod
def get(cls, model=None) -> LLM:
if model is None:
model = "gpt-3.5-turbo-1106"
return super()._get(model=model, api_key=Config.OPENAI_API_KEY)
@classmethod
def get_gpt_35_turbo_1106(cls) -> LLM:
return cls.get(model="gpt-3.5-turbo-1106")
@classmethod
def get_gpt_35_turbo_0613(cls) -> LLM:
return cls.get(model="gpt-3.5-turbo")
@classmethod
def get_gpt_35_turbo(cls) -> LLM:
return cls.get(model="gpt-3.5-turbo-0613")
@classmethod
def get_gpt_4(cls) -> LLM:
return cls.get(model="gpt-4")
class Azure:
"""
This class represents the Azure-hosted LLMs
"""
@classmethod
def _get(cls, model=None, engine=None, api_base=None) -> LLM:
if model is None:
model = "gpt-35-turbo-16k"
if engine is None:
engine = "aiva-dev-gpt35"
if api_base is None:
api_base = Config.AZURE_OPENAI_API_URL
return AzureOpenAI(
engine=model,
model=model,
temperature=0.0,
api_version="2023-09-01-preview",
api_key=Config.AZURE_OPENAI_API_KEY,
azure_endpoint=api_base,
)
@classmethod
def get(cls) -> LLM:
return cls.get_gpt_35()
@classmethod
def get_gpt_35(cls) -> LLM:
return cls._get(model="gpt-35-turbo")
@classmethod
def get_gpt_35_16k(cls) -> LLM:
return cls._get(model="gpt-35-turbo-16k")
@classmethod
def get_gpt_4(cls) -> LLM:
return cls.get_gpt_4_32k()
@classmethod
def get_gpt_4_32k(cls) -> LLM:
return cls._get(model="gpt-4-32k")
class Embeddings:
"""
This class represents the different embedding services
"""
class Aitomatic:
"""
This class represents the Aitomatic-hosted embedding service
"""
@classmethod
def _get(cls, api_base=None, api_key=None) -> OpenAIEmbedding:
if api_key is None:
api_key = Config.AITOMATIC_API_KEY
return OpenAIEmbedding(api_base=api_base, api_key=api_key)
@classmethod
def get(cls) -> OpenAIEmbedding: # running
return cls._get(api_base=Config.AITOMATIC_API_URL)
@classmethod
def get_llama2_7b(cls) -> OpenAIEmbedding:
return cls._get(api_base=Config.AITOMATIC_API_URL_7B)
@classmethod
def get_llama2_70b(cls) -> OpenAIEmbedding:
return cls._get(api_base=Config.AITOMATIC_API_URL_70B)
class OpenAI:
"""
This class represents the OpenAI-hosted embedding service
"""
@classmethod
def get(cls) -> OpenAIEmbedding:
return OpenAIEmbedding(api_key=Config.OPENAI_API_KEY)
class Azure:
"""
This class represents the Azure-hosted embedding service
"""
@classmethod
def get(cls) -> AzureOpenAIEmbedding:
return AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="text-embedding-ada-002",
api_key=Config.AZURE_OPENAI_API_KEY,
api_version="2023-09-01-preview",
azure_endpoint=Config.AZURE_OPENAI_API_URL,
)
class ServiceContexts:
"""
This class represents the service contexts for different models.
"""
class _AServiceContextHelper:
"""
This class represents the service contexts for the different embedding services.
"""
@classmethod
def _get(cls, llm=None, embedding=None) -> ServiceContext:
sc = ServiceContext.from_defaults(llm=llm, embed_model=embedding)
return sc
class Aitomatic(_AServiceContextHelper):
"""
This class represents the service contexts for the Aitomatic-hosted models.
"""
@classmethod
def get_llama2_7b(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.Aitomatic.get_llama2_7b()
embedding = LlamaIndexApi.Embeddings.Aitomatic.get_llama2_7b()
return cls._get(llm=llm, embedding=embedding)
@classmethod
def get_llama_2_70b(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.Aitomatic.get_llama2_7b()
embedding = LlamaIndexApi.Embeddings.Aitomatic.get_llama2_70b()
return cls._get(llm=llm, embedding=embedding)
class OpenAI(_AServiceContextHelper):
"""
This class represents the service contexts for the OpenAI-hosted models.
"""
@classmethod
def get_gpt_35_turbo_1106(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.OpenAI.get_gpt_35_turbo_1106()
embedding = LlamaIndexApi.Embeddings.OpenAI.get()
return cls._get(llm=llm, embedding=embedding)
@classmethod
def get_gpt_35_turbo(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.OpenAI.get_gpt_35_turbo()
embedding = LlamaIndexApi.Embeddings.OpenAI.get()
return cls._get(llm=llm, embedding=embedding)
class Azure(_AServiceContextHelper):
"""
This class represents the service contexts for the Azure-hosted models.
"""
@classmethod
def get(cls) -> ServiceContext:
return cls.get_gpt_35()
@classmethod
def get_gpt_35(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.Azure.get_gpt_35()
embedding = LlamaIndexApi.Embeddings.Azure.get()
return cls._get(llm=llm, embedding=embedding)
@classmethod
def get_gpt_35_16k(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.Azure.get_gpt_35_16k()
embedding = LlamaIndexApi.Embeddings.Azure.get()
return cls._get(llm=llm, embedding=embedding)
@classmethod
def get_gpt4(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.Azure.get_gpt_4()
embedding = LlamaIndexApi.Embeddings.Azure.get()
return cls._get(llm=llm, embedding=embedding)
@classmethod
def get_gpt4_32k(cls) -> ServiceContext:
llm = LlamaIndexApi.LLMs.Azure.get_gpt_4_32k()
embedding = LlamaIndexApi.Embeddings.Azure.get()
return cls._get(llm=llm, embedding=embedding)
# Convenience methods
get_aitomatic_llm = LLMs.Aitomatic.get
get_openai_llm = LLMs.OpenAI.get
get_azure_llm = LLMs.Azure.get
| [
"llama_index.embeddings.azure_openai.AzureOpenAIEmbedding",
"llama_index.llms.azure_openai.AzureOpenAI",
"llama_index.core.llms.openai_utils.ALL_AVAILABLE_MODELS.update",
"llama_index.core.ServiceContext.from_defaults",
"llama_index.core.llms.openai_utils.CHAT_MODELS.update",
"llama_index.core.llms.OpenAI",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((948, 998), 'llama_index.core.llms.openai_utils.ALL_AVAILABLE_MODELS.update', 'ALL_AVAILABLE_MODELS.update', (['_EXTENDED_CHAT_MODELS'], {}), '(_EXTENDED_CHAT_MODELS)\n', (975, 998), False, 'from llama_index.core.llms.openai_utils import ALL_AVAILABLE_MODELS, CHAT_MODELS\n'), ((999, 1040), 'llama_index.core.llms.openai_utils.CHAT_MODELS.update', 'CHAT_MODELS.update', (['_EXTENDED_CHAT_MODELS'], {}), '(_EXTENDED_CHAT_MODELS)\n', (1017, 1040), False, 'from llama_index.core.llms.openai_utils import ALL_AVAILABLE_MODELS, CHAT_MODELS\n'), ((1148, 1183), 'os.environ.get', 'os.environ.get', (['"""AITOMATIC_API_KEY"""'], {}), "('AITOMATIC_API_KEY')\n", (1162, 1183), False, 'import os\n'), ((1639, 1671), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1653, 1671), False, 'import os\n'), ((1829, 1867), 'os.environ.get', 'os.environ.get', (['"""AZURE_OPENAI_API_KEY"""'], {}), "('AZURE_OPENAI_API_KEY')\n", (1843, 1867), False, 'import os\n'), ((2041, 2073), 'os.environ.get', 'os.environ.get', (['"""LEPTON_API_KEY"""'], {}), "('LEPTON_API_KEY')\n", (2055, 2073), False, 'import os\n'), ((1232, 1267), 'os.environ.get', 'os.environ.get', (['"""AITOMATIC_API_URL"""'], {}), "('AITOMATIC_API_URL')\n", (1246, 1267), False, 'import os\n'), ((1381, 1419), 'os.environ.get', 'os.environ.get', (['"""AITOMATIC_API_URL_7B"""'], {}), "('AITOMATIC_API_URL_7B')\n", (1395, 1419), False, 'import os\n'), ((1515, 1554), 'os.environ.get', 'os.environ.get', (['"""AITOMATIC_API_URL_70B"""'], {}), "('AITOMATIC_API_URL_70B')\n", (1529, 1554), False, 'import os\n'), ((1717, 1749), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_URL"""'], {}), "('OPENAI_API_URL')\n", (1731, 1749), False, 'import os\n'), ((1919, 1957), 'os.environ.get', 'os.environ.get', (['"""AZURE_OPENAI_API_URL"""'], {}), "('AZURE_OPENAI_API_URL')\n", (1933, 1957), False, 'import os\n'), ((2119, 2151), 'os.environ.get', 'os.environ.get', (['"""LEPTON_API_URL"""'], {}), "('LEPTON_API_URL')\n", (2133, 2151), False, 'import os\n'), ((7221, 7381), 'llama_index.llms.azure_openai.AzureOpenAI', 'AzureOpenAI', ([], {'engine': 'model', 'model': 'model', 'temperature': '(0.0)', 'api_version': '"""2023-09-01-preview"""', 'api_key': 'Config.AZURE_OPENAI_API_KEY', 'azure_endpoint': 'api_base'}), "(engine=model, model=model, temperature=0.0, api_version=\n '2023-09-01-preview', api_key=Config.AZURE_OPENAI_API_KEY,\n azure_endpoint=api_base)\n", (7232, 7381), False, 'from llama_index.llms.azure_openai import AzureOpenAI\n'), ((8543, 8594), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'api_base': 'api_base', 'api_key': 'api_key'}), '(api_base=api_base, api_key=api_key)\n', (8558, 8594), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((9267, 9313), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'api_key': 'Config.OPENAI_API_KEY'}), '(api_key=Config.OPENAI_API_KEY)\n', (9282, 9313), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((9536, 9759), 'llama_index.embeddings.azure_openai.AzureOpenAIEmbedding', 'AzureOpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'deployment_name': '"""text-embedding-ada-002"""', 'api_key': 'Config.AZURE_OPENAI_API_KEY', 'api_version': '"""2023-09-01-preview"""', 'azure_endpoint': 'Config.AZURE_OPENAI_API_URL'}), "(model='text-embedding-ada-002', deployment_name=\n 'text-embedding-ada-002', api_key=Config.AZURE_OPENAI_API_KEY,\n api_version='2023-09-01-preview', 
azure_endpoint=Config.\n AZURE_OPENAI_API_URL)\n", (9556, 9759), False, 'from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n'), ((10272, 10332), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embedding'}), '(llm=llm, embed_model=embedding)\n', (10300, 10332), False, 'from llama_index.core import ServiceContext\n'), ((2724, 2794), 'llama_index.core.llms.OpenAI', 'LlamaIndexOpenAI', ([], {'api_key': 'api_key', 'additional_kwargs': 'additional_kwargs'}), '(api_key=api_key, additional_kwargs=additional_kwargs)\n', (2740, 2794), True, 'from llama_index.core.llms import OpenAI as LlamaIndexOpenAI\n'), ((2851, 2921), 'llama_index.core.llms.OpenAI', 'LlamaIndexOpenAI', ([], {'api_key': 'api_key', 'additional_kwargs': 'additional_kwargs'}), '(api_key=api_key, additional_kwargs=additional_kwargs)\n', (2867, 2921), True, 'from llama_index.core.llms import OpenAI as LlamaIndexOpenAI\n'), ((2987, 3057), 'llama_index.core.llms.OpenAI', 'LlamaIndexOpenAI', ([], {'api_key': 'api_key', 'additional_kwargs': 'additional_kwargs'}), '(api_key=api_key, additional_kwargs=additional_kwargs)\n', (3003, 3057), True, 'from llama_index.core.llms import OpenAI as LlamaIndexOpenAI\n'), ((3106, 3171), 'llama_index.core.llms.OpenAI', 'LlamaIndexOpenAI', ([], {'model': 'model', 'api_base': 'api_base', 'api_key': 'api_key'}), '(model=model, api_base=api_base, api_key=api_key)\n', (3122, 3171), True, 'from llama_index.core.llms import OpenAI as LlamaIndexOpenAI\n'), ((5783, 5823), 'os.environ.get', 'os.environ.get', (['"""AIMO_STANDARD_URL_BASE"""'], {}), "('AIMO_STANDARD_URL_BASE')\n", (5797, 5823), False, 'import os\n')] |
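A minimal usage sketch for the helper hierarchy above, assuming the relevant API keys are set in the environment; the model choices are illustrative only.

llm = LlamaIndexApi.LLMs.OpenAI.get_gpt_35_turbo_1106()
embedding = LlamaIndexApi.Embeddings.OpenAI.get()
service_context = LlamaIndexApi.ServiceContexts.OpenAI.get_gpt_35_turbo_1106()

# the convenience aliases at the bottom of the module point at the same helpers
azure_llm = LlamaIndexApi.get_azure_llm()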
from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex
from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv
import os
import graphsignal
import logging
import time
import random
load_dotenv()
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
graphsignal.configure(api_key=os.getenv('GRAPHSIGNAL_API_KEY'), deployment='DevSecOpsKB')
# set context window
context_window = 4096
# set number of output tokens
num_output = 512
#LLMPredictor is a wrapper class around LangChain's LLMChain that allows easy integration into LlamaIndex
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_output))
#constructs service_context
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, context_window=context_window, num_output=num_output)
#set the global service context object
from llama_index import set_global_service_context
set_global_service_context(service_context)
#loads data from the specified directory path
documents = SimpleDirectoryReader("./data").load_data()
#when first building the index
index = GPTVectorStoreIndex.from_documents(documents)
def data_querying(input_text):
#queries the index with the input text
response = index.as_query_engine().query(input_text)
return response.response
# predefine a list of 10 questions
questions = [
'what does Trivy image scan do?',
'What are the main benefits of using Harden Runner?',
'What is the 3-2-1 rule in DevOps self-service model?',
'What is Infracost? and what does it do?',
'What is the terraform command to auto generate README?',
'How to pin Terraform module source to a particular branch?',
'What are the benefits of reusable Terraform modules?',
'How do I resolve error "npm ERR! code E400"?',
'How to fix error "NoCredentialProviders: no valid providers in chain"?',
'How to fix error "Credentials could not be loaded, please check your action inputs: Could not load credentials from any providers"?'
]
start_time = time.time()
while time.time() - start_time < 1800: # let it run for 30 minutes (1800 seconds)
try:
num = random.randint(0, len(questions) - 1)
print("Question: ", questions[num])
answer = data_querying(questions[num])
print("Answer: ", answer)
    except Exception:
logger.error("Error during data query", exc_info=True)
time.sleep(5 * random.random())
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.set_global_service_context",
"llama_index.SimpleDirectoryReader"
] | [((244, 257), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (255, 257), False, 'from dotenv import load_dotenv\n'), ((259, 280), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (278, 280), False, 'import logging\n'), ((290, 309), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (307, 309), False, 'import logging\n'), ((790, 906), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'context_window': 'context_window', 'num_output': 'num_output'}), '(llm_predictor=llm_predictor, context_window=\n context_window, num_output=num_output)\n', (818, 906), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((993, 1036), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (1019, 1036), False, 'from llama_index import set_global_service_context\n'), ((1180, 1225), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1214, 1225), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((2128, 2139), 'time.time', 'time.time', ([], {}), '()\n', (2137, 2139), False, 'import time\n'), ((372, 404), 'os.getenv', 'os.getenv', (['"""GRAPHSIGNAL_API_KEY"""'], {}), "('GRAPHSIGNAL_API_KEY')\n", (381, 404), False, 'import os\n'), ((663, 741), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.5)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': 'num_output'}), "(temperature=0.5, model_name='gpt-3.5-turbo', max_tokens=num_output)\n", (673, 741), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1096, 1127), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data"""'], {}), "('./data')\n", (1117, 1127), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, ServiceContext, GPTVectorStoreIndex\n'), ((2147, 2158), 'time.time', 'time.time', ([], {}), '()\n', (2156, 2158), False, 'import time\n'), ((2505, 2520), 'random.random', 'random.random', ([], {}), '()\n', (2518, 2520), False, 'import random\n')] |
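A small optional variation (not in the original script): time each call so slow questions stand out next to the Graphsignal traces. It reuses data_querying(), questions, and the logger defined above.

import time

def timed_query(question: str) -> str:
    start = time.perf_counter()
    answer = data_querying(question)
    logger.info("answered in %.2fs: %s", time.perf_counter() - start, question)
    return answer

# example: one timed pass over every predefined question
for q in questions:
    timed_query(q)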
# Ref https://github.com/amrrs/QABot-LangChain/blob/main/Q%26A_Bot_with_Llama_Index_and_LangChain.ipynb
#from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex,GPTSimpleVectorIndex, PromptHelper
from llama_index import LLMPredictor, ServiceContext
import sys
import os
def construct_index(directory_path):
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_outputs = 256
# set maximum chunk overlap
max_chunk_overlap = 20
# set chunk size limit
chunk_size_limit = 600
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
# define LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-002", max_tokens=num_outputs))
documents = SimpleDirectoryReader(directory_path).load_data()
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
index_obj = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
index_obj.save_to_disk('model/index.json')
return index_obj
def ask_bot(input_index='model/index.json'):
index_obj = GPTSimpleVectorIndex.load_from_disk(input_index)
while True:
query = input('What do you want to ask the bot? \n')
if query == "nothing":
return
response = index_obj.query(query, response_mode="compact")
print("\nBot says: \n\n" + response.response + "\n\n\n")
index = construct_index("data/")
ask_bot('model/index.json')
| [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.GPTSimpleVectorIndex.from_documents",
"llama_index.PromptHelper",
"llama_index.GPTSimpleVectorIndex.load_from_disk"
] | [((716, 815), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (728, 815), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, GPTSimpleVectorIndex, PromptHelper\n'), ((1035, 1092), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (1063, 1092), False, 'from llama_index import LLMPredictor, ServiceContext\n'), ((1109, 1188), 'llama_index.GPTSimpleVectorIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1144, 1188), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, GPTSimpleVectorIndex, PromptHelper\n'), ((1322, 1370), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['input_index'], {}), '(input_index)\n', (1357, 1370), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, GPTSimpleVectorIndex, PromptHelper\n'), ((867, 943), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-davinci-002"""', 'max_tokens': 'num_outputs'}), "(temperature=0, model_name='text-davinci-002', max_tokens=num_outputs)\n", (873, 943), False, 'from langchain import OpenAI\n'), ((962, 999), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_path'], {}), '(directory_path)\n', (983, 999), False, 'from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, GPTSimpleVectorIndex, PromptHelper\n')] |
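A hedged variation on the last two lines of the script above: only rebuild the index when no saved copy exists yet, then start the chat loop.

import os

if not os.path.exists('model/index.json'):
    construct_index("data/")
ask_bot('model/index.json')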
# chroma.py
import streamlit as st
import os
import re
from pathlib import Path
import chromadb
from chromadb.config import Settings
from llama_index import GPTVectorStoreIndex, load_index_from_storage
from llama_index.vector_stores import ChromaVectorStore
from utils.model_settings import sentenceTransformers, get_service_context, get_embed_model
import logging
from utils.qa_template import QA_PROMPT
from llama_index.storage.storage_context import StorageContext
def get_collection_index_path(collection):
return (f'./data/{collection}-index.json')
# INDEX_PATH = './data/chroma_index.json'
PERSIST_DIRECTORY = './data/chromadb'
service_context = get_service_context()
@st.cache_resource
def create_chroma_client():
return chromadb.Client(Settings(chroma_db_impl="chromadb.db.duckdb.PersistentDuckDB",persist_directory=PERSIST_DIRECTORY, anonymized_telemetry=False))
def get_chroma_collection(collection_name):
client = create_chroma_client()
try:
return client.get_collection(collection_name)
except Exception as e:
logging.error(f"Failed to get collection '{collection_name}': {e}")
return None
@st.cache_resource
def load_chroma_index(collection):
# collection_index_path = get_collection_index_path(collection)
_chroma_collection = get_chroma_collection(collection)
vector_store = ChromaVectorStore(chroma_collection=_chroma_collection)
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIRECTORY, vector_store=vector_store)
if Path(PERSIST_DIRECTORY).exists():
index = load_index_from_storage(storage_context, service_context=service_context)
logging.info('Index loaded for collection ' + collection )
else:
index = None
return index
# def build_chroma_index(documents, collection, reindex=False, chunk_size_limit=512, model_name='sentence-transformers/all-MiniLM-L6-v2'):
# collection_index_path = get_collection_index_path(collection)
# chroma_client = create_chroma_client()
# if reindex is True:
# chroma_client.delete_collection(collection)
# os.remove(get_collection_index_path(collection))
# _chroma_collection = chroma_client.get_or_create_collection(collection)
# index = None
# index = GPTChromaIndex.from_documents(documents, chroma_collection=_chroma_collection,
# service_context=get_service_context(embed_model=get_embed_model(model_name), chunk_size_limit=chunk_size_limit)
# )
# index.save_to_disk(collection_index_path)
# chroma_client.persist()
def create_or_refresh_chroma_index(documents, collection, reindex=False, chunk_size_limit=512, model_name='sentence-transformers/all-MiniLM-L6-v2'):
collection_index_path = get_collection_index_path(collection)
chroma_client = create_chroma_client()
if reindex is True:
logging.info(chroma_client.list_collections())
if collection in chroma_client.list_collections():
chroma_client.delete_collection(collection)
_chroma_collection = chroma_client.get_or_create_collection(collection)
vector_store = ChromaVectorStore(chroma_collection=_chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = None
index = GPTVectorStoreIndex.from_documents(documents, storage_context=storage_context,
service_context=get_service_context(embed_model=get_embed_model(model_name=model_name), chunk_size_limit=chunk_size_limit)
)
index.storage_context.persist(persist_dir=PERSIST_DIRECTORY)
chroma_client.persist()
else:
refresh_chroma_index(documents, collection)
def refresh_chroma_index(documents, collection):
index = load_chroma_index(collection)
logging.info('refreshing collection ' + collection)
refreshed_docs = index.refresh(documents)
chroma_client = create_chroma_client()
chroma_client.persist()
return refreshed_docs
def query_index(query_str, collection, similarity_top_k=5, response_mode='compact', streaming=False, model_name=sentenceTransformers.OPTION1.value):
index = None
_chroma_collection = get_chroma_collection(collection)
index = load_chroma_index(collection)
query_engine = index.as_query_engine(chroma_collection=_chroma_collection,
mode="embedding",
similarity_top_k=similarity_top_k,
response_mode=response_mode, # default, compact, tree_summarize, no_text
service_context=get_service_context(embed_model=get_embed_model(model_name=model_name)),
text_qa_template=QA_PROMPT,
verbose= True,
use_async= True,
streaming= streaming
)
return query_engine.query(query_str)
def persist_chroma_index():
chroma_client = create_chroma_client()
chroma_client.persist()
def generate_chroma_compliant_name(name: str) -> str:
# Replace non-alphanumeric characters with underscores
new_name = re.sub(r"[^a-zA-Z0-9_\-\.]", "_", name)
# Replace consecutive periods with a single underscore
new_name = re.sub(r"\.{2,}", "_", new_name)
# Ensure the name starts and ends with an alphanumeric character
if not new_name[0].isalnum():
new_name = "a" + new_name[1:]
if not new_name[-1].isalnum():
new_name = new_name[:-1] + "a"
# Truncate or pad the name to be between 3 and 63 characters
new_name = new_name[:63]
while len(new_name) < 3:
new_name += "0"
return new_name
| [
"llama_index.load_index_from_storage",
"llama_index.storage.storage_context.StorageContext.from_defaults",
"llama_index.vector_stores.ChromaVectorStore"
] | [((665, 686), 'utils.model_settings.get_service_context', 'get_service_context', ([], {}), '()\n', (684, 686), False, 'from utils.model_settings import sentenceTransformers, get_service_context, get_embed_model\n'), ((1363, 1418), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': '_chroma_collection'}), '(chroma_collection=_chroma_collection)\n', (1380, 1418), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((1441, 1532), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIRECTORY', 'vector_store': 'vector_store'}), '(persist_dir=PERSIST_DIRECTORY, vector_store=\n vector_store)\n', (1469, 1532), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((3852, 3903), 'logging.info', 'logging.info', (["('refreshing collection ' + collection)"], {}), "('refreshing collection ' + collection)\n", (3864, 3903), False, 'import logging\n'), ((5186, 5226), 're.sub', 're.sub', (['"""[^a-zA-Z0-9_\\\\-\\\\.]"""', '"""_"""', 'name'], {}), "('[^a-zA-Z0-9_\\\\-\\\\.]', '_', name)\n", (5192, 5226), False, 'import re\n'), ((5300, 5332), 're.sub', 're.sub', (['"""\\\\.{2,}"""', '"""_"""', 'new_name'], {}), "('\\\\.{2,}', '_', new_name)\n", (5306, 5332), False, 'import re\n'), ((762, 893), 'chromadb.config.Settings', 'Settings', ([], {'chroma_db_impl': '"""chromadb.db.duckdb.PersistentDuckDB"""', 'persist_directory': 'PERSIST_DIRECTORY', 'anonymized_telemetry': '(False)'}), "(chroma_db_impl='chromadb.db.duckdb.PersistentDuckDB',\n persist_directory=PERSIST_DIRECTORY, anonymized_telemetry=False)\n", (770, 893), False, 'from chromadb.config import Settings\n'), ((1585, 1658), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (1608, 1658), False, 'from llama_index import GPTVectorStoreIndex, load_index_from_storage\n'), ((1667, 1724), 'logging.info', 'logging.info', (["('Index loaded for collection ' + collection)"], {}), "('Index loaded for collection ' + collection)\n", (1679, 1724), False, 'import logging\n'), ((3159, 3214), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': '_chroma_collection'}), '(chroma_collection=_chroma_collection)\n', (3176, 3214), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((3241, 3296), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (3269, 3296), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((1069, 1136), 'logging.error', 'logging.error', (['f"""Failed to get collection \'{collection_name}\': {e}"""'], {}), '(f"Failed to get collection \'{collection_name}\': {e}")\n', (1082, 1136), False, 'import logging\n'), ((1535, 1558), 'pathlib.Path', 'Path', (['PERSIST_DIRECTORY'], {}), '(PERSIST_DIRECTORY)\n', (1539, 1558), False, 'from pathlib import Path\n'), ((4673, 4711), 'utils.model_settings.get_embed_model', 'get_embed_model', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (4688, 4711), False, 'from utils.model_settings import sentenceTransformers, get_service_context, get_embed_model\n'), ((3486, 3524), 'utils.model_settings.get_embed_model', 'get_embed_model', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (3501, 3524), False, 
'from utils.model_settings import sentenceTransformers, get_service_context, get_embed_model\n')] |
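A usage sketch for the helpers above, assuming the legacy llama_index SimpleDirectoryReader is available in this environment; the folder path and question are placeholders.

from llama_index import SimpleDirectoryReader

collection = generate_chroma_compliant_name("my docs!")   # sanitise the collection name first
docs = SimpleDirectoryReader("./docs").load_data()
create_or_refresh_chroma_index(docs, collection, reindex=True)
response = query_index("What is this project about?", collection)
print(response)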
# /app/src/tools/doc_search.py
import logging
# Primary Components
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
from qdrant_client import QdrantClient
from src.utils.config import load_config, setup_environment_variables
from src.utils.embedding_selector import EmbeddingConfig, EmbeddingSelector
logger = logging.getLogger(__name__)
class DocumentSearch:
"""
Class to perform document searches using a vector store index.
Attributes:
- collection (str): Name of the collection to be queried.
- query (str): User input query for searching documents.
- CONFIG (dict): Loaded configuration settings.
- client (QdrantClient): Client to interact with the Qdrant service.
"""
def __init__(self, query: str, collection: str):
"""
Initializes with collection name and user input.
Parameters:
- collection (str): Name of the collection to be queried.
- query (str): User input query for searching documents.
"""
self.collection = collection
self.query = query
self.CONFIG = load_config()
setup_environment_variables(self.CONFIG)
self.client = QdrantClient(url="http://RAG_BOT_QDRANT:6333")
# self.embed_model = OpenAIEmbedding()
# self.embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/multi-qa-mpnet-base-dot-v1")
self.embedding_config = EmbeddingConfig(type=self.CONFIG["Embedding_Type"])
self.embed_model = EmbeddingSelector(self.embedding_config).get_embedding_model()
def setup_index(self) -> VectorStoreIndex:
"""
Sets up and returns the vector store index for the collection.
Returns:
- VectorStoreIndex: The set up vector store index.
Raises:
- Exception: Propagates any exceptions that occur during the index setup.
"""
try:
vector_store = QdrantVectorStore(client=self.client, collection_name=self.collection)
service_context = ServiceContext.from_defaults(embed_model=self.embed_model)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
return index
except Exception as e:
logging.error(f"setup_index: Error - {str(e)}")
raise e
def search_documents(self):
"""
Searches and returns documents based on the user input query.
Returns:
- Any: The response received from querying the index.
Raises:
- Exception: Propagates any exceptions that occur during the document search.
"""
try:
query_engine = (self.setup_index()).as_query_engine()
response = query_engine.query(self.query)
logging.info(f"search_documents: Response - {response}")
return response
except Exception as e:
logging.error(f"search_documents: Error - {str(e)}")
raise e
| [
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store"
] | [((384, 411), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (401, 411), False, 'import logging\n'), ((1157, 1170), 'src.utils.config.load_config', 'load_config', ([], {}), '()\n', (1168, 1170), False, 'from src.utils.config import load_config, setup_environment_variables\n'), ((1179, 1219), 'src.utils.config.setup_environment_variables', 'setup_environment_variables', (['self.CONFIG'], {}), '(self.CONFIG)\n', (1206, 1219), False, 'from src.utils.config import load_config, setup_environment_variables\n'), ((1242, 1288), 'qdrant_client.QdrantClient', 'QdrantClient', ([], {'url': '"""http://RAG_BOT_QDRANT:6333"""'}), "(url='http://RAG_BOT_QDRANT:6333')\n", (1254, 1288), False, 'from qdrant_client import QdrantClient\n'), ((1481, 1532), 'src.utils.embedding_selector.EmbeddingConfig', 'EmbeddingConfig', ([], {'type': "self.CONFIG['Embedding_Type']"}), "(type=self.CONFIG['Embedding_Type'])\n", (1496, 1532), False, 'from src.utils.embedding_selector import EmbeddingConfig, EmbeddingSelector\n'), ((1982, 2052), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'self.client', 'collection_name': 'self.collection'}), '(client=self.client, collection_name=self.collection)\n', (1999, 2052), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((2083, 2141), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'self.embed_model'}), '(embed_model=self.embed_model)\n', (2111, 2141), False, 'from llama_index import ServiceContext, VectorStoreIndex\n'), ((2162, 2260), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (2196, 2260), False, 'from llama_index import ServiceContext, VectorStoreIndex\n'), ((2850, 2906), 'logging.info', 'logging.info', (['f"""search_documents: Response - {response}"""'], {}), "(f'search_documents: Response - {response}')\n", (2862, 2906), False, 'import logging\n'), ((1560, 1600), 'src.utils.embedding_selector.EmbeddingSelector', 'EmbeddingSelector', (['self.embedding_config'], {}), '(self.embedding_config)\n', (1577, 1600), False, 'from src.utils.embedding_selector import EmbeddingConfig, EmbeddingSelector\n')] |
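A minimal usage sketch for the class above; the collection name and question are placeholders and assume the Qdrant collection already holds embedded documents.

if __name__ == "__main__":
    search = DocumentSearch(query="How do I rotate API keys?", collection="runbooks")
    print(search.search_documents())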
import logging
import traceback
from typing import Sequence, List, Optional, Dict
from llama_index import Document
from llama_index.callbacks import CBEventType, CallbackManager
from llama_index.callbacks.schema import EventPayload
from llama_index.node_parser import NodeParser, SimpleNodeParser
from llama_index.node_parser.extractors import MetadataExtractor
from llama_index.schema import BaseNode, MetadataMode, TextNode, NodeRelationship
from llama_index.text_splitter import TokenTextSplitter, SplitterType, get_default_text_splitter
from llama_index.utils import get_tqdm_iterable
from pydantic import Field
from ghostcoder.codeblocks import create_parser, CodeBlock, CodeBlockType
from ghostcoder.utils import count_tokens
class CodeNodeParser(NodeParser):
"""Route to the right node parser depending on language set in document metadata"""
text_splitter: SplitterType = Field(
description="The text splitter to use when splitting documents."
)
include_metadata: bool = Field(
default=True, description="Whether or not to consider metadata when splitting."
)
include_prev_next_rel: bool = Field(
default=True, description="Include prev/next node relationships."
)
metadata_extractor: Optional[MetadataExtractor] = Field(
default=None, description="Metadata extraction pipeline to apply to nodes."
)
callback_manager: CallbackManager = Field(
default_factory=CallbackManager, exclude=True
)
@classmethod
def from_defaults(
cls,
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
text_splitter: Optional[SplitterType] = None,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
metadata_extractor: Optional[MetadataExtractor] = None,
) -> "CodeNodeParser":
callback_manager = callback_manager or CallbackManager([])
text_splitter = text_splitter or get_default_text_splitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
return cls(
text_splitter=text_splitter,
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
metadata_extractor=metadata_extractor,
_node_parser_map={}
)
@classmethod
def class_name(cls):
return "CodeNodeParser"
def get_nodes_from_documents(
self,
documents: Sequence[Document],
show_progress: bool = False,
) -> List[BaseNode]:
with self.callback_manager.event(
CBEventType.NODE_PARSING, payload={EventPayload.DOCUMENTS: documents}
) as event:
documents_with_progress = get_tqdm_iterable(
documents, show_progress, "Parsing documents into nodes"
)
all_nodes: List[BaseNode] = []
for document in documents_with_progress:
language = document.metadata.get("language", None)
if language:
try:
parser = create_parser(language)
except Exception as e:
logging.warning(f"Could not get parser for language {language}. Will not parse document {document.id_}")
continue
content = document.get_content(metadata_mode=MetadataMode.NONE)
if not content:
logging.warning(f"Could not get content for document {document.id_}")
continue
codeblock = parser.parse(content)
logging.debug(codeblock.to_tree(include_tree_sitter_type=False,
show_tokens=True,
include_types=[CodeBlockType.FUNCTION, CodeBlockType.CLASS]))
splitted_blocks = codeblock.split_blocks()
for splitted_block in splitted_blocks:
definitions, parent = self.get_parent_and_definitions(splitted_block)
node_metadata = document.metadata
node_metadata["definition"] = splitted_block.content
node_metadata["block_type"] = str(splitted_block.type)
if splitted_block.identifier:
node_metadata["identifier"] = splitted_block.identifier
else:
node_metadata["identifier"] = splitted_block.content[:80].replace("\n", "\\n")
node_metadata["start_line"] = splitted_block.start_line
tokens = count_tokens(parent.to_string())
if tokens > 4000:
logging.info(f"Skip node [{node_metadata['identifier']}] in {document.id_} with {tokens} tokens")
continue
if tokens > 1000:
logging.info(f"Big node [{node_metadata['identifier']}] in {document.id_} with {tokens} tokens")
# TODO: Add relationships between code blocks
node = TextNode(
text=parent.to_string(),
embedding=document.embedding,
metadata=node_metadata,
excluded_embed_metadata_keys=document.excluded_embed_metadata_keys,
excluded_llm_metadata_keys=document.excluded_llm_metadata_keys,
metadata_seperator=document.metadata_seperator,
metadata_template=document.metadata_template,
text_template=document.text_template,
relationships={NodeRelationship.SOURCE: document.as_related_node_info()},
)
all_nodes.append(node)
event.on_end(payload={EventPayload.NODES: all_nodes})
return all_nodes
def get_parent_and_definitions(self, codeblock: CodeBlock) -> (List[str], CodeBlock):
definitions = [codeblock.content]
if codeblock.parent:
parent_defs, parent = self.get_parent_and_definitions(codeblock.parent)
definitions.extend(parent_defs)
return definitions, parent
else:
return definitions, codeblock
| [
"llama_index.utils.get_tqdm_iterable",
"llama_index.callbacks.CallbackManager",
"llama_index.text_splitter.get_default_text_splitter"
] | [((893, 964), 'pydantic.Field', 'Field', ([], {'description': '"""The text splitter to use when splitting documents."""'}), "(description='The text splitter to use when splitting documents.')\n", (898, 964), False, 'from pydantic import Field\n'), ((1008, 1099), 'pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Whether or not to consider metadata when splitting."""'}), "(default=True, description=\n 'Whether or not to consider metadata when splitting.')\n", (1013, 1099), False, 'from pydantic import Field\n'), ((1143, 1215), 'pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Include prev/next node relationships."""'}), "(default=True, description='Include prev/next node relationships.')\n", (1148, 1215), False, 'from pydantic import Field\n'), ((1284, 1371), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Metadata extraction pipeline to apply to nodes."""'}), "(default=None, description=\n 'Metadata extraction pipeline to apply to nodes.')\n", (1289, 1371), False, 'from pydantic import Field\n'), ((1421, 1473), 'pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)'}), '(default_factory=CallbackManager, exclude=True)\n', (1426, 1473), False, 'from pydantic import Field\n'), ((1964, 1983), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (1979, 1983), False, 'from llama_index.callbacks import CBEventType, CallbackManager\n'), ((2026, 2143), 'llama_index.text_splitter.get_default_text_splitter', 'get_default_text_splitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callback_manager': 'callback_manager'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap, callback_manager=callback_manager)\n', (2051, 2143), False, 'from llama_index.text_splitter import TokenTextSplitter, SplitterType, get_default_text_splitter\n'), ((2903, 2978), 'llama_index.utils.get_tqdm_iterable', 'get_tqdm_iterable', (['documents', 'show_progress', '"""Parsing documents into nodes"""'], {}), "(documents, show_progress, 'Parsing documents into nodes')\n", (2920, 2978), False, 'from llama_index.utils import get_tqdm_iterable\n'), ((3260, 3283), 'ghostcoder.codeblocks.create_parser', 'create_parser', (['language'], {}), '(language)\n', (3273, 3283), False, 'from ghostcoder.codeblocks import create_parser, CodeBlock, CodeBlockType\n'), ((3634, 3703), 'logging.warning', 'logging.warning', (['f"""Could not get content for document {document.id_}"""'], {}), "(f'Could not get content for document {document.id_}')\n", (3649, 3703), False, 'import logging\n'), ((3351, 3465), 'logging.warning', 'logging.warning', (['f"""Could not get parser for language {language}. Will not parse document {document.id_}"""'], {}), "(\n f'Could not get parser for language {language}. Will not parse document {document.id_}'\n )\n", (3366, 3465), False, 'import logging\n'), ((4987, 5094), 'logging.info', 'logging.info', (['f"""Skip node [{node_metadata[\'identifier\']}] in {document.id_} with {tokens} tokens"""'], {}), '(\n f"Skip node [{node_metadata[\'identifier\']}] in {document.id_} with {tokens} tokens"\n )\n', (4999, 5094), False, 'import logging\n'), ((5193, 5299), 'logging.info', 'logging.info', (['f"""Big node [{node_metadata[\'identifier\']}] in {document.id_} with {tokens} tokens"""'], {}), '(\n f"Big node [{node_metadata[\'identifier\']}] in {document.id_} with {tokens} tokens"\n )\n', (5205, 5299), False, 'import logging\n')] |
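A hedged sketch of feeding the parser above: a document must carry a "language" entry in its metadata or it is skipped, and the language tag must be one that ghostcoder's create_parser() recognises (assumed here). The source string and file path are placeholders.

from llama_index import Document

doc = Document(
    text="def add(a, b):\n    return a + b\n",
    metadata={"language": "python", "file_path": "example.py"},
)
parser = CodeNodeParser.from_defaults(chunk_size=1024)
nodes = parser.get_nodes_from_documents([doc])
for node in nodes:
    print(node.metadata.get("identifier"), node.metadata.get("block_type"))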
# RAG/TAG Tiger - llm.py
# Copyright (c) 2024 Stuart Riffle
# github.com/stuartriffle/ragtag-tiger
import os
import torch
from .files import *
from .lograg import lograg, lograg_verbose, lograg_error
from .timer import TimerUntil
openai_model_default = "gpt-3.5-turbo-instruct"
google_model_default = "models/text-bison-001"
anthropic_model_default = "claude-2"
mistral_default = "mistral-small"
perplexity_default = "llama-2-70b-chat"
replicate_default = "mistralai/mixtral-8x7b-instruct-v0.1"
fireworks_ai_default = "accounts/fireworks/models/mixtral-8x7b-instruct"
together_ai_default = "codellama/CodeLlama-70b-Instruct-hf"
default_timeout = 180
default_temperature = 0.1
default_max_tokens = 500
default_llm_provider = "huggingface"
hf_model_nicknames = { "default": "codellama/CodeLlama-7b-Instruct-hf" }
def load_llm(provider, model, server, api_key, params, global_params, verbose=False, set_service_context=True, torch_device=None):
result = None
streaming_supported = True
try:
with TimerUntil("ready"):
all_params = global_params.copy()
model_params = dict([param.split("=") for param in params]) if params else {}
for k, v in model_params.items():
all_params[k] = v
model_kwargs = {}
for k, v in all_params.items():
model_kwargs[k] = float(v) if v.replace(".", "", 1).isdigit() else v
temperature = float(model_kwargs.get("temperature", default_temperature))
max_tokens = int(model_kwargs.get("max_tokens", default_max_tokens))
### OpenAI
if provider == "openai" and not server:
model_name = model or openai_model_default
api_key = api_key or os.environ.get("OPENAI_API_KEY", "")
lograg(f"OpenAI model \"{model_name}\"...")
from llama_index.llms import OpenAI
result = OpenAI(
model=model_name,
timeout=default_timeout,
api_key=api_key,
additional_kwargs=model_kwargs,
temperature=temperature,
max_tokens=max_tokens,
verbose=verbose)
### OpenAI API-compatible third party server
elif provider == "openai" and server:
# Auto-populate API key and model for known providers
if "together.ai" in server or "together.xyz" in server:
api_key = api_key or os.environ.get("TOGETHERAI_API_KEY", "")
model = model or together_ai_default
if "fireworks.ai" in server:
api_key = api_key or os.environ.get("FIREWORKS_API_KEY", "")
model = model or fireworks_ai_default
api_key = api_key or os.environ.get("OPENAI_API_KEY", "")
model_name = model or "default"
lograg(f"Model \"{model_name}\" on \"{server}\"...")
from llama_index.llms import OpenAILike
result = OpenAILike(
api_key=api_key,
model=model_name,
additional_kwargs=model_kwargs,
api_base=server,
max_iterations=100,
timeout=default_timeout,
max_tokens=max_tokens,
temperature=temperature,
verbose=verbose)
### Google
elif provider == "google":
gemini_api_key = os.environ.get("GEMINI_API_KEY", "")
google_api_key = os.environ.get("GOOGLE_API_KEY", "")
model_name = model or google_model_default
import google.generativeai as genai
genai.configure(api_key=google_api_key)
if "gemini" in str(model_name).lower():
lograg(f"Google Gemini model \"{model_name}\"...")
from llama_index.llms import Gemini
result = Gemini(
api_key=api_key or gemini_api_key,
model_name=model_name,
max_tokens=max_tokens,
temperature=temperature,
model_kwargs=model_kwargs)
else:
lograg(f"Google PaLM model \"{model_name}\"...")
from llama_index.llms import PaLM
result = PaLM(
api_key=api_key or google_api_key,
model_name=model_name,
generate_kwargs=model_kwargs)
streaming_supported = False
### Llama.cpp
elif provider == "llamacpp":
if torch.cuda.is_available():
# FIXME - this does nothing? Always on CPU
model_kwargs["n_gpu_layers"] = -1
lograg(f"llama.cpp model \"{cleanpath(model)}\"...")
from llama_index.llms import LlamaCPP
result = LlamaCPP(
model_path=model,
model_kwargs=model_kwargs,
max_new_tokens=max_tokens,
temperature=temperature,
verbose=verbose)
### Mistral
elif provider == "mistral":
api_key = api_key or os.environ.get("MISTRAL_API_KEY", None)
model_name = model or mistral_default
lograg(f"Mistral model \"{model_name}\"...")
from llama_index.llms import MistralAI
result = MistralAI(
api_key=api_key,
model=model_name,
max_tokens=max_tokens,
temperature=temperature,
additional_kwargs=model_kwargs)
### Perplexity
elif provider == "perplexity":
api_key = api_key or os.environ.get("PERPLEXITYAI_API_KEY", "")
model_name = model or perplexity_default
lograg(f"Perplexity model \"{model_name}\"...")
from llama_index.llms import Perplexity
result = Perplexity(
api_key=api_key,
model=model_name,
max_tokens=max_tokens,
temperature=temperature,
model_kwargs=model_kwargs)
### Replicate
elif provider == "replicate":
api_key = api_key or os.environ.get("REPLICATE_API_TOKEN", "")
model_name = model or replicate_default
lograg(f"Replicate model \"{model_name}\"...")
from llama_index.llms import Replicate
result = Replicate(
model=model_name,
temperature=temperature,
additional_kwargs=model_kwargs)
### HuggingFace
else:
os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"
model_desc = ""
model_name = model or "default"
if model_name in hf_model_nicknames:
model_desc = f" (\"{model_name}\")"
model_name = hf_model_nicknames[model_name]
lograg(f"HuggingFace model \"{model_name}\"{model_desc}...")
from llama_index.llms import HuggingFaceLLM
result = HuggingFaceLLM(
model_name=model_name,
model_kwargs=model_kwargs,
max_new_tokens=max_tokens,
device_map=torch_device or "auto")
#system_prompt=system_prompt)
from llama_index import ServiceContext, set_global_service_context
service_context = ServiceContext.from_defaults(
embed_model='local',
llm=result)
if set_service_context:
set_global_service_context(service_context)
except Exception as e:
lograg_error(f"failure initializing LLM: {e}", exit_code=1)
return result, streaming_supported, service_context
def split_llm_config(config):
"""Split an LLM from a config string of format "[alias=]provider[,model[,server[,api-key[,parameters...]]]]" into its components"""
fields = config.strip("\"' ").split(",")
provider = fields[0].strip() if len(fields) > 0 else default_llm_provider
model = fields[1].strip() if len(fields) > 1 else None
server = fields[2].strip() if len(fields) > 2 else None
api_key = fields[3].strip() if len(fields) > 3 else None
params = fields[4:] if len(fields) > 4 else []
alias = None
if "=" in provider:
alias, provider = provider.split("=", 1)
provider = provider.strip()
return provider, model, server, api_key, params, alias
def load_llm_config(config, global_params, set_service_context=True):
"""Load an LLM from a config string like "provider,model,server,api-key,param1,param2,..."""
provider, model, server, api_key, params, _ = split_llm_config(config)
    return load_llm(provider.lower(), model, server, api_key, params, global_params, set_service_context=set_service_context)
| [
"llama_index.llms.Gemini",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.PaLM",
"llama_index.llms.OpenAI",
"llama_index.llms.LlamaCPP",
"llama_index.llms.Replicate",
"llama_index.llms.HuggingFaceLLM",
"llama_index.llms.MistralAI",
"llama_index.set_global_service_context",
"llama_index.llms.Perplexity",
"llama_index.llms.OpenAILike"
] | [((8029, 8090), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': '"""local"""', 'llm': 'result'}), "(embed_model='local', llm=result)\n", (8057, 8090), False, 'from llama_index import ServiceContext, set_global_service_context\n'), ((1986, 2158), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': 'model_name', 'timeout': 'default_timeout', 'api_key': 'api_key', 'additional_kwargs': 'model_kwargs', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'verbose': 'verbose'}), '(model=model_name, timeout=default_timeout, api_key=api_key,\n additional_kwargs=model_kwargs, temperature=temperature, max_tokens=\n max_tokens, verbose=verbose)\n', (1992, 2158), False, 'from llama_index.llms import OpenAI\n'), ((8177, 8220), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (8203, 8220), False, 'from llama_index import ServiceContext, set_global_service_context\n'), ((1811, 1847), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (1825, 1847), False, 'import os\n'), ((3186, 3404), 'llama_index.llms.OpenAILike', 'OpenAILike', ([], {'api_key': 'api_key', 'model': 'model_name', 'additional_kwargs': 'model_kwargs', 'api_base': 'server', 'max_iterations': '(100)', 'timeout': 'default_timeout', 'max_tokens': 'max_tokens', 'temperature': 'temperature', 'verbose': 'verbose'}), '(api_key=api_key, model=model_name, additional_kwargs=\n model_kwargs, api_base=server, max_iterations=100, timeout=\n default_timeout, max_tokens=max_tokens, temperature=temperature,\n verbose=verbose)\n', (3196, 3404), False, 'from llama_index.llms import OpenAILike\n'), ((2934, 2970), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (2948, 2970), False, 'import os\n'), ((3684, 3720), 'os.environ.get', 'os.environ.get', (['"""GEMINI_API_KEY"""', '""""""'], {}), "('GEMINI_API_KEY', '')\n", (3698, 3720), False, 'import os\n'), ((3754, 3790), 'os.environ.get', 'os.environ.get', (['"""GOOGLE_API_KEY"""', '""""""'], {}), "('GOOGLE_API_KEY', '')\n", (3768, 3790), False, 'import os\n'), ((3918, 3957), 'google.generativeai.configure', 'genai.configure', ([], {'api_key': 'google_api_key'}), '(api_key=google_api_key)\n', (3933, 3957), True, 'import google.generativeai as genai\n'), ((2614, 2654), 'os.environ.get', 'os.environ.get', (['"""TOGETHERAI_API_KEY"""', '""""""'], {}), "('TOGETHERAI_API_KEY', '')\n", (2628, 2654), False, 'import os\n'), ((2799, 2838), 'os.environ.get', 'os.environ.get', (['"""FIREWORKS_API_KEY"""', '""""""'], {}), "('FIREWORKS_API_KEY', '')\n", (2813, 2838), False, 'import os\n'), ((4171, 4315), 'llama_index.llms.Gemini', 'Gemini', ([], {'api_key': '(api_key or gemini_api_key)', 'model_name': 'model_name', 'max_tokens': 'max_tokens', 'temperature': 'temperature', 'model_kwargs': 'model_kwargs'}), '(api_key=api_key or gemini_api_key, model_name=model_name, max_tokens\n =max_tokens, temperature=temperature, model_kwargs=model_kwargs)\n', (4177, 4315), False, 'from llama_index.llms import Gemini\n'), ((4606, 4702), 'llama_index.llms.PaLM', 'PaLM', ([], {'api_key': '(api_key or google_api_key)', 'model_name': 'model_name', 'generate_kwargs': 'model_kwargs'}), '(api_key=api_key or google_api_key, model_name=model_name,\n generate_kwargs=model_kwargs)\n', (4610, 4702), False, 'from llama_index.llms import PaLM\n'), ((4923, 4948), 'torch.cuda.is_available', 'torch.cuda.is_available', 
([], {}), '()\n', (4946, 4948), False, 'import torch\n'), ((5215, 5342), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_path': 'model', 'model_kwargs': 'model_kwargs', 'max_new_tokens': 'max_tokens', 'temperature': 'temperature', 'verbose': 'verbose'}), '(model_path=model, model_kwargs=model_kwargs, max_new_tokens=\n max_tokens, temperature=temperature, verbose=verbose)\n', (5223, 5342), False, 'from llama_index.llms import LlamaCPP\n'), ((5792, 5920), 'llama_index.llms.MistralAI', 'MistralAI', ([], {'api_key': 'api_key', 'model': 'model_name', 'max_tokens': 'max_tokens', 'temperature': 'temperature', 'additional_kwargs': 'model_kwargs'}), '(api_key=api_key, model=model_name, max_tokens=max_tokens,\n temperature=temperature, additional_kwargs=model_kwargs)\n', (5801, 5920), False, 'from llama_index.llms import MistralAI\n'), ((5557, 5596), 'os.environ.get', 'os.environ.get', (['"""MISTRAL_API_KEY"""', 'None'], {}), "('MISTRAL_API_KEY', None)\n", (5571, 5596), False, 'import os\n'), ((6387, 6511), 'llama_index.llms.Perplexity', 'Perplexity', ([], {'api_key': 'api_key', 'model': 'model_name', 'max_tokens': 'max_tokens', 'temperature': 'temperature', 'model_kwargs': 'model_kwargs'}), '(api_key=api_key, model=model_name, max_tokens=max_tokens,\n temperature=temperature, model_kwargs=model_kwargs)\n', (6397, 6511), False, 'from llama_index.llms import Perplexity\n'), ((6142, 6184), 'os.environ.get', 'os.environ.get', (['"""PERPLEXITYAI_API_KEY"""', '""""""'], {}), "('PERPLEXITYAI_API_KEY', '')\n", (6156, 6184), False, 'import os\n'), ((6972, 7061), 'llama_index.llms.Replicate', 'Replicate', ([], {'model': 'model_name', 'temperature': 'temperature', 'additional_kwargs': 'model_kwargs'}), '(model=model_name, temperature=temperature, additional_kwargs=\n model_kwargs)\n', (6981, 7061), False, 'from llama_index.llms import Replicate\n'), ((7661, 7791), 'llama_index.llms.HuggingFaceLLM', 'HuggingFaceLLM', ([], {'model_name': 'model_name', 'model_kwargs': 'model_kwargs', 'max_new_tokens': 'max_tokens', 'device_map': "(torch_device or 'auto')"}), "(model_name=model_name, model_kwargs=model_kwargs,\n max_new_tokens=max_tokens, device_map=torch_device or 'auto')\n", (7675, 7791), False, 'from llama_index.llms import HuggingFaceLLM\n'), ((6731, 6772), 'os.environ.get', 'os.environ.get', (['"""REPLICATE_API_TOKEN"""', '""""""'], {}), "('REPLICATE_API_TOKEN', '')\n", (6745, 6772), False, 'import os\n')] |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opentelemetry.trace import Tracer, get_tracer, set_span_in_context, Status, StatusCode
from opentelemetry.trace.span import Span
from opentelemetry.context import Context, get_current, attach, detach
from typing import Any, Dict, List, Optional, Callable
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.base import CallbackManager
from llama_index.callbacks.schema import CBEventType, EventPayload, BASE_TRACE_EVENT
from llama_index.callbacks.token_counting import get_llm_token_counts, TokenCountingEvent
from llama_index.utilities.token_counting import TokenCounter
from llama_index.utils import get_tokenizer
from dataclasses import dataclass
from contextvars import ContextVar
import threading
global_root_trace = ContextVar("trace", default=None)
@dataclass
class SpanWithContext:
"""Object for tracking a span, its context, and its context token"""
span: Span
context: Context
token: object
def __init__(self, span: Span, context: Context, token: object, thread_identity):
self.span = span
self.context = context
self.token = token
self.thread_identity = thread_identity
class OpenTelemetryCallbackHandler(BaseCallbackHandler):
"""Callback handler for creating OpenTelemetry traces from llamaindex traces and events."""
def __init__(
self,
tracer: Optional[Tracer] = get_tracer(__name__),
tokenizer: Optional[Callable[[str], List]] = None,
) -> None:
"""Initializes the OpenTelemetryCallbackHandler.
Args:
            tracer: Optional[Tracer]: An OpenTelemetry tracer used to create OpenTelemetry spans.
            tokenizer: Optional[Callable]: A tokenizer used to count tokens for LLM and embedding events.
        """
super().__init__(event_starts_to_ignore=[], event_ends_to_ignore=[])
self._tracer = tracer
self._event_map: Dict[str, SpanWithContext] = {}
self.tokenizer = tokenizer or get_tokenizer()
self._token_counter = TokenCounter(tokenizer=self.tokenizer)
def start_trace(self, trace_id: Optional[str] = None) -> None:
trace_name = "llamaindex.trace"
if trace_id is not None:
trace_name = "llamaindex.trace." + trace_id
span = self._tracer.start_span(trace_name)
ctx = set_span_in_context(span)
token = attach(ctx)
global_root_trace.set(SpanWithContext(span=span, context=ctx, token=token, thread_identity=threading.get_ident()))
def end_trace(
self,
trace_id: Optional[str] = None,
trace_map: Optional[Dict[str, List[str]]] = None,
) -> None:
root_trace = global_root_trace.get()
if root_trace is not None:
if root_trace.thread_identity == threading.get_ident():
detach(root_trace.token)
root_trace.span.end()
def on_event_start(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
parent_id: str = "",
**kwargs: Any,
) -> str:
parent_ctx = None
# Case where the parent of this event is another event
if parent_id in self._event_map:
parent_ctx = self._event_map[parent_id].context
# Case where the parent of this event is the root trace, and the root trace exists
        elif parent_id == BASE_TRACE_EVENT and global_root_trace.get() is not None:
parent_ctx = global_root_trace.get().context
# Case where the parent of this event is the root trace, but the trace does not exist
else:
return
span_prefix = "llamaindex.event."
span = self._tracer.start_span(span_prefix + event_type.value, context=parent_ctx)
ctx = set_span_in_context(span)
token = attach(ctx)
self._event_map[event_id] = SpanWithContext(span=span, context=ctx, token=token, thread_identity=threading.get_ident())
span.set_attribute("event_id", event_id)
if payload is not None:
if event_type is CBEventType.QUERY:
span.set_attribute("query.text", payload[EventPayload.QUERY_STR])
elif event_type is CBEventType.RETRIEVE:
pass
elif event_type is CBEventType.EMBEDDING:
span.set_attribute("embedding.model", payload[EventPayload.SERIALIZED]['model_name'])
span.set_attribute("embedding.batch_size", payload[EventPayload.SERIALIZED]['embed_batch_size'])
span.set_attribute("embedding.class_name", payload[EventPayload.SERIALIZED]['class_name'])
elif event_type is CBEventType.SYNTHESIZE:
span.set_attribute("synthesize.query_text", payload[EventPayload.QUERY_STR])
elif event_type is CBEventType.CHUNKING:
for i, chunk in enumerate(payload[EventPayload.CHUNKS]):
span.set_attribute(f"chunk.{i}", chunk)
elif event_type is CBEventType.TEMPLATING:
if payload[EventPayload.QUERY_WRAPPER_PROMPT]:
span.set_attribute("query_wrapper_prompt", payload[EventPayload.QUERY_WRAPPER_PROMPT])
if payload[EventPayload.SYSTEM_PROMPT]:
span.set_attribute("system_prompt", payload[EventPayload.SYSTEM_PROMPT])
if payload[EventPayload.TEMPLATE]:
span.set_attribute("template", payload[EventPayload.TEMPLATE])
if payload[EventPayload.TEMPLATE_VARS]:
for key, var in payload[EventPayload.TEMPLATE_VARS].items():
span.set_attribute(f"template_variables.{key}", var)
elif event_type is CBEventType.LLM:
span.set_attribute("llm.class_name", payload[EventPayload.SERIALIZED]['class_name'])
span.set_attribute("llm.formatted_prompt", payload[EventPayload.PROMPT])
span.set_attribute("llm.additional_kwargs", str(payload[EventPayload.ADDITIONAL_KWARGS]))
elif event_type is CBEventType.NODE_PARSING:
span.set_attribute("node_parsing.num_documents", len(payload[EventPayload.DOCUMENTS]))
elif event_type is CBEventType.EXCEPTION:
span.set_status(Status(StatusCode.ERROR))
span.record_exception(payload[EventPayload.EXCEPTION])
return event_id
def on_event_end(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
**kwargs: Any,
) -> None:
if event_id in self._event_map:
span = self._event_map[event_id].span
span.set_attribute("event_id", event_id)
if payload is not None:
if event_type is CBEventType.QUERY:
pass
elif event_type is CBEventType.RETRIEVE:
for i, node_with_score in enumerate(payload[EventPayload.NODES]):
node = node_with_score.node
score = node_with_score.score
span.set_attribute(f"query.node.{i}.id", node.hash)
span.set_attribute(f"query.node.{i}.score", score)
span.set_attribute(f"query.node.{i}.text", node.text)
elif event_type is CBEventType.EMBEDDING:
texts = payload[EventPayload.CHUNKS]
vectors = payload[EventPayload.EMBEDDINGS]
total_chunk_tokens = 0
                    # use the positional index so duplicate chunks are not conflated
                    for i, (text, vector) in enumerate(zip(texts, vectors)):
                        span.set_attribute(f"embedding_text_{i}", text)
                        span.set_attribute(f"embedding_vector_{i}", vector)
                        total_chunk_tokens += self._token_counter.get_string_tokens(text)
                    span.set_attribute("embedding_token_usage", total_chunk_tokens)
elif event_type is CBEventType.SYNTHESIZE:
pass
elif event_type is CBEventType.CHUNKING:
pass
elif event_type is CBEventType.TEMPLATING:
pass
elif event_type is CBEventType.LLM:
span.set_attribute("response.text", str(
payload.get(EventPayload.RESPONSE, "")
) or str(payload.get(EventPayload.COMPLETION, ""))
)
token_counts = get_llm_token_counts(self._token_counter, payload, event_id)
span.set_attribute("llm_prompt.token_usage", token_counts.prompt_token_count)
span.set_attribute("llm_completion.token_usage", token_counts.completion_token_count)
span.set_attribute("total_tokens_used", token_counts.total_token_count)
elif event_type is CBEventType.NODE_PARSING:
span.set_attribute("node_parsing.num_nodes", len(payload[EventPayload.NODES]))
elif event_type is CBEventType.EXCEPTION:
span.set_status(Status(StatusCode.ERROR))
span.record_exception(payload[EventPayload.EXCEPTION])
if self._event_map[event_id].thread_identity == threading.get_ident():
detach(self._event_map[event_id].token)
self._event_map.pop(event_id, None)
span.end()
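# Illustrative usage sketch (an assumption, not part of the original module): the handler
# would typically be registered with LlamaIndex through a CallbackManager, e.g.
#   from llama_index import ServiceContext, set_global_service_context
#   handler = OpenTelemetryCallbackHandler()
#   set_global_service_context(
#       ServiceContext.from_defaults(callback_manager=CallbackManager([handler])))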
| [
"llama_index.utilities.token_counting.TokenCounter",
"llama_index.callbacks.token_counting.get_llm_token_counts",
"llama_index.utils.get_tokenizer"
] | [((1450, 1483), 'contextvars.ContextVar', 'ContextVar', (['"""trace"""'], {'default': 'None'}), "('trace', default=None)\n", (1460, 1483), False, 'from contextvars import ContextVar\n'), ((2085, 2105), 'opentelemetry.trace.get_tracer', 'get_tracer', (['__name__'], {}), '(__name__)\n', (2095, 2105), False, 'from opentelemetry.trace import Tracer, get_tracer, set_span_in_context, Status, StatusCode\n'), ((2609, 2647), 'llama_index.utilities.token_counting.TokenCounter', 'TokenCounter', ([], {'tokenizer': 'self.tokenizer'}), '(tokenizer=self.tokenizer)\n', (2621, 2647), False, 'from llama_index.utilities.token_counting import TokenCounter\n'), ((2918, 2943), 'opentelemetry.trace.set_span_in_context', 'set_span_in_context', (['span'], {}), '(span)\n', (2937, 2943), False, 'from opentelemetry.trace import Tracer, get_tracer, set_span_in_context, Status, StatusCode\n'), ((2960, 2971), 'opentelemetry.context.attach', 'attach', (['ctx'], {}), '(ctx)\n', (2966, 2971), False, 'from opentelemetry.context import Context, get_current, attach, detach\n'), ((4386, 4411), 'opentelemetry.trace.set_span_in_context', 'set_span_in_context', (['span'], {}), '(span)\n', (4405, 4411), False, 'from opentelemetry.trace import Tracer, get_tracer, set_span_in_context, Status, StatusCode\n'), ((4428, 4439), 'opentelemetry.context.attach', 'attach', (['ctx'], {}), '(ctx)\n', (4434, 4439), False, 'from opentelemetry.context import Context, get_current, attach, detach\n'), ((2563, 2578), 'llama_index.utils.get_tokenizer', 'get_tokenizer', ([], {}), '()\n', (2576, 2578), False, 'from llama_index.utils import get_tokenizer\n'), ((3367, 3388), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (3386, 3388), False, 'import threading\n'), ((3406, 3430), 'opentelemetry.context.detach', 'detach', (['root_trace.token'], {}), '(root_trace.token)\n', (3412, 3430), False, 'from opentelemetry.context import Context, get_current, attach, detach\n'), ((4545, 4566), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (4564, 4566), False, 'import threading\n'), ((9898, 9919), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (9917, 9919), False, 'import threading\n'), ((9937, 9976), 'opentelemetry.context.detach', 'detach', (['self._event_map[event_id].token'], {}), '(self._event_map[event_id].token)\n', (9943, 9976), False, 'from opentelemetry.context import Context, get_current, attach, detach\n'), ((3071, 3092), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (3090, 3092), False, 'import threading\n'), ((9123, 9183), 'llama_index.callbacks.token_counting.get_llm_token_counts', 'get_llm_token_counts', (['self._token_counter', 'payload', 'event_id'], {}), '(self._token_counter, payload, event_id)\n', (9143, 9183), False, 'from llama_index.callbacks.token_counting import get_llm_token_counts, TokenCountingEvent\n'), ((6876, 6900), 'opentelemetry.trace.Status', 'Status', (['StatusCode.ERROR'], {}), '(StatusCode.ERROR)\n', (6882, 6900), False, 'from opentelemetry.trace import Tracer, get_tracer, set_span_in_context, Status, StatusCode\n'), ((9737, 9761), 'opentelemetry.trace.Status', 'Status', (['StatusCode.ERROR'], {}), '(StatusCode.ERROR)\n', (9743, 9761), False, 'from opentelemetry.trace import Tracer, get_tracer, set_span_in_context, Status, StatusCode\n')] |
from pathlib import Path
from llama_index import download_loader
ImageReader = download_loader("ImageReader")
# If the Image has key-value pairs text, use text_type = "key_value"
loader = ImageReader(text_type="key_value")
documents = loader.load_data(file=Path('./receipt.webp'))
print(documents) | [
"llama_index.download_loader"
] | [((80, 110), 'llama_index.download_loader', 'download_loader', (['"""ImageReader"""'], {}), "('ImageReader')\n", (95, 110), False, 'from llama_index import download_loader\n'), ((261, 283), 'pathlib.Path', 'Path', (['"""./receipt.webp"""'], {}), "('./receipt.webp')\n", (265, 283), False, 'from pathlib import Path\n')] |
# https://github.com/jerryjliu/llama_index/blob/main/examples/langchain_demo/LangchainDemo.ipynb
# Using LlamaIndex as a Callable Tool
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent
from langchain import HuggingFaceHub
from llama_index import LangchainEmbedding, ServiceContext
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext
from llama_index.query_engine import SubQuestionQueryEngine
documents = SimpleDirectoryReader('data/experiment').load_data()
repo_id = "tiiuae/falcon-7b"
embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.1, 'truncation': 'only_first',
"max_length": 1024})
llm_predictor = LLMPredictor(llm=llm)
service_context = ServiceContext.from_defaults(chunk_size=512, llm_predictor=llm_predictor, embed_model=embed_model)
index = VectorStoreIndex.from_documents(documents=documents, service_context=service_context)
engine = index.as_query_engine(similarity_top_k=3)
query_engine_tools = [
QueryEngineTool(
query_engine=engine,
metadata=ToolMetadata(name='Paulindex', description='Provides information about Paul Graham Essay')
)
]
s_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)
response = s_engine.query('Explain childhood')
print(response)
### As a chat bot
# tools = [
# Tool(
# name="LlamaIndex",
# func=lambda q: str(index.as_query_engine().query(q)),
# description="useful for when you want to answer questions about the author. The input to this tool should be a complete english sentence.",
# return_direct=True
# ),
# ]
# memory = ConversationBufferMemory(memory_key="chat_history")
# # llm = ChatOpenAI(temperature=0)
# agent_executor = initialize_agent(tools, llm, agent="conversational-react-description", memory=memory)
#
# agent_executor.run(input="hi, i am bob")
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.LLMPredictor",
"llama_index.ServiceContext.from_defaults",
"llama_index.tools.ToolMetadata",
"llama_index.query_engine.SubQuestionQueryEngine.from_defaults"
] | [((874, 992), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'repo_id', 'model_kwargs': "{'temperature': 0.1, 'truncation': 'only_first', 'max_length': 1024}"}), "(repo_id=repo_id, model_kwargs={'temperature': 0.1,\n 'truncation': 'only_first', 'max_length': 1024})\n", (888, 992), False, 'from langchain import HuggingFaceHub\n'), ((1057, 1078), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (1069, 1078), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1097, 1199), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(512)', 'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model'}), '(chunk_size=512, llm_predictor=llm_predictor,\n embed_model=embed_model)\n', (1125, 1199), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1205, 1295), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(documents=documents, service_context=\n service_context)\n', (1236, 1295), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1544, 1619), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': 'query_engine_tools'}), '(query_engine_tools=query_engine_tools)\n', (1580, 1619), False, 'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((842, 865), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (863, 865), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((727, 767), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data/experiment"""'], {}), "('data/experiment')\n", (748, 767), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1433, 1528), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""Paulindex"""', 'description': '"""Provides information about Paul Graham Essay"""'}), "(name='Paulindex', description=\n 'Provides information about Paul Graham Essay')\n", (1445, 1528), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n')] |
"""This module provides functionality for loading chat prompts.
The main function in this module is `load_chat_prompt`, which loads a chat prompt from a given JSON file.
The JSON file should contain two keys: "system_template" and "human_template", which correspond to the system and user messages respectively.
Typical usage example:
from wandbot.chat import prompts
chat_prompt = prompts.load_chat_prompt('path_to_your_json_file.json')
"""
import json
import logging
import pathlib
from typing import Union
from llama_index import ChatPromptTemplate
from llama_index.llms import ChatMessage, MessageRole
logger = logging.getLogger(__name__)
def partial_format(s, **kwargs):
# Manually parse the string and extract the field names
place_holders = set()
field_name = ""
in_field = False
for c in s:
if c == "{" and not in_field:
in_field = True
elif c == "}" and in_field:
place_holders.add(field_name)
field_name = ""
in_field = False
elif in_field:
field_name += c
replacements = {k: kwargs.get(k, "{" + k + "}") for k in place_holders}
# Escape all curly braces
s = s.replace("{", "{{").replace("}", "}}")
# Replace the placeholders
for k, v in replacements.items():
s = s.replace("{{" + k + "}}", v)
return s
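# Minimal illustrative example (not part of the original module): partial_format fills
# only the placeholders that are supplied and leaves the remaining ones intact, e.g.
#   partial_format("Answer in {language_code}: {query_str}", language_code="en")
#   -> "Answer in en: {query_str}"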
ROLE_MAP = {
"system": MessageRole.SYSTEM,
"human": MessageRole.USER,
"assistant": MessageRole.ASSISTANT,
}
def load_chat_prompt(
f_name: Union[pathlib.Path, str] = None,
language_code: str = "en",
query_intent: str = "",
) -> ChatPromptTemplate:
"""
Loads a chat prompt from a given file.
This function reads a JSON file specified by f_name and constructs a ChatPromptTemplate
object from the data. The JSON file should contain two keys: "system_template" and "human_template",
which correspond to the system and user messages respectively.
Args:
        f_name: A string or a pathlib.Path object representing the path to the JSON file.
        language_code: The language code substituted into the prompt's placeholders.
        query_intent: The query intent string substituted into the prompt's placeholders.
Returns:
A ChatPromptTemplate object constructed from the data in the JSON file.
"""
f_name = pathlib.Path(f_name)
template = json.load(f_name.open("r"))
human_template = partial_format(
template["messages"][-1]["human"],
language_code=language_code,
query_intent=query_intent,
)
messages = []
for message in template["messages"][:-1]:
for k, v in message.items():
messages.append(ChatMessage(role=ROLE_MAP[k], content=v))
messages.append(ChatMessage(role=MessageRole.USER, content=human_template))
prompt = ChatPromptTemplate(messages)
return prompt
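# Illustrative call (an assumption about the prompt JSON on disk; the file name and the
# "query_str" template variable below are hypothetical):
#   prompt = load_chat_prompt("chat_prompt.json", language_code="en", query_intent="")
#   messages = prompt.format_messages(query_str="How do I resume a crashed run?")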
| [
"llama_index.llms.ChatMessage",
"llama_index.ChatPromptTemplate"
] | [((626, 653), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (643, 653), False, 'import logging\n'), ((2217, 2237), 'pathlib.Path', 'pathlib.Path', (['f_name'], {}), '(f_name)\n', (2229, 2237), False, 'import pathlib\n'), ((2706, 2734), 'llama_index.ChatPromptTemplate', 'ChatPromptTemplate', (['messages'], {}), '(messages)\n', (2724, 2734), False, 'from llama_index import ChatPromptTemplate\n'), ((2633, 2691), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': 'human_template'}), '(role=MessageRole.USER, content=human_template)\n', (2644, 2691), False, 'from llama_index.llms import ChatMessage, MessageRole\n'), ((2571, 2611), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'ROLE_MAP[k]', 'content': 'v'}), '(role=ROLE_MAP[k], content=v)\n', (2582, 2611), False, 'from llama_index.llms import ChatMessage, MessageRole\n')] |
# Copyright 2023 osiworx
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import datetime
import os
from llama_index.vector_stores.milvus import MilvusVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
)
from llama_index.core.storage.storage_context import StorageContext
vector_store = MilvusVectorStore(
    uri="http://localhost:19530",
    port=19530,
    collection_name="llama_index_prompts_large",
    dim=384,
    similarity_metric="L2",
)
sample_files_path = r"E:\short_large"
embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L12-v2")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
for subdir, dirs, files in os.walk(sample_files_path):
if len(files) > 0:
now = datetime.datetime.now()
print(f'{now.strftime("%H:%M:%S")} adding folder: {subdir}')
documents = SimpleDirectoryReader(subdir).load_data()
        # exclude file_path from the embedding and LLM metadata; it is not needed for this use case
        # also skip documents with empty text, since embedding them would result in an error
        docs = []
        for doc in documents:
            doc.excluded_llm_metadata_keys.append("file_path")
            doc.excluded_embed_metadata_keys.append("file_path")
            if doc.text != '':
                docs.append(doc)
del documents
vector_index = VectorStoreIndex.from_documents(docs, storage_context=storage_context,embed_model=embed_model, show_progress=True)
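# Possible follow-up (illustrative sketch, not part of the original ingestion script):
# once the documents are embedded, the same index could serve retrieval queries, e.g.
#   retriever = vector_index.as_retriever(similarity_top_k=5)
#   nodes = retriever.retrieve("a short prompt describing a sunset over the ocean")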
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.vector_stores.milvus.MilvusVectorStore",
"llama_index.core.storage.storage_context.StorageContext.from_defaults",
"llama_index.core.SimpleDirectoryReader"
] | [((894, 1036), 'llama_index.vector_stores.milvus.MilvusVectorStore', 'MilvusVectorStore', ([], {'uri': '"""http://localhost:19530"""', 'port': '(19530)', 'collection_name': '"""llama_index_prompts_large"""', 'dim': '(384)', 'similarity_metric': '"""L2"""'}), "(uri='http://localhost:19530', port=19530, collection_name\n ='llama_index_prompts_large', dim=384, similarity_metric='L2')\n", (911, 1036), False, 'from llama_index.vector_stores.milvus import MilvusVectorStore\n'), ((1122, 1196), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L12-v2"""'}), "(model_name='sentence-transformers/all-MiniLM-L12-v2')\n", (1142, 1196), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((1216, 1271), 'llama_index.core.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1244, 1271), False, 'from llama_index.core.storage.storage_context import StorageContext\n'), ((1301, 1327), 'os.walk', 'os.walk', (['sample_files_path'], {}), '(sample_files_path)\n', (1308, 1327), False, 'import os\n'), ((1366, 1389), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1387, 1389), False, 'import datetime\n'), ((2023, 2142), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'embed_model': 'embed_model', 'show_progress': '(True)'}), '(docs, storage_context=storage_context,\n embed_model=embed_model, show_progress=True)\n', (2054, 2142), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n'), ((1480, 1509), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['subdir'], {}), '(subdir)\n', (1501, 1509), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n')] |
import numpy as np
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.llms.litellm import LiteLLM
from langchain_google_genai import ChatGoogleGenerativeAI
from trulens_eval.feedback.provider.langchain import Langchain
from trulens_eval import Tru, Feedback, TruLlama
from trulens_eval.feedback import Groundedness
# Setup RAG
index = load_index_from_storage(
StorageContext.from_defaults(persist_dir="base_index"),
embed_model="local:../models/bge-small-en-v1.5",
)
llm = LiteLLM(model="gemini/gemini-pro", temperature=0.1)
query_engine = index.as_query_engine(llm=llm)
# Evaluate with trulens-eval
# Define provider and database
_llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0)
provider = Langchain(chain=_llm)
database_url = "sqlite:///data/trulens.db"
tru = Tru(database_url=database_url, database_redact_keys=True)
# tru.reset_database()
# Using TruLlama
f_qa_relevance = Feedback(
provider.relevance_with_cot_reasons, name="Answer Relevance"
).on_input_output()
f_context_relevance = (
Feedback(provider.relevance_with_cot_reasons, name="Context Relevance")
.on_input()
.on(TruLlama.select_source_nodes().node.text)
.aggregate(np.mean)
)
grounded = Groundedness(groundedness_provider=provider)
f_groundedness = (
Feedback(grounded.groundedness_measure_with_cot_reasons, name="Groundedness")
.on(TruLlama.select_source_nodes().node.text)
.on_output()
.aggregate(grounded.grounded_statements_aggregator)
)
app_id = "Chain2"
tru_recorder = TruLlama(
query_engine,
app_id=app_id,
feedbacks=[
f_qa_relevance,
f_context_relevance,
f_groundedness,
],
)
qns = ...
for qn in qns:
with tru_recorder as recording:
res = query_engine.query(qn)
# Results
# dashboard
tru.run_dashboard(port=8601)
# # dataframe
# records_df, feedback = tru.get_records_and_feedback(app_ids=[app_id])
# records_df.head()
| [
"llama_index.core.StorageContext.from_defaults",
"llama_index.llms.litellm.LiteLLM"
] | [((519, 570), 'llama_index.llms.litellm.LiteLLM', 'LiteLLM', ([], {'model': '"""gemini/gemini-pro"""', 'temperature': '(0.1)'}), "(model='gemini/gemini-pro', temperature=0.1)\n", (526, 570), False, 'from llama_index.llms.litellm import LiteLLM\n'), ((687, 744), 'langchain_google_genai.ChatGoogleGenerativeAI', 'ChatGoogleGenerativeAI', ([], {'model': '"""gemini-pro"""', 'temperature': '(0)'}), "(model='gemini-pro', temperature=0)\n", (709, 744), False, 'from langchain_google_genai import ChatGoogleGenerativeAI\n'), ((756, 777), 'trulens_eval.feedback.provider.langchain.Langchain', 'Langchain', ([], {'chain': '_llm'}), '(chain=_llm)\n', (765, 777), False, 'from trulens_eval.feedback.provider.langchain import Langchain\n'), ((828, 885), 'trulens_eval.Tru', 'Tru', ([], {'database_url': 'database_url', 'database_redact_keys': '(True)'}), '(database_url=database_url, database_redact_keys=True)\n', (831, 885), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1245, 1289), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'provider'}), '(groundedness_provider=provider)\n', (1257, 1289), False, 'from trulens_eval.feedback import Groundedness\n'), ((1551, 1657), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': 'app_id', 'feedbacks': '[f_qa_relevance, f_context_relevance, f_groundedness]'}), '(query_engine, app_id=app_id, feedbacks=[f_qa_relevance,\n f_context_relevance, f_groundedness])\n', (1559, 1657), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((401, 455), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""base_index"""'}), "(persist_dir='base_index')\n", (429, 455), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((945, 1015), 'trulens_eval.Feedback', 'Feedback', (['provider.relevance_with_cot_reasons'], {'name': '"""Answer Relevance"""'}), "(provider.relevance_with_cot_reasons, name='Answer Relevance')\n", (953, 1015), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1165, 1195), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1193, 1195), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1069, 1140), 'trulens_eval.Feedback', 'Feedback', (['provider.relevance_with_cot_reasons'], {'name': '"""Context Relevance"""'}), "(provider.relevance_with_cot_reasons, name='Context Relevance')\n", (1077, 1140), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1313, 1390), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {'name': '"""Groundedness"""'}), "(grounded.groundedness_measure_with_cot_reasons, name='Groundedness')\n", (1321, 1390), False, 'from trulens_eval import Tru, Feedback, TruLlama\n'), ((1399, 1429), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1427, 1429), False, 'from trulens_eval import Tru, Feedback, TruLlama\n')] |
import os
from dotenv import load_dotenv
from llama_index.chat_engine.condense_plus_context import CondensePlusContextChatEngine
from llama_index.llms.openai import OpenAI
from llama_index.llms.types import ChatMessage, MessageRole
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.retrievers import PathwayRetriever
from traceloop.sdk import Traceloop
from pathway.xpacks.llm.vector_store import VectorStoreClient
load_dotenv()
Traceloop.init(app_name=os.environ.get("APP_NAME", "PW - LlamaIndex (Streamlit)"))
DEFAULT_PATHWAY_HOST = "demo-document-indexing.pathway.stream"
PATHWAY_HOST = os.environ.get("PATHWAY_HOST", DEFAULT_PATHWAY_HOST)
PATHWAY_PORT = int(os.environ.get("PATHWAY_PORT", "80"))
def get_additional_headers():
headers = {}
key = os.environ.get("PATHWAY_API_KEY")
if key is not None:
headers = {"X-Pathway-API-Key": key}
return headers
vector_client = VectorStoreClient(
PATHWAY_HOST,
PATHWAY_PORT,
# additional_headers=get_additional_headers(),
)
retriever = PathwayRetriever(host=PATHWAY_HOST, port=PATHWAY_PORT)
retriever.client = VectorStoreClient(
host=PATHWAY_HOST,
port=PATHWAY_PORT,
# additional_headers=get_additional_headers()
)
llm = OpenAI(model="gpt-3.5-turbo")
query_engine = RetrieverQueryEngine.from_args(
retriever,
)
pathway_explaination = "Pathway is a high-throughput, low-latency data processing framework that handles live data & streaming for you."
DEFAULT_MESSAGES = [
ChatMessage(role=MessageRole.USER, content="What is Pathway?"),
ChatMessage(role=MessageRole.ASSISTANT, content=pathway_explaination),
]
chat_engine = CondensePlusContextChatEngine.from_defaults(
retriever=retriever,
    system_prompt="""You are a RAG AI that answers users' questions based on the provided sources.
    IF THE QUESTION IS NOT RELATED TO ANY OF THE CONTEXT DOCUMENTS, SAY IT'S NOT POSSIBLE TO ANSWER USING THE PHRASE `The looked-up documents do not provide information about...`""",
verbose=True,
chat_history=DEFAULT_MESSAGES,
llm=llm,
)
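# Illustrative usage sketch (an assumption; the original file stops after building the
# engine, presumably because a Streamlit UI drives it elsewhere):
#   response = chat_engine.chat("How do I connect a live data source to Pathway?")
#   print(response.response)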
| [
"llama_index.llms.openai.OpenAI",
"llama_index.chat_engine.condense_plus_context.CondensePlusContextChatEngine.from_defaults",
"llama_index.query_engine.RetrieverQueryEngine.from_args",
"llama_index.llms.types.ChatMessage",
"llama_index.retrievers.PathwayRetriever"
] | [((443, 456), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (454, 456), False, 'from dotenv import load_dotenv\n'), ((622, 674), 'os.environ.get', 'os.environ.get', (['"""PATHWAY_HOST"""', 'DEFAULT_PATHWAY_HOST'], {}), "('PATHWAY_HOST', DEFAULT_PATHWAY_HOST)\n", (636, 674), False, 'import os\n'), ((932, 977), 'pathway.xpacks.llm.vector_store.VectorStoreClient', 'VectorStoreClient', (['PATHWAY_HOST', 'PATHWAY_PORT'], {}), '(PATHWAY_HOST, PATHWAY_PORT)\n', (949, 977), False, 'from pathway.xpacks.llm.vector_store import VectorStoreClient\n'), ((1054, 1108), 'llama_index.retrievers.PathwayRetriever', 'PathwayRetriever', ([], {'host': 'PATHWAY_HOST', 'port': 'PATHWAY_PORT'}), '(host=PATHWAY_HOST, port=PATHWAY_PORT)\n', (1070, 1108), False, 'from llama_index.retrievers import PathwayRetriever\n'), ((1128, 1183), 'pathway.xpacks.llm.vector_store.VectorStoreClient', 'VectorStoreClient', ([], {'host': 'PATHWAY_HOST', 'port': 'PATHWAY_PORT'}), '(host=PATHWAY_HOST, port=PATHWAY_PORT)\n', (1145, 1183), False, 'from pathway.xpacks.llm.vector_store import VectorStoreClient\n'), ((1254, 1283), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (1260, 1283), False, 'from llama_index.llms.openai import OpenAI\n'), ((1300, 1341), 'llama_index.query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', (['retriever'], {}), '(retriever)\n', (1330, 1341), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((1668, 2062), 'llama_index.chat_engine.condense_plus_context.CondensePlusContextChatEngine.from_defaults', 'CondensePlusContextChatEngine.from_defaults', ([], {'retriever': 'retriever', 'system_prompt': '"""You are RAG AI that answers users questions based on provided sources.\n IF QUESTION IS NOT RELATED TO ANY OF THE CONTEXT DOCUMENTS, SAY IT\'S NOT POSSIBLE TO ANSWER USING PHRASE `The looked-up documents do not provde information about...`"""', 'verbose': '(True)', 'chat_history': 'DEFAULT_MESSAGES', 'llm': 'llm'}), '(retriever=retriever,\n system_prompt=\n """You are RAG AI that answers users questions based on provided sources.\n IF QUESTION IS NOT RELATED TO ANY OF THE CONTEXT DOCUMENTS, SAY IT\'S NOT POSSIBLE TO ANSWER USING PHRASE `The looked-up documents do not provde information about...`"""\n , verbose=True, chat_history=DEFAULT_MESSAGES, llm=llm)\n', (1711, 2062), False, 'from llama_index.chat_engine.condense_plus_context import CondensePlusContextChatEngine\n'), ((695, 731), 'os.environ.get', 'os.environ.get', (['"""PATHWAY_PORT"""', '"""80"""'], {}), "('PATHWAY_PORT', '80')\n", (709, 731), False, 'import os\n'), ((792, 825), 'os.environ.get', 'os.environ.get', (['"""PATHWAY_API_KEY"""'], {}), "('PATHWAY_API_KEY')\n", (806, 825), False, 'import os\n'), ((1512, 1574), 'llama_index.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': '"""What is Pathway?"""'}), "(role=MessageRole.USER, content='What is Pathway?')\n", (1523, 1574), False, 'from llama_index.llms.types import ChatMessage, MessageRole\n'), ((1580, 1649), 'llama_index.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'pathway_explaination'}), '(role=MessageRole.ASSISTANT, content=pathway_explaination)\n', (1591, 1649), False, 'from llama_index.llms.types import ChatMessage, MessageRole\n'), ((483, 540), 'os.environ.get', 'os.environ.get', (['"""APP_NAME"""', '"""PW - LlamaIndex (Streamlit)"""'], {}), "('APP_NAME', 'PW - LlamaIndex (Streamlit)')\n", 
(497, 540), False, 'import os\n')] |
# My OpenAI Key
import logging
import os
import sys
from IPython.display import Markdown, display
from llama_index import GPTTreeIndex, SimpleDirectoryReader
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
documents = SimpleDirectoryReader("data").load_data()
index = GPTTreeIndex.from_documents(documents)
index.save_to_disk("index.json")
# try loading
new_index = GPTTreeIndex.load_from_disk("index.json")
# set Logging to DEBUG for more detailed outputs
response = new_index.query("What did the author do growing up?")
print(response)
# set Logging to DEBUG for more detailed outputs
response = new_index.query("What did the author do after his time at Y Combinator?")
print(response)
| [
"llama_index.SimpleDirectoryReader",
"llama_index.GPTTreeIndex.load_from_disk",
"llama_index.GPTTreeIndex.from_documents"
] | [((160, 218), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (179, 218), False, 'import logging\n'), ((324, 351), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (333, 351), False, 'import os\n'), ((415, 453), 'llama_index.GPTTreeIndex.from_documents', 'GPTTreeIndex.from_documents', (['documents'], {}), '(documents)\n', (442, 453), False, 'from llama_index import GPTTreeIndex, SimpleDirectoryReader\n'), ((515, 556), 'llama_index.GPTTreeIndex.load_from_disk', 'GPTTreeIndex.load_from_disk', (['"""index.json"""'], {}), "('index.json')\n", (542, 556), False, 'from llama_index import GPTTreeIndex, SimpleDirectoryReader\n'), ((250, 290), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (271, 290), False, 'import logging\n'), ((219, 238), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (236, 238), False, 'import logging\n'), ((365, 394), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (386, 394), False, 'from llama_index import GPTTreeIndex, SimpleDirectoryReader\n')] |
from llama_index import Document
import json, os
from llama_index.node_parser import SimpleNodeParser
from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex
from langchain import OpenAI
from llama_index.composability import ComposableGraph
from llama_index.data_structs.node_v2 import Node, DocumentRelationship
class ConfigLLM:
# define LLM
name = "gpt-3.5-turbo"
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo"))
# define prompt helper
# set maximum input size
max_input_size = 2096
# set number of output tokens
num_output = 256
# set maximum chunk overlap
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
def index_construct_and_save(timechunk_path: str, save_loc: str):
for filename in os.listdir(timechunk_path):
file = os.path.join(timechunk_path, filename)
        with open(file, mode="r") as f:
            data = json.load(f)
        # keys, text = list(zip(*data.items()))
        nodes = [Node(text=text, doc_id=key) for key, text in data.items()]
index = GPTTreeIndex(nodes=nodes)
index.save_to_disk(f"{save_loc}/{filename}.json")
def load_index_with_summary(index_loc: str):
index_list = []
index_summary_list = []
for filename in os.listdir(index_loc):
index_file = os.path.join(index_loc, filename)
index = GPTTreeIndex.load_from_disk(index_file)
summary = index.query(
"What is the summary of this document chunk?", mode="summarize"
)
index_summary_list.append(str(summary))
index_list.append(index)
#! logging
print("index list", len(index_list), index_list)
return index_list, index_summary_list
def compose_graph_and_save(index_loc: str, save_loc: str):
index_list, index_summary_list = load_index_with_summary(index_loc)
#! logging
print(index_summary_list)
graph = ComposableGraph.from_indices(GPTListIndex, index_list, index_summary_list)
graph.save_to_disk(save_loc)
def load_graph(graph_location: str):
return ComposableGraph.load_from_disk(graph_location)
def query_graph(query: str, graph: ComposableGraph):
response = graph.query(query, query_configs=get_query_configs())
return response
def parse_response(response):
print("-" * 50)
print(response)
print("-" * 50)
print(
str(response),
# response.source_nodes,
[node_with_score.node.doc_id for node_with_score in response.source_nodes],
# [node.ref_doc_id for node in response.source_nodes],
response.get_formatted_sources(),
sep="\n" + "+" * 80 + "\n",
)
print("-" * 50)
def query_composed_index(query: str, graph_loc: str):
graph = load_graph(graph_loc)
response = query_graph(query, graph)
parse_response(response)
def query_single_index(query: str, index_loc: str):
index = GPTTreeIndex.load_from_disk(index_loc)
response = index.query(query)
parse_response(response)
def get_query_configs():
# set query config
query_configs = [
{
"index_struct_type": "simple_dict",
"query_mode": "default",
"query_kwargs": {"similarity_top_k": 1},
},
{
"index_struct_type": "keyword_table",
"query_mode": "simple",
"query_kwargs": {},
},
]
return query_configs
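# Illustrative driver (an assumption, not part of the original module; all paths below
# are hypothetical): the intended call order appears to be index construction, graph
# composition, and then querying, e.g.
#   index_construct_and_save("timechunks/", "indexes/")
#   compose_graph_and_save("indexes/", "graph.json")
#   query_composed_index("What changed over this period?", "graph.json")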
| [
"llama_index.GPTTreeIndex.load_from_disk",
"llama_index.composability.ComposableGraph.from_indices",
"llama_index.GPTTreeIndex",
"llama_index.data_structs.node_v2.Node",
"llama_index.PromptHelper",
"llama_index.composability.ComposableGraph.load_from_disk"
] | [((705, 764), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (717, 764), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((853, 879), 'os.listdir', 'os.listdir', (['timechunk_path'], {}), '(timechunk_path)\n', (863, 879), False, 'import json, os\n'), ((1328, 1349), 'os.listdir', 'os.listdir', (['index_loc'], {}), '(index_loc)\n', (1338, 1349), False, 'import json, os\n'), ((1966, 2040), 'llama_index.composability.ComposableGraph.from_indices', 'ComposableGraph.from_indices', (['GPTListIndex', 'index_list', 'index_summary_list'], {}), '(GPTListIndex, index_list, index_summary_list)\n', (1994, 2040), False, 'from llama_index.composability import ComposableGraph\n'), ((2124, 2170), 'llama_index.composability.ComposableGraph.load_from_disk', 'ComposableGraph.load_from_disk', (['graph_location'], {}), '(graph_location)\n', (2154, 2170), False, 'from llama_index.composability import ComposableGraph\n'), ((2974, 3012), 'llama_index.GPTTreeIndex.load_from_disk', 'GPTTreeIndex.load_from_disk', (['index_loc'], {}), '(index_loc)\n', (3001, 3012), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((896, 934), 'os.path.join', 'os.path.join', (['timechunk_path', 'filename'], {}), '(timechunk_path, filename)\n', (908, 934), False, 'import json, os\n'), ((1129, 1154), 'llama_index.GPTTreeIndex', 'GPTTreeIndex', ([], {'nodes': 'nodes'}), '(nodes=nodes)\n', (1141, 1154), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((1372, 1405), 'os.path.join', 'os.path.join', (['index_loc', 'filename'], {}), '(index_loc, filename)\n', (1384, 1405), False, 'import json, os\n'), ((1422, 1461), 'llama_index.GPTTreeIndex.load_from_disk', 'GPTTreeIndex.load_from_disk', (['index_file'], {}), '(index_file)\n', (1449, 1461), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((437, 486), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (443, 486), False, 'from langchain import OpenAI\n'), ((1052, 1080), 'llama_index.data_structs.node_v2.Node', 'Node', ([], {'text': 'text', 'doc_id': 'keys'}), '(text=text, doc_id=keys)\n', (1056, 1080), False, 'from llama_index.data_structs.node_v2 import Node, DocumentRelationship\n')] |