from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.messages import AIMessage, HumanMessage
from fastapi import FastAPI
from pydantic import BaseModel

from rag import Rag
from storePDF import get_documents_from_path

# Load every PDF in the `files` folder and split it into overlapping chunks.
folder_path = "files"
all_documents = get_documents_from_path(folder_path)
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000, chunk_overlap=200, add_start_index=True
)
texts = text_splitter.split_documents(all_documents)
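# NOTE: `texts` is not consumed anywhere below; Rag() presumably builds or
# loads its own index (e.g. in storePDF). If Rag exposed an ingest method,
# this is where the chunks would be handed over.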

# Build the retrieval-augmented generation chain (implemented in rag.Rag).
rag_llm = Rag()
rag_llm.createRagChain()

# In-memory conversation history. This is module-level state shared by every
# client of the API, so all callers see one combined conversation.
chat_history = []


class ChatInput(BaseModel):
    question: str


app = FastAPI()


@app.get("/")
async def root():
    return {"message": "Hello World"}


@app.post("/generatechat/")
async def generate_response(chat_input: ChatInput):
    # Answer the question with RAG, then record the exchange so follow-up
    # questions can refer back to it.
    ai_msg = rag_llm.generateResponse(chat_input.question, chat_history)
    chat_history.extend(
        [
            HumanMessage(content=chat_input.question),
            AIMessage(content=ai_msg["answer"]),
        ]
    )
    return {"response": ai_msg}
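
# Example client call (a sketch; assumes the host/port configured in
# run_server below, and the question text is only an illustration):
#
#   import requests
#   resp = requests.post(
#       "http://127.0.0.1:8000/generatechat/",
#       json={"question": "What is this document about?"},
#   )
#   print(resp.json()["response"]["answer"])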


def run_server():
    import uvicorn

    # uvicorn.run() blocks until the server shuts down, so announce first.
    print("Server is running")
    uvicorn.run(app, host="127.0.0.1", port=8000)
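
# To launch the API (assuming this module is saved as main.py):
#
#   python main.py
#
# or, with auto-reload during development:
#
#   uvicorn main:app --reload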


if __name__ == "__main__":
    run_server()