FridayMaster committed on
Commit
7dbc572
1 Parent(s): fe28712

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -4
app.py CHANGED
@@ -1,9 +1,9 @@
1
  import pandas as pd
2
  import fitz # PyMuPDF for PDF extraction
3
  import spacy
4
- from langchain_community.chains import ConversationalRetrievalChain
5
- from langchain_community.llms import OpenAI # Updated import
6
- from langchain_community.vectorstores import FAISS # Updated import
7
  import torch
8
  from transformers import AutoTokenizer, AutoModel
9
  import gradio as gr
@@ -38,7 +38,7 @@ class CustomEmbeddingModel:
38
  embedding_model = CustomEmbeddingModel('distilbert-base-uncased') # Replace with your model name
39
 
40
  # Load Spacy model for preprocessing
41
- nlp = spacy.load("en_core_web_sm") # Ensure the model is installed
42
 
43
  def preprocess_text(text):
44
  doc = nlp(text)
@@ -57,6 +57,8 @@ vector_store = FAISS.from_documents(documents, embeddings)
57
  # Create LangChain model and chain
58
  llm_model = OpenAI('gpt-3.5-turbo') # You can replace this with a different LLM if desired
59
  retriever = vector_store.as_retriever()
 
 
60
  chain = ConversationalRetrievalChain.from_llm(llm_model, retriever=retriever)
61
 
62
  # Function to generate a response
@@ -77,3 +79,4 @@ iface = gr.Interface(
77
  if __name__ == "__main__":
78
  iface.launch()
79
 
 
 
1
  import pandas as pd
2
  import fitz # PyMuPDF for PDF extraction
3
  import spacy
4
+ from langchain.chains import ConversationalRetrievalChain # Ensure this class is available or use an alternative
5
+ from langchain.llms import OpenAI
6
+ from langchain.vectorstores import FAISS
7
  import torch
8
  from transformers import AutoTokenizer, AutoModel
9
  import gradio as gr
 
38
  embedding_model = CustomEmbeddingModel('distilbert-base-uncased') # Replace with your model name
39
 
40
  # Load Spacy model for preprocessing
41
+ nlp = spacy.load("en_core_web_sm")
42
 
43
  def preprocess_text(text):
44
  doc = nlp(text)
 
57
  # Create LangChain model and chain
58
  llm_model = OpenAI('gpt-3.5-turbo') # You can replace this with a different LLM if desired
59
  retriever = vector_store.as_retriever()
60
+
61
+ # Create a conversational chain
62
  chain = ConversationalRetrievalChain.from_llm(llm_model, retriever=retriever)
63
 
64
  # Function to generate a response
 
79
  if __name__ == "__main__":
80
  iface.launch()
81
 
82
+