Spaces: Runtime error
iabualhaol committed · Commit 36ea7dc
1 Parent(s): b0240e9
Update app.py

app.py CHANGED
@@ -1,66 +1,15 @@
-import os
 import gradio as gr
-from langchain.document_loaders import PyMuPDFLoader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.vectorstores import Chroma
-from langchain.embeddings import OpenAIEmbeddings
-from langchain.chat_models import ChatOpenAI
-from langchain.chains import RetrievalQA
-
-conversation_history = ""
-
-def main(api_key, pdf_path, user_input):
-    global conversation_history
-    os.environ["OPENAI_API_KEY"] = api_key
-
-    persist_directory = "./storage"
-
-    loader = PyMuPDFLoader(pdf_path.name)
-    documents = loader.load()
-
-    text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=10)
-    texts = text_splitter.split_documents(documents)
-
-    embeddings = OpenAIEmbeddings()
-    vectordb = Chroma.from_documents(documents=texts,
-                                     embedding=embeddings,
-                                     persist_directory=persist_directory)
-    vectordb.persist()
-
-    retriever = vectordb.as_retriever(search_kwargs={"k": 3})
-    llm = ChatOpenAI(model_name='gpt-4')
-
-    qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
-
-    # Update conversation history with the user's latest question
-    conversation_history += f"User: {user_input}\n"
-
-    query = f"{conversation_history}###Prompt {user_input}"
-    try:
-        llm_response = qa(query)
-        response_text = llm_response["result"]
-
-        # Update conversation history with the model's latest answer
-        conversation_history += f"Model: {response_text}\n"
-
-        return conversation_history  # Return the entire conversation history
-    except Exception as err:
-        return f'Exception occurred. Please try again: {str(err)}'
-
-iface = gr.Interface(
-    fn=main,
-    inputs=[
-        gr.inputs.Textbox(label="OpenAI API Key", type="password"),
-        gr.inputs.File(label="Upload PDF"),
-        gr.inputs.Textbox(label="Enter Query")
-    ],
-    outputs="text",
-    live=False,
-    show_submit_button=True,
-    description="Enter your OpenAI API Key, upload a PDF, and enter a query to get a response."
-)
-iface.launch(share=True)
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+model_name = "distilbert-base-uncased-finetuned-sst-2-english"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForSequenceClassification.from_pretrained(model_name)
+
+def predict_sentiment(text):
+    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
+    outputs = model(**inputs)
+    probs = outputs.logits.softmax(dim=1).detach().numpy()[0]
+    return {"Negative": float(probs[0]), "Positive": float(probs[1])}
+
+iface = gr.Interface(fn=predict_sentiment, inputs="text", outputs="label")
+iface.launch()
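
For reference, a minimal usage sketch of the new predict_sentiment logic when exercised outside Gradio; the example sentence and the print call are illustrative, not part of the commit:

    # Illustrative only: runs the same model and label mapping as the new app.py.
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    model_name = "distilbert-base-uncased-finetuned-sst-2-english"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)

    # Example input; any string up to 512 tokens works.
    inputs = tokenizer("I love this Space!", return_tensors="pt", truncation=True, max_length=512)
    probs = model(**inputs).logits.softmax(dim=1).detach().numpy()[0]
    # For this SST-2 checkpoint, index 0 is the negative score and index 1 the positive score.
    print({"Negative": float(probs[0]), "Positive": float(probs[1])})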