Hugging Face Space diff view — file: app.py (commit "Update app.py"; Space build status: Runtime error).
@@ -1,21 +1,64 @@
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
|
|
|
|
|
|
|
|
|
|
3 |
|
4 |
-
"""
|
5 |
-
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
|
6 |
-
"""
|
7 |
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
8 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
|
10 |
def respond(
|
11 |
-
message,
|
12 |
-
history:
|
13 |
-
system_message,
|
14 |
-
max_tokens,
|
15 |
-
temperature,
|
16 |
-
top_p,
|
17 |
):
|
18 |
-
system_message = "
|
19 |
messages = [{"role": "system", "content": system_message}]
|
20 |
|
21 |
for val in history:
|
@@ -26,8 +69,12 @@ def respond(
|
|
26 |
|
27 |
messages.append({"role": "user", "content": message})
|
28 |
|
29 |
-
|
|
|
|
|
|
|
30 |
|
|
|
31 |
for message in client.chat_completion(
|
32 |
messages,
|
33 |
max_tokens=max_tokens,
|
@@ -36,36 +83,32 @@ def respond(
|
|
36 |
top_p=top_p,
|
37 |
):
|
38 |
token = message.choices[0].delta.content
|
39 |
-
|
40 |
response += token
|
41 |
yield response
|
42 |
|
43 |
-
|
44 |
-
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
|
45 |
-
"""
|
46 |
-
demo = gr.ChatInterface(
|
47 |
-
respond,
|
48 |
-
additional_inputs=[
|
49 |
-
gr.Textbox(value = "You are a good listener and concise and point-specific speaker. You advise relaxation exercises, suggest avoiding negative thoughts, and guide through steps to manage stress. Discuss what's on your mind, or ask me for a quick relaxation exercise.", label="System message"),
|
50 |
-
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
|
51 |
-
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
|
52 |
-
gr.Slider(
|
53 |
-
minimum=0.1,
|
54 |
-
maximum=1.0,
|
55 |
-
value=0.95,
|
56 |
-
step=0.05,
|
57 |
-
label="Top-p (nucleus sampling)",
|
58 |
-
),
|
59 |
-
],
|
60 |
-
|
61 |
-
examples = [
|
62 |
-
["What are the most common mistakes candidates make during interviews, and how can I avoid them?"],
|
63 |
-
["Do you have any tips for handling nerves or anxiety during interviews?"],
|
64 |
-
["What are effective strategies for answering behavioral interview questions?"]
|
65 |
-
],
|
66 |
-
title = 'Job Interview Prep Coach'
|
67 |
-
)
|
68 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
69 |
|
70 |
if __name__ == "__main__":
|
71 |
demo.launch()
|
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
3 |
+
from typing import List, Tuple
|
4 |
+
import fitz # PyMuPDF
|
5 |
+
from sentence_transformers import SentenceTransformer, util
|
6 |
+
import numpy as np
|
7 |
+
import faiss
|
8 |
|
|
|
|
|
|
|
9 |
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
10 |
|
11 |
+
class MyApp:
    """RAG helper for the chatbot.

    Extracts per-page text from a PDF, embeds each page with a
    SentenceTransformer, indexes the embeddings in a FAISS L2 index, and
    retrieves the pages most similar to a query string.
    """

    # Single encoder name used for both indexing and querying; the two MUST
    # match or the vector space is inconsistent.
    _MODEL_NAME = "all-MiniLM-L6-v2"

    def __init__(self) -> None:
        self.documents = []      # list of {"page": int, "content": str}
        self.embeddings = None   # ndarray (num_pages, dim) after build_vector_db
        self.index = None        # faiss.IndexFlatL2 over self.embeddings
        # Load the encoder ONCE here; the original re-instantiated
        # SentenceTransformer on every search_documents call, paying the
        # full model-load cost per query.
        self.model = SentenceTransformer(self._MODEL_NAME)
        self.load_pdf("YOURPDFFILE")  # placeholder path — replace with the real PDF file
        self.build_vector_db()

    def load_pdf(self, file_path: str) -> None:
        """Extract text from each page of the PDF at *file_path* into self.documents."""
        doc = fitz.open(file_path)
        self.documents = []
        for page_num in range(len(doc)):
            page = doc[page_num]
            text = page.get_text()
            # Pages are 1-based for human-facing numbering.
            self.documents.append({"page": page_num + 1, "content": text})
        print("PDF processed successfully!")

    def build_vector_db(self) -> None:
        """Embed every loaded page and build a FAISS L2 index over the embeddings.

        Raises:
            ValueError: if no documents have been loaded — encoding an empty
                list would otherwise fail later with an opaque shape error.
        """
        if not self.documents:
            raise ValueError("No documents loaded; call load_pdf() first.")
        self.embeddings = self.model.encode([d["content"] for d in self.documents])
        self.index = faiss.IndexFlatL2(self.embeddings.shape[1])
        self.index.add(np.array(self.embeddings))
        print("Vector database built successfully!")

    def search_documents(self, query: str, k: int = 3) -> List[str]:
        """Return the contents of the (up to) *k* pages most similar to *query*."""
        query_embedding = self.model.encode([query])
        _distances, indices = self.index.search(np.array(query_embedding), k)
        # FAISS pads with -1 when fewer than k vectors are indexed; the
        # original indexed documents[-1] in that case and silently returned
        # the wrong (last) page. Filter to valid positions only.
        results = [
            self.documents[i]["content"]
            for i in indices[0]
            if 0 <= i < len(self.documents)
        ]
        return results if results else ["No relevant documents found."]


# Module-level singleton used by the chat handler for retrieval.
app = MyApp()
|
52 |
|
53 |
def respond(
|
54 |
+
message: str,
|
55 |
+
history: List[Tuple[str, str]],
|
56 |
+
system_message: str,
|
57 |
+
max_tokens: int,
|
58 |
+
temperature: float,
|
59 |
+
top_p: float,
|
60 |
):
|
61 |
+
system_message = "You are a knowledgeable DBT coach. You always talk about one options at at a time. you add greetings and you ask questions like real counsellor. Remember you are helpful and a good listener. You are concise and never ask multiple questions, or give long response. You response like a human counsellor accurately and correctly. consider the users as your client. and practice verbal cues only where needed. Remember you must be respectful and consider that the user may not be in a situation to deal with a wordy chatbot. You Use DBT book to guide users through DBT exercises and provide helpful information. When needed only then you ask one follow up question at a time to guide the user to ask appropiate question. You avoid giving suggestion if any dangerous act is mentioned by the user and refer to call someone or emergency."
|
62 |
messages = [{"role": "system", "content": system_message}]
|
63 |
|
64 |
for val in history:
|
|
|
69 |
|
70 |
messages.append({"role": "user", "content": message})
|
71 |
|
72 |
+
# RAG - Retrieve relevant documents
|
73 |
+
retrieved_docs = app.search_documents(message)
|
74 |
+
context = "\n".join(retrieved_docs)
|
75 |
+
messages.append({"role": "system", "content": "Relevant documents: " + context})
|
76 |
|
77 |
+
response = ""
|
78 |
for message in client.chat_completion(
|
79 |
messages,
|
80 |
max_tokens=max_tokens,
|
|
|
83 |
top_p=top_p,
|
84 |
):
|
85 |
token = message.choices[0].delta.content
|
|
|
86 |
response += token
|
87 |
yield response
|
88 |
|
89 |
+
# Top-level Gradio UI: a Blocks page holding a disclaimer and a ChatInterface
# backed by the `respond` generator.
demo = gr.Blocks()

with demo:
    gr.Markdown("🧘‍♀️ **Dialectical Behaviour Therapy**")
    gr.Markdown(
        "‼️Disclaimer: This chatbot is based on a DBT exercise book that is publicly available. "
        "We are not medical practitioners, and the use of this chatbot is at your own responsibility.‼️"
    )

    chatbot = gr.ChatInterface(
        respond,
        examples=[
            ["I feel overwhelmed with work."],
            ["Can you guide me through a quick meditation?"],
            ["How do I stop worrying about things I can't control?"],
            ["What are some DBT skills for managing anxiety?"],
            ["Can you explain mindfulness in DBT?"],
            # Typo fixes in user-facing text: "excercises" -> "exercises",
            # "repetatively" -> "repetitively".
            ["I am interested in DBT exercises"],
            ["I feel restless. Please help me."],
            ["I have destructive thoughts coming to my mind repetitively."],
        ],
        title='Dialectical Behaviour Therapy Assistant 👩‍⚕️',
    )
|
112 |
|
113 |
# Launch the Gradio app only when this file is executed as a script,
# not when it is imported as a module.
if __name__ == "__main__":
    demo.launch()
|