Update app.py
app.py (CHANGED)
```diff
@@ -1,9 +1,8 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
+from PyPDF2 import PdfReader

-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
+# Initialize the Inference Client
 client = InferenceClient("meta-llama/Llama-3.2-3B-Instruct")


@@ -14,19 +13,28 @@ def respond(
     max_tokens,
     temperature,
     top_p,
+    uploaded_pdf=None
 ):
     messages = [{"role": "system", "content": system_message}]

+    # Add previous conversation history to the messages
     for val in history:
         if val[0]:
             messages.append({"role": "user", "content": val[0]})
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})

+    # If a new message is entered, add it to the conversation history
     messages.append({"role": "user", "content": message})

-    response = ""
+    # If a PDF is uploaded, process its content
+    if uploaded_pdf is not None:
+        file_content = extract_pdf_text(uploaded_pdf)
+        if file_content:
+            messages.append({"role": "user", "content": f"Document Content: {file_content}"})

+    # Get response from the model
+    response = ""
     for message in client.chat_completion(
         messages,
         max_tokens=max_tokens,
```
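A caveat on this hunk: the whole of `file_content` is appended as a single user message, so a large PDF can easily overrun the model's context window and the request will fail or be silently truncated. A minimal guard might cap what gets forwarded. The sketch below is illustrative and not part of the commit; `clamp_document` and `MAX_DOC_CHARS` are hypothetical names introduced here:

```python
# Illustrative helper, not in the commit: cap extracted text before it is
# forwarded to the model, so long PDFs don't overrun the context window.
MAX_DOC_CHARS = 4000  # hypothetical budget; tune to the model's context size


def clamp_document(text: str, limit: int = MAX_DOC_CHARS) -> str:
    """Naive character-based truncation of extracted document text."""
    return text if len(text) <= limit else text[:limit]
```

Inside `respond`, the appended content would then become `f"Document Content: {clamp_document(file_content)}"` instead of the raw text.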
```diff
@@ -35,11 +43,22 @@ def respond(
         top_p=top_p,
-
     ):
         token = message.choices[0].delta.content
         response += token
         yield response


+def extract_pdf_text(file):
+    """Extract text from a PDF file."""
+    try:
+        reader = PdfReader(file)
+        text = ""
+        for page in reader.pages:
+            text += page.extract_text()
+        return text
+    except Exception as e:
+        return f"Error extracting text from PDF: {str(e)}"
+
+
 # CSS for styling the interface
 css = """
 body {
```
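One wrinkle in the new helper: `page.extract_text()` can return `None` for pages with no extractable text (scanned images, for example), and `text += None` raises a `TypeError`. The `except` clause then turns that into an "Error extracting text" string, which `if file_content:` treats as truthy and sends to the model as document content. A slightly more defensive variant, sketched against the same PyPDF2 API (note that PyPDF2 development has moved to its successor, `pypdf`):

```python
from PyPDF2 import PdfReader


def extract_pdf_text(file):
    """Extract text from a PDF file, tolerating pages with no extractable text."""
    try:
        reader = PdfReader(file)
        # extract_text() may return None; substitute "" so the join stays safe
        return "".join(page.extract_text() or "" for page in reader.pages)
    except Exception as e:
        return f"Error extracting text from PDF: {str(e)}"
```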
```diff
@@ -89,26 +108,22 @@ body {
 }
 """

-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a virtual health assistant designed to provide accurate and reliable information related to health, wellness, and medical topics. Your primary goal is to assist users with their health-related queries, offer general guidance, and suggest when to consult a licensed medical professional.If a user asks a question that is unrelated to health, wellness, or medical topics, respond politely but firmly with:'I'm sorry, I can't help with that because I am a virtual health assistant designed to assist with health-related needs. Please let me know if you have any health-related questions.'Never provide advice or information outside the health domain. Remain professional, empathetic, and clear in all responses. Always prioritize user safety and encourage professional medical consultation for critical or complex health concerns..", label="System message", visible=False),
+# Gradio interface
+demo = gr.Interface(
+    fn=respond,
+    inputs=[
+        gr.Textbox(label="Your Message", placeholder="Type your question here...", lines=4),
+        gr.File(label="Upload a PDF", file_count="single", type="file"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens", visible=False),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature", visible=False),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)", visible=False
-        ),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)", visible=False),
     ],
-    css=css,
+    outputs="text",
+    css=css,  # Custom CSS
+    live=True,
+    title="Health Assistant Chat",
+    description="This is a health assistant that can chat with you about health-related topics. You can also upload a document for analysis.",
 )

-
 if __name__ == "__main__":
     demo.launch(share=True)
```
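Two wiring issues are worth flagging in this last hunk. First, `gr.Interface` passes `inputs` to `fn` positionally, and `respond` presumably still begins with `(message, history, system_message, ...)` (its body still reads both `history` and `system_message`), so the uploaded file would land in the `history` slot and every later argument shifts by one, while `system_message` is no longer supplied at all now that the system-prompt Textbox is gone. Second, `type="file"` for `gr.File` is Gradio 3.x vocabulary; Gradio 4.x expects `"filepath"` or `"binary"`, and a plain path string is also something `PdfReader` accepts directly. The sketch below shows one way to reconcile the two sides; `respond_ui` and `SYSTEM_PROMPT` are hypothetical names introduced here, and the whole thing assumes Gradio 4.x rather than being the commit's actual code:

```python
# Hypothetical adapter between the Interface inputs and respond()'s signature.
SYSTEM_PROMPT = "You are a virtual health assistant..."  # stand-in for the removed Textbox value


def respond_ui(message, uploaded_pdf, max_tokens, temperature, top_p):
    # A plain gr.Interface has no chat history, so pass an empty one.
    yield from respond(
        message,
        [],             # history
        SYSTEM_PROMPT,  # system_message
        max_tokens,
        temperature,
        top_p,
        uploaded_pdf=uploaded_pdf,
    )


demo = gr.Interface(
    fn=respond_ui,
    inputs=[
        gr.Textbox(label="Your Message", placeholder="Type your question here...", lines=4),
        gr.File(label="Upload a PDF", file_count="single", type="filepath"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens", visible=False),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature", visible=False),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)", visible=False),
    ],
    outputs="text",
    css=css,
    title="Health Assistant Chat",
    description="This is a health assistant that can chat with you about health-related topics. You can also upload a document for analysis.",
)
```

`live=True` is also dropped in this sketch: with a generator `fn`, streaming already updates the output token by token, whereas `live` re-runs `fn` on every input change and would fire a model call as the user types.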