Spaces: Runtime error
ariankhalfani committed
Commit • 0119dc3
Parent(s): 29db71d
Update chatbot.py
chatbot.py +8 -0
chatbot.py CHANGED

@@ -12,6 +12,9 @@ import re
 # Configure logging
 logging.basicConfig(level=logging.DEBUG)
 
+# Load environment variables
+load_dotenv()
+
 # Configure Hugging Face API URL and headers for Meta-Llama-3-70B-Instruct
 api_url = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct"
 huggingface_api_key = os.getenv("HF_API_TOKEN")
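The new load_dotenv() call (from the python-dotenv package) reads key-value pairs from a local .env file into the process environment, which is what lets the os.getenv("HF_API_TOKEN") lookup below it succeed during local development; the diff assumes the file already imports load_dotenv. A minimal sketch of the expected setup, with an illustrative placeholder token:

# .env  (keep this file out of version control; the value is a placeholder)
HF_API_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx

# chatbot.py (sketch)
import os
from dotenv import load_dotenv

load_dotenv()                      # pulls .env entries into os.environ
token = os.getenv("HF_API_TOKEN")  # None if the variable is not set
if token is None:
    raise RuntimeError("HF_API_TOKEN is missing; check your .env file")

The second hunk below adds debug logging on both sides of the inference call: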
@@ -225,12 +228,17 @@ def chatbot(audio, input_type, text):
     payload = {
         "inputs": f"role: ophthalmologist assistant patient history: {patient_history} question: {query}"
     }
+
+    logging.debug(f"Raw input to the LLM: {payload['inputs']}")
+
     response = query_huggingface(payload)
     if isinstance(response, list):
         raw_response = response[0].get("generated_text", "Sorry, I couldn't generate a response.")
     else:
         raw_response = response.get("generated_text", "Sorry, I couldn't generate a response.")
 
+    logging.debug(f"Raw output from the LLM: {raw_response}")
+
     clean_response = cleanup_response(raw_response)
     return clean_response, None
 
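The two logging.debug calls bracket query_huggingface(payload), so both the exact prompt sent to Meta-Llama-3-70B-Instruct and the raw completion that comes back land in the debug log. The helper itself is outside this commit's hunks; a minimal sketch of what it likely wraps, assuming the standard Hugging Face Inference API pattern with requests (the error handling here is illustrative, not the file's actual code):

import requests

def query_huggingface(payload):
    # POST the prompt payload to the hosted inference endpoint,
    # authenticating with the bearer token loaded from the environment.
    headers = {"Authorization": f"Bearer {huggingface_api_key}"}
    response = requests.post(api_url, headers=headers, json=payload)
    response.raise_for_status()
    # The endpoint returns JSON: either a dict (e.g. an error message) or a
    # list of dicts carrying "generated_text" -- hence the
    # isinstance(response, list) check at the call site.
    return response.json()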