Update app.py
app.py CHANGED
@@ -43,6 +43,26 @@ def get_relevant_info(query, top_k=3):
     return data.iloc[top_indices]
 
 # Function to generate response using Hugging Face Model API
+# def generate_response(input_text, relevant_info):
+#     # Concatenate the relevant information as context for the model
+#     context = "\n".join(relevant_info['combined_description'].tolist())
+#     input_with_context = f"Context: {context}\n\nUser Query: {input_text}"
+
+#     api_url = "https://api-inference.huggingface.co/models/m42-health/Llama3-Med42-8B"
+#     headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACEHUB_API_TOKEN')}"}
+#     payload = {"inputs": input_with_context}
+
+#     try:
+#         response = requests.post(api_url, headers=headers, json=payload)
+#         response_data = response.json()
+#         if isinstance(response_data, list) and "generated_text" in response_data[0]:
+#             return response_data[0]["generated_text"]
+#         else:
+#             return "Unexpected response format from API."
+#     except Exception as e:
+#         st.error(f"Error during API request: {e}")
+#         return "Error processing your request."
+
 def generate_response(input_text, relevant_info):
     # Concatenate the relevant information as context for the model
     context = "\n".join(relevant_info['combined_description'].tolist())
@@ -55,6 +75,11 @@ def generate_response(input_text, relevant_info):
     try:
         response = requests.post(api_url, headers=headers, json=payload)
         response_data = response.json()
+
+        # Print or display the raw response data
+        st.write("Raw API response:", response_data)
+
+        # Check and parse the response
         if isinstance(response_data, list) and "generated_text" in response_data[0]:
             return response_data[0]["generated_text"]
         else:
@@ -63,6 +88,11 @@ def generate_response(input_text, relevant_info):
         st.error(f"Error during API request: {e}")
         return "Error processing your request."
 
+    # Check and parse the response if it's a single JSON object
+    if isinstance(response_data, dict) and "generated_text" in response_data:
+        return response_data["generated_text"]
+
+
 # Streamlit UI for the FAQ Chatbot
 def main():
     st.title("Medical FAQ Chatbot")
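Note on the final hunk: the new dict-shaped check is added after the except block's return, so as committed it never runs; both shape checks have to execute inside the try, right after response.json(). Below is a minimal sketch of that arrangement, assuming only the two response shapes this diff already handles (a list like [{"generated_text": ...}] or a single {"generated_text": ...} object). The helper name parse_generated_text is illustrative, not part of app.py.

import os
import requests
import streamlit as st

def parse_generated_text(response_data):
    # Hypothetical helper: normalize the two response shapes seen in the diff.
    if isinstance(response_data, list) and response_data and "generated_text" in response_data[0]:
        return response_data[0]["generated_text"]
    if isinstance(response_data, dict) and "generated_text" in response_data:
        return response_data["generated_text"]
    return None

def generate_response(input_text, relevant_info):
    # Concatenate the relevant information as context for the model
    context = "\n".join(relevant_info['combined_description'].tolist())
    input_with_context = f"Context: {context}\n\nUser Query: {input_text}"

    api_url = "https://api-inference.huggingface.co/models/m42-health/Llama3-Med42-8B"
    headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACEHUB_API_TOKEN')}"}
    payload = {"inputs": input_with_context}

    try:
        response = requests.post(api_url, headers=headers, json=payload)
        response_data = response.json()
        st.write("Raw API response:", response_data)  # debug output, as in the diff

        generated = parse_generated_text(response_data)
        if generated is not None:
            return generated
        return "Unexpected response format from API."
    except Exception as e:
        st.error(f"Error during API request: {e}")
        return "Error processing your request."

With both checks folded into the try block, the fallback message only fires for genuinely unknown response shapes, and the unreachable lines after the except block can be dropped.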