Spaces:
Sleeping
Sleeping
File size: 1,287 Bytes
c918151 827268a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 |
import gradio as gr
import requests
history = []  # Module-level conversation log shared across calls


def chat_with_model(prompt, context):
    """Send *prompt* to the backend chat service and return its reply.

    Parameters
    ----------
    prompt : str
        The user's latest message.
    context :
        History argument supplied by ``gr.ChatInterface`` (unused here;
        the module-level ``history`` list is sent to the backend instead).

    Returns
    -------
    str
        The assistant's reply on success, otherwise a fixed error string.
    """
    global history
    url = "http://43.205.120.87/chat"
    # NOTE(review): a GET request with a JSON body is unusual — many servers
    # and proxies ignore GET bodies. Confirm the FastAPI endpoint accepts GET.
    try:
        response = requests.get(
            url,
            json={"prompt": prompt, "history": history},
            timeout=30,  # fail fast instead of hanging the UI forever
        )
    except requests.RequestException:
        # Connection refused / DNS failure / timeout — report, don't crash.
        return "Error communicating with the backend."
    if response.status_code == 200:
        data = response.json()
        # Record both sides of the turn so later requests carry full context.
        history.append({"role": "user", "content": prompt})
        history.append({"role": "assistant", "content": data["response"]})
        return data["response"]
    return "Error communicating with the backend."
# Wire the chat callback into a Gradio chat UI and start the app.
iface = gr.ChatInterface(
    fn=chat_with_model,
    textbox=gr.Textbox(placeholder="Type your message here..."),
    title="Akhil's Chatbot (powered by GPT-3.5 Turbo)",
    theme='HaleyCH/HaleyCH_Theme',
    description="Type your prompt and get a response from GPT-3.5 Turbo!",
)
iface.launch()