Spaces:
Running
Running
kendrickfff
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -8,22 +8,32 @@ os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "./firm-catalyst-437006-s4-407500
|
|
8 |
# Initialize the LLM
|
9 |
llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
|
10 |
|
11 |
-
|
|
|
12 |
try:
|
13 |
-
# Prepare the prompt
|
14 |
-
|
15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
except Exception as e:
|
17 |
-
return f"Error: {str(e)}"
|
# Assemble the Gradio interface configuration, then build it in one call.
# (Single text-in / text-out chat box around chat_with_gemini.)
_ui_kwargs = {
    "fn": chat_with_gemini,
    "inputs": "text",
    "outputs": "text",
    "title": "Chatbot with Gemini 1.5",
    "description": "Ask me anything!",
}
iface = gr.Interface(**_ui_kwargs)

# Launch the interface with debugging
iface.launch(debug=True)
# Initialize the LLM
# NOTE(review): relies on GOOGLE_APPLICATION_CREDENTIALS being exported earlier
# in this file — confirm the credentials path is valid at deploy time.
# The model name is hard-coded; presumably "gemini-1.5-pro" is intentional.
llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
# Chat function
def chat_with_gemini(user_input, chat_history=None):
    """Generate a reply to *user_input*, threading prior turns into the prompt.

    Args:
        user_input: The latest user message (plain text).
        chat_history: List of ``(user, bot)`` message pairs from earlier turns.
            Defaults to a fresh empty list per call. (The original used a
            mutable default ``chat_history=[]``, which is created once and
            shared across every call — history from one session would leak
            into all others whenever the default was used.)

    Returns:
        A ``(response, chat_history)`` tuple. On failure the response is an
        ``"Error: ..."`` string and the history is returned unmodified.
    """
    # None-sentinel fix for the shared-mutable-default bug: each call that
    # omits chat_history now gets its own independent list.
    if chat_history is None:
        chat_history = []
    try:
        # Prepare the prompt: flatten prior turns into a plain-text transcript
        # the model can continue, then append the new user turn.
        conversation = "\n".join(f"User: {u}\nBot: {b}" for u, b in chat_history)
        conversation += f"\nUser: {user_input}\nBot:"

        # Get response from the model
        # (langchain-style single-string completion call — TODO confirm
        # `.predict` is still the supported entry point for this client).
        response = llm.predict(conversation)

        # Update chat history so the Gradio "state" output carries it forward.
        chat_history.append((user_input, response))
        return response, chat_history
    except Exception as e:  # broad by design: surface any failure in the UI
        return f"Error: {str(e)}", chat_history
|
26 |
|
27 |
+
# Create Gradio interface
|
28 |
iface = gr.Interface(
|
29 |
fn=chat_with_gemini,
|
30 |
+
inputs=["text", "state"], # Use "state" to maintain chat history
|
31 |
+
outputs=["text", "state"], # Return updated chat history
|
32 |
title="Chatbot with Gemini 1.5",
|
33 |
+
description="Ask me anything!",
|
34 |
+
theme="default", # Optional: Specify a theme if desired
|
35 |
+
live=True
|
36 |
)
|
37 |
|
38 |
+
# Launch the interface with debugging enabled
|
39 |
iface.launch(debug=True)
|