import os
import gradio as gr
from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
# Set the path to the service account key
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "./firm-catalyst-437006-s4-407500537db5.json"
# Initialize the LLM
llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
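# Note: as an alternative (a sketch, assuming an API key is available), the
# client can also authenticate via a GOOGLE_API_KEY environment variable
# instead of a service-account file:
#   os.environ["GOOGLE_API_KEY"] = "<your-api-key>"  # hypothetical placeholder
#   llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")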
# Chat function
def chat_with_gemini(user_input, chat_history=None):
    # Avoid a mutable default argument, which would share one history list
    # across all calls
    if chat_history is None:
        chat_history = []
    try:
        # Rebuild the conversation so the model sees prior turns as context
        conversation = "\n".join(f"User: {msg[0]}\nBot: {msg[1]}" for msg in chat_history)
        conversation += f"\nUser: {user_input}\nBot:"
        # Get response from the model; predict() is deprecated in recent
        # LangChain, and invoke() returns a message whose .content is the text
        response = llm.invoke(conversation).content
        # Update chat history with the new turn
        chat_history.append((user_input, response))
        return response, chat_history
    except Exception as e:
        return f"Error: {str(e)}", chat_history
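# Quick sanity check (illustrative only; the chatbot itself runs through the
# Gradio interface below):
#   reply, history = chat_with_gemini("Hello!", [])
#   print(reply)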
# Create Gradio interface
iface = gr.Interface(
    fn=chat_with_gemini,
    inputs=["text", "state"],   # Use "state" to maintain chat history
    outputs=["text", "state"],  # Return updated chat history
    title="Chatbot with Gemini 1.5",
    description="Ask me anything!",
    theme="default",  # Optional: specify a theme if desired
    live=True,
)
# Launch the interface with debugging enabled
iface.launch(debug=True)