# (Hugging Face Spaces page header captured during scraping: "Spaces: Sleeping")
import os

import gradio as gr
from langchain_google_genai.chat_models import ChatGoogleGenerativeAI

# Point the Google SDK at the service-account key file.
# NOTE(review): hard-coded key filename — keep this key out of version control
# and prefer setting GOOGLE_APPLICATION_CREDENTIALS in the deployment
# environment instead of in code.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "./firm-catalyst-437006-s4-407500537db5.json"

# Initialize the chat model (Gemini 1.5 Pro via LangChain).
llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
# Chat function | |
def chat_with_gemini(user_input, chat_history=None):
    """Generate a Gemini reply for *user_input*, threading prior turns.

    Parameters
    ----------
    user_input : str
        The latest user message.
    chat_history : list[tuple[str, str]] | None
        Prior (user, bot) turns. ``None`` — the value Gradio's ``"state"``
        input sends on the first interaction — is treated as empty history.

    Returns
    -------
    tuple[str, list[tuple[str, str]]]
        The bot reply (or an ``"Error: ..."`` string on failure) and the
        updated history.
    """
    # Fix: the original used a mutable default (chat_history=[]), which is
    # shared across calls, and it crashed on the None that Gradio's "state"
    # input passes on the first turn.
    if chat_history is None:
        chat_history = []
    try:
        # Flatten prior turns into a plain-text transcript prompt.
        conversation = "\n".join(f"User: {u}\nBot: {b}" for u, b in chat_history)
        conversation += f"\nUser: {user_input}\nBot:"
        # Ask the model for the next turn.
        # NOTE(review): llm.predict() is deprecated in recent LangChain;
        # llm.invoke(conversation).content is the modern equivalent — confirm
        # the installed langchain version before switching.
        response = llm.predict(conversation)
        # Record the completed turn so the UI state carries it forward.
        chat_history.append((user_input, response))
        return response, chat_history
    except Exception as e:
        # Surface the failure in the UI instead of crashing the app.
        return f"Error: {str(e)}", chat_history
# Create Gradio interface | |
# Build the Gradio UI. The extra "state" input/output carries the
# conversation history between calls to chat_with_gemini.
iface = gr.Interface(
    fn=chat_with_gemini,
    inputs=["text", "state"],   # "state" holds the chat history between turns
    outputs=["text", "state"],  # updated history flows back into the state
    title="Chatbot with Gemini 1.5",
    description="Ask me anything!",
    theme="default",
    # NOTE(review): live=True re-runs the function on every keystroke, and
    # each run calls the Gemini API — consider dropping it to fire only on
    # submit.
    live=True,
)

# Launch the interface with debugging enabled (prints tracebacks to console).
iface.launch(debug=True)