# Gradio chatbot powered by Gemini 1.5 (LangChain + Google Generative AI)
# Install dependencies (run once in a Colab / notebook environment):
#   pip install langchain google-generativeai langchain-google-genai gradio

import os

import gradio as gr
from langchain_google_genai.chat_models import ChatGoogleGenerativeAI

# Point the Google client libraries at the service-account key file.
# NOTE(review): hard-coded Colab path — adjust when running outside Colab.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/content/firm-catalyst-437006-s4-407500537db5.json"

# Initialize the Gemini chat model; credentials are picked up from the
# GOOGLE_APPLICATION_CREDENTIALS environment variable set above.
llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
def chat_with_gemini(user_input):
    """Send *user_input* to the Gemini model and return its text reply.

    On any failure, returns an "Error: ..." string instead of raising,
    so the Gradio UI always has something to display.
    """
    try:
        # invoke() is the current LangChain chat-model API (predict() is
        # deprecated). It returns an AIMessage; .content is the reply text.
        response = llm.invoke(user_input)
        return response.content
    except Exception as e:  # broad by design: surface any failure in the UI
        return f"Error: {str(e)}"
# Create a Gradio interface | |
iface = gr.Interface( | |
fn=chat_with_gemini, | |
inputs="text", | |
outputs="text", | |
title="Chatbot with Gemini 1.5", | |
description="Ask me anything!" | |
) | |
# Launch the interface with debugging | |
iface.launch(debug=True) | |