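"""Telegram Gemini chatbot (main.py).

Relays private text, photo, audio/voice and video messages to Google Gemini
via google-generativeai / RyuzakiLib and keeps per-user chat history in the
project's database module.
"""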
import asyncio
import logging
import os
import re

from pyrogram import Client, filters, idle
from pyrogram.types import Message
# Your other imports
from dotenv import load_dotenv
from database import db
from logger import LOGS
from RyuzakiLib import GeminiLatest  # and other imports as needed
import google.generativeai as genai
from google.api_core.exceptions import InvalidArgument

# Load environment variables
load_dotenv()
API_ID = os.getenv("API_ID")
API_HASH = os.getenv("API_HASH")
BOT_TOKEN = os.getenv("BOT_TOKEN")
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
# Validate essential environment variables
if not all([API_ID, API_HASH, BOT_TOKEN, GOOGLE_API_KEY]):
    LOGS.critical("Missing one or more essential environment variables.")
    exit(1)
# Initialize Pyrogram Client
client = Client(
    "chatbotai",
    api_id=API_ID,
    api_hash=API_HASH,
    bot_token=BOT_TOKEN
)
# Define your handler
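# It runs in handler group 2 and only fires for incoming private messages that
# carry text or media and are neither forwarded nor sent via another bot.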
@client.on_message(
    filters.incoming
    & (
        filters.text
        | filters.photo
        | filters.video
        | filters.audio
        | filters.voice
        | filters.regex(r"\b(Randy|Rendi)\b(.*)", flags=re.IGNORECASE)
    )
    & filters.private
    & ~filters.via_bot
    & ~filters.forwarded,
    group=2,
)
async def chatbot_talk(client: Client, message: Message):
    try:
        genai.configure(api_key=GOOGLE_API_KEY)
        # Handling Photo Messages
        if message.photo:
            file_path = await message.download()
            caption = message.caption or "What's this?"
            x = GeminiLatest(api_keys=GOOGLE_API_KEY)
            # Send initial processing message
            ai_reply = await message.reply_text("Processing...")
            try:
                backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
                backup_chat.append({"role": "user", "parts": [{"text": caption}]})
                response_reads = x.get_response_image(caption, file_path)
                if len(response_reads) > 4096:
                    with open("chat.txt", "w+", encoding="utf8") as out_file:
                        out_file.write(response_reads)
                    await message.reply_document(
                        document="chat.txt",
                        disable_notification=True
                    )
                    await ai_reply.delete()
                    os.remove("chat.txt")
                else:
                    await ai_reply.edit_text(response_reads)
                backup_chat.append({"role": "model", "parts": [{"text": response_reads}]})
                await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)
                os.remove(file_path)
                return
            except InvalidArgument as e:
                await ai_reply.edit_text(f"Error: {e}")
                return
            except Exception as e:
                await ai_reply.edit_text(f"Error: {e}")
                return
        # Handling Audio or Voice Messages
        if message.audio or message.voice:
            ai_reply = await message.reply_text("Processing...")
            audio_file_name = await message.download()
            caption = message.caption or "What's this?"
            model = genai.GenerativeModel(
                model_name="gemini-1.5-flash",
                safety_settings={
                    genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
                    genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
                    genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE,
                    genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
                }
            )
            backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
            backup_chat.append({"role": "user", "parts": [{"text": caption}]})
            await ai_reply.edit_text("Uploading file...")
            # Upload through the Gemini Files API and poll until processing finishes
            audio_file = genai.upload_file(path=audio_file_name)
            while audio_file.state.name == "PROCESSING":
                await asyncio.sleep(10)
                audio_file = genai.get_file(audio_file.name)
            if audio_file.state.name == "FAILED":
                await ai_reply.edit_text(f"Error: {audio_file.state.name}")
                return
            try:
                response = model.generate_content(
                    [audio_file, caption],
                    request_options={"timeout": 600}
                )
                if len(response.text) > 4096:
                    with open("chat.txt", "w+", encoding="utf8") as out_file:
                        out_file.write(response.text)
                    await message.reply_document(
                        document="chat.txt",
                        disable_notification=True
                    )
                    await ai_reply.delete()
                    os.remove("chat.txt")
                else:
                    await ai_reply.edit_text(response.text)
                backup_chat.append({"role": "model", "parts": [{"text": response.text}]})
                await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)
                audio_file.delete()
                os.remove(audio_file_name)
                return
            except InvalidArgument as e:
                await ai_reply.edit_text(f"Error: {e}")
                return
            except Exception as e:
                await ai_reply.edit_text(f"Error: {e}")
                return
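        # NOTE: the video branch below mirrors the audio/voice flow above, but
        # uses the larger "gemini-1.5-pro" model instead of "gemini-1.5-flash".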
        # Handling Video Messages
        if message.video:
            ai_reply = await message.reply_text("Processing...")
            video_file_name = await message.download(file_name="newvideo.mp4")
            caption = message.caption or "What's this?"
            model = genai.GenerativeModel(
                model_name="gemini-1.5-pro",
                safety_settings={
                    genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
                    genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
                    genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE,
                    genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
                }
            )
            backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
            backup_chat.append({"role": "user", "parts": [{"text": caption}]})
            await ai_reply.edit_text("Uploading file...")
            # Upload through the Gemini Files API and poll until processing finishes
            video_file = genai.upload_file(path=video_file_name)
            while video_file.state.name == "PROCESSING":
                await asyncio.sleep(10)
                video_file = genai.get_file(video_file.name)
            if video_file.state.name == "FAILED":
                await ai_reply.edit_text(f"Error: {video_file.state.name}")
                return
            try:
                response = model.generate_content(
                    [video_file, caption],
                    request_options={"timeout": 600}
                )
                if len(response.text) > 4096:
                    with open("chat.txt", "w+", encoding="utf8") as out_file:
                        out_file.write(response.text)
                    await message.reply_document(
                        document="chat.txt",
                        disable_notification=True
                    )
                    await ai_reply.delete()
                    os.remove("chat.txt")
                else:
                    await ai_reply.edit_text(response.text)
                backup_chat.append({"role": "model", "parts": [{"text": response.text}]})
                await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)
                video_file.delete()
                os.remove(video_file_name)
                return
            except InvalidArgument as e:
                await ai_reply.edit_text(f"Error: {e}")
                return
            except Exception as e:
                await ai_reply.edit_text(f"Error: {e}")
                return
        # Handling Text Messages
        if message.text:
            query = message.text.strip()
            match = re.search(r"\b(Randy|Rendi)\b(.*)", query, flags=re.IGNORECASE)
            if match:
                rest_of_sentence = match.group(2).strip()
                query_base = rest_of_sentence if rest_of_sentence else query
            else:
                query_base = query
            parts = query.split(maxsplit=1)
            command = parts[0].lower()
            pic_query = parts[1].strip() if len(parts) > 1 else ""
            try:
                model_flash = genai.GenerativeModel(
                    model_name="gemini-1.5-flash"
                )
                backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
                backup_chat.append({"role": "user", "parts": [{"text": query_base}]})
                chat_session = model_flash.start_chat(history=backup_chat)
                response_data = chat_session.send_message(query_base)
                output = response_data.text
                if len(output) > 4096:
                    with open("chat.txt", "w+", encoding="utf8") as out_file:
                        out_file.write(output)
                    await message.reply_document(
                        document="chat.txt",
                        disable_notification=True
                    )
                    os.remove("chat.txt")
                else:
                    await message.reply_text(output)
                backup_chat.append({"role": "model", "parts": [{"text": output}]})
                await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)
            except Exception as e:
                await message.reply_text(str(e))
    except Exception as e:
        # Outer catch-all so the handler never raises into Pyrogram's dispatcher
        LOGS.error(f"Unhandled error in chatbot_talk: {e}")
# End of handler

# Define the main coroutine
async def main():
    await db.connect()  # Connect to your database
    LOGS.info("Connected to the database.")
    await client.start()  # Start the Pyrogram client
    LOGS.info("Bot started successfully.")
    await idle()  # Keep the bot running until interrupted
    LOGS.info("Bot stopping...")
    await client.stop()  # Ensure the client stops gracefully
# Entry point
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except (KeyboardInterrupt, SystemExit):
        LOGS.info("Bot has been terminated by the user.")
    except Exception as e:
        LOGS.error(f"Unexpected error: {e}")