randydev committed on
Commit
430fc73
1 Parent(s): c4e3480

Delete main.py

Browse files
Files changed (1) hide show
  1. main.py +0 -270
main.py DELETED
@@ -1,270 +0,0 @@
1
- #!/usr/bin/env python
2
- # -*- coding: utf-8 -*-
3
- # Copyright 2020-2024 (c) Randy W @xtdevs, @xtsea
4
- #
5
- # from : https://github.com/TeamKillerX
6
- # Channel : @RendyProjects
7
- # This program is free software: you can redistribute it and/or modify
8
- # it under the terms of the GNU Affero General Public License as published by
9
- # the Free Software Foundation, either version 3 of the License, or
10
- # (at your option) any later version.
11
- #
12
- # This program is distributed in the hope that it will be useful,
13
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
14
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15
- # GNU Affero General Public License for more details.
16
- #
17
- # You should have received a copy of the GNU Affero General Public License
18
- # along with this program. If not, see <https://www.gnu.org/licenses/>.
19
-
20
- import requests
21
- import time
22
- import json
23
- import asyncio
24
- import logging
25
- import io
26
- import os
27
- import re
28
- from os import getenv
29
- from dotenv import load_dotenv
30
- from PIL import Image
31
-
32
- from pyrogram import *
33
- from pyrogram import Client, filters
34
- from pyrogram.types import *
35
- from pyrogram.errors import *
36
- from RyuzakiLib import FaceAI, FullStackDev, GeminiLatest, RendyDevChat
37
-
38
- import google.generativeai as genai
39
- from google.api_core.exceptions import InvalidArgument
40
- from database import db
41
- from logger import LOGS
42
-
43
# --- Logging: keep our own output at INFO, quiet pyrogram's chatty internals.
logging.basicConfig(level=logging.INFO)
logging.getLogger("pyrogram.syncer").setLevel(logging.WARNING)
logging.getLogger("pyrogram.client").setLevel(logging.WARNING)

# NOTE(review): the original bound `loop = asyncio.get_event_loop()` here but
# never used it anywhere in this file; asyncio.run() at the bottom creates and
# manages its own loop, and calling get_event_loop() with no running loop is
# deprecated — so the binding was dropped.

load_dotenv()
# Required credentials: os.environ[...] raises KeyError on a missing variable,
# so a misconfigured deployment fails fast at startup instead of mid-request.
API_ID = os.environ["API_ID"]
API_HASH = os.environ["API_HASH"]
BOT_TOKEN = os.environ["BOT_TOKEN"]
GOOGLE_API_KEY = os.environ["GOOGLE_API_KEY"]

# The bot client; session is persisted under the name "chatbotai".
client = Client(
    "chatbotai",
    api_id=API_ID,
    api_hash=API_HASH,
    bot_token=BOT_TOKEN
)
60
-
61
async def geni_files_delete(name: str):
    """Delete an uploaded file from the Gemini Files API.

    Parameters:
        name: the API resource name (e.g. "files/abc-123") appended to the
              v1beta endpoint.

    Returns:
        The raw response body on HTTP 200, otherwise None.
    """
    url = f"https://generativelanguage.googleapis.com/v1beta/{name}"
    params = {"key": GOOGLE_API_KEY}
    # requests is a blocking HTTP client; running it directly inside this
    # coroutine would stall the bot's entire event loop for the duration of
    # the call. Push it onto the default executor thread instead.
    loop = asyncio.get_running_loop()
    response = await loop.run_in_executor(
        None, lambda: requests.delete(url, params=params)
    )
    if response.status_code != 200:
        return None
    return response.text
68
-
69
# Telegram's hard limit for a single text message, in characters.
TELEGRAM_TEXT_LIMIT = 4096

# Disable every Gemini safety filter (verbatim mirror of the settings the
# original inlined separately in the audio and video branches).
GENAI_SAFETY_OFF = {
    genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
    genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
    genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE,
    genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
}


def _loading_prefix(client: Client) -> str:
    """Status-message prefix used for premium accounts; "" otherwise.

    NOTE(review): the original referenced a module global ``custom_loading``
    that is never defined or imported in this file, so every premium-account
    path raised NameError. Fall back to "" until the missing import is
    restored; if it is defined later at module level, it is picked up here.
    """
    if client.me.is_premium:
        return str(globals().get("custom_loading", ""))
    return ""


async def _deliver_answer(message: Message, ai_reply, text: str):
    """Send *text* to the chat, as a chat.txt document when it is too long.

    ``ai_reply`` is the placeholder "Processing..." message to edit/delete,
    or None when the branch replies directly (the plain-text branch).
    """
    if len(text) > TELEGRAM_TEXT_LIMIT:
        with open("chat.txt", "w+", encoding="utf8") as out_file:
            out_file.write(text)
        await message.reply_document(
            document="chat.txt",
            disable_notification=True
        )
        if ai_reply is not None:
            await ai_reply.delete()
        os.remove("chat.txt")
    elif ai_reply is not None:
        await ai_reply.edit_text(text)
    else:
        await message.reply_text(text)


async def _answer_media(client: Client, message: Message, ai_reply, file_path: str, model_name: str):
    """Upload a downloaded media file to Gemini and reply with the answer.

    Shared by the audio/voice and video branches, which only differed in the
    model name and download filename. Cleans up the uploaded file and the
    local download on success.
    """
    caption = message.caption or "What's this?"
    model = genai.GenerativeModel(
        model_name=model_name,
        safety_settings=GENAI_SAFETY_OFF,
    )
    backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
    backup_chat.append({"role": "user", "parts": [{"text": caption}]})
    await ai_reply.edit_text(f"{_loading_prefix(client)}Uploading file..")
    media_file = genai.upload_file(path=file_path)
    # The Files API processes uploads asynchronously; poll until it settles.
    while media_file.state.name == "PROCESSING":
        await asyncio.sleep(10)
        media_file = genai.get_file(media_file.name)
    if media_file.state.name == "FAILED":
        return await ai_reply.edit_text(f"Error: {media_file.state.name}")
    try:
        # NOTE(review): generate_content is a blocking call inside a coroutine
        # (inherited from the original) — it stalls the event loop while the
        # model responds; consider an executor thread.
        response = model.generate_content(
            [media_file, caption],
            request_options={"timeout": 600}
        )
        await _deliver_answer(message, ai_reply, response.text)
        backup_chat.append({"role": "model", "parts": [{"text": response.text}]})
        await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)
        media_file.delete()
        os.remove(file_path)
        return
    except InvalidArgument as e:
        return await ai_reply.edit_text(f"Error: {e}")
    except Exception as e:
        return await ai_reply.edit_text(f"Error: {e}")


@client.on_message(
    filters.incoming
    & (
        filters.text
        | filters.photo
        | filters.video
        | filters.audio
        | filters.voice
        | filters.regex(r"\b(Randy|Rendi)\b(.*)", flags=re.IGNORECASE)
    )
    & filters.private
    & ~filters.via_bot
    & ~filters.forwarded,
    group=2,
)
async def chatbot_talk(client: Client, message: Message):
    """Answer private messages with Gemini: photos, audio/voice, video, text.

    Each media branch downloads the attachment, asks the model about it,
    persists the exchange in the per-user chat history, and replies inline
    (or as a document when over Telegram's length limit).
    """
    # NOTE(review): the original bound this to an unused `chat_user` variable;
    # the read is kept in case db.get_chatbot has intended side effects.
    await db.get_chatbot(message.chat.id)
    genai.configure(api_key=GOOGLE_API_KEY)

    if message.photo:
        file_path = await message.download()
        caption = message.caption or "What's this?"
        gemini = GeminiLatest(api_keys=GOOGLE_API_KEY)
        ai_reply = await message.reply_text(f"{_loading_prefix(client)}Processing...")
        try:
            backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
            backup_chat.append({"role": "user", "parts": [{"text": caption}]})
            response_reads = gemini.get_response_image(caption, file_path)
            await _deliver_answer(message, ai_reply, response_reads)
            backup_chat.append({"role": "model", "parts": [{"text": response_reads}]})
            await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)
            os.remove(file_path)
            return
        except InvalidArgument as e:
            return await ai_reply.edit_text(f"Error: {e}")
        except Exception as e:
            return await ai_reply.edit_text(f"Error: {e}")

    if message.audio or message.voice:
        ai_reply = await message.reply_text(f"{_loading_prefix(client)}Processing...")
        audio_file_name = await message.download()
        return await _answer_media(
            client, message, ai_reply, audio_file_name, "gemini-1.5-flash"
        )

    if message.video:
        ai_reply = await message.reply_text(f"{_loading_prefix(client)}Processing...")
        video_file_name = await message.download(file_name="newvideo.mp4")
        return await _answer_media(
            client, message, ai_reply, video_file_name, "gemini-1.5-pro"
        )

    if message.text:
        query = message.text.strip()
        # If the trigger word appears, chat about whatever follows it;
        # otherwise use the full message.
        match = re.search(r"\b(Randy|Rendi)\b(.*)", query, flags=re.IGNORECASE)
        if match:
            rest_of_sentence = match.group(2).strip()
            query_base = rest_of_sentence if rest_of_sentence else query
        else:
            query_base = query
        # NOTE(review): the original also split out `command`/`pic_query`
        # here but never used them — removed.
        try:
            model_flash = genai.GenerativeModel(
                model_name="gemini-1.5-flash"
            )
            backup_chat = await db._get_chatbot_chat_from_db(message.from_user.id)
            backup_chat.append({"role": "user", "parts": [{"text": query_base}]})
            chat_session = model_flash.start_chat(history=backup_chat)
            response_data = chat_session.send_message(query_base)
            output = response_data.text
            await _deliver_answer(message, None, output)
            backup_chat.append({"role": "model", "parts": [{"text": output}]})
            await db._update_chatbot_chat_in_db(message.from_user.id, backup_chat)
        except Exception as e:
            return await message.reply_text(str(e))
264
-
265
async def main():
    """Connect the database, start the bot, and block until shutdown."""
    await db.connect()
    await client.start()
    # NOTE(review): the original returned right after start(), so asyncio.run()
    # closed the loop and the bot exited immediately. idle() — re-exported by
    # `from pyrogram import *` — blocks here until SIGINT/SIGTERM so the
    # handlers actually get a chance to run.
    await idle()


if __name__ == "__main__":
    asyncio.run(main())