Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,11 +1,9 @@
|
|
1 |
-
|
2 |
import discord
|
3 |
import logging
|
4 |
import os
|
|
|
5 |
import asyncio
|
6 |
-
import aiohttp
|
7 |
import subprocess
|
8 |
-
from huggingface_hub import InferenceClient
|
9 |
|
10 |
# 로깅 설정
|
11 |
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(name)s: %(message)s', handlers=[logging.StreamHandler()])
|
@@ -18,7 +16,7 @@ intents.guilds = True
|
|
18 |
intents.guild_messages = True
|
19 |
|
20 |
# 추론 API 클라이언트 설정
|
21 |
-
hf_client = InferenceClient(token=os.getenv("HF_TOKEN"))
|
22 |
|
23 |
# 특정 채널 ID
|
24 |
SPECIFIC_CHANNEL_ID = int(os.getenv("DISCORD_CHANNEL_ID"))
|
@@ -33,7 +31,7 @@ class MyClient(discord.Client):
|
|
33 |
|
34 |
async def on_ready(self):
|
35 |
logging.info(f'{self.user}로 로그인되었습니다!')
|
36 |
-
|
37 |
logging.info("Web.py server has been started.")
|
38 |
|
39 |
async def on_message(self, message):
|
@@ -75,32 +73,22 @@ async def generate_response(message):
|
|
75 |
messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}] + conversation_history
|
76 |
logging.debug(f'Messages to be sent to the model: {messages}')
|
77 |
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
# Gradio 서버를 실행할 포트를 찾기 위해 사용할 수 있는 포트를 동적으로 설정
|
94 |
-
import socket
|
95 |
-
port = 7860
|
96 |
-
while True:
|
97 |
-
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
98 |
-
if s.connect_ex(('localhost', port)) != 0:
|
99 |
-
break
|
100 |
-
port += 1
|
101 |
-
python_executable = 'python3' if os.name == 'posix' else 'python'
|
102 |
-
subprocess.Popen([python_executable, "web.py"], env={"GRADIO_SERVER_PORT": str(port)})
|
103 |
|
104 |
if __name__ == "__main__":
|
105 |
discord_client = MyClient(intents=intents)
|
106 |
-
discord_client.run(os.getenv('DISCORD_TOKEN'))
|
|
|
|
|
1 |
import discord
|
2 |
import logging
|
3 |
import os
|
4 |
+
from huggingface_hub import InferenceClient
|
5 |
import asyncio
|
|
|
6 |
import subprocess
|
|
|
7 |
|
8 |
# 로깅 설정
|
9 |
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(name)s: %(message)s', handlers=[logging.StreamHandler()])
|
|
|
16 |
intents.guild_messages = True
|
17 |
|
18 |
# 추론 API 클라이언트 설정
|
19 |
+
hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus", token=os.getenv("HF_TOKEN"))
|
20 |
|
21 |
# 특정 채널 ID
|
22 |
SPECIFIC_CHANNEL_ID = int(os.getenv("DISCORD_CHANNEL_ID"))
|
|
|
31 |
|
32 |
async def on_ready(self):
|
33 |
logging.info(f'{self.user}로 로그인되었습니다!')
|
34 |
+
subprocess.Popen(["python", "web.py"])
|
35 |
logging.info("Web.py server has been started.")
|
36 |
|
37 |
async def on_message(self, message):
|
|
|
73 |
messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}] + conversation_history
|
74 |
logging.debug(f'Messages to be sent to the model: {messages}')
|
75 |
|
76 |
+
loop = asyncio.get_event_loop()
|
77 |
+
response = await loop.run_in_executor(None, lambda: hf_client.chat_completion(
|
78 |
+
messages, max_tokens=1000, stream=True, temperature=0.7, top_p=0.85))
|
79 |
+
|
80 |
+
full_response = []
|
81 |
+
for part in response:
|
82 |
+
logging.debug(f'Part received from stream: {part}')
|
83 |
+
if part.choices and part.choices[0].delta and part.choices[0].delta.content:
|
84 |
+
full_response.append(part.choices[0].delta.content)
|
85 |
+
|
86 |
+
full_response_text = ''.join(full_response)
|
87 |
+
logging.debug(f'Full model response: {full_response_text}')
|
88 |
+
|
89 |
+
conversation_history.append({"role": "assistant", "content": full_response_text})
|
90 |
+
return f"{user_mention}, {full_response_text}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
91 |
|
92 |
if __name__ == "__main__":
|
93 |
discord_client = MyClient(intents=intents)
|
94 |
+
discord_client.run(os.getenv('DISCORD_TOKEN'))
|