Sakalti committed on
Commit
d8cee9a
1 Parent(s): b37cb74

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -64
app.py CHANGED
@@ -1,9 +1,11 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- import json
4
- import os
5
 
 
 
 
6
  client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
 
7
 
8
  def respond(
9
  message,
@@ -37,69 +39,28 @@ def respond(
37
  response += token
38
  yield response
39
 
40
- def load_rooms():
41
- if os.path.exists("rooms.json"):
42
- with open("rooms.json", "r", encoding="utf-8") as f:
43
- return json.load(f)
44
- return {}
45
 
46
- def save_rooms(rooms):
47
- with open("rooms.json", "w", encoding="utf-8") as f:
48
- json.dump(rooms, f, ensure_ascii=False, indent=4)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
 
50
- def create_room(rooms, room_name):
51
- if room_name not in rooms:
52
- rooms[room_name] = []
53
- save_rooms(rooms)
54
- return rooms
55
-
56
- def switch_room(room_name, rooms):
57
- return rooms.get(room_name, [])
58
-
59
- def add_message_to_room(room_name, rooms, message, response):
60
- if room_name in rooms:
61
- rooms[room_name].append((message, response))
62
- save_rooms(rooms)
63
-
64
- def chat_interface(room_name, rooms, message, history, system_message, max_tokens, temperature, top_p):
65
- response = list(respond(message, history, system_message, max_tokens, temperature, top_p))[-1]
66
- add_message_to_room(room_name, rooms, message, response)
67
- return response, history + [(message, response)]
68
-
69
- def main():
70
- rooms = load_rooms()
71
- room_names = list(rooms.keys())
72
- with gr.Blocks() as demo:
73
- with gr.Row():
74
- with gr.Column():
75
- room_name_dropdown = gr.Dropdown(room_names, label="会話部屋", value=room_names[0] if room_names else None)
76
- new_room_name = gr.Textbox(label="新しい会話部屋の名前")
77
- create_room_button = gr.Button("新しい会話部屋を作成")
78
- create_room_button.click(
79
- fn=lambda name, r: (create_room(r, name), name, switch_room(name, r)),
80
- inputs=[new_room_name, gr.State(rooms)],
81
- outputs=[room_name_dropdown, room_name_dropdown, gr.Chatbot]
82
- )
83
- room_name_dropdown.change(
84
- fn=lambda name, r: (switch_room(name, r),),
85
- inputs=[room_name_dropdown, gr.State(rooms)],
86
- outputs=[gr.Chatbot]
87
- )
88
- with gr.Column(scale=3):
89
- chatbot = gr.Chatbot(label="会話")
90
- message = gr.Textbox(label="メッセージ")
91
- system_message = gr.Textbox(value="あなたは親切なチャットボットでありQwenというLLMです。", label="システムメッセージ")
92
- max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="新規トークン最大")
93
- temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="温度")
94
- top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (核 sampling)")
95
- submit_button = gr.Button("送信")
96
- submit_button.click(
97
- fn=chat_interface,
98
- inputs=[room_name_dropdown, gr.State(rooms), message, chatbot, system_message, max_tokens, temperature, top_p],
99
- outputs=[chatbot, chatbot]
100
- )
101
-
102
- demo.launch()
103
 
104
  if __name__ == "__main__":
105
- main()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
3
 
4
+ """
5
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
+ """
7
  client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
8
+ #応答部分
9
 
10
  def respond(
11
  message,
 
39
  response += token
40
  yield response
41
 
 
 
 
 
 
42
 
43
+ """
44
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
+ """
46
+ #インターフェース
47
+ demo = gr.ChatInterface(
48
+ respond,
49
+ additional_inputs=[
50
+ gr.Textbox(value="ユーザーの応答と依頼に答えてください。ポジティブに", label="システムメッセージ"),
51
+ gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="新規トークン最大"),
52
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="温度"),
53
+ gr.Slider(
54
+ minimum=0.1,
55
+ maximum=1.0,
56
+ value=0.95,
57
+ step=0.05,
58
+ label="Top-p (核 sampling)",
59
+ ),
60
+ ],
61
+ concurrency_limit=30 # 例: 同時に4つのリクエストを処理
62
+ )
63
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
 
65
  if __name__ == "__main__":
66
+ demo.launch()