callanwu commited on
Commit
e14e6c0
·
1 Parent(s): 54f55cc
Gradio_Config/__pycache__/gradio_base.cpython-38.pyc ADDED
Binary file (15.9 kB). View file
 
Gradio_Config/__pycache__/gradio_config.cpython-38.pyc ADDED
Binary file (12.4 kB). View file
 
Gradio_Config/gradio_base.py ADDED
@@ -0,0 +1,559 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The AIWaves Inc. team.
3
+
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Emoji comes from this website:
18
+ # https://emojipedia.org/
19
+ import subprocess
20
+ from gradio_config import GradioConfig as gc
21
+ import gradio as gr
22
+ from typing import List, Tuple, Any
23
+ import time
24
+ import socket
25
+ import psutil
26
+ import os
27
+ from abc import abstractmethod
28
+
29
def convert2list4agentname(sop):
    """
    Collect the agent names used across all states of the given SOP.

    Args:
        sop: object exposing a `roles_to_names` mapping of
             {state_name: {role: name}}.

    Returns:
        A pair `(agent_name, only_name)` where
        `agent_name` is a sorted, de-duplicated list of "name(role)" labels
        and `only_name` is the plain name list (one entry per (state, role),
        duplicates preserved, in encounter order).
    """
    labeled = []
    plain = []
    for roles_names in sop.roles_to_names.values():
        for role, name in roles_names.items():
            labeled.append(f"{name}({role})")
            plain.append(name)
    # De-duplicate and order the labelled variant only; callers rely on
    # `only_name` keeping its raw per-state entries.
    labeled = sorted(set(labeled))
    return labeled, plain
46
+
47
def is_port_in_use(port):
    """Return True when any existing connection already occupies *port*."""
    # psutil.net_connections() lists every socket on the machine; short-circuit
    # on the first local address bound to the requested port.
    return any(conn.laddr.port == port for conn in psutil.net_connections())
53
+
54
def check_port(port):
    """
    Determine a usable port for the frontend/backend socket pair.

    If `PORT.txt` exists, a previous process already picked a port: reuse it
    so both processes agree.  Otherwise scan up to 10 consecutive ports
    starting at *port*, take the first free one, and persist it to
    `PORT.txt` as a side effect so the peer process can find the same port.

    Args:
        port: the first port number to try.

    Returns:
        The selected port number (int).
    """
    if os.path.isfile("PORT.txt"):
        # Fix: the original `open(...).readlines()[0]` leaked the file handle;
        # a context manager closes it deterministically.
        with open("PORT.txt", "r", encoding="utf-8") as f:
            port = int(f.readline())
    else:
        for i in range(10):
            if not is_port_in_use(port + i):
                port += i
                break
        with open("PORT.txt", "w") as f:
            f.write(str(port))
    return port
66
+
67
# Determine some heads
# Sentinel strings framing messages on the socket between the gradio
# frontend (server) and the agent backend (client).
SPECIAL_SIGN = {
    "START": "<START>",          # start-of-run command
    "SPLIT": "<SELFDEFINESEP>",  # delimiter between messages in the stream
    "END": "<ENDSEP>"            # end-of-stream marker
}
HOST = "127.0.0.1"
# The starting port number for the search.
PORT = 15000
# NOTE: resolved at import time; may read or create PORT.txt as a side effect.
PORT = check_port(PORT)
77
+
78
def print_log(message: str):
    """Print *message* to stdout, prefixed with the current wall-clock time."""
    timestamp = time.ctime()
    print(f"[{timestamp}]{message}")
80
+
81
# Global in-memory record of the whole conversation, shared module-wide:
#   "user"   - messages entered by the human user
#   "agent"  - per-agent outputs, keyed by agent name
#   "system" - system-level announcements
global_dialog = {
    "user": [],
    "agent": {},
    "system": []
}
86
+
87
class UIHelper:
    """Static helper class: turns raw agent output into styled HTML bubbles.

    All methods are classmethods; the class is never instantiated.
    """

    @classmethod
    def wrap_css(cls, content, name) -> str:
        """
        Description:
            Wrap CSS around each output, and return it in HTML format for rendering with Markdown.
        Input:
            content: Output content
            name: Whose output is it
        Output:
            HTML
        """
        assert name in gc.OBJECT_INFO, \
            f"The current name `{name}` is not registered with an image. The names of the currently registered agents are in `{gc.OBJECT_INFO.keys()}`. Please use `GradioConfig.add_agent()` from `Gradio_Config/gradio_config.py` to bind the name of the new agent."
        output = ""
        info = gc.OBJECT_INFO[name]
        # The BUBBLE_CSS templates take positional format arguments; the order
        # of the arguments below must match each template exactly.
        if info["id"] == "USER":
            output = gc.BUBBLE_CSS["USER"].format(
                info["bubble_color"],  # Background-color
                info["text_color"],  # Color of the agent's name
                name,  # Agent name
                info["text_color"],  # Font color
                info["font_size"],  # Font size
                content,  # Content
                info["head_url"]  # URL of the avatar
            )
        elif info["id"] == "SYSTEM":
            output = gc.BUBBLE_CSS["SYSTEM"].format(
                info["bubble_color"],  # Background-color
                info["font_size"],  # Font size
                info["text_color"],  # Font color
                name,  # Agent name
                content  # Content
            )
        elif info["id"] == "AGENT":
            output = gc.BUBBLE_CSS["AGENT"].format(
                info["head_url"],  # URL of the avatar
                info["bubble_color"],  # Background-color
                info["text_color"],  # Font color
                name,  # Agent name
                info["text_color"],  # Font color
                info["font_size"],  # Font size
                content,  # Content
            )
        else:
            assert False, f"Id `{info['id']}` is invalid. The valid id is in ['SYSTEM', 'AGENT', 'USER']"
        return output

    @classmethod
    def novel_filter(cls, content, agent_name):

        """For example, <CONTENT>...</CONTENT> should be rendered as "CONTENT: ..."."""
        # Recorder/summary agents get their closing tags mapped to </span>
        # so the colored span opened by BOLD_FORMAT is terminated.
        IS_RECORDER = agent_name.lower() in ["recorder", "summary"]
        if IS_RECORDER:
            BOLD_FORMAT = """<div style="color: #000000; display:inline">
                <b>{}</b>
            </div>
            <span style="color: black;">
            """
        else:
            BOLD_FORMAT = "<b>{}</b>"
        CENTER_FORMAT = """<div style="background-color: #F0F0F0; text-align: center; padding: 5px; color: #000000">
            <b>{}</b>
        </div>
        """
        START_FORMAT = "<{}>"
        END_FORMAT = "</{}>"
        # Tag name -> human-readable (emoji-decorated) heading.
        mapping = {
            "TARGET": "🎯 Current Target: ",
            "NUMBER": "🍖 Required Number: ",
            "THOUGHT": "🤔 Overall Thought: ",
            "FIRST NAME": "⚪ First Name: ",
            "LAST NAME": "⚪ Last Name: ",
            "ROLE": "🤠 Character Properties: ",
            "RATIONALES": "🤔 Design Rationale: ",
            "BACKGROUND": "🚊 Character Background: ",
            "ID": "🔴 ID: ",
            "TITLE": "🧩 Chapter Title: ",
            "ABSTRACT": "🎬 Abstract: ",
            "CHARACTER INVOLVED": "☃️ Character Involved: ",
            "ADVICE": "💬 Advice:",
            "NAME": "📛 Name: ",
            "GENDER": "👩‍👩‍👦‍👦 Gender: ",
            "AGE": "⏲️ Age: ",
            "WORK": "👨‍🔧 Work: ",
            "PERSONALITY": "🧲 Character Personality: ",
            "SPEECH STYLE": "🗣️ Speaking Style: ",
            "RELATION": "🏠 Relation with Others: ",
            "WORD COUNT": "🎰 Word Count: ",
            "CHARACTER DESIGN": "📈 Character Design: ",
            "CHARACTER REQUIRE": "📈 Character Require: ",
            "CHARACTER NAME": "📈 Character Naming Analysis: ",
            "CHARACTER NOW": "📈 Character Now: ",
            "OUTLINE DESIGN": "📈 Outline Design: ",
            "OUTLINE REQUIRE": "📈 Outline Require: ",
            "OUTLINE NOW": "📈 Outline Now: ",
            "SUB TASK": "🎯 Current Sub Task: ",
            "CHARACTER ADVICE": "💬 Character Design Advice: ",
            "OUTLINE ADVANTAGE": "📈 Outline Advantage: ",
            "OUTLINE DISADVANTAGE": "📈 Outline Disadvantage: ",
            "OUTLINE ADVICE": "💬 Outline Advice: ",
            "NEXT": "➡️ Next Advice: ",
            "TOTAL NUMBER": "🔢 Total Number: "
        }
        for i in range(1, 10):
            mapping[f"CHARACTER {i}"] = f"🦄 Character {i}"
            mapping[f"SECTION {i}"] = f"🏷️ Chapter {i}"
        # Replacement order matters: CHARACTER/SECTION headings become centered
        # banners, TOTAL NUMBER opens an explicit black span, everything else
        # becomes a bold heading.
        for key in mapping:
            if key in [f"CHARACTER {i}" for i in range(1, 10)] \
                or key in [f"SECTION {i}" for i in range(1, 10)] \
            :
                content = content.replace(
                    START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key])
                )
            elif key in ["TOTAL NUMBER"]:
                content = content.replace(
                    START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key]) + """<span style="color: black;">"""
                )
                content = content.replace(
                    END_FORMAT.format(key), "</span>"
                )
            else:
                content = content.replace(
                    START_FORMAT.format(key), BOLD_FORMAT.format(mapping[key])
                )

                content = content.replace(
                    END_FORMAT.format(key), "</span>" if IS_RECORDER else ""
                )
        return content

    @classmethod
    def singleagent_filter(cls, content, agent_name):
        # Single-agent UI renders output unchanged.
        return content

    @classmethod
    def debate_filter(cls, content, agent_name):
        # Debate UI renders output unchanged.
        return content

    @classmethod
    def code_filter(cls, content, agent_name):
        # return content.replace("```python", "<pre><code>").replace("```","</pre></code>")
        return content

    @classmethod
    def general_filter(cls, content, agent_name):
        # Fallback: no transformation.
        return content

    @classmethod
    def filter(cls, content: str, agent_name: str, ui_name: str):
        """
        Description:
            Make certain modifications to the output content to enhance its aesthetics when content is showed in gradio.
        Input:
            content: output content
            agent_name: Whose output is it
            ui_name: What UI is currently launching
        Output:
            Modified content
        """
        # Dispatch table: UI class name -> filter implementation.
        mapping = {
            "SingleAgentUI": cls.singleagent_filter,
            "DebateUI": cls.debate_filter,
            "NovelUI": cls.novel_filter,
            "CodeUI": cls.code_filter,
            "GeneralUI": cls.general_filter
        }
        if ui_name in mapping:
            return mapping[ui_name](content, agent_name)
        else:
            return content
260
+
261
class Client:
    """
    For inter-process communication, this is the client.
    `gradio_backend.PY` serves as the backend, while `run_gradio` is the frontend.
    Communication between the frontend and backend is accomplished using Sockets.
    """
    # =======================Radio Const String======================
    SINGLE_MODE = "Single Mode"
    AUTO_MODE = "Auto Mode"
    MODE_LABEL = "Select the execution mode"
    MODE_INFO = "Single mode refers to when the current agent output ends, it will stop running until you click to continue. Auto mode refers to when you complete the input, all agents will continue to output until the task ends."
    # ===============================================================
    # Class-level shared state; all Client instances see the same values.
    mode = AUTO_MODE
    FIRST_RUN:bool = True
    # if last agent is user, then next agent will be executed automatically rather than click button
    LAST_USER:bool = False

    receive_server = None   # generator created by receive_message()
    send_server = None
    current_node = None
    cache = {}              # data rendered/modified by the server

    def __init__(self, host=HOST, port=PORT, bufsize=1024):
        """Connect to the frontend server and perform the hi/check handshake."""
        assert Client.mode in [Client.SINGLE_MODE, Client.AUTO_MODE]
        self.SIGN = SPECIAL_SIGN
        self.bufsize = bufsize
        assert bufsize > 0
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client_socket.connect((host, port))
        # Handshake: server sends "hi" until we answer "hello agent",
        # then it sends "check" to confirm the link.
        while True:
            data = self.client_socket.recv(self.bufsize).decode('utf-8')
            if data == "hi":
                self.client_socket.send("hello agent".encode('utf-8'))
                time.sleep(1)
            elif data == "check":
                break
        print_log("Client: connecting successfully......")

    def start_server(self):
        """Generator: forward every message sent into it until 'exit'."""
        while True:
            message = yield
            if message == 'exit':
                break
            self.send_message(message=message)

    def send_message(self, message):
        """Send the message to the server, terminated by the SPLIT sentinel."""
        if isinstance(message, list) or isinstance(message, dict):
            message = str(message)
        assert isinstance(message, str)
        message = message + self.SIGN["SPLIT"]
        self.client_socket.send(message.encode('utf-8'))

    def receive_message(self, end_identifier: str = None, split_identifier: str = SPECIAL_SIGN["SPLIT"]) -> List:
        """Receive messages from the server, and it will block the process. Supports receiving long text.

        Implemented as a generator: each `send()`/`next()` yields the list of
        complete messages received so far (split on `split_identifier`,
        truncated at `end_identifier` if given).
        """
        remaining = ""
        while True:
            # receive message
            dataset = self.client_socket.recv(self.bufsize)
            try:
                # If decoding fails, it indicates that the current transmission is a long text
                # whose UTF-8 bytes were cut mid-character; keep accumulating bytes.
                dataset = dataset.decode('utf-8')
            except UnicodeDecodeError:
                if not isinstance(remaining, bytes):
                    remaining = remaining.encode('utf-8')
                assert isinstance(dataset, bytes)
                remaining += dataset
                try:
                    dataset = remaining.decode('utf-8')
                    remaining = ""
                except UnicodeDecodeError:
                    continue
            assert isinstance(remaining, str)
            dataset = remaining + dataset
            list_dataset = dataset.split(split_identifier)
            if len(list_dataset) == 1:
                # If there is only one result from the split, it indicates that the current sequence itself has not yet ended.
                remaining = list_dataset[0]
                continue
            else:
                # The final fragment is an unterminated tail; keep it for later.
                remaining = list_dataset[-1]
            # Received successfully
            list_dataset = list_dataset[:-1]
            return_value = []
            for item in list_dataset:
                if end_identifier is not None and item == end_identifier:
                    break
                return_value.append(item)
            identifier = yield return_value
            if identifier is not None:
                # Caller may retune (end_identifier, split_identifier) via send().
                end_identifier, split_identifier = identifier

    def listening_for_start_(self):
        """
        When the server starts, the client is automatically launched.
        At this point, process synchronization is required,
        such as sending client data to the server for rendering,
        then the server sending the modified data back to the client,
        and simultaneously sending a startup command.
        Once the client receives the data, it will start running.
        """
        Client.receive_server = self.receive_message()
        # Waiting for information from the server.
        data: list = next(Client.receive_server)
        assert len(data) == 1
        # NOTE(review): `eval` on socket data is unsafe if the peer were
        # untrusted; here the peer is the local frontend process.
        data = eval(data[0])
        assert isinstance(data, dict)
        Client.cache.update(data)
        # Waiting for start command from the server.
        data:list = Client.receive_server.send(None)
        assert len(data) == 1
        assert data[0] == "<START>"
373
+
374
class WebUI:
    """
    The base class for the frontend, which encapsulates some functions for process information synchronization.
    When a new frontend needs to be created, you should inherit from this class,
    then implement the `construct_ui()` method and set up event listeners.
    Finally, execute `run()` to load it.
    """

    def receive_message(
        self,
        end_identifier:str=None,
        split_identifier:str=SPECIAL_SIGN["SPLIT"]
    )->List:
        """This is the same as in Client class.

        Generator; the first yielded value is the literal "hello" used by
        `_connect()` to prime the generator before real messages arrive.
        """
        yield "hello"
        remaining = ""
        while True:
            dataset = self.client_socket.recv(self.bufsize)
            try:
                # Decoding fails when a UTF-8 sequence is split across recv()
                # calls; accumulate raw bytes until it decodes cleanly.
                dataset = dataset.decode('utf-8')
            except UnicodeDecodeError:
                if not isinstance(remaining, bytes):
                    remaining = remaining.encode('utf-8')
                assert isinstance(dataset, bytes)
                remaining += dataset
                try:
                    dataset = remaining.decode('utf-8')
                    remaining = ""
                except UnicodeDecodeError:
                    continue
            assert isinstance(remaining, str)
            dataset = remaining + dataset
            list_dataset = dataset.split(split_identifier)
            if len(list_dataset) == 1:
                # No delimiter yet: the message is still incomplete.
                remaining = list_dataset[0]
                continue
            else:
                remaining = list_dataset[-1]
            list_dataset = list_dataset[:-1]
            return_value = []
            for item in list_dataset:
                if end_identifier is not None and item == end_identifier:
                    break
                return_value.append(item)
            identifier = yield return_value
            if identifier is not None:
                end_identifier, split_identifier = identifier

    def send_message(self, message:str):
        """Send message to client, terminated by the SPLIT sentinel."""
        SEP = self.SIGN["SPLIT"]
        self.client_socket.send(
            (message+SEP).encode("utf-8")
        )

    def _connect(self):
        """(Re)establish the socket link: bind, spawn the client, handshake."""
        # check
        if self.server_socket:
            self.server_socket.close()
        assert not os.path.isfile("PORT.txt")
        self.socket_port = check_port(PORT)
        # Step1. initialize
        self.server_socket = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM
        )
        # Step2. binding ip and port
        self.server_socket.bind((self.socket_host, self.socket_port))
        # Step3. run client
        self._start_client()

        # Step4. listening for connect
        self.server_socket.listen(1)

        # Step5. test connection
        client_socket, client_address = self.server_socket.accept()
        print_log("server: establishing connection......")
        self.client_socket = client_socket
        # Handshake mirror of Client.__init__: send "hi" until the client
        # answers "hello agent", then confirm with "check".
        while True:
            client_socket.send("hi".encode('utf-8'))
            time.sleep(1)
            data = client_socket.recv(self.bufsize).decode('utf-8')
            if data == "hello agent":
                client_socket.send("check".encode('utf-8'))
                print_log("server: connect successfully")
                break
        # check_port() created PORT.txt; consume it so the next connect
        # performs a fresh port scan.
        assert os.path.isfile("PORT.txt")
        os.remove("PORT.txt")
        if self.receive_server:
            del self.receive_server
        self.receive_server = self.receive_message()
        assert next(self.receive_server) == "hello"

    @abstractmethod
    def render_and_register_ui(self):
        # You need to implement this function.
        # The function's purpose is to bind the name of the agent with an image.
        # The name of the agent is stored in `self.cache[]`,
        # and the function for binding is in the method `add_agents` of the class `GradioConfig` in `Gradio_Config/gradio_config.py``.
        # This function will be executed in `self.first_recieve_from_client()`
        pass

    def first_recieve_from_client(self, reset_mode:bool=False):
        """
        This function is used to receive information from the client and is typically executed during the initialization of the class.
        If `reset_mode` is False, it will bind the name of the agent with an image.
        """
        self.FIRST_RECIEVE_FROM_CLIENT = True
        data_list:List = self.receive_server.send(None)
        assert len(data_list) == 1
        # NOTE(review): `eval` on socket data is safe only because the peer
        # is the local backend process this class spawned.
        data = eval(data_list[0])
        assert isinstance(data, dict)
        self.cache.update(data)
        if not reset_mode:
            self.render_and_register_ui()

    def _second_send(self, message:dict):
        # Send the modified message.
        # It will be executed in `self.send_start_cmd()` automatically.
        self.send_message(str(message))

    def _third_send(self):
        # Send start command.
        # It will be executed in `self.send_start_cmd()` automatically.
        self.send_message(self.SIGN['START'])

    def send_start_cmd(self, message:dict={"hello":"hello"}):
        # If you have no message to send, you can ignore the args `message`.
        # NOTE(review): mutable default argument; harmless here because the
        # dict is only read, but callers should pass their own dict.
        assert self.FIRST_RECIEVE_FROM_CLIENT, "Please make sure you have executed `self.first_recieve_from_client()` manually."
        self._second_send(message=message)
        time.sleep(1)
        self._third_send()
        self.FIRST_RECIEVE_FROM_CLIENT = False

    def __init__(
        self,
        client_cmd: list,  # ['python','test.py','--a','b','--c','d']
        socket_host: str = HOST,
        socket_port: int = PORT,
        bufsize: int = 1024,
        ui_name: str = ""
    ):
        """Store connection parameters, then spawn the backend and connect."""
        self.ui_name = ui_name
        self.server_socket = None
        self.SIGN = SPECIAL_SIGN
        self.socket_host = socket_host
        self.socket_port = socket_port
        self.bufsize = bufsize
        self.client_cmd = client_cmd

        self.receive_server = None
        self.cache = {}
        assert self.bufsize > 0
        self._connect()

    def _start_client(self):
        # Launch the backend process that will act as the socket client.
        print(f"server: excuting `{' '.join(self.client_cmd)}` ...")
        self.backend = subprocess.Popen(self.client_cmd)

    def _close_client(self):
        # Terminate the backend process.
        print(f"server: killing `{' '.join(self.client_cmd)}` ...")
        self.backend.terminate()

    def reset(self):
        """Kill the backend and reconnect from scratch."""
        print("server: restarting ...")
        self._close_client()
        time.sleep(1)
        self._connect()

    def render_bubble(self, rendered_data, agent_response, node_name, render_node_name:bool=True):
        # Rendered bubbles (HTML format) are used for gradio output.
        # Appends every agent's filtered, CSS-wrapped message to the last
        # chat entry in `rendered_data`.
        output = f"**{node_name}**<br>" if render_node_name else ""
        for item in agent_response:
            for agent_name in item:
                content = item[agent_name].replace("\n", "<br>")
                content = UIHelper.filter(content, agent_name, self.ui_name)
                output = f"{output}<br>{UIHelper.wrap_css(content, agent_name)}"
        rendered_data[-1] = [rendered_data[-1][0], output]
        return rendered_data

    def run(self,share: bool = True):
        """Start the gradio app (queued); `self.demo` must exist by now."""
        self.demo.queue()
        self.demo.launch(share=share)
556
+
557
+
558
if __name__ == '__main__':
    # Library module: nothing to execute directly.
    pass
Gradio_Config/gradio_config.py ADDED
@@ -0,0 +1,437 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The AIWaves Inc. team.
3
+
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import json
18
+ from PIL import Image
19
+ import requests
20
+ from typing import List, Tuple
21
+
22
class GradioConfig:
    """Registry of avatar images, colors and HTML bubble templates for the UI.

    Used as a static class: agents are registered via `add_agent()`, which
    assigns the next avatar URL and derives bubble/text colors from it.
    """
    # How many avatars are currently registered
    POINTER = 0

    # Avatar image. You can add or replace.
    AGENT_HEAD_URL = [
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306241687579617434043.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306241687592097408547.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561699613.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561275758.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021090300/ry5k31wt33c.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021090300/0ls2gmwhrf5.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/02/202302281677545695326193.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/03/202303271679886128550253.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686711344407060.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686711345834296.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/05/202305171684311194291520.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/05/202305171684311196958993.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021082612/vr0bkov0dwl.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021082612/auqx5zfsv5g.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021082612/llofpivtwls.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021082612/3j2sdot3ye0.jpg",
        "https://img.touxiangwu.com/2020/3/nQfYf2.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918068774532.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918068289945.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918069785183.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561292003.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561578616.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726564597524.jpg"
    ]
    USER_HEAD_URL = "https://img.touxiangwu.com/zb_users/upload/2023/05/202305301685407468585486.jpg"

    # The css style of gradio.Chatbot
    CSS = """
    #chatbot1 .user {
        background-color:transparent;
        border-color:transparent;
    }
    #chatbot1 .bot {
        background-color:transparent;
        border-color:transparent;
    }
    #btn {color: red; border-color: red;}
    """

    ID = ["USER", "AGENT", "SYSTEM"]

    # Bubble template
    # Each template is filled positionally by UIHelper.wrap_css(); the
    # comment above each entry documents the argument order.
    BUBBLE_CSS = {
        # Background-color Name-color Name-content Font-color Font-size Content Avatar-URL
        "USER": """
        <div style="display: flex; align-items: flex-start; justify-content: flex-end;">
            <div style="background-color: {}; border-radius: 20px 0px 20px 20px; padding: 15px; min-width: 100px; max-width: 300px;">
                <p style="margin: 0; padding: 0; color: {}; font-weight: bold; font-size: 18px;">{}</p>
                <p style="margin: 0; padding: 0; color: {}; font-size: {}px;">{}</p>
            </div>
            <img src="{}" alt="USER" style="width: 50px; height: 50px; border-radius: 50%; margin-left: 10px;">
        </div>
        """,

        # Avatar-URL Background-color Name-color Name-Content Font-color Font-size Content
        "AGENT": """
        <div style="display: flex; align-items: flex-start;">
            <img src="{}" alt="AGENT" style="width: 50px; height: 50px; border-radius: 50%; margin-right: 10px;">
            <div style="background-color: {}; border-radius: 0px 20px 20px 20px; padding: 15px; min-width: 100px; max-width: 600px;">
                <p style="margin: 0; padding: 0; color: {}; font-weight: bold; font-size: 18px;">{}</p>
                <p style="margin: 0; padding: 0; color: {}; font-size: {}px;">{}</p>
            </div>
        </div>
        """,

        # Backrgound-color Font-size Font-color Name Content
        "SYSTEM": """
        <div style="display: flex; align-items: center; justify-content: center;">
            <div style="background-color: {}; border-radius: 20px; padding: 1px; min-width: 200px; max-width: 1000px;">
                <p style="margin: 0; padding: 0; text-align: center; font-size: {}px; font-weight: bold; font-family: '微软雅黑', sans-serif; color: {};">{}:{}</p>
            </div>
        </div>
        """
    }

    ROLE_2_NAME = {}

    # Per-participant rendering info, keyed by display name.
    OBJECT_INFO = {

        "User": {
            # https://img-blog.csdnimg.cn/img_convert/7c20bc39ac69b6972a22e18762d02db3.jpeg
            "head_url": USER_HEAD_URL,
            "bubble_color": "#95EC69",
            "text_color": "#000000",
            "font_size": 0,
            "id": "USER"
        },

        "System": {
            # https://img-blog.csdnimg.cn/img_convert/e7e5887cfff67df8c2205c2ef0e5e7fa.png
            "head_url": "https://img.touxiangwu.com/zb_users/upload/2023/03/202303141678768524747045.jpg",
            "bubble_color": "#7F7F7F",  ##FFFFFF
            "text_color": "#FFFFFF",  ##000000
            "font_size": 0,
            "id": "SYSTEM"
        },

        "wait": {
            "head_url": "https://img.touxiangwu.com/zb_users/upload/2022/12/202212011669881536145501.jpg",
            "bubble_color": "#E7CBA6",
            "text_color": "#000000",
            "font_size": 0,
            "id": "AGENT"
        },

        "Recorder": {
            "head_url": "https://img.touxiangwu.com/zb_users/upload/2023/02/202302281677545695326193.jpg",
            "bubble_color": "#F7F7F7",
            "text_color": "#000000",
            "font_size": 0,
            "id": "AGENT"
        }
    }

    @classmethod
    def color_for_img(cls, url):
        """
        Extract the main colors from the picture and set them as the background color,
        then determine the corresponding text color.

        Side effect: downloads the image to a local file named `image.jpg`.
        Returns (bubble_hex_color, font_hex_color).
        """

        def get_main_color(image):
            # Most frequent pixel value in the image.
            image = image.convert("RGB")
            width, height = image.size
            pixels = image.getcolors(width * height)
            most_common_pixel = max(pixels, key=lambda item: item[0])
            return most_common_pixel[1]

        def is_dark_color(rgb_color):
            # Relative luminance below 0.5 counts as dark.
            r, g, b = rgb_color
            luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255
            return luminance < 0.5

        def download_image(url):
            print(f"binding: {url}")
            response = requests.get(url)
            if response.status_code == 200:
                with open('image.jpg', 'wb') as f:
                    f.write(response.content)

        def rgb_to_hex(color):
            return "#{:02X}{:02X}{:02X}".format(color[0], color[1], color[2])

        def get_color(image_url):
            download_image(image_url)

            image = Image.open("image.jpg")
            main_color = get_main_color(image)
            is_dark = is_dark_color(main_color)

            if is_dark:
                font_color = "#FFFFFF"
            else:
                font_color = "#000000"

            return rgb_to_hex(main_color), font_color

        return get_color(url)

    @classmethod
    def init(cls, JSON):
        # Deprecated
        # Legacy loader: registers agents from a SOP JSON file instead of
        # via add_agent().  Kept for backward compatibility.
        with open(JSON) as f:
            sop = json.load(f)
        cnt = 0
        FISRT_NODE = True
        fisrt_node_roles = []
        for node_name in sop['nodes']:
            node_info = sop['nodes'][node_name]
            agent_states = node_info['agent_states']
            for agent_role in agent_states:
                name = agent_states[agent_role]['style']['name']
                cls.ROLE_2_NAME[agent_role] = name
                if FISRT_NODE:
                    fisrt_node_roles.append(agent_role)
                bubble_color, text_color = cls.color_for_img(cls.AGENT_HEAD_URL[cnt])
                cls.OBJECT_INFO[name] = {
                    "head_url": f"{cls.AGENT_HEAD_URL[cnt]}",
                    "bubble_color": bubble_color,
                    "text_color": text_color,
                    "font_size": 0,
                    "id": "AGENT"
                }
                cnt += 1
            if FISRT_NODE:
                FISRT_NODE = False
        print(cls.OBJECT_INFO)
        # Normalize font sizes after registration.
        for usr_name in cls.OBJECT_INFO:
            if cls.OBJECT_INFO[usr_name]["id"] == "SYSTEM":
                cls.OBJECT_INFO[usr_name]["font_size"] = 12
            elif cls.OBJECT_INFO[usr_name]["id"] in ["USER", "AGENT"]:
                cls.OBJECT_INFO[usr_name]["font_size"] = 16
            else:
                assert False
        return fisrt_node_roles

    @classmethod
    def add_agent(cls, agents_name:List):
        """Register each name with the next avatar and derived colors."""
        for name in agents_name:
            bubble_color, text_color = cls.color_for_img(cls.AGENT_HEAD_URL[cls.POINTER])
            cls.OBJECT_INFO[name] = {
                "head_url": f"{cls.AGENT_HEAD_URL[cls.POINTER]}",
                "bubble_color": bubble_color,
                "text_color": text_color,
                "font_size": 0,
                "id": "AGENT"
            }
            cls.POINTER += 1
        # Normalize font sizes after registration.
        for usr_name in cls.OBJECT_INFO:
            if cls.OBJECT_INFO[usr_name]["id"] == "SYSTEM":
                cls.OBJECT_INFO[usr_name]["font_size"] = 12
            elif cls.OBJECT_INFO[usr_name]["id"] in ["USER", "AGENT"]:
                cls.OBJECT_INFO[usr_name]["font_size"] = 16
            else:
                assert False
243
+
244
+
245
class StateConfig:
    """UI configuration for the step progress bar (indicating the current node)."""

    # Raw CSS injected into the page; styles the <ul class="progressbar"> steps.
    CSS = """
:root {
    --gradient-start: 100%;
    --gradient-end: 0%;
}
.container.progress-bar-container {
    position: relative;
    display: flex;
    align-items: flex-end;
    width: 100%;
    overflow-x: auto;
    padding-bottom: 30px;
    padding-top: 20px
}
.container.progress-bar-container::-webkit-scrollbar {
    width: 8px;
    background-color: transparent;
}

.container.progress-bar-container::-webkit-scrollbar-thumb {
    background-color: transparent;
}

.progress-bar-container .progressbar {
    counter-reset: step;
    white-space: nowrap;
}
.progress-bar-container .progressbar li {
    list-style: none;
    display: inline-block;
    width: 200px;
    position: relative;
    text-align: center;
    cursor: pointer;
    white-space: normal;
}
.progress-bar-container .progressbar li:before {
    content: counter(step);
    counter-increment: step;
    width: 30px;
    height: 30px;
    line-height: 30px;
    border: 1px solid #ddd;
    border-radius: 100%;
    display: block;
    text-align: center;
    margin: 0 auto 10px auto;
    background-color: #ffffff;
}
.progress-bar-container .progressbar li:after {
    content: attr(data-content);
    position: absolute;
    width: 87%;
    height: 2px;
    background-color: #dddddd;
    top: 15px;
    left: -45%;
}
.progress-bar-container .progressbar li:first-child:after {
    content: none;
}
.progress-bar-container .progressbar li.active {
    color: green;
}
.progress-bar-container .progressbar li.active:before {
    border-color: green;
    background-color: green;
    color: white;
}
.progress-bar-container .progressbar li.active + li:after {
    background: linear-gradient(to right, green var(--gradient-start), lightgray var(--gradient-end));
}
.progress-bar-container .small-element {
    transform: scale(0.8);
}
.progress-bar-container .progressbar li span {
    position: absolute;
    top: 40px;
    left: 0;
    width: 100%;
    text-align: center;
}
.progress-bar-container .progressbar li .data-content {
    position: absolute;
    width: 100%;
    top: -10px;
    left: -100px;
    text-align: center;
}
"""

    # Page skeleton: first slot takes CSS, second the rendered <li> items.
    FORMAT = """
<html>
<head>
<style>
{}
</style>
</head>
<body>
<br>
<center>
<div class="container progress-bar-container">
    <ul class="progressbar">
        {}
    </ul>
</div>
</center>
</body>
</html>
"""

    # Display names of the states, set by create_states().
    STATES_NAME: List[str] = None

    @classmethod
    def _generate_template(cls, types: str) -> str:
        """Return an HTML <li> template string for one progress-bar step.

        Supported ``types`` (case-insensitive):
          * normal           -- a state with no execution;
          * active           -- active state, no extra content;
          * active-show-up   -- active, content shown above the connector line;
          * active-show-down -- active, content shown below the connector line;
          * active-show-both -- active, content shown above and below;
          * active-show-none -- active, no content around the connector line.
        """
        kind = types.lower()
        if kind not in ("normal", "active-show-up", "active-show-down",
                        "active-show-both", "active", "active-show-none"):
            # Explicit raise instead of `assert`: assertions vanish under -O.
            raise ValueError(f"unknown progress-bar template type: {types!r}")
        both_templates = """<li class="active" style="--gradient-start: {}%; --gradient-end: {}%;">
    <div class="data-content">
        <center>
            <p style="line-height: 1px;"></p>
            {}
            <p>
            {}
            </p>
        </center>
    </div>
    <span>{}</span>
</li>"""

        if kind == "normal":
            templates = "<li><span>{}</span></li>"
        elif kind == "active":
            templates = """<li class="active"><span>{}</span></li>"""
        elif kind == "active-show-up":
            # Keep gradient + upper-content slots, blank the lower slot.
            templates = both_templates.format("{}", "{}", "{}", "", "{}")
        elif kind == "active-show-down":
            # Keep gradient + lower-content slots, blank the upper slot.
            templates = both_templates.format("{}", "{}", "", "{}", "{}")
        elif kind == "active-show-both":
            templates = both_templates
        else:  # active-show-none
            templates = """<li class="active" style="--gradient-start: {}%; --gradient-end: {}%;">
    <span>{}</span>
</li>"""
        return templates

    @classmethod
    def update_states(cls, current_states: List[int], current_templates: List[str], show_content: List[Tuple[str]]) -> str:
        """Render the progress bar for the current execution snapshot.

        ``current_states[i]`` is the percent progress (0-100) of state *i*;
        ``current_templates[i]`` names the template for state *i*;
        ``show_content[i]`` holds text shown around state *i+1*'s connector.
        """
        assert len(current_states) == len(current_templates)
        # You can dynamically change the number of states.
        # assert len(current_states) == len(cls.STATES_NAME)
        css_code = []
        for idx in range(len(current_states)):
            if idx == 0:
                # First state: active as soon as it shows any progress.
                if current_states[idx] != 0:
                    css_code = [f"{cls._generate_template('active').format(cls.STATES_NAME[idx])}"]
                else:
                    css_code = [f"{cls._generate_template('normal').format(cls.STATES_NAME[idx])}"]
                continue
            if current_states[idx-1] == 0:
                # Previous state untouched: this one cannot be active yet.
                new_code = f"{cls._generate_template('normal').format(cls.STATES_NAME[idx])}"
            else:
                # Connector gradient length mirrors the previous state's progress.
                new_code = f"{cls._generate_template(current_templates[idx]).format(current_states[idx-1], 100-current_states[idx-1], *(show_content[idx-1]), cls.STATES_NAME[idx])}"
            if current_states[idx-1] != 100 or (current_states[idx] == 0 and current_states[idx-1] == 100):
                # Demote to inactive when the previous step is not fully done.
                new_code = new_code.replace("""li class="active" """, """li """)
            css_code.append(new_code)
        return "\n".join(css_code)

    @classmethod
    def create_states(cls, states_name: List[str], manual_create_end_nodes: bool = False):
        """Initialise the bar with one 'normal' step per state name.

        Bug fix: the original appended "Done" to the caller's list in place,
        mutating the argument; we build a copy instead.
        """
        if manual_create_end_nodes:
            states_name = [*states_name, "Done"]
        css_code = ""
        cls.STATES_NAME: List[str] = states_name
        for name in states_name:
            css_code = f"{css_code}\n{cls._generate_template('normal').format(name)}"
        return css_code
434
+
435
+
436
# Importing this module has no side effects; there is no standalone demo yet.
if __name__ == '__main__':
    pass
config.json ADDED
@@ -0,0 +1,492 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "config": {
3
+ "API_KEY": "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",
4
+ "PROXY": "http://127.0.0.1:7890",
5
+ "MAX_CHAT_HISTORY": "1000",
6
+ "TOP_K": "1"
7
+ },
8
+ "LLM_type": "OpenAI",
9
+ "LLM": {
10
+ "temperature": 0.3,
11
+ "model": "gpt-3.5-turbo-16k-0613",
12
+ "log_path": "logs/god"
13
+ },
14
+ "root": "design_state",
15
+ "relations": {
16
+ "design_state": {
17
+ "0": "design_state",
18
+ "1": "develop_state"
19
+ },
20
+ "develop_state": {
21
+ "0": "develop_state",
22
+ "1": "debug_state"
23
+ },
24
+ "debug_state": {
25
+ "0": "debug_state",
26
+ "1": "end_state"
27
+ }
28
+ },
29
+ "agents": {
30
+ "Alice": {
31
+ "style": "august",
32
+ "roles": {
33
+ "design_state": "Boss",
34
+ "develop_state": "Boss",
35
+ "debug_state": "Boss"
36
+ }
37
+ },
38
+ "Bob": {
39
+ "style": "professional",
40
+ "roles": {
41
+ "design_state": "Architect_1"
42
+ }
43
+ },
44
+ "Belle": {
45
+ "style": "professional",
46
+ "roles": {
47
+ "design_state": "Architect_2"
48
+ }
49
+ },
50
+ "Candy": {
51
+ "style": "professional",
52
+ "roles": {
53
+ "develop_state": "Developer_1",
54
+ "debug_state": "Developer_1"
55
+ }
56
+ },
57
+ "Carl": {
58
+ "style": "professional",
59
+ "roles": {
60
+ "develop_state": "Developer_2",
61
+ "debug_state": "Developer_2"
62
+ }
63
+ },
64
+ "David": {
65
+ "style": "professional",
66
+ "roles": {
67
+ "debug_state": "Debugger"
68
+ }
69
+ },
70
+ "Eva": {
71
+ "style": "professional",
72
+ "roles": {
73
+ "debug_state": "Coder"
74
+ }
75
+ },
76
+ "Michael": {
77
+ "style": "professional",
78
+ "roles": {
79
+ "design_state": "Leader",
80
+ "develop_state": "Leader",
81
+ "debug_state": "Leader"
82
+ }
83
+ }
84
+ },
85
+ "states": {
86
+ "end_state": {
87
+ "agent_states": {}
88
+ },
89
+ "design_state": {
90
+ "LLM_type": "OpenAI",
91
+ "LLM": {
92
+ "temperature": 0.3,
93
+ "model": "gpt-3.5-turbo-16k-0613",
94
+ "log_path": "logs/god"
95
+ },
96
+ "roles": [
97
+ "Boss",
98
+ "Architect_1",
99
+ "Leader",
100
+ "Architect_2"
101
+ ],
102
+ "controller": {
103
+ "controller_type": "order",
104
+ "max_chat_nums": 4,
105
+ "judge_system_prompt": "",
106
+ "judge_last_prompt": "",
107
+ "judge_extract_words": "end"
108
+ },
109
+ "environment_prompt": "Imagine a scenario where the boss has presented a requirement. The architect is tasked with proposing a Python framework based on this requirement. The leader's role is to provide feedback on the architect's proposal, and another architect will finalize the framework based on the leader's comments.The target game <game>a snake game with python</game>",
110
+ "begin_role": "Boss",
111
+ "begin_query": "Please write code for the target game",
112
+ "agent_states": {
113
+ "Boss": {
114
+ "LLM_type": "OpenAI",
115
+ "LLM": {
116
+ "temperature": 0.3,
117
+ "model": "gpt-3.5-turbo-16k-0613",
118
+ "log_path": "logs/Boss"
119
+ },
120
+ "style": {
121
+ "role": "Boss",
122
+ "style": "august"
123
+ },
124
+ "task": {
125
+ "task": "Present the project requirements to the team and articulate the project's objectives clearly."
126
+ },
127
+ "rule": {
128
+ "rule": "It's crucial to communicate the project's objectives, key deliverables, and any specific requirements comprehensively. This ensures that the entire team understands the project's significance and direction."
129
+ },
130
+ "demonstrations": {
131
+ "demonstrations": "Prepare a comprehensive project overview that encompasses the project's scope, objectives, any constraints, and desired outcomes. This document should outline the required features, performance goals, and budget constraints, for example."
132
+ }
133
+ },
134
+ "Architect_1": {
135
+ "LLM_type": "OpenAI",
136
+ "LLM": {
137
+ "temperature": 0.3,
138
+ "model": "gpt-3.5-turbo-16k-0613",
139
+ "log_path": "logs/Architect"
140
+ },
141
+ "style": {
142
+ "role": "Architect",
143
+ "style": "professional"
144
+ },
145
+ "task": {
146
+ "task": "Propose a Python framework based on the BOSS's requirements."
147
+ },
148
+ "rule": {
149
+ "rule": "Thoroughly analyze the project requirements, evaluate potential technologies, and select suitable design principles to meet the project's needs."
150
+ },
151
+ "demonstrations": {
152
+ "demonstrations": "Create a detailed architectural proposal document, including the rationale for choosing the proposed framework and accompanying design diagrams. For instance, provide an architectural diagram outlining the framework's high-level structure and a detailed explanation of why this architecture was selected."
153
+ }
154
+ },
155
+ "Leader": {
156
+ "LLM_type": "OpenAI",
157
+ "LLM": {
158
+ "temperature": 0.3,
159
+ "model": "gpt-3.5-turbo-16k-0613",
160
+ "log_path": "logs/Leader"
161
+ },
162
+ "style": {
163
+ "role": "Leader",
164
+ "style": "professional"
165
+ },
166
+ "task": {
167
+ "task": "evaluate the architecture proposal and provide specific feedback for improvement"
168
+ },
169
+ "rule": {
170
+ "rule": " Offer constructive feedback aligned with the project's objectives to enhance the proposed framework."
171
+ },
172
+ "demonstrations": {
173
+ "demonstrations": "Review Architect1's proposal meticulously and provide written feedback. Ensure the feedback is specific and includes actionable suggestions for improvement. For instance, you can point out areas that need improvement and explain how suggested changes align with project goals."
174
+ }
175
+ },
176
+ "Architect_2": {
177
+ "LLM_type": "OpenAI",
178
+ "LLM": {
179
+ "temperature": 0.3,
180
+ "model": "gpt-3.5-turbo-16k-0613",
181
+ "log_path": "logs/Architect"
182
+ },
183
+ "style": {
184
+ "role": "Architect_2",
185
+ "style": "professional"
186
+ },
187
+ "task": {
188
+ "task": "Finalize the Python framework based on Leader's feedback."
189
+ },
190
+ "rule": {
191
+ "rule": " Integrate Leader's feedback into the architectural proposal and make necessary adjustments to refine the framework."
192
+ },
193
+ "demonstrations": {
194
+ "demonstrations": "Revise the architectural proposal document to reflect the improvements suggested by Leader. Provide clear documentation of the changes made, including any revised design diagrams and explanations for incorporating Leader's feedback."
195
+ },
196
+ "ExtractComponent": {
197
+ "extract_words": [
198
+ "system"
199
+ ],
200
+ "system_prompt": "Please extract the modified system as completely as possible.",
201
+ "last_prompt": ""
202
+ }
203
+ }
204
+ }
205
+ },
206
+ "develop_state": {
207
+ "LLM_type": "OpenAI",
208
+ "LLM": {
209
+ "temperature": 0.3,
210
+ "model": "gpt-3.5-turbo-16k-0613",
211
+ "log_path": "logs/god"
212
+ },
213
+ "roles": [
214
+ "Boss",
215
+ "Developer_1",
216
+ "Leader",
217
+ "Developer_2"
218
+ ],
219
+ "controller": {
220
+ "controller_type": "order",
221
+ "max_chat_nums": 4,
222
+ "judge_system_prompt": "",
223
+ "judge_last_prompt": "",
224
+ "judge_extract_words": "end"
225
+ },
226
+ "environment_prompt": "In this scenario, the boss has provided a requirement. The developer's task is to write code based on the architecture proposed by the architect. The leader evaluates the written code for elegance, readability, and functionality, providing feedback. Another developer makes necessary modifications to the code.The target game <game>a snake game with python</game>",
227
+ "begin_role": "Boss",
228
+ "begin_query": "Please write code for the target game",
229
+ "agent_states": {
230
+ "Boss": {
231
+ "LLM_type": "OpenAI",
232
+ "LLM": {
233
+ "temperature": 0.3,
234
+ "model": "gpt-3.5-turbo-16k-0613",
235
+ "log_path": "logs/Boss"
236
+ },
237
+ "style": {
238
+ "role": "Boss",
239
+ "style": "august"
240
+ },
241
+ "task": {
242
+ "task": "Communicate the project requirements and vision to the team."
243
+ },
244
+ "rule": {
245
+ "rule": "Clearly define the project's objectives, functionality, and any specific requirements."
246
+ },
247
+ "demonstrations": {
248
+ "demonstrations": "Create a detailed project brief that outlines the scope, objectives, and specific features required for the project in a clear and concise manner. This document should provide a comprehensive understanding of what the project aims to achieve."
249
+ }
250
+ },
251
+ "Developer_1": {
252
+ "LLM_type": "OpenAI",
253
+ "LLM": {
254
+ "temperature": 0.3,
255
+ "model": "gpt-3.5-turbo-16k-0613",
256
+ "log_path": "logs/Developer"
257
+ },
258
+ "style": {
259
+ "role": "Developer",
260
+ "style": "professional"
261
+ },
262
+ "task": {
263
+ "task": "write elegant, readable, extensible, and efficient code"
264
+ },
265
+ "rule": {
266
+ "rule": "write code that conforms to standards like PEP8, is modular, easy to read, and maintainable. Output the code only,Ensure that the code adheres to the architectural guidelines, coding standards, and best practices."
267
+ },
268
+ "demonstrations": {
269
+ "demonstrations": "Follow the architectural proposal closely while writing code. Document the code adequately, use meaningful variable names, and maintain proper code structure. For example, provide code snippets that demonstrate adherence to coding standards and architectural design. Output the code only."
270
+ },
271
+ "CustomizeComponent": {
272
+ "template": "You need to write code based on the following framework: {system}",
273
+ "keywords": [
274
+ "system"
275
+ ]
276
+ }
277
+ },
278
+ "Leader": {
279
+ "LLM_type": "OpenAI",
280
+ "LLM": {
281
+ "temperature": 0.3,
282
+ "model": "gpt-3.5-turbo-16k-0613",
283
+ "log_path": "logs/Leader"
284
+ },
285
+ "style": {
286
+ "role": "Leader",
287
+ "style": "professional"
288
+ },
289
+ "task": {
290
+ "task": "Evaluate the written code for elegance, readability, and functionality."
291
+ },
292
+ "rule": {
293
+ "rule": "Provide constructive feedback that helps improve code quality and alignment with project goals."
294
+ },
295
+ "demonstrations": {
296
+ "demonstrations": " Thoroughly review the code written by Developer1. Offer feedback on code organization, naming conventions, code efficiency, and any functional improvements needed. For instance, provide specific examples of code sections that require refinement and explain how these changes enhance code quality."
297
+ }
298
+ },
299
+ "Developer_2": {
300
+ "LLM_type": "OpenAI",
301
+ "LLM": {
302
+ "temperature": 0.3,
303
+ "model": "gpt-3.5-turbo-16k-0613",
304
+ "log_path": "logs/Developer"
305
+ },
306
+ "style": {
307
+ "role": "Developer",
308
+ "style": "professional"
309
+ },
310
+ "task": {
311
+ "task": " Make necessary modifications to the code based on Leader's feedback."
312
+ },
313
+ "rule": {
314
+ "rule": "make code modifications that conform to standards like PEP8, are modular, easy to read, and maintainable. Output the modified code only,Incorporate Leader's feedback into the code and address any issues or improvements identified."
315
+ },
316
+ "demonstrations": {
317
+ "demonstrations": " Review the feedback provided by Leader and apply the suggested modifications to the code. Document the changes made and ensure that the updated code aligns with the project's goals and architectural guidelines. Provide examples of code segments before and after the modifications to illustrate the improvements. Output the modified code only."
318
+ },
319
+ "ExtractComponent": {
320
+ "extract_words": [
321
+ "code"
322
+ ],
323
+ "system_prompt": "Please extract the modified code as completely as possible.",
324
+ "last_prompt": ""
325
+ }
326
+ }
327
+ }
328
+ },
329
+ "debug_state": {
330
+ "LLM_type": "OpenAI",
331
+ "LLM": {
332
+ "temperature": 0.3,
333
+ "model": "gpt-3.5-turbo-16k-0613",
334
+ "log_path": "logs/god"
335
+ },
336
+ "roles": [
337
+ "Boss",
338
+ "Debugger",
339
+ "Developer_1",
340
+ "Leader",
341
+ "Developer_2",
342
+ "Coder"
343
+ ],
344
+ "controller": {
345
+ "controller_type": "order",
346
+ "max_chat_nums": 6,
347
+ "judge_system_prompt": "",
348
+ "judge_last_prompt": "",
349
+ "judge_extract_words": "end"
350
+ },
351
+ "environment_prompt": "In this scenario, the boss has provided a requirement. The debugger simulates a compiler to determine whether the code is runnable and provides feedback. The developer writes code based on the debugger's feedback. The leader evaluates whether the final code meets the boss's requirements and provides feedback for further modifications. The coder writes the final code to a file.The target game <game>a snake game with python</game>",
352
+ "begin_role": "Boss",
353
+ "begin_query": "Please make the code both runnable and more efficient.",
354
+ "agent_states": {
355
+ "Boss": {
356
+ "LLM_type": "OpenAI",
357
+ "LLM": {
358
+ "temperature": 0.3,
359
+ "model": "gpt-3.5-turbo-16k-0613",
360
+ "log_path": "logs/Boss"
361
+ },
362
+ "style": {
363
+ "role": "Boss",
364
+ "style": "august"
365
+ },
366
+ "task": {
367
+ "task": " Communicate the project requirements and vision to the team."
368
+ },
369
+ "rule": {
370
+ "rule": "Clearly define the project's objectives, functionality, and any specific requirements."
371
+ },
372
+ "demonstrations": {
373
+ "demonstrations": " Create a detailed project brief that outlines the scope, objectives, and specific features required for the project in a clear and concise manner. This document should provide a comprehensive understanding of what the project aims to achieve."
374
+ }
375
+ },
376
+ "Debugger": {
377
+ "LLM_type": "OpenAI",
378
+ "LLM": {
379
+ "temperature": 0.3,
380
+ "model": "gpt-3.5-turbo-16k-0613",
381
+ "log_path": "logs/Debugger"
382
+ },
383
+ "style": {
384
+ "role": "Debugger",
385
+ "style": "professional"
386
+ },
387
+ "task": {
388
+ "task": "Simulate a compiler to determine whether the code is runnable and provide feedback."
389
+ },
390
+ "rule": {
391
+ "rule": "Thoroughly test the code for syntax errors, logical issues, and other potential problems. Offer detailed feedback that helps the developer understand and resolve any issues.Please pay special attention to some logic bugs in the game, such as whether the game can run normally."
392
+ },
393
+ "demonstrations": {
394
+ "demonstrations": " Run the code provided by Developer1 through a simulated compiler or debugger. Document any errors, warnings, or issues encountered during the process. Provide feedback that includes specific examples of code problems and suggested solutions."
395
+ },
396
+ "CustomizeComponent": {
397
+ "template": "You need to Run the following code: {code}, through a simulated compiler or debugger. Document any errors, warnings, or issues encountered during the process. Provide feedback that includes specific examples of code problems and suggested solutions.",
398
+ "keywords": [
399
+ "code"
400
+ ]
401
+ }
402
+ },
403
+ "Developer_1": {
404
+ "LLM_type": "OpenAI",
405
+ "LLM": {
406
+ "temperature": 0.3,
407
+ "model": "gpt-3.5-turbo-16k-0613",
408
+ "log_path": "logs/Developer"
409
+ },
410
+ "style": {
411
+ "role": "Developer",
412
+ "style": "professional"
413
+ },
414
+ "task": {
415
+ "task": "write elegant, readable, extensible, and efficient code based on the debugger's feedback."
416
+ },
417
+ "rule": {
418
+ "rule": "write code that conforms to standards like PEP8, is modular, easy to read, and maintainable.Address the issues identified by the Debugger and ensure that the code meets the project's requirements."
419
+ },
420
+ "demonstrations": {
421
+ "demonstrations": " Review the feedback provided by the Debugger and make the necessary modifications to the code. Document the changes made and ensure that the code is free of errors and warnings. Provide examples of code segments before and after the modifications. Output the modified code only."
422
+ }
423
+ },
424
+ "Leader": {
425
+ "LLM_type": "OpenAI",
426
+ "LLM": {
427
+ "temperature": 0.3,
428
+ "model": "gpt-3.5-turbo-16k-0613",
429
+ "log_path": "logs/Leader"
430
+ },
431
+ "style": {
432
+ "role": "Leader",
433
+ "style": "professional"
434
+ },
435
+ "task": {
436
+ "task": "Evaluate whether the final code meets the boss's requirements and provide feedback for further modifications."
437
+ },
438
+ "rule": {
439
+ "rule": " Assess the code's alignment with the project's objectives, functionality, and quality standards. Offer constructive feedback to enhance the code's overall quality."
440
+ },
441
+ "demonstrations": {
442
+ "demonstrations": " Carefully review the code provided by Developer1 after addressing Debugger's feedback. Offer feedback on code organization, readability, and any functional improvements needed. Provide specific examples of code sections that require further refinement and explain how these changes enhance the code's quality."
443
+ }
444
+ },
445
+ "Developer_2": {
446
+ "LLM_type": "OpenAI",
447
+ "LLM": {
448
+ "temperature": 0.3,
449
+ "model": "gpt-3.5-turbo-16k-0613",
450
+ "log_path": "logs/Developer"
451
+ },
452
+ "style": {
453
+ "role": "Developer",
454
+ "style": "professional"
455
+ },
456
+ "task": {
457
+ "task": "Make further modifications to the code based on Leader's feedback."
458
+ },
459
+ "rule": {
460
+ "rule": "Incorporate Leader's feedback into the code and address any issues or improvements identified.,make code modifications that conform to standards like PEP8, are modular, easy to read, and maintainable. Output the modified code only"
461
+ },
462
+ "demonstrations": {
463
+ "demonstrations": " Review the feedback provided by Leader and apply the suggested modifications to the code. Document the changes made and ensure that the updated code aligns with the project's goals and quality standards. Provide examples of code segments before and after the modifications. Output the modified code only."
464
+ },
465
+ "ExtractComponent": {
466
+ "extract_words": [
467
+ "code"
468
+ ],
469
+ "system_prompt": "Please extract the modified code as completely as possible.",
470
+ "last_prompt": ""
471
+ }
472
+ },
473
+ "Coder": {
474
+ "LLM_type": "OpenAI",
475
+ "LLM": {
476
+ "temperature": 0.3,
477
+ "model": "gpt-3.5-turbo-16k-0613",
478
+ "log_path": "logs/Coder"
479
+ },
480
+ "style": {
481
+ "role": "Coder",
482
+ "style": "professional"
483
+ },
484
+ "CodeComponent": {
485
+ "file_name": "rps_game.py",
486
+ "keyword": "code"
487
+ }
488
+ }
489
+ }
490
+ }
491
+ }
492
+ }
gradio_backend.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import yaml
2
+ import os
3
+ import argparse
4
+ import sys
5
+ sys.path.append("../../../src/agents")
6
+ sys.path.append("../../Gradio_Config")
7
+ from agents.SOP import SOP
8
+ from agents.Agent import Agent
9
+ from agents.Environment import Environment
10
+ from agents.Memory import Memory
11
+ from gradio_base import Client
12
+ from run_gradio import DebateUI
13
+
14
def process(action):
    """Convert a finished agent action into a Memory record.

    Non-user utterances are echoed to stdout for server-side logging; the
    (role, name, response) triple is then wrapped in a Memory object so the
    environment can store it.
    """
    reply = action.response
    speaker = action.name
    role = action.role
    if not action.is_user:
        # Only machine-generated turns are echoed; user input was typed already.
        print(f"{speaker}({role}):{reply}")
    return Memory(role, speaker, reply)
22
+
23
def gradio_process(action,current_state):
    """Stream an action's response chunks to the UI server via Client.

    Status codes sent with each chunk: 10 = fresh message, 11 = continuation
    chunk, 12 = first message of a new state, 30 = a user turn (the UI must
    collect input).  After streaming, the accumulated text (or the user's
    typed reply) is written back onto ``action.response``.
    """
    response = action.response
    # NOTE(review): `response` is presumably an iterable/generator of text
    # chunks from the model -- confirm against Agent.step; if it were a plain
    # string this loop would iterate characters.  Also note `all` shadows the
    # builtin; it accumulates the streamed text.
    all = ""
    for i,res in enumerate(response):
        all+=res
        state = 10  # default: ordinary streamed chunk
        if action.is_user:
            # Human turn: the front-end must prompt for input instead.
            state = 30
        elif action.state_begin:
            state = 12
            action.state_begin = False
        elif i>0:
            state = 11
        send_name = f"{action.name}({action.role})"
        Client.send_server(str([state, send_name, res, current_state.name]))
        if state == 30:
            # print("client: waiting for input.")
            # Block until the UI forwards what the user typed.
            data: list = next(Client.receive_server)
            content = ""
            for item in data:
                if item.startswith("<USER>"):
                    # Payload format is "<USER>actual text"; keep the tail.
                    content = item.split("<USER>")[1]
                    break
            # print(f"client: recieved `{content}` from server")
            action.response = content
            break
        else:
            # Keep the full text accumulated so far on the action.
            action.response = all
51
+
52
def block_when_next(current_agent, current_state):
    """Pause before the next turn so the UI can pace the conversation.

    Relies on three class-level flags on Client:
      * LAST_USER -- previous turn was the user's, so never block right after;
      * FIRST_RUN -- never block before the very first turn;
      * mode      -- in SINGLE_MODE, send code 98 and wait for the UI's ack
                     before the next agent is allowed to speak.
    """
    if Client.LAST_USER:
        # The turn immediately after a user turn is never itself a user turn.
        assert not current_agent.is_user
        Client.LAST_USER = False
        return
    if current_agent.is_user:
        # if next turn is user, we don't handle it here
        Client.LAST_USER = True
        return
    if Client.FIRST_RUN:
        Client.FIRST_RUN = False
    else:
        # block current process
        if Client.mode == Client.SINGLE_MODE:
            # Code 98 = "next speaker ready"; wait for the UI to acknowledge.
            Client.send_server(str([98, f"{current_agent.name}({current_agent.state_roles[current_state.name]})", " ", current_state.name]))
            data: list = next(Client.receive_server)
68
+
69
+
70
def init(config):
    """Build the SOP, the agents and their shared environment from a config path."""
    if not os.path.exists("logs"):
        os.mkdir("logs")
    sop = SOP.from_config(config)
    agents, roles_to_names, names_to_roles = Agent.from_config(config)
    environment = Environment.from_config(config)
    # Wire the shared lookup tables and back-references together.
    environment.agents = agents
    environment.roles_to_names = roles_to_names
    environment.names_to_roles = names_to_roles
    sop.roles_to_names = roles_to_names
    sop.names_to_roles = names_to_roles
    for agent in agents.values():
        agent.environment = environment
    return agents, sop, environment
82
+
83
def run(agents,sop,environment):
    """Main driver loop: advance the SOP turn by turn until it finishes."""
    while True:
        current_state,current_agent= sop.next(environment,agents)
        block_when_next(current_agent, current_state)
        if sop.finished:
            print("finished!")
            # Code 99 tells the front-end the whole session is over.
            Client.send_server(str([99, ' ', ' ', current_state.name]))
            # NOTE(review): clearing the entire process environment wipes PATH
            # etc. -- presumably intended only to drop the API key; confirm.
            os.environ.clear()
            break
        action = current_agent.step(current_state,"")   #component_dict = current_state[self.role[current_node.name]] current_agent.compile(component_dict)
        # Stream the action to the UI, then record it in shared memory.
        gradio_process(action,current_state)
        memory = process(action)
        environment.update_memory(memory,current_state)
96
+
97
+
98
def prepare(agents, sop, environment):
    """Handshake with the gradio front-end, then apply its start-up choices.

    Publishes the default debate setup parsed from the begin_query, waits for
    the UI's "start" payload, then applies the user's configuration: optional
    cosplay (a human takes over one agent) and an edited topic/viewpoints.
    """
    client = Client()
    Client.send_server = client.send_message
    content = sop.states['Affirmative_Task_Allocation_state'].begin_query
    parse_data = DebateUI.extract(content)
    # Hoisted: the original computed this (identical) list twice.
    agents_name = DebateUI.convert2list4agentname(sop)[0]
    client.send_message(
        {
            "theme": parse_data[0],
            "positive": parse_data[1],
            "negative": parse_data[2],
            "agents_name": agents_name,
            "only_name": agents_name,
            "default_cos_play_id": -1
        }
    )
    client.listening_for_start_()
    # NOTE(review): Client.cache is read as a class attribute below -- confirm
    # listening_for_start_ stores the payload on the class, not the instance.
    client.mode = Client.mode = client.cache["mode"]
    # cover config and then start
    if Client.cache["cosplay"] is not None:
        agents[Client.cache["cosplay"]].is_user = True
    # Bug fix: the original chained assignment replaced the
    # 'Negative_Task_Allocation_state' State *object* with the merged string;
    # both states should merely receive the updated begin_query text.
    merged_query = DebateUI.merge(
        theme=Client.cache["theme"], positive=Client.cache["positive"], negative=Client.cache["negative"],
        origin_content=sop.states['Affirmative_Task_Allocation_state'].begin_query
    )
    sop.states['Affirmative_Task_Allocation_state'].begin_query = merged_query
    sop.states['Negative_Task_Allocation_state'].begin_query = merged_query
123
+
124
+
125
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='A demo of chatbot')
    parser.add_argument('--agent', type=str, help='path to SOP json', default="config.json")
    args = parser.parse_args()

    agents,sop,environment = init(args.agent)

    # add ==============================
    # Handshake with the gradio front-end before the SOP loop starts.
    prepare(agents, sop, environment)
    # ==================================

    run(agents,sop,environment)
137
+
138
+
run_gradio.py ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ sys.path.append("../../Gradio_Config")
3
+
4
+ from gradio_base import UIHelper, WebUI
5
+ import os
6
+ from gradio_base import WebUI, UIHelper, PORT, HOST, Client
7
+ from gradio_config import GradioConfig as gc
8
+ from typing import List, Tuple, Any
9
+ import gradio as gr
10
+ import time
11
+
12
class DebateUI(WebUI):
    """Gradio front-end for the debate demo.

    Subclasses WebUI: owns the Blocks layout, forwards user actions to the
    backend process over the base class's socket, and streams the backend's
    tokens into the chatbot as generator callbacks.
    """

    # Template used by merge() to rebuild a state's begin_query:
    # {prefix}\n<debate topic>\n{theme}\nAffirmative viewpoint:{pos}\nNegative viewpoint:{neg}\n<debate topic>{suffix}
    FORMAT = "{}\n<debate topic>\n{}\nAffirmative viewpoint:{}\nNegative viewpoint:{}\n<debate topic>{}"
    # Radio option meaning "the user plays no agent, just watches".
    AUDIENCE = "Audience"
    # Presumably filled by first_recieve_from_client() (WebUI base class) with
    # the theme/positive/negative/agents_name/only_name/... dict sent by the
    # backend — TODO confirm against gradio_base.
    cache = {}
    all_agents_name = []
    # Generator yielding batches of backend messages; set by the base class.
    receive_server = None

    @classmethod
    def extract(cls, content):
        """Parse ``(topic, positive, negative)`` out of a begin_query string.

        NOTE(review): the split markers here use lowercase
        "negative viewpoint:" while FORMAT writes "Negative viewpoint:" —
        presumably the config's original begin_query uses the lowercase
        form; confirm that merge() output round-trips through extract().
        """
        topic = content.split("<debate topic>")[1].split("Affirmative viewpoint:")[0]
        positive = content.split("<debate topic>")[1].split("Affirmative viewpoint:")[1].split("negative viewpoint:")[0]
        negative = content.split("<debate topic>")[1].split("Affirmative viewpoint:")[1].split("negative viewpoint:")[1]
        return topic.strip(), positive.strip(), negative.strip()

    @classmethod
    def merge(cls, theme, positive, negative, origin_content) -> str:
        """Rebuild a begin_query from user-edited fields, keeping the text
        before the first and after the last "<debate topic>" marker."""
        return cls.FORMAT.format(
            origin_content.split("<debate topic>")[0],
            theme, positive, negative,
            origin_content.split("<debate topic>")[-1]
        )

    @classmethod
    def convert2list4agentname(cls, sop):
        """Collect agent names from the SOP's role mapping.

        Returns:
            (agent_name, only_name): ``agent_name`` is a sorted, de-duplicated
            list of "Name(Role)" strings plus the AUDIENCE option (for the
            cosplay radio); ``only_name`` is the raw list of bare names,
            possibly with duplicates across states.
        """
        only_name = []
        agent_name = []
        roles_to_names = sop.roles_to_names
        for state_name, roles_names in roles_to_names.items():
            for role, name in roles_names.items():
                agent_name.append(f"{name}({role})")
                only_name.append(name)
        agent_name.append(cls.AUDIENCE)
        # De-duplicate, then sort for a stable radio-button order.
        agent_name = list(set(agent_name))
        agent_name.sort()
        return agent_name, only_name

    def render_and_register_ui(self):
        # Register each agent's bare name with the global gradio config so
        # bubbles get per-agent styling.
        gc.add_agent(self.cache["only_name"])

    def __init__(
        self,
        client_cmd: list,           # command used to spawn the backend process
        socket_host: str = HOST,
        socket_port: int = PORT,
        bufsize: int = 1024,
        ui_name: str = "DebateUI"
    ):
        """Start the socket server, spawn the client, and wait for its
        initial config message before any UI is built."""
        super(DebateUI, self).__init__(client_cmd, socket_host, socket_port, bufsize, ui_name)
        self.first_recieve_from_client()
        # Per-node message buffer: list of {agent_name: accumulated_tokens}.
        self.data_history = list()
        # 0 = normal flow (clear history on next round); 1 = set by
        # btn_next_when_click to keep data_history across the next call.
        self.caller = 0

    def handle_message(self, history: list,
                       state, agent_name, token, node_name):
        """Fold one streamed token into the chat history and re-render.

        ``state % 10`` encodes the protocol: 0 = new speaker bubble,
        1 = append token to the current speaker's bubble, 2 = new SOP node
        (fresh chatbot row, buffer reset).
        """
        if state % 10 == 0:
            self.data_history.append({agent_name: token})
        elif state % 10 == 1:
            # Same state. Need to add new bubble in same bubble.
            self.data_history[-1][agent_name] += token
        elif state % 10 == 2:
            # New state. Need to add new bubble.
            history.append([None, ""])
            self.data_history.clear()
            self.data_history.append({agent_name: token})
        else:
            assert False, "Invalid state."
        # NOTE(review): "True or state % 10 == 2" is always True, so the node
        # name is rendered on every update — presumably intentional; confirm.
        render_data = self.render_bubble(history, self.data_history, node_name, render_node_name= True or state % 10 == 2)
        return render_data

    def start_button_when_click(self, theme, positive, negative, choose, mode):
        """Send the start command with the user's settings to the backend.

        inputs=[self.text_theme, self.text_positive, self.text_negative, self.radio_choose],
        outputs=[self.chatbot, self.btn_send]
        """
        # AUDIENCE means no cosplay; otherwise strip "(Role)" to the bare name.
        cosplay = None if choose == self.AUDIENCE else choose.split("(")[0]
        message = dict(theme=theme, positive=positive, negative=negative, cosplay=cosplay, mode=mode)
        self.send_start_cmd(message=message)
        return gr.Chatbot.update(
            visible=True
        ), gr.Button.update(visible=False)

    def start_button_after_click(self, history):
        """Generator callback: stream backend messages into the chatbot.

        Runs until a terminal protocol state arrives: 30 = backend wants user
        input (show textbox/send), 99 = debate finished, 98 = single-step mode
        pause (show "next agent" button); everything else is a token update.

        inputs=[self.chatbot],
        outputs=[self.chatbot, self.text_user, self.btn_send, self.btn_reset, self.btn_next]
        """
        if self.caller == 0:
            # not single mode
            self.data_history = list()
        self.caller = 0
        receive_server = self.receive_server
        while True:
            data_list: List = receive_server.send(None)
            for item in data_list:
                # NOTE(review): eval() on data received over the socket — the
                # peer is the locally spawned backend, but this still executes
                # arbitrary expressions; ast.literal_eval would be safer.
                data = eval(item)
                assert isinstance(data, list)
                state, agent_name, token, node_name = data
                assert isinstance(state, int)
                if state == 30:
                    # user input
                    yield history,\
                        gr.Textbox.update(visible=True, interactive=True), \
                        gr.Button.update(visible=True, interactive=True),\
                        gr.Button.update(visible=True, interactive=True),\
                        gr.Button.update(visible=False)
                    return
                elif state == 99:
                    # finish
                    yield history, gr.Textbox.update(visible=True, interactive=False, value="finish!"), \
                        gr.Button.update(visible=True, interactive=False, value="finish!"), gr.Button.update(visible=True, interactive=True),\
                        gr.Button.update(visible=False)
                elif state == 98:
                    # Single-step pause: only the "next agent" button stays visible.
                    yield history, \
                        gr.Textbox.update(visible=False, interactive=False), \
                        gr.Button.update(visible=False, interactive=False),\
                        gr.Button.update(visible=False, interactive=False),\
                        gr.Button.update(visible=True, value=f"Next Agent: 🤖{agent_name} | Next Node: ⭕{node_name}")
                    return
                else:
                    # Streaming token: fold into history and re-render.
                    history = self.handle_message(history, state, agent_name, token, node_name)
                    yield history, \
                        gr.Textbox.update(visible=False, interactive=False), \
                        gr.Button.update(visible=False, interactive=False),\
                        gr.Button.update(visible=False, interactive=False),\
                        gr.Button.update(visible=False)

    def send_button_when_click(self, text_user, history: list):
        """Echo the user's message into the chat and forward it to the backend.

        inputs=[self.text_user, self.chatbot],
        outputs=[self.text_user, self.btn_send, self.chatbot]
        """
        history.append(
            [UIHelper.wrap_css(text_user, "User"), None]
        )
        # print(f"server: send {text_user} to client")
        # "<USER>" prefix marks user input; SIGN["SPLIT"] is the protocol's
        # message delimiter (defined in the WebUI base class).
        self.send_message("<USER>"+text_user+self.SIGN["SPLIT"])
        return gr.Textbox.update(value="", visible=False),\
            gr.Button.update(visible=False), \
            history,\
            gr.Button.update(visible=False)

    def reset_button_when_click(self, history, text_positive, text_negative, text_theme, text_user, btn_send, btn_start, btn_reset):
        """Phase 1 of restart: blank all fields and lock the buttons while
        reset_button_after_click tears down and re-spawns the backend.

        self.chatbot,
        self.text_positive,
        self.text_negative,
        self.text_theme,
        self.text_user,
        self.btn_send,
        self.btn_start,
        self.btn_reset
        self.btn_next
        """
        self.caller = 0
        return None, \
            "", \
            "", \
            "", \
            "", \
            gr.Button.update(value="Restarting...", interactive=False, visible=True),\
            gr.Button.update(value="Restarting...", interactive=False, visible=True),\
            gr.Button.update(value="Restarting...", interactive=False, visible=True),\
            gr.Button.update(value="Restarting...", interactive=False, visible=False)

    def reset_button_after_click(self, history, text_positive, text_negative, text_theme, text_user, btn_send, btn_start, btn_reset):
        """Phase 2 of restart: reset the socket/backend (base class), re-read
        the initial config, and restore the pre-start widget layout."""
        self.reset()
        self.first_recieve_from_client(reset_mode=True)
        return gr.Chatbot.update(value=None, visible=False),\
            gr.Textbox.update(value=f"{self.cache['positive']}", interactive=True, visible=True),\
            gr.Textbox.update(value=f"{self.cache['negative']}", interactive=True, visible=True),\
            gr.Textbox.update(value=f"{self.cache['theme']}", interactive=True, visible=True),\
            gr.Textbox.update(value=f"", interactive=True, visible=False),\
            gr.Button.update(interactive=True, visible=False, value="Send"),\
            gr.Button.update(interactive=True, visible=True, value="Start"),\
            gr.Button.update(interactive=False, visible=False, value="Restart"),\
            gr.Button.update(interactive=True, visible=False, value="Next Agent")

    def btn_next_when_click(self):
        """Single-step mode: hide the button and nudge the backend to run the
        next agent (the payload content is ignored, only the wake-up matters)."""
        yield gr.Button.update(visible=False)
        self.send_message("nothing")
        self.caller = 1  # will note clear the self.data_history
        time.sleep(0.5)
        return

    def construct_ui(
        self,
        theme: str = None,
        positive: str = None,
        negative: str = None,
        agents_name: List = None,
        default_cos_play_id: int = None
    ):
        """Build the Blocks layout and wire all callbacks; stores the result
        in ``self.demo``. Any argument left as None falls back to the value
        the backend sent during the initial handshake (``self.cache``)."""
        theme = self.cache["theme"] if theme is None else theme
        positive = self.cache["positive"] if positive is None else positive
        negative = self.cache["negative"] if negative is None else negative
        agents_name = self.cache["agents_name"] if agents_name is None else agents_name
        default_cos_play_id = self.cache["default_cos_play_id"] if default_cos_play_id is None else default_cos_play_id

        with gr.Blocks(css=gc.CSS) as demo:
            with gr.Row():
                # Left column: debate configuration entered before starting.
                with gr.Column():
                    self.radio_mode = gr.Radio(
                        [Client.AUTO_MODE, Client.SINGLE_MODE],
                        value=Client.AUTO_MODE,
                        interactive=True,
                        label = Client.MODE_LABEL,
                        info = Client.MODE_INFO
                    )
                    self.text_theme = gr.Textbox(
                        label="Debate Topic:",
                        value=theme,
                        placeholder="Please input the Debate Topic"
                    )
                    self.text_positive = gr.Textbox(
                        label="Affirmative viewpoint:",
                        value=positive,
                        placeholder="Please input the Affirmative viewpoint"
                    )
                    self.text_negative = gr.Textbox(
                        label="Negative viewpoint:",
                        value=negative,
                        placeholder="Please input the Negative viewpoint"
                    )
                    self.radio_choose = gr.Radio(
                        agents_name,
                        value=agents_name[default_cos_play_id],
                        label="User'agent",
                        interactive=True
                    )
                    self.btn_start = gr.Button(
                        value="run"
                    )
                # Chat widgets start hidden; start_button_when_click reveals them.
                VISIBLE = False
                # Right column: the live debate transcript and controls.
                with gr.Column():
                    self.chatbot = gr.Chatbot(
                        height= 650,
                        elem_id="chatbot1",
                        label="Dialog",
                        visible=VISIBLE
                    )
                    self.btn_next = gr.Button(
                        value="Next Agent Start",
                        visible=False
                    )
                    self.text_user = gr.Textbox(
                        label="Input",
                        placeholder="Input here",
                        visible=VISIBLE
                    )
                    self.btn_send = gr.Button(
                        value="Send",
                        visible=VISIBLE
                    )
                    self.btn_reset = gr.Button(
                        value="Restart",
                        visible=VISIBLE
                    )

            # Start: send config, then stream the debate into the chatbot.
            self.btn_start.click(
                fn=self.start_button_when_click,
                inputs=[self.text_theme, self.text_positive, self.text_negative, self.radio_choose, self.radio_mode],
                outputs=[self.chatbot, self.btn_start]
            ).then(
                fn=self.start_button_after_click,
                inputs=[self.chatbot],
                outputs=[self.chatbot, self.text_user, self.btn_send, self.btn_reset, self.btn_next]
            )

            # Send: forward user input, then resume streaming.
            self.btn_send.click(
                fn=self.send_button_when_click,
                inputs=[self.text_user, self.chatbot],
                outputs=[self.text_user, self.btn_send, self.chatbot, self.btn_reset]
            ).then(
                fn=self.start_button_after_click,
                inputs=[self.chatbot],
                outputs=[self.chatbot, self.text_user, self.btn_send, self.btn_reset, self.btn_next]
            )

            # Restart: two-phase reset (lock UI, then rebuild from cache).
            self.btn_reset.click(
                fn=self.reset_button_when_click,
                inputs=[
                    self.chatbot,
                    self.text_positive,
                    self.text_negative,
                    self.text_theme,
                    self.text_user,
                    self.btn_send,
                    self.btn_start,
                    self.btn_reset
                ],
                outputs=[
                    self.chatbot,
                    self.text_positive,
                    self.text_negative,
                    self.text_theme,
                    self.text_user,
                    self.btn_send,
                    self.btn_start,
                    self.btn_reset,
                    self.btn_next
                ]
            ).then(
                fn=self.reset_button_after_click,
                inputs=[
                    self.chatbot,
                    self.text_positive,
                    self.text_negative,
                    self.text_theme,
                    self.text_user,
                    self.btn_send,
                    self.btn_start,
                    self.btn_reset
                ],
                outputs=[
                    self.chatbot,
                    self.text_positive,
                    self.text_negative,
                    self.text_theme,
                    self.text_user,
                    self.btn_send,
                    self.btn_start,
                    self.btn_reset,
                    self.btn_next
                ]
            )

            # Single-step mode: wake the backend, then resume streaming.
            self.btn_next.click(
                fn=self.btn_next_when_click,
                inputs=[],
                outputs=[self.btn_next]
            ).then(
                fn=self.start_button_after_click,
                inputs=[self.chatbot],
                outputs=[self.chatbot, self.text_user, self.btn_send, self.btn_reset, self.btn_next]
            )

            self.demo = demo
349
+
350
+
351
if __name__ == '__main__':
    # Launch the debate web UI; the backend process is spawned by the UI
    # itself via client_cmd, and the app is shared publicly.
    interface = DebateUI(client_cmd=["python", "gradio_backend.py"])
    interface.construct_ui()
    interface.run(share=True)