mimifuel2018 committed on
Commit c126a1f
1 Parent(s): 6436fcc

Update app.py

Files changed (1):
  1. app.py +33 -15
app.py CHANGED
@@ -1,26 +1,36 @@
 import os
 import gradio as gr
 from http import HTTPStatus
+from typing import List, Optional, Tuple, Dict
 import dashscope
 from dashscope import Generation
 from dashscope.api_entities.dashscope_response import Role
-from typing import List, Optional, Tuple, Dict
 
+# Configuration
 default_system = 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.'
-
-dashscope.api_key = os.getenv('YOUR_API_TOKEN')  # Ensure this token is managed securely
+dashscope.api_key = os.getenv('HF_TOKEN')  # Replace 'YOUR_API_TOKEN' with your actual API token.
 
+# Typing definitions
 History = List[Tuple[str, str]]
 Messages = List[Dict[str, str]]
 
+# Function to log chat history to logs.txt
+def log_history_to_file(query: str, response: str, file_path="logs.txt"):
+    with open(file_path, "a") as f:
+        f.write(f"User: {query}\n")
+        f.write(f"Assistant: {response}\n\n")
+
+# Function to clear session history
 def clear_session() -> History:
     return '', []
 
+# Function to modify system session prompt
 def modify_system_session(system: str) -> str:
-    if system is None or len(system) == 0:
+    if not system:
         system = default_system
     return system, system, []
 
+# Convert history to messages format
 def history_to_messages(history: History, system: str) -> Messages:
     messages = [{'role': Role.SYSTEM, 'content': system}]
     for h in history:
@@ -28,20 +38,22 @@ def history_to_messages(history: History, system: str) -> Messages:
         messages.append({'role': Role.ASSISTANT, 'content': h[1]})
     return messages
 
+# Convert messages back to history format
 def messages_to_history(messages: Messages) -> Tuple[str, History]:
     assert messages[0]['role'] == Role.SYSTEM
     system = messages[0]['content']
     history = []
     for q, r in zip(messages[1::2], messages[2::2]):
-        history.append([q['content'], r['content']])
+        history.append((q['content'], r['content']))
     return system, history
 
-def model_chat(query: Optional[str], history: Optional[History], system: str
-               ) -> Tuple[str, str, History]:
+# Main function for chat
+def model_chat(query: Optional[str], history: Optional[History], system: str) -> Tuple[str, str, History]:
     if query is None:
         query = ''
     if history is None:
         history = []
+
     messages = history_to_messages(history, system)
     messages.append({'role': Role.USER, 'content': query})
 
@@ -51,14 +63,18 @@ def model_chat(query: Optional[str], history: Optional[History], system: str
         messages=messages,
         result_format='message',
         stream=True,
-        max_new_tokens=150  # Set response length limit
+        max_new_tokens=150  # Limit response length to save resources
     )
 
     for response in gen:
         if response.status_code == HTTPStatus.OK:
             role = response.output.choices[0].message.role
-            response = response.output.choices[0].message.content
-            system, history = messages_to_history(messages + [{'role': role, 'content': response}])
+            response_text = response.output.choices[0].message.content
+
+            # Log chat to file
+            log_history_to_file(query, response_text)
+
+            system, history = messages_to_history(messages + [{'role': role, 'content': response_text}])
             yield '', history, system
         else:
             raise ValueError('Request id: %s, Status code: %s, error code: %s, error message: %s' % (
@@ -68,7 +84,7 @@ def model_chat(query: Optional[str], history: Optional[History], system: str
 
 # Gradio Interface Setup
 with gr.Blocks() as demo:
-    gr.Markdown("""<center><font size=8>Qwen2.5-72B-Instruct👾</center>""")
+    gr.Markdown("<center><font size=8>Qwen2.5-72B-Instruct👾</center>")
 
     with gr.Row():
         with gr.Column(scale=3):
@@ -76,6 +92,7 @@ with gr.Blocks() as demo:
         with gr.Column(scale=1):
             modify_system = gr.Button("🛠️ Set system prompt and clear history", scale=2)
         system_state = gr.Textbox(value=default_system, visible=False)
+
     chatbot = gr.Chatbot(label='Qwen2.5-72B-Instruct')
     textbox = gr.Textbox(lines=1, label='Input')
 
@@ -83,26 +100,27 @@ with gr.Blocks() as demo:
     clear_history = gr.Button("🧹 Clear history")
     submit = gr.Button("🚀 Send")
 
+    # Link buttons to functions
     textbox.submit(model_chat,
                    inputs=[textbox, chatbot, system_state],
                    outputs=[textbox, chatbot, system_input],
-                   concurrency_limit=5)  # Reduced for free plan
+                   concurrency_limit=5)
 
     submit.click(model_chat,
                  inputs=[textbox, chatbot, system_state],
                  outputs=[textbox, chatbot, system_input],
                  concurrency_limit=5)
-
+
     clear_history.click(fn=clear_session,
                         inputs=[],
                         outputs=[textbox, chatbot],
                         concurrency_limit=5)
-
+
     modify_system.click(fn=modify_system_session,
                         inputs=[system_input],
                         outputs=[system_state, system_input, chatbot],
                         concurrency_limit=5)
 
-# Launching with reduced threads for free plan
+# Launching Gradio Interface with reduced threads for free plan
 demo.queue(api_open=False)
 demo.launch(max_threads=10)
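
Note: the tuple change in messages_to_history is easy to sanity-check in isolation. Below is a minimal, dashscope-free sketch of the History/Messages round trip, not the committed code: plain string roles stand in for dashscope's Role constants, and the user-turn append fills a context line the hunks above do not show.

from typing import Dict, List, Tuple

History = List[Tuple[str, str]]
Messages = List[Dict[str, str]]

def history_to_messages(history: History, system: str) -> Messages:
    messages = [{'role': 'system', 'content': system}]
    for user_turn, assistant_turn in history:
        messages.append({'role': 'user', 'content': user_turn})
        messages.append({'role': 'assistant', 'content': assistant_turn})
    return messages

def messages_to_history(messages: Messages) -> Tuple[str, History]:
    assert messages[0]['role'] == 'system'
    system = messages[0]['content']
    # Pair user/assistant turns; the commit switches each pair to a tuple.
    history = [(q['content'], r['content'])
               for q, r in zip(messages[1::2], messages[2::2])]
    return system, history

system, history = 'You are a helpful assistant.', [('Hi', 'Hello!')]
assert messages_to_history(history_to_messages(history, system)) == (system, history)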
 
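The streaming loop in model_chat relies on dashscope's Generation.call returning an iterable of partial responses when stream=True. Below is a self-contained sketch of that call pattern under the diff's parameters; 'qwen-max' is a hypothetical placeholder, since the commit's actual model name sits in context lines the hunks do not render, and HF_TOKEN is assumed to hold a DashScope API key.

import os
from http import HTTPStatus

import dashscope
from dashscope import Generation

dashscope.api_key = os.getenv('HF_TOKEN')  # a DashScope key, as the updated app.py expects

gen = Generation.call(
    model='qwen-max',         # hypothetical placeholder; the real model name is in elided context
    messages=[{'role': 'user', 'content': 'Hello'}],
    result_format='message',  # message-style choices, as used in the diff
    stream=True,              # returns an iterator of partial responses
    max_new_tokens=150,       # length cap carried over verbatim from the diff
)

for response in gen:
    if response.status_code == HTTPStatus.OK:
        # By default each streamed chunk carries the message text so far.
        print(response.output.choices[0].message.content)
    else:
        print('Error %s: %s' % (response.code, response.message))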