AliArshad committed on
Commit
40f70ca
·
verified ·
1 Parent(s): e2f4454

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +140 -82
app.py CHANGED
@@ -1,49 +1,81 @@
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
 
 
3
 
4
- client = InferenceClient(
5
- "mistralai/Mixtral-8x7B-Instruct-v0.1"
6
- )
7
 
8
- def format_prompt(message, history):
9
- prompt = "<s>"
10
- for user_prompt, bot_response in history:
11
- prompt += f"[INST] {user_prompt} [/INST]"
12
- prompt += f" {bot_response}</s> "
13
- prompt += f"[INST] {message} [/INST]"
14
- return prompt
15
-
16
- def generate(
17
- prompt, history, system_prompt, temperature=0.9, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0,
18
- ):
19
- temperature = float(temperature)
20
- if temperature < 1e-2:
21
- temperature = 1e-2
22
-
23
- top_p = float(top_p)
24
- generate_kwargs = dict(
25
- temperature=temperature,
26
- max_new_tokens=max_new_tokens,
27
- top_p=top_p,
28
- repetition_penalty=repetition_penalty,
29
- do_sample=True,
30
- seed=42,
31
  )
 
 
 
32
 
33
- # Check if the history is empty. If so, prepend the system prompt.
34
- if not history:
35
- formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
36
- else:
37
- formatted_prompt = format_prompt(prompt, history)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
- stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
40
- output = ""
41
- for response in stream:
42
- output += response.token.text
43
- yield output
44
- return output
45
 
46
- # Define a default system prompt for mental health support
47
  DEFAULT_SYSTEM_PROMPT = """You are a supportive AI assistant trained to provide emotional support and general guidance.
48
  Remember to:
49
  1. Show empathy and understanding
@@ -52,52 +84,78 @@ Remember to:
52
  4. Encourage professional help when appropriate
53
  5. Maintain boundaries and ethical guidelines"""
54
 
55
- additional_inputs = [
56
- gr.Textbox(
57
- label="System Prompt",
58
- value=DEFAULT_SYSTEM_PROMPT,
59
- lines=3,
60
- interactive=True,
61
- ),
62
- gr.Slider(
63
- label="Temperature",
64
- minimum=0.1,
65
- maximum=1.0,
66
- value=0.9,
67
- step=0.1
68
- ),
69
- gr.Slider(
70
- label="Max New Tokens",
71
- minimum=64,
72
- maximum=1024,
73
- value=512,
74
- step=64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
  )
76
- ]
77
 
78
- examples = [
79
- ["Patient is feeling stressed due to work and has trouble sleeping.", DEFAULT_SYSTEM_PROMPT, 0.9, 512, 0.95, 1.0],
80
- ["Client is dealing with relationship issues and is seeking advice on communication strategies.", DEFAULT_SYSTEM_PROMPT, 0.9, 512, 0.95, 1.0],
81
- ["Individual has recently experienced a loss and is having difficulty coping with grief.", DEFAULT_SYSTEM_PROMPT, 0.9, 512, 0.95, 1.0],
82
- ]
83
 
84
- # Create the interface
85
- demo = gr.ChatInterface(
86
- fn=generate,
87
- chatbot=gr.Chatbot(
88
- show_label=False,
89
- show_share_button=False,
90
- show_copy_button=True,
91
- layout="panel",
92
- type="messages" # Updated to use the new message format
93
- ),
94
- additional_inputs=additional_inputs,
95
- title="PsyAssist - ADVANCING MENTAL HEALTH SUPPORT WITH AI-DRIVEN INTERACTION",
96
- description="""This is an AI-powered mental health support chatbot. While it can provide emotional support and general guidance,
97
- it is not a replacement for professional mental health services. In case of emergency, please contact your local mental health crisis hotline.""",
98
- examples=examples,
99
- concurrency_limit=20,
100
- )
101
 
102
  if __name__ == "__main__":
103
- demo.launch(show_api=False)
 
 
 
 
 
from huggingface_hub import InferenceClient
import gradio as gr
import logging
import sys

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Shared inference client for the hosted Mixtral instruct model.
# If the client cannot be constructed there is nothing useful the app
# can do, so log the failure and exit immediately.
try:
    client = InferenceClient(
        "mistralai/Mixtral-8x7B-Instruct-v0.1"
    )
except Exception as e:
    logger.error(f"Failed to initialize Hugging Face client: {str(e)}")
    sys.exit(1)
 
18
+ def format_prompt(message, history):
19
+ try:
20
+ prompt = "<s>"
21
+ if history:
22
+ for user_prompt, bot_response in history:
23
+ prompt += f"[INST] {user_prompt} [/INST]"
24
+ prompt += f" {bot_response}</s> "
25
+ prompt += f"[INST] {message} [/INST]"
26
+ logger.info(f"Formatted prompt: {prompt}")
27
+ return prompt
28
+ except Exception as e:
29
+ logger.error(f"Error in format_prompt: {str(e)}")
30
+ return None
31
+
32
+ def generate(message, chat_history, system_prompt, temperature=0.9, max_new_tokens=512, top_p=0.95):
33
+ try:
34
+ logger.info(f"Received message: {message}")
35
+ logger.info(f"System prompt: {system_prompt}")
36
+
37
+ # Format the full prompt
38
+ if not chat_history:
39
+ full_message = f"{system_prompt}\n\nUser: {message}"
40
+ else:
41
+ full_message = message
42
+
43
+ formatted_prompt = format_prompt(full_message, chat_history)
44
+
45
+ if not formatted_prompt:
46
+ return "I encountered an error formatting your message. Please try again."
47
+
48
+ # Generation parameters
49
+ generate_kwargs = dict(
50
+ temperature=float(temperature),
51
+ max_new_tokens=int(max_new_tokens),
52
+ top_p=float(top_p),
53
+ do_sample=True,
54
+ seed=42,
55
+ )
56
+
57
+ logger.info("Starting generation with parameters: %s", generate_kwargs)
58
+
59
+ # Generate response
60
+ response_stream = client.text_generation(
61
+ formatted_prompt,
62
+ **generate_kwargs,
63
+ stream=True,
64
+ details=True,
65
+ return_full_text=False
66
+ )
67
+
68
+ partial_message = ""
69
+ for response in response_stream:
70
+ if response.token.text:
71
+ partial_message += response.token.text
72
+ yield partial_message
73
 
74
+ except Exception as e:
75
+ logger.error(f"Error in generate function: {str(e)}")
76
+ yield f"I encountered an error: {str(e)}"
 
 
 
77
 
78
+ # Define the default system prompt
79
  DEFAULT_SYSTEM_PROMPT = """You are a supportive AI assistant trained to provide emotional support and general guidance.
80
  Remember to:
81
  1. Show empathy and understanding
 
84
  4. Encourage professional help when appropriate
85
  5. Maintain boundaries and ethical guidelines"""
86
 
87
# Define the interface
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=500)
    msg = gr.Textbox(label="Message", placeholder="Type your message here...")

    # Tunables hidden behind an accordion so the default UI stays simple.
    with gr.Accordion("Advanced Options", open=False):
        system_prompt = gr.Textbox(
            value=DEFAULT_SYSTEM_PROMPT,
            label="System Prompt",
            lines=3
        )
        temperature = gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.9,
            step=0.1,
            label="Temperature"
        )
        max_new_tokens = gr.Slider(
            minimum=64,
            maximum=1024,
            value=512,
            step=64,
            label="Max Tokens"
        )

    clear = gr.Button("Clear")

    def user(user_message, history):
        """Append the submitted message (with a pending reply slot) to the
        chat history and clear the input box."""
        return "", history + [[user_message, None]]

    def bot(history, system_prompt, temperature, max_new_tokens):
        """Stream the assistant's reply into the last history entry."""
        if not history:
            # BUG FIX: this is a generator function, so `return history`
            # would silently discard the value — yield it instead.
            yield history
            return

        user_message = history[-1][0]
        history[-1][1] = ""

        # Re-yield the whole history on every chunk so the chatbot
        # component re-renders the growing reply.
        for chunk in generate(
            user_message,
            history[:-1],
            system_prompt,
            temperature,
            max_new_tokens
        ):
            history[-1][1] = chunk
            yield history

    # Submit pipeline: record the user turn immediately (unqueued), then
    # stream the bot reply.
    msg.submit(
        user,
        [msg, chatbot],
        [msg, chatbot],
        queue=False
    ).then(
        bot,
        [chatbot, system_prompt, temperature, max_new_tokens],
        chatbot
    )

    clear.click(lambda: None, None, chatbot, queue=False)

    gr.Markdown("""
    # PsyAssist - ADVANCING MENTAL HEALTH SUPPORT WITH AI-DRIVEN INTERACTION

    **Important Notice**: This is an AI-powered mental health support chatbot. While it can provide emotional support
    and general guidance, it is not a replacement for professional mental health services. In case of emergency,
    please contact your local mental health crisis hotline.
    """)
 
 
 
 
 
 
 
 
 
 
155
 
156
  if __name__ == "__main__":
157
+ try:
158
+ demo.queue().launch(show_api=False)
159
+ except Exception as e:
160
+ logger.error(f"Failed to launch Gradio interface: {str(e)}")
161
+ sys.exit(1)