# HoraCare / app.py
# Author: saksornr — "Update app.py" (commit 05c6ebe, verified)
from openai import OpenAI
import gradio as gr
import os
import json
# API key for the OpenTyphoon service; export TYPHOON_API_KEY before launching.
# NOTE(review): no fail-fast check here — if the variable is unset, requests
# will only fail later with an auth error from the API.
api_key = os.getenv("TYPHOON_API_KEY")
# OpenAI-compatible client pointed at the OpenTyphoon endpoint.
client = OpenAI(
    base_url='https://api.opentyphoon.ai/v1',
    api_key=api_key,
)
# Default persona: an empathetic Thai fortune-teller ("แม่หมอแพตตี้") who
# interviews the user about their problem, then draws one Tarot card.
# Shown (and editable) in the Setting tab; sent as the system message.
default_system_prompt = """\
You are an empathetic Thai woman assistant named แม่หมอแพตตี้. (Thai woman will say 'ค่ะ').
You provide insights and support offering clarity and healing.
You always answer in Thai.
First, you need to know these insight ask each one separately.
- What is the problem that user faced.
- How long that user faced.
If the statement is not clear and concise, you can ask multiple times.
And then, you will open one Tarot cards and explain the future of how to fix the problem."""
def predict(message, history, system_prompt, model_id, temperature):
    """Stream an assistant reply for *message* from the chat-completions API.

    Args:
        message: The new user message (str).
        history: List of (user, assistant) string pairs; only the last three
            exchanges are sent to bound the prompt size.
        system_prompt: System message placed first in the request.
        model_id: Model identifier passed to the API.
        temperature: Sampling temperature (0-1).

    Yields:
        The accumulated assistant reply so far (str), growing with each chunk,
        which is what Gradio expects for a streaming chatbot.
    """
    messages = [{"role": "system", "content": system_prompt}]
    # Keep only the last 3 exchanges; skip empty/non-string slots so the
    # request never carries blank turns.
    for human, assistant in history[-3:]:
        if isinstance(human, str) and human.strip():
            messages.append({"role": "user", "content": human})
        if isinstance(assistant, str) and assistant.strip():
            messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    response = client.chat.completions.create(
        model=model_id,
        messages=messages,
        temperature=temperature,
        stream=True,
    )

    partial_message = ""
    for chunk in response:
        # Fix: some OpenAI-compatible backends emit chunks with an empty
        # `choices` list (e.g. keep-alive or usage-only chunks); indexing
        # [0] unconditionally raised IndexError on those.
        if not chunk.choices:
            continue
        delta = chunk.choices[0].delta.content
        if delta is not None:
            partial_message += delta
            yield partial_message
def chat_bot(user_input, history, system_prompt, model_id, temperature):
    """Run one chat turn for the Gradio UI.

    Appends a placeholder (user, "") pair to *history*, then streams the
    assistant reply from predict(), updating the last pair in place.

    Yields:
        ("", history) on each chunk — the empty string clears the input box
        while the chatbot component re-renders the growing reply.
    """
    stream = predict(user_input, history, system_prompt, model_id, temperature)
    history.append((user_input, ""))
    for partial_reply in stream:
        history[-1] = (user_input, partial_reply)
        yield "", history
def get_log(history, system_prompt):
    """Serialize the conversation to a JSON-array string for the log viewer.

    Args:
        history: List of (user, assistant) pairs as kept by the Chatbot.
        system_prompt: Current system prompt, emitted as the first entry.

    Returns:
        A string of the form "[\\n{...},\\n{...}\\n]" — one compact JSON
        object per line (ensure_ascii=False keeps Thai text readable).
    """
    entries = [
        json.dumps({"role": "system", "content": system_prompt}, ensure_ascii=False)
    ]
    for pair in history:
        # Pair order maps onto (user, assistant); blank or non-string
        # slots are skipped.
        for role, text in zip(("user", "assistant"), pair):
            if isinstance(text, str) and text.strip():
                entries.append(
                    json.dumps({"role": role, "content": text}, ensure_ascii=False)
                )
    return "[\n" + ",\n".join(entries) + "\n]"
# Layout CSS: make the app fill the viewport and let the chatbot pane
# stretch/scroll inside the flex column.
CSS ="""
.contain { display: flex; flex-direction: column; }
.gradio-container { height: 100vh !important; }
#component-0 { height: 80%; }
#chatbot { flex-grow: 1; overflow: auto;}
"""
# --- Gradio UI wiring ------------------------------------------------------
# Two tabs: "Chat" (the chatbot itself) and "Setting" (model knobs plus a
# read-only JSON view of the current conversation).
with gr.Blocks(css=CSS) as demo:
    gr.HTML("""<h1><center>HoraCare 🫶</center></h1>
<center> Version 2 </center>
""")

    with gr.Tab("Chat"):
        chatbot = gr.Chatbot(elem_id="chatbot")
        # Free-text input; pressing Enter submits (wired below).
        msg = gr.Textbox(placeholder="พิมพ์ข้อความของคุณที่นี่...")
        with gr.Row():
            clear = gr.Button("Clear History")
            send = gr.Button("Send Message", variant="primary")
        # Canned starter messages (Thai) the user can click to try.
        gr.Examples(
            examples=[
                "เราเศร้าจังเลย อกหักมา ร้องให้ไม่หยุดเลย",
                "เราเหงาจังเลยไม่มีใครรัก",
                "หัวหน้าจะใล่เราออกทำยังไงดี"
            ],
            inputs=msg,
        )

    with gr.Tab("Setting") as setting_tab:
        gr.Markdown("### Model Setting")
        # Editable system prompt; seeded with the fortune-teller persona.
        system_prompt = gr.Code(
            value=default_system_prompt,
            show_label=True,
            label="System Prompt",
            lines=2
        )
        all_model_id = [
            'typhoon-v1.5-instruct',
            'typhoon-v1.5-instruct-fc',
            'typhoon-v1.5x-70b-instruct',
        ]
        # Default is the last (largest) model; allow_custom_value lets the
        # user type any model id not in the list.
        model_id = gr.Dropdown(all_model_id, value=all_model_id[-1], allow_custom_value=True, label='model_id')
        temperature = gr.Slider(0, 1, value=0.5, label='temperature')

        gr.Markdown("### Message Log")
        # Read-only JSON dump of the conversation, refreshed on tab select.
        msg_log = gr.Code(language='json', label='msg_log')

    # Event wiring: Clear resets the chatbot state; Enter in the textbox or
    # the Send button streams a reply via chat_bot (which also clears the
    # textbox); opening the Setting tab regenerates the message log.
    clear.click(lambda: [], [], chatbot)
    msg.submit(chat_bot, [msg, chatbot, system_prompt, model_id, temperature], [msg, chatbot])
    send.click(chat_bot, [msg, chatbot, system_prompt, model_id, temperature], [msg, chatbot])
    setting_tab.select(get_log, [chatbot, system_prompt,], [msg_log])

demo.launch()