import json
import os
import uuid

import gradio as gr
import requests
import spaces
|
|
|
# API_URL and BEARER_TOKEN are read from the environment; the fallback
# strings below are placeholders, not working defaults.
API_URL = os.environ.get("API_URL", "default_api_url_if_not_set")
BEARER_TOKEN = os.environ.get("BEARER_TOKEN", "default_token_if_not_set")

headers = {
    "Authorization": f"Bearer {BEARER_TOKEN}",
    "Content-Type": "application/json",
}
|
|
|
|
|
def load_topics(filename):
    """Load the topic definitions from a JSON file, returning {} on failure."""
    try:
        with open(filename, "r") as file:
            return json.load(file)
    except FileNotFoundError:
        print(f"Error: The file {filename} was not found.")
        return {}
    except json.JSONDecodeError:
        print(f"Error: Failed to decode JSON in {filename}.")
        return {}
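
# For reference, the fields accessed elsewhere in this script imply a
# topics.json shaped roughly like the sketch below. This is inferred, not a
# guaranteed schema, and real entries may carry additional keys:
#
# {
#     "0": {
#         "primer": "...",        # message shown when the topic begins
#         "instruction": "...",   # forwarded verbatim to the backend
#         "conclusions": [...],   # forwarded verbatim to the backend
#         "context": "...",       # forwarded verbatim to the backend
#         "conclusionAction": {
#             "<conclusion>": {"next": "<topic id>", "extra": "..."}
#         }
#     },
#     "1": {...}
# }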
|
|
|
|
|
topics_json_path = "topics.json"
topics = load_topics(topics_json_path)

# Per-user session state, keyed by the hex form of each user's UUID.
userdata = {}
|
|
|
def query(payload):
    # Keep the decoded body in an ordinary local; the original bound it to a
    # variable named `json`, shadowing the imported json module.
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
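
# Inferred from the fields read in generate() below: the backend is expected
# to respond with JSON carrying at least "generated_text" and, when it judges
# the current topic finished, a non-null "conclusion" label.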
|
|
|
|
|
@spaces.GPU
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int = 1024,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
    user_id: uuid.UUID | None = None,
) -> str:
    # A `uuid.uuid4()` default would be evaluated once at definition time and
    # then shared by every caller that omits user_id, so mint a fresh id here
    # instead.
    if user_id is None:
        user_id = uuid.uuid4()
    user_id_str = user_id.hex
    if user_id_str not in userdata or "topicId" not in userdata[user_id_str]:
        userdata[user_id_str] = {"topicId": "0", "topic_flag": False}
    topic = topics[userdata[user_id_str]["topicId"]]
    result = query({
        "inputs": "",
        "message": message,
        "chat_history": chat_history,
        "system_prompt": system_prompt,
        "instruction": topic["instruction"],
        "conclusions": topic["conclusions"],
        "context": topic["context"],
        "max_new_tokens": max_new_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "top_k": top_k,
        "repetition_penalty": repetition_penalty,
    })

    # A non-null "conclusion" means the backend judged the current topic
    # finished: advance to the next topic and set topic_flag so update() can
    # inject the new topic's primer into the chat history.
    conclusion = result.get("conclusion")
    if conclusion is not None:
        action = topic["conclusionAction"][conclusion]
        next_topic_id = action["next"]
        userdata[user_id_str]["topicId"] = next_topic_id
        userdata[user_id_str]["topic_flag"] = True
        # Default to "" so a missing "generated_text" never breaks the
        # concatenation below.
        return (
            result.get("generated_text", "")
            + "\n" + action["extra"]
            + "\n" + topics[next_topic_id]["primer"]
        )

    return result.get("generated_text", "")
|
|
|
def update(chatbot_state):
    # Runs around each user input: if generate() flagged a topic change,
    # swap the visible history for the new topic's primer message.
    if user_id.value.hex not in userdata:
        userdata[user_id.value.hex] = {"topic_flag": False}

    user_topic_flag = userdata[user_id.value.hex].get("topic_flag", False)

    if user_topic_flag:
        userdata[user_id.value.hex]["topic_flag"] = False
        return [[None, topics[userdata[user_id.value.hex]["topicId"]]["primer"]]]

    return chatbot_state
|
|
|
|
|
|
|
system_prompt_input = gr.Textbox(label="System prompt")
max_new_tokens_input = gr.Slider(minimum=1, maximum=2048, value=50, step=1, label="Max New Tokens")
temperature_input = gr.Slider(minimum=0.1, maximum=4.0, step=0.1, value=0.6, label="Temperature")
top_p_input = gr.Slider(minimum=0.05, maximum=1.0, step=0.05, value=0.9, label="Top-p")
top_k_input = gr.Slider(minimum=1, maximum=1000, step=1, value=50, label="Top-k")
repetition_penalty_input = gr.Slider(minimum=1.0, maximum=2.0, step=0.05, value=1.2, label="Repetition Penalty")

user_id = gr.State(uuid.uuid4())
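# Caveat: the State above is created once at import time, so every session
# starts from the same UUID and all visitors share one userdata entry;
# minting a per-session id (e.g., in a load event) would isolate users.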
|
|
|
chat_interface = gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot([[None, topics["0"]["primer"]]]),
    additional_inputs=[
        system_prompt_input,
        max_new_tokens_input,
        temperature_input,
        top_p_input,
        top_k_input,
        repetition_penalty_input,
        user_id,
    ],
    stop_btn=gr.Button("Stop"),
)
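
# Gradio passes additional_inputs to fn positionally after (message, history),
# so the list above must line up with generate()'s parameter order.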
|
|
|
|
|
with gr.Blocks(css="style.css") as demo:
    chat_interface.render()
    # Run update() around user input so a pending topic change injects the
    # new topic's primer into the visible history before the next exchange.
    chat_interface.submit_btn.click(update, inputs=chat_interface.chatbot_state, outputs=chat_interface.chatbot_state)
    chat_interface.textbox.input(update, inputs=chat_interface.chatbot_state, outputs=chat_interface.chatbot_state)


if __name__ == "__main__":
    demo.queue(max_size=20).launch(debug=True)
|
|