import os

import gradio as gr
from huggingface_hub.file_download import http_get
from llama_cpp import Llama
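

# Default system prompt, in Russian. It translates roughly to:
# "You are Saiga, a Russian-language automatic assistant.
# You talk to people and help them."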
SYSTEM_PROMPT = "Ты — Сайга, русскоязычный автоматический ассистент. Ты разговариваешь с людьми и помогаешь им."
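

# Fetch the quantized GGUF checkpoint from the Hugging Face Hub if it is not
# present locally, then load it with llama-cpp-python.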
def load_model(
    directory: str = ".",
    model_name: str = "saiga_nemo_12b.Q4_K_M.gguf",
    model_url: str = "https://huggingface.co/IlyaGusev/saiga_nemo_12b_gguf/resolve/main/saiga_nemo_12b.Q4_K_M.gguf"
):
    final_model_path = os.path.join(directory, model_name)

    # Download the GGUF file only if it is not already cached locally.
    if not os.path.exists(final_model_path):
        print("Downloading the model...")
        with open(final_model_path, "wb") as f:
            http_get(model_url, f)
        os.chmod(final_model_path, 0o777)
        print("Model downloaded!")
    model = Llama(
        model_path=final_model_path,
        n_ctx=8192
    )

    print("Model loaded!")
    return model


MODEL = load_model()
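

# Gradio callback, step 1: append the user's message to the history with an
# empty assistant slot and clear the input textbox.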
def user(message, history):
    new_history = history + [[message, None]]
    return "", new_history
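

# Gradio callback, step 2: rebuild the message list from the visible history
# and stream the model's reply into the last history entry.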
def bot(
    history,
    system_prompt,
    top_p,
    top_k,
    temp
):
    model = MODEL
    # Use the system prompt passed in from the UI rather than the global default.
    messages = [{"role": "system", "content": system_prompt}]

    # Replay all completed turns except the last, still unanswered one.
    for user_message, bot_message in history[:-1]:
        messages.append({"role": "user", "content": user_message})
        if bot_message:
            messages.append({"role": "assistant", "content": bot_message})

    last_user_message = history[-1][0]
    messages.append({"role": "user", "content": last_user_message})

    # Stream tokens, updating the last history entry after each chunk.
    partial_text = ""
    for part in model.create_chat_completion(
        messages,
        temperature=temp,
        top_k=top_k,
        top_p=top_p,
        stream=True,
    ):
        delta = part["choices"][0]["delta"]
        if "content" in delta:
            partial_text += delta["content"]
            history[-1][1] = partial_text
            yield history
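

# UI: a chat window plus sliders for the generation parameters; the labels
# are in Russian, matching the target audience of the demo.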
with gr.Blocks(
    theme=gr.themes.Soft()
) as demo:
    favicon = '<img src="https://cdn.midjourney.com/b88e5beb-6324-4820-8504-a1a37a9ba36d/0_1.png" width="48px" style="display: inline">'
    gr.Markdown(
        f"""<h1><center>{favicon}Saiga Nemo 12B GGUF Q4_K_M</center></h1>

This is a demo of a **Russian**-speaking model based on Mistral Nemo.

Это демонстрационная версия [квантованной Сайги Немо с 12 миллиардами параметров](https://huggingface.co/IlyaGusev/saiga_nemo_12b_gguf), работающая на CPU.
"""
    )
    with gr.Row():
        with gr.Column(scale=5):
            system_prompt = gr.Textbox(label="Системный промпт", placeholder="", value=SYSTEM_PROMPT, interactive=False)
            chatbot = gr.Chatbot(label="Диалог")
        with gr.Column(min_width=80, scale=1):
            with gr.Tab(label="Параметры генерации"):
                top_p = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    value=0.9,
                    step=0.05,
                    interactive=True,
                    label="Top-p",
                )
                top_k = gr.Slider(
                    minimum=10,
                    maximum=100,
                    value=30,
                    step=5,
                    interactive=True,
                    label="Top-k",
                )
                temp = gr.Slider(
                    minimum=0.0,
                    maximum=2.0,
                    value=0.01,
                    step=0.01,
                    interactive=True,
                    label="Температура"
                )
    with gr.Row():
        with gr.Column():
            msg = gr.Textbox(
                label="Отправить сообщение",
                placeholder="Отправить сообщение",
                show_label=False,
            )
        with gr.Column():
            with gr.Row():
                submit = gr.Button("Отправить")
                stop = gr.Button("Остановить")
                clear = gr.Button("Очистить")
    with gr.Row():
        gr.Markdown(
            """ПРЕДУПРЕЖДЕНИЕ: Модель может генерировать фактически или этически некорректные тексты. Мы не несём за это ответственность."""
        )
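
    # Pressing Enter first runs `user` unqueued (instant echo of the message),
    # then `bot` on the queue, which streams the generated answer.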
    submit_event = msg.submit(
        fn=user,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False,
    ).success(
        fn=bot,
        inputs=[
            chatbot,
            system_prompt,
            top_p,
            top_k,
            temp
        ],
        outputs=chatbot,
        queue=True,
    )

    submit_click_event = submit.click(
        fn=user,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False,
    ).success(
        fn=bot,
        inputs=[
            chatbot,
            system_prompt,
            top_p,
            top_k,
            temp
        ],
        outputs=chatbot,
        queue=True,
    )
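
    # The "Stop" button cancels any in-flight generation from either trigger.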
    stop.click(
        fn=None,
        inputs=None,
        outputs=None,
        cancels=[submit_event, submit_click_event],
        queue=False,
    )

    clear.click(lambda: None, None, chatbot, queue=False)
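
# Queue up to 128 pending requests and surface server errors in the browser.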
demo.queue(max_size=128)
demo.launch(show_error=True)