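# Gradio chat demo: query Mistral-7B-Instruct, Mixtral-8x7B-Instruct, or Mathstral-7B
# through the Hugging Face Inference API and stream the answers back to the chat UI.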
from huggingface_hub import InferenceClient
import gradio as gr
import os
API_URL = {
    "Mistral": "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3",
    "Mixtral": "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1",
    "Mathstral": "https://api-inference.huggingface.co/models/mistralai/mathstral-7B-v0.1",
}
HF_TOKEN = os.environ["HF_TOKEN"]
mistralClient = InferenceClient(
    model=API_URL["Mistral"],
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
)
mixtralClient = InferenceClient(
    model=API_URL["Mixtral"],
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
)
mathstralClient = InferenceClient(
    model=API_URL["Mathstral"],
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
)
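# Build a prompt in the Mistral instruct format: each past exchange is wrapped in
# [INST] ... [/INST] followed by the model's reply, and the new message comes last.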
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
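# Stream a completion for the current message with the model selected in the dropdown.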
def generate(prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95,
             repetition_penalty=1.0, model="Mathstral"):
    # Select the client for the requested model
    if model == "Mistral":
        client = mistralClient
    elif model == "Mixtral":
        client = mixtralClient
    elif model == "Mathstral":
        client = mathstralClient

    # Sanitize the generation arguments
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)
    # Stream the answer token by token so the chat updates while it is being generated
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield output
    return output
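# Extra controls shown alongside the chat box; their values are passed to generate()
# after the message and history, in the same order as the function's keyword arguments.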
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=2048,
        minimum=0,
        maximum=4096,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
    gr.Dropdown(
        choices=["Mistral", "Mixtral", "Mathstral"],
        value="Mathstral",
        label="Model to use",
        interactive=True,
        info="Mistral: general-purpose conversations, "
             "Mixtral: faster and more capable conversations, "
             "Mathstral: mathematical and scientific reasoning",
    ),
]
css = """ | |
#mkd { | |
height: 500px; | |
overflow: auto; | |
border: 1px solid #ccc; | |
} | |
""" | |
with gr.Blocks(css=css) as demo:
    gr.HTML("<h1><center>Mathstral Test</center></h1>")
    gr.HTML("<h3><center>In this demo, you can ask Mathstral mathematical and scientific questions. 🧮</center></h3>")
    gr.ChatInterface(
        generate,
        additional_inputs=additional_inputs,
        theme=gr.themes.Soft(),
        cache_examples=False,
        examples=[[l.strip()] for l in open("exercices.md").readlines()],
        chatbot=gr.Chatbot(
            # Delimiters the chatbot treats as LaTeX, so equations render as math
            latex_delimiters=[
                {"left": "$$", "right": "$$", "display": True},
                {"left": "\\[", "right": "\\]", "display": True},
                {"left": "\\(", "right": "\\)", "display": False},
                {"left": "$", "right": "$", "display": False},
            ]
        )
    )
demo.queue(max_size=100).launch(debug=True)