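"""Gradio chat demo for rubenroy/Geneva-12B-GCv2-5m on Hugging Face Spaces.

Loads the model with transformers and exposes it through gr.ChatInterface with
adjustable sampling settings. Assumed (not pinned here) dependencies: gradio,
spaces, transformers, torch, and accelerate (for device_map="auto").
"""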
import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
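# Load the Geneva 12B checkpoint in bfloat16 and let device_map="auto" place it on the available device(s).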
model_name = "rubenroy/Geneva-12B-GCv2-5m"
model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype=torch.bfloat16,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
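# On Hugging Face Spaces, @spaces.GPU requests a GPU for the duration of each call (ZeroGPU).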
@spaces.GPU
def generate(message, chat_history, temperature=0.7, top_p=0.9, top_k=50, max_new_tokens=512, repetition_penalty=1.1):
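# Rebuild the full conversation: system prompt, prior (user, assistant) turns from chat_history, then the new message.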
messages = [
{"role": "system", "content": "You are a helpful assistant named Geneva, a 12 billion parameter Large Language Model, fine-tuned and trained by Ruben Roy. You have been trained with the GammaCorpus v2 dataset, a dataset filled with structured and filtered multi-turn conversations. This dataset was also made by Ruben Roy."}, # Attribution for Mistral removed to prevent unneccesary hallucinations.
]
for user, assistant in chat_history:
messages.append({"role": "user", "content": user})
messages.append({"role": "assistant", "content": assistant})
messages.append({"role": "user", "content": message})
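# Render the conversation with the model's chat template and append the assistant generation prompt.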
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
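# Tokenize the rendered prompt and move it to the model's device.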
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
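# Generate with the user-selected decoding settings; sampling is disabled (greedy) when temperature is 0.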
generated_ids = model.generate(
**model_inputs,
temperature=float(temperature),
top_p=float(top_p),
top_k=int(top_k),
max_new_tokens=int(max_new_tokens),
repetition_penalty=float(repetition_penalty),
do_sample=float(temperature) > 0
)
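# Keep only the newly generated tokens, slicing off the prompt before decoding.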
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
TITLE_HTML = """
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css">
<style>
.model-btn {
background: linear-gradient(135deg, #059669 0%, #047857 100%);
color: white !important;
padding: 0.75rem 1rem;
border-radius: 0.5rem;
text-decoration: none !important;
font-weight: 500;
transition: all 0.2s ease;
font-size: 0.9rem;
display: flex;
align-items: center;
justify-content: center;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.model-btn:hover {
background: linear-gradient(135deg, #047857 0%, #065f46 100%);
box-shadow: 0 4px 6px rgba(0,0,0,0.2);
}
.model-section {
flex: 1;
max-width: 800px;
background: rgba(255, 255, 255, 0.05);
padding: 1.5rem;
border-radius: 1rem;
border: 1px solid rgba(255, 255, 255, 0.1);
backdrop-filter: blur(10px);
transition: all 0.3s ease;
}
.info-link {
color: #34d399;
text-decoration: none;
transition: color 0.2s ease;
}
.info-link:hover {
color: #6ee7b7;
text-decoration: underline;
}
.info-section {
margin-top: 0.5rem;
font-size: 0.9rem;
color: #94a3b8;
}
.settings-section {
background: rgba(255, 255, 255, 0.05);
padding: 1.5rem;
border-radius: 1rem;
margin: 1.5rem auto;
border: 1px solid rgba(255, 255, 255, 0.1);
max-width: 800px;
}
.settings-title {
color: #e2e8f0;
font-size: 1.25rem;
font-weight: 600;
margin-bottom: 1rem;
display: flex;
align-items: center;
gap: 0.7rem;
}
.parameter-info {
color: #94a3b8;
font-size: 0.8rem;
margin-top: 0.25rem;
}
</style>
<div style="background: linear-gradient(135deg, #064e3b 0%, #022c22 100%); padding: 1.5rem; border-radius: 1.5rem; text-align: center; margin: 1rem auto; max-width: 1200px; box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1);">
<div style="margin-bottom: 1.5rem;">
<div style="display: flex; align-items: center; justify-content: center; gap: 1rem;">
<h1 style="font-size: 2.5rem; font-weight: 800; margin: 0; background: linear-gradient(135deg, #34d399 0%, #6ee7b7 100%); -webkit-background-clip: text; -webkit-text-fill-color: transparent;">Geneva</h1>
<div style="width: 2px; height: 2.5rem; background: linear-gradient(180deg, #059669 0%, #34d399 100%);"></div>
<p style="font-size: 1.25rem; color: #94a3b8; margin: 0;">GammaCorpus v2-5m</p>
</div>
<div class="info-section">
<span>Fine-tuned from <a href="https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407" class="info-link">Mistral NeMo Instruct 2407</a> | Model: <a href="https://huggingface.co/rubenroy/Geneva-12B-GCv2-5m" class="info-link">Geneva-12B-GCv2-5m</a> | Training Dataset: <a href="https://huggingface.co/datasets/rubenroy/GammaCorpus-v2-5m" class="info-link">GammaCorpus v2 5m</a></span>
</div>
</div>
<div style="display: flex; gap: 1.5rem; justify-content: center;">
<div class="model-section">
<h2 style="font-size: 1.25rem; color: #e2e8f0; margin-bottom: 1.4rem; margin-top: 1px; font-weight: 600; display: flex; align-items: center; justify-content: center; gap: 0.7rem;">
<i class="fa-solid fa-sparkles"></i>
Geneva Models
</h2>
<div style="display: grid; grid-auto-flow: column; gap: 0.75rem; overflow-x: auto; white-space: nowrap;">
<a href="https://huggingface.co/rubenroy/Geneva-12B-GCv2-5m" class="model-btn">Geneva 12B GCv2 5m</a>
<a href="https://huggingface.co/rubenroy/Geneva-12B-GCv2-1m" class="model-btn">Geneva 12B GCv2 1m</a>
<a href="https://huggingface.co/rubenroy/Geneva-12B-GCv2-500k" class="model-btn">Geneva 12B GCv2 500k</a>
<a href="https://huggingface.co/rubenroy/Geneva-12B-GCv2-100k" class="model-btn">Geneva 12B GCv2 100k</a>
<a href="https://huggingface.co/rubenroy/Geneva-12B-GCv2-50k" class="model-btn">Geneva 12B GCv2 50k</a>
<a href="https://huggingface.co/rubenroy/Geneva-12B-GCv2-10k" class="model-btn">Geneva 12B GCv2 10k</a>
</div>
</div>
</div>
</div>
"""
examples = [
["Explain deep learning in simple terms."],
["Write a short science fiction story."],
["Describe the laws of thermodynamics."],
["Write me a simple game in Python."]
]
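# Assemble the UI: header HTML, a collapsible generation-settings panel, and the chat interface.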
with gr.Blocks() as demo:
gr.HTML(TITLE_HTML)
with gr.Accordion("Generation Settings", open=False):
with gr.Row():
with gr.Column():
temperature = gr.Slider(
minimum=0.0,
maximum=2.0,
value=0.7,
step=0.1,
label="Temperature",
info="Higher values make the output more random, lower values make it more deterministic",
interactive=True
)
top_p = gr.Slider(
minimum=0.0,
maximum=1.0,
value=0.9,
step=0.05,
label="Top P",
info="Controls the cumulative probability threshold for nucleus sampling",
interactive=True
)
top_k = gr.Slider(
minimum=1,
maximum=100,
value=50,
step=1,
label="Top K",
info="Limits the number of tokens to consider for each generation step",
interactive=True
)
with gr.Column():
max_new_tokens = gr.Slider(
minimum=1,
maximum=2048,
value=512,
step=1,
label="Max New Tokens",
info="Maximum number of tokens to generate in the response",
interactive=True
)
repetition_penalty = gr.Slider(
minimum=1.0,
maximum=2.0,
value=1.1,
step=0.1,
label="Repetition Penalty",
info="Higher values stop the model from repeating the same info",
interactive=True
)
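# The sliders are passed to generate() after (message, chat_history) as additional inputs; history is expected as (user, assistant) tuples.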
chatbot = gr.ChatInterface(
fn=generate,
additional_inputs=[
temperature,
top_p,
top_k,
max_new_tokens,
repetition_penalty
],
examples=examples
)
demo.launch(share=True)