import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the models
model_path_1 = "ibm-granite/granite-3.0-1b-a400m-instruct"
model_path_2 = "ibm-granite/granite-3.0-1b-a400m-base"

tokenizer_1 = AutoTokenizer.from_pretrained(model_path_1)
tokenizer_2 = AutoTokenizer.from_pretrained(model_path_2)

model_1 = AutoModelForCausalLM.from_pretrained(model_path_1, device_map="auto")
model_2 = AutoModelForCausalLM.from_pretrained(model_path_2, device_map="auto")

model_1.eval()
model_2.eval()
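
# Optional tweak (not part of the original setup): if GPU memory is tight, the models
# could also be loaded in half precision, assuming the hardware supports bfloat16.
# model_1 = AutoModelForCausalLM.from_pretrained(
#     model_path_1, device_map="auto", torch_dtype=torch.bfloat16
# )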
# Mood prompts dictionary
mood_prompts = {
    "Fun": "Respond in a light-hearted, playful manner.",
    "Serious": "Respond in a thoughtful, serious tone.",
    "Professional": "Respond in a formal, professional manner.",
    "Upset": "Respond in a slightly irritated, upset tone.",
    "Empathetic": "Respond in a warm and understanding tone.",
    "Optimistic": "Respond in a positive, hopeful manner.",
    "Sarcastic": "Respond with a hint of sarcasm.",
    "Motivational": "Respond with encouragement and motivation.",
    "Curious": "Respond with a sense of wonder and curiosity.",
    "Humorous": "Respond with a touch of humor.",
    "Cautious": "Respond with careful consideration and caution.",
    "Assertive": "Respond with confidence and assertiveness.",
    "Friendly": "Respond in a warm and friendly manner.",
    "Romantic": "Respond with affection and romance.",
    "Nostalgic": "Respond with a sense of longing for the past.",
    "Grateful": "Respond with gratitude and appreciation.",
    "Inspirational": "Respond with inspiration and positivity.",
    "Casual": "Respond in a relaxed and informal tone.",
    "Formal": "Respond with a high level of formality.",
    "Pessimistic": "Respond with a focus on potential negatives.",
    "Excited": "Respond with enthusiasm and excitement.",
    "Melancholic": "Respond with a sense of sadness or longing.",
    "Confident": "Respond with self-assurance and confidence.",
    "Suspicious": "Respond with caution and doubt.",
    "Reflective": "Respond with deep thought and introspection.",
    "Joyful": "Respond with happiness and joy.",
    "Mysterious": "Respond with an air of mystery and intrigue.",
    "Aggressive": "Respond with force and intensity.",
    "Calm": "Respond with a sense of peace and tranquility.",
    "Gloomy": "Respond with a sense of sadness or pessimism.",
    "Encouraging": "Respond with words of support and encouragement.",
    "Sympathetic": "Respond with understanding and compassion.",
    "Disappointed": "Respond with a tone of disappointment.",
    "Proud": "Respond with a sense of pride and accomplishment.",
    "Playful": "Respond in a fun and playful manner.",
    "Inquisitive": "Respond with curiosity and interest.",
    "Supportive": "Respond with reassurance and support.",
    "Reluctant": "Respond with hesitation and reluctance.",
    "Confused": "Respond with uncertainty and confusion.",
    "Energetic": "Respond with high energy and enthusiasm.",
    "Relaxed": "Respond with a calm and laid-back tone.",
    "Grumpy": "Respond with a touch of irritation.",
    "Hopeful": "Respond with a sense of hope and optimism.",
    "Indifferent": "Respond with a lack of strong emotion.",
    "Surprised": "Respond with shock and astonishment.",
    "Tense": "Respond with a sense of urgency or anxiety.",
    "Enthusiastic": "Respond with eagerness and excitement.",
    "Worried": "Respond with concern and apprehension."
}
def generate_response(prompt, mood, max_new_tokens, temperature, top_p, repetition_penalty, model_choice):
    # Prepend the selected mood instruction to the user's prompt
    mood_prompt = mood_prompts.get(mood, "")
    full_prompt = f"{mood_prompt} {prompt}"

    # Choose model and tokenizer based on user selection
    if model_choice == "Granite 3.0-1B A400M Instruct":
        model = model_1
        tokenizer = tokenizer_1
    else:
        model = model_2
        tokenizer = tokenizer_2

    input_tokens = tokenizer(full_prompt, return_tensors="pt").to(model.device)
    output = model.generate(
        **input_tokens,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True
    )
    # Decoding the full sequence returns the prompt followed by the newly generated text
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response.strip()
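
# A minimal alternative sketch (not wired into the UI below): the instruct variant is
# chat-tuned, so its prompt could be built with the tokenizer's chat template instead
# of plain string concatenation. generate_chat_response is a hypothetical helper shown
# for illustration only, and it assumes the instruct tokenizer ships a chat template.
#
# def generate_chat_response(prompt, mood, max_new_tokens=100):
#     messages = [{"role": "user", "content": f"{mood_prompts.get(mood, '')} {prompt}"}]
#     chat_ids = tokenizer_1.apply_chat_template(
#         messages, add_generation_prompt=True, return_tensors="pt"
#     ).to(model_1.device)
#     out = model_1.generate(chat_ids, max_new_tokens=max_new_tokens, do_sample=True)
#     # Slice off the prompt tokens so only the newly generated text is returned
#     return tokenizer_1.decode(out[0][chat_ids.shape[-1]:], skip_special_tokens=True)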
with gr.Blocks(theme="prithivMLmods/Minecraft-Theme") as demo:
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...", lines=5)
            mood = gr.Dropdown(label="Select Mood", choices=list(mood_prompts.keys()), value="Professional")
            model_choice = gr.Radio(label="Select Model", choices=["Granite 3.0-1B A400M Instruct", "Granite 3.0-1B A400M Base"], value="Granite 3.0-1B A400M Instruct")
            generate_button = gr.Button("Generate Response")
            max_new_tokens = gr.Slider(minimum=1, maximum=500, value=100, step=1, label="Max New Tokens")
            temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature")
            top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.1, label="Top P")
            repetition_penalty = gr.Slider(minimum=1.0, maximum=2.0, value=1.1, step=0.1, label="Repetition Penalty")
        with gr.Column():
            output = gr.Textbox(label="Response", lines=15)

    generate_button.click(
        generate_response,
        inputs=[prompt, mood, max_new_tokens, temperature, top_p, repetition_penalty, model_choice],
        outputs=output
    )

    gr.Markdown("## Examples")
    examples = gr.Examples(
        examples=[
            ["Give me advice on staying motivated.", "Motivational", 100, 0.7, 0.9, 1.1, "Granite 3.0-1B A400M Instruct"],
            ["Describe a futuristic city.", "Optimistic", 200, 0.9, 0.8, 1.0, "Granite 3.0-1B A400M Base"]
        ],
        inputs=[prompt, mood, max_new_tokens, temperature, top_p, repetition_penalty, model_choice],
    )

demo.launch(share=True)
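
Once the demo is running, it can also be called from Python rather than the browser. The sketch below uses the gradio_client package; the Space ID is a placeholder and the api_name is an assumption, so check the running app's "Use via API" page for the exact endpoint name.

from gradio_client import Client

# Placeholder Space ID and assumed endpoint name; adjust both for your deployment.
client = Client("your-username/your-space-name")
result = client.predict(
    "Give me advice on staying motivated.",  # prompt
    "Motivational",                          # mood
    100,                                     # max_new_tokens
    0.7,                                     # temperature
    0.9,                                     # top_p
    1.1,                                     # repetition_penalty
    "Granite 3.0-1B A400M Instruct",         # model_choice
    api_name="/generate_response"
)
print(result)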