Spaces:
Sleeping
Sleeping
Amitontheweb
committed on
Commit
•
48ddd53
1
Parent(s):
00a32cf
Update app.py
Browse files
app.py
CHANGED
@@ -20,8 +20,8 @@ model_gpt2 = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
|
|
20 |
tokenizer_gemma = AutoTokenizer.from_pretrained("google/gemma-2b")
|
21 |
model_gemma = AutoModelForCausalLM.from_pretrained("google/gemma-2b", token=token)
|
22 |
|
23 |
-
|
24 |
-
|
25 |
|
26 |
# Define functions
|
27 |
|
@@ -147,9 +147,9 @@ def load_model(model_selected):
|
|
147 |
tokenizer = tokenizer_gemma
|
148 |
model = model_gemma
|
149 |
|
150 |
-
if model_selected == "
|
151 |
-
tokenizer =
|
152 |
-
model =
|
153 |
|
154 |
|
155 |
|
@@ -311,7 +311,7 @@ with gr.Blocks() as demo:
|
|
311 |
|
312 |
with gr.Column (scale=0, min_width=200) as Models_Strategy:
|
313 |
|
314 |
-
model_selected = gr.Radio (["GPT2", "Gemma 2", "
|
315 |
strategy_selected = gr.Radio (["Sampling", "Beam Search", "Diversity Beam Search","Contrastive"], label="Search strategy", value = "Sampling", interactive=True)
|
316 |
|
317 |
|
|
|
20 |
tokenizer_gemma = AutoTokenizer.from_pretrained("google/gemma-2b")
|
21 |
model_gemma = AutoModelForCausalLM.from_pretrained("google/gemma-2b", token=token)
|
22 |
|
23 |
+
tokenizer_qwen = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B")
|
24 |
+
model_qwen = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B")
|
25 |
|
26 |
# Define functions
|
27 |
|
|
|
147 |
tokenizer = tokenizer_gemma
|
148 |
model = model_gemma
|
149 |
|
150 |
+
if model_selected == "Qwen2":
|
151 |
+
tokenizer = tokenizer_qwen
|
152 |
+
model = model_qwen
|
153 |
|
154 |
|
155 |
|
|
|
311 |
|
312 |
with gr.Column (scale=0, min_width=200) as Models_Strategy:
|
313 |
|
314 |
+
model_selected = gr.Radio (["GPT2", "Gemma 2", "Qwen2"], label="ML Model", value="GPT2")
|
315 |
strategy_selected = gr.Radio (["Sampling", "Beam Search", "Diversity Beam Search","Contrastive"], label="Search strategy", value = "Sampling", interactive=True)
|
316 |
|
317 |
|