Amitontheweb committed
Commit: 6c21e3c
1 Parent(s): 2f5d998
Update app.py
app.py CHANGED

@@ -14,14 +14,14 @@ token = os.environ.get("HF_TOKEN")
 #tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
 #model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

-tokenizer_gpt2 = AutoTokenizer.from_pretrained("openai-community/gpt2")
-model_gpt2 = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+#tokenizer_gpt2 = AutoTokenizer.from_pretrained("openai-community/gpt2")
+#model_gpt2 = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

 #tokenizer_gemma = AutoTokenizer.from_pretrained("google/gemma-2b")
 #model_gemma = AutoModelForCausalLM.from_pretrained("google/gemma-2b", token=token)

-tokenizer_qwen = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B")
-model_qwen = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B")
+#tokenizer_qwen = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B")
+#model_qwen = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B")

 # Define functions

@@ -316,16 +316,17 @@ with gr.Blocks() as demo:

     No_beam_group_list = [2]

-    tokenizer = tokenizer_gpt2
-    model = model_gpt2
+    #tokenizer = tokenizer_gpt2
+    #model = model_gpt2

     with gr.Row():

         with gr.Column (scale=0, min_width=200) as Models_Strategy:

             model_selected = gr.Radio (["GPT2", "Qwen2"], label="ML Model", value="GPT2")
-            strategy_selected = gr.Radio (["Sampling", "Beam Search", "Diversity Beam Search","Contrastive"], label="Search strategy", value = "Sampling", interactive=True)
             load_model_button = gr.Button("Load")
+            strategy_selected = gr.Radio (["Sampling", "Beam Search", "Diversity Beam Search","Contrastive"], label="Search strategy", value = "Sampling", interactive=True)
+

         with gr.Column(scale=1):

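The net effect of the commit appears to be deferring model loading: the module-level from_pretrained calls and the default tokenizer/model assignments are commented out, and the strategy_selected radio now sits below load_model_button. Below is a minimal sketch of how the "Load" button could trigger on-demand loading of the selected model; the load_model helper, the gr.State holders, and the click binding are assumptions for illustration, not code from this commit.

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Map the radio choices used in app.py to their Hub repo ids.
MODEL_IDS = {"GPT2": "openai-community/gpt2", "Qwen2": "Qwen/Qwen2-0.5B"}

def load_model(model_name):
    # Hypothetical helper: load the selected tokenizer/model only when the
    # user clicks "Load", instead of at import time as in the old app.py.
    repo_id = MODEL_IDS[model_name]
    tokenizer = AutoTokenizer.from_pretrained(repo_id)
    model = AutoModelForCausalLM.from_pretrained(repo_id)
    return tokenizer, model

with gr.Blocks() as demo:
    model_selected = gr.Radio(["GPT2", "Qwen2"], label="ML Model", value="GPT2")
    load_model_button = gr.Button("Load")
    # Session-scoped holders for the loaded objects (assumed, not in the diff).
    tokenizer_state = gr.State()
    model_state = gr.State()
    load_model_button.click(
        load_model,
        inputs=model_selected,
        outputs=[tokenizer_state, model_state],
    )

demo.launch()

Generation callbacks would then read tokenizer_state/model_state instead of the removed module-level tokenizer and model globals; how the actual app wires this up is not visible in this hunk.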