Neo111x committed on
Commit
4a42c72
·
verified ·
1 Parent(s): 3a6edce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -3,7 +3,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
 
5
  # Load the model and tokenizer
6
- model_path = 'LLM4Binary/llm4decompile-1.3b-v1.5' # V1.5 Model
7
  tokenizer = AutoTokenizer.from_pretrained(model_path)
8
  model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16)
9
 
@@ -34,7 +34,7 @@ interface = gr.Interface(
34
  gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
35
  ],
36
  outputs=gr.Textbox(label="Generated Response"),
37
- title="LLM4Binary Interactive Demo",
38
  description="Adjust the sliders for temperature, top-k, and top-p to customize the model's response."
39
  )
40
 
 
3
  import torch
4
 
5
  # Load the model and tokenizer
6
+ model_path = 'Neo111x/falcon3-decompiler-3b-v1.5' # V1.5 Model
7
  tokenizer = AutoTokenizer.from_pretrained(model_path)
8
  model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16)
9
 
 
34
  gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
35
  ],
36
  outputs=gr.Textbox(label="Generated Response"),
37
+ title="Falcon decompiler Interactive Demo",
38
  description="Adjust the sliders for temperature, top-k, and top-p to customize the model's response."
39
  )
40