Neo111x committed on
Commit
3a6edce
·
verified ·
1 Parent(s): 52a7c01

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -5,7 +5,7 @@ import torch
5
  # Load the model and tokenizer
6
  model_path = 'LLM4Binary/llm4decompile-1.3b-v1.5' # V1.5 Model
7
  tokenizer = AutoTokenizer.from_pretrained(model_path)
8
- model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16).cuda()
9
 
10
  # Define the inference function
11
  def generate_response(input_text, temperature, top_k, top_p):
 
5
  # Load the model and tokenizer
6
  model_path = 'LLM4Binary/llm4decompile-1.3b-v1.5' # V1.5 Model
7
  tokenizer = AutoTokenizer.from_pretrained(model_path)
8
+ model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16)
9
 
10
  # Define the inference function
11
  def generate_response(input_text, temperature, top_k, top_p):