Neo111x committed on
Commit
4232d38
·
verified ·
1 Parent(s): 4d744c1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -1
app.py CHANGED
@@ -9,7 +9,10 @@ model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloa
9
 
10
  # Define the inference function
11
  def generate_response(input_text, temperature, top_k, top_p):
12
- inputs = tokenizer(input_text, return_tensors="pt")
 
 
 
13
  outputs = model.generate(
14
  **inputs,
15
  max_length=512, # Adjust this if needed
 
9
 
10
  # Define the inference function
11
  def generate_response(input_text, temperature, top_k, top_p):
12
+ before = f"# This is the assembly code:\n"#prompt
13
+ after = "\n# What is the source code?\n"#prompt
14
+ input_func = before+input_text.strip()+after
15
+ inputs = tokenizer(input_func, return_tensors="pt")
16
  outputs = model.generate(
17
  **inputs,
18
  max_length=512, # Adjust this if needed