Update app.py
app.py CHANGED
@@ -8,11 +8,13 @@ tokenizer = AutoTokenizer.from_pretrained(model_path)
 model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16)
 
 # Define the inference function
-def generate_response(input_text, temperature, top_k, top_p):
+def generate_response(input_text, temperature, top_k, top_p, progress=gr.Progress()):
+    progress(0, "Processing input...")
     before = f"# This is the assembly code:\n"#prompt
     after = "\n# What is the source code?\n"#prompt
     input_func = before+input_text.strip()+after
     inputs = tokenizer(input_func, return_tensors="pt")
+    progress(0.3, "Running inference...")
     outputs = model.generate(
         **inputs,
         max_length=512, # Adjust this if needed
@@ -21,7 +23,9 @@ def generate_response(input_text, temperature, top_k, top_p):
         # top_k=int(top_k),
         # top_p=float(top_p),
         # temperature=float(temperature)
+    progress(0.8, "Decoding response...")
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    progress(1, "Done!")
     # Split the response into assembly and source code (if applicable)
     if "# This is the assembly code:" in response:
         parts = response.split("# What is the source code?")
@@ -41,8 +45,8 @@ interface = gr.Interface(
         gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
     ],
     outputs=[
-        gr.Markdown(label="Assembly Code"),
-        gr.Markdown(label="Source Code")
+        gr.Markdown(label="Assembly Code", placeholder = "Here goes the input function ...."),
+        gr.Markdown(label="Source Code", placeholder = "Here goes the enhanced function ....")
     ],
 
     title="Falcon decompiler Interactive Demo",