File size: 2,300 Bytes
3a93269
4191ffb
 
 
 
4a42c72
4191ffb
3a6edce
4191ffb
 
e8577b2
 
4232d38
 
 
 
e8577b2
4191ffb
 
 
 
be131b2
 
 
 
e8577b2
4191ffb
e8577b2
be131b2
 
 
 
 
 
 
 
4191ffb
 
 
 
 
be131b2
 
 
4191ffb
3a93269
be131b2
308b2b5
 
be131b2
 
4a42c72
308b2b5
3a93269
 
4191ffb
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load the model and tokenizer
# Downloads (or reads from the local HF cache) the decompiler checkpoint at
# import time, so the app blocks here on first run until the weights arrive.
model_path = 'Neo111x/falcon3-decompiler-3b-v1.5'  # V1.5 Model
tokenizer = AutoTokenizer.from_pretrained(model_path)
# bfloat16 halves the memory footprint versus float32; assumes the host
# (CPU or GPU) supports bf16 inference — TODO confirm for the target Space.
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16)

# Define the inference function
def generate_response(input_text, temperature, top_k, top_p, progress=gr.Progress()):
    """Decompile user-supplied assembly into C source with the loaded LM.

    Parameters
    ----------
    input_text : str
        Raw assembly code pasted into the UI textbox.
    temperature, top_k, top_p :
        Sampling hyper-parameters taken from the UI sliders.
    progress : gr.Progress
        Progress tracker injected by Gradio.

    Returns
    -------
    tuple[str, str]
        Markdown-fenced (assembly, source) code blocks, or a pair of
        "not found" messages when the model response lacks the prompt markers.
    """
    progress(0, "Processing input...")
    # Wrap the input in the prompt template the model expects.
    before = "# This is the assembly code:\n"  # prompt
    after = "\n# What is the source code?\n"  # prompt
    input_func = before + input_text.strip() + after
    inputs = tokenizer(input_func, return_tensors="pt")
    progress(0.3, "Running inference...")
    # FIX: the sampling kwargs were commented out, so the temperature/top-k/
    # top-p sliders had no effect — wire them through as originally intended.
    # Also use max_new_tokens (not max_length) so a long prompt does not
    # consume the generation budget, and disable autograd during inference.
    with torch.inference_mode():
        outputs = model.generate(
            **inputs,
            max_new_tokens=512,  # Adjust this if needed
            do_sample=True,
            top_k=int(top_k),
            top_p=float(top_p),
            temperature=float(temperature),
        )
    progress(0.8, "Decoding response...")
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    progress(1, "Done!")
    # Split the response into assembly and source code (if applicable)
    if "# This is the assembly code:" in response:
        parts = response.split("# What is the source code?")
        assembly_code = parts[0].replace("# This is the assembly code:", "").strip()
        source_code = parts[1].strip() if len(parts) > 1 else ""
        return f"```c\n{assembly_code}\n```", f"```c\n{source_code}\n```"
    else:
        return "No assembly code found.", "No source code found."

# Assemble the Gradio UI: one textbox for the assembly input, three sliders
# for the sampling hyper-parameters, and two textboxes for the outputs.
input_components = [
    gr.Textbox(lines=5, placeholder="Enter assembly code here...", label="Input Assembly Code"),
    gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(1, 100, value=50, step=1, label="Top-k"),
    gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p"),
]
output_components = [
    gr.Textbox(label="Assembly Code"),
    gr.Textbox(label="Source Code"),
]

interface = gr.Interface(
    fn=generate_response,
    inputs=input_components,
    outputs=output_components,
    title="Falcon decompiler Interactive Demo",
    description="Adjust the sliders for temperature, top-k, and top-p to customize the model's response.",
)

# Start the web app (blocks until the server is stopped).
interface.launch()