Update README.md
Browse files
README.md
CHANGED
Before (README lines 15–28 — note the duplicated import block that this commit removes):

```diff
@@ -15,14 +15,12 @@ Decompilation: Use falcon3-decompiler-3b to translate ghidra decompilation output
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch

 model_path = 'LLM4Binary/llm4decompile-1.3b-v1.5' # V1.5 Model
 tokenizer = AutoTokenizer.from_pretrained(model_path)
 model = AutoModelForCausalLM.from_pretrained(model_path,torch_dtype=torch.bfloat16).cuda()

-from transformers import AutoTokenizer, AutoModelForCausalLM
-import torch
-import os

 asm_func = """
 char * func0(char **param_1,int param_2)
```
After (README lines 15–26 — duplicate imports removed; `import os` kept with the other imports at the top):

```diff
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
+import os

 model_path = 'LLM4Binary/llm4decompile-1.3b-v1.5' # V1.5 Model
 tokenizer = AutoTokenizer.from_pretrained(model_path)
 model = AutoModelForCausalLM.from_pretrained(model_path,torch_dtype=torch.bfloat16).cuda()

 asm_func = """
 char * func0(char **param_1,int param_2)
```