Update README.md
Browse files
README.md
CHANGED
@@ -12,6 +12,12 @@ language:
12   - en
13   tags:
14   - compression
15   ---
16
17   # Model Card for Model ID
|
@@ -61,14 +67,14 @@ This model should not be used for tasks that require full preservation of the or
61
62   Use the code below to get started with the model.
63
64 -
65   from peft import PeftModel, PeftConfig
66   from transformers import AutoModelForCausalLM
67
68   config = PeftConfig.from_pretrained("aoxo/llama-token-compressor")
69   base_model = AutoModelForCausalLM.from_pretrained("unsloth/Meta-Llama-3.1-8B-bnb-4bit")
70   model = PeftModel.from_pretrained(base_model, "aoxo/llama-token-compressor")
71 -
72
73   ## Training Details
74
12   - en
13   tags:
14   - compression
15 + - pytorch
16 + - facebook
17 + - meta
18 + - llama
19 + - llama-3
20 + pipeline_tag: text-generation
21   ---
22
23   # Model Card for Model ID

67
68   Use the code below to get started with the model.
69
70 + ```python
71   from peft import PeftModel, PeftConfig
72   from transformers import AutoModelForCausalLM
73
74   config = PeftConfig.from_pretrained("aoxo/llama-token-compressor")
75   base_model = AutoModelForCausalLM.from_pretrained("unsloth/Meta-Llama-3.1-8B-bnb-4bit")
76   model = PeftModel.from_pretrained(base_model, "aoxo/llama-token-compressor")
77 + ```
78
79   ## Training Details
80