update
README.md
CHANGED
````diff
@@ -7,6 +7,15 @@ license: bsd-3-clause
 
 This model was sharded using torch.float16. Use the code below to load this model, configure the device_map for your GPU/CPU split.
 
+First pull the model.
+
+```bash
+git clone https://huggingface.co/abacaj/codegen-16B-nl-sharded
+cd codegen-16B-nl-sharded
+git-lfs install
+git pull
+```
+
 ```python
 def load_model_sharded():
     config = AutoConfig.from_pretrained("abacaj/codegen-16B-nl-sharded")
@@ -27,7 +36,7 @@ def load_model_sharded():
     model = load_checkpoint_and_dispatch(
         model,
         dtype=torch.float16,
-        checkpoint="sharded",
+        checkpoint="codegen-16B-nl-sharded",
         device_map=device_map,
     ).eval()
 
````
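The hunks above show only the top and bottom of `load_model_sharded()`. As a reference for reviewing this change, here is a minimal sketch of what the complete function could look like with Hugging Face `accelerate`, assuming the usual `init_empty_weights` / `infer_auto_device_map` / `load_checkpoint_and_dispatch` pattern; the `max_memory` budget, the `CodeGenBlock` no-split class, and the `Salesforce/codegen-16B-nl` tokenizer are illustrative assumptions, not taken from the README.

```python
# Sketch only: the README diff shows just fragments of load_model_sharded().
# The memory budget, no-split class, and tokenizer source below are assumptions.
import torch
from accelerate import infer_auto_device_map, init_empty_weights, load_checkpoint_and_dispatch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer


def load_model_sharded():
    config = AutoConfig.from_pretrained("abacaj/codegen-16B-nl-sharded")

    # Build the model skeleton without allocating real weights.
    with init_empty_weights():
        model = AutoModelForCausalLM.from_config(config)

    # Decide which layers go to GPU 0 and which stay in CPU RAM.
    device_map = infer_auto_device_map(
        model,
        max_memory={0: "14GiB", "cpu": "48GiB"},    # assumed budget; adjust to your hardware
        no_split_module_classes=["CodeGenBlock"],   # keep each transformer block on one device
        dtype=torch.float16,
    )

    # Load the fp16 shards from the cloned repo directory and dispatch them.
    model = load_checkpoint_and_dispatch(
        model,
        checkpoint="codegen-16B-nl-sharded",
        device_map=device_map,
        dtype=torch.float16,
    ).eval()
    return model


if __name__ == "__main__":
    # Tokenizer source is an assumption; the sharded repo may ship its own tokenizer files.
    tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-16B-nl")
    model = load_model_sharded()
    inputs = tokenizer("My favorite programming language is", return_tensors="pt").to(0)
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=32)
    print(tokenizer.decode(output[0], skip_special_tokens=True))
```

The README's actual device_map construction between the two hunks may differ (for example, a hand-written map); the sketch is only meant to make the `checkpoint` path change above easier to review.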