Update README.md
README.md CHANGED
@@ -74,12 +74,12 @@ Instantiate the tokenizer, processor, and model.
 ```python
 device = torch.device("cuda:0")
 
-tokenizer = AutoTokenizer.from_pretrained("togethercomputer/Llama-3.1-8B-Dragonfly-
+tokenizer = AutoTokenizer.from_pretrained("togethercomputer/Llama-3.1-8B-Dragonfly-v2")
 clip_processor = AutoProcessor.from_pretrained("openai/clip-vit-large-patch14-336")
 image_processor = clip_processor.image_processor
 processor = DragonflyProcessor(image_processor=image_processor, tokenizer=tokenizer, image_encoding_style="llava-hd")
 
-model = DragonflyForCausalLM.from_pretrained("togethercomputer/Llama-3.1-8B-Dragonfly-
+model = DragonflyForCausalLM.from_pretrained("togethercomputer/Llama-3.1-8B-Dragonfly-v2")
 model = model.to(torch.bfloat16)
 model = model.to(device)
 ```
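For reference, a minimal inference sketch using the objects instantiated above is shown below. It assumes a Hugging Face-style processor and `generate` interface; the prompt template, argument names, and image path are illustrative assumptions, not taken from this commit.

```python
# Minimal inference sketch (assumed interface); reuses `device`, `tokenizer`,
# `processor`, and `model` from the snippet above.
from PIL import Image
import torch

# Illustrative image path and Llama-3.1-style chat prompt (assumptions).
image = Image.open("example.jpg").convert("RGB")
prompt = (
    "<|start_header_id|>user<|end_header_id|>\n\n"
    "Describe this image.<|eot_id|>"
    "<|start_header_id|>assistant<|end_header_id|>\n\n"
)

# Assumed Hugging Face-style processor call; check the Dragonfly README
# for the exact argument names.
inputs = processor(text=[prompt], images=[image], return_tensors="pt")
inputs = inputs.to(device)

with torch.inference_mode():
    output_ids = model.generate(**inputs, max_new_tokens=128)

print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])
```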