Commit de2d7ca • 1 parent f6de0a1 • nit
README.md CHANGED
@@ -39,17 +39,18 @@ The model supports multi-image and multi-prompt generation. Meaning that you can
 Below we used [`"llava-hf/llava-interleave-qwen-0.5b-hf"`](https://huggingface.co/llava-hf/llava-interleave-qwen-0.5b-hf) checkpoint.

 ```python
-from transformers import pipeline
+from transformers import pipeline, AutoProcessor
 from PIL import Image
 import requests

 model_id = "llava-hf/llava-interleave-qwen-7b-dpo-hf"
 pipe = pipeline("image-to-text", model=model_id)
+processor = AutoProcessor.from_pretrained(model_id)

 url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"
 image = Image.open(requests.get(url, stream=True).raw)

-# Define a chat
+# Define a chat history and use `apply_chat_template` to get correctly formatted prompt
 # Each value in "content" has to be a list of dicts with types ("text", "image")
 conversation = [
     {
@@ -87,7 +88,7 @@ model = LlavaForConditionalGeneration.from_pretrained(

 processor = AutoProcessor.from_pretrained(model_id)

-# Define a chat
+# Define a chat history and use `apply_chat_template` to get correctly formatted prompt
 # Each value in "content" has to be a list of dicts with types ("text", "image")
 conversation = [
     {
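For context, here is a minimal sketch of how the updated pipeline example might read end to end after this change, assuming the standard `transformers` chat-template flow. The hunks above cut off at `conversation = [`, so the user message text, the generation call, and the `max_new_tokens` value below are illustrative placeholders rather than the README's actual content.

```python
from transformers import pipeline, AutoProcessor
from PIL import Image
import requests

model_id = "llava-hf/llava-interleave-qwen-7b-dpo-hf"
pipe = pipeline("image-to-text", model=model_id)
processor = AutoProcessor.from_pretrained(model_id)

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Define a chat history and use `apply_chat_template` to get a correctly formatted prompt.
# Each value in "content" has to be a list of dicts with types ("text", "image").
# The question below is a placeholder; the README's actual conversation is not shown in the diff.
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What does this diagram show?"},
            {"type": "image"},
        ],
    },
]
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

# Pass the image together with the templated prompt; generation kwargs are illustrative.
outputs = pipe(image, prompt=prompt, generate_kwargs={"max_new_tokens": 200})
print(outputs[0]["generated_text"])
```

The added `AutoProcessor` line fits this reading: the chat template lives on the processor, so the pipeline snippet needs it to turn `conversation` into a prompt string, which is what the updated comment points to.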