Commit dc5c07d (parent d727650): Update README.md
README.md CHANGED
@@ -123,9 +123,18 @@ To generate from images use the below code after loading the model as shown above
 import requests
 from PIL import Image
 
-prompt = "USER: <image>\nWhat are these? ASSISTANT:"
-image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
+conversation = [
+    {
+        "role": "user",
+        "content": [
+            {"type": "text", "text": "What are these?"},
+            {"type": "image"},
+        ],
+    },
+]
+prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
 
+image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
 raw_image = Image.open(requests.get(image_file, stream=True).raw)
 inputs_image = processor(prompt, images=raw_image, return_tensors='pt').to(0, torch.float16)
 
@@ -142,7 +151,7 @@ conversation_1 = [
     {
         "role": "user",
         "content": [
-            {"type": "text", "text": "What's the content of the image"},
+            {"type": "text", "text": "What's the content of the image>"},
             {"type": "image"},
         ],
     }
@@ -156,8 +165,8 @@ conversation_2 = [
         ],
     },
 ]
-prompt_1 = processor.apply_chat_template(conversation_1)
-prompt_2 = processor.apply_chat_template(conversation_2)
+prompt_1 = processor.apply_chat_template(conversation_1, add_generation_prompt=True)
+prompt_2 = processor.apply_chat_template(conversation_2, add_generation_prompt=True)
 
 s = processor(text=[prompt_1, prompt_2], images=image, videos=clip, padding=True, return_tensors="pt").to(model.device)
 
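
For reference, the image example reads as follows after this commit. This is a runnable sketch rather than part of the diff: it assumes `model` and `processor` are already loaded as shown earlier in the README, and the final `generate`/`decode` lines are illustrative additions, not lines touched by this commit.

```python
import requests
import torch
from PIL import Image

# Describe the user turn as a chat-template conversation instead of a
# hand-formatted prompt string
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What are these?"},
            {"type": "image"},
        ],
    },
]
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

# Download the example COCO image and prepare half-precision inputs on GPU 0
image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
raw_image = Image.open(requests.get(image_file, stream=True).raw)
inputs_image = processor(prompt, images=raw_image, return_tensors='pt').to(0, torch.float16)

# Illustrative generation call (assumed, not in this commit)
output = model.generate(**inputs_image, max_new_tokens=100)
print(processor.decode(output[0], skip_special_tokens=True))
```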
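The substantive fix in the last hunk is passing `add_generation_prompt=True` to `apply_chat_template`: without it the templated prompt ends after the user turn, while with it the template appends the assistant-turn header so the model generates an answer instead of predicting another user message. A minimal sketch of the batched image+video call under the same assumptions (`conversation_1`, `conversation_2`, `image`, and `clip` are defined in the surrounding README examples; the generation lines are illustrative):

```python
# add_generation_prompt=True appends the assistant-turn marker so the model
# continues with a reply rather than another user message
prompt_1 = processor.apply_chat_template(conversation_1, add_generation_prompt=True)
prompt_2 = processor.apply_chat_template(conversation_2, add_generation_prompt=True)

# Batch one image-grounded and one video-grounded prompt together;
# padding=True pads the shorter sequence so both fit in a single batch
inputs = processor(
    text=[prompt_1, prompt_2],
    images=image,
    videos=clip,
    padding=True,
    return_tensors="pt",
).to(model.device)

# Illustrative batched generation (assumed, not in this commit)
out = model.generate(**inputs, max_new_tokens=100)
print(processor.batch_decode(out, skip_special_tokens=True))
```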