Commit f6de0a1
Parent(s): 3aecf82

update processor kwargs

README.md CHANGED
@@ -103,7 +103,7 @@ prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
 
 image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
 raw_image = Image.open(requests.get(image_file, stream=True).raw)
-inputs = processor(
+inputs = processor(images=raw_image, text=prompt, return_tensors='pt').to(0, torch.float16)
 
 output = model.generate(**inputs, max_new_tokens=200, do_sample=False)
 print(processor.decode(output[0][2:], skip_special_tokens=True))
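For context, here is a minimal end-to-end sketch of the updated single-image call with explicit keyword arguments. The checkpoint name and model class are assumptions (the excerpt above does not show how the model and processor are loaded), so adapt them to the repo this README belongs to.

```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, LlavaForConditionalGeneration

# Assumed checkpoint; the README excerpt does not show which repo it loads.
model_id = "llava-hf/llava-interleave-qwen-7b-hf"
model = LlavaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.float16).to(0)
processor = AutoProcessor.from_pretrained(model_id)

# Build the prompt via the processor's chat template, as in the surrounding README.
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "What are these?"},
        ],
    }
]
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
raw_image = Image.open(requests.get(image_file, stream=True).raw)

# Updated call: image and text are now passed as keyword arguments.
inputs = processor(images=raw_image, text=prompt, return_tensors='pt').to(0, torch.float16)

output = model.generate(**inputs, max_new_tokens=200, do_sample=False)
print(processor.decode(output[0][2:], skip_special_tokens=True))
```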
@@ -118,7 +118,7 @@ When prompting with videos/3D/multi-view input, prompt like following:
 image_tokens = "<image>" * n
 prompt = f"<|im_start|>user {image_tokens}\nWhat are these?<|im_end|><|im_start|>assistant"
 
-# With chat template if you sampled
+# With chat template if you sampled 5 frames you have to have 5 images in one conversation turn
 conversation = [
     {
 
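A sketch of the chat-template variant for multi-frame input, reusing `model`, `processor`, and `raw_image` from the snippet above. Repeating the same COCO image is only a stand-in for five sampled video frames, and the layout (five `{"type": "image"}` entries in one user turn) assumes the chat template accepts multiple images per turn, as the added comment describes.

```python
# Sketch: reuses `model`, `processor`, and `raw_image` from the previous snippet.
# The same COCO image stands in for 5 frames sampled from a video.
frames = [raw_image] * 5

# One user turn containing one image entry per sampled frame, then the question.
conversation = [
    {
        "role": "user",
        "content": [{"type": "image"} for _ in frames]
        + [{"type": "text", "text": "What are these?"}],
    }
]
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

# Same keyword-argument call as above, but with a list of images.
inputs = processor(images=frames, text=prompt, return_tensors='pt').to(0, torch.float16)
output = model.generate(**inputs, max_new_tokens=200, do_sample=False)
print(processor.decode(output[0][2:], skip_special_tokens=True))
```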