ahmed-masry committed
Commit 6c75a77 · Parent(s): 88eb904
Update README.md

README.md CHANGED
@@ -23,6 +23,7 @@ You just need to do the following:
 1. Change the **_image_path_** to your chart example image path on your system
 2. Write the **_input_text_**
 
+We recommend using beam search with a beam size of 4, but if your machine has low memory, you can remove num_beams from the generate method.
 ```
 from PIL import Image
 import requests
@@ -50,7 +51,7 @@ inputs['pixel_values'] = inputs['pixel_values'].to(torch.float16)
 prompt_length = inputs['input_ids'].shape[1]
 
 # Generate
-generate_ids = model.generate(**inputs, max_new_tokens=512)
+generate_ids = model.generate(**inputs, num_beams=4, max_new_tokens=512)
 output_text = processor.batch_decode(generate_ids[:, prompt_length:], skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
 print(output_text)
 
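For reference, here is what the recommended and low-memory decoding paths look like when written out. This is a minimal sketch that assumes `model`, `processor`, and `inputs` have already been prepared as in the README snippet above (the model-loading lines fall outside the diff hunks shown here):

```
# Recommended: beam search with 4 beams. generate() keeps 4 candidate
# sequences alive at each step, which improves output quality but
# roughly multiplies decoding memory by the beam count.
generate_ids = model.generate(**inputs, num_beams=4, max_new_tokens=512)

# Low-memory fallback: omit num_beams so generate() defaults to greedy
# decoding (a single candidate sequence).
# generate_ids = model.generate(**inputs, max_new_tokens=512)

# Decode only the newly generated tokens, skipping the prompt.
prompt_length = inputs['input_ids'].shape[1]
output_text = processor.batch_decode(
    generate_ids[:, prompt_length:],
    skip_special_tokens=True,
    clean_up_tokenization_spaces=False,
)[0]
print(output_text)
```

Omitting num_beams falls back to transformers' default greedy decoding, so the only cost of the low-memory path is potentially less fluent or less accurate answers.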