Update README.md
Browse files
README.md
CHANGED
@@ -137,7 +137,7 @@ tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast
|
|
137 |
pixel_values = load_image('./web_dfacd48d-d2c2-492f-b94c-41e6a34ea99f.png', max_num=6).to(torch.bfloat16).cuda()
|
138 |
generation_config = dict(max_new_tokens=1024, do_sample=True)
|
139 |
|
140 |
-
question = "<image>\nPlease describe the image shortly."
|
141 |
response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
|
142 |
print(f'User: {question}\nAssistant: {response}')
|
143 |
```
|
|
|
137 |
pixel_values = load_image('./web_dfacd48d-d2c2-492f-b94c-41e6a34ea99f.png', max_num=6).to(torch.bfloat16).cuda()
|
138 |
generation_config = dict(max_new_tokens=1024, do_sample=True)
|
139 |
|
140 |
+
question = "<image>\nYou are a GUI task expert, I will provide you with a high-level instruction, an action history, a screenshot with its corresponding accessibility tree.\n High-level instruction: {high_level_instruction}\n Action history: {action_history}\n Accessibility tree: {a11y_tree}\n Please generate the low-level thought and action for the next step."
|
141 |
response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
|
142 |
print(f'User: {question}\nAssistant: {response}')
|
143 |
```
|