import gradio as gr
import spaces
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch
import base64
from PIL import Image, ImageDraw
from io import BytesIO
import re
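
# Load the model and processor once at startup, keyed by model id so extra
# checkpoints can be added to the dropdown below.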
models = {
    "OS-Copilot/OS-Atlas-Base-7B": Qwen2VLForConditionalGeneration.from_pretrained(
        "OS-Copilot/OS-Atlas-Base-7B", torch_dtype="auto", device_map="auto"
    ),
}

processors = {
    "OS-Copilot/OS-Atlas-Base-7B": AutoProcessor.from_pretrained("OS-Copilot/OS-Atlas-Base-7B")
}
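
# Serialize a PIL image to a base64-encoded PNG for the chat message payload.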
def image_to_base64(image):
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
    return img_str
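
# Draw each (xmin, ymin, xmax, ymax) box onto the image in place.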
def draw_bounding_boxes(image, bounding_boxes, outline_color="red", line_width=2):
    draw = ImageDraw.Draw(image)
    for box in bounding_boxes:
        xmin, ymin, xmax, ymax = box
        draw.rectangle([xmin, ymin, xmax, ymax], outline=outline_color, width=line_width)
    return image
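
# The model emits box coordinates on a 1000x1000 grid; rescale them to the
# actual image dimensions before drawing.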
def rescale_bounding_boxes(bounding_boxes, original_width, original_height, scaled_width=1000, scaled_height=1000):
    x_scale = original_width / scaled_width
    y_scale = original_height / scaled_height
    rescaled_boxes = []
    for box in bounding_boxes:
        xmin, ymin, xmax, ymax = box
        rescaled_box = [
            xmin * x_scale,
            ymin * y_scale,
            xmax * x_scale,
            ymax * y_scale,
        ]
        rescaled_boxes.append(rescaled_box)
    return rescaled_boxes
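
# @spaces.GPU requests a ZeroGPU device for the duration of each call when the
# demo runs on Hugging Face Spaces.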
@spaces.GPU
def run_example(image, text_input, model_id="OS-Copilot/OS-Atlas-Base-7B"):
    model = models[model_id].eval()
    processor = processors[model_id]
    prompt = f"In this UI screenshot, what is the position of the element corresponding to the command \"{text_input}\" (with bbox)?"
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": f"data:image/png;base64,{image_to_base64(image)}"},
                {"type": "text", "text": prompt},
            ],
        }
    ]
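
    # Render the chat template and gather the vision inputs for the processor.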
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
inputs = inputs.to("cuda")
generated_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [
out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=False, clean_up_tokenization_spaces=False
)
print(output_text)
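
    # Parse the grounded response. This assumes the model emitted
    # <|object_ref_start|>...<|object_ref_end|> and <|box_start|>...<|box_end|>
    # markers with a box of the form (x1,y1),(x2,y2) on the 0-1000 grid.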
    text = output_text[0]
    object_ref_pattern = r"<\|object_ref_start\|>(.*?)<\|object_ref_end\|>"
    box_pattern = r"<\|box_start\|>(.*?)<\|box_end\|>"
    object_ref = re.search(object_ref_pattern, text).group(1)
    box_content = re.search(box_pattern, text).group(1)
    boxes = [tuple(map(int, pair.strip("()").split(','))) for pair in box_content.split("),(")]
    boxes = [[boxes[0][0], boxes[0][1], boxes[1][0], boxes[1][1]]]
    scaled_boxes = rescale_bounding_boxes(boxes, image.width, image.height)
    return object_ref, draw_bounding_boxes(image, scaled_boxes)
css = """
#output {
height: 500px;
overflow: auto;
border: 1px solid #ccc;
}
"""
with gr.Blocks(css=css) as demo:
    gr.Markdown(
        """
        # Demo for OS-ATLAS: A Foundation Action Model For Generalist GUI Agents
        """
    )
    with gr.Row():
        with gr.Column():
            input_img = gr.Image(label="Input Image", type="pil")
            model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="OS-Copilot/OS-Atlas-Base-7B")
            text_input = gr.Textbox(label="User Prompt")
            submit_btn = gr.Button(value="Submit")
        with gr.Column():
            model_output_text = gr.Textbox(label="Model Output Text")
            annotated_image = gr.Image(label="Annotated Image")

    gr.Examples(
        examples=[
            ["assets/web_6f93090a-81f6-489e-bb35-1a2838b18c01.png", "select search textfield"],
            ["assets/web_6f93090a-81f6-489e-bb35-1a2838b18c01.png", "switch to discussions"],
        ],
        inputs=[input_img, text_input],
        outputs=[model_output_text, annotated_image],
        fn=run_example,
        cache_examples=True,
        label="Try examples",
    )

    submit_btn.click(run_example, [input_img, text_input, model_selector], [model_output_text, annotated_image])

demo.launch(debug=True)