<!DOCTYPE html>
<html>
    <head>
        <meta charset="utf-8">
        <meta name="viewport" content="width=device-width, initial-scale=1">
        <title>Gradio-Lite: Serverless Gradio Running Entirely in Your Browser</title>
        <meta name="description" content="Gradio-Lite: Serverless Gradio Running Entirely in Your Browser">

        <!-- Load the Gradio-Lite runtime, which runs Gradio on Pyodide entirely in the browser. -->
        <script type="module" crossorigin src="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.js"></script>
        <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css" />

        <style>
            html, body {
                margin: 0;
                padding: 0;
                height: 100%;
            }
        </style>
    </head>
    <body>
        <gradio-lite>
            <gradio-file name="app.py" entrypoint>
from transformers_js import import_transformers_js, as_url
import gradio as gr


# Reference: https://huggingface.co/spaces/Xenova/yolov9-web/blob/main/index.js

IMAGE_SIZE = 256

# Gradio-Lite runs this app in Pyodide, so top-level await can be used to
# load the Transformers.js bridge before the interface is built.
transformers = await import_transformers_js()
AutoProcessor = transformers.AutoProcessor
AutoModel = transformers.AutoModel
RawImage = transformers.RawImage

processor = await AutoProcessor.from_pretrained('Xenova/yolov9-c')

# For this demo, we resize the image to IMAGE_SIZE x IMAGE_SIZE
processor.feature_extractor.size = {"width": IMAGE_SIZE, "height": IMAGE_SIZE}

model = await AutoModel.from_pretrained('Xenova/yolov9-c')


async def detect(image_path):
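    # gr.Image(type="filepath") passes the uploaded image as a local file path,
    # which RawImage.read() can open directly.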
    image = await RawImage.read(image_path)

    processed_input = await processor(image)

    result = await model(images=processed_input["pixel_values"])

    outputs = result["outputs"]  # Tensor
    np_outputs = outputs.to_numpy()  # [xmin, ymin, xmax, ymax, score, id][]
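    # Scale each box from the IMAGE_SIZE x IMAGE_SIZE model input back to the
    # original image dimensions.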
    gradio_labels = [
        # List[Tuple[numpy.ndarray | Tuple[int, int, int, int], str]]
        (
            (
                int(xmin * image.width / IMAGE_SIZE),
                int(ymin * image.height / IMAGE_SIZE),
                int(xmax * image.width / IMAGE_SIZE),
                int(ymax * image.height / IMAGE_SIZE),
            ),
            # id2label keys are strings in the converted config, hence str(int(...)).
            model.config.id2label[str(int(class_id))],
        )
        for xmin, ymin, xmax, ymax, _score, class_id in np_outputs
    ]

    # gr.AnnotatedImage expects (base image, [(bounding box, label), ...]).
    annotated_image_data = image_path, gradio_labels
    return annotated_image_data, np_outputs

demo = gr.Interface(
    detect,
    gr.Image(type="filepath"),
    [
        gr.AnnotatedImage(),
        gr.JSON(),
    ],
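    # The example images are provided by <gradio-file> tags later in the HTML.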
    examples=[
        ["cats.jpg"],
        ["city-streets.jpg"],
    ]
)

demo.launch()
            </gradio-file>

            <!-- These example files are fetched and mounted into the in-browser filesystem on startup. -->
            <gradio-file name="cats.jpg" url="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/cats.jpg" />
            <gradio-file name="city-streets.jpg" url="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg" />

            <!-- transformers_js_py provides the Python bridge to Transformers.js inside Pyodide. -->
            <gradio-requirements>
transformers_js_py
            </gradio-requirements>
        </gradio-lite>
    </body>
</html>