Upload 7 files

- .gitattributes +35 -35
- README.md +64 -1
- config.json +19 -0
- onnx/model.onnx +3 -0
- onnx/model_fp16.onnx +3 -0
- onnx/model_quantized.onnx +3 -0
- preprocessor_config.json +13 -0
.gitattributes
CHANGED
@@ -1,35 +1,35 @@

The diff removes and re-adds all 35 Git LFS tracking rules; the content is identical on both sides:

```
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
```
README.md
CHANGED
@@ -1,3 +1,66 @@

The previous README contained only an empty YAML front-matter block (`---` / `---`); the new version reads:

---
library_name: transformers.js
license: gpl-3.0
pipeline_tag: object-detection
---

[yolov9](https://github.com/WongKinYiu/yolov9) with ONNX weights to be compatible with Transformers.js.

## Usage (Transformers.js)

If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@xenova/transformers) using:
```bash
npm i @xenova/transformers
```

**Example:** Perform object-detection with `Xenova/gelan-c_all`.

```js
import { AutoModel, AutoProcessor, RawImage } from '@xenova/transformers';

// Load model
const model = await AutoModel.from_pretrained('Xenova/gelan-c_all', {
    // quantized: false, // (Optional) Use unquantized version.
})

// Load processor
const processor = await AutoProcessor.from_pretrained('Xenova/gelan-c_all');
// processor.feature_extractor.size = { shortest_edge: 128 } // (Optional) Update resize value

// Read image and run processor
const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg';
const image = await RawImage.read(url);
const inputs = await processor(image);

// Run object detection
const threshold = 0.3;
const { outputs } = await model(inputs);
const predictions = outputs.tolist();

for (const [xmin, ymin, xmax, ymax, score, id] of predictions) {
  if (score < threshold) break;
  const bbox = [xmin, ymin, xmax, ymax].map(x => x.toFixed(2)).join(', ')
  console.log(`Found "${model.config.id2label[id]}" at [${bbox}] with score ${score.toFixed(2)}.`)
}
// Found "car" at [63.06, 118.80, 139.61, 146.78] with score 0.84.
// Found "bicycle" at [158.32, 166.13, 195.02, 189.03] with score 0.81.
// Found "bicycle" at [123.22, 183.83, 162.71, 206.30] with score 0.79.
// Found "bicycle" at [0.56, 180.92, 39.26, 203.94] with score 0.78.
// Found "car" at [157.10, 132.38, 223.72, 167.69] with score 0.77.
// Found "person" at [193.04, 90.98, 207.09, 116.78] with score 0.77.
// Found "person" at [12.49, 164.97, 27.63, 197.55] with score 0.66.
// Found "traffic light" at [102.80, 74.25, 124.12, 95.75] with score 0.62.
// ...
```

## Demo

Test it out [here](https://huggingface.co/spaces/Xenova/video-object-detection)!

<video controls autoplay loop src="https://cdn-uploads.huggingface.co/production/uploads/61b253b7ac5ecaae3d1efe0c/AgNFx_3cPMh5zjR91n9Dt.mp4"></video>

---

Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
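For reference, the seven files in this commit produce exactly the layout that the note above recommends, with the ONNX weights under an `onnx/` subfolder (comments are annotations, not part of the commit):

```
.
├── .gitattributes
├── README.md
├── config.json
├── preprocessor_config.json
└── onnx/
    ├── model.onnx            # full-precision export
    ├── model_fp16.onnx       # half-precision export
    └── model_quantized.onnx  # quantized export (the default unless quantized: false is passed)
```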
config.json
ADDED
@@ -0,0 +1,19 @@

```json
{
  "id2label": {
    "0": "paper",
    "1": "glass",
    "2": "cardboard",
    "3": "trash",
    "4": "metal",
    "5": "plastic"
  },
  "label2id": {
    "paper": 0,
    "glass": 1,
    "cardboard": 2,
    "trash": 3,
    "metal": 4,
    "plastic": 5
  },
  "model_type": "yolov5"
}
```
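This label map is what the README example reads through `model.config.id2label[id]`. A minimal sketch of how the values surface once the model is loaded, assuming a hypothetical placeholder repo id (`<this-repo>` stands in for wherever this commit lives and is not a real model id):

```js
import { AutoModel } from '@xenova/transformers';

// '<this-repo>' is a hypothetical placeholder, not a real model id.
const model = await AutoModel.from_pretrained('<this-repo>');

// config.json is parsed into model.config, so the tables above are available directly.
console.log(model.config.model_type);        // "yolov5"
console.log(model.config.id2label[3]);       // "trash"
console.log(model.config.label2id['metal']); // 4
```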
onnx/model.onnx
ADDED
@@ -0,0 +1,3 @@

Git LFS pointer:

```
version https://git-lfs.github.com/spec/v1
oid sha256:2d35026e0a0de2107e4e768c44d82761e94c61c463723e76e976bf77e1f620cc
size 28553048
```
onnx/model_fp16.onnx
ADDED
@@ -0,0 +1,3 @@

Git LFS pointer:

```
version https://git-lfs.github.com/spec/v1
oid sha256:0306c79104caf6909f4216d16cf71bcde90d45ecc3a1b3b967211fb1a81b9721
size 51243235
```
onnx/model_quantized.onnx
ADDED
@@ -0,0 +1,3 @@

Git LFS pointer:

```
version https://git-lfs.github.com/spec/v1
oid sha256:3c6b494f2df3abce73e607dd11c709dd0e42afa4b32017383823a51c06006494
size 26097243
```
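The three files under `onnx/` are exports of the same network at different precisions. A minimal sketch of how they map onto the loading options the README already uses, assuming the Transformers.js v2 convention that `quantized` defaults to `true` (the repo id is again a hypothetical placeholder; nothing in this commit selects the fp16 file through a loader option):

```js
import { AutoModel } from '@xenova/transformers';

// Hypothetical placeholder repo id, for illustration only.
const REPO = '<this-repo>';

// Default: the quantized export, onnx/model_quantized.onnx (~26 MB).
const quantized = await AutoModel.from_pretrained(REPO);

// quantized: false — the full-precision export, onnx/model.onnx (~28.5 MB),
// matching the commented-out option in the README example.
const full = await AutoModel.from_pretrained(REPO, { quantized: false });

// onnx/model_fp16.onnx (~51 MB) is the half-precision export; this commit does not
// wire it to a loader option, so treat it as provided for other runtimes/tooling.
```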
preprocessor_config.json
ADDED
@@ -0,0 +1,13 @@

```json
{
  "do_normalize": false,
  "do_pad": false,
  "do_rescale": true,
  "do_resize": true,
  "feature_extractor_type": "ImageFeatureExtractor",
  "resample": 2,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "shortest_edge": 224
  },
  "size_divisibility": 32
}
```
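These settings drive the `AutoProcessor` call in the README example. A minimal sketch of what they mean in practice, assuming a hypothetical placeholder repo id and the stock `ImageFeatureExtractor` behavior (exact output dimensions depend on the input image's aspect ratio):

```js
import { AutoProcessor, RawImage } from '@xenova/transformers';

// Hypothetical placeholder repo id, for illustration only.
const processor = await AutoProcessor.from_pretrained('<this-repo>');

const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/city-streets.jpg';
const image = await RawImage.read(url);
const { pixel_values } = await processor(image);

// Per preprocessor_config.json, the processor:
//  - resizes so the shortest edge is 224 px ("size": { "shortest_edge": 224 }),
//    keeping dimensions at multiples of 32 ("size_divisibility": 32),
//    with bilinear resampling ("resample": 2);
//  - rescales pixel values by 1/255 ("rescale_factor": 0.00392...);
//  - skips mean/std normalization and padding ("do_normalize": false, "do_pad": false).
console.log(pixel_values.dims); // [1, 3, h, w], with the shortest side at 224
```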