Add application file
- .gitattributes +35 -35
- README.md +12 -12
- aryadytm-vit-vehicle-classifier/config.json +60 -0
- aryadytm-vit-vehicle-classifier/model.safetensors +3 -0
- assets/spotlight.png +0 -0
- assets/vit.png +0 -0
- requirements.txt +4 -0
- st_app.py +58 -0
.gitattributes
CHANGED
@@ -1,35 +1,35 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
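Note: the *.safetensors rule above is why aryadytm-vit-vehicle-classifier/model.safetensors appears later in this commit as a three-line Git LFS pointer rather than as the roughly 343 MB weight file itself.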
README.md
CHANGED
@@ -1,12 +1,12 @@
----
-title:
-emoji:
-colorFrom:
-colorTo:
-sdk: streamlit
-sdk_version: 1.28.2
-app_file:
-pinned: false
----
-
-
+---
+title: ViT Vehicle Classifier
+emoji: 🚋
+colorFrom: yellow
+colorTo: blue
+sdk: streamlit
+sdk_version: 1.28.2
+app_file: st_app.py
+pinned: false
+---
+
+This app lets you classify vehicle images using a pre-trained ViT model. You need to upload your own image.
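Note: the YAML front matter is the Hugging Face Spaces configuration. sdk: streamlit and sdk_version: 1.28.2 pin the runtime, app_file: st_app.py tells the Space which script to launch, and title, emoji, colorFrom, and colorTo only control how the Space card is rendered.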
aryadytm-vit-vehicle-classifier/config.json
ADDED
@@ -0,0 +1,60 @@
+{
+  "_name_or_path": "google/vit-base-patch16-224-in21k",
+  "architectures": [
+    "ViTForImageClassification"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "encoder_stride": 16,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.0,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "Ambulance",
+    "1": "Barge",
+    "10": "Segway",
+    "11": "Snowmobile",
+    "12": "Tank",
+    "13": "Taxi",
+    "14": "Truck",
+    "15": "Van",
+    "2": "Bicycle",
+    "3": "Boat",
+    "4": "Bus",
+    "5": "Car",
+    "6": "Cart",
+    "7": "Helicopter",
+    "8": "Limousine",
+    "9": "Motorcycle"
+  },
+  "image_size": 224,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "Ambulance": "0",
+    "Barge": "1",
+    "Bicycle": "2",
+    "Boat": "3",
+    "Bus": "4",
+    "Car": "5",
+    "Cart": "6",
+    "Helicopter": "7",
+    "Limousine": "8",
+    "Motorcycle": "9",
+    "Segway": "10",
+    "Snowmobile": "11",
+    "Tank": "12",
+    "Taxi": "13",
+    "Truck": "14",
+    "Van": "15"
+  },
+  "layer_norm_eps": 1e-12,
+  "model_type": "vit",
+  "num_attention_heads": 12,
+  "num_channels": 3,
+  "num_hidden_layers": 12,
+  "patch_size": 16,
+  "problem_type": "single_label_classification",
+  "qkv_bias": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.35.2"
+}
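The id2label / label2id tables above are what turn an argmax over the 16 logits into a vehicle class name. A minimal sketch of inspecting that mapping, assuming the repository is checked out locally so the relative path resolves:

    from transformers import ViTConfig

    # Load only the configuration of the fine-tuned checkpoint (local path).
    config = ViTConfig.from_pretrained("./aryadytm-vit-vehicle-classifier")

    # config.json stores id2label keys as strings; transformers normalizes them
    # to ints on load, so an argmax index can be used as a dictionary key directly.
    print(config.num_labels)    # 16
    print(config.id2label[9])   # Motorcycle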
aryadytm-vit-vehicle-classifier/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab0a9056bbd040174ef686d89b3130a0b98f8846eb9c8adea25e2c81d26bdda7
+size 343267040
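Only the Git LFS pointer is committed here; cloning with git-lfs installed (or running git lfs pull) materializes the roughly 343 MB weight file. A quick sanity check that the pointer was resolved, sketched assuming the safetensors package is available (it is normally pulled in as a dependency of the pinned transformers release):

    from safetensors import safe_open

    # Open the checkpoint lazily; this raises if the file is still an LFS pointer.
    with safe_open("./aryadytm-vit-vehicle-classifier/model.safetensors", framework="pt") as f:
        tensor_names = list(f.keys())

    print(len(tensor_names))   # number of parameter tensors in the ViT checkpoint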
assets/spotlight.png
ADDED
assets/vit.png
ADDED
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+streamlit==1.28.2
+transformers==4.35.2
+Pillow==10.1.0
+torch==2.1.1
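These four pins mirror the Space runtime (the streamlit pin matches sdk_version: 1.28.2 in the README front matter). To reproduce locally, install them with pip install -r requirements.txt and start the app with streamlit run st_app.py.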
st_app.py
ADDED
@@ -0,0 +1,58 @@
+import streamlit as st
+from transformers import ViTForImageClassification, ViTFeatureExtractor
+from PIL import Image
+import torch
+import torch.nn.functional as F
+
+# Load the feature extractor and model
+model_name_or_path = 'google/vit-base-patch16-224-in21k'  # Replace with your actual model path
+feature_extractor = ViTFeatureExtractor.from_pretrained(model_name_or_path)
+model = ViTForImageClassification.from_pretrained("./aryadytm-vit-vehicle-classifier")
+
+def predict_image(image):
+    inputs = feature_extractor(images=image, return_tensors="pt")
+
+    with torch.no_grad():
+        outputs = model(**inputs)
+
+    logits = outputs.logits
+    probs = F.softmax(logits, dim=-1)
+    predicted_label_id = probs.argmax(-1).item()
+    predicted_label = model.config.id2label[predicted_label_id]
+    confidence = probs.max().item()
+
+    return predicted_label, confidence
+
+# Streamlit UI
+st.markdown("## Vehicle Image Classification")
+st.image('./assets/spotlight.png')
+st.markdown("""
+### Group 2
+- Arya Adyatma - 2501985836
+- Aldre Muhammad Keyzar - 2502006543
+- Devin Eldrian Wijaya - 2501961363
+- Rollando Marcellino Himmel Madison - 2502006575
+
+This app lets you classify vehicle images using a pre-trained ViT model. You need to upload your own image.
+- Kaggle dataset: https://www.kaggle.com/code/rydytm/vehicle-classification/edit.
+- Colab Notebook: https://colab.research.google.com/drive/1El7RhY69KvE9Nj9vAxUPGjg42NwuNcPu?usp=sharing
+""")
+st.image('./assets/vit.png')
+st.markdown("### Upload Your Image Here")
+
+uploaded_file = st.file_uploader("Choose an image...", type=['png', 'jpg', 'jpeg'])
+
+if uploaded_file is not None:
+    image = (
+        Image.open(uploaded_file)
+        .convert("RGB")
+        .resize((512, 512))
+    )
+    st.image(image, caption='Uploaded Image', use_column_width=True)
+    st.write("")
+
+    predicted_label, confidence = predict_image(image)
+
+    st.write("### Prediction Result")
+    st.write(f"Predicted label: **{predicted_label}**")
+    st.write(f"Confidence: **{confidence:.2f}**")
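The prediction path in st_app.py can also be exercised head-less, e.g. as a smoke test before pushing to the Space. A minimal sketch under the same pinned libraries; test_car.jpg is a placeholder filename, not a file in this repository, and ViTFeatureExtractor is deprecated in favour of ViTImageProcessor in newer transformers releases but still works with 4.35.2:

    import torch
    import torch.nn.functional as F
    from PIL import Image
    from transformers import ViTFeatureExtractor, ViTForImageClassification

    # Same preprocessing and checkpoint the Streamlit app uses.
    feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")
    model = ViTForImageClassification.from_pretrained("./aryadytm-vit-vehicle-classifier")
    model.eval()

    image = Image.open("test_car.jpg").convert("RGB")
    inputs = feature_extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        probs = F.softmax(model(**inputs).logits, dim=-1)

    label = model.config.id2label[probs.argmax(-1).item()]
    print(f"{label} ({probs.max().item():.2f})")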