hardyliyanto committed on
Commit 2545225 · 1 Parent(s): 312d631

change model directory

Files changed (2)
  1. app.py +83 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,83 @@
+ from transformers import ViTImageProcessor, AutoModelForImageClassification
+
+ import torch
+ import gradio as gr
+ import os
+ import glob
+ import mediapipe as mp
+ import numpy as np
+ from PIL import Image
+
+ feature_extractor = ViTImageProcessor.from_pretrained('ArdyL/VIT_SIBI_ALL')
+ model = AutoModelForImageClassification.from_pretrained('ArdyL/VIT_SIBI_ALL')
+
+ mp_hands = mp.solutions.hands
+ # mp_drawing_styles = mp.solutions.drawing_styles
+ # mp_holistic = mp.solutions.holistic
+ # mp_pose = mp.solutions.pose
+ mp_drawing = mp.solutions.drawing_utils
+ examples_dir = './'
+ example_files = glob.glob(os.path.join(examples_dir, '*.jpg'))
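+
+
+ # Preprocessing reduces each frame to a landmark sketch: MediaPipe Hands finds
+ # the hand, its landmarks are redrawn in black on a white canvas, and the ViT
+ # model classifies that canvas instead of the raw photo.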
+ def preprocess(im):
+     with mp_hands.Hands(min_detection_confidence=0.3, min_tracking_confidence=0.3) as hands:
+
+         # Detect hand landmarks in the RGB frame with MediaPipe Hands
+         results = hands.process(im)
+         # Start from an all-white canvas with the same shape as the input
+         annotated_image = np.empty(np.asarray(im).shape)
+         annotated_image.fill(255)
+         hand_found = bool(results.multi_hand_landmarks)
+         if hand_found:
+             # Trace the landmarks and their connections in black on the canvas
+             for hand_landmarks in results.multi_hand_landmarks:
+                 mp_drawing.draw_landmarks(annotated_image, hand_landmarks, mp_hands.HAND_CONNECTIONS,
+                                           mp_drawing.DrawingSpec(
+                                               color=(0, 0, 0), thickness=2, circle_radius=2),
+                                           mp_drawing.DrawingSpec(
+                                               color=(0, 0, 0), thickness=2, circle_radius=2),
+                                           )
+
+         # Scale the canvas to [0, 1] floats before it goes to the feature extractor
+         annotated_image /= 255
+         return annotated_image
+
+
+ def classify_image(image):
+     preprocessedImage = preprocess(image)
+     with torch.no_grad():
+         model.eval()
+         inputs = feature_extractor(
+             images=preprocessedImage, return_tensors="pt")
+         outputs = model(**inputs)
+
+     logits = outputs.logits
+     # The highest logit is the predicted class; id2label maps it to its name
+     predicted_label = logits.argmax(-1).item()
+     label = model.config.id2label[predicted_label]
+     return label  # confidences
+
+
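+ # Two-tab UI: one tab takes an uploaded image, the other a webcam capture;
+ # both feed the same classify_image handler.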
+ with gr.Blocks(title="ViT - SIBI Classifier") as demo:
+     with gr.Tab("Upload Image", id='upload-image'):
+         with gr.Row():
+             uploadImage = gr.Image(
+                 type="numpy", image_mode="RGB", shape=(224, 224))
+             # "Hasil" = "Result"; button text "Terjemahkan" = "Translate" (Indonesian)
+             output_label = gr.Label(label="Hasil", num_top_classes=5)
+         with gr.Row():
+             send_btn = gr.Button("Terjemahkan")
+         send_btn.click(fn=classify_image, inputs=uploadImage,
+                        outputs=output_label)
+
+     with gr.Tab("Capture Image", id='capture-image'):
+         with gr.Row():
+             streamImage = gr.Image(
+                 type="numpy", source='webcam', image_mode="RGB", shape=(224, 224))
+             output_label2 = gr.Label(label="Hasil", num_top_classes=5)
+         with gr.Row():
+             send_btn2 = gr.Button("Terjemahkan")
+         send_btn2.click(fn=classify_image,
+                         inputs=streamImage, outputs=output_label2)
+
+
+ # demo.queue(concurrency_count=3)
+ demo.launch(debug=True)
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ mediapipe
+ transformers
+ torch
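+ # gradio is not pinned here; on a Gradio Space the SDK runtime supplies it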