leonelhs committed on
Commit
9a02a1c
1 Parent(s): d32c6db

init space

Files changed (7)
  1. .gitattributes +1 -0
  2. .gitignore +4 -0
  3. app.py +55 -0
  4. examples/01.jpg +3 -0
  5. examples/02.jpg +3 -0
  6. examples/03.jpg +3 -0
  7. poser.py +141 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.jpg filter=lfs diff=lfs merge=lfs -text
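
The added line is exactly what running git lfs track "*.jpg" appends, so the example JPEGs introduced below are stored as LFS pointers rather than raw binaries.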
.gitignore ADDED
@@ -0,0 +1,4 @@
+ .idea/
+ __pycache__/
+ requirements.txt
+ push_model.py
app.py ADDED
@@ -0,0 +1,55 @@
+ import PIL.Image
+ import PIL.ImageOps
+ import gradio as gr
+ import numpy as np
+ import tensorflow as tf
+
+ from poser import draw_bones, movenet
+
+
+ def predict(image: PIL.Image.Image):
+     input_size = 256
+     size = (1280, 1280)
+     image = PIL.ImageOps.fit(image, size, PIL.Image.LANCZOS)
+     image_tf = tf.keras.preprocessing.image.img_to_array(image)
+     # Resize and pad the image to keep the aspect ratio and fit the expected size.
+     input_image = tf.expand_dims(image_tf, axis=0)
+     input_image = tf.image.resize_with_pad(input_image, input_size, input_size)
+     keypoints = movenet(input_image)
+     keypoints = np.array(keypoints)
+     image = tf.keras.preprocessing.image.array_to_img(image_tf)
+     draw_bones(image, keypoints)
+     return image
+
+
+ footer = r"""
+ <center>
+ <b>
+ Demo for <a href='https://www.tensorflow.org/hub/tutorials/movenet'>MoveNet</a>
+ </b>
+ </center>
+ """
+
+ with gr.Blocks(title="MoveNet") as app:
+     gr.HTML("<center><h1>Human Pose Estimation with MoveNet</h1></center>")
+     gr.HTML("<center><h3>MoveNet: Ultra fast and accurate pose detection model</h3></center>")
+     with gr.Row().style(equal_height=False):
+         with gr.Column():
+             input_img = gr.Image(type="pil", label="Input image")
+             run_btn = gr.Button(variant="primary")
+         with gr.Column():
+             output_img = gr.Image(type="numpy", label="Output image")
+             gr.ClearButton(components=[input_img, output_img], variant="stop")
+
+     run_btn.click(predict, [input_img], [output_img])
+
+     with gr.Row():
+         blobs = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)]
+         examples = gr.Dataset(components=[input_img], samples=blobs)
+         examples.click(lambda x: x[0], [examples], [input_img])
+
+     with gr.Row():
+         gr.HTML(footer)
+
+ app.queue()
+ app.launch(share=False, debug=True, show_error=True)
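
Note that requirements.txt is listed in .gitignore, so the Space's dependencies are not recorded in this commit. A minimal sketch, inferred from the imports in app.py and poser.py (package names assumed, versions left unpinned):

gradio
huggingface_hub
numpy
Pillow
tensorflow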
examples/01.jpg ADDED

Git LFS Details

  • SHA256: 553341d9732edb3943a76d62eed2f0934061f85a20fdcc2815f05d7b283693fe
  • Pointer size: 132 Bytes
  • Size of remote file: 1.22 MB
examples/02.jpg ADDED

Git LFS Details

  • SHA256: 353c8c048567931d18049f6dece667b7149d3c56b333c6032662e118fde178dd
  • Pointer size: 132 Bytes
  • Size of remote file: 1.21 MB
examples/03.jpg ADDED

Git LFS Details

  • SHA256: 5f2df3f80cc6e13d7d00dce94aa18f35a424478ba2b733f0335eb616e9d29b29
  • Pointer size: 131 Bytes
  • Size of remote file: 722 kB
poser.py ADDED
@@ -0,0 +1,141 @@
+ #############################################################################
+ #
+ # Source from:
+ # https://www.tensorflow.org/hub/tutorials/movenet
+ #
+ #
+ #############################################################################
+ import PIL.Image
+ import PIL.ImageOps
+ import numpy as np
+ import tensorflow as tf
+ from PIL import ImageDraw
+ from huggingface_hub import snapshot_download
+
+
+ # Dictionary that maps from joint names to keypoint indices.
+ KEYPOINT_DICT = {
+     'nose': 0,
+     'left_eye': 1, 'right_eye': 2,
+     'left_ear': 3, 'right_ear': 4,
+     'left_shoulder': 5, 'right_shoulder': 6,
+     'left_elbow': 7, 'right_elbow': 8,
+     'left_wrist': 9, 'right_wrist': 10,
+     'left_hip': 11, 'right_hip': 12,
+     'left_knee': 13, 'right_knee': 14,
+     'left_ankle': 15, 'right_ankle': 16
+ }
+
+ # Maps skeleton edges (pairs of keypoint indices) to drawing colors.
+ COLOR_DICT = {
+     (0, 1): 'Magenta',
+     (0, 2): 'Cyan',
+     (1, 3): 'Magenta',
+     (2, 4): 'Cyan',
+     (0, 5): 'Magenta',
+     (0, 6): 'Cyan',
+     (5, 7): 'Magenta',
+     (7, 9): 'Magenta',
+     (6, 8): 'Cyan',
+     (8, 10): 'Cyan',
+     (5, 6): 'Yellow',
+     (5, 11): 'Magenta',
+     (6, 12): 'Cyan',
+     (11, 12): 'Yellow',
+     (11, 13): 'Magenta',
+     (13, 15): 'Magenta',
+     (12, 14): 'Cyan',
+     (14, 16): 'Cyan'
+ }
+
+
+ def process_keypoints(keypoints, height, width, threshold=0.22):
+     """Returns high-confidence keypoints and edges for visualization.
+
+     Args:
+         keypoints: A numpy array with shape [1, 1, 17, 3] representing
+             the keypoint coordinates and scores returned from the MoveNet model.
+         height: height of the image in pixels.
+         width: width of the image in pixels.
+         threshold: minimum confidence score for a keypoint to be
+             visualized.
+
+     Returns:
+         A (joints, bones, colors) tuple containing:
+         * the coordinates of all keypoints of all detected entities;
+         * the coordinates of all skeleton edges of all detected entities;
+         * the colors in which the edges should be plotted.
+     """
+     keypoints_all = []
+     keypoint_edges_all = []
+     colors = []
+     num_instances, _, _, _ = keypoints.shape
+     for idx in range(num_instances):
+         kpts_x = keypoints[0, idx, :, 1]
+         kpts_y = keypoints[0, idx, :, 0]
+         kpts_scores = keypoints[0, idx, :, 2]
+         kpts_absolute_xy = np.stack(
+             [width * np.array(kpts_x), height * np.array(kpts_y)], axis=-1)
+         kpts_above_thresh_absolute = kpts_absolute_xy[
+             kpts_scores > threshold, :]
+         keypoints_all.append(kpts_above_thresh_absolute)
+
+         for edge_pair, color in COLOR_DICT.items():
+             if (kpts_scores[edge_pair[0]] > threshold and
+                     kpts_scores[edge_pair[1]] > threshold):
+                 x_start = kpts_absolute_xy[edge_pair[0], 0]
+                 y_start = kpts_absolute_xy[edge_pair[0], 1]
+                 x_end = kpts_absolute_xy[edge_pair[1], 0]
+                 y_end = kpts_absolute_xy[edge_pair[1], 1]
+                 line_seg = np.array([[x_start, y_start], [x_end, y_end]])
+                 keypoint_edges_all.append(line_seg)
+                 colors.append(color)
+     if keypoints_all:
+         joints = np.concatenate(keypoints_all, axis=0)
+     else:
+         joints = np.zeros((0, 17, 2))
+
+     if keypoint_edges_all:
+         bones = np.stack(keypoint_edges_all, axis=0)
+     else:
+         bones = np.zeros((0, 2, 2))
+     return joints, bones, colors
+
+
+ def draw_bones(pixmap: PIL.Image.Image, keypoints):
+     """Draws the detected skeleton edges and joints onto the image in place."""
+     draw = ImageDraw.Draw(pixmap)
+     joints, bones, colors = process_keypoints(keypoints, pixmap.height, pixmap.width)
+
+     for bone, color in zip(bones.tolist(), colors):
+         draw.line((*bone[0], *bone[1]), fill=color, width=4)
+
+     radius = 3
+
+     for c_x, c_y in joints:
+         shape = [(c_x - radius, c_y - radius), (c_x + radius, c_y + radius)]
+         draw.ellipse(shape, fill="red", outline="red")
+
+
+ def movenet(image):
+     """Runs detection on an input image.
+
+     Args:
+         image: A [1, height, width, 3] tensor representing the input image
+             pixels. Note that the height/width should already be resized to match
+             the expected input resolution of the model before passing into this function.
+
+     Returns:
+         A [1, 1, 17, 3] float numpy array representing the predicted keypoint
+         coordinates and scores.
+     """
+     model_path = snapshot_download("leonelhs/movenet")
+     module = tf.saved_model.load(model_path)
+     model = module.signatures['serving_default']
+     # SavedModel format expects tensor type of int32.
+     image = tf.cast(image, dtype=tf.int32)
+     # Run model inference.
+     outputs = model(image)
+     # Output is a [1, 1, 17, 3] tensor.
+     return outputs['output_0'].numpy()
+
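
For quick testing outside Gradio, here is a minimal driver sketch for poser.py; it mirrors the square-crop preprocessing app.py performs, and the input/output paths are hypothetical:

import PIL.Image
import PIL.ImageOps
import tensorflow as tf

from poser import draw_bones, movenet

# Hypothetical input path; any RGB photograph works.
image = PIL.Image.open("examples/01.jpg").convert("RGB")
# Square-crop first (as app.py does) so the padded 256x256 model input
# and the drawing surface share the same aspect ratio.
image = PIL.ImageOps.fit(image, (1280, 1280), PIL.Image.LANCZOS)

input_image = tf.expand_dims(
    tf.keras.preprocessing.image.img_to_array(image), axis=0)
input_image = tf.image.resize_with_pad(input_image, 256, 256)

keypoints = movenet(input_image)  # [1, 1, 17, 3] numpy array of (y, x, score)
draw_bones(image, keypoints)      # draws onto the image in place
image.save("pose_out.jpg")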