supArs committed on
Commit
00d036b
1 Parent(s): 1bc0acf

Upload 10 files

app.py ADDED
@@ -0,0 +1,76 @@
+ ### 1. Imports and class names setup ###
+ import gradio as gr
+ import os
+ import torch
+ from model import create_effnetb0_model
+ from timeit import default_timer as timer
+ from typing import Tuple, Dict
+
+ # Setup class names
+ class_names = ["Happy", "Disgusted", "Surprised", "Angry", "Neutral", "Sad", "Fearful"]
+
+ ### 2. Model and transforms preparation ###
+
+ # Create EffNetB0 model and its transforms
+ effnetb0, effnetb0_transforms = create_effnetb0_model(
+     num_classes=7,  # len(class_names) would also work
+ )
+
+ # Load saved weights
+ effnetb0.load_state_dict(
+     torch.load(
+         f="models/efficientnet_b0.pth",
+         map_location=torch.device("cpu"),  # load to CPU
+     )
+ )
+
+ ### 3. Predict function ###
+
+ # Create predict function
+ def predict(img) -> Tuple[Dict, float]:
+     """Transforms and performs a prediction on img and returns prediction and time taken."""
+     # Start the timer
+     start_time = timer()
+
+     # Transform the target image and add a batch dimension
+     img = effnetb0_transforms(img).unsqueeze(0)
+
+     # Put model into evaluation mode and turn on inference mode
+     effnetb0.eval()
+     with torch.inference_mode():
+         # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
+         pred_probs = torch.softmax(effnetb0(img), dim=1)
+
+     # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter)
+     pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
+
+     # Calculate the prediction time
+     pred_time = round(timer() - start_time, 5)
+
+     # Return the prediction dictionary and prediction time
+     return pred_labels_and_probs, pred_time
+
+ # Create examples list from "examples/" directory
+ example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+ ### 4. Gradio app ###
+
+ # Create title, description and article strings
+ title = "Emotion Detection App 😀😐😰😞🤢😲😡"
+ description = "An EfficientNetB0 computer vision model to classify images of emotions: Happy, Neutral, Sad, Fearful, Angry, Surprised, Disgusted."
+ article = "Reference: [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)."
+
+ # Create the Gradio demo and launch it
+ demo = gr.Interface(fn=predict,  # mapping function from input to output
+                     inputs=gr.Image(type="pil"),  # what are the inputs?
+                     outputs=[gr.Label(num_top_classes=7, label="Predictions"),  # what are the outputs?
+                              gr.Number(label="Prediction time (s)")],  # our fn has two outputs, therefore we have two outputs
+                     examples=example_list,
+                     title=title,
+                     description=description,
+                     article=article).launch()
+
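For a quick local sanity check outside the Gradio UI, the predict function defined above can also be called directly on one of the example images added in this commit (a minimal sketch, assuming it runs from the repository root so the relative paths resolve; it is not part of app.py):

    from PIL import Image

    img = Image.open("examples/im90.png")   # one of the example images in this commit
    pred_labels, pred_time = predict(img)   # dict of class probabilities, time in seconds
    print(pred_labels, pred_time)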
examples/im175.png ADDED
examples/im867.png ADDED
examples/im90.png ADDED
facial-detection.ipynb ADDED
@@ -0,0 +1,524 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {
+     "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
+     "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
+    },
+    "outputs": [],
+    "source": [
+     "import numpy as np\n",
+     "import pandas as pd\n",
+     "import cv2\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {
+     "_cell_guid": "79c7e3d0-c299-4dcb-8224-4455121ee9b0",
+     "_uuid": "d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
+    },
+    "outputs": [],
+    "source": [
+     "df = pd.read_csv('../input/facial-expression/fer2013.csv')"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "df.head()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "len(df.iloc[0]['pixels'].split())\n",
+     "# 48 * 48"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "label_map = ['Anger', 'Neutral', 'Fear', 'Happy', 'Sad', 'Surprise']"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import matplotlib.pyplot as plt"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "img = df.iloc[0]['pixels'].split()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "img = [int(i) for i in img]"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "type(img[0])"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "len(img)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "img = np.array(img)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "img = img.reshape(48,48)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "img.shape"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "plt.imshow(img, cmap='gray')\n",
+     "plt.xlabel(df.iloc[0]['emotion'])"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "X = []\n",
+     "y = []"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "def getData(path):\n",
+     "    anger = 0\n",
+     "    fear = 0\n",
+     "    sad = 0\n",
+     "    happy = 0\n",
+     "    surprise = 0\n",
+     "    neutral = 0\n",
+     "    df = pd.read_csv(path)\n",
+     "\n",
+     "    X = []\n",
+     "    y = []\n",
+     "\n",
+     "    for i in range(len(df)):\n",
+     "        if df.iloc[i]['emotion'] != 1:\n",
+     "            if df.iloc[i]['emotion'] == 0:\n",
+     "                if anger <= 4000:\n",
+     "                    y.append(df.iloc[i]['emotion'])\n",
+     "                    im = df.iloc[i]['pixels']\n",
+     "                    im = [int(x) for x in im.split()]\n",
+     "                    X.append(im)\n",
+     "                    anger += 1\n",
+     "                else:\n",
+     "                    pass\n",
+     "\n",
+     "            if df.iloc[i]['emotion'] == 2:\n",
+     "                if fear <= 4000:\n",
+     "                    y.append(df.iloc[i]['emotion'])\n",
+     "                    im = df.iloc[i]['pixels']\n",
+     "                    im = [int(x) for x in im.split()]\n",
+     "                    X.append(im)\n",
+     "                    fear += 1\n",
+     "                else:\n",
+     "                    pass\n",
+     "\n",
+     "            if df.iloc[i]['emotion'] == 3:\n",
+     "                if happy <= 4000:\n",
+     "                    y.append(df.iloc[i]['emotion'])\n",
+     "                    im = df.iloc[i]['pixels']\n",
+     "                    im = [int(x) for x in im.split()]\n",
+     "                    X.append(im)\n",
+     "                    happy += 1\n",
+     "                else:\n",
+     "                    pass\n",
+     "\n",
+     "            if df.iloc[i]['emotion'] == 4:\n",
+     "                if sad <= 4000:\n",
+     "                    y.append(df.iloc[i]['emotion'])\n",
+     "                    im = df.iloc[i]['pixels']\n",
+     "                    im = [int(x) for x in im.split()]\n",
+     "                    X.append(im)\n",
+     "                    sad += 1\n",
+     "                else:\n",
+     "                    pass\n",
+     "\n",
+     "            if df.iloc[i]['emotion'] == 5:\n",
+     "                if surprise <= 4000:\n",
+     "                    y.append(df.iloc[i]['emotion'])\n",
+     "                    im = df.iloc[i]['pixels']\n",
+     "                    im = [int(x) for x in im.split()]\n",
+     "                    X.append(im)\n",
+     "                    surprise += 1\n",
+     "                else:\n",
+     "                    pass\n",
+     "\n",
+     "            if df.iloc[i]['emotion'] == 6:\n",
+     "                if neutral <= 4000:\n",
+     "                    y.append(df.iloc[i]['emotion'])\n",
+     "                    im = df.iloc[i]['pixels']\n",
+     "                    im = [int(x) for x in im.split()]\n",
+     "                    X.append(im)\n",
+     "                    neutral += 1\n",
+     "                else:\n",
+     "                    pass\n",
+     "\n",
+     "    return X, y"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "X, y = getData('../input/facial-expression/fer2013.csv')"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "np.unique(y, return_counts=True)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "X = np.array(X)/255.0\n",
+     "y = np.array(y)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "X.shape, y.shape"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "y_o = []\n",
+     "for i in y:\n",
+     "    if i != 6:\n",
+     "        y_o.append(i)\n",
+     "\n",
+     "    else:\n",
+     "        y_o.append(1)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "np.unique(y_o, return_counts=True)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "for i in range(5):\n",
+     "    r = np.random.randint((1), 24000, 1)[0]\n",
+     "    plt.figure()\n",
+     "    plt.imshow(X[r].reshape(48,48), cmap='gray')\n",
+     "    plt.xlabel(label_map[y_o[r]])"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "X = X.reshape(len(X), 48, 48, 1)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# no_of_images, height, width, color_channels"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "X.shape"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from keras.utils import to_categorical\n",
+     "y_new = to_categorical(y_o, num_classes=6)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "len(y_o), y_new.shape"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "y_o[150], y_new[150]"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from keras.models import Sequential\n",
+     "from keras.layers import Dense, Activation, Dropout, Flatten\n",
+     "from keras.layers.convolutional import Conv2D\n",
+     "from keras.layers.convolutional import MaxPooling2D\n",
+     "from keras.metrics import categorical_accuracy\n",
+     "from keras.models import model_from_json\n",
+     "from keras.callbacks import ModelCheckpoint\n",
+     "from keras.optimizers import *\n",
+     "from keras.layers.normalization import BatchNormalization"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "model = Sequential()\n",
+     "\n",
+     "input_shape = (48,48,1)\n",
+     "\n",
+     "model.add(Conv2D(64, (5, 5), input_shape=input_shape, activation='relu', padding='same'))\n",
+     "model.add(Conv2D(64, (5, 5), padding='same'))\n",
+     "model.add(BatchNormalization())\n",
+     "model.add(Activation('relu'))\n",
+     "model.add(MaxPooling2D(pool_size=(2, 2)))\n",
+     "\n",
+     "model.add(Conv2D(128, (5, 5), activation='relu', padding='same'))\n",
+     "model.add(Conv2D(128, (5, 5), padding='same'))\n",
+     "model.add(BatchNormalization())\n",
+     "model.add(Activation('relu'))\n",
+     "model.add(MaxPooling2D(pool_size=(2, 2)))\n",
+     "\n",
+     "model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))\n",
+     "model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))\n",
+     "model.add(BatchNormalization())\n",
+     "model.add(MaxPooling2D(pool_size=(2, 2)))\n",
+     "\n",
+     "## (15, 15) ---> 30\n",
+     "model.add(Flatten())\n",
+     "model.add(Dense(6, activation='softmax'))\n",
+     "\n",
+     "model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "model.fit(X, y_new, epochs=22, batch_size=64, shuffle=True, validation_split=0.2)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "model.save('model.h5')"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import cv2"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "test_img = cv2.imread('../input/happy-img-test/pexels-andrea-piacquadio-941693.jpg', 0)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "test_img.shape"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "test_img = cv2.resize(test_img, (48,48))\n",
+     "test_img.shape"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "test_img = test_img.reshape(1,48,48,1)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "model.predict(test_img)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# label_map = ['Anger', 'Neutral', 'Fear', 'Happy', 'Sad', 'Surprise']"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.8.3"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 4
+ }
facial-detection.py ADDED
@@ -0,0 +1,374 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[ ]:
+
+
+ import numpy as np
+ import pandas as pd
+ import cv2
+
+
+ # In[ ]:
+
+
+ df = pd.read_csv('../input/facial-expression/fer2013.csv')
+
+
+ # In[ ]:
+
+
+ df.head()
+
+
+ # In[ ]:
+
+
+ len(df.iloc[0]['pixels'].split())
+ # 48 * 48
+
+
+ # In[ ]:
+
+
+ label_map = ['Anger', 'Neutral', 'Fear', 'Happy', 'Sad', 'Surprise']
+
+
+ # In[ ]:
+
+
+ import matplotlib.pyplot as plt
+
+
+ # In[ ]:
+
+
+ img = df.iloc[0]['pixels'].split()
+
+
+ # In[ ]:
+
+
+ img = [int(i) for i in img]
+
+
+ # In[ ]:
+
+
+ type(img[0])
+
+
+ # In[ ]:
+
+
+ len(img)
+
+
+ # In[ ]:
+
+
+ img = np.array(img)
+
+
+ # In[ ]:
+
+
+ img = img.reshape(48,48)
+
+
+ # In[ ]:
+
+
+ img.shape
+
+
+ # In[ ]:
+
+
+ plt.imshow(img, cmap='gray')
+ plt.xlabel(df.iloc[0]['emotion'])
+
+
+ # In[ ]:
+
+
+ X = []
+ y = []
+
+
+ # In[ ]:
+
+
+ # Build a roughly balanced dataset: skip class 1 (Disgust) and keep at most ~4000 images per remaining class
+ def getData(path):
+     anger = 0
+     fear = 0
+     sad = 0
+     happy = 0
+     surprise = 0
+     neutral = 0
+     df = pd.read_csv(path)
+
+     X = []
+     y = []
+
+     for i in range(len(df)):
+         if df.iloc[i]['emotion'] != 1:
+             if df.iloc[i]['emotion'] == 0:
+                 if anger <= 4000:
+                     y.append(df.iloc[i]['emotion'])
+                     im = df.iloc[i]['pixels']
+                     im = [int(x) for x in im.split()]
+                     X.append(im)
+                     anger += 1
+                 else:
+                     pass
+
+             if df.iloc[i]['emotion'] == 2:
+                 if fear <= 4000:
+                     y.append(df.iloc[i]['emotion'])
+                     im = df.iloc[i]['pixels']
+                     im = [int(x) for x in im.split()]
+                     X.append(im)
+                     fear += 1
+                 else:
+                     pass
+
+             if df.iloc[i]['emotion'] == 3:
+                 if happy <= 4000:
+                     y.append(df.iloc[i]['emotion'])
+                     im = df.iloc[i]['pixels']
+                     im = [int(x) for x in im.split()]
+                     X.append(im)
+                     happy += 1
+                 else:
+                     pass
+
+             if df.iloc[i]['emotion'] == 4:
+                 if sad <= 4000:
+                     y.append(df.iloc[i]['emotion'])
+                     im = df.iloc[i]['pixels']
+                     im = [int(x) for x in im.split()]
+                     X.append(im)
+                     sad += 1
+                 else:
+                     pass
+
+             if df.iloc[i]['emotion'] == 5:
+                 if surprise <= 4000:
+                     y.append(df.iloc[i]['emotion'])
+                     im = df.iloc[i]['pixels']
+                     im = [int(x) for x in im.split()]
+                     X.append(im)
+                     surprise += 1
+                 else:
+                     pass
+
+             if df.iloc[i]['emotion'] == 6:
+                 if neutral <= 4000:
+                     y.append(df.iloc[i]['emotion'])
+                     im = df.iloc[i]['pixels']
+                     im = [int(x) for x in im.split()]
+                     X.append(im)
+                     neutral += 1
+                 else:
+                     pass
+
+     return X, y
+
+
+ # In[ ]:
+
+
+ X, y = getData('../input/facial-expression/fer2013.csv')
+
+
+ # In[ ]:
+
+
+ np.unique(y, return_counts=True)
+
+
+ # In[ ]:
+
+
+ X = np.array(X)/255.0
+ y = np.array(y)
+
+
+ # In[ ]:
+
+
+ X.shape, y.shape
+
+
+ # In[ ]:
+
+
+ # Relabel Neutral (6) as 1 so the six kept classes are contiguous: 0-5
+ y_o = []
+ for i in y:
+     if i != 6:
+         y_o.append(i)
+
+     else:
+         y_o.append(1)
+
+
+ # In[ ]:
+
+
+ np.unique(y_o, return_counts=True)
+
+
+ # In[ ]:
+
+
+ for i in range(5):
+     r = np.random.randint((1), 24000, 1)[0]
+     plt.figure()
+     plt.imshow(X[r].reshape(48,48), cmap='gray')
+     plt.xlabel(label_map[y_o[r]])
+
+
+ # In[ ]:
+
+
+ X = X.reshape(len(X), 48, 48, 1)
+
+
+ # In[ ]:
+
+
+ # no_of_images, height, width, color_channels
+
+
+ # In[ ]:
+
+
+ X.shape
+
+
+ # In[ ]:
+
+
+ from keras.utils import to_categorical
+ y_new = to_categorical(y_o, num_classes=6)
+
+
+ # In[ ]:
+
+
+ len(y_o), y_new.shape
+
+
+ # In[ ]:
+
+
+ y_o[150], y_new[150]
+
+
+ # In[ ]:
+
+
+ from keras.models import Sequential
+ from keras.layers import Dense, Activation, Dropout, Flatten
+ from keras.layers.convolutional import Conv2D
+ from keras.layers.convolutional import MaxPooling2D
+ from keras.metrics import categorical_accuracy
+ from keras.models import model_from_json
+ from keras.callbacks import ModelCheckpoint
+ from keras.optimizers import *
+ from keras.layers.normalization import BatchNormalization
+
+
+ # In[ ]:
+
+
+ model = Sequential()
+
+ input_shape = (48,48,1)
+
+ model.add(Conv2D(64, (5, 5), input_shape=input_shape, activation='relu', padding='same'))
+ model.add(Conv2D(64, (5, 5), padding='same'))
+ model.add(BatchNormalization())
+ model.add(Activation('relu'))
+ model.add(MaxPooling2D(pool_size=(2, 2)))
+
+ model.add(Conv2D(128, (5, 5), activation='relu', padding='same'))
+ model.add(Conv2D(128, (5, 5), padding='same'))
+ model.add(BatchNormalization())
+ model.add(Activation('relu'))
+ model.add(MaxPooling2D(pool_size=(2, 2)))
+
+ model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
+ model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
+ model.add(BatchNormalization())
+ model.add(MaxPooling2D(pool_size=(2, 2)))
+
+ ## (15, 15) ---> 30
+ model.add(Flatten())
+ model.add(Dense(6, activation='softmax'))
+
+ model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
+
+
+ # In[ ]:
+
+
+ model.fit(X, y_new, epochs=22, batch_size=64, shuffle=True, validation_split=0.2)
+
+
+ # In[ ]:
+
+
+ model.save('model.h5')
+
+
+ # In[ ]:
+
+
+ import cv2
+
+
+ # In[ ]:
+
+
+ test_img = cv2.imread('../input/happy-img-test/pexels-andrea-piacquadio-941693.jpg', 0)
+
+
+ # In[ ]:
+
+
+ test_img.shape
+
+
+ # In[ ]:
+
+
+ test_img = cv2.resize(test_img, (48,48))
+ test_img.shape
+
+
+ # In[ ]:
+
+
+ test_img = test_img.reshape(1,48,48,1)
+
+
+ # In[ ]:
+
+
+ model.predict(test_img)
+
+
+ # In[ ]:
+
+
+ # label_map = ['Anger', 'Neutral', 'Fear', 'Happy', 'Sad', 'Surprise']
+
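The last cells leave model.predict as a raw probability vector. To read it back as an emotion name, the commented label_map can be indexed with the argmax (a small illustrative sketch, not part of the original script):

    import numpy as np

    probs = model.predict(test_img)             # shape (1, 6), softmax scores
    label_map = ['Anger', 'Neutral', 'Fear', 'Happy', 'Sad', 'Surprise']
    print(label_map[int(np.argmax(probs[0]))])  # most likely emotion for the test image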
model.py ADDED
@@ -0,0 +1,34 @@
+ import torch
+ import torchvision
+
+ from torch import nn
+
+
+ def create_effnetb0_model(num_classes: int = 7,
+                           seed: int = 42):
+     """Creates an EfficientNetB0 feature extractor model and transforms.
+
+     Args:
+         num_classes (int, optional): number of classes in the classifier head.
+             Defaults to 7.
+         seed (int, optional): random seed value. Defaults to 42.
+
+     Returns:
+         model (torch.nn.Module): EffNetB0 feature extractor model.
+         transforms (torchvision.transforms): EffNetB0 image transforms.
+     """
+     # Create EffNetB0 pretrained weights, transforms and model
+     weights = torchvision.models.EfficientNet_B0_Weights.DEFAULT
+     transforms = weights.transforms()
+     model = torchvision.models.efficientnet_b0(weights=weights)
+
+     # Freeze all layers in the base model
+     for param in model.parameters():
+         param.requires_grad = False
+
+     # Change the classifier head (with a manual seed for reproducibility)
+     torch.manual_seed(seed)
+     model.classifier = nn.Sequential(
+         nn.Dropout(p=0.3, inplace=True),
+         nn.Linear(in_features=1280, out_features=num_classes),
+     )
+
+     return model, transforms
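For context, a minimal sketch of how a fine-tuned state dict such as models/efficientnet_b0.pth could be produced with this helper and reloaded for inference (the training loop is omitted and the path is simply the one app.py expects; this code is not part of model.py):

    import torch
    from model import create_effnetb0_model

    model, transforms = create_effnetb0_model(num_classes=7, seed=42)
    # ... fine-tune the classifier head on the emotion dataset here ...
    torch.save(obj=model.state_dict(), f="models/efficientnet_b0.pth")

    # Later, restore the weights for CPU inference, mirroring app.py:
    model.load_state_dict(
        torch.load(f="models/efficientnet_b0.pth", map_location=torch.device("cpu"))
    )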
models/checkpoint.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c19080505a7503bda94d190f212cc15a12e0db73d771e287fce914fb026ec5f8
+ size 16368687
models/efficientnet_b0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8e33a3660f4cdf1a53f06867637f080394658f125273225f25eaef7ed6639bb
+ size 16366529
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ torch==1.13.0
+ torchvision==0.14.0
+ gradio==3.1.4
+
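With these pinned versions, the Space can also be tried locally using the usual commands (not part of the commit itself):

    pip install -r requirements.txt
    python app.py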