|
|
|
from fastai.vision.all import * |
|
import gradio as gr |
|
|
|
|
|
# Load the exported fastai Learner from disk. The .pkl is expected to live in
# the current working directory and to have been produced by learn.export().
learn = load_learner('facial_exp_model.pkl')




# Class labels in the exact order the model emits probabilities
# (taken from the DataLoaders vocabulary bundled inside the Learner).
labels = learn.dls.vocab
|
def classify_image(img):
    """Classify the facial expression shown in *img*.

    The input (a filepath or array from the Gradio Image component) is
    converted to a fastai PILImage, run through the learner, and the
    result is returned as a label -> probability mapping of plain floats,
    which is the format gr.Label expects.
    """
    image = PILImage.create(img)
    _pred, _idx, probs = learn.predict(image)
    return dict(zip(labels, map(float, probs)))
|
|
|
# Sample images shown as clickable examples in the UI; these files are
# expected to sit next to this script in the working directory.
examples = ['afraid.jpg','anger.jpg','happyface.jpg','disgust.jpg','sadface.webp','neutral.webp']




# Build the app UI: a header, a short description, and an embedded
# gr.Interface (creating an Interface inside a Blocks context renders it
# into that layout) wired to the classifier above.
with gr.Blocks(theme=gr.themes.Soft())as demo:

    # NOTE(review): the "ππ" sequence below looks like mojibake of emoji
    # characters (a bad encoding round-trip) — confirm the intended glyphs
    # before changing this user-visible string.
    gr.HTML("<center><h1>EmoPix - Facial Expressions Classifier! ππ</h1><center>")

    # NOTE(review): "funπ€" likewise appears to be a garbled emoji — verify.
    gr.Markdown("""##### Classifies human facial expressions from an uploaded image.

<b>Upload images and have funπ€. Classifies facial expressions with 75% accuracy</b>""")




    # Image in, top-3 predicted labels out, with the example gallery above.
    gr.Interface(fn=classify_image,

             inputs=gr.components.Image(),

             outputs=gr.components.Label(show_label=True,num_top_classes=3),

             examples=examples)


# inline=False: don't try to embed in a notebook output cell; open normally.
demo.launch(inline=False)