Spaces:
Runtime error
wmacdonald committed
Commit e4dd32b • 1 Parent(s): 1681c87
Update app.py
app.py CHANGED
@@ -1,187 +1,27 @@
-#
-#NB: Kaggle requires phone verification to use the internet or a GPU. If you haven't done that yet, the cell below will fail
-# This code is only here to check that your internet is enabled. It doesn't do anything else.
-# Here's a help thread on getting your phone number verified: https://www.kaggle.com/product-feedback/135367

-
-try:
-    socket.setdefaulttimeout(1)
-    socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(('1.1.1.1', 53))
-except socket.error as ex: raise Exception("STOP: No internet. Click '>|' in top right and set 'Internet' switch to on")
-
-# %% [code] {"_kg_hide-input":true,"_kg_hide-output":true,"execution":{"iopub.status.busy":"2023-01-19T13:44:55.472652Z","iopub.execute_input":"2023-01-19T13:44:55.473277Z","iopub.status.idle":"2023-01-19T13:45:07.027672Z","shell.execute_reply.started":"2023-01-19T13:44:55.473245Z","shell.execute_reply":"2023-01-19T13:45:07.026513Z"},"jupyter":{"source_hidden":true}}
-# It's a good idea to ensure you're running the latest version of any libraries you need.
-# `!pip install -Uqq <libraries>` upgrades to the latest version of <libraries>
-# NB: You can safely ignore any warnings or errors pip spits out about running as root or incompatibilities
-import os
-iskaggle = os.environ.get('KAGGLE_KERNEL_RUN_TYPE', '')
-
-# %% [markdown]
-# In 2015 the idea of creating a computer system that could recognise birds was considered so outrageously challenging that it was the basis of [this XKCD joke](https://xkcd.com/1425/):
-
-# %% [markdown]
-# ![image.png](attachment:a0483178-c30e-4fdd-b2c2-349e130ab260.png)
-
-# %% [markdown]
-# But today, we can do exactly that, in just a few minutes, using entirely free resources!
-#
-# The basic steps we'll take are:
-#
-# 1. Use DuckDuckGo to search for images of "bird photos"
-# 1. Use DuckDuckGo to search for images of "forest photos"
-# 1. Fine-tune a pretrained neural network to recognise these two groups
-# 1. Try running this model on a picture of a bird and see if it works.
-
-# %% [markdown]
-# ## Step 1: Download images of birds and non-birds
-
-# %% [code] {"_kg_hide-input":true,"execution":{"iopub.status.busy":"2023-01-19T13:45:56.061456Z","iopub.execute_input":"2023-01-19T13:45:56.061849Z","iopub.status.idle":"2023-01-19T13:45:56.069190Z","shell.execute_reply.started":"2023-01-19T13:45:56.061817Z","shell.execute_reply":"2023-01-19T13:45:56.067878Z"}}
-from duckduckgo_search import ddg_images
-from fastcore.all import *
-
-def search_images(term, max_images=30):
-    print(f"Searching for '{term}'")
-    return L(ddg_images(term, max_results=max_images)).itemgot('image')
-
-# %% [code] {"execution":{"iopub.status.busy":"2023-01-19T13:45:51.347402Z","iopub.execute_input":"2023-01-19T13:45:51.347810Z","iopub.status.idle":"2023-01-19T13:45:51.408967Z","shell.execute_reply.started":"2023-01-19T13:45:51.347776Z","shell.execute_reply":"2023-01-19T13:45:51.407774Z"}}
-
-
-# %% [markdown]
-# Let's start by searching for a bird photo and seeing what kind of result we get. We'll start by getting URLs from a search:
-
-# %% [code] {"execution":{"iopub.status.busy":"2023-01-19T13:46:01.346523Z","iopub.execute_input":"2023-01-19T13:46:01.347704Z","iopub.status.idle":"2023-01-19T13:46:01.848775Z","shell.execute_reply.started":"2023-01-19T13:46:01.347644Z","shell.execute_reply":"2023-01-19T13:46:01.847667Z"}}
-#NB: `search_images` depends on duckduckgo.com, which doesn't always return correct responses.
-# If you get a JSON error, just try running it again (it may take a couple of tries).
-urls = search_images('snowboard', max_images=1)
-urls[0]
-
-# %% [markdown]
-# ...and then download a URL and take a look at it:
-
-# %% [code] {"execution":{"iopub.status.busy":"2023-01-19T13:46:15.912826Z","iopub.execute_input":"2023-01-19T13:46:15.913436Z","iopub.status.idle":"2023-01-19T13:46:21.067256Z","shell.execute_reply.started":"2023-01-19T13:46:15.913404Z","shell.execute_reply":"2023-01-19T13:46:21.066186Z"}}
-from fastdownload import download_url
-dest = 'snowboard.jpg'
-download_url(urls[0], dest, show_progress=False)

from fastai.vision.all import *
-
-im.to_thumb(256,256)
-
-# %% [markdown]
-# Now let's do the same with "forest photos":
-
-# %% [code] {"execution":{"iopub.status.busy":"2023-01-19T13:46:30.534509Z","iopub.execute_input":"2023-01-19T13:46:30.535405Z","iopub.status.idle":"2023-01-19T13:46:32.663418Z","shell.execute_reply.started":"2023-01-19T13:46:30.535368Z","shell.execute_reply":"2023-01-19T13:46:32.662448Z"}}
-download_url(search_images('snowboard', max_images=1)[0], 'snowboard.jpg', show_progress=False)
-Image.open('snowboard.jpg').to_thumb(256,256)
-
-# %% [markdown]
-# Our searches seem to be giving reasonable results, so let's grab a few examples of each of "bird" and "forest" photos, and save each group of photos to a different folder (I'm also trying to grab a range of lighting conditions here):
-
-# %% [code] {"execution":{"iopub.status.busy":"2023-01-19T13:46:44.584802Z","iopub.execute_input":"2023-01-19T13:46:44.585202Z","iopub.status.idle":"2023-01-19T13:48:11.129905Z","shell.execute_reply.started":"2023-01-19T13:46:44.585170Z","shell.execute_reply":"2023-01-19T13:48:11.128350Z"}}
-searches = 'skis','snowboard'
-path = Path('snowboard_or_not')
-from time import sleep
-
-for o in searches:
-    dest = (path/o)
-    dest.mkdir(exist_ok=True, parents=True)
-    download_images(dest, urls=search_images(f'{o} photo'))
-    sleep(10)  # Pause between searches to avoid over-loading server
-    download_images(dest, urls=search_images(f'{o} backcountry photo'))
-    sleep(10)
-    download_images(dest, urls=search_images(f'{o} downhill photo'))
-    sleep(10)
-    resize_images(path/o, max_size=400, dest=path/o)
-
-# %% [markdown]
-# ## Step 2: Train our model
-
-# %% [markdown]
-# Some photos might not download correctly which could cause our model training to fail, so we'll remove them:
-
-# %% [code] {"execution":{"iopub.status.busy":"2023-01-19T13:48:13.927238Z","iopub.execute_input":"2023-01-19T13:48:13.927600Z","iopub.status.idle":"2023-01-19T13:48:14.375727Z","shell.execute_reply.started":"2023-01-19T13:48:13.927569Z","shell.execute_reply":"2023-01-19T13:48:14.374184Z"}}
-failed = verify_images(get_image_files(path))
-failed.map(Path.unlink)
-len(failed)
-
-# %% [markdown]
-# To train a model, we'll need `DataLoaders`, which is an object that contains a *training set* (the images used to create a model) and a *validation set* (the images used to check the accuracy of a model -- not used during training). In `fastai` we can create that easily using a `DataBlock`, and view sample images from it:
-
-# %% [code] {"execution":{"iopub.status.busy":"2023-01-19T13:48:18.530196Z","iopub.execute_input":"2023-01-19T13:48:18.530595Z","iopub.status.idle":"2023-01-19T13:48:19.133024Z","shell.execute_reply.started":"2023-01-19T13:48:18.530562Z","shell.execute_reply":"2023-01-19T13:48:19.132389Z"}}
-dls = DataBlock(
-    blocks=(ImageBlock, CategoryBlock),
-    get_items=get_image_files,
-    splitter=RandomSplitter(valid_pct=0.2, seed=42),
-    get_y=parent_label,
-    item_tfms=[Resize(192, method='squish')]
-).dataloaders(path, bs=32)
-
-dls.show_batch(max_n=6)
-
-# %% [markdown]
-# Here's what each of the `DataBlock` parameters means:
-#
-#     blocks=(ImageBlock, CategoryBlock),
-#
-# The inputs to our model are images, and the outputs are categories (in this case, "bird" or "forest").
-#
-#     get_items=get_image_files,
-#
-# To find all the inputs to our model, run the `get_image_files` function (which returns a list of all image files in a path).
-#
-#     splitter=RandomSplitter(valid_pct=0.2, seed=42),
-#
-# Split the data into training and validation sets randomly, using 20% of the data for the validation set.
-#
-#     get_y=parent_label,
-#
-# The label (`y` value) for each file is the name of its `parent` (i.e. the name of the folder it's in, which will be *bird* or *forest*).
-#
-#     item_tfms=[Resize(192, method='squish')]
-#
-# Before training, resize each image to 192x192 pixels by "squishing" it (as opposed to cropping it).
-
-# %% [markdown]
-# Now we're ready to train our model. The fastest widely used computer vision model is `resnet18`. You can train this in a few minutes, even on a CPU! (On a GPU, it generally takes under 10 seconds...)
-#
-# `fastai` comes with a helpful `fine_tune()` method which automatically uses best practices for fine tuning a pre-trained model, so we'll use that.
-
-# %% [code] {"execution":{"iopub.status.busy":"2023-01-19T13:48:35.151957Z","iopub.execute_input":"2023-01-19T13:48:35.153822Z","iopub.status.idle":"2023-01-19T13:49:18.382049Z","shell.execute_reply.started":"2023-01-19T13:48:35.153778Z","shell.execute_reply":"2023-01-19T13:49:18.380613Z"}}
-learn = vision_learner(dls, resnet18, metrics=error_rate)
-learn.fine_tune(3)
-
-# %% [markdown]
-# Generally when I run this I see 100% accuracy on the validation set (although it might vary a bit from run to run).
-#
-# "Fine-tuning" a model means that we're starting with a model someone else has trained using some other dataset (called the *pretrained model*), and adjusting the weights a little bit so that the model learns to recognise your particular dataset. In this case, the pretrained model was trained to recognise photos in *ImageNet*, a widely-used computer vision dataset with images covering 1000 categories. For details on fine-tuning and why it's important, check out the [free fast.ai course](https://course.fast.ai/).
-
-# %% [markdown]
-# ## Step 3: Use our model (and build your own!)
-
-# %% [markdown]
-# Let's see what our model thinks about that bird we downloaded at the start:
-
-# %% [code] {"execution":{"iopub.status.busy":"2023-01-19T13:50:02.912145Z","iopub.execute_input":"2023-01-19T13:50:02.912475Z","iopub.status.idle":"2023-01-19T13:50:03.010309Z","shell.execute_reply.started":"2023-01-19T13:50:02.912450Z","shell.execute_reply":"2023-01-19T13:50:03.009055Z"}}
-is_snowboard,_,probs = learn.predict(PILImage.create('snowboard.jpg'))

-
-print(dls.vocab.o2i)
-print(f"Probability that it is a {is_snowboard}: {probs[dls.vocab.o2i.get(is_snowboard)]:.4f}")

-#



-#
-
-
-
-#
-# It's not just in computer vision. Thanks to deep learning, computers can now do many things which seemed impossible just a few years ago, including [creating amazing artworks](https://openai.com/dall-e-2/), and [explaining jokes](https://www.datanami.com/2022/04/22/googles-massive-new-language-model-can-explain-jokes/). It's moving so fast that even experts in the field have trouble predicting how it's going to impact society in the coming years.
-#
-# One thing is clear -- it's important that we all do our best to understand this technology, because otherwise we'll get left behind!

-
-
-#
-# If you enjoyed this, please consider clicking the "upvote" button in the top-right -- it's very encouraging to us notebook authors to know when people appreciate our work.
+# AUTOGENERATED! DO NOT EDIT! File to edit: . (unless otherwise specified).

+__all__ = ['is_cat', 'learn', 'classify_image', 'categories', 'image', 'label', 'examples', 'intf']

+# Cell
from fastai.vision.all import *
+import gradio as gr

+def is_cat(x): return x[0].isupper()

+# Cell
+learn = load_learner('model.pkl')

+# Cell
+categories = ('Dog', 'Cat')

+def classify_image(img):
+    pred,idx,probs = learn.predict(img)
+    return dict(zip(categories, map(float,probs)))

+# Cell
+image = gr.inputs.Image(shape=(192, 192))
+label = gr.outputs.Label()
+examples = ['dog.jpg', 'cat.jpg', 'dunno.jpg']

+intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples)
+intf.launch(inline=False)
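
Note (not part of the commit): the committed app.py builds its interface with the gr.inputs / gr.outputs namespace, which later Gradio releases removed in favour of top-level components. Below is a minimal sketch of the same app written against that newer component API (gr.Image, gr.Label). The Gradio version installed on the Space is an assumption here; the model file, category names, and example images are copied from the committed code.

# Sketch only -- not part of the committed app.py. Assumes a Gradio version that
# provides the top-level gr.Image / gr.Label components instead of gr.inputs / gr.outputs.
from fastai.vision.all import *
import gradio as gr

# The exported learner references this labelling function, so it must be defined
# before load_learner can unpickle the model.
def is_cat(x): return x[0].isupper()

learn = load_learner('model.pkl')   # same exported model file as the committed code

categories = ('Dog', 'Cat')

def classify_image(img):
    # learn.predict applies the learner's own item transforms, so no manual
    # 192x192 resize is needed on the Gradio side.
    pred, idx, probs = learn.predict(img)
    return dict(zip(categories, map(float, probs)))

demo = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(),    # replaces gr.inputs.Image(shape=(192, 192))
    outputs=gr.Label(),   # replaces gr.outputs.Label()
    examples=['dog.jpg', 'cat.jpg', 'dunno.jpg'],
)
demo.launch()

On Spaces the script runs as a plain Python process, so demo.launch() without inline=False is sufficient; the inline flag mainly matters when launching from inside a notebook.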