Upload folder using huggingface_hub
- app.py +11 -13
- requirements.txt +0 -1
app.py
CHANGED
@@ -10,7 +10,6 @@ import torch.amp as amp
 import torchvision.transforms as transforms
 import torchvision.models as models
 import gradio as gr
-from gradio_imageslider import ImageSlider
 
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 print('DEVICE:', device)
@@ -103,7 +102,7 @@ def inference(content_image, style_image, style_strength, progress=gr.Progress(t
     generated_img = content_img.clone().requires_grad_(True)
     optimizer = optim.Adam([generated_img], lr=lr)
 
-    for
+    for _ in tqdm(range(iters), desc=''):
         generated_features = model(generated_img)
         content_features = model(content_img)
         style_features = model(style_img)
@@ -112,7 +111,6 @@ def inference(content_image, style_image, style_strength, progress=gr.Progress(t
         style_loss = 0
 
         for generated_feature, content_feature, style_feature in zip(generated_features, content_features, style_features):
-
             batch_size, n_feature_maps, height, width = generated_feature.size()
 
             content_loss += (torch.mean((generated_feature - content_feature) ** 2))
@@ -136,15 +134,15 @@ def inference(content_image, style_image, style_strength, progress=gr.Progress(t
 
 examples = [
     # page 1
-    ['./content_images/TajMahal.jpg', 'Starry Night'],
-    ['./content_images/GoldenRetriever.jpg', 'Lego Bricks'],
-    ['./content_images/Beach.jpg', 'Oil Painting'],
-    ['./content_images/StandingOnCliff.png', 'Great Wave'],
+    ['./content_images/TajMahal.jpg', 'Starry Night', 75],
+    ['./content_images/GoldenRetriever.jpg', 'Lego Bricks', 50],
+    ['./content_images/Beach.jpg', 'Oil Painting', 50],
+    ['./content_images/StandingOnCliff.png', 'Great Wave', 75],
     # page 2
-    ['./content_images/Surfer.jpg', 'Starry Night'],
-    ['./content_images/CameraGirl.jpg', 'Lego Bricks'],
-    ['./content_images/NYCSkyline.jpg', 'Oil Painting'],
-    ['./content_images/GoldenRetriever.jpg', 'Great Wave'],
+    ['./content_images/Surfer.jpg', 'Starry Night', 75],
+    ['./content_images/CameraGirl.jpg', 'Lego Bricks', 50],
+    ['./content_images/NYCSkyline.jpg', 'Oil Painting', 50],
+    ['./content_images/GoldenRetriever.jpg', 'Great Wave', 75],
 ]
 
 with gr.Blocks(title='🖼️ Neural Style Transfer') as demo:
@@ -154,10 +152,10 @@ with gr.Blocks(title='🖼️ Neural Style Transfer') as demo:
             content_image = gr.Image(label='Content', type='pil', sources=['upload'])
             style_dropdown = gr.Dropdown(choices=list(style_options.keys()), label='Style', value='Starry Night', type='value')
             with gr.Accordion('Advanced Settings', open=False):
-                style_strength = gr.Slider(label='Style Strength', minimum=
+                style_strength = gr.Slider(label='Style Strength', minimum=0, maximum=100, step=5, value=50)
             submit_button = gr.Button('Submit')
         with gr.Column():
-            output_image =
+            output_image = gr.Image(label='Output', show_download_button=True, interactive=False)
 
     submit_button.click(fn=inference, inputs=[content_image, style_dropdown, style_strength], outputs=[output_image])
 
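Note on the loss computation the hunks above touch: inference() accumulates content_loss and style_loss per feature layer and now receives the slider's style_strength value. Below is a minimal sketch, not code from this commit, of how a Gram-matrix style loss and a 0-100 style_strength weighting are typically combined; gram_matrix, layer_losses, blend_losses and the strength-to-weight mapping are illustrative assumptions.

import torch

def gram_matrix(feature):
    # feature: (batch_size, n_feature_maps, height, width), as unpacked in the diff above
    batch_size, n_feature_maps, height, width = feature.size()
    flat = feature.view(batch_size, n_feature_maps, height * width)
    # Channel-to-channel correlations, normalized by the layer size
    return flat @ flat.transpose(1, 2) / (n_feature_maps * height * width)

def layer_losses(generated_feature, content_feature, style_feature):
    # Per-layer terms matching the accumulation pattern shown in the diff
    content_loss = torch.mean((generated_feature - content_feature) ** 2)
    style_loss = torch.mean((gram_matrix(generated_feature) - gram_matrix(style_feature)) ** 2)
    return content_loss, style_loss

def blend_losses(content_loss, style_loss, style_strength):
    # Hypothetical mapping of the 0-100 slider value to a blending weight
    alpha = style_strength / 100
    return (1 - alpha) * content_loss + alpha * style_loss

# Example: equal weighting of the two terms at style_strength=50
total = blend_losses(torch.tensor(0.2), torch.tensor(0.8), style_strength=50)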
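For context on why each example row gained a third value: the rows now pre-fill the content image, the style dropdown and the style-strength slider. The following self-contained sketch shows the post-change wiring; the stub inference(), the trimmed style_options and examples lists, the examples_per_page=4 setting (suggested by the '# page 1' / '# page 2' comments) and demo.launch() are assumptions for illustration, while the component definitions mirror the diff.

import gradio as gr

# Hypothetical stand-in for the real inference() in app.py; here it just echoes the content image.
def inference(content_image, style_name, style_strength):
    return content_image

style_options = {'Starry Night': None, 'Lego Bricks': None, 'Oil Painting': None, 'Great Wave': None}

examples = [
    # paths come from the Space's content_images folder
    ['./content_images/TajMahal.jpg', 'Starry Night', 75],
    ['./content_images/GoldenRetriever.jpg', 'Lego Bricks', 50],
]

with gr.Blocks(title='🖼️ Neural Style Transfer') as demo:
    with gr.Row():
        with gr.Column():
            content_image = gr.Image(label='Content', type='pil', sources=['upload'])
            style_dropdown = gr.Dropdown(choices=list(style_options.keys()), label='Style', value='Starry Night', type='value')
            with gr.Accordion('Advanced Settings', open=False):
                style_strength = gr.Slider(label='Style Strength', minimum=0, maximum=100, step=5, value=50)
            submit_button = gr.Button('Submit')
        with gr.Column():
            # Plain gr.Image output in place of the removed gradio_imageslider.ImageSlider
            output_image = gr.Image(label='Output', show_download_button=True, interactive=False)

    # Each example row pre-fills all three inputs, including the slider
    gr.Examples(examples=examples, inputs=[content_image, style_dropdown, style_strength], examples_per_page=4)
    submit_button.click(fn=inference, inputs=[content_image, style_dropdown, style_strength], outputs=[output_image])

if __name__ == '__main__':
    demo.launch()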
requirements.txt
CHANGED
@@ -3,6 +3,5 @@ torch
 torchvision
 pillow
 gradio
-gradio_imageslider
 spaces
 tqdm