jamino30 committed on
Commit
2eca929
·
verified ·
1 Parent(s): 565b35e

Upload folder using huggingface_hub

Browse files
Files changed (5) hide show
  1. README.md +1 -1
  2. app.py +29 -15
  3. style_images/Watercolor.jpg +0 -0
  4. vgg16.py +73 -0
  5. vgg19.py +54 -0
README.md CHANGED
@@ -5,4 +5,4 @@ app_file: app.py
5
  sdk: gradio
6
  sdk_version: 4.44.0
7
  ---
8
- # style-transfer
 
5
  sdk: gradio
6
  sdk_version: 4.44.0
7
  ---
8
+ # Neural Style Transfer
app.py CHANGED
@@ -23,6 +23,15 @@ for param in model.parameters():
23
 
24
  style_files = os.listdir('./style_images')
25
  style_options = {' '.join(style_file.split('.')[0].split('_')): f'./style_images/{style_file}' for style_file in style_files}
 
 
 
 
 
 
 
 
 
26
 
27
  @spaces.GPU(duration=20)
28
  def inference(content_image, style_image, style_strength, output_quality, progress=gr.Progress(track_tqdm=True)):
@@ -82,6 +91,9 @@ def inference(content_image, style_image, style_strength, output_quality, progre
82
  def set_slider(value):
83
  return gr.update(value=value)
84
 
 
 
 
85
  css = """
86
  #container {
87
  margin: 0 auto;
@@ -93,36 +105,27 @@ with gr.Blocks(css=css) as demo:
93
  gr.HTML("<h1 style='text-align: center; padding: 10px'>🖼️ Neural Style Transfer</h1>")
94
  with gr.Column(elem_id='container'):
95
  content_and_output = gr.Image(show_label=False, type='pil', sources=['upload'], format='jpg', show_download_button=False)
96
- style_dropdown = gr.Radio(choices=list(style_options.keys()), label='Style', value='Starry Night', type='value')
97
  with gr.Accordion('Adjustments', open=False):
98
  with gr.Group():
99
  style_strength_slider = gr.Slider(label='Style Strength', minimum=1, maximum=100, step=1, value=50)
 
100
  with gr.Row():
101
- low_button = gr.Button('Low').click(fn=lambda: set_slider(10), outputs=[style_strength_slider])
102
- medium_button = gr.Button('Medium').click(fn=lambda: set_slider(50), outputs=[style_strength_slider])
103
- high_button = gr.Button('High').click(fn=lambda: set_slider(100), outputs=[style_strength_slider])
104
  with gr.Group():
105
  output_quality = gr.Checkbox(label='More Realistic', info='Note: If unchecked, the resulting image will have a more artistic flair.', value=True)
106
 
107
  submit_button = gr.Button('Submit', variant='primary')
108
- download_button = gr.DownloadButton(label='Download Image', visible=False)
109
-
110
- def save_generated_image(img):
111
- output_path = 'generated.jpg'
112
- img.save(output_path)
113
- return output_path
114
 
115
  submit_button.click(
116
  fn=inference,
117
  inputs=[content_and_output, style_dropdown, style_strength_slider, output_quality],
118
  outputs=[content_and_output]
119
- ).then(
120
- fn=save_generated_image,
121
- inputs=[content_and_output],
122
- outputs=[download_button]
123
  ).then(
124
  fn=lambda: gr.update(visible=True),
125
- inputs=[],
126
  outputs=[download_button]
127
  )
128
 
@@ -132,6 +135,17 @@ with gr.Blocks(css=css) as demo:
132
  outputs=[download_button]
133
  )
134
 
 
 
 
 
 
 
 
 
 
 
 
135
  examples = gr.Examples(
136
  examples=[
137
  ['./content_images/TajMahal.jpg', 'Starry Night', 75, True],
 
23
 
24
  style_files = os.listdir('./style_images')
25
  style_options = {' '.join(style_file.split('.')[0].split('_')): f'./style_images/{style_file}' for style_file in style_files}
26
+ optimal_settings = {
27
+ 'Starry Night': (100, True),
28
+ 'Lego Bricks': (50, False),
29
+ 'Mosaic': (100, False),
30
+ 'Oil Painting': (100, False),
31
+ 'Scream': (75, True),
32
+ 'Great Wave': (75, False),
33
+ 'Watercolor': (10, False),
34
+ }
35
 
36
  @spaces.GPU(duration=20)
37
  def inference(content_image, style_image, style_strength, output_quality, progress=gr.Progress(track_tqdm=True)):
 
91
  def set_slider(value):
92
  return gr.update(value=value)
93
 
94
def update_settings(style):
    """Return the (style_strength, more_realistic) preset for *style*.

    Unknown styles fall back to the neutral default of (50, True).
    """
    if style in optimal_settings:
        return optimal_settings[style]
    return (50, True)
96
+
97
  css = """
98
  #container {
99
  margin: 0 auto;
 
105
  gr.HTML("<h1 style='text-align: center; padding: 10px'>🖼️ Neural Style Transfer</h1>")
106
  with gr.Column(elem_id='container'):
107
  content_and_output = gr.Image(show_label=False, type='pil', sources=['upload'], format='jpg', show_download_button=False)
108
+ style_dropdown = gr.Radio(choices=list(style_options.keys()), label='Style', info='Note: Adjustments automatically optimize for different styles.', value='Starry Night', type='value')
109
  with gr.Accordion('Adjustments', open=False):
110
  with gr.Group():
111
  style_strength_slider = gr.Slider(label='Style Strength', minimum=1, maximum=100, step=1, value=50)
112
+
113
  with gr.Row():
114
+ low_button = gr.Button('Low', size='sm').click(fn=lambda: set_slider(10), outputs=[style_strength_slider])
115
+ medium_button = gr.Button('Medium', size='sm').click(fn=lambda: set_slider(50), outputs=[style_strength_slider])
116
+ high_button = gr.Button('High', size='sm').click(fn=lambda: set_slider(100), outputs=[style_strength_slider])
117
  with gr.Group():
118
  output_quality = gr.Checkbox(label='More Realistic', info='Note: If unchecked, the resulting image will have a more artistic flair.', value=True)
119
 
120
  submit_button = gr.Button('Submit', variant='primary')
121
+ download_button = gr.DownloadButton(label='Download Image', value='generated.jpg', visible=False)
 
 
 
 
 
122
 
123
  submit_button.click(
124
  fn=inference,
125
  inputs=[content_and_output, style_dropdown, style_strength_slider, output_quality],
126
  outputs=[content_and_output]
 
 
 
 
127
  ).then(
128
  fn=lambda: gr.update(visible=True),
 
129
  outputs=[download_button]
130
  )
131
 
 
135
  outputs=[download_button]
136
  )
137
 
138
+ style_dropdown.change(
139
+ fn=lambda style: set_slider(update_settings(style)[0]),
140
+ inputs=[style_dropdown],
141
+ outputs=[style_strength_slider]
142
+ )
143
+ style_dropdown.change(
144
+ fn=lambda style: gr.update(value=update_settings(style)[1]),
145
+ inputs=[style_dropdown],
146
+ outputs=[output_quality]
147
+ )
148
+
149
  examples = gr.Examples(
150
  examples=[
151
  ['./content_images/TajMahal.jpg', 'Starry Night', 75, True],
style_images/Watercolor.jpg ADDED
vgg16.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch.nn as nn
import torchvision.models as models

""" VGG_16 Architecture
VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace=True)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace=True)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace=True)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace=True)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace=True)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace=True)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace=True)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace=True)
    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace=True)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace=True)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace=True)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace=True)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace=True)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace=True)
    (2): Dropout(p=0.5, inplace=False)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace=True)
    (5): Dropout(p=0.5, inplace=False)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
  )
)
"""

class VGG_16(nn.Module):
    """Truncated pretrained VGG-16 feature extractor for style transfer.

    Keeps the `features` stack up to (but excluding) the final MaxPool2d
    (index 30 in the layout above), swaps each intermediate MaxPool2d for
    AvgPool2d, and returns the conv activations at the start of each of
    the five conv stages (indices 0, 5, 10, 17, 24).
    """

    def __init__(self):
        super(VGG_16, self).__init__()
        # BUG FIX: this class previously loaded `models.vgg19`. VGG-19's
        # pooling layers sit at indices [4, 9, 18, 27], so the replacement
        # loop below would have overwritten conv layers at 16 and 23.
        # Load VGG-16, matching the class name and the docstring layout.
        self.model = models.vgg16(weights='DEFAULT').features[:30]

        # Replace max-pooling with average-pooling; average pooling is
        # commonly preferred for style transfer (smoother gradients).
        for i, _ in enumerate(self.model):
            if i in [4, 9, 16, 23]:
                self.model[i] = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)

    def forward(self, x):
        """Run *x* through the truncated network.

        Returns a list of five feature maps: the outputs of the first conv
        layer of each stage (conv1_1 .. conv5_1 in the layout above).
        """
        features = []

        for i, layer in enumerate(self.model):
            x = layer(x)
            if i in [0, 5, 10, 17, 24]:
                features.append(x)
        return features


if __name__ == '__main__':
    model = VGG_16()
    print(model)
vgg19.py CHANGED
@@ -1,6 +1,60 @@
1
  import torch.nn as nn
2
  import torchvision.models as models
3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  class VGG_19(nn.Module):
5
  def __init__(self):
6
  super(VGG_19, self).__init__()
 
1
  import torch.nn as nn
2
  import torchvision.models as models
3
 
4
+ """ VGG_19 Architecture
5
+ VGG(
6
+ (features): Sequential(
7
+ (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
8
+ (1): ReLU(inplace=True)
9
+ (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
10
+ (3): ReLU(inplace=True)
11
+ (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
12
+ (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
13
+ (6): ReLU(inplace=True)
14
+ (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
15
+ (8): ReLU(inplace=True)
16
+ (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
17
+ (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
18
+ (11): ReLU(inplace=True)
19
+ (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
20
+ (13): ReLU(inplace=True)
21
+ (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
22
+ (15): ReLU(inplace=True)
23
+ (16): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
24
+ (17): ReLU(inplace=True)
25
+ (18): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
26
+ (19): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
27
+ (20): ReLU(inplace=True)
28
+ (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
29
+ (22): ReLU(inplace=True)
30
+ (23): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
31
+ (24): ReLU(inplace=True)
32
+ (25): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
33
+ (26): ReLU(inplace=True)
34
+ (27): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
35
+ (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
36
+ (29): ReLU(inplace=True)
37
+ (30): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
38
+ (31): ReLU(inplace=True)
39
+ (32): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
40
+ (33): ReLU(inplace=True)
41
+ (34): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
42
+ (35): ReLU(inplace=True)
43
+ (36): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
44
+ )
45
+ (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
46
+ (classifier): Sequential(
47
+ (0): Linear(in_features=25088, out_features=4096, bias=True)
48
+ (1): ReLU(inplace=True)
49
+ (2): Dropout(p=0.5, inplace=False)
50
+ (3): Linear(in_features=4096, out_features=4096, bias=True)
51
+ (4): ReLU(inplace=True)
52
+ (5): Dropout(p=0.5, inplace=False)
53
+ (6): Linear(in_features=4096, out_features=1000, bias=True)
54
+ )
55
+ )
56
+ """
57
+
58
  class VGG_19(nn.Module):
59
  def __init__(self):
60
  super(VGG_19, self).__init__()