jgwill committed on
Commit
305e0ff
·
1 Parent(s): 4a89d40
Dockerfile CHANGED
@@ -1,24 +1,21 @@
1
  FROM guillaumeai/ast:pikawill02b-285ik
2
 
3
- # RUN apt update
4
- # RUN apt upgrade -y
5
-
6
- RUN pip install -U pip
7
- # #RUN pip install -U pyyaml
8
- # RUN pip install -U runway-python
9
- # #runway --force-reinstall
10
- # #RUN pip install -U tensorflow
11
-
12
 
 
13
  COPY requirements.txt .
14
- RUN pip install -r requirements.txt
 
 
 
15
 
16
  COPY server.py .
17
  COPY compo-singleone-v1-dev-acc.py .
 
18
 
19
  EXPOSE 7860
20
 
 
21
  #compo-singleone-v1-dev-acc.py
22
 
23
- CMD ["python", "compo-singleone-v1-dev-acc.py"]
24
 
 
1
FROM guillaumeai/ast:pikawill02b-285ik

# System/python upgrades need root; privileges are dropped again below.
USER root
COPY requirements.txt .
COPY upgrade.sh .
# Single layer; the explicit `chmod +x` was redundant because the script
# is invoked through `bash`, not executed directly.
RUN bash upgrade.sh

COPY server.py .
COPY compo-singleone-v1-dev-acc.py .
COPY compo-singleone-v2-dev-acc.py .

EXPOSE 7860

# Run the server as an unprivileged user.
USER 1000
#compo-singleone-v1-dev-acc.py

CMD ["python", "compo-singleone-v2-dev-acc.py"]
21
 
compo-singleone-v1-dev-acc.py CHANGED
@@ -24,7 +24,7 @@ from datetime import datetime
24
  import time
25
 
26
 
27
-
28
 
29
  # Determining the size of the passes
30
  pass1_image_size = 1328
 
24
  import time
25
 
26
 
27
+ #/var/lib/ast/model/model_cezanne:/data/styleCheckpoint/model_cezanne
28
 
29
  # Determining the size of the passes
30
  pass1_image_size = 1328
compo-singleone-v2-dev-acc.py ADDED
@@ -0,0 +1,411 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#####################################################
# AST Composite Server Double Two
# By Guillaume Descoteaux-Isabelle, 2021
#
# This server composes two Adaptive Style Transfer passes (the output of the
# first pass serves as input to the second, using the same model).
########################################################
# v2-dev
# Receives the two resolutions as arguments in the request...
9
+
10
+
11
+ import os
12
+ import numpy as np
13
+ import tensorflow as tf
14
+ import cv2
15
+ from module import encoder, decoder
16
+ from glob import glob
17
+ import runway
18
+ from runway.data_types import number, text
19
+
20
+
21
+ #from utils import *
22
+ import scipy
23
+ from datetime import datetime
24
+ import time
25
+
26
+
27
+ import re
28
+
29
+
30
+
31
# ---------------------------------------------------------------------------
# Runtime configuration, read once at import time from environment variables.
# NOTE: os.getenv returns *strings*; each value is cast to int so the
# variables keep one type whether the env var is set or not.
# ---------------------------------------------------------------------------

# Size (px) of the image's smaller side for the first stylization pass.
pass1_image_size = 1328
if not os.getenv('PASS1IMAGESIZE'):
    print("PASS1IMAGESIZE env var non existent;using default:" + str(pass1_image_size))
else:
    pass1_image_size = int(os.getenv('PASS1IMAGESIZE', 1328))
    print("PASS1IMAGESIZE value:" + str(pass1_image_size))


# Default clip-histogram percentage for automatic brightness/contrast.
autoabc = 1
if not os.getenv('AUTOABC'):
    print("AUTOABC env var non existent;using default:")
    print(autoabc)
    abcdefault = 1
    print("NOTE----> when running docker, set AUTOABC variable")
    print(" docker run ... -e AUTOABC=1 #enabled, 0 to disabled (default)")
else:
    autoabc = int(os.getenv('AUTOABC', 1))
    print("AUTOABC value:")
    print(autoabc)
    abcdefault = autoabc


##########################################
## MODELS
# Model name, echoed back in the stylize response.
model1name = "UNNAMED"
if not os.getenv('MODEL1NAME'):
    print("MODEL1NAME env var non existent;using default:" + model1name)
else:
    model1name = os.getenv('MODEL1NAME', "UNNAMED")
    print("MODEL1NAME value:" + model1name)

#######################################################
94
+
95
def get_padded_checkpoint_no_from_filename(checkpoint_filename):
    """Extract the checkpoint step count (in thousands) from a checkpoint
    filename such as ``model.ckpt-300000`` and return it as a string padded
    to at least 3 characters (e.g. ``"300"``, ``"005"``).

    Returns ``None`` when the filename carries no ``ckpt-<digits>`` token.
    """
    match = re.search(r'ckpt-(\d+)', checkpoint_filename)
    if not match:
        return None
    steps = int(match.group(1))
    # int(round(...)) — the previous round(x, 0) returned a float, which
    # leaked a "300.0"-style repr into the padded result.
    checkpoint_number = int(round(steps / 1000))
    print(checkpoint_number)
    return str(checkpoint_number).zfill(3)
104
+
105
# Diagnostics captured during setup and reported by the 'meta2' command.
found_model = 'none'
found_model_checkpoint = '0'
checkpoint_info = '0'

#########################################################
# SETUP

runway_files = runway.file(is_directory=True)


@runway.setup(options={'styleCheckpoint': runway_files})
def setup(opts):
    """Build the TF1 inference graph, restore the newest checkpoint found
    under opts['styleCheckpoint'] and return the model handle for Runway."""
    global found_model, found_model_checkpoint, checkpoint_info

    sess = tf.Session()
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    # Encoder -> decoder graph over a single RGB image of arbitrary size.
    with tf.name_scope('placeholder'):
        input_photo = tf.placeholder(dtype=tf.float32,
                                     shape=[1, None, None, 3],
                                     name='photo')
    input_photo_features = encoder(image=input_photo,
                                   options={'gf_dim': 32},
                                   reuse=False)
    output_photo = decoder(features=input_photo_features,
                           options={'gf_dim': 32},
                           reuse=False)
    saver = tf.train.Saver()

    path = opts['styleCheckpoint']
    # Default model is the first sub-directory of the checkpoint root;
    # MODELNAME overrides that choice.
    model_name = [p for p in os.listdir(path)
                  if os.path.isdir(os.path.join(path, p))][0]
    if not os.getenv('MODELNAME'):
        dtprint("CONFIG::MODELNAME env var non existent;using default:" + model_name)
    else:
        model_name = os.getenv('MODELNAME')

    checkpoint_dir = os.path.join(path, model_name, 'checkpoint_long')
    print("-----------------------------------------")
    print("modelname is : " + model_name)
    found_model = model_name
    print("checkpoint_dir is : " + checkpoint_dir)
    print("Auto Brightness-Contrast Correction can be set as the x2 of this SingleOne Server")
    print("-----------------------------------------")

    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
    checkpoint_info = ckpt_name
    found_model_checkpoint = get_padded_checkpoint_no_from_filename(ckpt_name)
    saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))

    m1 = dict(sess=sess, input_photo=input_photo, output_photo=output_photo)
    models = type('', (), {})()
    models.m1 = m1
    return models
202
+
203
meta_inputs = {'meta': text}
meta_outputs = {'meta': text, 'model': text, 'files': text, 'checkpoint': text}


@runway.command('meta2', inputs=meta_inputs, outputs=meta_outputs)
def get_geta(models, inp):
    """Report which model/checkpoint the server loaded ('meta2' command)."""
    global found_model, found_model_checkpoint, checkpoint_info
    payload = {
        "meta": "PROTO3 " + inp['meta'],
        "model": found_model,
        "files": str(runway_files),
        "checkpoint": found_model_checkpoint,
    }
    print(payload)
    return payload
220
+
221
+
222
+
223
+
224
#@STCGoal add number or text to specify resolution of the three pass
inputs = {'contentImage': runway.image,
          'x1': number(default=1024, min=24, max=17000),
          'x2': number(default=0, min=-99, max=99)}
outputs = {'stylizedImage': runway.image, 'totaltime': number,
           'x1': number, 'c1': number, 'model1name': text}


@runway.command('stylize', inputs=inputs, outputs=outputs)
def stylize(models, inp):
    """Run a single stylization pass over the submitted image.

    Inputs:
        contentImage: the image to stylize.
        x1: target size (px) of the image's smaller side before inference.
        x2: clip-histogram percentage for the optional automatic
            brightness/contrast correction; 0 disables the correction.

    Returns a dict with the stylized image, wall-clock run time, the
    effective x1/c1 values and the configured model name.
    """
    start = time.time()
    dtprint("Composing.1..")
    model = models.m1

    x1 = inp['x1']
    c1 = inp['x2']
    # Out-of-range correction factor falls back to the AUTOABC default.
    # BUGFIX: the original assigned the unused name `ci` here, so the
    # fallback never took effect.
    if c1 > 99:
        c1 = abcdefault

    # Normalize to the [-1, 1] range the model was trained on.
    img = np.array(inp['contentImage'])
    img = img / 127.5 - 1.

    # Resize so the smaller side matches the requested pass-1 size.
    image_size = x1
    img_shape = img.shape[:2]
    alpha = float(image_size) / float(min(img_shape))
    dtprint("DEBUG::content.imgshape:" + str(tuple(img_shape)) + ", alpha:" + str(alpha))
    try:
        img = scipy.misc.imresize(img, size=alpha)
    except Exception:
        # scipy.misc.imresize was removed in SciPy >= 1.3; keep the
        # original best-effort behavior: stylize at native resolution.
        pass

    # Inference pass 1.
    img = np.expand_dims(img, axis=0)
    dtprint("INFO:Pass1 inference starting")
    img = model['sess'].run(model['output_photo'],
                            feed_dict={model['input_photo']: img})
    dtprint("INFO:Pass1 inference done")

    # Back to uint8 pixel space.
    img = (img + 1.) * 127.5
    img = img.astype('uint8')
    img = img[0]

    print('autoabc value:')
    print(c1)
    if c1 != 0:
        print('Auto Brightening images...')
        # Plain tuple unpacking replaces the original confusing chained
        # assignment (`img = img, alpha2, beta = ...`); net effect is the
        # same: img becomes the corrected image.
        img, alpha2, beta = automatic_brightness_and_contrast(img, c1)

    stop = time.time()
    totaltime = stop - start
    print("The time of the run:", totaltime)
    res2 = dict(stylizedImage=img, totaltime=totaltime, x1=x1,
                model1name=model1name, c1=c1)
    return res2
344
+
345
+
346
+
347
def dtprint(msg):
    """Print *msg* prefixed with the current wall-clock time (HH:MM:SS)."""
    print(getdttag() + "::" + msg)


def getdttag():
    """Return the current local time formatted as ``HH:MM:SS``."""
    return datetime.now().strftime("%H:%M:%S")
358
+
359
+
360
+
361
# Automatic brightness and contrast optimization with optional histogram clipping
def automatic_brightness_and_contrast(image, clip_hist_percent=25):
    """Linearly stretch *image*'s intensity range after clipping
    ``clip_hist_percent`` percent of pixels off the histogram tails.

    Returns a tuple ``(auto_result, alpha, beta)`` where the correction is
    ``out = image * alpha + beta`` (applied via cv2.convertScaleAbs).
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Grayscale histogram and its cumulative distribution.
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
    hist_size = len(hist)
    accumulator = [float(hist[0])]
    for index in range(1, hist_size):
        accumulator.append(accumulator[index - 1] + float(hist[index]))

    # Convert the clip percentage into a pixel-count threshold per tail.
    maximum = accumulator[-1]
    clip_hist_percent *= (maximum / 100.0)
    clip_hist_percent /= 2.0

    # Locate left cut.
    minimum_gray = 0
    while accumulator[minimum_gray] < clip_hist_percent:
        minimum_gray += 1

    # Locate right cut.
    maximum_gray = hist_size - 1
    while accumulator[maximum_gray] >= (maximum - clip_hist_percent):
        maximum_gray -= 1

    # BUGFIX: a near-uniform image can collapse the clipped range so that
    # maximum_gray <= minimum_gray, which previously raised
    # ZeroDivisionError. Fall back to the identity transform in that case.
    if maximum_gray <= minimum_gray:
        alpha = 1.0
        beta = 0.0
    else:
        alpha = 255 / (maximum_gray - minimum_gray)
        beta = -minimum_gray * alpha

    auto_result = cv2.convertScaleAbs(image, alpha=alpha, beta=beta)
    return (auto_result, alpha, beta)
405
+
406
+
407
if __name__ == '__main__':
    # Runway serves on the same port the Dockerfile EXPOSEs.
    os.environ["RW_PORT"] = "7860"
    print("Launched...")
    runway.run()
upgrade.sh ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Build-time upgrade script, run as root from the Dockerfile.
# Deliberately best-effort: each step is independent and a failure
# (e.g. conda absent in the base image) does not abort the build.

# Refresh the base image's package index and apply OS updates.
apt update
apt upgrade -y

# Keep pip itself current before installing requirements.
pip install -U pip
# #pip install -U pyyaml
# pip install -U runway-python
# #runway --force-reinstall
#pip install -U tensorflow

conda update conda --yes

pip install -r requirements.txt

# Extra tooling used at runtime / by the re2 wheel build.
apt install sudo -y
apt install libre2-dev -y
pip install re2
#apt install build-essential -y