Roman committed on
Commit
4bee342
1 Parent(s): 3cf0931

chore: make the client-server interface inherit from CML

Browse files
Files changed (35) hide show
  1. app.py +47 -44
  2. compile.py +7 -6
  3. custom_client_server.py +19 -188
  4. filters.py +37 -82
  5. filters/black and white/deployment/client.zip +1 -1
  6. filters/black and white/deployment/serialized_processing.json +1 -1
  7. filters/black and white/deployment/server.zip +1 -1
  8. filters/black_and_white/deployment/client.zip +0 -3
  9. filters/black_and_white/deployment/serialized_processing.json +0 -1
  10. filters/black_and_white/deployment/server.zip +0 -3
  11. filters/black_and_white/server.onnx +0 -3
  12. filters/blur/deployment/client.zip +1 -1
  13. filters/blur/deployment/serialized_processing.json +1 -1
  14. filters/blur/deployment/server.zip +1 -1
  15. filters/identity/deployment/client.zip +1 -1
  16. filters/identity/deployment/serialized_processing.json +1 -1
  17. filters/identity/deployment/server.zip +1 -1
  18. filters/inverted/deployment/client.zip +1 -1
  19. filters/inverted/deployment/serialized_processing.json +1 -1
  20. filters/inverted/deployment/server.zip +1 -1
  21. filters/ridge detection/deployment/client.zip +1 -1
  22. filters/ridge detection/deployment/serialized_processing.json +1 -1
  23. filters/ridge detection/deployment/server.zip +1 -1
  24. filters/ridge_detection/deployment/client.zip +0 -3
  25. filters/ridge_detection/deployment/serialized_processing.json +0 -1
  26. filters/ridge_detection/deployment/server.zip +0 -3
  27. filters/ridge_detection/server.onnx +0 -3
  28. filters/rotate/deployment/client.zip +1 -1
  29. filters/rotate/deployment/serialized_processing.json +1 -1
  30. filters/rotate/deployment/server.zip +1 -1
  31. filters/sharpen/deployment/client.zip +1 -1
  32. filters/sharpen/deployment/serialized_processing.json +1 -1
  33. filters/sharpen/deployment/server.zip +1 -1
  34. generate_dev_files.py +6 -6
  35. server.py +5 -5
app.py CHANGED
@@ -26,18 +26,18 @@ subprocess.Popen(["uvicorn", "server:app"], cwd=REPO_DIR)
26
  time.sleep(3)
27
 
28
 
29
- def decrypt_output_with_wrong_key(encrypted_image, image_filter):
30
  """Decrypt the encrypted output using a different private key.
31
  """
32
  # Retrieve the filter's deployment path
33
- filter_path = FILTERS_PATH / f"{image_filter}/deployment"
34
 
35
  # Instantiate the client interface and generate a new private key
36
  wrong_client = CustomFHEClient(filter_path, WRONG_KEYS_PATH)
37
  wrong_client.generate_private_and_evaluation_keys(force=True)
38
 
39
  # Deserialize, decrypt and post-processing the encrypted output using the new private key
40
- output_image = wrong_client.deserialize_decrypt_post_process(encrypted_image)
41
 
42
  return output_image
43
 
@@ -61,33 +61,33 @@ def shorten_bytes_object(bytes_object, limit=500):
61
  return bytes_object[shift : limit + shift].hex()
62
 
63
 
64
- def get_client(user_id, image_filter):
65
  """Get the client API.
66
 
67
  Args:
68
  user_id (int): The current user's ID.
69
- image_filter (str): The filter chosen by the user
70
 
71
  Returns:
72
  CustomFHEClient: The client API.
73
  """
74
  return CustomFHEClient(
75
- FILTERS_PATH / f"{image_filter}/deployment", KEYS_PATH / f"{image_filter}_{user_id}"
76
  )
77
 
78
 
79
- def get_client_file_path(name, user_id, image_filter):
80
  """Get the correct temporary file path for the client.
81
 
82
  Args:
83
  name (str): The desired file name.
84
  user_id (int): The current user's ID.
85
- image_filter (str): The filter chosen by the user
86
 
87
  Returns:
88
  pathlib.Path: The file path.
89
  """
90
- return CLIENT_TMP_PATH / f"{name}_{image_filter}_{user_id}"
91
 
92
 
93
  def clean_temporary_files(n_keys=20):
@@ -121,11 +121,11 @@ def clean_temporary_files(n_keys=20):
121
  file.unlink()
122
 
123
 
124
- def keygen(image_filter):
125
  """Generate the private key associated to a filter.
126
 
127
  Args:
128
- image_filter (str): The current filter to consider.
129
 
130
  Returns:
131
  (user_id, True) (Tuple[int, bool]): The current user's ID and a boolean used for visual display.
@@ -138,7 +138,7 @@ def keygen(image_filter):
138
  user_id = numpy.random.randint(0, 2**32)
139
 
140
  # Retrieve the client API
141
- client = get_client(user_id, image_filter)
142
 
143
  # Generate a private key
144
  client.generate_private_and_evaluation_keys(force=True)
@@ -154,7 +154,7 @@ def keygen(image_filter):
154
 
155
  # Save evaluation_key as bytes in a file as it is too large to pass through regular Gradio
156
  # buttons (see https://github.com/gradio-app/gradio/issues/1877)
157
- evaluation_key_path = get_client_file_path("evaluation_key", user_id, image_filter)
158
 
159
  with evaluation_key_path.open("wb") as evaluation_key_file:
160
  evaluation_key_file.write(evaluation_key)
@@ -162,13 +162,13 @@ def keygen(image_filter):
162
  return (user_id, True, private_key_size)
163
 
164
 
165
- def encrypt(user_id, input_image, image_filter):
166
  """Encrypt the given image for a specific user and filter.
167
 
168
  Args:
169
  user_id (int): The current user's ID.
170
  input_image (numpy.ndarray): The image to encrypt.
171
- image_filter (str): The current filter to consider.
172
 
173
  Returns:
174
  (input_image, encrypted_image_short) (Tuple[bytes]): The encrypted image and one of its
@@ -182,17 +182,20 @@ def encrypt(user_id, input_image, image_filter):
182
  raise gr.Error("Please choose an image first.")
183
 
184
  # Retrieve the client API
185
- client = get_client(user_id, image_filter)
186
 
187
- # Pre-process, encrypt and serialize the image
188
- encrypted_image = client.pre_process_encrypt_serialize(input_image)
 
 
 
189
 
190
  # Compute the input's size in Megabytes
191
  encrypted_input_size = len(encrypted_image) / 1000000
192
 
193
  # Save encrypted_image to bytes in a file, since too large to pass through regular Gradio
194
  # buttons, https://github.com/gradio-app/gradio/issues/1877
195
- encrypted_image_path = get_client_file_path("encrypted_image", user_id, image_filter)
196
 
197
  with encrypted_image_path.open("wb") as encrypted_image_file:
198
  encrypted_image_file.write(encrypted_image)
@@ -203,20 +206,20 @@ def encrypt(user_id, input_image, image_filter):
203
  return (input_image, encrypted_image_short, encrypted_input_size)
204
 
205
 
206
- def send_input(user_id, image_filter):
207
  """Send the encrypted input image as well as the evaluation key to the server.
208
 
209
  Args:
210
  user_id (int): The current user's ID.
211
- image_filter (str): The current filter to consider.
212
  """
213
  # Get the evaluation key path
214
- evaluation_key_path = get_client_file_path("evaluation_key", user_id, image_filter)
215
 
216
  if user_id == "" or not evaluation_key_path.is_file():
217
  raise gr.Error("Please generate the private key first.")
218
 
219
- encrypted_input_path = get_client_file_path("encrypted_image", user_id, image_filter)
220
 
221
  if not encrypted_input_path.is_file():
222
  raise gr.Error("Please generate the private key and then encrypt an image first.")
@@ -224,7 +227,7 @@ def send_input(user_id, image_filter):
224
  # Define the data and files to post
225
  data = {
226
  "user_id": user_id,
227
- "filter": image_filter,
228
  }
229
 
230
  files = [
@@ -242,16 +245,16 @@ def send_input(user_id, image_filter):
242
  return response.ok
243
 
244
 
245
- def run_fhe(user_id, image_filter):
246
  """Apply the filter on the encrypted image previously sent using FHE.
247
 
248
  Args:
249
  user_id (int): The current user's ID.
250
- image_filter (str): The current filter to consider.
251
  """
252
  data = {
253
  "user_id": user_id,
254
- "filter": image_filter,
255
  }
256
 
257
  # Trigger the FHE execution on the encrypted image previously sent
@@ -266,12 +269,12 @@ def run_fhe(user_id, image_filter):
266
  raise gr.Error("Please wait for the input image to be sent to the server.")
267
 
268
 
269
- def get_output(user_id, image_filter):
270
  """Retrieve the encrypted output image.
271
 
272
  Args:
273
  user_id (int): The current user's ID.
274
- image_filter (str): The current filter to consider.
275
 
276
  Returns:
277
  encrypted_output_image_short (bytes): A representation of the encrypted result.
@@ -279,7 +282,7 @@ def get_output(user_id, image_filter):
279
  """
280
  data = {
281
  "user_id": user_id,
282
- "filter": image_filter,
283
  }
284
 
285
  # Retrieve the encrypted output image
@@ -296,25 +299,25 @@ def get_output(user_id, image_filter):
296
 
297
  # Save the encrypted output to bytes in a file as it is too large to pass through regular
298
  # Gradio buttons (see https://github.com/gradio-app/gradio/issues/1877)
299
- encrypted_output_path = get_client_file_path("encrypted_output", user_id, image_filter)
300
 
301
  with encrypted_output_path.open("wb") as encrypted_output_file:
302
  encrypted_output_file.write(encrypted_output)
303
 
304
  # Decrypt the image using a different (wrong) key for display
305
- output_image_representation = decrypt_output_with_wrong_key(encrypted_output, image_filter)
306
 
307
  return output_image_representation, encrypted_output_size
308
  else:
309
  raise gr.Error("Please wait for the FHE execution to be completed.")
310
 
311
 
312
- def decrypt_output(user_id, image_filter):
313
  """Decrypt the result.
314
 
315
  Args:
316
  user_id (int): The current user's ID.
317
- image_filter (str): The current filter to consider.
318
 
319
  Returns:
320
  (output_image, False, False) ((Tuple[numpy.ndarray, bool, bool]): The decrypted output, as
@@ -325,7 +328,7 @@ def decrypt_output(user_id, image_filter):
325
  raise gr.Error("Please generate the private key first.")
326
 
327
  # Get the encrypted output path
328
- encrypted_output_path = get_client_file_path("encrypted_output", user_id, image_filter)
329
 
330
  if not encrypted_output_path.is_file():
331
  raise gr.Error("Please run the FHE execution first.")
@@ -335,10 +338,10 @@ def decrypt_output(user_id, image_filter):
335
  encrypted_output_image = encrypted_output_file.read()
336
 
337
  # Retrieve the client API
338
- client = get_client(user_id, image_filter)
339
 
340
  # Deserialize, decrypt and post-process the encrypted output
341
- output_image = client.deserialize_decrypt_post_process(encrypted_output_image)
342
 
343
  return output_image, False, False
344
 
@@ -374,7 +377,7 @@ with demo:
374
  )
375
 
376
  gr.Markdown("### Step 2. Choose your filter")
377
- image_filter = gr.Dropdown(
378
  choices=AVAILABLE_FILTERS, value="inverted", label="Choose your filter", interactive=True
379
  )
380
 
@@ -475,36 +478,36 @@ with demo:
475
  # Button to generate the private key
476
  keygen_button.click(
477
  keygen,
478
- inputs=[image_filter],
479
  outputs=[user_id, keygen_checkbox, private_key_size],
480
  )
481
 
482
  # Button to encrypt inputs on the client side
483
  encrypt_button.click(
484
  encrypt,
485
- inputs=[user_id, input_image, image_filter],
486
  outputs=[original_image, encrypted_input, encrypted_input_size],
487
  )
488
 
489
  # Button to send the encodings to the server using post method
490
  send_input_button.click(
491
- send_input, inputs=[user_id, image_filter], outputs=[send_input_checkbox]
492
  )
493
 
494
  # Button to send the encodings to the server using post method
495
- execute_fhe_button.click(run_fhe, inputs=[user_id, image_filter], outputs=[fhe_execution_time])
496
 
497
  # Button to send the encodings to the server using post method
498
  get_output_button.click(
499
  get_output,
500
- inputs=[user_id, image_filter],
501
  outputs=[encrypted_output_representation, encrypted_output_size]
502
  )
503
 
504
  # Button to decrypt the output on the client side
505
  decrypt_button.click(
506
  decrypt_output,
507
- inputs=[user_id, image_filter],
508
  outputs=[output_image, keygen_checkbox, send_input_checkbox],
509
  )
510
 
 
26
  time.sleep(3)
27
 
28
 
29
+ def decrypt_output_with_wrong_key(encrypted_image, filter_name):
30
  """Decrypt the encrypted output using a different private key.
31
  """
32
  # Retrieve the filter's deployment path
33
+ filter_path = FILTERS_PATH / f"{filter_name}/deployment"
34
 
35
  # Instantiate the client interface and generate a new private key
36
  wrong_client = CustomFHEClient(filter_path, WRONG_KEYS_PATH)
37
  wrong_client.generate_private_and_evaluation_keys(force=True)
38
 
39
  # Deserialize, decrypt and post-processing the encrypted output using the new private key
40
+ output_image = wrong_client.deserialize_decrypt_dequantize(encrypted_image)
41
 
42
  return output_image
43
 
 
61
  return bytes_object[shift : limit + shift].hex()
62
 
63
 
64
+ def get_client(user_id, filter_name):
65
  """Get the client API.
66
 
67
  Args:
68
  user_id (int): The current user's ID.
69
+ filter_name (str): The filter chosen by the user
70
 
71
  Returns:
72
  CustomFHEClient: The client API.
73
  """
74
  return CustomFHEClient(
75
+ FILTERS_PATH / f"{filter_name}/deployment", KEYS_PATH / f"{filter_name}_{user_id}"
76
  )
77
 
78
 
79
+ def get_client_file_path(name, user_id, filter_name):
80
  """Get the correct temporary file path for the client.
81
 
82
  Args:
83
  name (str): The desired file name.
84
  user_id (int): The current user's ID.
85
+ filter_name (str): The filter chosen by the user
86
 
87
  Returns:
88
  pathlib.Path: The file path.
89
  """
90
+ return CLIENT_TMP_PATH / f"{name}_{filter_name}_{user_id}"
91
 
92
 
93
  def clean_temporary_files(n_keys=20):
 
121
  file.unlink()
122
 
123
 
124
+ def keygen(filter_name):
125
  """Generate the private key associated to a filter.
126
 
127
  Args:
128
+ filter_name (str): The current filter to consider.
129
 
130
  Returns:
131
  (user_id, True) (Tuple[int, bool]): The current user's ID and a boolean used for visual display.
 
138
  user_id = numpy.random.randint(0, 2**32)
139
 
140
  # Retrieve the client API
141
+ client = get_client(user_id, filter_name)
142
 
143
  # Generate a private key
144
  client.generate_private_and_evaluation_keys(force=True)
 
154
 
155
  # Save evaluation_key as bytes in a file as it is too large to pass through regular Gradio
156
  # buttons (see https://github.com/gradio-app/gradio/issues/1877)
157
+ evaluation_key_path = get_client_file_path("evaluation_key", user_id, filter_name)
158
 
159
  with evaluation_key_path.open("wb") as evaluation_key_file:
160
  evaluation_key_file.write(evaluation_key)
 
162
  return (user_id, True, private_key_size)
163
 
164
 
165
+ def encrypt(user_id, input_image, filter_name):
166
  """Encrypt the given image for a specific user and filter.
167
 
168
  Args:
169
  user_id (int): The current user's ID.
170
  input_image (numpy.ndarray): The image to encrypt.
171
+ filter_name (str): The current filter to consider.
172
 
173
  Returns:
174
  (input_image, encrypted_image_short) (Tuple[bytes]): The encrypted image and one of its
 
182
  raise gr.Error("Please choose an image first.")
183
 
184
  # Retrieve the client API
185
+ client = get_client(user_id, filter_name)
186
 
187
+ # Pre-process the input image as Torch and Numpy don't follow the same shape format
188
+ preprocessed_input_image = client.model.pre_processing(input_image)
189
+
190
+ # Encrypt and serialize the image
191
+ encrypted_image = client.quantize_encrypt_serialize(preprocessed_input_image)
192
 
193
  # Compute the input's size in Megabytes
194
  encrypted_input_size = len(encrypted_image) / 1000000
195
 
196
  # Save encrypted_image to bytes in a file, since too large to pass through regular Gradio
197
  # buttons, https://github.com/gradio-app/gradio/issues/1877
198
+ encrypted_image_path = get_client_file_path("encrypted_image", user_id, filter_name)
199
 
200
  with encrypted_image_path.open("wb") as encrypted_image_file:
201
  encrypted_image_file.write(encrypted_image)
 
206
  return (input_image, encrypted_image_short, encrypted_input_size)
207
 
208
 
209
+ def send_input(user_id, filter_name):
210
  """Send the encrypted input image as well as the evaluation key to the server.
211
 
212
  Args:
213
  user_id (int): The current user's ID.
214
+ filter_name (str): The current filter to consider.
215
  """
216
  # Get the evaluation key path
217
+ evaluation_key_path = get_client_file_path("evaluation_key", user_id, filter_name)
218
 
219
  if user_id == "" or not evaluation_key_path.is_file():
220
  raise gr.Error("Please generate the private key first.")
221
 
222
+ encrypted_input_path = get_client_file_path("encrypted_image", user_id, filter_name)
223
 
224
  if not encrypted_input_path.is_file():
225
  raise gr.Error("Please generate the private key and then encrypt an image first.")
 
227
  # Define the data and files to post
228
  data = {
229
  "user_id": user_id,
230
+ "filter": filter_name,
231
  }
232
 
233
  files = [
 
245
  return response.ok
246
 
247
 
248
+ def run_fhe(user_id, filter_name):
249
  """Apply the filter on the encrypted image previously sent using FHE.
250
 
251
  Args:
252
  user_id (int): The current user's ID.
253
+ filter_name (str): The current filter to consider.
254
  """
255
  data = {
256
  "user_id": user_id,
257
+ "filter": filter_name,
258
  }
259
 
260
  # Trigger the FHE execution on the encrypted image previously sent
 
269
  raise gr.Error("Please wait for the input image to be sent to the server.")
270
 
271
 
272
+ def get_output(user_id, filter_name):
273
  """Retrieve the encrypted output image.
274
 
275
  Args:
276
  user_id (int): The current user's ID.
277
+ filter_name (str): The current filter to consider.
278
 
279
  Returns:
280
  encrypted_output_image_short (bytes): A representation of the encrypted result.
 
282
  """
283
  data = {
284
  "user_id": user_id,
285
+ "filter": filter_name,
286
  }
287
 
288
  # Retrieve the encrypted output image
 
299
 
300
  # Save the encrypted output to bytes in a file as it is too large to pass through regular
301
  # Gradio buttons (see https://github.com/gradio-app/gradio/issues/1877)
302
+ encrypted_output_path = get_client_file_path("encrypted_output", user_id, filter_name)
303
 
304
  with encrypted_output_path.open("wb") as encrypted_output_file:
305
  encrypted_output_file.write(encrypted_output)
306
 
307
  # Decrypt the image using a different (wrong) key for display
308
+ output_image_representation = decrypt_output_with_wrong_key(encrypted_output, filter_name)
309
 
310
  return output_image_representation, encrypted_output_size
311
  else:
312
  raise gr.Error("Please wait for the FHE execution to be completed.")
313
 
314
 
315
+ def decrypt_output(user_id, filter_name):
316
  """Decrypt the result.
317
 
318
  Args:
319
  user_id (int): The current user's ID.
320
+ filter_name (str): The current filter to consider.
321
 
322
  Returns:
323
  (output_image, False, False) ((Tuple[numpy.ndarray, bool, bool]): The decrypted output, as
 
328
  raise gr.Error("Please generate the private key first.")
329
 
330
  # Get the encrypted output path
331
+ encrypted_output_path = get_client_file_path("encrypted_output", user_id, filter_name)
332
 
333
  if not encrypted_output_path.is_file():
334
  raise gr.Error("Please run the FHE execution first.")
 
338
  encrypted_output_image = encrypted_output_file.read()
339
 
340
  # Retrieve the client API
341
+ client = get_client(user_id, filter_name)
342
 
343
  # Deserialize, decrypt and post-process the encrypted output
344
+ output_image = client.deserialize_decrypt_dequantize(encrypted_output_image)
345
 
346
  return output_image, False, False
347
 
 
377
  )
378
 
379
  gr.Markdown("### Step 2. Choose your filter")
380
+ filter_name = gr.Dropdown(
381
  choices=AVAILABLE_FILTERS, value="inverted", label="Choose your filter", interactive=True
382
  )
383
 
 
478
  # Button to generate the private key
479
  keygen_button.click(
480
  keygen,
481
+ inputs=[filter_name],
482
  outputs=[user_id, keygen_checkbox, private_key_size],
483
  )
484
 
485
  # Button to encrypt inputs on the client side
486
  encrypt_button.click(
487
  encrypt,
488
+ inputs=[user_id, input_image, filter_name],
489
  outputs=[original_image, encrypted_input, encrypted_input_size],
490
  )
491
 
492
  # Button to send the encodings to the server using post method
493
  send_input_button.click(
494
+ send_input, inputs=[user_id, filter_name], outputs=[send_input_checkbox]
495
  )
496
 
497
  # Button to send the encodings to the server using post method
498
+ execute_fhe_button.click(run_fhe, inputs=[user_id, filter_name], outputs=[fhe_execution_time])
499
 
500
  # Button to send the encodings to the server using post method
501
  get_output_button.click(
502
  get_output,
503
+ inputs=[user_id, filter_name],
504
  outputs=[encrypted_output_representation, encrypted_output_size]
505
  )
506
 
507
  # Button to decrypt the output on the client side
508
  decrypt_button.click(
509
  decrypt_output,
510
+ inputs=[user_id, filter_name],
511
  outputs=[output_image, keygen_checkbox, send_input_checkbox],
512
  )
513
 
compile.py CHANGED
@@ -4,21 +4,22 @@ import json
4
  import shutil
5
  import onnx
6
  from common import AVAILABLE_FILTERS, FILTERS_PATH, KEYS_PATH
7
- from custom_client_server import CustomFHEClient, CustomFHEDev
 
8
 
9
  print("Starting compiling the filters.")
10
 
11
- for image_filter in AVAILABLE_FILTERS:
12
- print("\nCompiling filter:", image_filter)
13
 
14
  # Retrieve the deployment files associated to the current filter
15
- deployment_path = FILTERS_PATH / f"{image_filter}/deployment"
16
 
17
  # Retrieve the client associated to the current filter
18
  model = CustomFHEClient(deployment_path, KEYS_PATH).model
19
 
20
  # Load the onnx model
21
- onnx_model = onnx.load(FILTERS_PATH / f"{image_filter}/server.onnx")
22
 
23
  # Compile the model on a representative inputset, using the loaded onnx model
24
  model.compile(onnx_model=onnx_model)
@@ -34,7 +35,7 @@ for image_filter in AVAILABLE_FILTERS:
34
  shutil.rmtree(deployment_path)
35
 
36
  # Save the development files needed for deployment
37
- fhe_dev = CustomFHEDev(model=model, path_dir=deployment_path)
38
  fhe_dev.save()
39
 
40
  # Write the serialized_processing.json file in the deployment directory
 
4
  import shutil
5
  import onnx
6
  from common import AVAILABLE_FILTERS, FILTERS_PATH, KEYS_PATH
7
+ from custom_client_server import CustomFHEClient
8
+ from concrete.ml.deployment import FHEModelDev
9
 
10
  print("Starting compiling the filters.")
11
 
12
+ for filter_name in AVAILABLE_FILTERS:
13
+ print("\nCompiling filter:", filter_name)
14
 
15
  # Retrieve the deployment files associated to the current filter
16
+ deployment_path = FILTERS_PATH / f"{filter_name}/deployment"
17
 
18
  # Retrieve the client associated to the current filter
19
  model = CustomFHEClient(deployment_path, KEYS_PATH).model
20
 
21
  # Load the onnx model
22
+ onnx_model = onnx.load(FILTERS_PATH / f"{filter_name}/server.onnx")
23
 
24
  # Compile the model on a representative inputset, using the loaded onnx model
25
  model.compile(onnx_model=onnx_model)
 
35
  shutil.rmtree(deployment_path)
36
 
37
  # Save the development files needed for deployment
38
+ fhe_dev = FHEModelDev(model=model, path_dir=deployment_path)
39
  fhe_dev.save()
40
 
41
  # Write the serialized_processing.json file in the deployment directory
custom_client_server.py CHANGED
@@ -1,204 +1,35 @@
1
- "Client-server interface implementation for custom integer models."
2
-
3
- from pathlib import Path
4
- from typing import Any
5
 
 
6
  import concrete.numpy as cnp
7
- import numpy as np
8
  from filters import Filter
9
 
10
- from concrete.ml.common.debugging.custom_assert import assert_true
11
-
12
-
13
- class CustomFHEDev:
14
- """Dev API to save the custom integer model, load and run a FHE circuit."""
15
-
16
- model: Any = None
17
-
18
- def __init__(self, path_dir: str, model: Any = None):
19
- """Initialize the development interface.
20
-
21
- Args:
22
- path_dir (str): The path to the directory where the circuit is saved.
23
- model (Any): The model to use for the development interface.
24
- """
25
-
26
- self.path_dir = Path(path_dir)
27
- self.model = model
28
 
29
- # Create the directory path if it does not exist yet
30
- Path(self.path_dir).mkdir(parents=True, exist_ok=True)
31
-
32
- def save(self):
33
- """Export all needed artifacts for the client and server.
34
-
35
- Raises:
36
- Exception: path_dir is not empty.
37
- """
38
- # Check if the path_dir is empty with pathlib
39
- listdir = list(Path(self.path_dir).glob("**/*"))
40
- if len(listdir) > 0:
41
- raise Exception(
42
- f"path_dir: {self.path_dir} is not empty."
43
- "Please delete it before saving a new model."
44
- )
45
-
46
- assert_true(
47
- hasattr(self.model, "fhe_circuit"),
48
- "The model must be compiled and have a fhe_circuit object",
49
- )
50
-
51
- # Model must be compiled with jit=False
52
- # In a jit model, everything is in memory so it is not serializable.
53
- assert_true(
54
- not self.model.fhe_circuit.configuration.jit,
55
- "The model must be compiled with the configuration option jit=False.",
56
- )
57
 
58
- # Export the parameters
59
- self.model.to_json(path_dir=self.path_dir, file_name="serialized_processing")
60
 
61
- # Save the circuit for the server
62
- path_circuit_server = self.path_dir / "server.zip"
63
- self.model.fhe_circuit.server.save(path_circuit_server)
64
-
65
- # Save the circuit for the client
66
- path_circuit_client = self.path_dir / "client.zip"
67
- self.model.fhe_circuit.client.save(path_circuit_client)
68
-
69
-
70
- class CustomFHEClient:
71
- """Client API to encrypt and decrypt FHE data."""
72
-
73
- client: cnp.Client
74
-
75
- def __init__(self, path_dir: str, key_dir: str = None):
76
- """Initialize the client interface.
77
-
78
- Args:
79
- path_dir (str): The path to the directory where the circuit is saved.
80
- key_dir (str): The path to the directory where the keys are stored.
81
- """
82
- self.path_dir = Path(path_dir)
83
- self.key_dir = Path(key_dir)
84
-
85
- # If path_dir does not exist, raise an error
86
- assert_true(
87
- Path(path_dir).exists(), f"{path_dir} does not exist. Please specify a valid path."
88
- )
89
-
90
- # Load
91
- self.load()
92
-
93
- def load(self): # pylint: disable=no-value-for-parameter
94
  """Load the parameters along with the FHE specs."""
95
 
96
  # Load the client
97
  self.client = cnp.Client.load(self.path_dir / "client.zip", self.key_dir)
98
 
99
- # Load the model
100
- self.model = Filter.from_json(self.path_dir / "serialized_processing.json")
101
-
102
- def generate_private_and_evaluation_keys(self, force=False):
103
- """Generate the private and evaluation keys.
104
 
105
- Args:
106
- force (bool): If True, regenerate the keys even if they already exist.
107
- """
108
- self.client.keygen(force)
109
-
110
- def get_serialized_evaluation_keys(self) -> cnp.EvaluationKeys:
111
- """Get the serialized evaluation keys.
112
-
113
- Returns:
114
- cnp.EvaluationKeys: The evaluation keys.
115
- """
116
- return self.client.evaluation_keys.serialize()
117
-
118
- def pre_process_encrypt_serialize(self, x: np.ndarray) -> cnp.PublicArguments:
119
- """Encrypt and serialize the values.
120
-
121
- Args:
122
- x (numpy.ndarray): The values to encrypt and serialize.
123
-
124
- Returns:
125
- cnp.PublicArguments: The encrypted and serialized values.
126
- """
127
- # Pre-process the values
128
- x = self.model.pre_processing(x)
129
-
130
- # Encrypt the values
131
- enc_x = self.client.encrypt(x)
132
-
133
- # Serialize the encrypted values to be sent to the server
134
- serialized_enc_x = self.client.specs.serialize_public_args(enc_x)
135
- return serialized_enc_x
136
-
137
- def deserialize_decrypt_post_process(
138
- self, serialized_encrypted_output: cnp.PublicArguments
139
- ) -> np.ndarray:
140
- """Deserialize, decrypt and post-process the values.
141
-
142
- Args:
143
- serialized_encrypted_output (cnp.PublicArguments): The serialized and encrypted output.
144
-
145
- Returns:
146
- numpy.ndarray: The decrypted values.
147
- """
148
- # Deserialize the encrypted values
149
- deserialized_encrypted_output = self.client.specs.unserialize_public_result(
150
- serialized_encrypted_output
151
  )
152
 
153
- # Decrypt the values
154
- deserialized_decrypted_output = self.client.decrypt(deserialized_encrypted_output)
155
-
156
- # Apply the model post processing
157
- deserialized_decrypted_output = self.model.post_processing(deserialized_decrypted_output)
158
- return deserialized_decrypted_output
159
-
160
 
161
- class CustomFHEServer:
162
- """Server interface to load and run a FHE circuit."""
163
-
164
- server: cnp.Server
165
-
166
- def __init__(self, path_dir: str):
167
- """Initialize the server interface.
168
-
169
- Args:
170
- path_dir (str): The path to the directory where the circuit is saved.
171
- """
172
-
173
- self.path_dir = Path(path_dir)
174
-
175
- # Load the FHE circuit
176
- self.load()
177
-
178
- def load(self):
179
- """Load the circuit."""
180
- self.server = cnp.Server.load(self.path_dir / "server.zip")
181
-
182
- def run(
183
- self,
184
- serialized_encrypted_data: cnp.PublicArguments,
185
- serialized_evaluation_keys: cnp.EvaluationKeys,
186
- ) -> cnp.PublicResult:
187
- """Run the model on the server over encrypted data.
188
-
189
- Args:
190
- serialized_encrypted_data (cnp.PublicArguments): The encrypted and serialized data.
191
- serialized_evaluation_keys (cnp.EvaluationKeys): The serialized evaluation keys.
192
-
193
- Returns:
194
- cnp.PublicResult: The result of the model.
195
- """
196
- assert_true(self.server is not None, "Model has not been loaded.")
197
-
198
- deserialized_encrypted_data = self.server.client_specs.unserialize_public_args(
199
- serialized_encrypted_data
200
- )
201
- deserialized_evaluation_keys = cnp.EvaluationKeys.unserialize(serialized_evaluation_keys)
202
- result = self.server.run(deserialized_encrypted_data, deserialized_evaluation_keys)
203
- serialized_result = self.server.client_specs.serialize_public_result(result)
204
- return serialized_result
 
1
+ "Client-server interface custom implementation for filter models."
 
 
 
2
 
3
+ import json
4
  import concrete.numpy as cnp
 
5
  from filters import Filter
6
 
7
+ from concrete.ml.deployment import FHEModelClient
8
+ from concrete.ml.version import __version__ as CML_VERSION
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
+ class CustomFHEClient(FHEModelClient):
12
+ """Client interface to encrypt and decrypt FHE data associated to a Filter."""
13
 
14
+ def load(self):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  """Load the parameters along with the FHE specs."""
16
 
17
  # Load the client
18
  self.client = cnp.Client.load(self.path_dir / "client.zip", self.key_dir)
19
 
20
+ # Load the filter's parameters from the json file
21
+ with (self.path_dir / "serialized_processing.json").open("r", encoding="utf-8") as f:
22
+ serialized_processing = json.load(f)
 
 
23
 
24
+ # Make sure the version in serialized_model is the same as CML_VERSION
25
+ assert serialized_processing["cml_version"] == CML_VERSION, (
26
+ f"The version of Concrete ML library ({CML_VERSION}) is different "
27
+ f"from the one used to save the model ({serialized_processing['cml_version']}). "
28
+ "Please update to the proper Concrete ML version.",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  )
30
 
31
+ # Initialize the filter model using its filter name
32
+ filter_name = serialized_processing["model_post_processing_params"]["filter_name"]
33
+ self.model = Filter(filter_name)
 
 
 
 
34
 
35
+ return self.model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
filters.py CHANGED
@@ -127,37 +127,45 @@ class _TorchConv2D(nn.Module):
127
  class Filter:
128
  """Filter class used in the app."""
129
 
130
- def __init__(self, image_filter="inverted"):
131
  """Initializing the filter class using a given filter.
132
 
133
  Most filters can be found at https://en.wikipedia.org/wiki/Kernel_(image_processing).
134
 
135
  Args:
136
- image_filter (str): The filter to consider. Default to "inverted".
137
  """
138
 
139
  assert_true(
140
- image_filter in AVAILABLE_FILTERS,
141
  f"Unsupported image filter or transformation. Expected one of {*AVAILABLE_FILTERS,}, "
142
- f"but got {image_filter}",
143
  )
144
 
145
- self.filter = image_filter
 
 
 
 
 
 
 
146
  self.onnx_model = None
147
  self.fhe_circuit = None
148
  self.divide = None
149
  self.repeat_out_channels = False
150
 
151
- if image_filter == "identity":
 
152
  self.torch_model = _TorchIdentity()
153
 
154
- elif image_filter == "inverted":
155
  self.torch_model = _TorchInverted()
156
 
157
- elif image_filter == "rotate":
158
  self.torch_model = _TorchRotate()
159
 
160
- elif image_filter == "black and white":
161
  # Define the grayscale weights (RGB order)
162
  # These weights were used in PAL and NTSC video systems and can be found at
163
  # https://en.wikipedia.org/wiki/Grayscale
@@ -177,7 +185,7 @@ class Filter:
177
  # images to have a RGB format, even for grayscaled ones
178
  self.repeat_out_channels = True
179
 
180
- elif image_filter == "blur":
181
  kernel = np.ones((3, 3))
182
 
183
  self.torch_model = _TorchConv2D(kernel, n_out_channels=3, groups=3)
@@ -185,7 +193,7 @@ class Filter:
185
  # Define the value used when for dividing the output values in post-processing
186
  self.divide = 9
187
 
188
- elif image_filter == "sharpen":
189
  kernel = [
190
  [0, -1, 0],
191
  [-1, 5, -1],
@@ -194,7 +202,7 @@ class Filter:
194
 
195
  self.torch_model = _TorchConv2D(kernel, n_out_channels=3, groups=3)
196
 
197
- elif image_filter == "ridge detection":
198
  kernel = [
199
  [-1, -1, -1],
200
  [-1, 9, -1],
@@ -218,7 +226,9 @@ class Filter:
218
  generated automatically using a NumpyModule. Default to None.
219
  """
220
  # Generate a random representative set of images used for compilation, following Torch's
221
- # shape format (batch, in_channels, image_height, image_width)
 
 
222
  np.random.seed(42)
223
  inputset = tuple(
224
  np.random.randint(0, 255, size=((1, 3) + INPUT_SHAPE), dtype=np.int64) for _ in range(10)
@@ -249,6 +259,20 @@ class Filter:
249
 
250
  return self.fhe_circuit
251
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
252
  def pre_processing(self, input_image):
253
  """Apply pre-processing to the encrypted input images.
254
 
@@ -290,72 +314,3 @@ class Filter:
290
  output_image = output_image.repeat(3, axis=2)
291
 
292
  return output_image
293
-
294
- @classmethod
295
- def from_json(cls, json_path):
296
- """Instantiate a filter using a json file.
297
-
298
- Args:
299
- json_path (Union[str, pathlib.Path]): Path to the json file.
300
-
301
- Returns:
302
- model (Filter): The instantiated filter class.
303
- """
304
- # Load the parameters from the json file
305
- with open(json_path, "r", encoding="utf-8") as f:
306
- serialized_processing = json.load(f)
307
-
308
- # Make sure the version in serialized_model is the same as CML_VERSION
309
- assert_true(
310
- serialized_processing["cml_version"] == CML_VERSION,
311
- f"The version of Concrete ML library ({CML_VERSION}) is different "
312
- f"from the one used to save the model ({serialized_processing['cml_version']}). "
313
- "Please update to the proper Concrete ML version.",
314
- )
315
-
316
- # Initialize the model
317
- model = cls(image_filter=serialized_processing["model_filter"])
318
-
319
- return model
320
-
321
- def to_json(self, path_dir, file_name="serialized_processing"):
322
- """Export the parameters to a json file.
323
-
324
- Args:
325
- path_dir (Union[str, pathlib.Path]): The path to consider when saving the file.
326
- file_name (str): The file name
327
- """
328
- # Serialize the parameters
329
- serialized_processing = {
330
- "model_filter": self.filter,
331
- }
332
- serialized_processing = self._clean_dict_types_for_json(serialized_processing)
333
-
334
- # Add the version of the current CML library
335
- serialized_processing["cml_version"] = CML_VERSION
336
-
337
- # Save the json file
338
- with open(path_dir / f"{file_name}.json", "w", encoding="utf-8") as f:
339
- json.dump(serialized_processing, f)
340
-
341
- def _clean_dict_types_for_json(self, d: dict) -> dict:
342
- """Clean all values in the dict to be json serializable.
343
-
344
- Args:
345
- d (Dict): The dict to clean
346
-
347
- Returns:
348
- Dict: The cleaned dict
349
- """
350
- key_to_delete = []
351
- for key, value in d.items():
352
- if isinstance(value, list) and len(value) > 0 and isinstance(value[0], dict):
353
- d[key] = [self._clean_dict_types_for_json(v) for v in value]
354
- elif isinstance(value, dict):
355
- d[key] = self._clean_dict_types_for_json(value)
356
- elif isinstance(value, (np.generic, np.ndarray)):
357
- d[key] = d[key].tolist()
358
-
359
- for key in key_to_delete:
360
- d.pop(key)
361
- return d
 
127
  class Filter:
128
  """Filter class used in the app."""
129
 
130
+ def __init__(self, filter_name):
131
  """Initializing the filter class using a given filter.
132
 
133
  Most filters can be found at https://en.wikipedia.org/wiki/Kernel_(image_processing).
134
 
135
  Args:
136
+ filter_name (str): The filter to consider.
137
  """
138
 
139
  assert_true(
140
+ filter_name in AVAILABLE_FILTERS,
141
  f"Unsupported image filter or transformation. Expected one of {*AVAILABLE_FILTERS,}, "
142
+ f"but got {filter_name}",
143
  )
144
 
145
+ # Define attributes needed in order to prevent the Concrete-ML client-server interface
146
+ # from breaking
147
+ self.post_processing_params = {"filter_name": filter_name}
148
+ self.input_quantizers = []
149
+ self.output_quantizers = []
150
+
151
+ # Define attributes associated to the filter
152
+ self.filter = filter_name
153
  self.onnx_model = None
154
  self.fhe_circuit = None
155
  self.divide = None
156
  self.repeat_out_channels = False
157
 
158
+ # Instantiate the torch module associated to the given filter name
159
+ if filter_name == "identity":
160
  self.torch_model = _TorchIdentity()
161
 
162
+ elif filter_name == "inverted":
163
  self.torch_model = _TorchInverted()
164
 
165
+ elif filter_name == "rotate":
166
  self.torch_model = _TorchRotate()
167
 
168
+ elif filter_name == "black and white":
169
  # Define the grayscale weights (RGB order)
170
  # These weights were used in PAL and NTSC video systems and can be found at
171
  # https://en.wikipedia.org/wiki/Grayscale
 
185
  # images to have a RGB format, even for grayscaled ones
186
  self.repeat_out_channels = True
187
 
188
+ elif filter_name == "blur":
189
  kernel = np.ones((3, 3))
190
 
191
  self.torch_model = _TorchConv2D(kernel, n_out_channels=3, groups=3)
 
193
  # Define the value used for dividing the output values in post-processing
194
  self.divide = 9
195
 
196
+ elif filter_name == "sharpen":
197
  kernel = [
198
  [0, -1, 0],
199
  [-1, 5, -1],
 
202
 
203
  self.torch_model = _TorchConv2D(kernel, n_out_channels=3, groups=3)
204
 
205
+ elif filter_name == "ridge detection":
206
  kernel = [
207
  [-1, -1, -1],
208
  [-1, 9, -1],
 
226
  generated automatically using a NumpyModule. Default to None.
227
  """
228
  # Generate a random representative set of images used for compilation, following Torch's
229
+ # shape format (batch, in_channels, image_height, image_width) for each sample
230
+ # This version's compiler only handles tuples of 1-batch array as inputset, meaning we need
231
+ # to define the inputset as a Tuple[np.ndarray[shape=(1, 3, H, W)]]
232
  np.random.seed(42)
233
  inputset = tuple(
234
  np.random.randint(0, 255, size=((1, 3) + INPUT_SHAPE), dtype=np.int64) for _ in range(10)
 
259
 
260
  return self.fhe_circuit
261
 
262
+ def quantize_input(self, input_image):
263
+ """Quantize the input.
264
+
265
+ Images are already quantized in this case, however we need to define this method in order
266
+ to prevent the Concrete-ML client-server interface from breaking.
267
+
268
+ Args:
269
+ input_image (np.ndarray): The input to quantize.
270
+
271
+ Returns:
272
+ np.ndarray: The quantized input.
273
+ """
274
+ return input_image
275
+
276
  def pre_processing(self, input_image):
277
  """Apply pre-processing to the encrypted input images.
278
 
 
314
  output_image = output_image.repeat(3, axis=2)
315
 
316
  return output_image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
filters/black and white/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:eef269dcec0d548972cc25c3ef9abd8067bd8df8e4a30b53a1b3006575b70baf
3
  size 388
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c6a88c9717d1ec81035e715ad84803b2351756c1a1a6fb51786e2e07b8cbe84
3
  size 388
filters/black and white/deployment/serialized_processing.json CHANGED
@@ -1 +1 @@
1
- {"model_filter": "black and white", "cml_version": "0.6.1"}
 
1
+ {"model_type": "Filter", "model_post_processing_params": {"filter_name": "black and white"}, "input_quantizers": [], "output_quantizers": [], "cml_version": "0.6.1"}
filters/black and white/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:8ca528ad3f3b99b6c69b0bb0e0a4724615a6be7ac2222424b7c2ac48c26e5b95
3
  size 4364
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3efb2af650a4b4690a8096048b084a03f9a505985fc8dee4da7d3367ea040918
3
  size 4364
filters/black_and_white/deployment/client.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:000285a62f642b20eda541c6697e33de3d725c254ff5c2098e3157fc73cd017b
3
- size 388
 
 
 
 
filters/black_and_white/deployment/serialized_processing.json DELETED
@@ -1 +0,0 @@
1
- {"model_filter": "black_and_white", "cml_version": "0.6.0-rc0"}
 
 
filters/black_and_white/deployment/server.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:9867657ff1e7b2c8eb3c72f28be8b8e8ee0b355762b99f34a25a2c9de0cb104c
3
- size 4762
 
 
 
 
filters/black_and_white/server.onnx DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:3f4774c394a6fec8cc43dae14ce627837aa998fcc78ba4ab67ad1c5bf92dd3ee
3
- size 336
 
 
 
 
filters/blur/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ce25848e14481bf54e4a52fad3ea178bc78ebf2d62e464839da4de58c5a48d43
3
  size 391
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f724173d51c70053038bd40ce911ce4ef9ea50a70b077129b86abb482ad4a21e
3
  size 391
filters/blur/deployment/serialized_processing.json CHANGED
@@ -1 +1 @@
1
- {"model_filter": "blur", "cml_version": "0.6.1"}
 
1
+ {"model_type": "Filter", "model_post_processing_params": {"filter_name": "blur"}, "input_quantizers": [], "output_quantizers": [], "cml_version": "0.6.1"}
filters/blur/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9a058aeab0894ea93e00db344a9e71abeb63c6e8faa8bdb661ae4b304d3eee5c
3
  size 7263
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1193734d14b02195075fc402e3d84e11b8b7216a83ceff9a0becb16f1b3fbcf0
3
  size 7263
filters/identity/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b285786e91816d4f1848968d6737929a90f073d2aabac607b0fe5cd0867f314a
3
  size 378
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e396e33163faf6dbf2a8eca318908efa92dea5d0d8c24a46439a925497543431
3
  size 378
filters/identity/deployment/serialized_processing.json CHANGED
@@ -1 +1 @@
1
- {"model_filter": "identity", "cml_version": "0.6.1"}
 
1
+ {"model_type": "Filter", "model_post_processing_params": {"filter_name": "identity"}, "input_quantizers": [], "output_quantizers": [], "cml_version": "0.6.1"}
filters/identity/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:438384a8517e5ccb354851b9a8baa3ee86af59726d9f1600d98527f0568059b5
3
  size 2559
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da52e793a997ded3b0c383f246da31d51317e2461ff1955f9f01014258272f9b
3
  size 2559
filters/inverted/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b285786e91816d4f1848968d6737929a90f073d2aabac607b0fe5cd0867f314a
3
  size 378
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e396e33163faf6dbf2a8eca318908efa92dea5d0d8c24a46439a925497543431
3
  size 378
filters/inverted/deployment/serialized_processing.json CHANGED
@@ -1 +1 @@
1
- {"model_filter": "inverted", "cml_version": "0.6.1"}
 
1
+ {"model_type": "Filter", "model_post_processing_params": {"filter_name": "inverted"}, "input_quantizers": [], "output_quantizers": [], "cml_version": "0.6.1"}
filters/inverted/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ca6086a06f95b349609433162e316ceddf05dfe6ea2b0936492123ff46f417a7
3
  size 4179
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe95edd2998cee4ff7e40fde889f7a85bbf69218a4f1b517565de79d82517c4f
3
  size 4179
filters/ridge detection/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f8fe63b4e3b322a2c4dd1bb742878b2e90c1b6c151dc2af7bb16155fea29a66c
3
  size 397
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c1bf9931bbf568d5b74fd3e1bab8fcd48780c88b2f36289b26976cb1ebf4c665
3
  size 397
filters/ridge detection/deployment/serialized_processing.json CHANGED
@@ -1 +1 @@
1
- {"model_filter": "ridge detection", "cml_version": "0.6.1"}
 
1
+ {"model_type": "Filter", "model_post_processing_params": {"filter_name": "ridge detection"}, "input_quantizers": [], "output_quantizers": [], "cml_version": "0.6.1"}
filters/ridge detection/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:58266bb522e40f8ba7746e1eca6191e7a1c3c385e99b294c759bbbc88f7e6408
3
  size 5043
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a31acdba1d94ec7fec833fc8e0b0de7a6b345c9dfef5d0b1a7cbdf30613fdc44
3
  size 5043
filters/ridge_detection/deployment/client.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d241694b8c01dce2ad8a5ce2dbe12190e40d6912e88d086dbc0e047aba4dfafb
3
- size 397
 
 
 
 
filters/ridge_detection/deployment/serialized_processing.json DELETED
@@ -1 +0,0 @@
1
- {"model_filter": "ridge_detection", "cml_version": "0.6.0-rc0"}
 
 
filters/ridge_detection/deployment/server.zip DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:3605e14d8533e3c57edf30a7da32d4441fcb68228a8ebd028015338b8b5d5f70
3
- size 4884
 
 
 
 
filters/ridge_detection/server.onnx DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:1e05d56c4988abd621aee6dea4efe2dfdaf1d09dfb78bb7bf7b6bb3a00d3e80b
3
- size 532
 
 
 
 
filters/rotate/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b285786e91816d4f1848968d6737929a90f073d2aabac607b0fe5cd0867f314a
3
  size 378
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e396e33163faf6dbf2a8eca318908efa92dea5d0d8c24a46439a925497543431
3
  size 378
filters/rotate/deployment/serialized_processing.json CHANGED
@@ -1 +1 @@
1
- {"model_filter": "rotate", "cml_version": "0.6.1"}
 
1
+ {"model_type": "Filter", "model_post_processing_params": {"filter_name": "rotate"}, "input_quantizers": [], "output_quantizers": [], "cml_version": "0.6.1"}
filters/rotate/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ecea959453ffd704efba1c5e22db54e902cc6c3289870ece101793d1479cb347
3
  size 4431
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa91ba0d7021fcc6237cd628b6f03151b05625f377149d3d0eedd4a124407646
3
  size 4431
filters/sharpen/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4df798a79bfc380debbfbc7a9cdaf79a096fe1deb18327f31dc141bea38f8d4e
3
  size 396
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c5dee467fb63804731e998a11323a460d2fc8a9f08f4480d9d9c6deb4431447
3
  size 396
filters/sharpen/deployment/serialized_processing.json CHANGED
@@ -1 +1 @@
1
- {"model_filter": "sharpen", "cml_version": "0.6.1"}
 
1
+ {"model_type": "Filter", "model_post_processing_params": {"filter_name": "sharpen"}, "input_quantizers": [], "output_quantizers": [], "cml_version": "0.6.1"}
filters/sharpen/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:befb2eaff02cc855af745dc82bc8f24ce713e4ff4393e3b635f55b8f82e0ff20
3
  size 7311
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87285146e97b8787261a7aa15db77819ac9e10a8c165708792db682d6a5072c7
3
  size 7311
generate_dev_files.py CHANGED
@@ -3,22 +3,22 @@
3
  import shutil
4
  import onnx
5
  from common import AVAILABLE_FILTERS, FILTERS_PATH
6
- from custom_client_server import CustomFHEDev
7
  from filters import Filter
 
8
 
9
  print("Generating deployment files for all available filters")
10
 
11
- for image_filter in AVAILABLE_FILTERS:
12
- print("Filter:", image_filter, "\n")
13
 
14
  # Create the filter instance
15
- filter = Filter(image_filter)
16
 
17
  # Compile the model on a representative inputset
18
  filter.compile()
19
 
20
  # Define the directory path associated to this filter
21
- filter_path = FILTERS_PATH / image_filter
22
 
23
  # Define the directory path associated to this filter's deployment files
24
  deployment_path = filter_path / "deployment"
@@ -28,7 +28,7 @@ for image_filter in AVAILABLE_FILTERS:
28
  shutil.rmtree(deployment_path)
29
 
30
  # Save the files needed for deployment
31
- fhe_dev_filter = CustomFHEDev(deployment_path, filter)
32
  fhe_dev_filter.save()
33
 
34
  # Save the ONNX model
 
3
  import shutil
4
  import onnx
5
  from common import AVAILABLE_FILTERS, FILTERS_PATH
 
6
  from filters import Filter
7
+ from concrete.ml.deployment import FHEModelDev
8
 
9
  print("Generating deployment files for all available filters")
10
 
11
+ for filter_name in AVAILABLE_FILTERS:
12
+ print("Filter:", filter_name, "\n")
13
 
14
  # Create the filter instance
15
+ filter = Filter(filter_name)
16
 
17
  # Compile the model on a representative inputset
18
  filter.compile()
19
 
20
  # Define the directory path associated to this filter
21
+ filter_path = FILTERS_PATH / filter_name
22
 
23
  # Define the directory path associated to this filter's deployment files
24
  deployment_path = filter_path / "deployment"
 
28
  shutil.rmtree(deployment_path)
29
 
30
  # Save the files needed for deployment
31
+ fhe_dev_filter = FHEModelDev(deployment_path, filter)
32
  fhe_dev_filter.save()
33
 
34
  # Save the ONNX model
server.py CHANGED
@@ -4,24 +4,24 @@ import time
4
  from typing import List
5
 
6
  from common import FILTERS_PATH, SERVER_TMP_PATH
7
- from custom_client_server import CustomFHEServer
8
  from fastapi import FastAPI, File, Form, UploadFile
9
  from fastapi.responses import JSONResponse, Response
10
  from pydantic import BaseModel
 
11
 
12
 
13
- def get_server_file_path(name, user_id, image_filter):
14
  """Get the correct temporary file path for the server.
15
 
16
  Args:
17
  name (str): The desired file name.
18
  user_id (int): The current user's ID.
19
- image_filter (str): The filter chosen by the user
20
 
21
  Returns:
22
  pathlib.Path: The file path.
23
  """
24
- return SERVER_TMP_PATH / f"{name}_{image_filter}_{user_id}"
25
 
26
 
27
  class FilterRequest(BaseModel):
@@ -74,7 +74,7 @@ def run_fhe(
74
  evaluation_key = evaluation_key_file.read()
75
 
76
  # Load the FHE server
77
- fhe_server = CustomFHEServer(FILTERS_PATH / f"{filter}/deployment")
78
 
79
  # Run the FHE execution
80
  start = time.time()
 
4
  from typing import List
5
 
6
  from common import FILTERS_PATH, SERVER_TMP_PATH
 
7
  from fastapi import FastAPI, File, Form, UploadFile
8
  from fastapi.responses import JSONResponse, Response
9
  from pydantic import BaseModel
10
+ from concrete.ml.deployment import FHEModelServer
11
 
12
 
13
def get_server_file_path(name, user_id, filter_name):
    """Build the server's temporary file path for a given user and filter.

    Args:
        name (str): The desired file name.
        user_id (int): The current user's ID.
        filter_name (str): The filter chosen by the user

    Returns:
        pathlib.Path: The file path.
    """
    file_name = f"{name}_{filter_name}_{user_id}"
    return SERVER_TMP_PATH / file_name
25
 
26
 
27
  class FilterRequest(BaseModel):
 
74
  evaluation_key = evaluation_key_file.read()
75
 
76
  # Load the FHE server
77
+ fhe_server = FHEModelServer(FILTERS_PATH / f"{filter}/deployment")
78
 
79
  # Run the FHE execution
80
  start = time.time()