Roman committed
Commit d07e70b
1 Parent(s): 1abded0

chore: display encrypted output, add sizes and resize input examples

.gitignore CHANGED
@@ -2,6 +2,7 @@
 .playground/
 .artifacts
 .fhe_keys
+.wrong_keys
 server_tmp/
 client_tmp/
 .artifacts
README.md CHANGED
@@ -36,6 +36,7 @@ Then, install required packages:
 <!--pytest-codeblocks:skip-->
 
 ```bash
+pip3 install pip --upgrade
 pip3 install -U pip wheel setuptools --ignore-installed
 pip3 install -r requirements.txt --ignore-installed
 ```
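Not part of the commit, but a quick way to confirm the pinned environment installed correctly (the `concrete.ml.version` import path is the same one `filters.py` uses):

```python
# Hypothetical post-install sanity check; expected values follow requirements.txt.
import gradio
from concrete.ml.version import __version__ as cml_version  # same import as in filters.py

print("concrete-ml:", cml_version)    # expected 0.6.1
print("gradio:", gradio.__version__)  # expected 3.11.0
```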
app.py CHANGED
@@ -15,16 +15,28 @@ from common import (
     FILTERS_PATH,
     INPUT_SHAPE,
     KEYS_PATH,
+    WRONG_KEYS_PATH,
     REPO_DIR,
     SERVER_URL,
 )
-from custom_client_server import CustomFHEClient
+from custom_client_server import CustomFHEClient, CustomFHEServer
 
 # Uncomment here to have both the server and client in the same terminal
 subprocess.Popen(["uvicorn", "server:app"], cwd=REPO_DIR)
 time.sleep(3)
 
 
+def decrypt_output_with_wrong_key(encrypted_image, image_filter):
+    filter_path = FILTERS_PATH / f"{image_filter}/deployment"
+
+    wrong_client = CustomFHEClient(filter_path, WRONG_KEYS_PATH)
+    wrong_client.generate_private_and_evaluation_keys(force=True)
+
+    output_image = wrong_client.deserialize_decrypt_post_process(encrypted_image)
+
+    return output_image
+
+
 def shorten_bytes_object(bytes_object, limit=500):
     """Shorten the input bytes object to a given length.
 
@@ -121,8 +133,6 @@ def keygen(image_filter):
     user_id = numpy.random.randint(0, 2**32)
 
     # Retrieve the client API
-    # Currently, the key generation needs to be done after choosing a filter
-    # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/2258
     client = get_client(user_id, image_filter)
 
     # Generate a private key
@@ -133,6 +143,10 @@ def keygen(image_filter):
     # execution
     evaluation_key = client.get_serialized_evaluation_keys()
 
+    # Compute the private key's size in Kilobytes
+    private_key_path = next(client.key_dir.iterdir()) / "0_0/secretKey_big"
+    private_key_size = private_key_path.stat().st_size / 1000
+
     # Save evaluation_key as bytes in a file as it is too large to pass through regular Gradio
     # buttons (see https://github.com/gradio-app/gradio/issues/1877)
     evaluation_key_path = get_client_file_path("evaluation_key", user_id, image_filter)
@@ -140,7 +154,7 @@ def keygen(image_filter):
     with evaluation_key_path.open("wb") as evaluation_key_file:
         evaluation_key_file.write(evaluation_key)
 
-    return (user_id, True)
+    return (user_id, True, private_key_size)
 
 
 def encrypt(user_id, input_image, image_filter):
@@ -159,12 +173,18 @@ def encrypt(user_id, input_image, image_filter):
     if user_id == "":
         raise gr.Error("Please generate the private key first.")
 
+    if input_image is None:
+        raise gr.Error("Please choose an image first.")
+
     # Retrieve the client API
     client = get_client(user_id, image_filter)
 
     # Pre-process, encrypt and serialize the image
     encrypted_image = client.pre_process_encrypt_serialize(input_image)
 
+    # Compute the input's size in Megabytes
+    encrypted_input_size = len(encrypted_image) / 1000000
+
     # Save encrypted_image to bytes in a file, since too large to pass through regular Gradio
     # buttons, https://github.com/gradio-app/gradio/issues/1877
     encrypted_image_path = get_client_file_path("encrypted_image", user_id, image_filter)
@@ -175,7 +195,7 @@ def encrypt(user_id, input_image, image_filter):
     # Create a truncated version of the encrypted image for display
     encrypted_image_short = shorten_bytes_object(encrypted_image)
 
-    return (input_image, encrypted_image_short)
+    return (input_image, encrypted_image_short, encrypted_input_size)
 
 
 def send_input(user_id, image_filter):
@@ -264,17 +284,22 @@ def get_output(user_id, image_filter):
         data=data,
     ) as response:
         if response.ok:
+            encrypted_output = response.content
+
+            # Compute the output's size in Megabytes
+            encrypted_output_size = len(encrypted_output) / 1000000
+
             # Save the encrypted output to bytes in a file as it is too large to pass through regular
             # Gradio buttons (see https://github.com/gradio-app/gradio/issues/1877)
            encrypted_output_path = get_client_file_path("encrypted_output", user_id, image_filter)
 
             with encrypted_output_path.open("wb") as encrypted_output_file:
-                encrypted_output_file.write(response.content)
+                encrypted_output_file.write(encrypted_output)
 
-            # Create a truncated version of the encrypted output for display
-            encrypted_output_image_short = shorten_bytes_object(response.content)
+            # Decrypt the image using a different (wrong) key for display
+            output_image_representation = decrypt_output_with_wrong_key(encrypted_output, image_filter)
 
-            return encrypted_output_image_short
+            return output_image_representation, encrypted_output_size
         else:
             raise gr.Error("Please wait for the FHE execution to be completed.")
 
@@ -328,9 +353,11 @@ with demo:
     )
 
     gr.Markdown("## Client side")
+    gr.Markdown("### Step 1. Upload an image. ")
     gr.Markdown(
-        f"Step 1. Upload an image. It will automatically be resized to shape ({INPUT_SHAPE[0]}x{INPUT_SHAPE[1]})."
-        "The image is however displayed using its original resolution."
+        f"The image will automatically be resized to shape ({INPUT_SHAPE[0]}x{INPUT_SHAPE[1]})."
+        "The image displayed here is however using its original resolution. The true image used "
+        "in this demo can be seen in Step. 8."
    )
     with gr.Row():
         input_image = gr.Image(
@@ -341,33 +368,46 @@ with demo:
         examples=EXAMPLES, inputs=[input_image], examples_per_page=5, label="Examples to use."
     )
 
-    gr.Markdown("Step 2. Choose your filter")
+    gr.Markdown("### Step 2. Choose your filter")
     image_filter = gr.Dropdown(
         choices=AVAILABLE_FILTERS, value="inverted", label="Choose your filter", interactive=True
     )
 
-    gr.Markdown("### Notes")
+    gr.Markdown("#### Notes")
     gr.Markdown(
         """
        - The private key is used to encrypt and decrypt the data and shall never be shared.
        - No public key are required for these filter operators.
        """
     )
-
+
+    gr.Markdown("### Step 3. Generate the private key.")
     with gr.Row():
-        keygen_button = gr.Button("Step 3. Generate the private key.")
+        keygen_button = gr.Button("Generate the private key.")
 
         keygen_checkbox = gr.Checkbox(label="Private key generated:", interactive=False)
 
-    with gr.Row():
-        encrypt_button = gr.Button("Step 4. Encrypt the image using FHE.")
+        private_key_size = gr.Number(
+            label="Private key size (in kB):", value=0, precision=1, interactive=False
+        )
 
-    user_id = gr.Textbox(label="", max_lines=2, interactive=False, visible=False)
+    user_id = gr.Textbox(label="", max_lines=2, interactive=False, visible=False)
+
+    gr.Markdown("### Step 4. Encrypt the image using FHE.")
+    with gr.Row():
+        encrypt_button = gr.Button("Encrypt the image using FHE.")
+
+        # FIXME: Display an image representation here
+        # Currently not really possible as Concrete-Numpy does not give the ability to decrypt
+        # an encrypted input
+        # https://github.com/zama-ai/concrete-ml-internal/issues/2265
+        # https://github.com/zama-ai/concrete-numpy-internal/issues/1753
+        encrypted_input = gr.Textbox(
+            label="Encrypted input representation:", max_lines=2, interactive=False
+        )
 
-    # Display an image representation
-    # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/2265
-    encrypted_image = gr.Textbox(
-        label="Encrypted image representation:", max_lines=2, interactive=False
+        encrypted_input_size = gr.Number(
+            label="Encrypted input size (in MB):", value=0, precision=1, interactive=False
     )
 
     gr.Markdown("## Server side")
@@ -377,25 +417,36 @@ with demo:
         "the encrypted results to the client."
     )
 
+    gr.Markdown("### Step 5. Send the encrypted image to the server.")
     with gr.Row():
-        send_input_button = gr.Button("Step 5. Send the encrypted image to the server.")
+        send_input_button = gr.Button("Send the encrypted image to the server.")
 
         send_input_checkbox = gr.Checkbox(label="Encrypted image sent.", interactive=False)
 
+    gr.Markdown("### Step 6. Run FHE execution.")
    with gr.Row():
-        execute_fhe_button = gr.Button("Step 6. Run FHE execution")
+        execute_fhe_button = gr.Button("Run FHE execution.")
 
         fhe_execution_time = gr.Textbox(
-            label="Total FHE execution time (in seconds).", max_lines=1, interactive=False
+            label="Total FHE execution time (in seconds):", max_lines=1, interactive=False
         )
 
+    gr.Markdown("### Step 7. Receive the encrypted output image from the server.")
+    gr.Markdown(
+        "The image displayed here is the encrypted result sent by the server which has been "
+        "decrypted using a different private key. This is only used to visually represent an "
+        "encrypted image."
+    )
     with gr.Row():
-        get_output_button = gr.Button("Step 7. Receive the encrypted output image from the server.")
 
-        # Display an image representation
-        # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/2265
-        encrypted_output_image = gr.Textbox(
-            label="Encrypted output image representation:", max_lines=2, interactive=False
+        get_output_button = gr.Button("Receive the encrypted output image from the server.")
+
+        encrypted_output_representation = gr.Image(
+            label=f"Encrypted output representation ({INPUT_SHAPE[0]}x{INPUT_SHAPE[1]}):", interactive=False
+        )
+        encrypted_output_representation.style(height=256, width=256)
+
+        encrypted_output_size = gr.Number(
+            label="Encrypted output size (in MB):", value=0, precision=1, interactive=False
         )
 
     gr.Markdown("## Client side")
@@ -404,7 +455,12 @@ with demo:
         "private key. Only the client is aware of the original image and its transformed version."
     )
 
-    decrypt_button = gr.Button("Step 8. Decrypt the output")
+    gr.Markdown("### Step 8. Decrypt the output")
+    gr.Markdown(
+        "The image displayed on the left is the input image used during the demo. The output image "
+        "can be seen on the right."
+    )
+    decrypt_button = gr.Button("Decrypt the output")
 
     # Final input vs output display
     with gr.Row():
@@ -424,14 +480,14 @@
     keygen_button.click(
         keygen,
         inputs=[image_filter],
-        outputs=[user_id, keygen_checkbox],
+        outputs=[user_id, keygen_checkbox, private_key_size],
    )
 
     # Button to encrypt inputs on the client side
     encrypt_button.click(
         encrypt,
         inputs=[user_id, input_image, image_filter],
-        outputs=[original_image, encrypted_image],
+        outputs=[original_image, encrypted_input, encrypted_input_size],
     )
 
     # Button to send the encodings to the server using post method
@@ -444,7 +500,9 @@
 
     # Button to send the encodings to the server using post method
     get_output_button.click(
-        get_output, inputs=[user_id, image_filter], outputs=[encrypted_output_image]
+        get_output,
+        inputs=[user_id, image_filter],
+        outputs=[encrypted_output_representation, encrypted_output_size]
     )
 
     # Button to decrypt the output on the client side
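For reference, the sizes shown in the new `gr.Number` fields are plain decimal-unit conversions of byte counts: `keygen` divides the secret-key file size by 1000 (kB), while `encrypt` and `get_output` divide the serialized ciphertext length by 1000000 (MB). A minimal sketch with made-up byte counts:

```python
# Sketch only: mirrors the arithmetic used in keygen()/encrypt()/get_output() above.
secret_key_bytes = 2_459_000        # hypothetical value of private_key_path.stat().st_size
encrypted_image_bytes = 3_276_800   # hypothetical value of len(encrypted_image)

private_key_size_kb = secret_key_bytes / 1000               # 2459.0 kB
encrypted_input_size_mb = encrypted_image_bytes / 1000000   # 3.2768 MB

# gr.Number(precision=1) rounds the displayed value to one decimal place
print(round(private_key_size_kb, 1), "kB")      # 2459.0 kB
print(round(encrypted_input_size_mb, 1), "MB")  # 3.3 MB
```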
common.py CHANGED
@@ -11,11 +11,13 @@ REPO_DIR = Path(__file__).parent
 # The repository's main directories
 FILTERS_PATH = REPO_DIR / "filters"
 KEYS_PATH = REPO_DIR / ".fhe_keys"
+WRONG_KEYS_PATH = REPO_DIR / ".wrong_keys"
 CLIENT_TMP_PATH = REPO_DIR / "client_tmp"
 SERVER_TMP_PATH = REPO_DIR / "server_tmp"
 
 # Create the directories if it does not exist yet
 KEYS_PATH.mkdir(exist_ok=True)
+WRONG_KEYS_PATH.mkdir(exist_ok=True)
 CLIENT_TMP_PATH.mkdir(exist_ok=True)
 SERVER_TMP_PATH.mkdir(exist_ok=True)
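`WRONG_KEYS_PATH` backs the display trick added in `app.py`: a throwaway client keyed under `.wrong_keys` decrypts the server's result into noise-like pixels, which is what the demo shows as the "encrypted output representation". A rough usage sketch reusing the repository's own helpers (behavior assumed from the `decrypt_output_with_wrong_key` function in the app.py diff above):

```python
# Sketch only: same flow as decrypt_output_with_wrong_key() in app.py, run from the repo root.
from common import FILTERS_PATH, WRONG_KEYS_PATH
from custom_client_server import CustomFHEClient

def noisy_preview(encrypted_output: bytes, image_filter: str = "inverted"):
    # Keys generated under .wrong_keys are unrelated to the ones used for encryption,
    # so decryption yields essentially random pixel values instead of the plaintext.
    wrong_client = CustomFHEClient(FILTERS_PATH / f"{image_filter}/deployment", WRONG_KEYS_PATH)
    wrong_client.generate_private_and_evaluation_keys(force=True)
    return wrong_client.deserialize_decrypt_post_process(encrypted_output)
```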
filters.py CHANGED
@@ -14,9 +14,6 @@ from concrete.ml.onnx.convert import get_equivalent_numpy_forward
 from concrete.ml.torch.numpy_module import NumpyModule
 from concrete.ml.version import __version__ as CML_VERSION
 
-# Add a "black and white" filter
-# FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/2277
-
 
 class _TorchIdentity(nn.Module):
     """Torch identity model."""
@@ -189,9 +186,6 @@ class Filter:
             self.torch_model = _TorchConv2D(kernel, n_out_channels=3, groups=3)
 
         elif image_filter == "ridge detection":
-            # Make the filter properly grayscaled, as it is commonly used
-            # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/2265
-
             kernel = torch.tensor(
                 [
                     [-1, -1, -1],
filters/black and white/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:63773fc0796c373f650f74b5462515a7bf95c2b591266472f23bf37b0846e935
+oid sha256:9a8fae28225c53cc6e184535a8880187626892461a7f0c25afe322dfaa83f678
 size 388
filters/black and white/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8e3d461f5553416f5f011b25b76afdbb0cd0f57b64a2cdfa34b705049cd796a9
-size 4367
+oid sha256:4253afb0e46be27e5aa2562c14e7bc402fc7aa39cf01a824f1170c5e46ccf9aa
+size 4364
filters/blur/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b9b07a161dbe6020a0749c2bec2ae9e4215ac25574ac086213a45979a8c229c
+oid sha256:6ead03bc109e6598c90000f580c2d149f19c4ed968dfa302dfea78c131bdad02
 size 391
filters/blur/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:34f388a428c9d9c61d5138ee86c0248a2c495b5e2c361a8839551f20dc0f19f6
-size 7266
+oid sha256:deb92cf90d25f2c4552bb8a2fa0eb0f0cbe0fdc549665f2b63a15a78c0b20d72
+size 7263
filters/identity/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:adbdb74b0fb4dec87aaa065bf9c90940004c5a1f4cb1c0943c0134237b8008cd
+oid sha256:05f39bf162b0ffcd9e4105c371180a621ebdc64fce12a751547424e3a6ec0c0b
 size 378
filters/identity/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5dd04e43447e0ad26bbbc48db67b1aa7fc2993862bdce53eabd2d39793240eec
-size 2562
+oid sha256:9741844b0e166078608310f3812724cf0dd117927457763ecec48e5e510ebc5a
+size 2559
filters/inverted/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:adbdb74b0fb4dec87aaa065bf9c90940004c5a1f4cb1c0943c0134237b8008cd
+oid sha256:05f39bf162b0ffcd9e4105c371180a621ebdc64fce12a751547424e3a6ec0c0b
 size 378
filters/inverted/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:25c281b6d13a16e5e41ee53b929dd33318b24f21ce91e9120aa5ed2065b4e7ab
-size 4182
+oid sha256:e3b413c12bb4d656e739247af1ffb848d57a70a2f57011dd367af66c56cce025
+size 4179
filters/ridge detection/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a5f28e47bbfa1cc51e4212b471ba8fb981af51f7db23a93892ec849a2ec2541f
+oid sha256:b58c440c56ec350092d730eb2e8d2118225eed501fb8dba5df17b96440fe6a08
 size 397
filters/ridge detection/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5a621b96e5a707c67efc53c6ac262676558545c8d173df9cab5138866a46afe3
-size 4483
+oid sha256:91c225472c2d225d3211b54a1f4bca1a7df54ae6543e6d62e7c6a48601dd9e31
+size 4479
filters/rotate/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0f3d46ba45c50191d39587992f3a53827949214481ac05a9673f2481f9038ab4
+oid sha256:05f39bf162b0ffcd9e4105c371180a621ebdc64fce12a751547424e3a6ec0c0b
 size 378
filters/rotate/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5599e84aa1aaf1a908f17dd15c4e221c234dcc6998a17ada305cfc4aaceade6d
-size 4434
+oid sha256:3f4c3cfa73f19125923edca845c70a55eb5a882ddb17fc8f979ed349b845eb6e
+size 4431
filters/sharpen/deployment/client.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e6fe9d09eacf0ffcc5569457fb3e869488a35ce134583c70b60cf5c099c8b46e
+oid sha256:4a13c773025de645be26fbedd7bb9b0926464ad08f992ff089aee90ee58df5f3
 size 396
filters/sharpen/deployment/server.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab65ba0d811d187405b681fd5210da8c996111a53ffac48de8ff20457ac46949
-size 7314
+oid sha256:65892df870b0e07b717fb971b85c0e6ed2d0acc345db9fec1eb90497b1092374
+size 7311
input_examples/arc.jpg CHANGED
input_examples/book.jpg CHANGED
input_examples/computer.jpg CHANGED
input_examples/tree.jpg CHANGED
input_examples/zama_math.jpg CHANGED
input_examples/zebra.jpg CHANGED
requirements.txt CHANGED
@@ -2,4 +2,3 @@ concrete-ml==0.6.1
 gradio==3.11.0
 uvicorn==0.20.0
 fastapi==0.87.0
-jupyter==1.0.0