aletrn committed on
Commit
6f2f547
·
1 Parent(s): cbeec42

[docs] add more docs content

Browse files
README.md CHANGED
@@ -72,10 +72,10 @@ extension, document a whole package in the style of other automatic API document
72
  Run the command from the project root:
73
 
74
  ```bash
75
- # missing docs folder
76
- sphinx-quickstart -p SamGIS -a "alessandro trinca tornidor" -r 1.0.0 -l python --master index
77
 
78
- # update docs folder
79
  sphinx-apidoc -f -o docs src
80
  ```
81
 
 
72
  Run the command from the project root:
73
 
74
  ```bash
75
+ # missing docs folder (run from project root)
76
+ cd docs && sphinx-quickstart -p SamGIS -a "alessandro trinca tornidor" -r 1.0.0 -l python --master index
77
 
78
+ # update docs folder (from project root)
79
  sphinx-apidoc -f -o docs src
80
  ```
81
 
dockerfiles/dockerfile-lambda-fastsam-api CHANGED
@@ -7,7 +7,7 @@ ARG PYTHONPATH="${LAMBDA_TASK_ROOT}:${PYTHONPATH}:/usr/local/lib/python3/dist-pa
7
  # Set working directory to function root directory
8
  WORKDIR ${LAMBDA_TASK_ROOT}
9
  COPY ./src ${LAMBDA_TASK_ROOT}/src
10
- COPY ./models ${LAMBDA_TASK_ROOT}/models
11
 
12
  RUN ls -l /usr/bin/which
13
  RUN /usr/bin/which python
@@ -17,7 +17,7 @@ RUN echo "PATH: ${PATH}."
17
  RUN echo "LAMBDA_TASK_ROOT: ${LAMBDA_TASK_ROOT}."
18
  RUN ls -l ${LAMBDA_TASK_ROOT}
19
  RUN ls -ld ${LAMBDA_TASK_ROOT}
20
- RUN ls -l ${LAMBDA_TASK_ROOT}/models
21
  RUN python -c "import sys; print(sys.path)"
22
  RUN python -c "import cv2"
23
  RUN python -c "import geopandas"
 
7
  # Set working directory to function root directory
8
  WORKDIR ${LAMBDA_TASK_ROOT}
9
  COPY ./src ${LAMBDA_TASK_ROOT}/src
10
+ COPY ./machine_learning_models ${LAMBDA_TASK_ROOT}/machine_learning_models
11
 
12
  RUN ls -l /usr/bin/which
13
  RUN /usr/bin/which python
 
17
  RUN echo "LAMBDA_TASK_ROOT: ${LAMBDA_TASK_ROOT}."
18
  RUN ls -l ${LAMBDA_TASK_ROOT}
19
  RUN ls -ld ${LAMBDA_TASK_ROOT}
20
+ RUN ls -l ${LAMBDA_TASK_ROOT}/machine_learning_models
21
  RUN python -c "import sys; print(sys.path)"
22
  RUN python -c "import cv2"
23
  RUN python -c "import geopandas"
docs/conf.py CHANGED
@@ -39,7 +39,9 @@ napoleon_google_docstring = True
39
  # napoleon_attr_annotations = True
40
 
41
  templates_path = ['_templates']
42
- exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'build/*']
 
 
43
 
44
  source_suffix = {
45
  '.rst': 'restructuredtext',
 
39
  # napoleon_attr_annotations = True
40
 
41
  templates_path = ['_templates']
42
+ exclude_patterns = [
43
+ '_build', 'Thumbs.db', '.DS_Store', 'build/*', 'machine_learning_models', 'machine_learning_models/*'
44
+ ]
45
 
46
  source_suffix = {
47
  '.rst': 'restructuredtext',
docs/src.io.rst ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ src.io package
2
+ ==============
3
+
4
+ Submodules
5
+ ----------
6
+
7
+ src.io.coordinates\_pixel\_conversion module
8
+ --------------------------------------------
9
+
10
+ .. automodule:: src.io.coordinates_pixel_conversion
11
+ :members:
12
+ :undoc-members:
13
+ :show-inheritance:
14
+
15
+ src.io.geo\_helpers module
16
+ --------------------------
17
+
18
+ .. automodule:: src.io.geo_helpers
19
+ :members:
20
+ :undoc-members:
21
+ :show-inheritance:
22
+
23
+ src.io.lambda\_helpers module
24
+ -----------------------------
25
+
26
+ .. automodule:: src.io.lambda_helpers
27
+ :members:
28
+ :undoc-members:
29
+ :show-inheritance:
30
+
31
+ src.io.tms2geotiff module
32
+ -------------------------
33
+
34
+ .. automodule:: src.io.tms2geotiff
35
+ :members:
36
+ :undoc-members:
37
+ :show-inheritance:
38
+
39
+ Module contents
40
+ ---------------
41
+
42
+ .. automodule:: src.io
43
+ :members:
44
+ :undoc-members:
45
+ :show-inheritance:
docs/src.rst CHANGED
@@ -7,6 +7,7 @@ Subpackages
7
  .. toctree::
8
  :maxdepth: 4
9
 
 
10
  src.prediction_api
11
  src.utilities
12
 
 
7
  .. toctree::
8
  :maxdepth: 4
9
 
10
+ src.io
11
  src.prediction_api
12
  src.utilities
13
 
{models → machine_learning_models}/.gitignore RENAMED
File without changes
src/__init__.py CHANGED
@@ -7,5 +7,5 @@ from src.utilities.constants import SERVICE_NAME
7
 
8
 
9
  PROJECT_ROOT_FOLDER = Path(globals().get("__file__", "./_")).absolute().parent.parent
10
- MODEL_FOLDER = Path(os.path.join(PROJECT_ROOT_FOLDER, "models"))
11
  app_logger = Logger(service=SERVICE_NAME)
 
7
 
8
 
9
  PROJECT_ROOT_FOLDER = Path(globals().get("__file__", "./_")).absolute().parent.parent
10
+ MODEL_FOLDER = Path(PROJECT_ROOT_FOLDER / "machine_learning_models")
11
  app_logger = Logger(service=SERVICE_NAME)
src/io/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """input/output helpers functions"""
src/io/coordinates_pixel_conversion.py CHANGED
@@ -1,17 +1,13 @@
 
1
  import math
2
- from typing import TypedDict
3
 
4
  from src import app_logger
5
  from src.utilities.constants import TILE_SIZE
6
  from src.utilities.type_hints import LatLngDict
 
7
 
8
 
9
- class PixelCoordinate(TypedDict):
10
- x: int
11
- y: int
12
-
13
-
14
- def get_latlng2pixel_projection(latlng: LatLngDict) -> PixelCoordinate:
15
  app_logger.debug(f"latlng: {type(latlng)}, value:{latlng}.")
16
  app_logger.debug(f'latlng lat: {type(latlng.lat)}, value:{latlng.lat}.')
17
  app_logger.debug(f'latlng lng: {type(latlng.lng)}, value:{latlng.lng}.')
@@ -31,13 +27,13 @@ def get_latlng2pixel_projection(latlng: LatLngDict) -> PixelCoordinate:
31
  raise e_get_latlng2pixel_projection
32
 
33
 
34
- def get_point_latlng_to_pixel_coordinates(latlng: LatLngDict, zoom: int | float) -> PixelCoordinate:
35
  try:
36
- world_coordinate: PixelCoordinate = get_latlng2pixel_projection(latlng)
37
  app_logger.debug(f"world_coordinate:{world_coordinate}.")
38
  scale: int = pow(2, zoom)
39
  app_logger.debug(f"scale:{scale}.")
40
- return PixelCoordinate(
41
  x=math.floor(world_coordinate["x"] * scale),
42
  y=math.floor(world_coordinate["y"] * scale)
43
  )
@@ -52,14 +48,27 @@ def get_latlng_to_pixel_coordinates(
52
  latlng_current_point: LatLngDict,
53
  zoom: int | float,
54
  k: str
55
- ):
 
 
 
 
 
 
 
 
 
 
 
 
 
56
  app_logger.debug(f"latlng_origin - {k}: {type(latlng_origin_ne)}, value:{latlng_origin_ne}.")
57
  app_logger.debug(f"latlng_current_point - {k}: {type(latlng_current_point)}, value:{latlng_current_point}.")
58
- latlng_map_origin_ne = get_point_latlng_to_pixel_coordinates(latlng_origin_ne, zoom)
59
- latlng_map_origin_sw = get_point_latlng_to_pixel_coordinates(latlng_origin_sw, zoom)
60
- latlng_map_current_point = get_point_latlng_to_pixel_coordinates(latlng_current_point, zoom)
61
  diff_coord_x = abs(latlng_map_origin_sw["x"] - latlng_map_current_point["x"])
62
  diff_coord_y = abs(latlng_map_origin_ne["y"] - latlng_map_current_point["y"])
63
- point = PixelCoordinate(x=diff_coord_x, y=diff_coord_y)
64
  app_logger.debug(f"point type - {k}: {point}.")
65
  return point
 
1
+ """functions useful to convert to/from latitude-longitude coordinates to pixel image coordinates"""
2
  import math
 
3
 
4
  from src import app_logger
5
  from src.utilities.constants import TILE_SIZE
6
  from src.utilities.type_hints import LatLngDict
7
+ from src.utilities.type_hints import ImagePixelCoordinates
8
 
9
 
10
+ def _get_latlng2pixel_projection(latlng: LatLngDict) -> ImagePixelCoordinates:
 
 
 
 
 
11
  app_logger.debug(f"latlng: {type(latlng)}, value:{latlng}.")
12
  app_logger.debug(f'latlng lat: {type(latlng.lat)}, value:{latlng.lat}.')
13
  app_logger.debug(f'latlng lng: {type(latlng.lng)}, value:{latlng.lng}.')
 
27
  raise e_get_latlng2pixel_projection
28
 
29
 
30
+ def _get_point_latlng_to_pixel_coordinates(latlng: LatLngDict, zoom: int | float) -> ImagePixelCoordinates:
31
  try:
32
+ world_coordinate: ImagePixelCoordinates = _get_latlng2pixel_projection(latlng)
33
  app_logger.debug(f"world_coordinate:{world_coordinate}.")
34
  scale: int = pow(2, zoom)
35
  app_logger.debug(f"scale:{scale}.")
36
+ return ImagePixelCoordinates(
37
  x=math.floor(world_coordinate["x"] * scale),
38
  y=math.floor(world_coordinate["y"] * scale)
39
  )
 
48
  latlng_current_point: LatLngDict,
49
  zoom: int | float,
50
  k: str
51
+ ) -> ImagePixelCoordinates:
52
+ """
53
+ Convert the latitude-longitude origin and prompt points to image pixel coordinates.
54
+
55
+ Args:
56
+ latlng_origin_ne: NE latitude-longitude origin point
57
+ latlng_origin_sw: SW latitude-longitude origin point
58
+ latlng_current_point: latitude-longitude prompt point
59
+ zoom: zoom value
60
+ k: prompt type
61
+
62
+ Returns:
63
+ ImagePixelCoordinates: pixel image coordinate point
64
+ """
65
  app_logger.debug(f"latlng_origin - {k}: {type(latlng_origin_ne)}, value:{latlng_origin_ne}.")
66
  app_logger.debug(f"latlng_current_point - {k}: {type(latlng_current_point)}, value:{latlng_current_point}.")
67
+ latlng_map_origin_ne = _get_point_latlng_to_pixel_coordinates(latlng_origin_ne, zoom)
68
+ latlng_map_origin_sw = _get_point_latlng_to_pixel_coordinates(latlng_origin_sw, zoom)
69
+ latlng_map_current_point = _get_point_latlng_to_pixel_coordinates(latlng_current_point, zoom)
70
  diff_coord_x = abs(latlng_map_origin_sw["x"] - latlng_map_current_point["x"])
71
  diff_coord_y = abs(latlng_map_origin_ne["y"] - latlng_map_current_point["y"])
72
+ point = ImagePixelCoordinates(x=diff_coord_x, y=diff_coord_y)
73
  app_logger.debug(f"point type - {k}: {point}.")
74
  return point
src/io/geo_helpers.py CHANGED
@@ -1,14 +1,22 @@
1
- """Async download raster tiles"""
2
  from pathlib import Path
3
- from typing import List
4
 
 
5
  import numpy as np
6
 
7
  from src import app_logger, PROJECT_ROOT_FOLDER
8
 
9
 
10
- def load_affine_transformation_from_matrix(matrix_source_coeffs: List):
11
- from affine import Affine
 
 
 
 
 
 
 
12
 
13
  if len(matrix_source_coeffs) != 6:
14
  raise ValueError(f"Expected 6 coefficients, found {len(matrix_source_coeffs)}; "
@@ -23,13 +31,29 @@ def load_affine_transformation_from_matrix(matrix_source_coeffs: List):
23
  raise e
24
 
25
 
26
- def get_affine_transform_from_gdal(matrix):
27
- from rasterio import Affine
 
 
 
 
 
 
 
 
 
28
 
29
- return Affine.from_gdal(*matrix)
 
 
30
 
 
 
 
31
 
32
- def get_vectorized_raster_as_geojson(mask, matrix):
 
 
33
  try:
34
  from rasterio.features import shapes
35
  from geopandas import GeoDataFrame
 
1
+ """handle geo-referenced raster images"""
2
  from pathlib import Path
3
+ from typing import List, Tuple, Dict
4
 
5
+ from affine import Affine
6
  import numpy as np
7
 
8
  from src import app_logger, PROJECT_ROOT_FOLDER
9
 
10
 
11
+ def load_affine_transformation_from_matrix(matrix_source_coeffs: List[float]) -> Affine:
12
+ """Build an Affine transform from a list of 6 GDAL-ordered coefficients
13
+
14
+ Args:
15
+ matrix_source_coeffs: 6 floats ordered by GDAL.
16
+
17
+ Returns:
18
+ Affine: Affine transform
19
+ """
20
 
21
  if len(matrix_source_coeffs) != 6:
22
  raise ValueError(f"Expected 6 coefficients, found {len(matrix_source_coeffs)}; "
 
31
  raise e
32
 
33
 
34
+ def get_affine_transform_from_gdal(matrix_source_coeffs: List[float]) -> Affine:
35
+ """wrapper for the affine library Affine.from_gdal method
36
+
37
+ Args:
38
+ matrix_source_coeffs: 6 floats ordered by GDAL.
39
+
40
+ Returns:
41
+ Affine: Affine transform
42
+ """
43
+ return Affine.from_gdal(*matrix_source_coeffs)
44
+
45
 
46
+ def get_vectorized_raster_as_geojson(mask: np.ndarray, matrix: Tuple[float]) -> Dict[str, int]:
47
+ """
48
+ Vectorize the input raster mask into a geojson of shapes.
49
 
50
+ Args:
51
+ mask: numpy mask
52
+ matrix: tuple of float to transform into an Affine transform
53
 
54
+ Returns:
55
+ Dict: dict containing the output geojson and the predictions number
56
+ """
57
  try:
58
  from rasterio.features import shapes
59
  from geopandas import GeoDataFrame
src/io/lambda_helpers.py CHANGED
@@ -1,7 +1,8 @@
 
1
  import json
2
  import logging
3
  import time
4
- from typing import Dict
5
  from aws_lambda_powertools.event_handler import content_types
6
 
7
  from src import app_logger
@@ -41,6 +42,15 @@ def get_response(status: int, start_time: float, request_id: str, response_body:
41
 
42
 
43
  def get_parsed_bbox_points(request_input: RawRequestInput) -> Dict:
 
 
 
 
 
 
 
 
 
44
  app_logger.info(f"try to parsing input request {request_input}...")
45
 
46
  bbox = request_input.bbox
@@ -78,7 +88,16 @@ def get_parsed_bbox_points(request_input: RawRequestInput) -> Dict:
78
  }
79
 
80
 
81
- def get_parsed_request_body(event):
 
 
 
 
 
 
 
 
 
82
  app_logger.info(f"event:{json.dumps(event)}...")
83
  try:
84
  raw_body = event["body"]
 
1
+ """lambda helper functions"""
2
  import json
3
  import logging
4
  import time
5
+ from typing import Dict, List
6
  from aws_lambda_powertools.event_handler import content_types
7
 
8
  from src import app_logger
 
42
 
43
 
44
  def get_parsed_bbox_points(request_input: RawRequestInput) -> Dict:
45
+ """
46
+ Format the bbox and prompt request input
47
+
48
+ Args:
49
+ request_input: input dict
50
+
51
+ Returns:
52
+ Dict: parsed request input (bbox, prompt and zoom values)
53
+ """
54
  app_logger.info(f"try to parsing input request {request_input}...")
55
 
56
  bbox = request_input.bbox
 
88
  }
89
 
90
 
91
+ def get_parsed_request_body(event: Dict):
92
+ """
93
+ Parse the input request lambda event.
94
+
95
+ Args:
96
+ event: input dict
97
+
98
+ Returns:
99
+ RawRequestInput: parsed request input
100
+ """
101
  app_logger.info(f"event:{json.dumps(event)}...")
102
  try:
103
  raw_body = event["body"]
src/io/tms2geotiff.py CHANGED
@@ -1,5 +1,6 @@
1
  #!/usr/bin/env python3
2
  # -*- coding: utf-8 -*-
 
3
 
4
  import io
5
  import os
 
1
  #!/usr/bin/env python3
2
  # -*- coding: utf-8 -*-
3
+ """download geo-referenced raster tiles images"""
4
 
5
  import io
6
  import os
src/prediction_api/__init__.py CHANGED
@@ -0,0 +1 @@
 
 
1
+ """functions useful to handle machine learning models"""
src/prediction_api/predictors.py CHANGED
@@ -1,7 +1,6 @@
1
- # Press the green button in the gutter to run the script.
2
- import json
3
- import tempfile
4
-
5
  import numpy as np
6
 
7
  from src import app_logger, MODEL_FOLDER
@@ -9,13 +8,29 @@ from src.io.geo_helpers import get_vectorized_raster_as_geojson, get_affine_tran
9
  from src.io.tms2geotiff import download_extent
10
  from src.prediction_api.sam_onnx import SegmentAnythingONNX
11
  from src.utilities.constants import MODEL_ENCODER_NAME, MODEL_DECODER_NAME, DEFAULT_TMS
12
- from src.utilities.serialize import serialize
13
-
14
 
15
  models_dict = {"fastsam": {"instance": None}}
16
 
17
 
18
- def samexporter_predict(bbox, prompt: list[dict], zoom: float, model_name: str = "fastsam") -> dict:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  try:
20
  if models_dict[model_name]["instance"] is None:
21
  app_logger.info(f"missing instance model {model_name}, instantiating it now!")
@@ -31,10 +46,7 @@ def samexporter_predict(bbox, prompt: list[dict], zoom: float, model_name: str =
31
  pt0, pt1 = bbox
32
  app_logger.info(f"downloading geo-referenced raster with bbox {bbox}, zoom {zoom}.")
33
  img, matrix = download_extent(DEFAULT_TMS, pt0[0], pt0[1], pt1[0], pt1[1], zoom)
34
- app_logger.info(f"img type {type(img)} with shape/size:{img.size}, matrix:{matrix}.")
35
-
36
- with tempfile.NamedTemporaryFile(mode='w', prefix=f"matrix_", delete=False) as temp_f1:
37
- json.dump({"matrix": serialize(matrix)}, temp_f1)
38
 
39
  transform = get_affine_transform_from_gdal(matrix)
40
  app_logger.debug(f"transform to consume with rasterio.shapes: {type(transform)}, {transform}.")
@@ -49,7 +61,20 @@ def samexporter_predict(bbox, prompt: list[dict], zoom: float, model_name: str =
49
  app_logger.error(f"Error trying import module:{e_import_module}.")
50
 
51
 
52
- def get_raster_inference(img, prompt, models_instance, model_name):
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  np_img = np.array(img)
54
  app_logger.info(f"img type {type(np_img)}, prompt:{prompt}.")
55
  app_logger.debug(f"onnxruntime input shape/size (shape if PIL) {np_img.size}.")
@@ -57,12 +82,6 @@ def get_raster_inference(img, prompt, models_instance, model_name):
57
  app_logger.debug(f"onnxruntime input shape (NUMPY) {np_img.shape}.")
58
  except Exception as e_shape:
59
  app_logger.error(f"e_shape:{e_shape}.")
60
- try:
61
- with tempfile.NamedTemporaryFile(mode='w', prefix=f"get_raster_inference__img_", delete=False) as temp_f0:
62
- np.save(str(temp_f0.file.name), np_img)
63
- except Exception as e_save:
64
- app_logger.error(f"e_save:{e_save}.")
65
- raise e_save
66
  app_logger.info(f"instantiated model {model_name}, ENCODER {MODEL_ENCODER_NAME}, "
67
  f"DECODER {MODEL_DECODER_NAME} from {MODEL_FOLDER}: Creating embedding...")
68
  embedding = models_instance.encode(np_img)
@@ -76,12 +95,4 @@ def get_raster_inference(img, prompt, models_instance, model_name):
76
  app_logger.debug(f"{n}th of prediction_masks shape {inference_out.shape}"
77
  f" => mask shape:{mask.shape}, {mask.dtype}.")
78
  mask[m > 0.0] = 255
79
- try:
80
- with tempfile.NamedTemporaryFile(mode='w', prefix=f"get_raster_inference__mask_", delete=False) as temp_f1:
81
- np.save(temp_f1.file.name, mask)
82
- with tempfile.NamedTemporaryFile(mode='w', prefix=f"get_raster_inference__inference_out_", delete=False) as temp_f2:
83
- np.save(temp_f2.file.name, inference_out)
84
- except Exception as e_save1:
85
- app_logger.error(f"e_save1:{e_save1}.")
86
- raise e_save1
87
  return mask, len_inference_out
 
1
+ """functions using the machine learning instance model"""
2
+ from typing import Dict, Tuple
3
+ from PIL.Image import Image
 
4
  import numpy as np
5
 
6
  from src import app_logger, MODEL_FOLDER
 
8
  from src.io.tms2geotiff import download_extent
9
  from src.prediction_api.sam_onnx import SegmentAnythingONNX
10
  from src.utilities.constants import MODEL_ENCODER_NAME, MODEL_DECODER_NAME, DEFAULT_TMS
11
+ from src.utilities.type_hints import llist_float
 
12
 
13
  models_dict = {"fastsam": {"instance": None}}
14
 
15
 
16
+ def samexporter_predict(
17
+ bbox: llist_float, prompt: list[dict], zoom: float, model_name: str = "fastsam") -> Dict[str, int]:
18
+ """
19
+ Return predictions as a geojson from a geo-referenced image using the given input prompt.
20
+ 1. if necessary instantiate a segment anything machine learning instance model
21
+ 2. download a geo-referenced raster image delimited by the coordinates bounding box (bbox)
22
+ 3. get a prediction image from the segment anything instance model using the input prompt
23
+ 4. get a geo-referenced geojson from the prediction image
24
+
25
+ Args:
26
+ bbox: coordinates bounding box
27
+ prompt: machine learning input prompt
28
+ zoom: zoom value
29
+ model_name: machine learning model name
30
+
31
+ Returns:
32
+ dict: geojson of the predicted masks and the number of predictions
33
+ """
34
  try:
35
  if models_dict[model_name]["instance"] is None:
36
  app_logger.info(f"missing instance model {model_name}, instantiating it now!")
 
46
  pt0, pt1 = bbox
47
  app_logger.info(f"downloading geo-referenced raster with bbox {bbox}, zoom {zoom}.")
48
  img, matrix = download_extent(DEFAULT_TMS, pt0[0], pt0[1], pt1[0], pt1[1], zoom)
49
+ app_logger.info(f"img type {type(img)} with shape/size:{img.size}, matrix:{type(matrix)}, matrix:{matrix}.")
 
 
 
50
 
51
  transform = get_affine_transform_from_gdal(matrix)
52
  app_logger.debug(f"transform to consume with rasterio.shapes: {type(transform)}, {transform}.")
 
61
  app_logger.error(f"Error trying import module:{e_import_module}.")
62
 
63
 
64
+ def get_raster_inference(
65
+ img: Image, prompt: list[dict], models_instance: SegmentAnythingONNX, model_name: str
66
+ ) -> Tuple[np.ndarray, int]:
67
+ """Run the given segment anything instance model on the input image with the given prompt, returning a raster prediction mask
68
+
69
+ Args:
70
+ img: input PIL Image
71
+ prompt: list of prompt dict
72
+ models_instance: SegmentAnythingONNX instance model
73
+ model_name: model name string
74
+
75
+ Returns:
76
+ Tuple[np.ndarray, int]: raster prediction mask, prediction number
77
+ """
78
  np_img = np.array(img)
79
  app_logger.info(f"img type {type(np_img)}, prompt:{prompt}.")
80
  app_logger.debug(f"onnxruntime input shape/size (shape if PIL) {np_img.size}.")
 
82
  app_logger.debug(f"onnxruntime input shape (NUMPY) {np_img.shape}.")
83
  except Exception as e_shape:
84
  app_logger.error(f"e_shape:{e_shape}.")
 
 
 
 
 
 
85
  app_logger.info(f"instantiated model {model_name}, ENCODER {MODEL_ENCODER_NAME}, "
86
  f"DECODER {MODEL_DECODER_NAME} from {MODEL_FOLDER}: Creating embedding...")
87
  embedding = models_instance.encode(np_img)
 
95
  app_logger.debug(f"{n}th of prediction_masks shape {inference_out.shape}"
96
  f" => mask shape:{mask.shape}, {mask.dtype}.")
97
  mask[m > 0.0] = 255
 
 
 
 
 
 
 
 
98
  return mask, len_inference_out
src/prediction_api/sam_onnx.py CHANGED
@@ -1,3 +1,4 @@
 
1
  from copy import deepcopy
2
 
3
  import cv2
 
1
+ """machine learning segment anything class"""
2
  from copy import deepcopy
3
 
4
  import cv2
src/utilities/__init__.py CHANGED
@@ -1 +1 @@
1
- """various helpers utilities"""
 
1
+ """various helpers functions"""
src/utilities/type_hints.py CHANGED
@@ -13,44 +13,37 @@ llist_float = list[list_float]
13
 
14
 
15
  class LatLngDict(BaseModel):
 
16
  lat: float
17
  lng: float
18
 
19
 
20
  class PromptType(str, Enum):
 
21
  point = "point"
22
  # rectangle = "rectangle"
23
 
24
 
25
- class ParsedPrompt(BaseModel):
26
- type: PromptType
27
- data: llist_float
28
- label: int = 0
29
-
30
-
31
- class ParsedRequestInput(BaseModel):
32
- bbox: llist_float
33
- prompt: ParsedPrompt
34
- zoom: int | float
35
-
36
-
37
- class PixelCoordinate(TypedDict):
38
  x: int
39
  y: int
40
 
41
 
42
  class RawBBox(BaseModel):
 
43
  ne: LatLngDict
44
  sw: LatLngDict
45
 
46
 
47
  class RawPrompt(BaseModel):
 
48
  type: PromptType
49
  data: LatLngDict
50
  label: int = 0
51
 
52
 
53
  class RawRequestInput(BaseModel):
 
54
  bbox: RawBBox
55
  prompt: list[RawPrompt]
56
  zoom: int | float
 
13
 
14
 
15
  class LatLngDict(BaseModel):
16
+ """A latitude-longitude type"""
17
  lat: float
18
  lng: float
19
 
20
 
21
  class PromptType(str, Enum):
22
+ """Segment Anything enumeration prompt type"""
23
  point = "point"
24
  # rectangle = "rectangle"
25
 
26
 
27
+ class ImagePixelCoordinates(TypedDict):
 
 
 
 
 
 
 
 
 
 
 
 
28
  x: int
29
  y: int
30
 
31
 
32
  class RawBBox(BaseModel):
33
+ """Input lambda bbox request - not parsed"""
34
  ne: LatLngDict
35
  sw: LatLngDict
36
 
37
 
38
  class RawPrompt(BaseModel):
39
+ """Input lambda prompt request - not parsed"""
40
  type: PromptType
41
  data: LatLngDict
42
  label: int = 0
43
 
44
 
45
  class RawRequestInput(BaseModel):
46
+ """Input lambda request - not parsed"""
47
  bbox: RawBBox
48
  prompt: list[RawPrompt]
49
  zoom: int | float
src/utilities/utilities.py CHANGED
@@ -1,7 +1,7 @@
1
  """Various utilities (logger, time benchmark, args dump, numerical and stats info)"""
2
 
3
 
4
- def prepare_base64_input(sb):
5
  if isinstance(sb, str):
6
  # If there's any unicode here, an exception will be thrown and the function will return false
7
  return bytes(sb, 'ascii')
@@ -10,27 +10,45 @@ def prepare_base64_input(sb):
10
  raise ValueError("Argument must be string or bytes")
11
 
12
 
13
- def is_base64(sb: str or bytes):
14
  import base64
15
 
16
  try:
17
- sb_bytes = prepare_base64_input(sb)
18
  return base64.b64encode(base64.b64decode(sb_bytes, validate=True)) == sb_bytes
19
  except ValueError:
20
  return False
21
 
22
 
23
  def base64_decode(s):
 
 
 
 
 
 
 
 
 
24
  import base64
25
 
26
- if isinstance(s, str) and is_base64(s):
27
  return base64.b64decode(s, validate=True).decode("utf-8")
28
 
29
  return s
30
 
31
 
32
- def base64_encode(sb: str or bytes):
 
 
 
 
 
 
 
 
 
33
  import base64
34
 
35
- sb_bytes = prepare_base64_input(sb)
36
  return base64.b64encode(sb_bytes)
 
1
  """Various utilities (logger, time benchmark, args dump, numerical and stats info)"""
2
 
3
 
4
+ def _prepare_base64_input(sb):
5
  if isinstance(sb, str):
6
  # If there's any unicode here, an exception will be thrown and the function will return false
7
  return bytes(sb, 'ascii')
 
10
  raise ValueError("Argument must be string or bytes")
11
 
12
 
13
+ def _is_base64(sb: str or bytes):
14
  import base64
15
 
16
  try:
17
+ sb_bytes = _prepare_base64_input(sb)
18
  return base64.b64encode(base64.b64decode(sb_bytes, validate=True)) == sb_bytes
19
  except ValueError:
20
  return False
21
 
22
 
23
  def base64_decode(s):
24
+ """
25
+ Decode base64 strings
26
+
27
+ Args:
28
+ s: input string
29
+
30
+ Returns:
31
+ decoded string
32
+ """
33
  import base64
34
 
35
+ if isinstance(s, str) and _is_base64(s):
36
  return base64.b64decode(s, validate=True).decode("utf-8")
37
 
38
  return s
39
 
40
 
41
+ def base64_encode(sb: str or bytes) -> bytes:
42
+ """
43
+ Encode input strings or bytes as base64
44
+
45
+ Args:
46
+ sb: input string or bytes
47
+
48
+ Returns:
49
+ base64 encoded bytes
50
+ """
51
  import base64
52
 
53
+ sb_bytes = _prepare_base64_input(sb)
54
  return base64.b64encode(sb_bytes)
tests/io/test_coordinates_pixel_conversion.py CHANGED
@@ -1,6 +1,6 @@
1
  import json
2
 
3
- from src.io.coordinates_pixel_conversion import get_latlng2pixel_projection, get_point_latlng_to_pixel_coordinates, \
4
  get_latlng_to_pixel_coordinates
5
  from src.utilities.type_hints import LatLngDict
6
  from tests import TEST_EVENTS_FOLDER
@@ -15,7 +15,7 @@ def test_get_latlng2pixel_projection():
15
  print(f"k:{k}")
16
  current_input = input_output["input"]
17
  latlng_input = LatLngDict.model_validate(current_input["latlng"])
18
- output = get_latlng2pixel_projection(latlng_input)
19
  assert output == input_output["output"]
20
 
21
 
@@ -28,7 +28,7 @@ def test_get_point_latlng_to_pixel_coordinates():
28
  print(f"k:{k}")
29
  current_input = input_output["input"]
30
  latlng_input = LatLngDict.model_validate(current_input["latlng"])
31
- output = get_point_latlng_to_pixel_coordinates(latlng=latlng_input, zoom=current_input["zoom"])
32
  assert output == input_output["output"]
33
 
34
 
 
1
  import json
2
 
3
+ from src.io.coordinates_pixel_conversion import _get_latlng2pixel_projection, _get_point_latlng_to_pixel_coordinates, \
4
  get_latlng_to_pixel_coordinates
5
  from src.utilities.type_hints import LatLngDict
6
  from tests import TEST_EVENTS_FOLDER
 
15
  print(f"k:{k}")
16
  current_input = input_output["input"]
17
  latlng_input = LatLngDict.model_validate(current_input["latlng"])
18
+ output = _get_latlng2pixel_projection(latlng_input)
19
  assert output == input_output["output"]
20
 
21
 
 
28
  print(f"k:{k}")
29
  current_input = input_output["input"]
30
  latlng_input = LatLngDict.model_validate(current_input["latlng"])
31
+ output = _get_point_latlng_to_pixel_coordinates(latlng=latlng_input, zoom=current_input["zoom"])
32
  assert output == input_output["output"]
33
 
34