from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *
import PIL
from PIL import Image
import torch
import torchvision.transforms as transforms
## Extras in case the image also needs to be reconstructed on HF
import numpy as np
import os
import re
import shutil
import cv2
def extract_subimages(image: np.ndarray, wwidth, wheight, overlap_fraction):
    """
    Extracts subimages of the input image using a moving window of size (wwidth, wheight)
    with the specified overlap fraction. Returns a tuple (subimages, coords) where subimages
    is a list of subimages and coords is a list of tuples (x, y) indicating the top left corner
    coordinates of each subimage in the input image.
    """
    subimages = []
    coords = []
    height, width, channels = image.shape
    if channels > 3:
        image = image[:, :, 0:3]
        channels = 3
    overlap = int(max(0, min(overlap_fraction, 1)) * min(wwidth, wheight))
    # Regular grid of windows.
    y = 0
    while y + wheight <= height:
        x = 0
        while x + wwidth <= width:
            subimage = image[y:y+wheight, x:x+wwidth, :]
            subimages.append(subimage)
            coords.append((x, y))
            x += wwidth - overlap
        y += wheight - overlap
    # Bottom strip that the regular grid did not cover.
    if y < height:
        y = height - wheight
        x = 0
        while x + wwidth <= width:
            subimage = image[y:y+wheight, x:x+wwidth, :]
            subimages.append(subimage)
            coords.append((x, y))
            x += wwidth - overlap
        if x < width:
            x = width - wwidth
            subimage = image[y:y+wheight, x:x+wwidth, :]
            subimages.append(subimage)
            coords.append((x, y))
    # Right strip that the regular grid did not cover.
    if x < width:
        x = width - wwidth
        y = 0
        while y + wheight <= height:
            subimage = image[y:y+wheight, x:x+wwidth, :]
            subimages.append(subimage)
            coords.append((x, y))
            y += wheight - overlap
        if y < height:
            y = height - wheight
            subimage = image[y:y+wheight, x:x+wwidth, :]
            subimages.append(subimage)
            coords.append((x, y))
    return subimages, coords
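# Minimal usage sketch for extract_subimages (the array and sizes below are a
# made-up example, not part of the pipeline):
#   dummy = np.zeros((900, 1200, 3), dtype=np.uint8)
#   windows, coords = extract_subimages(dummy, 400, 400, 0.66)
#   # windows[i] is a 400x400x3 crop whose top-left corner in the original
#   # image is coords[i] == (x, y); edge windows are shifted inwards so the
#   # whole image is covered.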
# If there are no .tif files (labels), they are simply skipped; no special handling is needed.
def generate_and_save_subimages(path, output_dir_images, output_dir_labels=None):
    if output_dir_labels:
        if not os.path.exists(output_dir_labels):
            os.makedirs(output_dir_labels)
    if not os.path.exists(output_dir_images):
        os.makedirs(output_dir_images)
    for filename in os.listdir(path):
        if filename.endswith(".png") or filename.endswith(".tif"):
            filepath = os.path.join(path, filename)
            image = cv2.imread(filepath)
            subimages, coords = extract_subimages(image, 400, 400, 0.66)
            for i, subimage in enumerate(subimages):
                if filename.endswith(".png"):
                    output_filename = os.path.join(output_dir_images, f"{filename.rsplit('.', 1)[0]}_{coords[i][0]}_{coords[i][1]}.png")
                    cv2.imwrite(output_filename, subimage)
                else:
                    if output_dir_labels:
                        output_filename = os.path.join(output_dir_labels, f"{filename.rsplit('.', 1)[0]}_{coords[i][0]}_{coords[i][1]}.tif")
                        cv2.imwrite(output_filename, subimage)
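# Usage sketch (hypothetical folder names): cut every .png orthophoto and .tif
# label mask in a folder into 400x400 windows with 66% overlap.
#   generate_and_save_subimages("raw_data", "windows/images", "windows/labels")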
def generate_and_save_subimages_nolabel(path, output_dir_images, overlap=0.0, imagesformat="png", split_in_dirs=True):
    for entry in os.scandir(path):
        if entry.is_file() and entry.name.lower().endswith(imagesformat):
            filepath = entry.path
            gss_single(filepath, output_dir_images, overlap, imagesformat, split_in_dirs)
def gss_single(filepath, output_dir_images, overlap=0.0, imagesformat="png", split_in_dirs=True):
    image = cv2.imread(filepath)
    # Use only the file name without extension, so absolute input paths do not
    # leak into the output locations.
    stem = Path(filepath).stem
    if split_in_dirs:
        dir_this_image = Path(output_dir_images)/stem
        os.makedirs(dir_this_image, exist_ok=True)
    else:
        os.makedirs(output_dir_images, exist_ok=True)
    subimages, coords = extract_subimages(image, 400, 400, overlap)
    for i, subimage in enumerate(subimages):
        if split_in_dirs:
            output_filename = os.path.join(dir_this_image, f"{stem}_{coords[i][0]}_{coords[i][1]}.png")
        else:
            output_filename = os.path.join(output_dir_images, f"{stem}_{coords[i][0]}_{coords[i][1]}.png")
        cv2.imwrite(output_filename, subimage)
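# Note: window files are named <stem>_<x>_<y>.png; the regular expression used
# in subimages_from_directory / subimages_onlypath relies on this pattern to
# recover each window's (x, y) offset when the full image is rebuilt.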
def split_windows_in_folders(input_images_folder, output_images_folder):
    for filename in os.listdir(input_images_folder):
        dir_this_image = Path(output_images_folder)/filename.rsplit('.', 1)[0]
        os.makedirs(dir_this_image, exist_ok=True)
        if filename.endswith(".png"):
            print(str(dir_this_image))
            filepath = os.path.join(input_images_folder, filename)
            image = cv2.imread(filepath)
            subimages, coords = extract_subimages(image, 400, 400, 0)
            for i, subimage in enumerate(subimages):
                output_filename = os.path.join(dir_this_image, f"{filename.rsplit('.', 1)[0]}_{coords[i][0]}_{coords[i][1]}.png")
                cv2.imwrite(output_filename, subimage)
def subimages_from_directory(directorio):
    # Regular expression that extracts the X and Y offsets from the file name
    patron = re.compile(r"(.*)_(\d+)_(\d+)\.(png|jpg|tif)")
    windowlist = []
    coords = []
    # Walk the directory looking for window images
    for filename in os.listdir(directorio):
        match = patron.search(filename)
        if match:
            origname = match.group(1)
            x = int(match.group(2))
            y = int(match.group(3))
            #print(f"File {filename} has offsets X={x} and Y={y}")
            img = cv2.imread(os.path.join(directorio, filename))
            windowlist.append(img)
            coords.append((x, y))
    # Sort both lists by the X and Y coordinates
    windowlist, coords = zip(*sorted(zip(windowlist, coords), key=lambda pair: (pair[1][0], pair[1][1])))
    wh, ww, chan = windowlist[0].shape
    # coords are (x, y), so the original size is (last x + window width, last y + window height)
    origsize = (coords[-1][0] + ww, coords[-1][1] + wh)
    return windowlist, coords, wh, ww, chan, origsize
def subimages_onlypath(directorio):
    # Collect the paths of every window image in the directory
    pathlist = []
    patron = re.compile(r"(.*)_(\d+)_(\d+)\.(png|jpg|tif)")
    for filename in os.listdir(directorio):
        match = patron.search(filename)
        if match:
            pathlist.append(os.path.join(directorio, filename))
    return pathlist
def ReconstructFromMW(windowlist, coords, wh, ww, chan, origsize):
    canvas = np.zeros((origsize[1], origsize[0], chan), dtype=np.uint8)
    for idx, window in enumerate(windowlist):
        canvas[coords[idx][1]:coords[idx][1]+wh, coords[idx][0]:coords[idx][0]+ww, :] = window
    return canvas
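# Round-trip sketch (hypothetical folder written by gss_single): rebuild a full
# image from its windows.
#   windows, coords, wh, ww, chan, origsize = subimages_from_directory("processing/tmp")
#   full = ReconstructFromMW(windows, coords, wh, ww, chan, origsize)
#   # Windows are pasted back at their (x, y) offsets; where windows overlap,
#   # the one pasted last simply overwrites the previous one.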
def get_list_tp(path):
    list_to_process = []  # Reconstructed images, one per subdirectory
    list_names = []
    # Walk the elements of the directory
    for element in os.scandir(path):
        # Only subdirectories hold window sets
        if element.is_dir():
            # Rebuild the full image from the windows stored in this subdirectory
            windowlist, coords, wh, ww, chan, origsize = subimages_from_directory(element)
            list_to_process.append(ReconstructFromMW(windowlist, coords, wh, ww, chan, origsize))
            list_names.append(element.name)
    return list_to_process, list_names
def get_paths_tp(path):
    list_to_process = []  # Lists of window paths, one list per subdirectory
    # Walk the elements of the directory
    for element in os.scandir(path):
        # Only subdirectories hold window sets
        if element.is_dir():
            # Collect the paths of all windows in this subdirectory
            list_to_process.append(subimages_onlypath(element))
    return list_to_process
def process_multifolder(process_folders, result_folder):
    for folder in process_folders:
        folname = os.path.basename(os.path.dirname(folder[0]))
        destname = Path(result_folder)/folname
        os.makedirs(destname, exist_ok=True)
        for subimagepath in folder:
            img = PIL.Image.open(subimagepath)
            image = transforms.Resize((400, 400))(img)
            tensor = transform_image(image=image)
            with torch.no_grad():
                outputs = model(tensor)
                outputs = torch.argmax(outputs, 1)
            mask = np.array(outputs.cpu())
            mask[mask == 1] = 255
            mask = np.reshape(mask, (400, 400))
            mask_img = Image.fromarray(mask.astype('uint8'))
            filename = os.path.basename(subimagepath)
            new_image_path = os.path.join(result_folder, folname, filename)
            mask_img.save(new_image_path)
def recombine_windows(results_folder_w, result_f_rec):
    imgs, nombres = get_list_tp(results_folder_w)
    os.makedirs(result_f_rec, exist_ok=True)
    for idx, image in enumerate(imgs):
        img = Image.fromarray(image)
        new_image_path = os.path.join(result_f_rec, nombres[idx] + '.tif')
        img.save(new_image_path, compression='tiff_lzw')
    # Returns the path of the last image written; with a single input image
    # (the Gradio use case) this is the path of its reconstructed mask.
    return new_image_path
def process_single_image(single_image_path, base_f, pro_f, rsw_f, rsd_f):
    gss_single(single_image_path, pro_f, 0, "tif", True)
    process_multifolder(get_paths_tp(pro_f), rsw_f)
    pt = recombine_windows(rsw_f, rsd_f)
    shutil.rmtree(pro_f)
    shutil.rmtree(rsw_f)
    #copiar_info_georref(single_image_path, pt)
    return pt
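# End-to-end flow of process_single_image:
#   1. gss_single          -> cut the input image into 400x400 windows (pro_f)
#   2. process_multifolder -> run the segmentation model on every window (rsw_f)
#   3. recombine_windows   -> stitch the predicted masks back together (rsd_f)
#   4. the intermediate folders are deleted and the path of the stitched mask
#      is returned.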
# from osgeo import gdal, osr
# def copiar_info_georref(entrada, salida):
#     try:
#         # Open the original GeoTIFF file
#         original_dataset = gdal.Open(entrada)
#         # Get the georeferencing information from the original file
#         original_projection = original_dataset.GetProjection()
#         original_geotransform = original_dataset.GetGeoTransform()
#         # Open the result image
#         result_dataset = gdal.Open(salida, gdal.GA_Update)
#         # Copy the georeferencing information from the original file to the result image
#         result_dataset.SetProjection(original_projection)
#         result_dataset.SetGeoTransform(original_geotransform)
#         # Close the files
#         original_dataset = None
#         result_dataset = None
#     except Exception as e:
#         print("Error: ", e)
### END of extras
#repo_id = "Ignaciobfp/segmentacion-dron-marras"
#learner = from_pretrained_fastai(repo_id)
device = torch.device("cpu")
#model = learner.model
model = torch.jit.load("modelo_marras.pth")
model = model.cpu()
def transform_image(image):
    my_transforms = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize(
                                            [0.485, 0.456, 0.406],
                                            [0.229, 0.224, 0.225])])
    image_aux = image
    return my_transforms(image_aux).unsqueeze(0).to(device)
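# The normalization constants above are the standard ImageNet mean/std used for
# the pretrained backbone; the tensor is moved to `device` (CPU here) so it
# matches the TorchScript model loaded above.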
# Function in charge of running the predictions on a single window
def predict(img):
    img_pil = PIL.Image.fromarray(img, 'RGB')
    image = transforms.Resize((400, 400))(img_pil)
    tensor = transform_image(image=image)
    model.to(device)
    with torch.no_grad():
        outputs = model(tensor)
        outputs = torch.argmax(outputs, 1)
    mask = np.array(outputs.cpu())
    mask[mask == 1] = 255
    mask = np.reshape(mask, (400, 400))
    return Image.fromarray(mask.astype('uint8'))
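# Sketch of a direct call to predict on a single window (hypothetical array):
#   window = np.zeros((400, 400, 3), dtype=np.uint8)
#   mask = predict(window)  # PIL image where class 1 is mapped to 255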
def predict_full(img):
    # Print the current working directory (useful when debugging on the Space)
    ruta_actual = Path(".")
    print(f"Current working directory: {ruta_actual.resolve()}")
    single_image_path = "/home/user/app/tmp.tif"
    # Save the uploaded image to the temporary path the pipeline expects
    PIL.Image.fromarray(img).save(single_image_path)
    base_f = "."
    pro_f = "processing"
    rsw_f = "results_windows"
    rsd_f = "results_together"
    destpath = process_single_image(single_image_path, base_f, pro_f, rsw_f, rsd_f)
    im = Image.open(destpath)
    return im
# Create the interface and launch it.
gr.Interface(fn=predict_full, inputs=gr.Image(), outputs=gr.Image(type="pil")).launch(share=False)