Spaces:
Running
Running
File size: 2,020 Bytes
9223079 8320ccc 9223079 2eaeef9 9223079 8320ccc 44ae162 9223079 44ae162 9223079 e15a186 9223079 8320ccc 9223079 e15a186 9223079 e15a186 9223079 e15a186 9223079 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 |
import sys
from pathlib import Path
import torch
from hloc import logger
from ..utils.base_model import BaseModel
# Make the bundled third-party REKD package importable before importing it.
rekd_path = Path(__file__).parent / "../../third_party"
sys.path.append(str(rekd_path))
from REKD.training.model.REKD import REKD as REKD_

# Preferred compute device.
# NOTE(review): `device` is defined but never used in this file — the network
# is never explicitly moved to it; confirm whether BaseModel handles placement.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class REKD(BaseModel):
    """hloc wrapper around the third-party REKD keypoint detector/descriptor."""

    default_conf = {
        "model_name": "v0",
        "keypoint_threshold": 0.1,
    }
    required_inputs = ["image"]

    def _init(self, conf):
        """Load REKD weights from the third-party checkpoint directory.

        Raises:
            FileNotFoundError: if the checkpoint for ``conf["model_name"]``
                does not exist.
        """
        model_path = (
            rekd_path / "checkpoints" / f'PointModel_{conf["model_name"]}.pth'
        )
        if not model_path.exists():
            # Fail fast with a clear error; the original only printed a
            # warning and then crashed inside torch.load with a confusing
            # unrelated message.
            raise FileNotFoundError(f"No model found at {model_path}")
        self.net = REKD_(is_test=True)
        state_dict = torch.load(model_path, map_location="cpu")
        self.net.load_state_dict(state_dict["model_state"])
        logger.info("Load REKD model done.")

    def _forward(self, data):
        """Detect keypoints and compute descriptors for one input image.

        Returns a dict with batched tensors: ``keypoints`` (1, N, 2),
        ``scores`` (1, N) and ``descriptors`` (1, 256, N), where N is the
        number of detections above ``keypoint_threshold``.
        """
        image = data["image"]
        keypoints, scores, descriptors = self.net(image)
        _, _, Hc, Wc = descriptors.shape

        # Rows are [score, x, y] per spatial location.
        # NOTE(review): assumes the network returns batch-1 maps whose
        # concatenation along dim=1 has 3 channels — confirm against REKD_.
        kpts_score = (
            torch.cat([keypoints, scores], dim=1)
            .view(3, -1)
            .t()
            .detach()
            .cpu()
            .numpy()
        )
        # One descriptor row per spatial location (256-D each); the original
        # redundant intermediate view(256, Hc, Wc) is dropped.
        descriptors = (
            descriptors.view(256, -1)
            .t()
            .detach()
            .cpu()
            .numpy()
        )

        # Filter by confidence once (the original recomputed the mask twice).
        mask = kpts_score[:, 0] > self.conf["keypoint_threshold"]
        descriptors = descriptors[mask, :]
        kpts_score = kpts_score[mask, :]
        keypoints = kpts_score[:, 1:]
        scores = kpts_score[:, 0]
        return {
            "keypoints": torch.from_numpy(keypoints)[None],
            "scores": torch.from_numpy(scores)[None],
            "descriptors": torch.from_numpy(descriptors.T)[None],
        }
|