from pathlib import Path
import argparse
import cv2
import matplotlib.cm as cm
import torch
import numpy as np
from utils.nnmatching import NNMatching
from utils.misc import (AverageTimer, VideoStreamer, make_matching_plot_fast, frame2tensor)
torch.set_grad_enabled(False)

def compute_essential(matched_kp1, matched_kp2, K):
    # Undistort the matched keypoints using the camera's radial distortion coefficients.
    pts1 = cv2.undistortPoints(matched_kp1, cameraMatrix=K, distCoeffs=(-0.117918271740560, 0.075246403574314, 0, 0))
    pts2 = cv2.undistortPoints(matched_kp2, cameraMatrix=K, distCoeffs=(-0.117918271740560, 0.075246403574314, 0, 0))
    K_1 = np.eye(3)
    # Estimate the essential matrix between the matches using RANSAC.
    # The points are already in normalized coordinates, so an identity intrinsic matrix is passed.
    ransac_model, ransac_inliers = cv2.findEssentialMat(pts1, pts2, K_1, method=cv2.RANSAC, prob=0.999, threshold=0.001, maxIters=10000)
    # Reject degenerate results (no inliers, or multiple stacked candidate solutions).
    if ransac_inliers is None or ransac_model.shape != (3, 3):
        ransac_inliers = np.array([])
        ransac_model = None
    return ransac_model, ransac_inliers, pts1, pts2
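
# Usage sketch for compute_essential (the variable names here are illustrative, matching
# how the main loop below calls it): given two Nx2 arrays of matched pixel coordinates
# and the intrinsics K defined below,
#     E, inliers, pts1, pts2 = compute_essential(mkpts0, mkpts1, K)
# returns the 3x3 essential matrix (or None on failure) and an Nx1 inlier mask
# (nonzero = inlier), so inliers.sum() gives the inlier count shown in the overlay.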

# Camera intrinsics, rescaled from the full sensor resolution (6744x4502)
# to the working image size `sizer`.
sizer = (960, 640)
focallength_x = 4.504986436499113e+03 / (6744 / sizer[0])
focallength_y = 4.513311442889859e+03 / (4502 / sizer[1])
K = np.eye(3)
K[0, 0] = focallength_x
K[1, 1] = focallength_y
K[0, 2] = 3.363322177533149e+03 / (6744 / sizer[0])  # * 0.5
K[1, 2] = 2.291824660547715e+03 / (4502 / sizer[1])  # * 0.5

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='DarkFeat demo',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--input', type=str,
        help='Path to an image directory')
    parser.add_argument(
        '--output_dir', type=str, default=None,
        help='Directory where to write output frames (if None, no output)')
    parser.add_argument(
        '--image_glob', type=str, nargs='+', default=['*.ARW'],
        help='Glob if a directory of images is specified')
    parser.add_argument(
        '--resize', type=int, nargs='+', default=[640, 480],
        help='Resize the input image before running inference. If two numbers, '
             'resize to the exact dimensions, if one number, resize the max '
             'dimension, if -1, do not resize')
    parser.add_argument(
        '--force_cpu', action='store_true',
        help='Force pytorch to run in CPU mode.')
    parser.add_argument('--model_path', type=str,
                        help='Path to the pretrained model')
    opt = parser.parse_args()
    print(opt)

    assert len(opt.resize) == 2
    print('Will resize to {}x{} (WxH)'.format(opt.resize[0], opt.resize[1]))

    device = 'cuda' if torch.cuda.is_available() and not opt.force_cpu else 'cpu'
    print('Running inference on device \"{}\"'.format(device))

    # Build the DarkFeat + nearest-neighbour matching pipeline from the pretrained weights.
    matching = NNMatching(opt.model_path).eval().to(device)
    keys = ['keypoints', 'scores', 'descriptors']

    vs = VideoStreamer(opt.input, opt.resize, opt.image_glob)
    frame, ret = vs.next_frame()
    assert ret, 'Error when reading the first frame (try different --input?)'

    # Extract features of the first frame; every later frame is matched against it.
    frame_tensor = frame2tensor(frame, device)
    last_data = matching.darkfeat({'image': frame_tensor})
    last_data = {k + '0': [last_data[k]] for k in keys}
    last_data['image0'] = frame_tensor
    last_frame = frame
    last_image_id = 0

    if opt.output_dir is not None:
        print('==> Will write outputs to {}'.format(opt.output_dir))
        Path(opt.output_dir).mkdir(exist_ok=True)

    timer = AverageTimer()

    while True:
        frame, ret = vs.next_frame()
        if not ret:
            print('Finished demo_darkfeat.py')
            break
        timer.update('data')
        stem0, stem1 = last_image_id, vs.i - 1

        # Match the current frame against the anchor frame.
        frame_tensor = frame2tensor(frame, device)
        pred = matching({**last_data, 'image1': frame_tensor})
        kpts0 = last_data['keypoints0'][0].cpu().numpy()
        kpts1 = pred['keypoints1'][0].cpu().numpy()
        matches = pred['matches0'][0].cpu().numpy()
        confidence = pred['matching_scores0'][0].cpu().numpy()
        timer.update('forward')

        # Keep only valid matches (an index of -1 means "unmatched").
        valid = matches > -1
        mkpts0 = kpts0[valid]
        mkpts1 = kpts1[matches[valid]]

        # Geometric verification: essential-matrix RANSAC on the matched keypoints.
        E, inliers, pts1, pts2 = compute_essential(mkpts0, mkpts1, K)
        inlier_mask = inliers[:, 0].astype('bool')

        color = cm.jet(np.clip(confidence[valid][inlier_mask] * 2 - 1, -1, 1))
        text = [
            'DarkFeat',
            'Matches: {}'.format(inliers.sum())
        ]
        out = make_matching_plot_fast(
            last_frame, frame, mkpts0[inlier_mask], mkpts1[inlier_mask],
            color, text, path=None, small_text=' ')

        if opt.output_dir is not None:
            stem = 'matches_{:06}_{:06}'.format(stem0, stem1)
            out_file = str(Path(opt.output_dir, stem + '.png'))
            print('Writing image to {}'.format(out_file))
            cv2.imwrite(out_file, out)
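
# Example invocation (the paths below are illustrative placeholders, not files shipped
# with the repository). The hard-coded intrinsics above are scaled for a 960x640 working
# size, so passing --resize 960 640 keeps K consistent with the resized frames:
#
#   python demo_darkfeat.py \
#       --input /path/to/raw_sequence/ \
#       --output_dir ./demo_output/ \
#       --resize 960 640 \
#       --model_path /path/to/DarkFeat.pth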