Commit 1ce0e097 authored by Clément Pinard

Add matching option

parent 1d64aa91
@@ -91,6 +91,9 @@ def add_exec_options(parser):
def add_pm_options(parser):
pm_parser = parser.add_argument_group("Photogrammetry")
pm_parser.add_argument('--max_num_matches', default=32768, type=int, help="max number of matches, lower it if you get GPU memory error")
pm_parser.add_argument('--match_method', default='vocab_tree', choices=['vocab_tree', 'exhaustive'],
help='Match method for first thorough photogrammetry, '
'see https://colmap.github.io/tutorial.html#feature-matching-and-geometric-verification')
pm_parser.add_argument('--vocab_tree', type=Path, default="vocab_tree_flickr100K_words256K.bin")
pm_parser.add_argument('--triangulate', action="store_true")
pm_parser.add_argument('--multiple_models', action='store_true', help='If selected, will let colmap mapper do multiple models.'
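As a quick aside (my annotation, not part of the commit): a minimal standalone sketch of how the new --match_method choice behaves once parsed. The group name mirrors the hunk above; the example argv is made up.

from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

# Rebuild just the new option in isolation to show what it yields after parsing.
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
pm_parser = parser.add_argument_group("Photogrammetry")
pm_parser.add_argument('--match_method', default='vocab_tree',
                       choices=['vocab_tree', 'exhaustive'],
                       help='Match method for the first thorough photogrammetry')

args = parser.parse_args(['--match_method', 'exhaustive'])
print(args.match_method)  # 'exhaustive'; omitting the flag falls back to 'vocab_tree'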
@@ -3,10 +3,12 @@ import numpy as np
from path import Path
import yaml
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from colmap_util.read_model import Image, Camera, Point3D, write_model, qvec2rotmat, rotmat2qvec
from colmap_util.read_model import Image, Camera, Point3D, write_model, rotmat2qvec, CAMERA_MODEL_NAMES
from colmap_util.database import COLMAPDatabase
from tqdm import tqdm
from pyntcloud import PyntCloud
from itertools import islice
from scipy.spatial.transform import Rotation, Slerp
from scipy.interpolate import interp1d
parser = ArgumentParser(description='Convert EuroC dataset to COLMAP',
formatter_class=ArgumentDefaultsHelpFormatter)
@@ -43,14 +45,9 @@ def get_vicon_calib(yaml_path):
return np.array(calib["data"]).reshape((calib["rows"], calib["cols"]))
def create_image(img_id, cam_id, file_path, drone_pose, image_calib, vicon_calib):
t_prefix = " p_RS_R_{} [m]"
q_prefix = " q_RS_{} []"
drone_tvec = drone_pose[[t_prefix.format(dim) for dim in 'xyz']].to_numpy().reshape(3, 1)
drone_qvec = drone_pose[[q_prefix.format(dim) for dim in 'wxyz']].to_numpy()
drone_R = qvec2rotmat(drone_qvec)
drone_matrix = np.concatenate((np.hstack((drone_R, drone_tvec)), np.array([0, 0, 0, 1]).reshape(1, 4)))
image_matrix = drone_matrix @ np.linalg.inv(vicon_calib) @ image_calib
def create_image(img_id, cam_id, file_path, drone_tvec, drone_matrix, image_calib, vicon_calib):
drone_full_matrix = np.concatenate((np.hstack((drone_matrix, drone_tvec[:, None])), np.array([0, 0, 0, 1]).reshape(1, 4)))
image_matrix = drone_full_matrix @ np.linalg.inv(vicon_calib) @ image_calib
colmap_matrix = np.linalg.inv(image_matrix)
colmap_qvec = rotmat2qvec(colmap_matrix[:3, :3])
colmap_tvec = colmap_matrix[:3, -1]
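Reading of the refactor (my annotation, not the author's): create_image no longer looks up the nearest vicon row itself; it now receives an already interpolated translation and rotation matrix and only chains the frames. A self-contained sketch of that chaining, with identity calibrations as stand-in values:

import numpy as np

# Stand-in inputs: interpolated body pose plus identity hand-eye calibrations.
drone_tvec = np.array([1.0, 2.0, 0.5])
drone_matrix = np.eye(3)            # interpolated rotation of the body in the world
vicon_calib = np.eye(4)             # calibration from sensor.yaml (stand-in: identity)
image_calib = np.eye(4)             # calibration from sensor.yaml (stand-in: identity)

# Stack R|t into a 4x4, chain the calibrations, then invert to get the
# world-to-camera transform that COLMAP stores per image.
drone_full_matrix = np.concatenate((np.hstack((drone_matrix, drone_tvec[:, None])),
                                    np.array([0, 0, 0, 1]).reshape(1, 4)))
image_matrix = drone_full_matrix @ np.linalg.inv(vicon_calib) @ image_calib
colmap_matrix = np.linalg.inv(image_matrix)
print(colmap_matrix[:3, -1])        # tvec COLMAP would store: [-1.  -2.  -0.5]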
@@ -71,10 +68,19 @@ def main():
vicon_poses = pd.read_csv(vicon_dir/"data.csv")
vicon_poses = vicon_poses.set_index("#timestamp")
vicon_calib = get_vicon_calib(vicon_dir/"sensor.yaml")
min_ts, max_ts = min(vicon_poses.index), max(vicon_poses.index)
t_prefix = " p_RS_R_{} [m]"
q_prefix = " q_RS_{} []"
drone_tvec = vicon_poses[[t_prefix.format(dim) for dim in 'xyz']].values
drone_qvec = Rotation.from_quat(vicon_poses[[q_prefix.format(dim) for dim in 'xyzw']].values)
drone_qvec_slerp = Slerp(vicon_poses.index, drone_qvec)
drone_tvec_interp = interp1d(vicon_poses.index, drone_tvec.T)
cameras = {}
images = {}
image_list = []
image_georef = []
database = COLMAPDatabase.connect(args.root/"database.db")
database.create_tables()
for cam_id, cam in enumerate(cam_dirs):
print("Converting camera {} ...".format(cam))
if len(images.keys()) == 0:
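Worth flagging (my reading of the convention change): the EuRoC ground-truth CSV stores quaternions scalar-first as w, x, y, z, while scipy's Rotation.from_quat expects scalar-last x, y, z, w, hence the 'xyzw' column order above. A toy check of the interpolation setup, with made-up timestamps and poses:

import numpy as np
from scipy.spatial.transform import Rotation, Slerp
from scipy.interpolate import interp1d

# Made-up ground-truth samples: two timestamps, a 90 degree yaw between them.
ts = np.array([0.0, 10.0])
tvecs = np.array([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0]])
rots = Rotation.from_quat([[0.0, 0.0, 0.0, 1.0],                      # x, y, z, w
                           [0.0, 0.0, np.sin(np.pi/4), np.cos(np.pi/4)]])

slerp = Slerp(ts, rots)               # spherical interpolation for rotations
lerp = interp1d(ts, tvecs.T)          # linear interpolation for translations

query = np.array([5.0])               # camera timestamp between the two samples
print(lerp(query).T)                  # [[0.5 0.  0. ]]
print(slerp(query).as_euler('zyx', degrees=True))  # roughly [[45.  0.  0.]]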
@@ -82,18 +88,27 @@ def main():
else:
last_image_id = max(images.keys())
cameras[cam_id], cam_calib = get_cam(cam/"sensor.yaml", cam_id)
image_names = pd.read_csv(cam/"data.csv")
model_id = CAMERA_MODEL_NAMES["OPENCV"].model_id
database.add_camera(model_id, cameras[cam_id].width, cameras[cam_id].height, cameras[cam_id].params)
metadata = pd.read_csv(cam/"data.csv")
metadata["time"] = metadata['#timestamp [ns]']
metadata = metadata[(metadata['time'] > min_ts) & (metadata['time'] < max_ts)]
tvec_interpolated = drone_tvec_interp(metadata['time']).T
qvec_interpolated = drone_qvec_slerp(metadata['time'])
image_root = cam/"data"
step = 1
for img_id, (_, (ts, filename)) in tqdm(enumerate(islice(image_names.iterrows(), 0, None, step)), total=len(image_names.index)//step):
step = 10
for img_id, (filename, current_tvec, current_qvec) in tqdm(enumerate(zip(metadata["filename"].values[::step],
tvec_interpolated[::step],
qvec_interpolated[::step])),
total=len(metadata)):
final_path = (image_root/filename).relpath(args.img_root)
image_list.append(final_path)
row_index = vicon_poses.index.get_loc(ts, method='nearest')
current_drone_pose = vicon_poses.iloc[row_index]
images[1 + img_id + last_image_id], georef = create_image(1 + img_id + last_image_id, cam_id,
final_path, current_drone_pose,
final_path, current_tvec,
current_qvec.as_matrix(),
cam_calib, vicon_calib)
database.add_image(final_path, camera_id=cam_id, image_id=1 + img_id + last_image_id)
image_georef.append(georef)
points = {}
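One more annotation (my reading, not stated in the commit): camera frames are clipped to the vicon time span before interpolation, presumably because interp1d and Slerp raise on queries outside their fitted range, and only every step-th frame is then kept. A toy illustration of the time filter:

import pandas as pd

# Made-up camera timestamps, some outside the ground-truth time span.
metadata = pd.DataFrame({'#timestamp [ns]': [5, 15, 25, 35, 45]})
min_ts, max_ts = 10, 40                      # span covered by the vicon track

metadata["time"] = metadata['#timestamp [ns]']
inside = metadata[(metadata['time'] > min_ts) & (metadata['time'] < max_ts)]
print(inside['time'].tolist())               # [15, 25, 35]; 5 and 45 are dropped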
@@ -119,7 +119,10 @@ def main():
env["thorough_recon"].makedirs_p()
colmap.extract_features(image_list=env["video_frame_list_thorough"], more=args.more_sift_features)
colmap.index_images(vocab_tree_output=env["indexed_vocab_tree"], vocab_tree_input=args.vocab_tree)
colmap.match(method="vocab_tree", vocab_tree=env["indexed_vocab_tree"], max_num_matches=env["max_num_matches"])
if env["match_method"] == "vocab_tree":
colmap.match(method="vocab_tree", vocab_tree=env["indexed_vocab_tree"], max_num_matches=env["max_num_matches"])
else:
colmap.match(method="exhaustive", max_num_matches=env["max_num_matches"])
colmap.map(output=env["thorough_recon"], multiple_models=env["multiple_models"])
thorough_model = pi.choose_biggest_model(env["thorough_recon"])
colmap.adjust_bundle(thorough_model, thorough_model,
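On the two branches (hedged; the wrapper internals are not shown in this diff): vocab_tree matching retrieves likely image pairs through a pre-built vocabulary tree and scales to long videos, while exhaustive matching tries every pair and only stays tractable for small image sets. A guess at what such a wrapper might dispatch to via the stock COLMAP CLI; the flag names follow the COLMAP documentation and are worth double-checking:

from subprocess import check_call

def match(method, database, vocab_tree=None, max_num_matches=32768):
    # Hypothetical sketch of a matcher dispatch, not the repository's actual wrapper.
    if method == "vocab_tree":
        check_call(["colmap", "vocab_tree_matcher",
                    "--database_path", str(database),
                    "--VocabTreeMatching.vocab_tree_path", str(vocab_tree),
                    "--SiftMatching.max_num_matches", str(max_num_matches)])
    else:
        check_call(["colmap", "exhaustive_matcher",
                    "--database_path", str(database),
                    "--SiftMatching.max_num_matches", str(max_num_matches)])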
@@ -273,8 +273,8 @@ def generate_GT(video_name, raw_output_folder, images_root_folder, video_frames_
i_pv += 1
print_step_pv(i_pv, "Creating Ground truth data with ETH3D")
# eth3d.create_ground_truth(final_mlp, final_model, raw_output_folder,
# final_occlusions, final_splats)
eth3d.create_ground_truth(final_mlp, final_model, raw_output_folder,
final_occlusions, final_splats)
viz_folder.makedirs_p()
kitti_format_folder.makedirs_p()