Commit ee07a2dc authored by Clément Pinard

reinstate th3d registration method

parent 62525ece
......@@ -16,7 +16,7 @@ parser.add_argument('--database', metavar='DB', required=True,
help='path to colmap database file, to get the image ids right')
-def add_to_db(db_path, metadata_path, frame_list_path, input_frame_ids=None, **env):
+def add_to_db(db_path, metadata_path, frame_list_path, **env):
metadata = pd.read_csv(metadata_path)
database = db.COLMAPDatabase.connect(db_path)
......@@ -26,9 +26,6 @@ def add_to_db(db_path, metadata_path, frame_list_path, input_frame_ids=None, **e
with open(frame_list_path, "r") as f:
frame_list = [line[:-1] for line in f.readlines()]
metadata = metadata[metadata["image_path"].isin(frame_list)]
-if input_frame_ids:
-assert(len(metadata) == len(input_frame_ids))
-metadata["input_frame_id"] = input_frame_ids
for _, row in tqdm(metadata.iterrows(), total=len(metadata)):
image_path = row["image_path"]
......@@ -38,8 +35,7 @@ def add_to_db(db_path, metadata_path, frame_list_path, input_frame_ids=None, **e
else:
frame_gps = np.full(3, np.NaN)
try:
-input_id = row["input_frame_id"] if input_frame_ids else None
-frame_ids.append(database.add_image(image_path, int(camera_id), prior_t=frame_gps, image_id=input_id))
+frame_ids.append(database.add_image(image_path, int(camera_id), prior_t=frame_gps, image_id=row["db_id"]))
except IntegrityError:
sql_string = "SELECT camera_id, image_id FROM images WHERE name='{}'".format(image_path)
sql_output = next(database.execute(sql_string))
......
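
The hunk above drops the input_frame_ids argument: the COLMAP image id now travels with the metadata as a db_id column, filled in when the frames are first registered during the video scan, so every database built from that metadata reuses the same ids. A minimal sketch of the idea, assuming the repository's COLMAPDatabase wrapper and a hypothetical metadata.csv that already carries image_path, camera_id and db_id columns:

    import numpy as np
    import pandas as pd
    from colmap_util import database as db

    metadata = pd.read_csv("metadata.csv")           # hypothetical file
    database = db.COLMAPDatabase.connect("scan.db")  # hypothetical file
    for _, row in metadata.iterrows():
        # Forcing image_id keeps ids consistent across every database
        # derived from the same thorough scan.
        database.add_image(row["image_path"], int(row["camera_id"]),
                           prior_t=np.full(3, np.NaN),
                           image_id=int(row["db_id"]))
    database.commit()
    database.close()
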
-from colmap import read_model as rm
-from colmap.database import COLMAPDatabase
+from colmap_util import read_model as rm
+from colmap_util.database import COLMAPDatabase
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from path import Path
import pandas as pd
......
This diff is collapsed.
......@@ -28,9 +28,40 @@ def create_project(mlp_path, model_paths, labels=None, transforms=None):
def remove_mesh_from_project(input_mlp, output_mlp, index):
with open(input_mlp, "r") as f:
to_modify = etree.parse(f)
meshgroup = to_modify.getroot()[0]
if index < len(meshgroup):
removed = meshgroup[index]
meshgroup.remove(removed)
to_modify.write(output_mlp, pretty_print=True)
+transform = np.fromstring(removed[0].text, sep=" ").reshape(4, 4)
+filepath = removed.get("label")
+return transform, filepath
+def get_mesh(input_mlp, index):
+with open(input_mlp, "r") as f:
+to_modify = etree.parse(f)
+meshgroup = to_modify.getroot()[0]
+if index < len(meshgroup):
+removed = meshgroup[index]
+transform = np.fromstring(removed[0].text, sep=" ").reshape(4, 4)
+filepath = removed.get("label")
+return transform, filepath
+def add_mesh_to_project(input_mlp, output_mlp, model_path, index=0, label=None, transform=np.eye(4)):
+with open(input_mlp, "r") as f:
+to_modify = etree.parse(f)
+if label is None:
+label = model_path.basename()
+root = to_modify.getroot()
+if index < len(root[0][0]):
+root[0].remove(root[0][index])
+group = root[0]
+mesh = etree.Element("MLMesh")
+mesh.set("label", label)
+mesh.set("filename", model_path)
+matrix = etree.SubElement(mesh, "MLMatrix44")
+matrix.text = "\n" + "\n".join(" ".join(str(element) for element in row) + " " for row in transform) + "\n"
+group.insert(index, mesh)
+to_modify.write(output_mlp, pretty_print=True)
......@@ -50,5 +81,7 @@ if __name__ == '__main__':
labels = "1", "2"
transforms = [np.random.randn(4, 4), np.random.randn(4, 4)]
create_project("test.mlp", model_paths)
+add_mesh_to_project("test.mlp", "test.mlp", model_paths[0], index=0)
-remove_mesh_from_project("test.mlp", "test2.mlp", 0)
+matrix, filename = remove_mesh_from_project("test.mlp", "test2.mlp", 0)
+print(matrix, filename)
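
For context, the helpers above assume the usual MeshLab project (.mlp) layout: a MeshLabProject root whose first child is a MeshGroup, and MLMesh children carrying a label, a filename and an MLMatrix44 text block that holds the 4x4 transform. A small lxml sketch of reading that structure back (the project file name is hypothetical):

    import numpy as np
    from lxml import etree

    with open("scene.mlp", "r") as f:      # hypothetical project file
        project = etree.parse(f)
    meshgroup = project.getroot()[0]       # <MeshGroup>
    for mesh in meshgroup:                 # each <MLMesh label=... filename=...>
        transform = np.fromstring(mesh[0].text, sep=" ").reshape(4, 4)
        print(mesh.get("label"), mesh.get("filename"), transform)
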
......@@ -6,6 +6,7 @@
#include <pcl/registration/icp.h>
#include <pcl/registration/transformation_estimation_svd_scale.h>
#include <pcl/filters/statistical_outlier_removal.h>
+#include <pcl/features/normal_3d.h>
#include <pcl/io/ply_io.h>
......@@ -45,44 +46,60 @@ int main (int argc, char** argv)
// Load point cloud with normals.
LOG(INFO) << "Loading point clouds ...";
-pcl::PointCloud<pcl::PointXYZ>::Ptr geroef(
+pcl::PointCloud<pcl::PointXYZ>::Ptr georef(
new pcl::PointCloud<pcl::PointXYZ>());
-if (pcl::io::loadPLYFile(georef_path, *geroef) < 0) {
+if (pcl::io::loadPLYFile(georef_path, *georef) < 0) {
return EXIT_FAILURE;
}
-pcl::PointCloud<pcl::PointXYZ>::Ptr lidar(
-new pcl::PointCloud<pcl::PointXYZ>());
+pcl::PointCloud<pcl::PointNormal>::Ptr lidar(
+new pcl::PointCloud<pcl::PointNormal>());
if (pcl::io::loadPLYFile(lidar_path, *lidar) < 0) {
return EXIT_FAILURE;
}
LOG(INFO) << "point clouds loaded...";
// Filter to get inlier cloud, store in filtered_cloud.
-pcl::PointCloud<pcl::PointXYZ>::Ptr geroef_filtered (new pcl::PointCloud<pcl::PointXYZ>);
-pcl::StatisticalOutlierRemoval<pcl::PointXYZ> sor;
-sor.setInputCloud(geroef);
-sor.setMeanK(6);
-sor.setStddevMulThresh(0.1);
-sor.filter(*geroef_filtered);
+//pcl::PointCloud<pcl::PointXYZ>::Ptr geroef_filtered (new pcl::PointCloud<pcl::PointXYZ>);
+//pcl::StatisticalOutlierRemoval<pcl::PointXYZ> sor;
+//sor.setInputCloud(geroef);
+//sor.setMeanK(6);
+//sor.setStddevMulThresh(0.1);
+//sor.filter(*geroef_filtered);
-pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
-pcl::registration::TransformationEstimationSVDScale<pcl::PointXYZ, pcl::PointXYZ>::Ptr est;
-est.reset(new pcl::registration::TransformationEstimationSVDScale<pcl::PointXYZ, pcl::PointXYZ>);
+// Normal estimation*
+pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> n;
+pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
+pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
+tree->setInputCloud (georef);
+n.setInputCloud (georef);
+n.setSearchMethod (tree);
+n.setKSearch (6);
+n.compute (*normals);
+pcl::PointCloud<pcl::PointNormal>::Ptr geroef_normals (new pcl::PointCloud<pcl::PointNormal>);
+pcl::concatenateFields (*georef, *normals, *geroef_normals);
+// pcl::io::savePLYFile("test_normals.ply", *geroef_normals);
+pcl::IterativeClosestPointWithNormals<pcl::PointNormal, pcl::PointNormal> icp;
+pcl::registration::TransformationEstimationSVDScale<pcl::PointNormal, pcl::PointNormal>::Ptr est;
+est.reset(new pcl::registration::TransformationEstimationSVDScale<pcl::PointNormal, pcl::PointNormal>);
icp.setTransformationEstimation(est);
icp.setMaxCorrespondenceDistance (max_distance);
icp.setTransformationEpsilon(0.0001);
icp.setMaximumIterations(500);
icp.setEuclideanFitnessEpsilon(0.0001);
-icp.setInputSource(geroef_filtered);
+icp.setInputSource(geroef_normals);
icp.setInputTarget(lidar);
-pcl::PointCloud<pcl::PointXYZ> Final;
+pcl::PointCloud<pcl::PointNormal> Final;
icp.align(Final);
-Eigen::Matrix4f transform = icp.getFinalTransformation().inverse();
-pcl::PointCloud<pcl::PointXYZ>::Ptr lidar_aligned(
-new pcl::PointCloud<pcl::PointXYZ>());
+Eigen::Matrix4f transform = icp.getFinalTransformation();
+pcl::PointCloud<pcl::PointNormal>::Ptr lidar_aligned(
+new pcl::PointCloud<pcl::PointNormal>());
pcl::transformPointCloud (*lidar, *lidar_aligned, transform);
std::ofstream output_file;
......
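
The registration change above computes normals on the georeferenced cloud (k = 6 neighbours) so that alignment can switch from point-to-point ICP on an outlier-filtered cloud to point-to-plane ICP with scale estimation. What pcl::NormalEstimation does there boils down to a PCA of each point's neighbourhood; a rough NumPy/SciPy equivalent, for illustration only:

    import numpy as np
    from scipy.spatial import cKDTree

    def estimate_normals(points, k=6):
        # Normal of a point = eigenvector of the smallest eigenvalue of the
        # covariance of its k nearest neighbours (sign left arbitrary).
        tree = cKDTree(points)
        _, idx = tree.query(points, k=k + 1)        # +1: the query point itself
        normals = np.empty_like(points)
        for i, neighbours in enumerate(idx):
            cov = np.cov(points[neighbours].T)
            eigvals, eigvecs = np.linalg.eigh(cov)  # eigenvalues in ascending order
            normals[i] = eigvecs[:, 0]
        return normals

    cloud = np.random.rand(1000, 3)                 # stand-in point cloud
    normals = estimate_normals(cloud)

Per-point normals are what lets pcl::IterativeClosestPointWithNormals minimise point-to-plane distances, which generally converges better than plain point-to-point ICP on smooth, unevenly sampled surfaces.
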
......@@ -55,7 +55,7 @@ int main(int argc, char** argv) {
std::string point_normal_cloud_path;
pcl::console::parse_argument(argc, argv, "--point_normal_cloud_path", point_normal_cloud_path);
-float resolution = 20; //20cm resolution
+float resolution = 0.2; //20cm resolution
pcl::console::parse_argument(argc, argv, "--resolution", resolution);
std::string mesh_output;
pcl::console::parse_argument(argc, argv, "--out_mesh", mesh_output);
......
......@@ -10,6 +10,7 @@ import pandas as pd
import numpy as np
from pyproj import Proj
from tqdm import tqdm
+import tempfile
parser = ArgumentParser(description='Take all the drone videos of a folder and put the frame '
'location in a COLMAP file for vizualisation',
......@@ -29,7 +30,7 @@ parser.add_argument('--nw', default='',
help="native-wrapper.sh file location")
parser.add_argument('--fps', default=1, type=int,
help="framerate at which videos will be scanned WITH reconstruction")
-parser.add_argument('--num_frames', default=200, type=int)
+parser.add_argument('--total_frames', default=200, type=int)
parser.add_argument('--orientation_weight', default=1, type=float)
parser.add_argument('--resolution_weight', default=1, type=float)
parser.add_argument('--num_neighbours', default=10, type=int)
......@@ -120,7 +121,7 @@ def register_new_cameras(cameras_dataframe, database, camera_dict, model_name="P
def process_video_folder(videos_list, existing_pictures, output_video_folder, image_path, system, centroid,
-workspace, fps=1, total_frames=500, orientation_weight=1, resolution_weight=1,
+thorough_db, workspace, fps=1, total_frames=500, orientation_weight=1, resolution_weight=1,
output_colmap_format="bin", save_space=False, max_sequence_length=1000, **env):
proj = Proj(system)
indoor_videos = []
......@@ -128,9 +129,9 @@ def process_video_folder(videos_list, existing_pictures, output_video_folder, im
video_output_folders = {}
images = {}
colmap_cameras = {}
-database_filepath = workspace/"thorough_scan.db"
+tempfile_database = Path(tempfile.NamedTemporaryFile().name)
path_lists_output = {}
-database = db.COLMAPDatabase.connect(database_filepath)
+database = db.COLMAPDatabase.connect(thorough_db)
database.create_tables()
to_extract = total_frames - len(existing_pictures)
......@@ -178,14 +179,21 @@ def process_video_folder(videos_list, existing_pictures, output_video_folder, im
print("Constructing COLMAP model with {:,} frames".format(len(final_metadata[final_metadata["sampled"]])))
database.commit()
+thorough_db.copy(tempfile_database)
+temp_database = db.COLMAPDatabase.connect(tempfile_database)
final_metadata["image_path"] = ""
-for image_id, row in tqdm(final_metadata.iterrows(), total=len(final_metadata)):
+final_metadata["db_id"] = -1
+for current_id, row in tqdm(final_metadata.iterrows(), total=len(final_metadata)):
video = row["video"]
frame = row["frame"]
camera_id = row["camera_id"]
current_image_path = video_output_folders[video].relpath(image_path) / video.namebase + "_{:05d}.jpg".format(frame)
-final_metadata.at[image_id, "image_path"] = current_image_path
+final_metadata.at[current_id, "image_path"] = current_image_path
+db_image_id = temp_database.add_image(current_image_path, int(camera_id))
+final_metadata.at[current_id, "db_id"] = db_image_id
if row["sampled"]:
frame_qvec = row[["frame_quat_w",
......@@ -200,14 +208,15 @@ def process_video_folder(videos_list, existing_pictures, output_video_folder, im
frame_gps = np.full(3, np.NaN)
world_qvec, world_tvec = world_coord_from_frame(frame_qvec, frame_tvec)
-db_image_id = database.add_image(current_image_path, int(camera_id), prior_t=frame_gps)
-images[db_image_id] = rm.Image(
-id=db_image_id, qvec=world_qvec, tvec=world_tvec,
-camera_id=camera_id, name=current_image_path,
-xys=[], point3D_ids=[])
+database.add_image(current_image_path, int(camera_id), prior_t=frame_gps, image_id=db_image_id)
+images[db_image_id] = rm.Image(id=db_image_id, qvec=world_qvec, tvec=world_tvec,
+camera_id=camera_id, name=current_image_path,
+xys=[], point3D_ids=[])
database.commit()
database.close()
+temp_database.commit()
+temp_database.close()
rm.write_model(colmap_cameras, images, {}, output_video_folder, "." + output_colmap_format)
print("COLMAP model created")
......
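
In the hunk above, every extracted frame is registered twice: once in a throw-away copy of the database (created with tempfile) just to reserve a db_id that is written back into the metadata, and once, for the sampled frames only, in the persistent thorough_db under that same id together with its GPS prior. A minimal sketch of the copy-and-reserve pattern, assuming path.py and the repository's COLMAPDatabase wrapper (paths and the camera id are hypothetical):

    import tempfile
    from path import Path
    from colmap_util import database as db

    thorough_db = Path("workspace/thorough_scan.db")             # hypothetical path
    tempfile_database = Path(tempfile.NamedTemporaryFile().name)
    thorough_db.copy(tempfile_database)                          # work on a copy

    temp_database = db.COLMAPDatabase.connect(tempfile_database)
    db_image_id = temp_database.add_image("vid01/frame_00001.jpg", 1)
    temp_database.commit()
    temp_database.close()

    # Reuse the reserved id in the persistent database.
    database = db.COLMAPDatabase.connect(thorough_db)
    database.add_image("vid01/frame_00001.jpg", 1, image_id=db_image_id)
    database.commit()
    database.close()
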
......@@ -122,7 +122,7 @@ def process_viz(depth_dir, img_dir, occ_dir, output_dir, video, fps, downscale,
raise e
if video:
-video_path = str(output_dir/'video.mp4')
+video_path = str(output_dir/'{}_groundtruth_viz.mp4'.format(output_dir.namebase))
glob_pattern = str(output_dir/'*.png')
ffmpeg.create_video(video_path, glob_pattern, fps)
......
......@@ -10,7 +10,7 @@ class Colmap(Wrapper):
self.image_path = image_path
self.mask_path = mask_path
-def extract_features(self, per_sub_folder=False, image_list=None, model="RADIAL", fine=False):
+def extract_features(self, per_sub_folder=False, image_list=None, model="RADIAL", more=False):
options = ["feature_extractor", "--database_path", self.db,
"--image_path", self.image_path, "--ImageReader.mask_path", self.mask_path,
"--ImageReader.camera_model", model]
......@@ -18,9 +18,13 @@ class Colmap(Wrapper):
options += ["--ImageReader.single_camera_per_folder", "1"]
if image_list is not None:
options += ["--image_list_path", image_list]
-if fine:
+if more:
options += ["--SiftExtraction.domain_size_pooling", "1",
"--SiftExtraction.estimate_affine_shape", "1"]
+else:
+# See issue https://github.com/colmap/colmap/issues/627
+# If COLMAP is updated to work better on newest driver, this should be removed
+options += ["--SiftExtraction.use_gpu", "0"]
self.__call__(options)
def match(self, method="exhaustive", guided_matching=True, vocab_tree=None):
......
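
The wrapper change above renames the fine flag to more (enabling the slower SiftExtraction.domain_size_pooling and estimate_affine_shape options) and, when more is off, forces CPU feature extraction as a workaround for the driver issue referenced in the inline comment. The assembled options correspond roughly to the following plain COLMAP invocation (paths are hypothetical):

    import subprocess

    subprocess.run([
        "colmap", "feature_extractor",
        "--database_path", "workspace/scan.db",   # hypothetical paths
        "--image_path", "workspace/images",
        "--ImageReader.mask_path", "workspace/masks",
        "--ImageReader.camera_model", "RADIAL",
        "--SiftExtraction.use_gpu", "0",          # CPU fallback, see colmap issue 627
    ], check=True)
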
......@@ -38,21 +38,25 @@ class ETH3D(Wrapper):
"--distance_threshold", str(threshold)]
self.__call__(options)
-def create_ground_truth(self, scan_meshlab, occlusion_ply, splats_ply, colmap_model, output_folder,
+def create_ground_truth(self, scan_meshlab, colmap_model, output_folder, occlusions=None, splats=None,
point_cloud=True, depth_maps=True, occlusion_maps=True):
options = ["GroundTruthCreator", "--scan_alignment_path", scan_meshlab,
"--image_base_path", self.image_path, "--state_path", colmap_model,
"--output_folder_path", output_folder, "--occlusion_mesh_path", occlusion_ply,
"--occlusion_splats_path", splats_ply,
"--output_folder_path", output_folder, "--occlusion_mesh_path", occlusions,
"--occlusion_splats_path", splats,
"--write_point_cloud", "1" if point_cloud else "0",
"--write_depth_maps", "1" if depth_maps else "0",
"--write_occlusion_depth", "1" if occlusion_maps else "0",
"--compress_depth_maps", "1"]
self.__call__(options)
-def inspect_dataset(self, scan_meshlab, occlusion_meshlab, colmap_model, output_folder,
-point_cloud=True, depth_maps=True, occlusion_maps=True):
+def inspect_dataset(self, scan_meshlab, colmap_model, image_path=None, occlusions=None, splats=None):
+if image_path is None:
+image_path = self.image_path
options = ["DatasetInspector", "--scan_alignment_path", scan_meshlab,
"--image_base_path", self.image_path, "--state_path", colmap_model,
"--occlusion_mesh_paths", occlusion_meshlab]
"--image_base_path", image_path, "--state_path", colmap_model]
if occlusions is not None:
options += ["--occlusion_mesh_path", occlusions]
if splats is not None:
options += ["--occlusion_splats_path", splats]
self.__call__(options)