Commit f77b2258 authored by Clément Pinard

Update README

parent ac6f363f
@@ -9,6 +9,7 @@ For a brief recap of what it does, see section [How it works](#how-it-works)
* [Hardware Dependencies](#hardware-dependencies)
* [How it works](#how-it-works)
* [Step by step guide](#usage)
* [Special case : adding new images to an existing constructed dataset](#special-case-adding-new-images-to-an-existing-dataset)
* [Detailed method with the manoir example](#detailed-method-with-the-manoir-example)
@@ -732,6 +733,34 @@ This will essentially do the same thing as the script, in order to let you chang
--threads 8
```
### Special case : Adding new images to an existing dataset
If you have already constructed a dataset and the workspace used to build it is still available, you can easily add new images to it. See https://colmap.github.io/faq.html#register-localize-new-images-into-an-existing-reconstruction
The main task is to localize the new images in the thorough model, and to use the already computed Lidar cloud alignment to deduce depth for them.
The basic steps are :
1. Extract features of the new frames
2. Match the extracted features with the frames of the first database (usually named `scan_thorough.db`)
3. Either run `colmap mapper` or `colmap image_registrator` in order to get a model where the new frames are registered
4. (Optional) Re-build the occlusion mesh. This can be important if the new images see parts of the model that were unseen before: the Delaunay meshing will have occluded them, since anything not seen by any localized image is deemed to be in the interior of the model.
 - Run point cloud densification. If the workspace is intact, this should be very fast, as only the depth maps of the new images need to be computed
 - Run stereo fusion
 - Transfer visibility from the dense reconstruction to the Lidar point cloud
 - Run the Delaunay mesher on the Lidar point cloud with the new visibility index
 - Run the splat creator
5. Extract the desired frames into a new COLMAP model containing only these frames.
6. Run ETH3D's `GroundTruthCreator` on the extracted COLMAP model
7. Run `convert_dataset` on every subfolder of the new frames
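As an illustration, here is a minimal command-line sketch of steps 1 to 3, adapted from the COLMAP FAQ linked above. The paths (`scan_thorough.db`, `images/`, `new_frames_list.txt`, `models/thorough`, `models/extended`) are placeholders for your own workspace layout:

```
# 1. Extract features of the new frames only
colmap feature_extractor \
    --database_path scan_thorough.db \
    --image_path images/ \
    --image_list_path new_frames_list.txt

# 2. Match the new features against the frames already in the database
colmap vocab_tree_matcher \
    --database_path scan_thorough.db \
    --VocabTreeMatching.vocab_tree_path vocab_tree.bin \
    --VocabTreeMatching.match_list_path new_frames_list.txt

# 3. Register the new frames into the existing thorough model
colmap image_registrator \
    --database_path scan_thorough.db \
    --input_path models/thorough \
    --output_path models/extended
```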
All these steps can be run with the script `picture_localization.py`, which takes the same options as `main_pipeline.py` (except those that are not needed). Four options are added:
* `--map_new_images`: if selected, will replace the 'image_registrator' step with a full mapping step
* `--bundle_adjuster_steps` : number of iterations for the bundle adjuster after image registration (default: 100)
* `--rebuild_occlusion_mesh` : if selected, will rebuild a new dense point cloud and Delaunay mesh. Useful when new images see new parts of the model
* `--generic_model` : COLMAP camera model for the image folders. The same zoom level is assumed throughout a whole folder. See https://colmap.github.io/cameras.html (default: OPENCV)
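For example, a hypothetical invocation could look like the following, where the generic options shared with `main_pipeline.py` (input folders, workspace, and so on) are elided:

```
python picture_localization.py <same options as main_pipeline.py> \
    --map_new_images \
    --bundle_adjuster_steps 100 \
    --rebuild_occlusion_mesh \
    --generic_model OPENCV
```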
## Detailed method with the "Manoir" example
### Scene presentation
@@ -160,7 +160,8 @@ def set_full_argparser_no_lidar():
add_om_options(parser)
add_gt_options(parser)
parser.add_argument("--SOR", default=[10, 6], nargs=2, type=float,
nl_parser = parser.add_argument_group("Main pipeline no Lidar specific options")
nl_parser.add_argument("--SOR", default=[10, 6], nargs=2, type=float,
help="Satistical Outlier Removal parameters : Number of nearest neighbours, "
"max relative distance to standard deviation. "
"This will be used for filtering dense reconstruction")
@@ -178,15 +179,17 @@ def set_new_images_arparser():
add_om_options(parser)
add_gt_options(parser)
parser.add_argument("--map_new_images", action="store_true",
ni_parser = parser.add_argument_group("Add new pictures specific options")
ni_parser.add_argument("--map_new_images", action="store_true",
help="if selected, will replace the 'omage_registrator' step with a full mapping step")
parser.add_argument("--bundle_adjustor_steps", default=100, type=int,
ni_parser.add_argument("--bundle_adjuster_steps", default=100, type=int,
help="number of iteration for bundle adjustor after image registration")
parser.add_argument("--rebuild_occlusion_mesh", action="store_true",
ni_parser.add_argument("--rebuild_occlusion_mesh", action="store_true",
help="If selected, will rebuild a new dense point cloud and deauney mesh. "
"Useful when new images see new parts of the model")
parser.add_argument('--generic_model', default='OPENCV',
help='COLMAP model for generic videos. Same zoom level assumed throughout the whole video. '
ni_parser.add_argument('--generic_model', default='OPENCV',
help='COLMAP model for added pictures. Same zoom level assumed throughout a whole folder. '
'See https://colmap.github.io/cameras.html')
return parser
@@ -276,7 +276,11 @@ def main():
metadata=video_env["metadata"],
**video_env["output_env"], **env)
if env["generate_groundtruth_for_individual_images"]:
by_folder = pi.group_pics_by_folder(env["individual_pictures"])
for folder, pic_list in by_folder.items():
generate_GT_individual_pictures(input_colmap_model=env["georef_full_recon"],
individual_pictures=pic_list,
relpath=folder,
step_index=i, **env)
@@ -31,69 +31,68 @@ def main():
i = 1
print_step(i, "Pictures preparation")
existing_pictures = sum((list(env["individual_pictures_path"].walkfiles('*{}'.format(ext))) for ext in env["pic_ext"]), [])
existing_pictures = [path.relpath(env["colmap_img_root"]) for path in existing_pictures]
# env["new_pictures"] = [p for p in pi.extract_pictures_to_workspace(**env) if p not in existing_pictures]
env["new_pictures"] = existing_pictures
env["individual_pictures"] = pi.extract_pictures_to_workspace(**env)
# colmap.db = env["thorough_db"]
# colmap.match(method="vocab_tree", vocab_tree=env["indexed_vocab_tree"], max_num_matches=env["max_num_matches"])
# extended_georef = env["georef_recon"] + "_extended"
# extended_georef.makedirs_p()
# if args.map_new_images:
# colmap.map(input=env["georef_recon"], output=extended_georef)
# else:
# colmap.register_images(input=env["georef_recon"], output=extended_georef)
# colmap.adjust_bundle(extended_georef, extended_georef,
# num_iter=args.bundle_adjustor_steps, refine_extra_params=True)
# colmap.merge_models(output=env["georef_full_recon"], input1=env["georef_full_recon"], input2=extended_georef)
i += 1
print_step(i, "Add new pictures to COLMAP thorough model")
colmap.db = env["thorough_db"]
colmap.match(method="vocab_tree", vocab_tree=env["indexed_vocab_tree"], max_num_matches=env["max_num_matches"])
extended_georef = env["georef_recon"] + "_extended"
extended_georef.makedirs_p()
if args.map_new_images:
colmap.map(input=env["georef_recon"], output=extended_georef)
else:
colmap.register_images(input=env["georef_recon"], output=extended_georef)
colmap.adjust_bundle(extended_georef, extended_georef,
num_iter=args.bundle_adjuster_steps, refine_extra_params=True)
colmap.merge_models(output=env["georef_full_recon"], input1=env["georef_full_recon"], input2=extended_georef)
# if env["rebuild_occlusion_mesh"]:
# i += 1
# print_step(i, "Full reconstruction point cloud densificitation with new images")
# colmap.undistort(input=env["georef_full_recon"])
# # This step should be fast since everything else than new images is already computed
# colmap.dense_stereo(min_depth=env["stereo_min_depth"], max_depth=env["stereo_max_depth"])
# colmap.stereo_fusion(output=env["georefrecon_ply"])
# if args.inspect_dataset:
# georef_mlp = env["georef_full_recon"]/"georef_recon.mlp"
# mxw.create_project(georef_mlp, [env["georefrecon_ply"]])
# colmap.export_model(output=env["georef_full_recon"],
# input=env["georef_full_recon"],
# output_type="TXT")
# eth3d.inspect_dataset(scan_meshlab=georef_mlp,
# colmap_model=env["georef_full_recon"],
# image_path=env["colmap_img_root"])
# eth3d.inspect_dataset(scan_meshlab=env["aligned_mlp"],
# colmap_model=env["georef_full_recon"],
# image_path=env["colmap_img_root"])
if env["rebuild_occlusion_mesh"]:
i += 1
print_step(i, "Full reconstruction point cloud densificitation with new images")
colmap.undistort(input=env["georef_full_recon"])
# This step should be fast, since everything other than the new images has already been computed
colmap.dense_stereo(min_depth=env["stereo_min_depth"], max_depth=env["stereo_max_depth"])
colmap.stereo_fusion(output=env["georefrecon_ply"])
if args.inspect_dataset:
georef_mlp = env["georef_full_recon"]/"georef_recon.mlp"
mxw.create_project(georef_mlp, [env["georefrecon_ply"]])
colmap.export_model(output=env["georef_full_recon"],
input=env["georef_full_recon"],
output_type="TXT")
eth3d.inspect_dataset(scan_meshlab=georef_mlp,
colmap_model=env["georef_full_recon"],
image_path=env["colmap_img_root"])
eth3d.inspect_dataset(scan_meshlab=env["aligned_mlp"],
colmap_model=env["georef_full_recon"],
image_path=env["colmap_img_root"])
# i += 1
# print_step(i, "Occlusion Mesh re-computing")
# '''combine the MLP file into a single ply file. We need the normals for the splats'''
# if args.normals_method == "radius":
# eth3d.compute_normals(env["with_normals_path"], env["aligned_mlp"], neighbor_radius=args.normals_radius)
# else:
# eth3d.compute_normals(env["with_normals_path"], env["aligned_mlp"], neighbor_count=args.normals_neighbours)
# '''Create vis file that will tell by what images each point can be seen. We transfer this knowledge from georefrecon
# to the Lidar model'''
# env["global_registration_matrix"] = get_matrix(env["matrix_path"])
# scale = np.linalg.norm(env["global_registration_matrix"][:3, :3], ord=2)
# with_normals_subsampled = env["with_normals_path"].stripext() + "_subsampled.ply"
# pcl_util.create_vis_file(env["georefrecon_ply"], env["with_normals_path"],
# resolution=args.mesh_resolution / scale,
# output=with_normals_subsampled)
# '''Compute the occlusion mesh by fooling COLMAP into thinking the lidar point cloud was made with colmap'''
# colmap.delaunay_mesh(env["occlusion_ply"], input_ply=with_normals_subsampled)
# if args.splats:
# eth3d.create_splats(env["splats_ply"], with_normals_subsampled,
# env["occlusion_ply"], env["splat_threshold"] / scale,
# env["max_splat_size"])
i += 1
print_step(i, "Occlusion Mesh re-computing")
'''Combine the MLP file into a single ply file. We need the normals for the splats'''
if args.normals_method == "radius":
eth3d.compute_normals(env["with_normals_path"], env["aligned_mlp"], neighbor_radius=args.normals_radius)
else:
eth3d.compute_normals(env["with_normals_path"], env["aligned_mlp"], neighbor_count=args.normals_neighbours)
'''Create a vis file that will tell by which images each point can be seen. We transfer this knowledge from georefrecon
to the Lidar model'''
env["global_registration_matrix"] = get_matrix(env["matrix_path"])
scale = np.linalg.norm(env["global_registration_matrix"][:3, :3], ord=2)
with_normals_subsampled = env["with_normals_path"].stripext() + "_subsampled.ply"
pcl_util.create_vis_file(env["georefrecon_ply"], env["with_normals_path"],
resolution=args.mesh_resolution / scale,
output=with_normals_subsampled)
'''Compute the occlusion mesh by fooling COLMAP into thinking the lidar point cloud was made with colmap'''
colmap.delaunay_mesh(env["occlusion_ply"], input_ply=with_normals_subsampled)
if args.splats:
eth3d.create_splats(env["splats_ply"], with_normals_subsampled,
env["occlusion_ply"], env["splat_threshold"] / scale,
env["max_splat_size"])
i += 1
if i not in args.skip_step:
print_step(i, "Groud Truth generation")
by_folder = pi.group_pics_by_folder(env["new_pictures"])
by_folder = pi.group_pics_by_folder(env["individual_pictures"])
for folder, pic_list in by_folder.items():
generate_GT_individual_pictures(input_colmap_model=env["georef_full_recon"],
individual_pictures=pic_list,
@@ -301,13 +301,18 @@ def generate_GT_individual_pictures(colmap_img_root, individual_pictures, raw_ou
aligned_mlp, relpath,
occlusion_ply, splats_ply,
eth3d, colmap, step_index=None,
save_space=False, **env):
save_space=False, resume_work=False, **env):
def print_step_pv(step_number, step_name):
if step_index is not None:
print_step("{}.{}".format(step_index, step_number), step_name)
else:
print_step(step_index, step_name)
ground_truth_depth_folder = raw_output_folder / "ground_truth_depth" / relpath.stem
occlusion_depth_folder = raw_output_folder / "occlusion_depth" / relpath.stem
if resume_work and ground_truth_depth_folder.isdir():
print("Directory {} already done, skipping...".format(relpath))
return
i_pv = 1
print_step_pv(i_pv, "Copy individual images to output dataset {}".format(raw_output_folder))
for p in individual_pictures:
@@ -317,7 +322,7 @@ def generate_GT_individual_pictures(colmap_img_root, individual_pictures, raw_ou
i_pv += 1
print_step_pv(i_pv, "Extract individual images to dedicated COLMAP model")
pictures_colmap_model = raw_output_folder / "models" / "individual_pictures"
pictures_colmap_model = raw_output_folder / "models" / relpath
pictures_colmap_model.makedirs_p()
epfm.extract_pictures(input=input_colmap_model,
output=pictures_colmap_model,
@@ -339,9 +344,9 @@ def generate_GT_individual_pictures(colmap_img_root, individual_pictures, raw_ou
i_pv += 1
print_step_pv(i_pv, "Convert to KITTI format and create pictures with GT visualization")
cd.convert_dataset(pictures_colmap_model,
raw_output_folder / "ground_truth_depth" / "individual_pictures",
ground_truth_depth_folder,
raw_output_folder / "images",
raw_output_folder / "occlusion_depth" / "individual_pictures",
occlusion_depth_folder,
kitti_format_folder, viz_folder,
images_list=individual_pictures,
visualization=True, video=False, downscale=4, threads=8, **env)