Commit ab398ca4 authored by Clément Pinard

adapt to pipeline without GT

parent b390444c
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from path import Path


def set_argparser():
    parser = ArgumentParser(description='Main pipeline, from LIDAR, pictures and videos to GT depth enabled videos',
                            formatter_class=ArgumentDefaultsHelpFormatter)
    main_parser = parser.add_argument_group("Main options")
    main_parser.add_argument('--input_folder', metavar='PATH', default=Path("."), type=Path,
                             help="Folder with LAS point cloud, videos, and images")
    main_parser.add_argument('--workspace', metavar='PATH', default=Path("."),
                             help='path to workspace where COLMAP operations will be done', type=Path)
    main_parser.add_argument('--raw_output_folder', metavar='PATH', default=Path("."),
                             help='path to raw output folder: must be big!', type=Path)
    main_parser.add_argument('--converted_output_folder', metavar='PATH', default=Path("."),
                             help='path to converted output folder: must be big!', type=Path)
    main_parser.add_argument('--skip_step', metavar="N", nargs="*", default=[], type=int)
    main_parser.add_argument('--begin_step', metavar="N", type=int, default=None)
    main_parser.add_argument('--show_steps', action="store_true")
    main_parser.add_argument('-v', '--verbose', action="count", default=0)
    main_parser.add_argument('--vid_ext', nargs='+', default=[".mp4", ".MP4"])
    main_parser.add_argument('--pic_ext', nargs='+', default=[".jpg", ".JPG", ".png", ".PNG"])
    main_parser.add_argument('--raw_ext', nargs='+', default=[".ARW", ".NEF", ".DNG"])
    main_parser.add_argument('--more_sift_features', action="store_true")
    main_parser.add_argument('--triangulate', action="store_true")
    main_parser.add_argument('--save_space', action="store_true")
    main_parser.add_argument('--add_new_videos', action="store_true")
    main_parser.add_argument('--resume_work', action="store_true")
    main_parser.add_argument('--inspect_dataset', action="store_true")
    main_parser.add_argument('--registration_method', choices=["simple", "eth3d", "interactive"], default="simple")

    pcp_parser = parser.add_argument_group("PointCloud preparation")
    pcp_parser.add_argument("--pointcloud_resolution", default=0.1, type=float)
    pcp_parser.add_argument("--SOR", default=[10, 6], nargs=2, type=float)

    ve_parser = parser.add_argument_group("Video extractor")
    ve_parser.add_argument('--total_frames', default=500, type=int)
    ve_parser.add_argument('--orientation_weight', default=1, type=float)
    ve_parser.add_argument('--resolution_weight', default=1, type=float)
    ve_parser.add_argument('--num_neighbours', default=10, type=int)
    ve_parser.add_argument('--system', default="epsg:2154")
    ve_parser.add_argument('--lowfps', default=1, type=int)
    ve_parser.add_argument('--max_sequence_length', default=4000, type=int)

    exec_parser = parser.add_argument_group("Executable files")
    exec_parser.add_argument('--log', default=None, type=Path)
    exec_parser.add_argument('--nw', default="native-wrapper.sh", type=Path,
                             help="native-wrapper.sh file location")
    exec_parser.add_argument("--colmap", default="colmap", type=Path,
                             help="colmap exec file location")
    exec_parser.add_argument("--eth3d", default="../dataset-pipeline/build",
                             type=Path, help="ETH3D dataset pipeline exec files folder location")
    exec_parser.add_argument("--ffmpeg", default="ffmpeg", type=Path)
    exec_parser.add_argument("--pcl_util", default="pcl_util/build", type=Path)

    vr_parser = parser.add_argument_group("Video Registration")
    vr_parser.add_argument("--vocab_tree", type=Path, default="vocab_tree_flickr100K_words256K.bin")

    om_parser = parser.add_argument_group("Occlusion Mesh")
    om_parser.add_argument('--normals_method', default="radius", choices=["radius", "neighbours"])
    om_parser.add_argument('--normals_radius', default=0.2, type=float)
    om_parser.add_argument('--normals_neighbours', default=8, type=int)
    om_parser.add_argument('--mesh_resolution', default=0.2, type=float)
    om_parser.add_argument('--splat_threshold', default=0.1, type=float)
    om_parser.add_argument('--max_occlusion_depth', default=250, type=float)
    return parser


def print_step(step_number, step_name):
    print("\n\n=================")
    print("Step {}".format(step_number))
    print(step_name)
    print("=================")
global_steps = ["Point Cloud Preparation",
"Pictures preparation",
"Extracting Videos and selecting optimal frames for a thorough scan",
"First thorough photogrammetry",
"Alignment of photogrammetric reconstruction with GPS",
"Video localization with respect to reconstruction",
"Full reconstruction point cloud densificitation",
"Alignment of photogrammetric reconstruction with Lidar point cloud",
"Occlusion Mesh computing",
"Ground Truth creation"]
per_vid_steps_1 = ["Full video extraction",
"Sky mask generation",
"Complete photogrammetry with video at 1 fps",
"Localizing remaining frames",
"Re-Alignment of triangulated points with Lidar point cloud"]
per_vid_steps_2 = ["Creating Ground truth data",
"Create video with GT vizualisation",
"Convert to KITTI format"]


def print_workflow():
    print("Global steps:")
    for i, s in enumerate(global_steps):
        print("{}) {}".format(i+1, s))
        if i == 4:
            print("\tPer video:")
            for i, s in enumerate(per_vid_steps_1):
                print("\t{}) {}".format(i+1, s))

    print("\tPer video:")
    for i, s in enumerate(per_vid_steps_2):
        print("\t{}) {}".format(i+1, s))
@@ -176,7 +176,6 @@ def main():
             mxw.add_meshes_to_project(env["lidar_mlp"], env["aligned_mlp"], [env["georefrecon_ply"]], start_index=0)
             eth3d.align_with_ICP(env["aligned_mlp"], env["aligned_mlp"], scales=5)
             mxw.remove_mesh_from_project(env["aligned_mlp"], env["aligned_mlp"], 0)
-            print(mxw.get_mesh(env["aligned_mlp"], index=0)[0])
             matrix = np.linalg.inv(mxw.get_mesh(env["aligned_mlp"], index=0)[0])
             np.savetxt(env["matrix_path"], matrix)
@@ -187,7 +186,10 @@ def main():
             mxw.apply_transform_to_project(env["aligned_mlp"], env["lidar_mlp"], matrix)
             env["global_registration_matrix"] = matrix
         else:
-            eth3d.compute_normals(env["with_normals_path"], env["lidar_mlp"], neighbor_radius=args.normal_radius)
+            if args.normals_method == "radius":
+                eth3d.compute_normals(env["with_normals_path"], env["lidar_mlp"], neighbor_radius=args.normals_radius)
+            else:
+                eth3d.compute_normals(env["with_normals_path"], env["lidar_mlp"], neighbor_count=args.normals_neighbours)
         if args.registration_method == "simple":
             pcl_util.register_reconstruction(georef=env["georefrecon_ply"],
                                              lidar=env["with_normals_path"],
@@ -207,9 +209,13 @@ def main():
     i += 1
     if i not in args.skip_step:
         print_step(i, "Occlusion Mesh computing")
-        eth3d.compute_normals(env["with_normals_path"], env["aligned_mlp"], neighbor_radius=args.normal_radius)
-        pcl_util.create_vis_file(env["georefrecon_ply"], env["with_normals_path"], resolution=args.mesh_resolution)
-        colmap.delaunay_mesh(env["occlusion_ply"], input_ply=env["with_normals_path"])
+        if args.normals_method == "radius":
+            eth3d.compute_normals(env["with_normals_path"], env["aligned_mlp"], neighbor_radius=args.normals_radius)
+        else:
+            eth3d.compute_normals(env["with_normals_path"], env["aligned_mlp"], neighbor_count=args.normals_neighbours)
+        pcl_util.create_vis_file(env["georefrecon_ply"], env["with_normals_path"],
+                                 resolution=args.mesh_resolution, output=env["with_normals_path"].stripext() + "_subsampled.ply")
+        colmap.delaunay_mesh(env["occlusion_ply"], input_ply=env["with_normals_path"].stripext() + "_subsampled.ply")
         eth3d.create_splats(env["splats_ply"], env["with_normals_path"], env["occlusion_ply"], threshold=args.splat_threshold)
     if args.inspect_dataset:

@@ -6,7 +6,7 @@
 #include <pcl/registration/icp.h>
 #include <pcl/registration/transformation_estimation_svd_scale.h>
 #include <pcl/filters/statistical_outlier_removal.h>
-#include <pcl/features/normal_3d.h>
+#include <pcl/features/normal_3d_omp.h>
 #include <pcl/io/ply_io.h>
@@ -70,7 +70,7 @@ int main (int argc, char** argv)
   // Normal estimation*
-  pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> n;
+  pcl::NormalEstimationOMP<pcl::PointXYZ, pcl::Normal> n;
   pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
   pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
   tree->setInputCloud (georef);

from pyproj import Proj
from edit_exif import get_gps_location
import numpy as np
import rawpy
import imageio
import generate_sky_masks as gsm
import videos_to_colmap as v2c


def extract_gps_and_path(existing_pictures, image_path, system, centroid=None, **env):
    proj = Proj(system)
    georef_list = []
    for img in existing_pictures:
        gps = get_gps_location(img)
        if gps is not None:
            lat, lng, alt = gps
            x, y = proj(lng, lat)
            if centroid is None:
                centroid = np.array([x, y, alt])
            x -= centroid[0]
            y -= centroid[1]
            alt -= centroid[2]
            georef_list.append("{} {} {} {}\n".format(img.relpath(image_path), x, y, alt))
    return georef_list, centroid
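

# A minimal usage sketch (assumption, not from the commit): extract_gps_and_path()
# projects each picture's EXIF GPS position with pyproj (e.g. Lambert-93,
# "epsg:2154", the --system default) and subtracts the first valid position so
# that downstream tools work with small local coordinates. The folder name
# below is hypothetical.
def _georef_example():
    from path import Path
    image_path = Path("workspace/Pictures")
    pictures = sorted(image_path.walkfiles("*.JPG"))
    georef_list, centroid = extract_gps_and_path(pictures, image_path, system="epsg:2154")
    # Each entry is "relative/path/to/img.JPG x y alt\n", relative to the centroid.
    print("".join(georef_list))
    print("centroid (x, y, alt):", centroid)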


def extract_pictures_to_workspace(input_folder, image_path, workspace, colmap,
                                  raw_ext, pic_ext, more_sift_features, **env):
    picture_folder = input_folder / "Pictures"
    picture_folder.merge_tree(image_path)
    raw_files = sum((list(image_path.walkfiles('*{}'.format(ext))) for ext in raw_ext), [])
    for raw in raw_files:
        if not any((raw.stripext() + ext).isfile() for ext in pic_ext):
            raw_array = rawpy.imread(raw)
            rgb = raw_array.postprocess()
            imageio.imsave(raw.stripext() + ".jpg", rgb)
        raw.remove()
    gsm.process_folder(folder_to_process=image_path, image_path=image_path, pic_ext=pic_ext, **env)
    colmap.extract_features(per_sub_folder=True, more=more_sift_features)
    return sum((list(image_path.walkfiles('*{}'.format(ext))) for ext in pic_ext), [])


def extract_videos_to_workspace(video_path, video_frame_list_thorough, georef_frames_list, **env):
    existing_georef, env["centroid"] = extract_gps_and_path(**env)
    path_lists, extracted_video_folders = v2c.process_video_folder(output_video_folder=video_path, **env)
    if path_lists is not None:
        with open(video_frame_list_thorough, "w") as f:
            f.write("\n".join(path_lists["thorough"]["frames"]))
        with open(georef_frames_list, "w") as f:
            f.write("\n".join(existing_georef) + "\n")
            f.write("\n".join(path_lists["thorough"]["georef"]) + "\n")
        for v in env["videos_list"]:
            video_folder = extracted_video_folders[v]
            with open(video_folder / "lowfps.txt", "w") as f:
                f.write("\n".join(path_lists[v]["frames_lowfps"]) + "\n")
            with open(video_folder / "georef.txt", "w") as f:
                f.write("\n".join(existing_georef) + "\n")
                f.write("\n".join(path_lists["thorough"]["georef"]) + "\n")
                f.write("\n".join(path_lists[v]["georef_lowfps"]) + "\n")
            for j, l in enumerate(path_lists[v]["frames_full"]):
                with open(video_folder / "full_chunk_{}.txt".format(j), "w") as f:
                    f.write("\n".join(l) + "\n")
    gsm.process_folder(folder_to_process=video_path, **env)
    return extracted_video_folders


def check_input_folder(path, with_lidar=True):
    def print_error_string():
        print("Error, bad input folder structure")
        print("Expected :")
        if with_lidar:
            print(str(path/"Lidar"))
        print(str(path/"Pictures"))
        print(str(path/"Videos"))
        print()
        print("but got :")
        print("\n".join(str(d) for d in path.dirs()))

    expected_folders = ["Pictures", "Videos"]
    if with_lidar:
        expected_folders.append("Lidar")
    if all((path/d).isdir() for d in expected_folders):
        return
    else:
        print_error_string()


def prepare_workspace(path, env, with_lidar=True):
    if with_lidar:
        env["lidar_path"] = path / "Lidar"
        env["lidar_mlp"] = env["workspace"] / "lidar.mlp"
        env["with_normals_path"] = env["lidar_path"] / "with_normals.ply"
        env["occlusion_ply"] = env["lidar_path"] / "occlusion_model.ply"
        env["splats_ply"] = env["lidar_path"] / "splats_model.ply"
        env["occlusion_mlp"] = env["lidar_path"] / "occlusions.mlp"
        env["splats_mlp"] = env["lidar_path"] / "splats.mlp"
        env["matrix_path"] = env["workspace"] / "matrix_thorough.txt"
    else:
        env["occlusion_ply"] = path / "occlusion_model.ply"
        env["splats_ply"] = path / "splats_model.ply"

    env["image_path"] = path / "Pictures"
    env["mask_path"] = path / "Masks"
    env["video_path"] = path / "Pictures" / "Videos"
    env["thorough_recon"] = path / "Thorough"
    env["georef_recon"] = env["thorough_recon"] / "georef"
    env["georef_full_recon"] = env["thorough_recon"] / "georef_full"
    env["dense_workspace"] = env["thorough_recon"] / "dense"
    env["video_recon"] = path / "Videos_reconstructions"
    env["aligned_mlp"] = env["workspace"] / "aligned_model.mlp"
    env["centroid_path"] = path / "centroid.txt"
    env["thorough_db"] = path / "scan_thorough.db"
    env["video_frame_list_thorough"] = env["image_path"] / "video_frames_for_thorough_scan.txt"
    env["georef_frames_list"] = env["image_path"] / "georef.txt"
    env["georefrecon_ply"] = env["georef_recon"] / "georef_reconstruction.ply"
    env["indexed_vocab_tree"] = env["workspace"] / "vocab_tree_thorough.bin"


def prepare_video_workspace(video_name, video_frames_folder,
                            raw_output_folder, converted_output_folder,
                            video_recon, video_path, **env):
    video_env = {"video_name": video_name,
                 "video_frames_folder": video_frames_folder}
    relative_path_folder = video_frames_folder.relpath(video_path)
    video_env["lowfps_db"] = video_frames_folder / "video_low_fps.db"
    video_env["metadata"] = video_frames_folder / "metadata.csv"
    video_env["lowfps_image_list_path"] = video_frames_folder / "lowfps.txt"
    video_env["chunk_image_list_paths"] = sorted(video_frames_folder.files("full_chunk_*.txt"))
    video_env["chunk_dbs"] = [video_frames_folder / fp.namebase + ".db" for fp in video_env["chunk_image_list_paths"]]
    colmap_root = video_recon / relative_path_folder
    video_env["colmap_models_root"] = colmap_root
    video_env["full_model"] = colmap_root
    video_env["lowfps_model"] = colmap_root / "lowfps"
    num_chunks = len(video_env["chunk_image_list_paths"])
    video_env["chunk_models"] = [colmap_root / "chunk_{}".format(index) for index in range(num_chunks)]
    video_env["final_model"] = colmap_root / "final"
    output = {}
    output["images_root_folder"] = raw_output_folder / "images"
    output["video_frames_folder"] = output["images_root_folder"] / relative_path_folder
    output["model_folder"] = raw_output_folder / "models" / relative_path_folder
    output["interpolated_frames_list"] = output["model_folder"] / "interpolated_frames.txt"
    output["final_model"] = output["model_folder"] / "final"
    output["kitti_format_folder"] = converted_output_folder / "KITTI" / relative_path_folder
    output["viz_folder"] = converted_output_folder / "video" / relative_path_folder
    video_env["output_env"] = output
    video_env["already_localized"] = env["resume_work"] and output["model_folder"].isdir()
    video_env["GT_already_done"] = env["resume_work"] and (raw_output_folder / "ground_truth_depth" / video_name.namebase).isdir()
    return video_env
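

# A minimal sketch (assumption, not part of the commit): per-video environments
# are built from the folders returned by extract_videos_to_workspace().
# prepare_video_workspace() only computes paths and checks a couple of folders,
# so this loop is cheap; env is assumed to already hold the keys its signature
# needs (raw_output_folder, converted_output_folder, video_recon, video_path,
# resume_work).
def _per_video_example(extracted_video_folders, env):
    video_envs = {}
    for video_name, frames_folder in extracted_video_folders.items():
        video_envs[video_name] = prepare_video_workspace(video_name, frames_folder, **env)
        # video_envs[video_name]["output_env"] now holds every output path for
        # this video; "already_localized" and "GT_already_done" let --resume_work
        # skip videos that were already processed.
    return video_envs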

@@ -28,7 +28,7 @@ class ETH3D(Wrapper):
         options = ["NormalEstimator", "-i", scan_meshlab, "-o", output_ply]
         if neighbor_count is not None:
             options += ["--neighbor_count", str(neighbor_count)]
-        if neighbor_radius is not None:
+        elif neighbor_radius is not None:
             options += ["--neighbor_radius", str(neighbor_radius)]
         self.__call__(options)