"""Main 3D reconstruction pipeline.

Drives the full workflow that turns raw Lidar scans and videos into a
georeferenced reconstruction with per-video groundtruth, by orchestrating
the COLMAP / FFMpeg / PDraw / ETH3D / PCL tool wrappers step by step.
"""

import numpy as np

import las2ply
import meshlab_xml_writer as mxw
import prepare_images as pi
import prepare_workspace as pw
from cli_utils import set_full_argparser, print_step, print_workflow, get_matrix
from video_localization import localize_video, generate_GT, generate_GT_individual_pictures
from wrappers import Colmap, FFMpeg, PDraw, ETH3D, PCLUtil

def prepare_point_clouds(pointclouds, lidar_path, verbose, eth3d, pcl_util, SOR, pointcloud_resolution, **env):
    """Convert, filter and (optionally) subsample the input Lidar clouds, then
    gather them into a single registered MeshLab project.

    Parameters
    ----------
    pointclouds : list of Path
        Input point cloud files (LAS or PLY).
    lidar_path : Path
        Folder where converted PLY files are written.
    verbose : int
        Verbosity level; >= 1 enables verbose conversion.
    eth3d : ETH3D
        Wrapper used for ICP alignment when several clouds are given.
    pcl_util : PCLUtil
        Wrapper used for outlier filtering and subsampling.
    SOR : sequence
        Statistical Outlier Removal parameters: (knn, std ratio).
    pointcloud_resolution : float or None
        Target subsampling resolution; None keeps the filtered cloud as-is.
    env : dict
        Must contain the "workspace" and "lidar_mlp" paths.

    Returns
    -------
    tuple
        (list of processed cloud paths, centroid of the first LAS cloud or
        None when no LAS file was given).
    """
    converted_clouds = []
    output_centroid = None
    for pc in pointclouds:
        ply, centroid = las2ply.load_and_convert(input_file=pc,
                                                 output_folder=lidar_path,
                                                 verbose=verbose >= 1)
        # Keep the centroid of the first LAS file only; PLY inputs are assumed
        # to already live in the workspace frame — TODO confirm with las2ply.
        if pc.ext[1:].upper() == "LAS" and output_centroid is None:
            output_centroid = centroid
        filtered = ply.stripext() + "_filtered.ply"
        pcl_util.filter_cloud(input_file=ply, output_file=filtered, knn=SOR[0], std=SOR[1])
        if pointcloud_resolution is not None:
            subsampled = ply.stripext() + "_subsampled.ply"
            pcl_util.subsample(input_file=filtered,
                               output_file=subsampled,
                               resolution=pointcloud_resolution)
            converted_clouds.append(subsampled)
        else:
            converted_clouds.append(filtered)

    # Group every processed cloud in one MLP project; when there are several
    # clouds, refine their relative poses with multi-scale ICP, otherwise the
    # unaligned project is already the final one.
    temp_mlp = env["workspace"] / "lidar_unaligned.mlp"
    mxw.create_project(temp_mlp, converted_clouds, labels=None, transforms=None)
    if len(converted_clouds) > 1:
        eth3d.align_with_ICP(temp_mlp, env["lidar_mlp"], scales=5)
    else:
        temp_mlp.move(env["lidar_mlp"])

    return converted_clouds, output_centroid


def main():
    """Run the full reconstruction workflow, one numbered step at a time.

    Steps can be skipped via ``skip_step`` / ``begin_step``; a skipped step
    falls back to loading whatever artifacts a previous run left in the
    workspace, so the pipeline is resumable.
    """
    args = set_full_argparser().parse_args()
    env = vars(args)
    if args.show_steps:
        print_workflow()
        return
    if args.add_new_videos:
        # Resume an existing workspace and only re-run the video-related steps.
        env["resume_work"] = True
        args.skip_step = [1, 2, 4, 5, 8]
    if args.begin_step is not None:
        args.skip_step += list(range(args.begin_step))
    pw.check_input_folder(args.input_folder)
    args.workspace = args.workspace.abspath()
    pw.prepare_workspace(args.workspace, env)

    # Instantiate the external tool wrappers and expose them through env so
    # downstream helpers can receive them via **env.
    colmap = Colmap(db=env["thorough_db"],
                    image_path=env["colmap_img_root"],
                    mask_path=env["mask_path"],
                    dense_workspace=env["dense_workspace"],
                    binary=args.colmap,
                    verbose=args.verbose,
                    logfile=args.log)
    env["colmap"] = colmap
    ffmpeg = FFMpeg(args.ffmpeg, verbose=args.verbose, logfile=args.log)
    env["ffmpeg"] = ffmpeg
    pdraw = PDraw(args.nw, verbose=args.verbose, logfile=args.log)
    env["pdraw"] = pdraw
    eth3d = ETH3D(args.eth3d, args.raw_output_folder / "Images", args.max_occlusion_depth,
                  verbose=args.verbose, logfile=args.log, splat_radius=args.eth3d_splat_radius)
    env["eth3d"] = eth3d
    pcl_util = PCLUtil(args.pcl_util, verbose=args.verbose, logfile=args.log)
    env["pcl_util"] = pcl_util

    las_files = (args.input_folder/"Lidar").files("*.las")
    ply_files = (args.input_folder/"Lidar").files("*.ply")
    input_pointclouds = las_files + ply_files
    env["videos_list"] = sum((list((args.input_folder/"Videos").walkfiles('*{}'.format(ext))) for ext in args.vid_ext), [])
    # Videos under "no_groundtruth" take part in the reconstruction but are
    # excluded from the localization / groundtruth generation steps.
    no_gt_folder = args.input_folder/"Videos"/"no_groundtruth"
    if no_gt_folder.isdir():
        env["videos_to_localize"] = [v for v in env["videos_list"] if not str(v).startswith(no_gt_folder)]
    else:
        env["videos_to_localize"] = env["videos_list"]

    i = 1
    if i not in args.skip_step:
        print_step(i, "Point Cloud Preparation")
        env["pointclouds"], env["centroid"] = prepare_point_clouds(input_pointclouds, **env)
        if env["centroid"] is not None:
            np.savetxt(env["centroid_path"], env["centroid"])
    else:
        if env["centroid_path"].isfile():
            env["centroid"] = np.loadtxt(env["centroid_path"])

    i += 1
    if i not in args.skip_step:
        print_step(i, "Pictures preparation")
        env["individual_pictures"] = pi.extract_pictures_to_workspace(**env)
    else:
        # Step skipped: recover the already-extracted pictures, expressed
        # relative to the COLMAP image root.
        full_paths = sum((list(env["individual_pictures_path"].walkfiles('*{}'.format(ext))) for ext in env["pic_ext"]), [])
        env["individual_pictures"] = [path.relpath(env["colmap_img_root"]) for path in full_paths]

    i += 1
    # Collect frame folders of videos already extracted by a previous run,
    # matching each folder back to its source video by name.
    env["videos_frames_folders"] = {}
    by_name = {v.stem: v for v in env["videos_list"]}
    for folder in env["video_path"].walkdirs():
        video_name = folder.basename()
        if video_name in by_name:
            env["videos_frames_folders"][by_name[video_name]] = folder
    if i not in args.skip_step:
        print_step(i, "Extracting Videos and selecting optimal frames for a thorough scan")
        new_video_frame_folders = pi.extract_videos_to_workspace(fps=args.lowfps, **env)
        # Concatenate both already treated videos and newly detected videos
        env["videos_frames_folders"] = {**env["videos_frames_folders"], **new_video_frame_folders}
    env["videos_workspaces"] = {}
    for v, frames_folder in env["videos_frames_folders"].items():
        env["videos_workspaces"][v] = pw.prepare_video_workspace(v, frames_folder, **env)

    i += 1
    if i not in args.skip_step:
        print_step(i, "First thorough photogrammetry")
        env["thorough_recon"].makedirs_p()
        colmap.extract_features(image_list=env["video_frame_list_thorough"], more=args.more_sift_features)
        colmap.index_images(vocab_tree_output=env["indexed_vocab_tree"], vocab_tree_input=args.vocab_tree)
        if env["match_method"] == "vocab_tree":
            colmap.match(method="vocab_tree", vocab_tree=env["indexed_vocab_tree"], max_num_matches=env["max_num_matches"])
        else:
            colmap.match(method="exhaustive", max_num_matches=env["max_num_matches"])
        colmap.map(output=env["thorough_recon"], multiple_models=env["multiple_models"])
        thorough_model = pi.choose_biggest_model(env["thorough_recon"])
        colmap.adjust_bundle(thorough_model, thorough_model,
                             num_iter=100, refine_extra_params=True)
    else:
        thorough_model = pi.choose_biggest_model(env["thorough_recon"])

    i += 1
    if i not in args.skip_step:
        print_step(i, "Alignment of photogrammetric reconstruction with GPS")
        env["georef_recon"].makedirs_p()
        env["georef_full_recon"].makedirs_p()
        colmap.align_model(output=env["georef_recon"],
                           input=thorough_model,
                           ref_images=env["georef_frames_list"])
        if not (env["georef_recon"]/"images.bin").isfile():
            # GPS alignment failed, possibly because not enough GPS referenced images
            # Copy the original model without alignment
            print("Warning, model alignment failed, the model will be normalized, and thus the depth maps too")
            thorough_model.merge_tree(env["georef_recon"])
        env["georef_recon"].merge_tree(env["georef_full_recon"])
    if args.inspect_dataset:
        print("FIRST DATASET INSPECTION")
        print("Inspection of localization of frames used in thorough mapping "
              "w.r.t Sparse reconstruction")
        colmap.export_model(output=env["georef_recon"] / "georef_sparse.ply",
                            input=env["georef_recon"])
        georef_mlp = env["georef_recon"]/"georef_recon.mlp"
        mxw.create_project(georef_mlp, [env["georef_recon"] / "georef_sparse.ply"])
        colmap.export_model(output=env["georef_recon"],
                            input=env["georef_recon"],
                            output_type="TXT")
        eth3d.inspect_dataset(scan_meshlab=georef_mlp,
                              colmap_model=env["georef_recon"],
                              image_path=env["colmap_img_root"])

    i += 1
    if i not in args.skip_step:
        print_step(i, "Video localization with respect to reconstruction")
        for j, v in enumerate(env["videos_to_localize"]):
            print("\n\nNow working on video {} [{}/{}]".format(v, j + 1, len(env["videos_to_localize"])))
            video_env = env["videos_workspaces"][v]
            localize_video(video_name=v,
                           video_frames_folder=env["videos_frames_folders"][v],
                           video_index=j+1,
                           step_index=i,
                           num_videos=len(env["videos_to_localize"]),
                           **video_env, **env)

    i += 1
    if i not in args.skip_step:
        print_step(i, "Full reconstruction point cloud densification")
        colmap.undistort(input=env["georef_full_recon"])
        colmap.dense_stereo(min_depth=env["stereo_min_depth"], max_depth=env["stereo_max_depth"])
        colmap.stereo_fusion(output=env["georefrecon_ply"])

    i += 1
    if i not in args.skip_step:
        print_step(i, "Alignment of photogrammetric reconstruction with respect to Lidar Point Cloud")
        if args.registration_method == "eth3d":
            # Note : ETH3D doesn't register with scale, this might not be suitable for very large areas
            mxw.add_meshes_to_project(env["lidar_mlp"], env["aligned_mlp"], [env["georefrecon_ply"]], start_index=0)
            eth3d.align_with_ICP(env["aligned_mlp"], env["aligned_mlp"], scales=5)
            mxw.remove_mesh_from_project(env["aligned_mlp"], env["aligned_mlp"], 0)
            matrix = np.linalg.inv(mxw.get_mesh(env["aligned_mlp"], index=0)[0])
            np.savetxt(env["matrix_path"], matrix)
            ''' The new mlp is supposedly better than the one before because it was an ICP
            with N+1 models instead of just N so we replace it with the result on this scan
            by reversing the first transformation and getting back a mlp file with identity
            as first transform matrix'''
            mxw.apply_transform_to_project(env["aligned_mlp"], env["lidar_mlp"], matrix)
            env["global_registration_matrix"] = matrix
        else:
            if args.normals_method == "radius":
                eth3d.compute_normals(env["with_normals_path"], env["lidar_mlp"], neighbor_radius=args.normals_radius)
            else:
                eth3d.compute_normals(env["with_normals_path"], env["lidar_mlp"], neighbor_count=args.normals_neighbours)
            if args.registration_method == "simple":
                pcl_util.register_reconstruction(georef=env["georefrecon_ply"],
                                                 lidar=env["with_normals_path"],
                                                 output_matrix=env["matrix_path"],
                                                 max_distance=10)
            elif args.registration_method == "interactive":
                input("Get transformation matrix between {0} and {1} so that we should"
                      " apply it to the reconstructed point cloud to have the lidar point cloud, "
                      "and paste it in the file {2}. When done, press ENTER".format(env["with_normals_path"],
                                                                                    env["georefrecon_ply"],
                                                                                    env["matrix_path"]))
            env["global_registration_matrix"] = get_matrix(env["matrix_path"])
            mxw.apply_transform_to_project(env["lidar_mlp"], env["aligned_mlp"], env["global_registration_matrix"])
    else:
        # Step skipped: reload the registration matrix computed by a previous
        # run and re-derive the aligned project from it.
        env["global_registration_matrix"] = get_matrix(env["matrix_path"])
        mxw.apply_transform_to_project(env["lidar_mlp"], env["aligned_mlp"], env["global_registration_matrix"])

    i += 1
    if i not in args.skip_step:
        print_step(i, "Occlusion Mesh computing")
        '''combine the MLP file into a single ply file. We need the normals for the splats'''
        if args.normals_method == "radius":
            eth3d.compute_normals(env["with_normals_path"], env["aligned_mlp"], neighbor_radius=args.normals_radius)
        else:
            eth3d.compute_normals(env["with_normals_path"], env["aligned_mlp"], neighbor_count=args.normals_neighbours)
        '''Create vis file that will tell by what images each point can be seen. We transfer this knowledge from georefrecon
        to the Lidar model'''
        # The registration matrix may rescale the model; divide metric
        # thresholds by its spectral norm to keep them consistent.
        scale = np.linalg.norm(env["global_registration_matrix"][:3, :3], ord=2)
        with_normals_subsampled = env["with_normals_path"].stripext() + "_subsampled.ply"
        pcl_util.create_vis_file(env["georefrecon_ply"], env["with_normals_path"],
                                 resolution=args.mesh_resolution / scale,
                                 output=with_normals_subsampled)
        '''Compute the occlusion mesh by fooling COLMAP into thinking the lidar point cloud was made with colmap'''
        colmap.delaunay_mesh(env["occlusion_ply"], input_ply=with_normals_subsampled)
        if args.splats:
            eth3d.create_splats(env["splats_ply"], with_normals_subsampled,
                                env["occlusion_ply"], env["splat_threshold"] / scale,
                                env["max_splat_size"])

    if args.inspect_dataset:
        # First inspection : Check registration of the Lidar pointcloud wrt to COLMAP model but without the occlusion mesh
        # Second inspection : Check the occlusion mesh and the splats
        georef_mlp = env["georef_recon"]/"georef_recon.mlp"
        mxw.create_project(georef_mlp, [env["georefrecon_ply"]])
        colmap.export_model(output=env["georef_full_recon"],
                            input=env["georef_full_recon"],
                            output_type="TXT")
        print("SECOND DATASET INSPECTION")
        print("Inspection of localization of frames used in thorough mapping "
              "w.r.t Dense reconstruction")
        eth3d.inspect_dataset(scan_meshlab=georef_mlp,
                              colmap_model=env["georef_full_recon"],
                              image_path=env["colmap_img_root"])
        print("Inspection of localization of frames used in thorough mapping "
              "w.r.t Aligned Lidar Point Cloud")
        eth3d.inspect_dataset(scan_meshlab=env["aligned_mlp"],
                              colmap_model=env["georef_full_recon"],
                              image_path=env["colmap_img_root"])
        print("Inspection of localization of frames used in thorough mapping "
              "w.r.t Aligned Lidar Point Cloud and Occlusion Meshes")
        eth3d.inspect_dataset(scan_meshlab=env["aligned_mlp"],
                              colmap_model=env["georef_full_recon"],
                              image_path=env["colmap_img_root"],
                              occlusions=env["occlusion_ply"],
                              splats=env["splats_ply"])

    i += 1
    if i not in args.skip_step:
        print_step(i, "Ground Truth generation")
        for j, v in enumerate(env["videos_to_localize"]):
            video_env = env["videos_workspaces"][v]
            generate_GT(video_name=v, GT_already_done=video_env["GT_already_done"],
                        video_index=j+1,
                        step_index=i,
                        num_videos=len(env["videos_to_localize"]),
                        metadata_path=video_env["metadata_path"],
                        **video_env["output_env"], **env)
        if env["generate_groundtruth_for_individual_images"]:
            by_folder = pi.group_pics_by_folder(env["individual_pictures"])
            for folder, pic_list in by_folder.items():
                generate_GT_individual_pictures(input_colmap_model=env["georef_full_recon"],
                                                individual_pictures_list=pic_list,
                                                relpath=folder,
                                                step_index=i, **env)


# Script entry point: run the full reconstruction pipeline.
if __name__ == '__main__':
    main()