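"""Main pipeline for building the Lidar / photogrammetry dataset with ground truth.

It chains the workflow steps in order: point cloud preparation, picture
preparation, video frame extraction, thorough COLMAP photogrammetry, GPS
alignment, video localization, dense reconstruction, registration with the
Lidar point cloud, occlusion mesh computation and ground truth generation.
The full step list is printed by print_workflow() when the show_steps
argument is set.

A possible invocation is sketched below; the exact flag names are defined in
cli_utils.set_argparser, so the ones shown here are only assumptions derived
from the attribute names used in this file:

    python main_pipeline.py --input_folder raw_data/ --workspace workspace/ --raw_output_folder output/
"""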
import las2ply
import numpy as np
from wrappers import Colmap, FFMpeg, PDraw, ETH3D, PCLUtil
from cli_utils import set_argparser, print_step, print_workflow
from video_localization import localize_video, generate_GT
import meshlab_xml_writer as mxw
import prepare_images as pi
import prepare_workspace as pw


def prepare_point_clouds(pointclouds, lidar_path, verbose, eth3d, pcl_util, SOR, pointcloud_resolution, **env):
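    """Convert, filter and gather the input point clouds into the Lidar MLP project.

    Each LAS/PLY cloud is converted to PLY with las2ply, then cleaned with a
    statistical outlier removal (SOR) filter. The filtered clouds are put into a
    MeshLab project file; when there is more than one cloud they are co-registered
    with ETH3D's ICP. Returns the list of converted clouds and the centroid of the
    first LAS file (None if there was none).
    """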
    converted_clouds = []
    output_centroid = None
    for pc in pointclouds:
        ply, centroid = las2ply.load_and_convert(input_file=pc,
                                                 output_folder=lidar_path,
                                                 verbose=verbose >= 1)
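        # Only the centroid of the first LAS cloud is kept; it is saved to centroid_path in step 1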
        if pc.ext[1:].upper() == "LAS":
            if output_centroid is None:
                output_centroid = centroid
        pcl_util.filter_cloud(input_file=ply, output_file=ply.stripext() + "_filtered.ply", knn=SOR[0], std=SOR[1])
        # pcl_util.subsample(input_file=ply.stripext() + "_filtered.ply",
        #                    output_file=ply.stripext() + "_subsampled.ply",
        #                    resolution=pointcloud_resolution)

        # converted_clouds.append(ply.stripext() + "_subsampled.ply")
        converted_clouds.append(ply.stripext() + "_filtered.ply")
    temp_mlp = env["workspace"] / "lidar_unaligned.mlp"
    mxw.create_project(temp_mlp, converted_clouds, labels=None, transforms=None)
    if len(converted_clouds) > 1:
        eth3d.align_with_ICP(temp_mlp, env["lidar_mlp"], scales=5)
    else:
        temp_mlp.move(env["lidar_mlp"])

    return converted_clouds, output_centroid


def main():
    args = set_argparser().parse_args()
    env = vars(args)
    if args.show_steps:
        print_workflow()
        return
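    # When only adding new videos to an existing workspace, skip the steps that were already run on it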
    if args.add_new_videos:
        args.skip_step += [1, 2, 4, 5, 6]
    if args.begin_step is not None:
        args.skip_step += list(range(args.begin_step))
    pw.check_input_folder(args.input_folder)
    args.workspace = args.workspace.abspath()
    pw.prepare_workspace(args.workspace, env)
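    # Wrap the external tools; each wrapper shares the verbosity level and log file and is stored in env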
    colmap = Colmap(db=env["thorough_db"],
                    image_path=env["image_path"],
                    mask_path=env["mask_path"],
                    dense_workspace=env["dense_workspace"],
                    binary=args.colmap,
                    verbose=args.verbose,
                    logfile=args.log)
    env["colmap"] = colmap
    ffmpeg = FFMpeg(args.ffmpeg, verbose=args.verbose, logfile=args.log)
    env["ffmpeg"] = ffmpeg
    pdraw = PDraw(args.nw, verbose=args.verbose, logfile=args.log)
    env["pdraw"] = pdraw
    eth3d = ETH3D(args.eth3d, args.raw_output_folder / "Images", args.max_occlusion_depth,
                  verbose=args.verbose, logfile=args.log)
    env["eth3d"] = eth3d
    pcl_util = PCLUtil(args.pcl_util, verbose=args.verbose, logfile=args.log)
    env["pcl_util"] = pcl_util

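    # Gather the input Lidar point clouds (LAS and PLY) and the list of video files to process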
    las_files = (args.input_folder/"Lidar").files("*.las")
    ply_files = (args.input_folder/"Lidar").files("*.ply")
    input_pointclouds = las_files + ply_files
    env["videos_list"] = sum((list((args.input_folder/"Videos").walkfiles('*{}'.format(ext))) for ext in args.vid_ext), [])
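    # Videos placed under Videos/no_groundtruth stay in videos_list but are excluded from localization and ground truth generation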
    no_gt_folder = args.input_folder/"Videos"/"no_groundtruth"
    if no_gt_folder.isdir():
        env["videos_to_localize"] = [v for v in env["videos_list"] if not str(v).startswith(no_gt_folder)]
    else:
        env["videos_to_localize"] = env["videos_list"]

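    # Steps are numbered in order; a step listed in skip_step is bypassed and its results are reloaded from the workspace instead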
    i = 1
    if i not in args.skip_step:
        print_step(i, "Point Cloud Preparation")
        env["pointclouds"], env["centroid"] = prepare_point_clouds(input_pointclouds, **env)
        if env["centroid"] is not None:
            np.savetxt(env["centroid_path"], env["centroid"])
    else:
        if env["centroid_path"].isfile():
            env["centroid"] = np.loadtxt(env["centroid_path"])

    i += 1
    if i not in args.skip_step:
        print_step(i, "Pictures preparation")
        env["existing_pictures"] = pi.extract_pictures_to_workspace(**env)
    else:
        env["existing_pictures"] = sum((list(env["image_path"].walkfiles('*{}'.format(ext))) for ext in env["pic_ext"]), [])

    i += 1
    if i not in args.skip_step:
        print_step(i, "Extracting Videos and selecting optimal frames for a thorough scan")
        existing_georef, env["centroid"] = pi.extract_gps_and_path(**env)
        env["videos_frames_folders"] = pi.extract_videos_to_workspace(existing_georef=existing_georef,
                                                                      fps=args.lowfps, **env)
    else:
        env["videos_frames_folders"] = {}
        by_name = {v.stem: v for v in env["videos_list"]}
        for folder in env["video_path"].walkdirs():
            video_name = folder.basename()
            if video_name in by_name:
                env["videos_frames_folders"][by_name[video_name]] = folder
    env["videos_workspaces"] = {}
    for v, frames_folder in env["videos_frames_folders"].items():
        env["videos_workspaces"][v] = pw.prepare_video_workspace(v, frames_folder, **env)

    i += 1
    if i not in args.skip_step:
        print_step(i, "First thorough photogrammetry")
        env["thorough_recon"].makedirs_p()
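        # Thorough SfM: extract SIFT features, index frames in a vocabulary tree, match with it, then map and bundle-adjust the biggest model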
        colmap.extract_features(image_list=env["video_frame_list_thorough"], more=args.more_sift_features)
        colmap.index_images(vocab_tree_output=env["indexed_vocab_tree"], vocab_tree_input=args.vocab_tree)
        colmap.match(method="vocab_tree", vocab_tree=env["indexed_vocab_tree"], max_num_matches=env["max_num_matches"])
        colmap.map(output=env["thorough_recon"], multiple_models=env["multiple_models"])
        thorough_model = pi.choose_biggest_model(env["thorough_recon"])
        colmap.adjust_bundle(thorough_model, thorough_model,
                             num_iter=100, refine_extra_params=True)
    else:
        thorough_model = pi.choose_biggest_model(env["thorough_recon"])

    i += 1
    if i not in args.skip_step:
        print_step(i, "Alignment of photogrammetric reconstruction with GPS")
        env["georef_recon"].makedirs_p()
        colmap.align_model(output=env["georef_recon"],
                           input=thorough_model,
                           ref_images=env["georef_frames_list"])
        if not (env["georef_frames_list"]/"images.bin").isfile():
            # GPS alignment failed, possibly because not enough GPS referenced images
            # Copy the original model without alignment
            (env["thorough_recon"] / "0").merge_tree(env["georef_recon"])
        env["georef_recon"].merge_tree(env["georef_full_recon"])
    if args.inspect_dataset:
        colmap.export_model(output=env["georef_recon"] / "georef_sparse.ply",
                            input=env["georef_recon"])
        georef_mlp = env["georef_recon"]/"georef_recon.mlp"
        mxw.create_project(georef_mlp, [env["georefrecon_ply"]])
        colmap.export_model(output=env["georef_recon"],
                            input=env["georef_recon"],
                            output_type="TXT")
        eth3d.inspect_dataset(scan_meshlab=georef_mlp,
                              colmap_model=env["georef_recon"],
                              image_path=env["image_path"])

    i += 1
    if i not in args.skip_step:
        print_step(i, "Video localization with respect to reconstruction")
        for j, v in enumerate(env["videos_to_localize"]):
            print("\n\nNow working on video {} [{}/{}]".format(v, j + 1, len(env["videos_to_localize"])))
            video_env = env["videos_workspaces"][v]
            localize_video(video_name=v,
                           video_frames_folder=env["videos_frames_folders"][v],
                           video_index=j+1,
                           step_index=i,
                           num_videos=len(env["videos_to_localize"]),
                           **video_env, **env)

    i += 1
    if i not in args.skip_step:
        print_step(i, "Full reconstruction point cloud densificitation")
        env["georef_full_recon"].makedirs_p()
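        # Dense reconstruction with COLMAP: undistort the images, run dense stereo, then fuse the depth maps into a single PLY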
        colmap.undistort(input=env["georef_full_recon"])
        colmap.dense_stereo()
        colmap.stereo_fusion(output=env["georefrecon_ply"])

    def get_matrix(path):
        '''Note: we use the inverse matrix here because it is generally easier to register the
        reconstructed model onto the lidar one (the reconstruction has fewer points), but in the
        end we need the matrix to apply to the lidar points so that they are aligned with the
        camera positions (i.e. the inverse).'''
        if path.isfile():
            return np.linalg.inv(np.fromfile(path, sep=" ").reshape(4, 4))
        else:
            print("Error: no registration matrix found, identity will be used")
            return np.eye(4)
    i += 1
    if i not in args.skip_step:
        print_step(i, "Registration of photogrammetric reconstruction with respect to Lidar Point Cloud")
        if args.registration_method == "eth3d":
            # Note : ETH3D doesn't register with scale, this might not be suitable for very large areas
            mxw.add_meshes_to_project(env["lidar_mlp"], env["aligned_mlp"], [env["georefrecon_ply"]], start_index=0)
            eth3d.align_with_ICP(env["aligned_mlp"], env["aligned_mlp"], scales=5)
            mxw.remove_mesh_from_project(env["aligned_mlp"], env["aligned_mlp"], 0)
            matrix = np.linalg.inv(mxw.get_mesh(env["aligned_mlp"], index=0)[0])
            np.savetxt(env["matrix_path"], matrix)

            '''The new MLP is presumably better than the previous one because the ICP was run with
            N+1 models instead of just N, so we replace it with the result of this scan by reversing
            the first transformation, which gives back an MLP file whose first transform matrix is
            the identity.'''
            mxw.apply_transform_to_project(env["aligned_mlp"], env["lidar_mlp"], matrix)
            env["global_registration_matrix"] = matrix
        else:
            if args.normals_method == "radius":
                eth3d.compute_normals(env["with_normals_path"], env["lidar_mlp"], neighbor_radius=args.normals_radius)
            else:
                eth3d.compute_normals(env["with_normals_path"], env["lidar_mlp"], neighbor_count=args.normals_neighbours)
            if args.registration_method == "simple":
                pcl_util.register_reconstruction(georef=env["georefrecon_ply"],
                                                 lidar=env["with_normals_path"],
                                                 output_matrix=env["matrix_path"],
                                                 max_distance=10)
            elif args.registration_method == "interactive":
                input("Get transformation matrix between {0} and {1} so that we should"
                      " apply it to the reconstructed point cloud to have the lidar point cloud, "
                      "and paste it in the file {2}. When done, press ENTER".format(env["with_normals_path"],
                                                                                    env["georefrecon_ply"],
                                                                                    env["matrix_path"]))
            env["global_registration_matrix"] = get_matrix(env["matrix_path"])
            mxw.apply_transform_to_project(env["lidar_mlp"], env["aligned_mlp"], env["global_registration_matrix"])
    else:
        env["global_registration_matrix"] = get_matrix(env["matrix_path"])
        mxw.apply_transform_to_project(env["lidar_mlp"], env["aligned_mlp"], env["global_registration_matrix"])

    i += 1
    if i not in args.skip_step:
        print_step(i, "Occlusion Mesh computing")
        '''Combine the clouds of the MLP project into a single PLY file; the normals are needed for the splats.'''
        if args.normals_method == "radius":
            eth3d.compute_normals(env["with_normals_path"], env["aligned_mlp"], neighbor_radius=args.normals_radius)
        else:
            eth3d.compute_normals(env["with_normals_path"], env["aligned_mlp"], neighbor_count=args.normals_neighbours)
        '''Create the vis file that tells by which images each point can be seen. We transfer this
        knowledge from georefrecon to the Lidar model.'''
        scale = np.linalg.norm(env["global_registration_matrix"][:3, :3], ord=2)
        with_normals_subsampled = env["with_normals_path"].stripext() + "_subsampled.ply"
        pcl_util.create_vis_file(env["georefrecon_ply"], env["with_normals_path"],
                                 resolution=args.mesh_resolution / scale,
                                 output=with_normals_subsampled)
        '''Compute the occlusion mesh by fooling COLMAP into thinking the lidar point cloud was made with colmap'''
        colmap.delaunay_mesh(env["occlusion_ply"], input_ply=with_normals_subsampled)
        if args.splats:
            eth3d.create_splats(env["splats_ply"], with_normals_subsampled, env["occlusion_ply"], threshold=args.splat_threshold / scale)

    if args.inspect_dataset:
        # First inspection: check the registration of the Lidar point cloud with respect to the COLMAP model, without the occlusion mesh
        # Second inspection: check the occlusion mesh and the splats
        eth3d.inspect_dataset(scan_meshlab=env["aligned_mlp"],
                              colmap_model=env["georef_recon"],
                              image_path=env["image_path"])
        eth3d.inspect_dataset(scan_meshlab=env["aligned_mlp"],
                              colmap_model=env["georef_recon"],
                              image_path=env["image_path"],
                              occlusions=env["occlusion_ply"],
                              splats=env["splats_ply"])

    i += 1
    if i not in args.skip_step:
        print_step(i, "Groud Truth generation")
        for j, v in enumerate(env["videos_to_localize"]):
            video_env = env["videos_workspaces"][v]

            generate_GT(video_name=v, GT_already_done=video_env["GT_already_done"],
                        video_index=j+1,
                        num_videos=len(env["videos_to_localize"]),
                        metadata=video_env["metadata"],
                        **video_env["output_env"], **env)


if __name__ == '__main__':
    main()