import las2ply
import numpy as np
from wrappers import Colmap, FFMpeg, PDraw, ETH3D, PCLUtil
from cli_utils import set_argparser, print_step, print_workflow
from video_localization import localize_video, generate_GT
import meshlab_xml_writer as mxw
import prepare_images as pi
import prepare_workspace as pw


def prepare_point_clouds(pointclouds, lidar_path, verbose, eth3d, pcl_util, SOR, pointcloud_resolution, **env):
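    '''Convert the input lidar scans to PLY and build the initial MeshLab project.

    Each LAS/PLY cloud is converted with las2ply, then denoised with a statistical
    outlier removal (SOR) filter. If several clouds are given they are registered
    together with ETH3D's ICP tool; the result is saved as the lidar_mlp project.
    Returns the list of converted clouds and the centroid of the first LAS file.
    '''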
    converted_clouds = []
    output_centroid = None
    for pc in pointclouds:
        ply, centroid = las2ply.load_and_convert(input_file=pc,
                                                 output_folder=lidar_path,
                                                 verbose=verbose >= 1)
        if pc.ext[1:].upper() == "LAS":
            if output_centroid is None:
                output_centroid = centroid
        pcl_util.filter_cloud(input_file=ply, output_file=ply.stripext() + "_filtered.ply", knn=SOR[0], std=SOR[1])
        # pcl_util.subsample(input_file=ply.stripext() + "_filtered.ply",
        #                    output_file=ply.stripext() + "_subsampled.ply",
        #                    resolution=pointcloud_resolution)

        # converted_clouds.append(ply.stripext() + "_subsampled.ply")
        converted_clouds.append(ply.stripext() + "_filtered.ply")
    temp_mlp = env["workspace"] / "lidar_unaligned.mlp"
    mxw.create_project(temp_mlp, converted_clouds, labels=None, transforms=None)
    if len(converted_clouds) > 1:
        eth3d.align_with_ICP(temp_mlp, env["lidar_mlp"], scales=5)
    else:
        temp_mlp.move(env["lidar_mlp"])

    return converted_clouds, output_centroid


def main():
    args = set_argparser().parse_args()
    env = vars(args)
    if args.show_steps:
        print_workflow()
        return
    if args.add_new_videos:
        args.skip_step += [1, 2, 4, 5, 6]
    if args.begin_step is not None:
        args.skip_step += list(range(args.begin_step))
    pw.check_input_folder(args.input_folder)
    args.workspace = args.workspace.abspath()
    pw.prepare_workspace(args.workspace, env)
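    # Instantiate the external tool wrappers (COLMAP, FFMpeg, PDraw, ETH3D, PCL utilities)
    # and store them in env so that every step can access them.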
    colmap = Colmap(db=env["thorough_db"],
                    image_path=env["image_path"],
                    mask_path=env["mask_path"],
                    dense_workspace=env["dense_workspace"],
                    binary=args.colmap,
                    verbose=args.verbose,
                    logfile=args.log)
    env["colmap"] = colmap
    ffmpeg = FFMpeg(args.ffmpeg, verbose=args.verbose, logfile=args.log)
    env["ffmpeg"] = ffmpeg
    pdraw = PDraw(args.nw, verbose=args.verbose, logfile=args.log)
    env["pdraw"] = pdraw
    eth3d = ETH3D(args.eth3d, args.raw_output_folder / "Images", args.max_occlusion_depth,
                  verbose=args.verbose, logfile=args.log)
    env["eth3d"] = eth3d
    pcl_util = PCLUtil(args.pcl_util, verbose=args.verbose, logfile=args.log)
    env["pcl_util"] = pcl_util

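    # Gather the input data: lidar scans (LAS/PLY) and all videos matching the accepted extensions.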
    las_files = (args.input_folder/"Lidar").files("*.las")
    ply_files = (args.input_folder/"Lidar").files("*.ply")
    input_pointclouds = las_files + ply_files
    env["videos_list"] = sum((list((args.input_folder/"Videos").walkfiles('*{}'.format(ext))) for ext in args.vid_ext), [])
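    # Videos placed in the "no_groundtruth" folder are still extracted and used,
    # but they are not localized, so no ground truth will be generated for them.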
    no_gt_folder = args.input_folder/"Videos"/"no_groundtruth"
    if no_gt_folder.isdir():
        env["videos_to_localize"] = [v for v in env["videos_list"] if not str(v).startswith(no_gt_folder)]
    else:
        env["videos_to_localize"] = env["videos_list"]

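    # Step 1: convert, filter and register the lidar point clouds; the LAS centroid
    # is saved to disk so it can be reloaded when this step is skipped.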
    i = 1
    if i not in args.skip_step:
        print_step(i, "Point Cloud Preparation")
        env["pointclouds"], env["centroid"] = prepare_point_clouds(input_pointclouds, **env)
        if env["centroid"] is not None:
            np.savetxt(env["centroid_path"], env["centroid"])
    else:
        if env["centroid_path"].isfile():
            env["centroid"] = np.loadtxt(env["centroid_path"])

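    # Step 2: extract the individual pictures to the workspace
    # (or simply list the ones already there when the step is skipped).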
    i += 1
    if i not in args.skip_step:
        print_step(i, "Pictures preparation")
        env["existing_pictures"] = pi.extract_pictures_to_workspace(**env)
    else:
        env["existing_pictures"] = sum((list(env["image_path"].walkfiles('*{}'.format(ext))) for ext in env["pic_ext"]), [])

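    # Step 3: extract frames from the videos at low fps for the thorough scan
    # (or recover already extracted frame folders), then prepare one workspace per video.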
    i += 1
    if i not in args.skip_step:
        print_step(i, "Extracting Videos and selecting optimal frames for a thorough scan")
        existing_georef, env["centroid"] = pi.extract_gps_and_path(**env)
        env["videos_frames_folders"] = pi.extract_videos_to_workspace(existing_georef=existing_georef,
                                                                      fps=args.lowfps, **env)
    else:
        env["videos_frames_folders"] = {}
        by_name = {v.namebase: v for v in env["videos_list"]}
        for folder in env["video_path"].walkdirs():
            video_name = folder.basename()
            if video_name in by_name:
                env["videos_frames_folders"][by_name[video_name]] = folder
    env["videos_workspaces"] = {}
    for v, frames_folder in env["videos_frames_folders"].items():
        env["videos_workspaces"][v] = pw.prepare_video_workspace(v, frames_folder, **env)

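    # Step 4: thorough photogrammetry with COLMAP: feature extraction, vocab-tree matching,
    # mapping, and bundle adjustment of the biggest reconstructed model.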
    i += 1
    if i not in args.skip_step:
        print_step(i, "First thorough photogrammetry")
        env["thorough_recon"].makedirs_p()
        colmap.extract_features(image_list=env["video_frame_list_thorough"], more=args.more_sift_features)
        colmap.index_images(vocab_tree_output=env["indexed_vocab_tree"], vocab_tree_input=args.vocab_tree)
        colmap.match(method="vocab_tree", vocab_tree=env["indexed_vocab_tree"])
        colmap.map(output=env["thorough_recon"], multiple_models=env["multiple_models"])
        thorough_model = pi.choose_biggest_model(env["thorough_recon"])
        colmap.adjust_bundle(thorough_model, thorough_model,
                             num_iter=100, refine_extra_params=True)
    else:
        thorough_model = pi.choose_biggest_model(env["thorough_recon"])

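    # Step 5: align the reconstruction with the GPS coordinates of the georeferenced frames,
    # falling back to the unaligned model if the alignment fails.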
    i += 1
    if i not in args.skip_step:
        print_step(i, "Alignment of photogrammetric reconstruction with GPS")
        env["georef_recon"].makedirs_p()
        colmap.align_model(output=env["georef_recon"],
                           input=thorough_model,
                           ref_images=env["georef_frames_list"])
        if not (env["georef_frames_list"]/"images.bin").isfile():
            # GPS alignment failed, possibly because not enough GPS referenced images
            # Copy the original model without alignment
            (env["thorough_recon"] / "0").merge_tree(env["georef_full_recon"])
        env["georef_recon"].merge_tree(env["georef_full_recon"])
    if args.inspect_dataset:
        colmap.export_model(output=env["georef_recon"] / "georef_sparse.ply",
                            input=env["georef_recon"])
        georef_mlp = env["georef_recon"]/"georef_recon.mlp"
        mxw.create_project(georef_mlp, [env["georefrecon_ply"]])
        colmap.export_model(output=env["georef_recon"],
                            input=env["georef_recon"],
                            output_type="TXT")
        eth3d.inspect_dataset(scan_meshlab=georef_mlp,
                              colmap_model=env["georef_recon"],
                              image_path=env["image_path"])

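    # Step 6: localize each video's frames with respect to the reconstruction.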
    i += 1
    if i not in args.skip_step:
        print_step(i, "Video localization with respect to reconstruction")
        for j, v in enumerate(env["videos_to_localize"]):
            print("\n\nNow working on video {} [{}/{}]".format(v, j + 1, len(env["videos_to_localize"])))
            video_env = env["videos_workspaces"][v]
            localize_video(video_name=v,
                           video_frames_folder=env["videos_frames_folders"][v],
                           video_index=j+1,
                           step_index=i,
                           num_videos=len(env["videos_to_localize"]),
                           **video_env, **env)

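    # Step 7: densify the full georeferenced reconstruction with COLMAP
    # (undistortion, dense stereo, stereo fusion).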
    i += 1
    if i not in args.skip_step:
        print_step(i, "Full reconstruction point cloud densificitation")
        env["georef_full_recon"].makedirs_p()
        colmap.undistort(input=env["georef_full_recon"])
        colmap.dense_stereo()
        colmap.stereo_fusion(output=env["georefrecon_ply"])

    def get_matrix(path):
        '''Load the registration matrix from disk, or fall back to identity if it is missing.

        Note: we use the inverse matrix here because, in general, it is easier to register the
        reconstructed model onto the lidar one (the reconstruction has fewer points), but in the
        end we need the matrix to apply to the lidar points so that they are aligned with the
        camera positions (i.e. the inverse).'''
        if path.isfile():
            return np.linalg.inv(np.fromfile(path, sep=" ").reshape(4, 4))
        else:
            print("Error, no registration matrix can be found, identity will be used")
            return np.eye(4)
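    # Step 8: register the photogrammetric point cloud with the lidar point cloud, either
    # with ETH3D's ICP, the PCL registration tool, or an interactively provided matrix;
    # the resulting matrix is saved and used to produce the aligned MeshLab project.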
    i += 1
    if i not in args.skip_step:
        print_step(i, "Registration of photogrammetric reconstruction with respect to Lidar Point Cloud")
        if args.registration_method == "eth3d":
            # Note: ETH3D doesn't register with scale; this might not be suitable for very large areas
            mxw.add_meshes_to_project(env["lidar_mlp"], env["aligned_mlp"], [env["georefrecon_ply"]], start_index=0)
            eth3d.align_with_ICP(env["aligned_mlp"], env["aligned_mlp"], scales=5)
            mxw.remove_mesh_from_project(env["aligned_mlp"], env["aligned_mlp"], 0)
            matrix = np.linalg.inv(mxw.get_mesh(env["aligned_mlp"], index=0)[0])
            np.savetxt(env["matrix_path"], matrix)

            '''The new mlp is supposedly better than the previous one because the ICP was run
            with N+1 models instead of just N, so we replace the lidar mlp with the result of
            this registration by reversing the first transformation, which gives back an mlp
            file with the identity as its first transform matrix.'''
            mxw.apply_transform_to_project(env["aligned_mlp"], env["lidar_mlp"], matrix)
            env["global_registration_matrix"] = matrix
        else:
            if args.normals_method == "radius":
                eth3d.compute_normals(env["with_normals_path"], env["lidar_mlp"], neighbor_radius=args.normals_radius)
            else:
                eth3d.compute_normals(env["with_normals_path"], env["lidar_mlp"], neighbor_count=args.normals_neighbours)
            if args.registration_method == "simple":
                pcl_util.register_reconstruction(georef=env["georefrecon_ply"],
                                                 lidar=env["with_normals_path"],
                                                 output_matrix=env["matrix_path"],
                                                 max_distance=10)
            elif args.registration_method == "interactive":
                input("Get transformation matrix between {0} and {1} so that we should"
                      " apply it to the reconstructed point cloud to have the lidar point cloud, "
                      "and paste it in the file {2}. When done, press ENTER".format(env["with_normals_path"],
                                                                                    env["georefrecon_ply"],
                                                                                    env["matrix_path"]))
            env["global_registration_matrix"] = get_matrix(env["matrix_path"])
            mxw.apply_transform_to_project(env["lidar_mlp"], env["aligned_mlp"], env["global_registration_matrix"])
    else:
        env["global_registration_matrix"] = get_matrix(env["matrix_path"])

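    # Step 9: compute normals on the aligned lidar cloud, build the occlusion mesh with
    # COLMAP's Delaunay meshing, and optionally create splats from the point cloud.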
    i += 1
    if i not in args.skip_step:
219
        print_step(i, "Occlusion Mesh computing")
        if args.normals_method == "radius":
            eth3d.compute_normals(env["with_normals_path"], env["aligned_mlp"], neighbor_radius=args.normals_radius)
        else:
            eth3d.compute_normals(env["with_normals_path"], env["aligned_mlp"], neighbor_count=args.normals_neighbours)
        pcl_util.create_vis_file(env["georefrecon_ply"], env["with_normals_path"],
                                 resolution=args.mesh_resolution, output=env["with_normals_path"].stripext() + "_subsampled.ply")
        colmap.delaunay_mesh(env["occlusion_ply"], input_ply=env["with_normals_path"].stripext() + "_subsampled.ply")
        if args.splats:
            eth3d.create_splats(env["splats_ply"], env["with_normals_path"], env["occlusion_ply"], threshold=args.splat_threshold)

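    # Optional visual inspection of the aligned dataset, with and without occlusion geometry.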
    if args.inspect_dataset:
        eth3d.inspect_dataset(scan_meshlab=env["aligned_mlp"],
                              colmap_model=env["georef_recon"],
                              image_path=env["image_path"])
        eth3d.inspect_dataset(scan_meshlab=env["aligned_mlp"],
                              colmap_model=env["georef_recon"],
                              image_path=env["image_path"],
                              occlusions=env["occlusion_ply"],
                              splats=env["splats_ply"])

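    # Step 10: generate the ground truth data for each localized video.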
    i += 1
    if i not in args.skip_step:
        print_step(i, "Groud Truth generation")
        for j, v in enumerate(env["videos_to_localize"]):
            video_env = env["videos_workspaces"][v]

            generate_GT(video_name=v, GT_already_done=video_env["GT_already_done"],
                        video_index=j+1,
                        num_videos=len(env["videos_to_localize"]),
                        metadata=video_env["metadata"],
                        **video_env["output_env"], **env)


if __name__ == '__main__':
    main()