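"""Localize video frames within a georeferenced COLMAP model and generate ground truth.

Three entry points:
 - localize_video: localize every frame of a video inside the thorough
   reconstruction, first at low fps, then chunk by chunk at full fps.
 - generate_GT: create ground-truth depth maps with ETH3D for a localized
   video and convert them to KITTI format, with optional visualization.
 - generate_GT_individual_pictures: same ground-truth generation, but for
   individual still pictures instead of video frames.
"""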
import numpy as np
from path import Path

from cli_utils import print_step
from colmap_util.read_model import read_images_text, read_images_binary
from filter_colmap_model import filter_colmap_model
import pandas as pd
import add_video_to_db as avtd
import extract_pictures_from_model as epfm
import convert_dataset as cd
import generate_sky_masks as gsm
import meshlab_xml_writer as mxw


def is_video_in_model(video_name, colmap_model, metadata):
    # Check that at least one frame of the video was localized in the COLMAP
    # model. `metadata` is the video's metadata DataFrame, whose "db_id"
    # column stores the database id of each frame.
    mapped_images_ids = read_images_binary(colmap_model / "images.bin").keys()
    video_image_ids = metadata["db_id"]
    return sum(video_image_ids.isin(mapped_images_ids)) > 0


def sift_and_match(colmap, more_sift_features, indexed_vocab_tree, image_list, max_num_matches, **env):
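    # SiftGPU can fail non-deterministically, so feature extraction and
    # matching are retried up to 10 times; if every attempt fails, the
    # function returns without raising.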
    tries = 0
    while tries < 10:
        try:
            colmap.extract_features(image_list=image_list, more=more_sift_features)
            colmap.match(method="sequential", vocab_tree=indexed_vocab_tree, max_num_matches=max_num_matches)
        except Exception:
            # A failure here usually means SiftGPU crashed; retry from scratch.
            print("Error with COLMAP, will retry")
            tries += 1
        else:
            return


def error_empty():
    print("Error, empty localization")
    return
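    # Disabled fallback, kept for reference: it tried to build a model from the
    # video frames themselves when localization found nothing, then to register
    # it back into the full reconstruction.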
    # print("will try map from video")
    # colmap.db = lowfps_db
    # colmap.map(output_model=video_output_model, start_frame_id=added_frames[int(len(added_frames)/2)])
    # colmap.align_model(output_model=video_output_model,
    #                    input_model=video_output_model / "0",
    #                    ref_images=current_video_folder / "georef.txt")
    # colmap.db = full_db
    # colmap.register_images(output_model=video_output_model, input_model=video_output_model)
    # colmap.adjust_bundle(output_model=video_output_model, input_model=video_output_model)
    # empty = not evfm.extract_video(input_model=video_output_model,
    #                                output_model=final_output_model,
    #                                video_metadata_path=current_metadata,
    #                                output_format=".txt")
    # if empty:
    #     print("Error could not map anything, aborting this video")
    #     continue


def localize_video(video_name, video_frames_folder, thorough_db, metadata_path, lowfps_image_list_path, lowfps_db,
                   chunk_image_list_paths, chunk_dbs,
                   colmap_models_root, full_model, lowfps_model, chunk_models, final_model,
                   output_env, eth3d, colmap, ffmpeg, pcl_util,
                   step_index=None, video_index=None, num_videos=None, already_localized=False,
                   save_space=False, triangulate=False, **env):

    def print_step_pv(step_number, step_name):
        if step_index is not None and video_index is not None and num_videos is not None:
            progress = "{}/{}".format(video_index, num_videos)
            substep = "{}.{}".format(step_index, video_index)
        else:
            progress = ""
            substep = ""
        print_step("{}.{}".format(substep, step_number),
                   "[Video {}, {}] \n {}".format(video_name.basename(),
                                                 progress,
                                                 step_name))

    def clean_workspace():
        if save_space:
            with open(env["video_frame_list_thorough"], "r") as f:
                files_to_keep = [Path(line.rstrip("\n")) for line in f.readlines()]
            with open(lowfps_image_list_path, "r") as f:
                files_to_keep += [Path(line.rstrip("\n")) for line in f.readlines()]
            files_to_keep += [file.relpath(env["colmap_img_root"]) for file in [metadata_path,
                                                                                lowfps_image_list_path,
                                                                                *chunk_image_list_paths]]
            for file in sorted(video_frames_folder.files()):
                if file.relpath(env["colmap_img_root"]) not in files_to_keep:
                    file.remove()
            colmap_models_root.rmtree_p()

    # Skip this video entirely if it has already been localized
    if already_localized:
        print("Already localized.")
        return

    i_pv = 1

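    # Work on a copy of the thorough-scan database so that the original stays untouched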
    thorough_db.copy(lowfps_db)
    colmap.db = lowfps_db
    metadata = pd.read_csv(metadata_path)

    print_step_pv(i_pv, "Full video extraction")
    if save_space:
        ffmpeg.extract_images(video_name, video_frames_folder)
    else:
        print("Already Done.")

    i_pv += 1
    print_step_pv(i_pv, "Sky mask generation")
    gsm.process_folder(folder_to_process=video_frames_folder, **env)

    i_pv += 1
    print_step_pv(i_pv, "Complete photogrammetry with video at {} fps".format(env["lowfps"]))
    avtd.add_to_db(lowfps_db, metadata, lowfps_image_list_path)

    sift_and_match(colmap, image_list=lowfps_image_list_path, **env)

    lowfps_model.makedirs_p()
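    # Map the low-fps frames, starting from the georeferenced reconstruction,
    # so that the video is localized within the existing model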
    colmap.map(output=lowfps_model, input=env["georef_recon"])
    if not is_video_in_model(video_name, lowfps_model, metadata):
        print("Error, video was not localized")
        error_empty()
        clean_workspace()
        return

    # When colmap map is called, the model gets normalized, so it has to be georegistered again.
    # This can be done either with model_aligner or with model_merger.
    # Additionally, the new positions are added to a full model that will be used
    # for lidar registration and for occlusion mesh computation.
    colmap.merge_models(output=env["georef_full_recon"], input1=env["georef_full_recon"], input2=lowfps_model)
    colmap.merge_models(output=lowfps_model, input1=env["georef_recon"], input2=lowfps_model)
    # colmap.align_model(output=lowfps_model,
    #                    input=lowfps_model,
    #                    ref_images=env["georef_frames_list"])

    i_pv += 1
    print_step_pv(i_pv, "Localizing remaining frames")

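    # The full-fps frames are localized chunk by chunk: each chunk gets its own
    # database (a copy of the low-fps one), is registered against the low-fps
    # model and bundle-adjusted; the chunk models are then merged into one full model.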
    for k, (list_path, full_db, chunk_model) in enumerate(zip(chunk_image_list_paths,
                                                              chunk_dbs,
                                                              chunk_models)):
        print("\nLocalizing Chunk {}/{}".format(k + 1, len(chunk_dbs)))
        chunk_model.makedirs_p()
        lowfps_db.copy(full_db)
        colmap.db = full_db
        avtd.add_to_db(full_db, metadata, frame_list_path=list_path)
        sift_and_match(colmap, image_list=list_path, **env)
        colmap.register_images(output=chunk_model, input=lowfps_model)
        colmap.adjust_bundle(output=chunk_model, input=chunk_model)
    chunk_models[0].merge_tree(full_model)
    if len(chunk_models) > 1:
        for chunk in chunk_models[1:]:
            colmap.merge_models(output=full_model, input1=full_model, input2=chunk)
    final_model.makedirs_p()
    empty = not epfm.extract_pictures(input=full_model,
                                      output=final_model,
                                      picture_list=metadata["image_path"].values,
                                      output_format=".bin" if triangulate else ".txt")

    if empty:
        error_empty()
        clean_workspace()
        return

    if triangulate:
        i_pv += 1
        print_step_pv(i_pv, "Re-Alignment of triangulated points with Lidar point cloud")

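        # Triangulate a sparse cloud from the localized frames, export it to PLY,
        # and register it against the lidar cloud; the resulting matrix.txt is the
        # per-video registration that generate_GT can use instead of the global one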
        colmap.triangulate_points(final_model, final_model)
        colmap.export_model(final_model, final_model, output_type="TXT")
        ply_name = final_model / "georef_{}.ply".format(video_name.stem)
        matrix_name = final_model / "matrix.txt"
        colmap.export_model(ply_name, final_model, output_type="PLY")
        pcl_util.register_reconstruction(georef=ply_name, lidar=env["lidar_ply"],
                                         output_matrix=matrix_name, output_cloud=env["lidar_ply"],
                                         max_distance=10)

    (final_model / "images.txt").rename(final_model / "images_raw.txt")

    output_env["video_frames_folder"].makedirs_p()
    video_frames_folder.merge_tree(output_env["video_frames_folder"])

    output_env["model_folder"].makedirs_p()
    colmap_models_root.merge_tree(output_env["model_folder"])

    clean_workspace()


def generate_GT(video_name, raw_output_folder, images_root_folder, video_frames_folder,
                viz_folder, kitti_format_folder, metadata_path, interpolated_frames_list,
                final_model, aligned_mlp, global_registration_matrix,
                occlusion_ply, splats_ply,
                eth3d, colmap, filter_models=True,
                step_index=None, video_index=None, num_videos=None, GT_already_done=False,
                save_space=False, inspect_dataset=False, **env):

    def print_step_pv(step_number, step_name):
        if step_index is not None and video_index is not None and num_videos is not None:
            progress = "{}/{}".format(video_index, num_videos)
            substep = "{}.{}".format(step_index, video_index)
        else:
            progress = ""
            substep = ""
        print_step("{}.{}".format(substep, step_number),
                   "[Video {}, {}] \n {}".format(video_name.basename(),
                                                 progress,
                                                 step_name))

    if GT_already_done:
        return
    if not final_model.isdir():
        print("Video not localized, re-run the script without skipping the localization step")
        return

    print("Creating GT on video {} [{}/{}]".format(video_name.basename(), video_index, num_videos))
    i_pv = 1
    metadata = pd.read_csv(metadata_path)
    if filter_models:
        print_step_pv(i_pv, "Filtering model to have continuous localization")
        interpolated_frames = filter_colmap_model(input_images_colmap=final_model / "images_raw.txt",
                                                  output_images_colmap=final_model / "images.txt",
                                                  metadata=metadata, **env)
        with open(interpolated_frames_list, "w") as f:
            f.write("\n".join(interpolated_frames) + "\n")
        i_pv += 1
    else:
        (final_model / "images_raw.txt").copy(final_model / "images.txt")
        interpolated_frames = []

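    # With fewer than two localized frames, there is nothing usable for this
    # video, so stop here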
    model_length = len(read_images_text(final_model / "images.txt"))
    if model_length < 2:
        return

    '''
    In case the reconstructed model is only locally good, a video-specific
    transformation matrix may be present in the final model folder, and might
    work better than the global registration matrix
    '''
    specific_matrix_path = final_model / "matrix.txt"
    if specific_matrix_path.isfile():
        registration_matrix = np.linalg.inv(np.fromfile(specific_matrix_path, sep=" ").reshape(4, 4))
        # Compose the two rigid transforms with a matrix product (element-wise
        # multiplication would be wrong here): the adjustment maps the globally
        # aligned frame to the per-video aligned one
        adjustment_matrix = registration_matrix @ np.linalg.inv(global_registration_matrix)
        final_mlp = final_model / "aligned.mlp"
        final_occlusions = final_model / "occlusions.mlp"
        final_splats = final_model / "splats.mlp"
        mxw.apply_transform_to_project(aligned_mlp, final_mlp, adjustment_matrix)
        mxw.create_project(final_occlusions, [occlusion_ply], transforms=[adjustment_matrix])
        mxw.create_project(final_splats, [splats_ply], transforms=[adjustment_matrix])

    else:
        final_mlp = aligned_mlp
        final_occlusions = occlusion_ply
        final_splats = splats_ply

    if inspect_dataset:
        eth3d.image_path = images_root_folder
        # Do 3 inspections:
        #  - inspection with the reconstructed cloud
        #  - inspection with the lidar cloud, without occlusion
        #  - inspection with the lidar cloud and occlusion models
        # Careful, very RAM-demanding for long sequences!
        print("THIRD DATASET INSPECTION")
        print("Inspection of localized video frames "
              "w.r.t. Dense reconstruction")
        georef_mlp = env["georef_recon"] / "georef_recon.mlp"
        eth3d.inspect_dataset(georef_mlp, final_model)
        print("Inspection of localized video frames "
              "w.r.t. Aligned Lidar Point Cloud")
        eth3d.inspect_dataset(final_mlp, final_model)
        print("Inspection of localized video frames "
              "w.r.t. Aligned Lidar Point Cloud and Occlusion Meshes")
        eth3d.inspect_dataset(final_mlp, final_model,
                              final_occlusions, final_splats)

    i_pv += 1
    print_step_pv(i_pv, "Creating Ground truth data with ETH3D")

    eth3d.create_ground_truth(final_mlp, final_model, raw_output_folder,
                              final_occlusions, final_splats)
    viz_folder.makedirs_p()
    kitti_format_folder.makedirs_p()

    i_pv += 1
    print_step_pv(i_pv, "Convert to KITTI format and create video with GT visualization")

    cd.convert_dataset(final_model,
                       raw_output_folder / "ground_truth_depth" / video_name.stem,
                       images_root_folder,
                       raw_output_folder / "occlusion_depth" / video_name.stem,
                       kitti_format_folder, viz_folder,
                       metadata, interpolated_frames,
                       visualization=True, video=True, downscale=4, threads=8, **env)
    if filter_models:
        interpolated_frames_list.copy(kitti_format_folder)
    if save_space:
        (raw_output_folder / "occlusion_depth" / video_name.stem).rmtree_p()

    return


def generate_GT_individual_pictures(colmap_img_root, individual_pictures, raw_output_folder,
                                    converted_output_folder, input_colmap_model,
                                    aligned_mlp, relpath,
                                    occlusion_ply, splats_ply,
                                    eth3d, colmap, step_index=None,
                                    save_space=False, **env):
    def print_step_pv(step_number, step_name):
        if step_index is not None:
            print_step("{}.{}".format(step_index, step_number), step_name)
        else:
            print_step(step_number, step_name)

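    # Same flow as generate_GT, but for still pictures: copy them to the output
    # dataset, extract a dedicated COLMAP model, create ETH3D ground truth, and
    # convert everything to KITTI format.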
    i_pv = 1
    print_step_pv(i_pv, "Copy individual images to output dataset {}".format(raw_output_folder))
    for p in individual_pictures:
        output_path = raw_output_folder / "images" / p
        output_path.parent.makedirs_p()
        (colmap_img_root / p).copy(output_path)

    i_pv += 1
    print_step_pv(i_pv, "Extract individual images to dedicated COLMAP model")
    pictures_colmap_model = raw_output_folder / "models" / "individual_pictures"
    pictures_colmap_model.makedirs_p()
    epfm.extract_pictures(input=input_colmap_model,
                          output=pictures_colmap_model,
                          picture_list=individual_pictures,
                          output_format=".txt")

    i_pv += 1
    print_step_pv(i_pv, "Creating Ground truth data with ETH3D")
    eth3d.create_ground_truth(aligned_mlp,
                              pictures_colmap_model,
                              raw_output_folder,
                              occlusion_ply,
                              splats_ply)
    viz_folder = converted_output_folder / "visualization" / relpath
    viz_folder.makedirs_p()
    kitti_format_folder = converted_output_folder / "KITTI" / relpath
    kitti_format_folder.makedirs_p()

    i_pv += 1
    print_step_pv(i_pv, "Convert to KITTI format and create pictures with GT visualization")
    cd.convert_dataset(pictures_colmap_model,
                       raw_output_folder / "ground_truth_depth" / "individual_pictures",
                       raw_output_folder / "images",
                       raw_output_folder / "occlusion_depth" / "individual_pictures",
                       kitti_format_folder, viz_folder,
                       images_list=individual_pictures,
                       visualization=True, video=False, downscale=4, threads=8, **env)
    if save_space:
        (raw_output_folder / "occlusion_depth" / "individual_pictures").rmtree_p()