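"""Localize video frames in a COLMAP reconstruction and build ground truth with ETH3D.

Two entry points:
 - localize_video: registers the frames of one video into the thorough
   reconstruction (low-fps mapping first, then chunked registration of the
   remaining frames), optionally triangulating and registering the result
   against the lidar point cloud.
 - generate_GT: filters the localized trajectory, applies the lidar
   registration and uses ETH3D to render ground truth depth maps, which are
   finally converted to KITTI format.
"""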
import numpy as np
from path import Path

from cli_utils import print_step
from colmap_util.read_model import read_images_text, read_images_binary
from filter_colmap_model import filter_colmap_model
import pandas as pd
import add_video_to_db as avtd
import extract_video_from_model as evfm
import convert_dataset as cd
import generate_sky_masks as gsm
import meshlab_xml_writer as mxw


def is_video_in_model(video_name, colmap_model, metadata):
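    """Check that at least one frame of the video was mapped by COLMAP.

    Frames are identified by their database ids: the "db_id" column of the
    metadata csv is matched against the image ids of the binary model.
    """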

    mapped_images_ids = read_images_binary(colmap_model / "images.bin").keys()
    video_image_ids = pd.read_csv(metadata)["db_id"]
    return video_image_ids.isin(mapped_images_ids).any()


def sift_and_match(colmap, more_sift_features, indexed_vocab_tree, image_list, max_num_matches, **env):
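    """Extract SIFT features and match them with COLMAP's sequential matcher.

    SiftGPU can fail sporadically, so the whole extract/match step is retried
    up to 10 times before giving up.
    """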
    tries = 0
    while tries < 10:
        try:
            colmap.extract_features(image_list=image_list, more=more_sift_features)
            colmap.match(method="sequential", vocab_tree=indexed_vocab_tree, max_num_matches=max_num_matches)
        except Exception:
            # A failure here is almost always SiftGPU crashing; log and retry.
            print("Error with COLMAP, retrying")
            tries += 1
        else:
            return
    # If we get here, all 10 attempts failed; give up.


def error_empty():
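    """Report a localization that registered no frame at all.

    The commented-out code below is a disabled fallback that mapped the video
    on its own before registering it back into the full model.
    """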
    print("Error, empty localization")
    return
    # print("will try map from video")
    # colmap.db = lowfps_db
    # colmap.map(output_model=video_output_model, start_frame_id=added_frames[int(len(added_frames)/2)])
    # colmap.align_model(output_model=video_output_model,
    #                    input_model=video_output_model / "0",
    #                    ref_images=current_video_folder / "georef.txt")
    # colmap.db = full_db
    # colmap.register_images(output_model=video_output_model, input_model=video_output_model)
    # colmap.adjust_bundle(output_model=video_output_model, input_model=video_output_model)
    # empty = not evfm.extract_video(input_model=video_output_model,
    #                                output_model=final_output_model,
    #                                video_metadata_path=current_metadata,
    #                                output_format=".txt")
    # if empty:
    #     print("Error could not map anything, aborting this video")
    #     continue


def localize_video(video_name, video_frames_folder, thorough_db, metadata, lowfps_image_list_path, lowfps_db,
                   chunk_image_list_paths, chunk_dbs,
                   colmap_models_root, full_model, lowfps_model, chunk_models, final_model,
                   output_env, eth3d, colmap, ffmpeg, pcl_util,
                   step_index=None, video_index=None, num_videos=None, already_localized=False,
                   save_space=False, triangulate=False, **env):
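    """Localize all frames of one video inside the thorough reconstruction.

    The video is first mapped at low fps on top of the georeferenced model,
    then the remaining frames are registered chunk by chunk and merged into
    the full reconstruction before being extracted to final_model. With
    triangulate=True, the 3D points are re-triangulated and the model is
    registered against the lidar point cloud.
    """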

    def print_step_pv(step_number, step_name):
        if step_index is not None and video_index is not None and num_videos is not None:
            progress = "{}/{}".format(video_index, num_videos)
            substep = "{}.{}".format(step_index, video_index)
        else:
            progress = ""
            substep = ""
        print_step("{}.{}".format(substep, step_number),
                   "[Video {}, {}] \n {}".format(video_name.basename(),
                                                 progress,
                                                 step_name))

    def clean_workspace():
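        """If save_space is set, delete extracted frames that are not listed
        in any image list, along with the intermediate colmap models."""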
        if save_space:
            with open(env["video_frame_list_thorough"], "r") as f:
                files_to_keep = [Path(path.split("\n")[0]) for path in f.readlines()]
            with open(lowfps_image_list_path, "r") as f:
                files_to_keep += [Path(path.split("\n")[0]) for path in f.readlines()]
            files_to_keep += [file.relpath(env["image_path"]) for file in [metadata,
                                                                           lowfps_image_list_path,
                                                                           *chunk_image_list_paths]]
            for file in sorted(video_frames_folder.files()):
                if file.relpath(env["image_path"]) not in files_to_keep:
                    file.remove()
            colmap_models_root.rmtree_p()

    # Skip the whole localization if it has already been done
    if already_localized:
        print("Already done.")
        return

    i_pv = 1

    thorough_db.copy(lowfps_db)
    colmap.db = lowfps_db

    print_step_pv(i_pv, "Full video extraction")
    if save_space:
        # In save_space mode the frames are not kept on disk, so (re-)extract them
        ffmpeg.extract_images(video_name, video_frames_folder)
    else:
        print("Already Done.")

    i_pv += 1
    print_step_pv(i_pv, "Sky mask generation")
    gsm.process_folder(folder_to_process=video_frames_folder, **env)

    i_pv += 1
    print_step_pv(i_pv, "Complete photogrammetry with video at {} fps".format(env["lowfps"]))
    avtd.add_to_db(lowfps_db, metadata, lowfps_image_list_path)

    sift_and_match(colmap, image_list=lowfps_image_list_path, **env)

    lowfps_model.makedirs_p()
    colmap.map(output=lowfps_model, input=env["georef_recon"])
    if not is_video_in_model(video_name, lowfps_model, metadata):
        print("Error, video was not localized")
        error_empty()
        clean_workspace()
        return

    # When colmap.map is called, the model gets normalized, so we have to georegister it again.
    # This can be done either with model_aligner or with model_merger.
    # Additionally, we add the new poses to a full model that will later be used for
    # lidar registration and occlusion mesh computation.
    colmap.merge_models(output=env["georef_full_recon"], input1=env["georef_full_recon"], input2=lowfps_model)
    colmap.merge_models(output=lowfps_model, input1=env["georef_recon"], input2=lowfps_model)
    # colmap.align_model(output=lowfps_model,
    #                    input=lowfps_model,
    #                    ref_images=env["georef_frames_list"])

    i_pv += 1
    print_step_pv(i_pv, "Localizing remaining frames")

    for k, (list_path, full_db, chunk_model) in enumerate(zip(chunk_image_list_paths,
                                                              chunk_dbs,
                                                              chunk_models)):
        print("\nLocalizing Chunk {}/{}".format(k + 1, len(chunk_dbs)))
        chunk_model.makedirs_p()
        lowfps_db.copy(full_db)
        colmap.db = full_db
        avtd.add_to_db(full_db, metadata, frame_list_path=list_path)
        sift_and_match(colmap, image_list=list_path, **env)
        colmap.register_images(output=chunk_model, input=lowfps_model)
        colmap.adjust_bundle(output=chunk_model, input=chunk_model)
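    # Merge the per-chunk models into a single model: the first chunk is
    # copied as-is, the following ones are merged in with colmap.merge_models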
    chunk_models[0].merge_tree(full_model)
    if len(chunk_models) > 1:
        for chunk in chunk_models[1:]:
            colmap.merge_models(output=full_model, input1=full_model, input2=chunk)
    final_model.makedirs_p()
    empty = not evfm.extract_video(input=full_model,
                                   output=final_model,
                                   video_metadata_path=metadata,
                                   output_format=".bin" if triangulate else ".txt")

    if empty:
        error_empty()
        clean_workspace()
        return

    if triangulate:
        i_pv += 1
        print_step_pv(i_pv, "Re-Alignment of triangulated points with Lidar point cloud")

        colmap.triangulate_points(final_model, final_model)
        colmap.export_model(final_model, final_model, output_type="TXT")
        ply_name = final_model / "georef_{}.ply".format(video_name.stem)
        matrix_name = final_model / "matrix.txt"
        colmap.export_model(ply_name, final_model, output_type="PLY")
        pcl_util.register_reconstruction(georef=ply_name, lidar=env["lidar_ply"],
                                         output_matrix=matrix_name, output_cloud=env["lidar_ply"],
                                         max_distance=10)

    (final_model / "images.txt").rename(final_model / "images_raw.txt")

    output_env["video_frames_folder"].makedirs_p()
    video_frames_folder.merge_tree(output_env["video_frames_folder"])

    output_env["model_folder"].makedirs_p()
    colmap_models_root.merge_tree(output_env["model_folder"])

    clean_workspace()


def generate_GT(video_name, raw_output_folder, images_root_folder, video_frames_folder,
                viz_folder, kitti_format_folder, metadata, interpolated_frames_list,
                final_model, aligned_mlp, global_registration_matrix,
                occlusion_ply, splats_ply,
                eth3d, colmap, filter_models=True,
                step_index=None, video_index=None, num_videos=None, GT_already_done=False,
                save_space=False, inspect_dataset=False, **env):
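    """Generate ground truth depth for a localized video with ETH3D.

    The raw trajectory is optionally filtered to be continuous, aligned with
    the lidar meshes (using a per-video matrix when one exists), rendered
    into occluded and unoccluded depth maps by ETH3D, and finally converted
    to KITTI format along with a visualization video.
    """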

    def print_step_pv(step_number, step_name):
        if step_index is not None and video_index is not None and num_videos is not None:
            progress = "{}/{}".format(video_index, num_videos)
            substep = "{}.{}".format(step_index, video_index)
        else:
            progress = ""
            substep = ""
        print_step("{}.{}".format(substep, step_number),
                   "[Video {}, {}] \n {}".format(video_name.basename(),
                                                 progress,
                                                 step_name))

    if GT_already_done:
        return
    if not final_model.isdir():
        print("Video not localized. Rerun the script without skipping the previous step.")
        return

    print("Creating GT on video {} [{}/{}]".format(video_name.basename(), video_index, num_videos))
    i_pv = 1
    if filter_models:
        print_step_pv(i_pv, "Filtering model to have continuous localization")
        interpolated_frames = filter_colmap_model(input_images_colmap=final_model / "images_raw.txt",
                                                  output_images_colmap=final_model / "images.txt",
                                                  metadata_path=metadata, **env)
        with open(interpolated_frames_list, "w") as f:
            f.write("\n".join(interpolated_frames) + "\n")
        i_pv += 1
    else:
        (final_model / "images_raw.txt").copy(final_model / "images.txt")
        interpolated_frames = []

    # With fewer than 2 localized frames there is nothing to generate
    model_length = len(read_images_text(final_model / "images.txt"))
    if model_length < 2:
        return

    '''
    In case the reconstructed model is only locally good, there may be a specific
    transformation matrix per video in the final model folder, which might work
    better than the global registration_matrix
    '''
    specific_matrix_path = final_model / "matrix.txt"
    if specific_matrix_path.isfile():
        registration_matrix = np.linalg.inv(np.fromfile(specific_matrix_path, sep=" ").reshape(4, 4))
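        # The meshes below are already transformed by the global registration,
        # so compose the per-video registration with the inverse of the global
        # one to get the residual correction to apply on top of them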
        adjustment_matrix = registration_matrix @ np.linalg.inv(global_registration_matrix)
        final_mlp = final_model / "aligned.mlp"
        final_occlusions = final_model / "occlusions.mlp"
        final_splats = final_model / "splats.mlp"
        mxw.apply_transform_to_project(aligned_mlp, final_mlp, adjustment_matrix)
        mxw.create_project(final_occlusions, [occlusion_ply], transforms=[adjustment_matrix])
        mxw.create_project(final_splats, [splats_ply], transforms=[adjustment_matrix])

    else:
        final_mlp = aligned_mlp
        final_occlusions = occlusion_ply
        final_splats = splats_ply

    if inspect_dataset:
        eth3d.image_path = images_root_folder
        # Do 3 inspections:
        #  - inspection with reconstructed cloud
        #  - inspection with lidar cloud without occlusion
        #  - inspection with lidar cloud and occlusion models
        # Careful, very RAM demanding for long sequences!
        georef_mlp = env["georef_recon"] / "georef_recon.mlp"
        eth3d.inspect_dataset(georef_mlp, final_model)
261
        eth3d.inspect_dataset(final_mlp, final_model)
        eth3d.inspect_dataset(final_mlp, final_model,
                              final_occlusions, final_splats)

    print_step_pv(i_pv, "Creating Ground truth data with ETH3D")

    eth3d.create_ground_truth(final_mlp, final_model, raw_output_folder,
                              final_occlusions, final_splats)
    viz_folder.makedirs_p()
    kitti_format_folder.makedirs_p()

    i_pv += 1
    print_step_pv(i_pv, "Convert to KITTI format and create video with GT visualization")

    cd.convert_dataset(final_model,
                       raw_output_folder / "ground_truth_depth" / video_name.stem,
                       images_root_folder,
                       raw_output_folder / "occlusion_depth" / video_name.stem,
                       kitti_format_folder, viz_folder,
                       metadata, interpolated_frames,
                       visualization=True, video=True, downscale=4, threads=8, **env)
    interpolated_frames_list.copy(kitti_format_folder)
    if save_space:
        (raw_output_folder / "occlusion_depth" / video_name.stem).rmtree_p()

    return