video_localization.py
import numpy as np
from path import Path

from cli_utils import print_step
from colmap_util.read_model import read_images_text, read_images_binary
from filter_colmap_model import filter_colmap_model
import pandas as pd
import add_video_to_db as avtd
import extract_video_from_model as evfm
import convert_dataset as cd
import generate_sky_masks as gsm
import meshlab_xml_writer as mxw


def is_video_in_model(video_name, colmap_model, metadata):
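    """Return True if at least one frame of the video was registered in the model.

    Frame database ids are read from the metadata CSV ("db_id" column) and compared
    with the image ids present in the model's images.bin file.
    """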
    mapped_images_ids = read_images_binary(colmap_model / "images.bin").keys()
    video_image_ids = pd.read_csv(metadata)["db_id"]
    return video_image_ids.isin(mapped_images_ids).any()


def sift_and_match(colmap, more_sift_features, indexed_vocab_tree, image_list, **env):
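    """Run COLMAP feature extraction and sequential matching, retrying on failure.

    SIFT GPU can crash sporadically; extraction and matching are simply relaunched
    until both succeed.
    """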
    while True:
        try:
            colmap.extract_features(image_list=image_list, more=more_sift_features)
            colmap.match(method="sequential", vocab_tree=indexed_vocab_tree)
        except Exception:
            # A failure here usually means SIFT GPU crashed; retry until it succeeds
            print("Error with COLMAP, will retry")
        else:
            return


def error_empty():
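    """Report a localization that registered no frame of the video.

    The commented-out block below is a legacy fallback that tried to map the video
    on its own and merge it back; it is kept for reference.
    """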
    print("Error, empty localization")
    return
    # print("will try map from video")
    # colmap.db = lowfps_db
    # colmap.map(output_model=video_output_model, start_frame_id=added_frames[int(len(added_frames)/2)])
    # colmap.align_model(output_model=video_output_model,
    #                    input_model=video_output_model / "0",
    #                    ref_images=current_video_folder / "georef.txt")
    # colmap.db = full_db
    # colmap.register_images(output_model=video_output_model, input_model=video_output_model)
    # colmap.adjust_bundle(output_model=video_output_model, input_model=video_output_model)
    # empty = not evfm.extract_video(input_model=video_output_model,
    #                                output_model=final_output_model,
    #                                video_metadata_path=current_metadata,
    #                                output_format=".txt")
    # if empty:
    #     print("Error could not map anything, aborting this video")
    #     continue


def localize_video(video_name, video_frames_folder, thorough_db, metadata, lowfps_image_list_path, lowfps_db,
                   chunk_image_list_paths, chunk_dbs,
                   colmap_models_root, full_model, lowfps_model, chunk_models, final_model,
                   output_env, eth3d, colmap, ffmpeg, pcl_util,
                   video_index=None, num_videos=None, already_localized=False, filter_model=True,
                   save_space=False, triangulate=False, **env):
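    """Localize all frames of a video inside the georeferenced COLMAP model.

    The video is first mapped at low fps on top of the thorough reconstruction and
    merged back into the georeferenced models. The remaining frames are then
    registered chunk by chunk, and the resulting per-video model is optionally
    triangulated, realigned with the lidar point cloud, and filtered for continuous
    localization before being copied to the output folders.
    """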

    def print_step_pv(step_number, step_name):
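        """Print a numbered step header with the video name and overall progress."""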
        if video_index is not None and num_videos is not None:
            progress = "{}/{}".format(video_index, num_videos)
            substep = ".{}".format(video_index)
        else:
            progress = ""
            substep = ""
        print_step("6{}.{}".format(substep, step_number),
                   "[Video {}, {}] \n {}".format(video_name.basename(),
                                                 progress,
                                                 step_name))

    def clean_workspace():
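        """Remove extracted frames and intermediate models when save_space is set,
        keeping only the frames listed in the thorough and low fps image lists."""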
        if save_space:
            with open(env["video_frame_list_thorough"], "r") as f:
                files_to_keep = [Path(line.strip()) for line in f]
            with open(lowfps_image_list_path, "r") as f:
                files_to_keep += [Path(line.strip()) for line in f]
            files_to_keep += [file.relpath(env["image_path"]) for file in [metadata,
                                                                           lowfps_image_list_path,
                                                                           *chunk_image_list_paths]]
            for file in sorted(video_frames_folder.files()):
                if file.relpath(env["image_path"]) not in files_to_keep:
                    file.remove()
            colmap_models_root.rmtree_p()

    # Skip everything if the video has already been localized
    if already_localized:
        print("Video already localized, skipping")
        return

    i_pv = 1

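    # The low fps database starts as a copy of the thorough-scan database, so the new
    # video frames can be matched against the already reconstructed images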
    thorough_db.copy(lowfps_db)
    colmap.db = lowfps_db

    print_step_pv(i_pv, "Full video extraction")
    if save_space:
        # Frames were removed by a previous cleanup to save space, extract them again
        ffmpeg.extract_images(video_name, video_frames_folder)
    else:
        print("Already done.")

    i_pv += 1
    print_step_pv(i_pv, "Sky mask generation")
    gsm.process_folder(folder_to_process=video_frames_folder, **env)

    i_pv += 1
    print_step_pv(i_pv, "Complete photogrammetry with video at {} fps".format(env["lowfps"]))
    avtd.add_to_db(lowfps_db, metadata, lowfps_image_list_path)

    sift_and_match(colmap, image_list=lowfps_image_list_path, **env)

    lowfps_model.makedirs_p()
    colmap.map(output=lowfps_model, input=env["georef_recon"])
    if not is_video_in_model(video_name, lowfps_model, metadata):
        error_empty()
        clean_workspace()
        return

    # When colmap map is called, the model is normalized, so we have to georegister it again.
    # This can be done either with model_aligner or with model_merger.
    # Additionally, we add the new positions to a full model that will be used for lidar
    # registration and occlusion mesh computation.
    colmap.merge_models(output=env["georef_full_recon"], input1=env["georef_full_recon"], input2=lowfps_model)
    colmap.merge_models(output=lowfps_model, input1=env["georef_recon"], input2=lowfps_model)
    # colmap.align_model(output=lowfps_model,
    #                    input=lowfps_model,
    #                    ref_images=env["georef_frames_list"])

    i_pv += 1
    print_step_pv(i_pv, "Localizing remaining frames")

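    # Each chunk works on its own copy of the low fps database and registers its
    # frames against the low fps model, so that every chunk shares the same reference
    # before being merged back into a single full model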
    for k, (list_path, chunk_db, chunk_model) in enumerate(zip(chunk_image_list_paths,
                                                               chunk_dbs,
                                                               chunk_models)):
        print("\nLocalizing Chunk {}/{}".format(k + 1, len(chunk_dbs)))
        chunk_model.makedirs_p()
        lowfps_db.copy(chunk_db)
        colmap.db = chunk_db
        avtd.add_to_db(chunk_db, metadata, frame_list_path=list_path)
        sift_and_match(colmap, image_list=list_path, **env)
        colmap.register_images(output=chunk_model, input=lowfps_model)
        colmap.adjust_bundle(output=chunk_model, input=chunk_model)
    chunk_models[0].merge_tree(full_model)
    if len(chunk_models) > 1:
        for chunk in chunk_models[1:]:
            colmap.merge_models(output=full_model, input1=full_model, input2=chunk)
    final_model.makedirs_p()
    empty = not evfm.extract_video(input=full_model,
                                   output=final_model,
                                   video_metadata_path=metadata,
                                   output_format=".bin" if triangulate else ".txt")

    if empty:
        error_empty()
        clean_workspace()
        return

    if triangulate:
        i_pv += 1
        print_step_pv(i_pv, "Re-Alignment of triangulated points with Lidar point cloud")

        colmap.triangulate_points(final_model, final_model)
        colmap.export_model(final_model, final_model, output_type="TXT")
        ply_name = final_model / "georef_{}.ply".format(video_name.namebase)
        matrix_name = final_model / "matrix.txt"
        colmap.export_model(ply_name, final_model, output_type="PLY")
        pcl_util.register_reconstruction(georef=ply_name, lidar=env["lidar_ply"],
                                         output_matrix=matrix_name, output_cloud=env["lidar_ply"],
                                         max_distance=10)

    if filter_model:
        i_pv += 1
        print_step_pv(i_pv, "Filtering model to have continuous localization")
        (final_model / "images.txt").rename(final_model / "images_raw.txt")
        interpolated_frames = filter_colmap_model(input_images_colmap=final_model / "images_raw.txt",
                                                  output_images_colmap=final_model / "images.txt",
                                                  metadata_path=metadata, **env)

    output_env["video_frames_folder"].makedirs_p()
    video_frames_folder.merge_tree(output_env["video_frames_folder"])

    output_env["model_folder"].makedirs_p()
    colmap_models_root.merge_tree(output_env["model_folder"])

    if filter_model:
        with open(output_env["interpolated_frames_list"], "w") as f:
            f.write("\n".join(interpolated_frames) + "\n")

    clean_workspace()


def generate_GT(video_name, output_folder, images_root_folder, video_frames_folder,
                viz_folder, kitti_format_folder, metadata, interpolated_frames_list,
                final_model, global_registration_matrix, video_fps,
                eth3d, colmap,
                video_index=None, num_videos=None, GT_already_done=False,
                save_space=False, inspect_dataset=False, **env):
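    """Generate ground truth depth maps for a localized video.

    The registered lidar cloud, occlusion mesh and splats are gathered into MeshLab
    projects, depth maps are rendered with ETH3D, and the result is converted to
    KITTI format along with a visualization video.
    """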
    if GT_already_done:
        return
    if not final_model.isdir():
        print("Video not localized, rerun the script without skipping former step")
        return
    model_length = len(read_images_text(final_model / "images.txt"))
    if model_length < 2:
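        # Fewer than 2 localized images: nothing meaningful can be generated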
        return

    final_lidar = final_model / "aligned_lidar.mlp"
    final_occlusions = final_model / "occlusions.mlp"
    final_splats = final_model / "splats.mlp"
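    # Prefer the per-video registration matrix written by the triangulation step in
    # localize_video, and fall back to the global registration matrix otherwise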
    specific_matrix_path = final_model / "matrix.txt"
    if specific_matrix_path.isfile():
        registration_matrix = np.linalg.inv(np.fromfile(specific_matrix_path, sep=" ").reshape(4, 4))
    else:
        registration_matrix = global_registration_matrix

    mxw.apply_transform_to_project(env["lidar_mlp"], final_lidar, registration_matrix)
    mxw.create_project(final_occlusions, [env["occlusion_ply"]], transforms=[np.eye(4)])
    mxw.create_project(final_splats, [env["splats_ply"]], transforms=[np.eye(4)])

    if inspect_dataset:
        eth3d.image_path = images_root_folder
        # Do 3 inspections:
        #  - inspection with reconstructed cloud
        #  - inspection with lidar cloud without occlusion
        #  - inspection with lidar cloud and occlusion models
        georef_mlp = env["georef_recon"] / "georef_recon.mlp"
        eth3d.inspect_dataset(georef_mlp, final_model)
        eth3d.inspect_dataset(final_lidar, final_model)
        eth3d.inspect_dataset(final_lidar, final_model,
                              final_occlusions, final_splats)

    print("Creating GT on video {} [{}/{}]".format(video_name.basename(), video_index, num_videos))
    i_pv = 1
    print_step(i_pv, "Creating Ground truth data with ETH3D")

    eth3d.create_ground_truth(final_lidar, final_model, output_folder,
                              final_occlusions, final_splats)
    viz_folder.makedirs_p()
    kitti_format_folder.makedirs_p()

    i_pv += 1
    print_step(i_pv, "Convert to KITTI format and create video with GT vizualisation")

    cd.convert_dataset(final_model,
                       output_folder / "ground_truth_depth" / video_name.namebase,
                       images_root_folder,
                       output_folder / "occlusion_depth" / video_name.namebase,
                       kitti_format_folder, viz_folder,
                       metadata, interpolated_frames_list,
                       video=True, fps=video_fps, downscale=4, threads=8, **env)
    interpolated_frames_list.copy(kitti_format_folder)

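
# Minimal usage sketch (assumptions: the `colmap`, `eth3d`, `ffmpeg` and `pcl_util`
# wrappers, the `env` option dictionary and the per-video paths are all built by the
# main pipeline script; `prepare_workspace` below is a hypothetical stand-in for it):
#
#     env = prepare_workspace(...)
#     for i, video in enumerate(env["videos"]):
#         localize_video(video_name=video, video_index=i + 1,
#                        num_videos=len(env["videos"]), **env)
#         generate_GT(video_name=video, video_index=i + 1,
#                     num_videos=len(env["videos"]), **env)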