Commit 1d64aa91 authored by Clément Pinard

Various bugfixes

parent ad66ecbc
......@@ -17,8 +17,7 @@ parser.add_argument('--database', metavar='DB', required=True,
help='path to colmap database file, to get the image ids right')
def add_to_db(db_path, metadata_path, frame_list_path, **env):
metadata = pd.read_csv(metadata_path)
def add_to_db(db_path, metadata, frame_list_path, **env):
database = db.COLMAPDatabase.connect(db_path)
frame_list = []
......@@ -62,7 +61,7 @@ def get_frame_without_features(db_path):
def main():
args = parser.parse_args()
add_to_db(args.database, args.metadata, args.frame_list)
add_to_db(args.database, pd.read_csv(args.metadata), args.frame_list)
return
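For context, this hunk moves the pd.read_csv call out of add_to_db, which now receives an already-loaded DataFrame. A minimal sketch of the new calling convention (the paths below are hypothetical):

import pandas as pd

# the caller loads the metadata CSV once and passes the DataFrame in;
# add_to_db no longer opens the file itself
metadata = pd.read_csv("videos/metadata.csv")              # hypothetical path
add_to_db("workspace/database.db", metadata, "workspace/frame_list.txt")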
......
......@@ -20,7 +20,7 @@ def add_main_options(parser):
main_parser.add_argument('--show_steps', action="store_true")
main_parser.add_argument('--add_new_videos', action="store_true",
help="If selected, will skip first 6 steps to directly register videos without mapping")
main_parser.add_argument('--generate_groundtruth_for_individual_images', action="store_true",
main_parser.add_argument('--generate_groundtruth_for_individual_images', '--gt_images', action="store_true",
help="If selected, will generate Ground truth for individual images as well as videos")
main_parser.add_argument('--save_space', action="store_true")
main_parser.add_argument('-v', '--verbose', action="count", default=0)
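A note on the added '--gt_images' option string: argparse derives the attribute name from the first long option, so both spellings set the same flag. A self-contained sketch of that behaviour (toy parser, not the project's):

import argparse

parser = argparse.ArgumentParser()
# the extra option string is only an alias; dest stays
# 'generate_groundtruth_for_individual_images'
parser.add_argument('--generate_groundtruth_for_individual_images', '--gt_images',
                    action="store_true")
args = parser.parse_args(['--gt_images'])
assert args.generate_groundtruth_for_individual_images is True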
......
......@@ -42,12 +42,10 @@ def save_intrinsics(cameras, images, output_dir, output_width=None, downscale=No
yaml.dump(camera_dict, f, default_flow_style=False)
if len(cameras) == 1:
print("bonjour")
cam = cameras[list(cameras.keys())[0]]
save_cam(cam, output_dir / "intrinsics.txt", output_dir / "camera.yaml")
else:
print("au revoir")
for _, img in images.items():
cam = cameras[img.camera_id]
......@@ -210,7 +208,7 @@ def convert_dataset(final_model, depth_dir, images_root_folder, occ_dir,
metadata = metadata.set_index("db_id", drop=False).sort_values("time")
framerate = metadata["framerate"].values[0]
# image_df = image_df.reindex(metadata.index)
images_list = metadata["image_path"]
images_list = metadata["image_path"].values
else:
assert images_list is not None
framerate = None
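The '.values' change above yields a plain numpy array instead of an index-carrying pandas Series. A toy illustration of the difference (column name from the hunk, data invented):

import pandas as pd

metadata = pd.DataFrame({"image_path": ["vid/0001.jpg", "vid/0002.jpg"]})
as_series = metadata["image_path"]           # pandas Series, keeps the DataFrame index
as_array = metadata["image_path"].values     # bare numpy array of the same strings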
......
......@@ -273,15 +273,15 @@ def main():
video_index=j+1,
step_index=i,
num_videos=len(env["videos_to_localize"]),
metadata=video_env["metadata"],
metadata_path=video_env["metadata_path"],
**video_env["output_env"], **env)
if env["generate_groundtruth_for_individual_images"]:
by_folder = pi.group_pics_by_folder(env["individual_pictures"])
for folder, pic_list in by_folder.items():
generate_GT_individual_pictures(input_colmap_model=env["georef_full_recon"],
individual_pictures=pic_list,
relpath=folder,
step_index=i, **env)
for folder, pic_list in by_folder.items():
generate_GT_individual_pictures(input_colmap_model=env["georef_full_recon"],
individual_pictures_list=pic_list,
relpath=folder,
step_index=i, **env)
if __name__ == '__main__':
......
......@@ -95,7 +95,7 @@ def main():
by_folder = pi.group_pics_by_folder(env["individual_pictures"])
for folder, pic_list in by_folder.items():
generate_GT_individual_pictures(input_colmap_model=env["georef_full_recon"],
individual_pictures=pic_list,
individual_pictures_list=pic_list,
relpath=folder,
step_index=i, **env)
......
......@@ -15,7 +15,7 @@ import meshlab_xml_writer as mxw
def is_video_in_model(video_name, colmap_model, metadata):
mapped_images_ids = read_images_binary(colmap_model/"images.bin").keys()
video_image_ids = pd.read_csv(metadata)["db_id"]
video_image_ids = metadata["db_id"]
return sum(video_image_ids.isin(mapped_images_ids)) > 0
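With this hunk, is_video_in_model receives the metadata DataFrame directly instead of re-reading the CSV. A hedged usage sketch (file names are invented, and the path.py Path type the surrounding code appears to use is assumed):

import pandas as pd
from path import Path

metadata = pd.read_csv("videos/vid01/metadata.csv")        # loaded once by the caller
colmap_model = Path("workspace/georef_full_model")         # hypothetical model folder
if is_video_in_model("vid01", colmap_model, metadata):
    print("video already registered in the model")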
......@@ -273,8 +273,8 @@ def generate_GT(video_name, raw_output_folder, images_root_folder, video_frames_
i_pv += 1
print_step_pv(i_pv, "Creating Ground truth data with ETH3D")
eth3d.create_ground_truth(final_mlp, final_model, raw_output_folder,
final_occlusions, final_splats)
# eth3d.create_ground_truth(final_mlp, final_model, raw_output_folder,
# final_occlusions, final_splats)
viz_folder.makedirs_p()
kitti_format_folder.makedirs_p()
......@@ -286,7 +286,7 @@ def generate_GT(video_name, raw_output_folder, images_root_folder, video_frames_
images_root_folder,
raw_output_folder / "occlusion_depth" / video_name.stem,
kitti_format_folder, viz_folder,
metadata, interpolated_frames,
metadata=metadata, interpolated_frames=interpolated_frames,
visualization=True, video=True, downscale=4, threads=8, **env)
if filter_models:
interpolated_frames_list.copy(kitti_format_folder)
......@@ -296,7 +296,7 @@ def generate_GT(video_name, raw_output_folder, images_root_folder, video_frames_
return
def generate_GT_individual_pictures(colmap_img_root, individual_pictures, raw_output_folder,
def generate_GT_individual_pictures(colmap_img_root, individual_pictures_list, raw_output_folder,
converted_output_folder, input_colmap_model,
aligned_mlp, relpath,
occlusion_ply, splats_ply,
......@@ -315,7 +315,7 @@ def generate_GT_individual_pictures(colmap_img_root, individual_pictures, raw_ou
return
i_pv = 1
print_step_pv(i_pv, "Copy individual images to output dataset {}".format(raw_output_folder))
for p in individual_pictures:
for p in individual_pictures_list:
output_path = raw_output_folder / "images" / p
output_path.parent.makedirs_p()
(colmap_img_root / p).copy(output_path)
......@@ -326,7 +326,7 @@ def generate_GT_individual_pictures(colmap_img_root, individual_pictures, raw_ou
pictures_colmap_model.makedirs_p()
epfm.extract_pictures(input=input_colmap_model,
output=pictures_colmap_model,
picture_list=individual_pictures,
picture_list=individual_pictures_list,
output_format=".txt")
i_pv += 1
......@@ -348,7 +348,7 @@ def generate_GT_individual_pictures(colmap_img_root, individual_pictures, raw_ou
raw_output_folder / "images",
occlusion_depth_folder,
kitti_format_folder, viz_folder,
images_list=individual_pictures,
images_list=individual_pictures_list,
visualization=True, video=False, downscale=4, threads=8, **env)
if save_space:
(raw_output_folder / "occlusion_depth" / "individual_pictures").rmtree_p()