Commit fab56cc2 authored by nicolas's avatar nicolas
Browse files

add own cloud registrator

parent 8f5663f9
......@@ -21,6 +21,7 @@ def add_to_db(db_path, metadata_path, frame_list_path, **env):
database = db.COLMAPDatabase.connect(db_path)
frame_list = []
frame_ids = []
if frame_list_path is not None:
with open(frame_list_path, "r") as f:
frame_list = [line[:-1] for line in f.readlines()]
......@@ -34,8 +35,7 @@ def add_to_db(db_path, metadata_path, frame_list_path, **env):
else:
frame_gps = np.full(3, np.NaN)
try:
print(image_path, camera_id)
database.add_image(image_path, int(camera_id), prior_t=frame_gps)
frame_ids.append(database.add_image(image_path, int(camera_id), prior_t=frame_gps))
except IntegrityError:
sql_string = "SELECT camera_id FROM images WHERE name='{}'".format(image_path)
row = next(database.execute(sql_string))
......@@ -43,6 +43,19 @@ def add_to_db(db_path, metadata_path, frame_list_path, **env):
assert(existing_camera_id == camera_id)
database.commit()
database.close()
return frame_ids
def get_frame_without_features(db_path):
    """Print the names of images whose SIFT descriptors are empty.

    Opens the COLMAP database at ``db_path``, finds every entry in the
    ``descriptors`` table with zero columns (i.e. no features were
    extracted), and prints the corresponding image name from ``images``.

    :param db_path: path to a COLMAP sqlite database file
    :returns: None (results are printed)
    """
    database = db.COLMAPDatabase.connect(db_path)
    first_string = "SELECT image_id FROM descriptors WHERE cols=0"
    descriptors = list(database.execute(first_string))
    for d in descriptors:
        # d is a one-element row tuple; use d[0] so the SQL gets the bare
        # integer id instead of the tuple repr "(5,)" (which never matches).
        second_string = "SELECT name FROM images WHERE image_id={}".format(d[0])
        row = list(database.execute(second_string))[0]
        print(row)

    database.close()
def main():
......
from subprocess import PIPE, call
import pandas as pd
import numpy as np
from scipy import integrate
import tempfile
def extrapolate_position(speeds, timestamps, initial_position, final_position):
......@@ -26,11 +24,12 @@ def preprocess_metadata(metadata, proj, centroid):
return pd.Series(proj(*x), index=["x", "y"])
position_xy = metadata[["location_longitude", "location_latitude"]].apply(lambda_fun, axis=1)
metadata = metadata.join(position_xy)
metadata["z"] = metadata["location_altitude"]
# Extrapolate position from speed and previous frames
speed = metadata[["speed_east", "speed_north", "speed_down"]].values * np.array([1, 1, -1])
timestamps = metadata["time"].values * 1e-6
positions = metadata[["x", "y", "location_altitude"]].values
positions = metadata[["x", "y", "z"]].values
if metadata["location_valid"].unique().tolist() == [0]:
metadata["indoor"] = True
positions = extrapolate_position(speed, timestamps, None, None)
......@@ -55,15 +54,15 @@ def preprocess_metadata(metadata, proj, centroid):
for start, end in zip(invalidity_start, validity_start):
positions[start:end] = extrapolate_position(speed[start:end], timestamps[start:end], positions[start], positions[end-1])
positions -= centroid
print(positions)
metadata["x"], metadata["y"], metadata["location_altitude"] = positions.transpose()
metadata["x"], metadata["y"], metadata["z"] = positions.transpose()
return metadata
def extract_metadata(folder_path, file_path, native_wrapper, proj, w, h, f, centroid, save_path=None):
metadata = native_wrapper.vmeta_extract(file_path)
metadata = metadata.iloc[:-1]
metadata = preprocess_metadata(metadata, proj, centroid)
video_quality = h * w / f
metadata["video_quality"] = video_quality
......
......@@ -89,7 +89,7 @@ def write_next_bytes(fid, data, format_char_sequence, endian_character="<"):
:param data: data to send, if multiple elements are sent at the same time,
they should be encapsuled either in a list or a tuple
:param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
should eb the same length as the data list or tuple
should be the same length as the data list or tuple
:param endian_character: Any of {@, =, <, >, !}
"""
if isinstance(data, (list, tuple)):
......@@ -165,7 +165,7 @@ def write_cameras_text(cameras, path):
'# Number of cameras: {}\n'.format(len(cameras))
with open(path, "w") as fid:
fid.write(HEADER)
for id, cam in cameras.items():
for _, cam in cameras.items():
to_write = [cam.id, cam.model, cam.width, cam.height, *cam.params]
line = " ".join([str(elem) for elem in to_write])
fid.write(line + "\n")
......@@ -263,16 +263,18 @@ def write_images_text(images, path):
void Reconstruction::ReadImagesText(const std::string& path)
void Reconstruction::WriteImagesText(const std::string& path)
"""
mean_observations = sum((len(img.point3D_ids) for id, img in images.items()))/len(images)
HEADER = ('# Image list with two lines of data per image:\n'
'# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n'
'# POINTS2D[] as (X, Y, POINT3D_ID)\n'
'# Number of images: {}, mean observations per image: {}\n').format(len(images), mean_observations)
if len(images) == 0:
mean_observations = 0
else:
mean_observations = sum((len(img.point3D_ids) for _, img in images.items()))/len(images)
HEADER = '# Image list with two lines of data per image:\n'
'# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n'
'# POINTS2D[] as (X, Y, POINT3D_ID)\n'
'# Number of images: {}, mean observations per image: {}\n'.format(len(images), mean_observations)
with open(path, "w") as fid:
fid.write(HEADER)
for id, img in images.items():
for _, img in images.items():
image_header = [img.id, *img.qvec, *img.tvec, img.camera_id, img.name]
first_line = " ".join(map(str, image_header))
fid.write(first_line + "\n")
......@@ -291,16 +293,16 @@ def write_images_binary(images, path_to_model_file):
"""
with open(path_to_model_file, "wb") as fid:
write_next_bytes(fid, len(images), "Q")
for id, image in images.items():
write_next_bytes(fid, image.id, "i")
write_next_bytes(fid, image.qvec.tolist(), "dddd")
write_next_bytes(fid, image.tvec.tolist(), "ddd")
write_next_bytes(fid, image.camera_id, "i")
for char in image.name:
for _, img in images.items():
write_next_bytes(fid, img.id, "i")
write_next_bytes(fid, img.qvec.tolist(), "dddd")
write_next_bytes(fid, img.tvec.tolist(), "ddd")
write_next_bytes(fid, img.camera_id, "i")
for char in img.name:
write_next_bytes(fid, char.encode("utf-8"), "c")
write_next_bytes(fid, b"\x00", "c")
write_next_bytes(fid, len(image.point3D_ids), "Q")
for xy, p3d_id in zip(image.xys, image.point3D_ids):
write_next_bytes(fid, len(img.point3D_ids), "Q")
for xy, p3d_id in zip(img.xys, img.point3D_ids):
write_next_bytes(fid, [*xy, p3d_id], "ddq")
......@@ -370,14 +372,14 @@ def write_points3D_text(points3D, path):
if len(points3D) == 0:
mean_track_length = 0
else:
mean_track_length = sum((len(pt.image_ids) for id, pt in points3D.items()))/len(points3D)
mean_track_length = sum((len(pt.image_ids) for _, pt in points3D.items()))/len(points3D)
HEADER = '# 3D point list with one line of data per point:\n'
'# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\n'
'# Number of points: {}, mean track length: {}\n'.format(len(points3D), mean_track_length)
with open(path, "w") as fid:
fid.write(HEADER)
for id, pt in points3D.items():
for _, pt in points3D.items():
point_header = [pt.id, *pt.xyz, *pt.rgb, pt.error]
fid.write(" ".join(map(str, point_header)) + " ")
track_strings = []
......@@ -386,7 +388,7 @@ def write_points3D_text(points3D, path):
fid.write(" ".join(track_strings) + "\n")
def write_points3D_binary(points3D, path_to_model_file):
def write_points3d_binary(points3D, path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DBinary(const std::string& path)
......@@ -394,14 +396,14 @@ def write_points3D_binary(points3D, path_to_model_file):
"""
with open(path_to_model_file, "wb") as fid:
write_next_bytes(fid, len(points3D), "Q")
for id, point in points3D.items():
write_next_bytes(fid, point.id, "Q")
write_next_bytes(fid, point.xyz.tolist(), "ddd")
write_next_bytes(fid, point.rgb.tolist(), "BBB")
write_next_bytes(fid, point.error, "d")
track_length = point.image_ids.shape[0]
for _, pt in points3D.items():
write_next_bytes(fid, pt.id, "Q")
write_next_bytes(fid, pt.xyz.tolist(), "ddd")
write_next_bytes(fid, pt.rgb.tolist(), "BBB")
write_next_bytes(fid, pt.error, "d")
track_length = pt.image_ids.shape[0]
write_next_bytes(fid, track_length, "Q")
for image_id, point2D_id in zip(point.image_ids, point.point2D_idxs):
for image_id, point2D_id in zip(pt.image_ids, pt.point2D_idxs):
write_next_bytes(fid, [image_id, point2D_id], "ii")
......@@ -425,7 +427,7 @@ def write_model(cameras, images, points3D, path, ext):
else:
write_cameras_binary(cameras, os.path.join(path, "cameras" + ext))
write_images_binary(images, os.path.join(path, "images" + ext))
write_points3D_binary(points3D, os.path.join(path, "points3D") + ext)
write_points3d_binary(points3D, os.path.join(path, "points3D") + ext)
return cameras, images, points3D
......@@ -456,17 +458,15 @@ def rotmat2qvec(R):
return qvec
parser = argparse.ArgumentParser(description='Read and write COLMAP binary and text models')
parser.add_argument('input_model', help='path to input model folder')
parser.add_argument('input_format', choices=['.bin', '.txt'],
help='input model format')
parser.add_argument('--output_model', metavar='PATH',
help='path to output model folder')
parser.add_argument('--output_format', choices=['.bin', '.txt'],
help='outut model format', default='.txt')
def main():
parser = argparse.ArgumentParser(description='Read and write COLMAP binary and text models')
parser.add_argument('input_model', help='path to input model folder')
parser.add_argument('input_format', choices=['.bin', '.txt'],
help='input model format')
parser.add_argument('--output_model', metavar='PATH',
help='path to output model folder')
parser.add_argument('--output_format', choices=['.bin', '.txt'],
help='outut model format', default='.txt')
args = parser.parse_args()
cameras, images, points3D = read_model(path=args.input_model, ext=args.input_format)
......
......@@ -11,6 +11,7 @@ parser.add_argument('--video_list', metavar='PATH',
parser.add_argument('--input_model', metavar='DIR', type=Path)
parser.add_argument('--output_model', metavar='DIR', default=None, type=Path)
parser.add_argument('--output_format', choices=['.txt', '.bin'], default='.txt')
parser.add_argument('--metadata_path', metavar="CSV", type=Path)
def extract_video(input_model, output_model, video_metadata_path, output_format='.bin'):
......@@ -18,15 +19,19 @@ def extract_video(input_model, output_model, video_metadata_path, output_format=
images = rm.read_images_binary(input_model / "images.bin")
images_per_name = {}
video_metadata = pd.read_csv(video_metadata_path)
image_names = video_metadata["image_path"]
image_names = video_metadata["image_path"].values
for id, image in images.items():
if image.name in image_names:
image._replace(xys=[])
image._replace(point3D_ids=[])
images_per_name[image.name] = image
camera_ids = video_metadata["camera_id"].unique()
output_cameras = {cid: cameras[cid] for cid in camera_ids}
output_cameras = {cid: cameras[cid] for cid in camera_ids if cid in cameras.keys()}
rm.write_model(output_cameras, images_per_name, {}, output_model, output_format)
return len(images_per_name) > 1
def main():
args = parser.parse_args()
......
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from wrappers import Colmap, FFMpeg, PDraw, ETH3D, PCLUtil
from global_options import add_global_options
from pyproj import Proj
from edit_exif import get_gps_location
import meshlab_xml_writer as mxw
import add_video_to_db as avtd
import extract_video_from_model as evfm
from path import Path
import numpy as np
import videos_to_colmap as v2c
import viz_depth as vd
import generate_sky_masks as gsm
import pandas as pd
import las2ply
import rawpy
import imageio
......@@ -24,7 +28,8 @@ pre_vid_steps = ["Full video extraction",
"Complete photogrammetry with video at 1 fps",
"Localizing remaining frames",
"Creating Ground truth data",
"Create video with GT vizualisation"]
"Create video with GT vizualisation",
"Convert to KITTI format"]
parser = ArgumentParser(description='Main pipeline, from LIDAR pictures and videos to GT depth enabled videos',
formatter_class=ArgumentDefaultsHelpFormatter)
......@@ -35,7 +40,7 @@ main_parser.add_argument('--input_folder', metavar='PATH', default=Path("."), ty
main_parser.add_argument('--workspace', metavar='PATH', default=Path("."),
help='path to workspace where COLMAP operations will be done', type=Path)
main_parser.add_argument('--output_folder', metavar='PATH', default=Path("."),
help='path to output folder : must be big !')
help='path to output folder : must be big !', type=Path)
main_parser.add_argument('--skip_step', metavar="N", nargs="*", default=[], type=int)
main_parser.add_argument('--begin_step', metavar="N", type=int, default=None)
main_parser.add_argument('--show_steps', action="store_true")
......@@ -47,7 +52,7 @@ main_parser.add_argument('--fine_sift_features', action="store_true")
main_parser.add_argument('--save_space', action="store_true")
main_parser.add_argument('--add_new_videos', action="store_true")
pcp_parser = parser.add_argument("PointCLoud preparation")
pcp_parser = parser.add_argument_group("PointCLoud preparation")
pcp_parser.add_argument("--pointcloud_resolution", default=0.1, type=float)
pcp_parser.add_argument("--SOR", default=[6, 2], nargs=2, type=int)
......@@ -59,13 +64,14 @@ ve_parser.add_argument('--num_neighbours', default=10, type=int)
ve_parser.add_argument('--system', default="epsg:2154")
exec_parser = parser.add_argument_group("Executable files")
exec_parser.add_argument('--nw', default="native-wrapper.sh",
exec_parser.add_argument('--log', default=None, type=Path)
exec_parser.add_argument('--nw', default="native-wrapper.sh", type=Path,
help="native-wrapper.sh file location")
exec_parser.add_argument("--colmap", default="colmap",
exec_parser.add_argument("--colmap", default="colmap", type=Path,
help="colmap exec file location")
exec_parser.add_argument("--eth3d", default="../dataset-pipeline/build",
type=Path, help="ETH3D detaset pipeline exec files folder location")
exec_parser.add_argument("--ffmpeg", default="ffmpeg")
exec_parser.add_argument("--ffmpeg", default="ffmpeg", type=Path)
exec_parser.add_argument("--pcl_util", default="pcl_util/build", type=Path)
vr_parser = parser.add_argument_group("Video Registration")
......@@ -75,9 +81,6 @@ om_parser = parser.add_argument_group("Occlusion Mesh")
om_parser.add_argument('--normal_radius', default=0.2, type=float)
om_parser.add_argument('--mesh_resolution', default=0.2, type=float)
om_parser.add_argument('--splat_threshold', default=0.1, type=float)
om_parser.add_argument('--resolution_weight', default=1, type=float)
om_parser.add_argument('--num_neighbours', default=10, type=int)
om_parser.add_argument('--system', default="epsg:2154")
def print_workflow():
......@@ -106,16 +109,38 @@ def convert_point_cloud(pointclouds, lidar_path, verbose, eth3d, pcl_util, point
verbose=verbose >= 1)
centroids.append(centroid)
eth3d.clean_pointcloud(ply, filter=(6, 2))
pcl_util.subsample(input_file=ply + ".inliers.ply", output_file=ply + "_subsampled.ply", resolution=pointcloud_resolution)
pcl_util.subsample(input_file=ply + ".inliers.ply", output_file=ply.stripext() + "_subsampled.ply", resolution=pointcloud_resolution)
if save_space:
(ply + ".inliers.ply").remove()
(ply + ".outliers.ply").remove()
ply.remove()
converted_clouds.append(ply + "_subsampled.ply")
converted_clouds.append(ply.stripext() + "_subsampled.ply")
temp_mlp = env["workspace"] / "lidar_unaligned.mlp"
mxw.create_project(temp_mlp, converted_clouds, labels=None, transforms=None)
if len(converted_clouds) > 1:
eth3d.align_with_ICP(temp_mlp, env["lidar_mlp"], scales=5)
else:
temp_mlp.move(env["lidar_mlp"])
return converted_clouds, centroids[0]
def extract_gps_and_path(existing_pictures, image_path, system, centroid, **env):
    """Build geo-reference lines for pictures carrying EXIF GPS tags.

    Each line holds the picture path (relative to ``image_path``) followed
    by its projected x, y and altitude, all shifted by ``centroid``.
    Pictures without GPS data are skipped.

    :param existing_pictures: iterable of picture paths (path.Path objects)
    :param image_path: root folder used to relativize picture paths
    :param system: projection system string understood by pyproj.Proj
    :param centroid: 3-element offset subtracted from (x, y, altitude)
    :returns: list of "name x y alt\\n" strings
    """
    projection = Proj(system)
    georef_list = []
    for picture in existing_pictures:
        location = get_gps_location(picture)
        if location is None:
            # no GPS EXIF tag on this picture — nothing to georeference
            continue
        lat, lng, alt = location
        east, north = projection(lng, lat)
        shifted = (east - centroid[0], north - centroid[1], alt - centroid[2])
        georef_list.append("{} {} {} {}\n".format(picture.relpath(image_path), *shifted))
    return georef_list
def extract_pictures_to_workspace(input_folder, image_path, workspace, colmap, raw_ext, pic_ext, fine_sift_features, **env):
picture_folder = input_folder / "Pictures"
picture_folder.merge_tree(image_path)
......@@ -165,10 +190,13 @@ def prepare_workspace(path, env):
env["video_frame_list_thorough"] = env["image_path"] / "video_frames_for_thorough_scan.txt"
env["georef_frames_list"] = env["image_path"] / "georef.txt"
env["lidar_mlp"] = env["workspace"] / "lidar.mlp"
env["lidar_ply"] = env["lidar_path"] / "aligned.ply"
env["aligned_mlp"] = env["workspace"] / "aligned_model.mlp"
env["occlusion_ply"] = env["lidar_path"] / "occlusion_model.ply"
env["splats_ply"] = env["lidar_path"] / "splats_model.ply"
env["occlusion_mlp"] = env["lidar_path"] / "occlusions.mlp"
env["splats_mlp"] = env["lidar_path"] / "splats.mlp"
env["georefrecon_ply"] = env["georef_recon"] / "georef_reconstruction.ply"
......@@ -186,15 +214,17 @@ def main():
colmap = Colmap(db=args.workspace/"thorough_scan.db",
image_path=env["image_path"],
mask_path=env["mask_path"],
binary=args.colmap, quiet=args.verbose < 1)
binary=args.colmap,
quiet=args.verbose < 1,
logfile=args.log)
env["colmap"] = colmap
ffmpeg = FFMpeg(args.ffmpeg, quiet=args.verbose < 2)
ffmpeg = FFMpeg(args.ffmpeg, quiet=args.verbose < 2, logfile=args.log)
env["ffmpeg"] = ffmpeg
pdraw = PDraw(args.nw, quiet=args.verbose < 2)
pdraw = PDraw(args.nw, quiet=args.verbose < 2, logfile=args.log)
env["pdraw"] = pdraw
eth3d = ETH3D(args.eth3d, env["image_path"], quiet=args.verbose < 1)
eth3d = ETH3D(args.eth3d, env["image_path"], quiet=args.verbose < 1, logfile=args.log)
env["eth3d"] = eth3d
pcl_util = PCLUtil(args.pcl_util)
pcl_util = PCLUtil(args.pcl_util, quiet=args.verbose < 2, logfile=args.log)
env["pcl_util"] = pcl_util
las_files = (args.input_folder/"Lidar").files("*.las")
......@@ -222,15 +252,24 @@ def main():
i, s = next(i_global_steps)
if i + 1 not in args.skip_step:
print_step(i, s)
existing_georef = extract_gps_and_path(**env)
path_lists, env["videos_output_folders"] = extract_videos_to_workspace(**env)
if path_lists is not None:
with open(env["video_frame_list_thorough"], "w") as f:
f.write("\n".join(path_lists["thorough"]))
f.write("\n".join(path_lists["thorough"]["frames"]))
with open(env["georef_frames_list"], "w") as f:
f.write("\n".join(path_lists["georef"]))
f.write("\n".join(existing_georef) + "\n")
f.write("\n".join(path_lists["thorough"]["georef"]) + "\n")
for v in env["videos_list"]:
with open(env["videos_output_folders"][v] / "to_scan.txt", "w") as f:
f.write("\n".join(path_lists[v]))
video_folder = env["videos_output_folders"][v]
with open(video_folder / "to_scan.txt", "w") as f:
f.write("\n".join(path_lists[v]["frames_lowfps"]) + "\n")
with open(video_folder / "georef.txt", "w") as f:
f.write("\n".join(existing_georef) + "\n")
f.write("\n".join(path_lists["thorough"]["georef"]) + "\n")
f.write("\n".join(path_lists[v]["georef_lowfps"]) + "\n")
with open(video_folder / "full.txt", "w") as f:
f.write("\n".join(path_lists[v]["frames_full"]) + "\n")
else:
env["videos_output_folders"] = {}
by_name = {v.namebase: v for v in env["videos_list"]}
......@@ -258,32 +297,38 @@ def main():
colmap.export_model(output_ply=env["georefrecon_ply"],
input_model=env["georef_recon"])
mxw.create_project(env["workspace"] / "unaligned.mlp", [env["georefrecon_ply"]] + env["pointclouds"])
eth3d.align_with_ICP(env["workspace"] / "unaligned.mlp", env["aligned_mlp"], scales=5)
mxw.remove_mesh_from_project(env["aligned_mlp"], env["aligned_mlp"], 0)
i, s = next(i_global_steps)
if i + 1 not in args.skip_step:
print_step(i, s)
with_normals_path = env["lidar_path"] / "with_normals.ply"
eth3d.compute_normals(with_normals_path, env["aligned_mlp"], neighbor_radius=args.normal_radius)
eth3d.compute_normals(with_normals_path, env["lidar_mlp"], neighbor_radius=args.normal_radius)
pcl_util.triangulate_mesh(env["occlusion_ply"], with_normals_path, resolution=args.mesh_resolution)
eth3d.create_splats(env["splats_ply"], with_normals_path, env["occlusion_ply"], threshold=args.splat_threshold)
mxw.create_project(env["occlusion_mlp"], [env["occlusion_ply"], env["splats_ply"]])
if args.save_space:
with_normals_path.remove()
matrix_path = env["workspace"] / "matrix_thorough.txt"
pcl_util.register_reconstruction(georef=env["georefrecon_ply"],
lidar=with_normals_path,
output_matrix=matrix_path,
output_cloud=env["lidar_ply"],
max_distance=10)
matrix = np.fromfile(matrix_path, sep=" ").reshape(4, 4)
mxw.apply_transform_to_project(env["lidar_mlp"], env["aligned_mlp"], matrix)
mxw.create_project(env["occlusion_mlp"], [env["occlusion_ply"]], transforms=[matrix])
mxw.create_project(env["splats_mlp"], [env["splats_ply"]], transforms=[matrix])
for v in env["videos_list"]:
i_pv_steps = enumerate(pre_vid_steps)
print("Now working on video {}".format(v))
current_video_folder = env["videos_output_folders"][v]
former_db = colmap.db
current_db = current_video_folder / "video.db"
thorough_db = args.workspace / "thorough_scan.db"
lowfps_db = current_video_folder / "video1fps.db"
full_db = current_video_folder / "video_full.db"
current_metadata = current_video_folder / "metadata.csv"
former_db.copy(current_db)
image_list_path = current_video_folder / "to_scan.txt"
colmap.db = current_db
thorough_db.copy(lowfps_db)
map_image_list_path = current_video_folder / "to_scan.txt"
full_image_list_path = current_video_folder / "full.txt"
colmap.db = lowfps_db
i, s = next(i_pv_steps)
print_step(i, s)
......@@ -299,41 +344,92 @@ def main():
i, s = next(i_pv_steps)
print_step(i, s)
image_list_path = current_video_folder / "to_scan.txt"
avtd.add_to_db(current_db, current_metadata, image_list_path)
colmap.extract_features(image_list=image_list_path, fine=args.fine_sift_features)
added_frames = avtd.add_to_db(lowfps_db, current_metadata, map_image_list_path)
colmap.extract_features(image_list=map_image_list_path, fine=args.fine_sift_features)
colmap.match(method="sequential", vocab_tree=args.vocab_tree)
video_output_model = env["video_recon"] / v.basename()
video_output_model = env["video_recon"] / v.namebase
video_output_model.makedirs_p()
colmap.map(output_model=video_output_model, input_model=env["georef_recon"])
# when colmap map is called, the model is normalized so we have georegister it again
colmap.align_model(output_model=video_output_model,
input_model=video_output_model,
ref_images=env["georef_frames_list"])
i, s = next(i_pv_steps)
print_step(i, s)
avtd.add_to_db(current_db, current_metadata, frame_list_path=None)
colmap.extract_features(fine=args.fine_sift_features)
lowfps_db.copy(full_db)
colmap.db = full_db
avtd.add_to_db(full_db, current_metadata, frame_list_path=None)
colmap.extract_features(image_list=full_image_list_path, fine=args.fine_sift_features)
colmap.match(method="sequential", vocab_tree=args.vocab_tree)
colmap.register_images(output_model=video_output_model, input_model=video_output_model)
colmap.adjust_bundle(output_model=video_output_model, input_model=video_output_model)
colmap.align_model(output_model=video_output_model,
input_model=video_output_model,
ref_images=env["georef_frames_list"])
final_output_model = video_output_model / "final"
final_output_model.makedirs_p()
evfm.extract_video(input_model=video_output_model,
output_model=final_output_model,
video_metadata_path=current_metadata)
empty = not evfm.extract_video(input_model=video_output_model,
output_model=final_output_model,
video_metadata_path=current_metadata,
output_format=".bin")
if empty:
print("Error, empty localization, will try map from video")
colmap.db = lowfps_db
colmap.map(output_model=video_output_model, start_frame_id=added_frames[0])
colmap.align_model(output_model=video_output_model,
input_model=video_output_model / "0",
ref_images=current_video_folder / "georef.txt")
colmap.db = full_db
colmap.register_images(output_model=video_output_model, input_model=video_output_model)
colmap.adjust_bundle(output_model=video_output_model, input_model=video_output_model)
empty = not evfm.extract_video(input_model=video_output_model,
output_model=final_output_model,
video_metadata_path=current_metadata,
output_format=".bin")
i, s = next(i_pv_steps)
print_step(i, s)
colmap.triangulate_points(final_output_model, final_output_model)
colmap.export_model(final_output_model, final_output_model, output_type="TXT")
ply_name = final_output_model / "georef_{}.ply".format(v.namebase)
matrix_name = final_output_model / "georef_maxtrix_{}.txt".format(v.namebase)
colmap.export_model(ply_name, final_output_model, output_type="PLY")
pcl_util.register_reconstruction(georef=ply_name,
lidar=env["lidar_ply"],
output_matrix=matrix_name,
output_cloud=env["lidar_ply"],
max_distance=10)
matrix = np.fromfile(matrix_name, sep=" ").reshape(4, 4)
final_lidar = final_output_model / "aligned_lidar.mlp"
final_occlusions = final_output_model / "occlusions.mlp"
final_splats = final_output_model / "splats.mlp"
mxw.apply_transform_to_project(env["aligned_mlp"], final_lidar, matrix)
mxw.apply_transform_to_project(env["occlusion_mlp"], final_occlusions, matrix)
mxw.apply_transform_to_project(env["splats_mlp"], final_splats, matrix)
i, s = next(i_pv_steps)
print_step(i, s)
eth3d.create_ground_truth(env["aligned_mlp"], env["occlusion_mlp"], final_output_model, args.output_folder)
output_images_folder = args.output_folder / "Images" / v.namebase
output_images_folder.makedirs_p()
current_video_folder.merge_tree(output_images_folder)
eth3d.create_ground_truth(final_lidar, final_occlusions,
final_splats, final_output_model,
args.output_folder)
output_vizualisation_folder = args.output_folder / "video" / v.namebase
output_vizualisation_folder.makedirs_p()
fps = pd.read_csv(current_metadata)["framerate"].iloc[0]
vd.process_viz(args.output_folder / "ground_truth_depth" / v.namebase,
output_images_folder,
args.output_folder / "occlusion_depth" / v.namebase,
output_vizualisation_folder,
video=True, fps=fps, downscale=4, threads=8, **env)
if args.save_space:
for file in current_video_folder.files():
if file not in existing_images: