Commit 1695f357 authored by Clément Pinard's avatar Clément Pinard
Browse files

add pointcloud_sor

 * add videos splitting when they are too long
 * add already computed groundtruth index
 * add point cloud densification by colmap
 * use matrix until the end instead of mlp files : thanks to this, the matrix can be modified manually with a custom point cloud registration
parent fab56cc2
......@@ -16,7 +16,7 @@ parser.add_argument('--database', metavar='DB', required=True,
help='path to colmap database file, to get the image ids right')
def add_to_db(db_path, metadata_path, frame_list_path, **env):
def add_to_db(db_path, metadata_path, frame_list_path, input_frame_ids=None, **env):
metadata = pd.read_csv(metadata_path)
database = db.COLMAPDatabase.connect(db_path)
......@@ -26,8 +26,11 @@ def add_to_db(db_path, metadata_path, frame_list_path, **env):
with open(frame_list_path, "r") as f:
frame_list = [line[:-1] for line in f.readlines()]
metadata = metadata[metadata["image_path"].isin(frame_list)]
if input_frame_ids:
assert(len(metadata) == len(input_frame_ids))
metadata["input_frame_id"] = input_frame_ids
for image_id, row in tqdm(metadata.iterrows(), total=len(metadata)):
for _, row in tqdm(metadata.iterrows(), total=len(metadata)):
image_path = row["image_path"]
camera_id = row["camera_id"]
if row["location_valid"]:
......@@ -35,12 +38,14 @@ def add_to_db(db_path, metadata_path, frame_list_path, **env):
else:
frame_gps = np.full(3, np.NaN)
try:
frame_ids.append(database.add_image(image_path, int(camera_id), prior_t=frame_gps))
input_id = row["input_frame_id"] if input_frame_ids else None
frame_ids.append(database.add_image(image_path, int(camera_id), prior_t=frame_gps, image_id=input_id))
except IntegrityError:
sql_string = "SELECT camera_id FROM images WHERE name='{}'".format(image_path)
row = next(database.execute(sql_string))
existing_camera_id = row[0]
sql_string = "SELECT camera_id, image_id FROM images WHERE name='{}'".format(image_path)
sql_output = next(database.execute(sql_string))
existing_camera_id = sql_output[0]
assert(existing_camera_id == camera_id)
frame_ids.append(sql_output[1])
database.commit()
database.close()
return frame_ids
......
......@@ -14,9 +14,9 @@ parser.add_argument('--output_format', choices=['.txt', '.bin'], default='.txt')
parser.add_argument('--metadata_path', metavar="CSV", type=Path)
def extract_video(input_model, output_model, video_metadata_path, output_format='.bin'):
cameras = rm.read_cameras_binary(input_model / "cameras.bin")
images = rm.read_images_binary(input_model / "images.bin")
def extract_video(input, output, video_metadata_path, output_format='.bin'):
cameras = rm.read_cameras_binary(input / "cameras.bin")
images = rm.read_images_binary(input / "images.bin")
images_per_name = {}
video_metadata = pd.read_csv(video_metadata_path)
image_names = video_metadata["image_path"].values
......@@ -28,7 +28,7 @@ def extract_video(input_model, output_model, video_metadata_path, output_format=
camera_ids = video_metadata["camera_id"].unique()
output_cameras = {cid: cameras[cid] for cid in camera_ids if cid in cameras.keys()}
rm.write_model(output_cameras, images_per_name, {}, output_model, output_format)
rm.write_model(output_cameras, images_per_name, {}, output, output_format)
return len(images_per_name) > 1
......
......@@ -78,6 +78,8 @@ def process_folder(folder_to_process, image_path, mask_path, pic_ext, verbose=Fa
to_process = []
if to_process:
extract_sky_mask(network, to_process, mask_folder)
del network
torch.cuda.empty_cache()
parser = ArgumentParser(description='sky mask generator using ENet trained on cityscapes',
......
......@@ -29,7 +29,6 @@ def load_and_convert(input_file, output_folder, verbose=False):
*(cloud.centroid/1000)))
output_centroid = cloud.centroid
print(cloud.centroid)
np.savetxt(txt_path, output_centroid)
xyz = cloud.points[['x', 'y', 'z']]
......
This diff is collapsed.
......@@ -16,3 +16,6 @@ target_link_libraries (MeshTriangulator ${PCL_LIBRARIES} glog)
add_executable (CloudRegistrator cloud_registrator.cpp)
target_link_libraries(CloudRegistrator ${PCL_LIBRARIES} glog)
add_executable (CloudSOR pointcloud_sor.cpp)
target_link_libraries(CloudSOR ${PCL_LIBRARIES} glog)
......@@ -4,10 +4,12 @@
#include <pcl/console/parse.h>
#include <pcl/point_types.h>
#include <pcl/registration/icp.h>
#include <pcl/registration/transformation_estimation_svd_scale.h>
#include <pcl/filters/statistical_outlier_removal.h>
#include <pcl/io/ply_io.h>
int
main (int argc, char** argv)
int main (int argc, char** argv)
{
FLAGS_logtostderr = 1;
google::InitGoogleLogging(argv[0]);
......@@ -34,7 +36,7 @@ int
std::string output_cloud_path;
pcl::console::parse_argument(argc, argv, "--output_cloud", output_cloud_path);
if (output_cloud_path.empty() && output_cloud_path.empty()){
if (output_matrix_path.empty() && output_cloud_path.empty()){
LOG(ERROR) << "No output path was given";
LOG(INFO) << "Usage: " << argv[0] << " --georef <file.ply> --lidar <file.ply> "
<< "--max_distance <int> --output_matrix <file.txt> (--output_cloud <file.ply>)";
......@@ -55,12 +57,25 @@ int
return EXIT_FAILURE;
}
LOG(INFO) << "point clouds loaded...";
// Filter to get inlier cloud, store in filtered_cloud.
pcl::PointCloud<pcl::PointXYZ>::Ptr geroef_filtered (new pcl::PointCloud<pcl::PointXYZ>);
pcl::StatisticalOutlierRemoval<pcl::PointXYZ> sor;
sor.setInputCloud(geroef);
sor.setMeanK(6);
sor.setStddevMulThresh(0.1);
sor.filter(*geroef_filtered);
pcl::IterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> icp;
pcl::registration::TransformationEstimationSVDScale<pcl::PointXYZ, pcl::PointXYZ>::Ptr est;
est.reset(new pcl::registration::TransformationEstimationSVDScale<pcl::PointXYZ, pcl::PointXYZ>);
icp.setTransformationEstimation(est);
icp.setMaxCorrespondenceDistance (max_distance);
icp.setTransformationEpsilon(0.0001);
icp.setMaximumIterations(500);
icp.setEuclideanFitnessEpsilon(0.0001);
icp.setInputSource(geroef);
icp.setInputSource(geroef_filtered);
icp.setInputTarget(lidar);
pcl::PointCloud<pcl::PointXYZ> Final;
......
#include <glog/logging.h>
#include <iostream>
#include <pcl/console/parse.h>
#include <pcl/point_types.h>
#include <pcl/filters/statistical_outlier_removal.h>
#include <pcl/io/ply_io.h>
int main (int argc, char** argv)
{
FLAGS_logtostderr = 1;
google::InitGoogleLogging(argv[0]);
pcl::console::setVerbosityLevel(pcl::console::L_DEBUG);
// Parse arguments.
int dummy;
if (argc <= 1 ||
pcl::console::parse_argument(argc, argv, "-h", dummy) >= 0 ||
pcl::console::parse_argument(argc, argv, "--help", dummy) >= 0) {
LOG(INFO) << "Usage: " << argv[0] << " --input <file.ply> "
<< "--knn <int> --std <int> --output <file.ply>)";
return EXIT_FAILURE;
}
std::string input_path;
pcl::console::parse_argument(argc, argv, "--input", input_path);
float max_distance = 1; //1m max distance
std::string output_path;
pcl::console::parse_argument(argc, argv, "--output", output_path);
float knn = 1; //1m max distance
pcl::console::parse_argument(argc, argv, "--knn", knn);
float std = 1; //1m max distance
pcl::console::parse_argument(argc, argv, "--std", std);
if (output_path.empty()){
LOG(ERROR) << "No output path was given";
LOG(INFO) << "Usage: " << argv[0] << " --georef <file.ply> --lidar <file.ply> "
<< "--max_distance <int> --output_matrix <file.txt> (--output_cloud <file.ply>)";
return EXIT_FAILURE;
}
// Load point cloud with normals.
LOG(INFO) << "Loading point cloud ...";
pcl::PointCloud<pcl::PointXYZ>::Ptr input(
new pcl::PointCloud<pcl::PointXYZ>());
if (pcl::io::loadPLYFile(input_path, *input) < 0) {
return EXIT_FAILURE;
}
LOG(INFO) << "point cloud loaded...";
// Filter to get inlier cloud, store in filtered_cloud.
pcl::PointCloud<pcl::PointXYZ>::Ptr ouptut (new pcl::PointCloud<pcl::PointXYZ>);
pcl::StatisticalOutlierRemoval<pcl::PointXYZ> sor;
sor.setInputCloud(input);
sor.setMeanK(knn);
sor.setStddevMulThresh(std);
sor.filter(*ouptut);
if (!output_path.empty())
pcl::io::savePLYFileBinary(output_path, *ouptut);
return (0);
}
\ No newline at end of file
......@@ -12,4 +12,6 @@ ezdxf
meshio
scikit-learn
pebble
dicttoxml
\ No newline at end of file
dicttoxml
lxml
scikit-image
......@@ -121,7 +121,7 @@ def register_new_cameras(cameras_dataframe, database, camera_dict, model_name="P
def process_video_folder(videos_list, existing_pictures, output_video_folder, image_path, system, centroid,
workspace, fps=1, total_frames=500, orientation_weight=1, resolution_weight=1,
output_colmap_format="bin", save_space=False, **env):
output_colmap_format="bin", save_space=False, max_sequence_length=1000, **env):
proj = Proj(system)
indoor_videos = []
final_metadata = []
......@@ -228,7 +228,8 @@ def process_video_folder(videos_list, existing_pictures, output_video_folder, im
georef, frame_paths = get_georef(video_metadata_1fps)
path_lists_output[v]["frames_lowfps"] = frame_paths
path_lists_output[v]["georef_lowfps"] = georef
path_lists_output[v]["frames_full"] = list(video_metadata["image_path"])
num_chunks = len(video_metadata) // max_sequence_length + 1
path_lists_output[v]["frames_full"] = [list(frames) for frames in np.array_split(video_metadata["image_path"], num_chunks)]
if save_space:
frame_ids = list(video_metadata[video_metadata["sampled"]]["frame"].values)
if len(frame_ids) > 0:
......
......@@ -34,49 +34,80 @@ class Colmap(Wrapper):
self.__call__(options)
def map(self, output_model, input_model=None, multiple_models=False, start_frame_id=None):
def map(self, output, input=None, multiple_models=False, start_frame_id=None):
options = ["mapper", "--database_path", self.db,
"--image_path", self.image_path,
"--output_path", output_model,
"--Mapper.tri_ignore_two_view_tracks", "0"]
"--output_path", output]
if start_frame_id is not None:
options += ["--Mapper.init_image_id1", str(start_frame_id)]
if not multiple_models:
options += ["--Mapper.multiple_models", "0"]
if input_model is not None:
options += ["--input_path", input_model]
if input is not None:
options += ["--input_path", input]
options += ["--Mapper.fix_existing_images", "1"]
self.__call__(options)
def register_images(self, output_model, input_model):
def register_images(self, output, input):
options = ["image_registrator", "--database_path", self.db,
"--output_path", output_model,
"--input_path", input_model]
"--output_path", output,
"--input_path", input]
self.__call__(options)
def adjust_bundle(self, output_model, input_model, num_iter=10):
def adjust_bundle(self, output, input, num_iter=10):
options = ["bundle_adjuster",
"--output_path", output_model,
"--input_path", input_model,
"--output_path", output,
"--input_path", input,
"--BundleAdjustment.refine_extra_params", "0",
"--BundleAdjustment.max_num_iterations", str(num_iter)]
self.__call__(options)
def triangulate_points(self, output_model, input_model):
def triangulate_points(self, output, input, clear_points=True):
options = ["point_triangulator",
"--database_path", self.db,
"--image_path", self.image_path,
"--output_path", output_model,
"--input_path", input_model]
"--output_path", output,
"--input_path", input]
if clear_points:
options += ["--clear_points", "1"]
self.__call__(options)
def align_model(self, output_model, input_model, ref_images, max_error=5):
options = ["model_aligner", "--input_path", input_model,
"--output_path", output_model, "--ref_images_path",
def align_model(self, output, input, ref_images, max_error=5):
options = ["model_aligner", "--input_path", input,
"--output_path", output, "--ref_images_path",
ref_images, "--robust_alignment_max_error", str(max_error)]
self.__call__(options)
def export_model(self, output_path, input_model, output_type="PLY"):
options = ["model_converter", "--input_path", input_model,
"--output_path", output_path, "--output_type", output_type]
def export_model(self, output, input, output_type="PLY"):
options = ["model_converter", "--input_path", input,
"--output_path", output, "--output_type", output_type]
self.__call__(options)
def undistort(self, output, input, max_image_size=1000):
    """Run COLMAP's image_undistorter on a reconstructed model.

    :param output: path where the undistorted workspace is written
    :param input: path of the input sparse model
    :param max_image_size: maximum side length of undistorted images
    """
    options = ["image_undistorter"]
    options += ["--image_path", self.image_path]
    options += ["--input_path", input]
    options += ["--output_path", output]
    options += ["--output_type", "COLMAP"]
    options += ["--max_image_size", str(max_image_size)]
    self.__call__(options)
def dense_stereo(self, workspace):
    """Run COLMAP's patch_match_stereo for dense depth estimation.

    :param workspace: path of the undistorted COLMAP workspace
    """
    options = ["patch_match_stereo"]
    options += ["--workspace_path", workspace]
    options += ["--workspace_format", "COLMAP"]
    options += ["--PatchMatchStereo.geom_consistency", "1"]
    self.__call__(options)
def stereo_fusion(self, workspace, output_model):
    """Run COLMAP's stereo_fusion to fuse depth maps into a dense point cloud.

    :param workspace: path of the workspace produced by dense_stereo
    :param output_model: path of the fused PLY output
    """
    # Fixed: the original was missing a comma after "stereo_fusion",
    # which made Python concatenate it with the next string literal into
    # "stereo_fusion--workspace_path", silently dropping the flag.
    options = ["stereo_fusion", "--workspace_path", workspace,
               "--workspace_format", "COLMAP",
               "--input_type", "geometric",
               "--output_path", output_model]
    self.__call__(options)
def merge_models(self, output, input1, input2):
    """Run COLMAP's model_merger to combine two sparse models into one.

    :param output: path where the merged model is written
    :param input1: path of the first input model
    :param input2: path of the second input model
    """
    options = ["model_merger"]
    options += ["--output_path", output]
    options += ["--input_path1", input1]
    options += ["--input_path2", input2]
    self.__call__(options)
def index_images(self, vocab_tree_output, vocab_tree_input):
    """Run COLMAP's vocab_tree_retriever to index database images.

    :param vocab_tree_output: path of the output vocabulary-tree index
    :param vocab_tree_input: path of the input vocabulary tree
    """
    options = ["vocab_tree_retriever"]
    options += ["--database_path", self.db]
    options += ["--vocab_tree_path", vocab_tree_input]
    options += ["--output_index", vocab_tree_output]
    self.__call__(options)
......@@ -28,3 +28,9 @@ class PCLUtil(Wrapper):
if output_cloud is not None:
options += ["--output_cloud", output_cloud]
self.__call__(options)
def filter_cloud(self, output_file, input_file, knn=6, std=5):
    """Run the CloudSOR tool to statistically remove outliers from a cloud.

    :param output_file: path of the filtered PLY output
    :param input_file: path of the input PLY cloud
    :param knn: number of neighbors used for the mean-distance estimate
    :param std: standard-deviation multiplier threshold
    """
    options = ["CloudSOR"]
    options += ["--input", input_file]
    options += ["--output", output_file]
    options += ["--knn", str(knn)]
    options += ["--std", str(std)]
    self.__call__(options)
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment