Pinard Clement / drone-depth-validation-set / Commits

Commit 0038991a, authored Sep 17, 2020 by Clément Pinard
Parent: 6b6ad0c6 · 7 changed files

    deal with video without GPS
Dockerfile

...
@@ -8,6 +8,7 @@ ARG DEBIAN_FRONTEND=noninteractive
 RUN apt update \
     && apt install -y git \
         repo \
         ffmpeg \
         python3 \
         python3-pip \
         python \
...
cli_utils.py

...
@@ -23,10 +23,6 @@ def set_argparser():
     main_parser.add_argument('--vid_ext', nargs='+', default=[".mp4", ".MP4"])
     main_parser.add_argument('--pic_ext', nargs='+', default=[".jpg", ".JPG", ".png", ".PNG"])
     main_parser.add_argument('--raw_ext', nargs='+', default=[".ARW", ".NEF", ".DNG"])
-    main_parser.add_argument('--more_sift_features', action="store_true")
-    main_parser.add_argument('--triangulate', action="store_true")
-    main_parser.add_argument('--save_space', action="store_true")
-    main_parser.add_argument('--add_new_videos', action="store_true")
     main_parser.add_argument('--resume_work', action="store_true")
     main_parser.add_argument('--inspect_dataset', action="store_true")
     main_parser.add_argument('--registration_method', choices=["simple", "eth3d", "interactive"], default="simple")
...
@@ -57,14 +53,21 @@ def set_argparser():
     exec_parser.add_argument("--ffmpeg", default="ffmpeg", type=Path)
     exec_parser.add_argument("--pcl_util", default="pcl_util/build", type=Path)
-    vr_parser = parser.add_argument_group("Video Registration")
-    vr_parser.add_argument("--vocab_tree", type=Path, default="vocab_tree_flickr100K_words256K.bin")
     pm_parser = parser.add_argument_group("Photogrammetry")
+    pm_parser.add_argument('--vocab_tree', type=Path, default="vocab_tree_flickr100K_words256K.bin")
+    pm_parser.add_argument('--triangulate', action="store_true")
+    pm_parser.add_argument('--multiple_models', action='store_true',
+                           help='If selected, will let colmap mapper do multiple models. '
+                                'The biggest one will then be chosen')
+    pm_parser.add_argument('--more_sift_features', action="store_true")
+    pm_parser.add_argument('--save_space', action="store_true")
+    pm_parser.add_argument('--add_new_videos', action="store_true")
     om_parser = parser.add_argument_group("Occlusion Mesh")
     om_parser.add_argument('--normals_method', default="radius", choices=["radius", "neighbours"])
     om_parser.add_argument('--normals_radius', default=0.2, type=float)
     om_parser.add_argument('--normals_neighbours', default=8, type=int)
     om_parser.add_argument('--mesh_resolution', default=0.2, type=float)
     om_parser.add_argument('--splats', action='store_true')
     om_parser.add_argument('--splat_threshold', default=0.1, type=float)
     om_parser.add_argument('--max_occlusion_depth', default=250, type=float)
     return parser
...
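A note on the option moves above: argparse groups only change how --help is
laid out; parsed values still land in one flat namespace, so relocating
options like --save_space from the main parser into the "Photogrammetry"
group is backward compatible for existing callers. A minimal standalone
sketch (not repo code), using the new --multiple_models flag:

    # Standalone sketch: group membership is invisible when reading args.
    from argparse import ArgumentParser

    parser = ArgumentParser()
    pm_parser = parser.add_argument_group("Photogrammetry")
    pm_parser.add_argument('--multiple_models', action='store_true',
                           help='If selected, will let colmap mapper do multiple models. '
                                'The biggest one will then be chosen')

    args = parser.parse_args(['--multiple_models'])
    print(args.multiple_models)  # True, same as if declared on the main parser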
install_dependencies.sh

...
@@ -17,6 +17,7 @@ sudo apt update
 sudo apt install -y git \
     curl \
     cmake \
     ffmpeg \
     build-essential \
     pkg-config \
     libboost-all-dev \
...
main_pipeline.py

...
@@ -115,17 +115,24 @@ def main():
         colmap.extract_features(image_list=env["video_frame_list_thorough"], more=args.more_sift_features)
         colmap.index_images(vocab_tree_output=env["indexed_vocab_tree"], vocab_tree_input=args.vocab_tree)
         colmap.match(method="vocab_tree", vocab_tree=env["indexed_vocab_tree"])
-        colmap.map(output=env["thorough_recon"])
-        colmap.adjust_bundle(output=env["thorough_recon"] / "0", input=env["thorough_recon"] / "0",
+        colmap.map(output=env["thorough_recon"], multiple_models=env["multiple_models"])
+        thorough_model = pi.choose_biggest_model(env["thorough_recon"])
+        colmap.adjust_bundle(thorough_model, thorough_model,
                              num_iter=100, refine_extra_params=True)
+    else:
+        thorough_model = pi.choose_biggest_model(env["thorough_recon"])
     i += 1
     if i not in args.skip_step:
         print_step(i, "Alignment of photogrammetric reconstruction with GPS")
         env["georef_recon"].makedirs_p()
         colmap.align_model(output=env["georef_recon"],
-                           input=env["thorough_recon"] / "0",
+                           input=thorough_model,
                            ref_images=env["georef_frames_list"])
+        if not (env["georef_frames_list"] / "images.bin").isfile():
+            # GPS alignment failed, possibly because not enough GPS referenced images
+            # Copy the original model without alignment
+            (env["thorough_recon"] / "0").merge_tree(env["georef_full_recon"])
         env["georef_recon"].merge_tree(env["georef_full_recon"])
     if args.inspect_dataset:
         colmap.export_model(output=env["georef_recon"] / "georef_sparse.ply",
...
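The isfile()/merge_tree() calls above are path.py Path methods, the same API
the pipeline already uses for makedirs_p() and stripext(). A hedged sketch of
the new fallback in isolation (function name and arguments invented here):

    # Sketch: if COLMAP's model aligner wrote no images.bin (e.g. too few
    # GPS-tagged frames), reuse the unaligned model so later steps still
    # have a reconstruction to work with. merge_tree() recursively copies
    # one directory tree into another, creating the destination if needed.
    from path import Path

    def fallback_if_unaligned(aligned_dir, unaligned_model_dir):
        if not (Path(aligned_dir) / "images.bin").isfile():
            # GPS alignment failed; keep the original, unaligned model
            Path(unaligned_model_dir).merge_tree(aligned_dir)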
@@ -217,6 +224,7 @@ def main():
         pcl_util.create_vis_file(env["georefrecon_ply"], env["with_normals_path"],
                                  resolution=args.mesh_resolution,
                                  output=env["with_normals_path"].stripext() + "_subsampled.ply")
         colmap.delaunay_mesh(env["occlusion_ply"], input_ply=env["with_normals_path"].stripext() + "_subsampled.ply")
         if args.splats:
             eth3d.create_splats(env["splats_ply"], env["with_normals_path"], env["occlusion_ply"],
                                 threshold=args.splat_threshold)
     if args.inspect_dataset:
...
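For reference, stripext() in the hunk above is path.py's extension-stripping
helper, which is how the "_subsampled.ply" sibling handed to delaunay_mesh is
derived. An illustration with an invented path:

    # Illustration only: deriving the subsampled sibling filename.
    from path import Path

    p = Path("workspace/with_normals.ply")
    print(p.stripext() + "_subsampled.ply")  # workspace/with_normals_subsampled.ply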
main_pipeline_no_lidar.py

...
@@ -73,9 +73,12 @@ def main():
         colmap.extract_features(image_list=env["video_frame_list_thorough"], more=args.more_sift_features)
         colmap.index_images(vocab_tree_output=env["indexed_vocab_tree"], vocab_tree_input=args.vocab_tree)
         colmap.match(method="vocab_tree", vocab_tree=env["indexed_vocab_tree"])
-        colmap.map(output=env["thorough_recon"])
-        colmap.adjust_bundle(output=env["thorough_recon"] / "0", input=env["thorough_recon"] / "0",
+        colmap.map(output=env["thorough_recon"], multiple_models=env["multiple_models"])
+        thorough_model = pi.choose_biggest_model(env["thorough_recon"])
+        colmap.adjust_bundle(thorough_model, thorough_model,
                              num_iter=100, refine_extra_params=True)
+    else:
+        thorough_model = pi.choose_biggest_model(env["thorough_recon"])
     i += 1
     if i not in args.skip_step:
...
@@ -84,6 +87,10 @@ def main():
         colmap.align_model(output=env["georef_recon"],
                            input=env["thorough_recon"] / "0",
                            ref_images=env["georef_frames_list"])
+        if not (env["georef_frames_list"] / "images.bin").isfile():
+            # GPS alignment failed, possibly because not enough GPS referenced images
+            # Copy the original model without alignment
+            (env["thorough_recon"] / "0").merge_tree(env["georef_full_recon"])
         env["georef_recon"].merge_tree(env["georef_full_recon"])
     if args.inspect_dataset:
         colmap.export_model(output=env["georef_recon"] / "georef_sparse.ply",
...
@@ -130,6 +137,7 @@ def main():
     if i not in args.skip_step:
         print_step(i, "Occlusion Mesh computing")
         colmap.delaunay_mesh(env["occlusion_ply"], input_ply=env["georefrecon_ply"])
         if args.splats:
             eth3d.create_splats(env["splats_ply"], env["georefrecon_ply"].stripext() + "_filtered.ply",
                                 env["occlusion_ply"], threshold=args.splat_threshold)
     if args.inspect_dataset:
...
prepare_images.py

...
@@ -5,6 +5,7 @@ import rawpy
 import imageio
 import generate_sky_masks as gsm
 import videos_to_colmap as v2c
+import colmap_util as ci


 def extract_gps_and_path(existing_pictures, image_path, system, centroid=None, **env):
...
@@ -62,3 +63,9 @@ def extract_videos_to_workspace(video_path, video_frame_list_thorough, georef_fr
         f.write("\n".join(l) + "\n")
     gsm.process_folder(folder_to_process=video_path, **env)
     return extracted_video_folders
+
+
+def choose_biggest_model(dir):
+    colmap_model_dirs = dir.dirs("[0-9]*")
+    model_sizes = [len(ci.read_model.read_images_binary(d / "images.bin")) for d in colmap_model_dirs]
+    return colmap_model_dirs[model_sizes.index(max(model_sizes))]
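When colmap mapper is allowed to produce multiple models, it writes each
sub-model to a numbered sub-directory (0/, 1/, ...), each with its own
images.bin listing the registered images; choose_biggest_model returns the
sub-directory that registered the most. A hypothetical usage sketch (paths
invented):

    # Hypothetical usage: pick the largest sub-model after a mapper run.
    import prepare_images as pi
    from path import Path

    recon = Path("workspace/thorough_recon")  # contains 0/, 1/, ...
    best = pi.choose_biggest_model(recon)
    print(best)  # e.g. workspace/thorough_recon/1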
requirements.txt

...
@@ -15,3 +15,4 @@ pebble
 dicttoxml
 lxml
 scikit-image
+rawpy
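A hedged note on the new rawpy requirement: the pipeline accepts RAW stills
(--raw_ext defaults to .ARW/.NEF/.DNG), and rawpy plus imageio is the usual
way to demosaic them into standard 8-bit images. A sketch of that conversion
(not the repo's exact code; names invented):

    # Sketch: decode a camera RAW file and save it as a regular image.
    import rawpy
    import imageio

    def raw_to_image(raw_path, out_path):
        with rawpy.imread(raw_path) as raw:  # open the RAW container
            rgb = raw.postprocess()          # demosaic to an RGB ndarray
            imageio.imwrite(out_path, rgb)   # write e.g. a JPEG or PNG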