Pinard Clement / drone-depth-validation-set · Commits · 4335146a

Commit 4335146a, authored Dec 02, 2020 by Clément Pinard
parent f045d588

Add metadata to converted dataset if available

Changes: 4 files (anafi_metadata.py, convert_dataset.py, depth_evaluation.py, inference_toolkit.py)
anafi_metadata.py
```diff
@@ -75,9 +75,8 @@ def extract_metadata(folder_path, file_path, native_wrapper, proj, w, h, f, save
     metadata['frame'] = metadata.index + 1
     metadata["location_valid"] = metadata["location_valid"] == 1
     fx = metadata["width"] / (2 * np.tan(metadata["picture_hfov"] * np.pi / 360))
-    fy = metadata["v_focal"] = metadata["height"] / (2 * np.tan(metadata["picture_vfov"] * np.pi / 360))
-    params = np.stack([fx.values, fy.values], axis=-1)
-    print(params.shape)
+    fy = metadata["height"] / (2 * np.tan(metadata["picture_vfov"] * np.pi / 360))
+    params = np.stack([fx.values, fy.values, metadata["width"] / 2, metadata["height"] / 2], axis=-1)
     metadata["camera_params"] = [tuple(p) for p in params]
     if save_path is not None:
```
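The focal computation in this hunk is the standard pinhole relation between field of view and focal length in pixels, f = (size / 2) / tan(fov / 2); the `* np.pi / 360` converts a full FOV in degrees to a half-angle in radians. A minimal sketch with made-up Anafi-like numbers (the values are illustrative, not from the dataset):

```python
import numpy as np

w, h = 1920, 1080          # frame size in pixels (example values)
hfov, vfov = 69.0, 43.0    # full horizontal/vertical FOV in degrees (example values)

# f = (size / 2) / tan(fov / 2); fov * pi / 360 is the half-angle in radians
fx = w / (2 * np.tan(hfov * np.pi / 360))
fy = h / (2 * np.tan(vfov * np.pi / 360))
cx, cy = w / 2, h / 2      # principal point assumed at the image center, as in the hunk
print(fx, fy, cx, cy)
```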
convert_dataset.py
```diff
@@ -12,12 +12,13 @@ from wrappers import FFMpeg
 import gzip
 from pebble import ProcessPool
 import yaml
+from itertools import product


 def save_intrinsics(cameras, images, output_dir, output_width=None, downscale=None):
     def construct_intrinsics(cam, downscale):
         # assert('PINHOLE' in cam.model)
-        if 'SIMPLE' in cam.model:
+        if 'SIMPLE' in cam.model or 'RADIAL' in cam.model:
             fx, cx, cy, *_ = cam.params
             fy = fx
         else:
```
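The `or 'RADIAL'` broadening follows COLMAP's documented parameter orders: SIMPLE_PINHOLE, SIMPLE_RADIAL and RADIAL all store a single shared focal length first (`f, cx, cy, ...`), while PINHOLE stores `fx, fy, cx, cy`. A hedged sketch of the unpacking for both families (helper name is hypothetical):

```python
# COLMAP parameter order (per the COLMAP camera model docs):
#   SIMPLE_PINHOLE / SIMPLE_RADIAL / RADIAL: f, cx, cy, [k...]
#   PINHOLE:                                 fx, fy, cx, cy
def focal_and_center(model, params):
    if 'SIMPLE' in model or 'RADIAL' in model:
        fx, cx, cy, *_ = params   # trailing distortion terms, if any, are ignored
        fy = fx                   # single shared focal length
    else:
        fx, fy, cx, cy, *_ = params
    return fx, fy, cx, cy

print(focal_and_center('SIMPLE_RADIAL', [1000.0, 960.0, 540.0, 0.01]))
```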
```diff
@@ -36,7 +37,7 @@ def save_intrinsics(cameras, images, output_dir, output_width=None, downscale=No
     np.savetxt(intrinsics_path, intrinsics)
     with open(yaml_path, 'w') as f:
         camera_dict = {"model": cam.model,
-                       "params": cam.params,
+                       "params": cam.params.tolist(),
                        "width": cam.width / current_downscale,
                        "height": cam.height / current_downscale}
         yaml.dump(camera_dict, f, default_flow_style=False)
```
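The `.tolist()` conversion is the usual fix for dumping NumPy data with PyYAML: `yaml.safe_dump` refuses a raw `numpy.ndarray` with a `RepresenterError`, and plain `yaml.dump` serializes it as an opaque `!!python/object/apply` blob; a plain Python list yields clean, portable YAML. A minimal sketch:

```python
import numpy as np
import yaml

params = np.array([1000.0, 960.0, 540.0])

# yaml.safe_dump({"params": params})  # raises yaml.representer.RepresenterError
# yaml.dump({"params": params})       # emits an unreadable !!python/object/apply blob

print(yaml.dump({"params": params.tolist()}, default_flow_style=False))
# params:
# - 1000.0
# - 960.0
# - 540.0
```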
```diff
@@ -60,17 +61,22 @@ def to_transform_matrix(q, t):
     return transform


-def save_positions(images, output_dir):
+def save_poses(images, images_list, output_dir):
     starting_pos = None
-    positions = []
-    for _, img in images.items():
-        current_pos = to_transform_matrix(img.qvec, img.tvec)
-        if starting_pos is None:
-            starting_pos = current_pos
-        relative_position = np.linalg.inv(starting_pos) @ current_pos
-        positions.append(relative_position[:3])
-    positions = np.stack(positions)
-    np.savetxt(output_dir / 'poses.txt', positions.reshape((len(images), -1)))
+    poses = []
+    for i in images_list:
+        try:
+            img = images[i]
+            current_pos = to_transform_matrix(img.qvec, img.tvec)
+            if starting_pos is None:
+                starting_pos = current_pos
+            relative_position = np.linalg.inv(starting_pos) @ current_pos
+            poses.append(relative_position[:3])
+        except KeyError:
+            poses.append(np.full((3, 4), np.NaN))
+    poses = np.stack(poses)
+    np.savetxt(output_dir / 'poses.txt', poses.reshape((len(images_list), -1)))
+    return poses


 def high_res_colormap(low_res_cmap, resolution=1000, max_value=1):
```
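`to_transform_matrix` is not shown in this hunk; assuming it packs a COLMAP-style `(qw, qx, qy, qz)` quaternion and a translation into a 4×4 homogeneous matrix, the `np.linalg.inv(starting_pos) @ current_pos` line expresses every frame's pose relative to the first registered frame. A hedged sketch of that construction (the matrix body below is an assumption, not the repository's code):

```python
import numpy as np

def to_transform_matrix(q, t):
    # Assumed implementation: (w, x, y, z) quaternion + translation -> 4x4 matrix
    w, x, y, z = q / np.linalg.norm(q)
    R = np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y)],
        [2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y)],
    ])
    transform = np.eye(4)
    transform[:3, :3] = R
    transform[:3, 3] = t
    return transform

T0 = to_transform_matrix(np.array([1.0, 0, 0, 0]), np.array([0.0, 0, 0]))
T1 = to_transform_matrix(np.array([1.0, 0, 0, 0]), np.array([1.0, 0, 0]))
relative = np.linalg.inv(T0) @ T1   # pose of frame 1 w.r.t. frame 0
print(relative[:3])                 # the 3x4 row that goes into poses.txt
```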
```diff
@@ -215,21 +221,20 @@ def convert_dataset(final_model, depth_dir, images_root_folder, occ_dir,
         video = False
     # Discard images and cameras that are not represented by the image list
-    images = {k: i for k, i in images.items() if i.name in images_list}
+    images = {i.name: i for k, i in images.items() if i.name in images_list}
     cameras_ids = set([i.camera_id for i in images.values()])
     cameras = {k: cameras[k] for k in cameras_ids}
     if downscale is None:
         assert width is not None
     save_intrinsics(cameras, images, dataset_output_dir, width, downscale)
-    save_positions(images, dataset_output_dir)
+    poses = save_poses(images, images_list, dataset_output_dir)
     depth_maps = []
     occ_maps = []
     interpolated = []
     imgs = []
     cameras = []
-    not_registered = 0
+    registered = []
     for i in images_list:
         img_path = images_root_folder / i
```
```diff
@@ -242,6 +247,7 @@ def convert_dataset(final_model, depth_dir, images_root_folder, occ_dir,
             depth_path += ".gz"
             occ_path += ".gz"
         if depth_path.isfile():
+            registered.append(True)
             if occ_path.isfile():
                 occ_maps.append(occ_path)
             else:
```
```diff
@@ -256,12 +262,16 @@ def convert_dataset(final_model, depth_dir, images_root_folder, occ_dir,
         else:
             if verbose > 2:
                 print("Image {} was not registered".format(fname))
-            not_registered += 1
+            registered.append(False)
             depth_maps.append(None)
             occ_maps.append(None)
             interpolated.append(False)
-    print('{}/{} Frames not registered ({:.2f}%)'.format(not_registered, len(images_list), 100 * not_registered / len(images_list)))
-    print('{}/{} Frames interpolated ({:.2f}%)'.format(sum(interpolated), len(images_list), 100 * sum(interpolated) / len(images_list)))
+    print('{}/{} Frames not registered ({:.2f}%)'.format(len(images_list) - sum(registered), len(images_list), 100 * (1 - sum(registered) / len(images_list))))
+    print('{}/{} Frames interpolated ({:.2f}%)'.format(sum(interpolated), len(images_list), 100 * sum(interpolated) / len(images_list)))
     if threads == 1:
         for i, d, o, n in tqdm(zip(imgs, depth_maps, occ_maps, interpolated), total=len(imgs)):
             process_one_frame(i, d, o, dataset_output_dir, video_output_dir, downscale, n, visualization, viz_width=1920)
```
```diff
@@ -278,6 +288,38 @@ def convert_dataset(final_model, depth_dir, images_root_folder, occ_dir,
                 tasks.cancel()
                 raise e

+    if metadata is not None:
+        wanted_keys = ['image_path', 'time', 'height', 'width', 'camera_model', 'camera_id']
+        filtered_metadata = metadata[wanted_keys].copy()
+        filtered_metadata['interpolated'] = interpolated
+        filtered_metadata['registered'] = registered
+        for i, j in product(range(3), range(4)):
+            filtered_metadata['pose{}{}'.format(i, j)] = poses[:, i, j]
+        filtered_metadata["fx"] = np.NaN
+        filtered_metadata["fy"] = np.NaN
+        filtered_metadata["cx"] = np.NaN
+        filtered_metadata["cy"] = np.NaN
+        for cam_id in filtered_metadata["camera_id"].unique():
+            if cam_id not in cameras.keys():
+                continue
+            cam = cameras[cam_id]
+            rows = filtered_metadata["camera_id"] == cam_id
+            filtered_metadata.loc[rows, "fx"] = cam.params[0]
+            if "SIMPLE" in cam.model or "RADIAL" in cam.model:
+                filtered_metadata.loc[rows, "fy"] = cam.params[0]
+                filtered_metadata.loc[rows, "cx"] = cam.params[1]
+                filtered_metadata.loc[rows, "cy"] = cam.params[2]
+            else:
+                filtered_metadata.loc[rows, "fy"] = cam.params[1]
+                filtered_metadata.loc[rows, "cx"] = cam.params[2]
+                filtered_metadata.loc[rows, "cy"] = cam.params[3]
+        filtered_metadata.to_csv(dataset_output_dir / 'metadata.csv')
+
+    not_registered = [i + '\n' for i, r in zip(images_list, registered) if not r]
+    with open(dataset_output_dir / 'not_registered.txt', 'w') as f:
+        f.writelines(not_registered)
+
     if video:
         video_path = str(video_output_dir.parent / '{}_groundtruth_viz.mp4'.format(video_output_dir.stem))
         glob_pattern = str(video_output_dir / '*.png')
```
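The `product(range(3), range(4))` loop flattens each 3×4 pose matrix into twelve scalar columns (`pose00` through `pose23`) so the whole table fits in a flat CSV. A small sketch of the same flattening on a dummy DataFrame (frame names are hypothetical):

```python
from itertools import product
import numpy as np
import pandas as pd

poses = np.arange(24, dtype=float).reshape(2, 3, 4)   # 2 frames, one 3x4 pose each
df = pd.DataFrame({"image_path": ["frame_0001.jpg", "frame_0002.jpg"]})
for i, j in product(range(3), range(4)):
    df['pose{}{}'.format(i, j)] = poses[:, i, j]      # one column per matrix entry
print(df.columns.tolist())  # ['image_path', 'pose00', 'pose01', ..., 'pose23']
```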
depth_evaluation.py
```diff
@@ -2,7 +2,7 @@ from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
 from path import Path
 import numpy as np
 import pandas as pd
-import matplotlib.pytplot as plt
+import matplotlib.pyplot as plt

 parser = ArgumentParser(description='Convert EuroC dataset to COLMAP',
                         formatter_class=ArgumentDefaultsHelpFormatter)
```
```diff
@@ -10,7 +10,7 @@ parser = ArgumentParser(description='Convert EuroC dataset to COLMAP',
 parser.add_argument('--dataset_root', metavar='DIR', type=Path)
 parser.add_argument('--est_depth', metavar='DIR', type=Path,
                     help='where the depth maps are stored, must be a 3D npy file')
-parser.add_argument('--evaluation_list', metavara='PATH', type=Path,
+parser.add_argument('--evaluation_list', metavar='PATH', type=Path,
                     help='File with list of images to test for depth evaluation')
 parser.add_argument('--flight_path_vector_list', metavar='PATH', type=Path,
                     help='File with list of speed vectors, used to compute error wrt direction')
```
```diff
@@ -140,3 +140,7 @@ def main():
     print("Results for usual metrics")
     print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format(*error_names))
     print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(*errors))
+
+
+if __name__ == '__main__':
+    main()
```
inference_toolkit.py
```diff
@@ -2,6 +2,7 @@ import numpy as np
 from path import Path
 from imageio import imread
+import time
 from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter


 class Timer:
```
```diff
@@ -32,15 +33,15 @@ class Timer:
 class inferenceFramework(object):
-    def __init__(self, root, test_files, seq_length=3, min_depth=1e-3, max_depth=80, max_shift=50):
-        self.root = root
+    def __init__(self, root, test_files, min_depth=1e-3, max_depth=80, max_shift=50, frame_transform=None):
+        self.root = Path(root)
         self.test_files = test_files
         self.min_depth, self.max_depth = min_depth, max_depth
         self.max_shift = max_shift

     def __getitem__(self, i):
         timer = Timer()
-        sample = inferenceSample(self.root, self.test_files[i], timer, self.max_shift)
+        sample = inferenceSample(self.root, self.test_files[i], timer, self.max_shift, self.frame_transform)
         sample.timer.start()
         return sample
```
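The new `frame_transform` hook lets callers normalize frames once, inside the framework, instead of in every model. The example added at the bottom of this commit passes a lambda converting an H×W×C uint8 image into a 1×C×H×W float batch in [0, 1]; a standalone sketch of that same transform on a dummy frame:

```python
import numpy as np

def frame_transform(x):
    # HWC uint8 -> 1xCxHxW float32 in [0, 1], same as the example's lambda
    return x.transpose(2, 0, 1).astype(np.float32)[None] / 255

img = np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8)  # dummy frame
batch = frame_transform(img)
print(batch.shape, batch.dtype)  # (1, 3, 360, 640) float32
```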
```diff
@@ -88,7 +89,7 @@ class inferenceSample(object):
         if self.frame_transform is not None:
             img = self.frame_transform(img)
         self.timer.start()
-        return img, self.intrinsics[shift]
+        return img, self.intrinsics[shift], self.poses[shift]

     def get_previous_frame(self, shift=1, displacement=None, max_rot=1):
         self.timer.stop()
```
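With this change `get_frame` returns the pose at the requested shift alongside the image and intrinsics, so callers now unpack three values; the example added at the bottom of this commit does exactly that with `latest_frame, latest_intrinsics, _ = sample.get_frame()`.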
```diff
@@ -103,3 +104,41 @@ class inferenceSample(object):
         final_shift = np.where(rot_valid[-1 - shift:])[-1]
         self.timer.start()
         return *self.get_frame(final_shift), self.poses[final_shift]
+
+
+def inference_toolkit_example():
+    parser = ArgumentParser(description='Example usage of Inference toolkit',
+                            formatter_class=ArgumentDefaultsHelpFormatter)
+    parser.add_argument('--dataset_root', metavar='DIR', type=Path)
+    parser.add_argument('--depth_output', metavar='DIR', type=Path,
+                        help='where to store the estimated depth maps, must be a npy file')
+    parser.add_argument('--evaluation_list_path', metavar='PATH', type=Path,
+                        help='File with list of images to test for depth evaluation')
+    parser.add_argument('--scale-invariant', action='store_true',
+                        help='If selected, will rescale depth map with ratio of medians')
+    args = parser.parse_args()
+
+    with open(args.evaluation_list_path) as f:
+        evaluation_list = [line[:-1] for line in f.readlines()]
+
+    def my_model(frame, previous, pose):
+        # Mock up function that uses two frames and translation magnitude
+        return np.linalg.norm(pose[:, -1]) * np.linalg.norm(frame - previous, axis=-1)
+
+    engine = inferenceFramework(args.root, evaluation_list, lambda x: x.transpose(2, 0, 1).astype(np.float32)[None] / 255)
+    esimated_depth_maps = {}
+    mean_time = []
+    for sample, image_path in zip(engine, evaluation_list):
+        latest_frame, latest_intrinsics, _ = sample.get_frame()
+        previous_frame, previous_intrinsics, previous_pose = sample.get_previous_frame(displacement=0.3)
+        esimated_depth_maps[image_path] = (my_model(latest_frame, previous_frame))
+        time_spent = engine.finish_frame(sample)
+        mean_time.append(time_spent)
+
+    print("Mean time per sample : {:.2f}us".format(1e6 * sum(mean_time) / len(mean_time)))
+    np.savez(args.depth_output, **esimated_depth_maps)
+
+
+if __name__ == '__main__':
+    inference_toolkit_example()
```
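Since the example stores one depth map per image path via `np.savez(args.depth_output, **esimated_depth_maps)`, the evaluation side can read them back as a dict-like `.npz` archive keyed by those same paths. A minimal loading sketch (file name hypothetical):

```python
import numpy as np

archive = np.load('estimated_depths.npz')   # hypothetical output of the example above
for image_path in archive.files:
    depth = archive[image_path]             # per-image depth map, keyed by its path
    print(image_path, depth.shape)
```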