\relax
\citation{brambilla2013swarm}
\citation{balta2013terrain}
\citation{bhoi2019monocular}
\citation{godard2017unsupervised}
\citation{xu2018structured}
\citation{eigen2014depth}
\citation{laina2016deeper}
\citation{alhashim2018high}
\citation{geiger2013vision}
\citation{valada16iser}
\citation{saxena2008make3d}
\citation{saxena2007learning}
\citation{firman2016rgbd}
\citation{Silberman:ECCV12}
\@writefile{toc}{\contentsline {section}{\numberline {I}Introduction}{1}}
\@writefile{toc}{\contentsline {section}{\numberline {II}Mobile sensor platform setup}{1}}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces \textbf {Depth data rig.} Our recording system is equipped with a D435i depth camera, rotary encoder, microcontroller board, and GPS.}}{2}}
\newlabel{fig:platform}{{1}{2}}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces \textbf {Sensor setup.} Top view of the recording platform, showing the mounting position and dimensions of each sensing device. Black lines represent the wheels and the box. Blue lines represent the sensors we use: the depth camera and its base, the rotary encoder, and the microcontroller board. Dashed lines indicate parts obscured by other components. Red labels give the name and dimensions of each device. The camera is mounted 150 mm above the ground and the wheel axis 50 mm above the ground. Dimensions are in millimeters.}}{2}}
\newlabel{fig:setup}{{2}{2}}
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces \textbf {GPS satellite map.} One of the GPS trajectories recorded in Southampton Common; the GPS information is included in the metadata of the CSV file set.}}{2}}
\newlabel{fig:path}{{3}{2}}
\@writefile{toc}{\contentsline {section}{\numberline {III}Forest environment dataset}{2}}
\@writefile{toc}{\contentsline {subsection}{\numberline {III-A}Data description}{2}}
\@writefile{toc}{\contentsline {section}{\numberline {IV}Quality of our forest environment dataset}{2}}
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces \textbf {Examples from the forest environment dataset.} A diverse set of scenes recorded in the forest, shown as RGB (left) with the aligned depth in grayscale (middle) and color (right). In grayscale, lighter pixels indicate regions further from the camera, and white pixels are out of range. The gradients in depth are better illustrated in color, with brighter colored pixels indicating regions closer to the camera. In both color schemes, black pixels indicate the depth could not be estimated.}}{3}}
\newlabel{fig:example-frames}{{4}{3}}
\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces \textbf {Velocity and acceleration of the mobile sensor platform.} The linear velocity and acceleration of the mobile sensor platform in the forward direction while it was pushed through the forest. Data for the distribution were aggregated across all five runs of the dataset. Instantaneous velocity and acceleration were estimated with the rotary encoder position sensor.}}{3}}
\newlabel{fig:platform-speed}{{5}{3}}
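
The Fig. 5 caption notes that instantaneous velocity and acceleration were estimated from the rotary encoder. Below is a minimal Python sketch of one way to do this by finite differencing; it is not the authors' code. The tick resolution is an assumed value, and the 50 mm wheel radius is inferred from the wheel-axis height given in the Fig. 2 caption.

import numpy as np

# Assumed encoder parameters; the paper's actual values may differ.
TICKS_PER_REV = 2048     # encoder resolution (assumption)
WHEEL_RADIUS_M = 0.05    # wheel-axis height of 50 mm implies ~50 mm radius

def velocity_acceleration(ticks, t):
    """Estimate forward velocity and acceleration from encoder readings.

    ticks: cumulative encoder counts per sample
    t:     sample timestamps in seconds
    """
    # Convert cumulative ticks to distance traveled in meters.
    dist = np.asarray(ticks, dtype=float) / TICKS_PER_REV * 2 * np.pi * WHEEL_RADIUS_M
    t = np.asarray(t, dtype=float)
    # Central finite differences give instantaneous estimates.
    vel = np.gradient(dist, t)
    acc = np.gradient(vel, t)
    return vel, acc
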
\bibstyle{IEEEtran}
\bibdata{IEEEabrv}
\bibcite{brambilla2013swarm}{1}
\bibcite{balta2013terrain}{2}
\bibcite{bhoi2019monocular}{3}
\bibcite{godard2017unsupervised}{4}
\bibcite{xu2018structured}{5}
\bibcite{eigen2014depth}{6}
\bibcite{laina2016deeper}{7}
\@writefile{lot}{\contentsline {table}{\numberline {I}{\ignorespaces \textbf {Forest environment recording conditions.} Our mobile sensor platform was pushed through the forest in five separate runs across different weather conditions and times of day. The recorded data illustrate variations in lighting conditions. Luminosity, the perceived brightness of an image, is estimated from the Y channel of the YUV color scheme.}}{4}}
\newlabel{tab:env}{{I}{4}}
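
The Table I caption states that luminosity is estimated with the Y channel of the YUV color scheme. A minimal sketch of that estimate follows, assuming the standard BT.601 luma weights; the function name mean_luminosity is illustrative, not from the paper.

import numpy as np

def mean_luminosity(rgb):
    """Mean perceived brightness of an RGB image (H x W x 3, uint8).

    Uses the BT.601 luma transform, i.e. the Y channel of YUV.
    """
    r = rgb[..., 0].astype(float)
    g = rgb[..., 1].astype(float)
    b = rgb[..., 2].astype(float)
    y = 0.299 * r + 0.587 * g + 0.114 * b   # Y in [0, 255]
    return float(y.mean())
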
\@writefile{toc}{\contentsline {section}{\numberline {V}Depth estimation}{4}}
\@writefile{lot}{\contentsline {table}{\numberline {II}{\ignorespaces Evaluation metrics.}}{4}}
\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces \textbf {Velocity, luminosity, and fill rate.} The correlation between velocity and fill rate, and between luminosity and fill rate, respectively. A color scale is applied to the velocity axis, with colors from blue to red indicating low to high velocity.}}{4}}
\newlabel{fig:vlf}{{6}{4}}
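
Fig. 6 correlates velocity and luminosity with fill rate. The captions here do not define fill rate; assuming it is the fraction of pixels carrying a valid (nonzero) depth value, consistent with Fig. 4's note that black pixels have no depth estimate, a sketch would be:

import numpy as np

def fill_rate(depth):
    """Fraction of pixels with a valid depth value.

    Assumes invalid pixels are encoded as 0 (an assumption; see Fig. 4,
    where black pixels indicate depth could not be estimated).
    """
    depth = np.asarray(depth)
    return float(np.count_nonzero(depth) / depth.size)

# Per-frame correlation against velocity (or luminosity), e.g.:
# np.corrcoef(velocities, [fill_rate(d) for d in depth_frames])[0, 1]
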
\@writefile{toc}{\contentsline {section}{\numberline {VI}Summary and future work}{4}}
\@writefile{toc}{\contentsline {section}{References}{4}}
\bibcite{alhashim2018high}{8}
\bibcite{geiger2013vision}{9}
\bibcite{valada16iser}{10}
\bibcite{saxena2008make3d}{11}
\bibcite{saxena2007learning}{12}
\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces \textbf {Position of sampled points for accuracy of depth images.} Nine points at varying depths and positions were sampled from a typical forest scene. Points 1, 3, 5, and 7 are on a fallen tree branch; points 4 and 6 are part of the forest floor, particularly close to the camera; and points 2, 8, and 9 are located on tree trunks close to the ground. Points 4 and 8 are the nearest to and furthest from the camera, respectively.}}{5}}
\newlabel{fig:depth-points}{{7}{5}}
\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces \textbf {Accuracy of the depth image data.} The accuracy of depth images for nine sampled points, P1 to P9. Ground truth measurements were averaged over three replicates. Depth image data was averaged over $7 \times 7$ pixels surrounding the focal point and over two replicates. Points on the diagonal dotted line indicate depth estimates identical to ground truth measurements.}}{5}}
\newlabel{fig:depth-error}{{8}{5}}
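
The Fig. 8 caption describes averaging depth over $7 \times 7$ pixels surrounding each focal point. A minimal sketch of that sampling, assuming zero encodes missing depth and the point lies away from the image border:

import numpy as np

def point_depth(depth, row, col, win=7):
    """Average depth over a win x win patch centered on (row, col).

    Invalid (zero) pixels are excluded from the mean, an assumption
    consistent with black pixels denoting missing depth in Fig. 4.
    """
    h = win // 2
    patch = np.asarray(depth)[row - h:row + h + 1, col - h:col + h + 1]
    valid = patch[patch > 0]
    return float(valid.mean()) if valid.size else np.nan
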
\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces \textbf {Depth estimation results.} }}{5}}
\newlabel{fig:depth}{{9}{5}}
\bibcite{firman2016rgbd}{13}
\bibcite{Silberman:ECCV12}{14}