% Shorten the legend line samples to half their default length.
\pgfplotsset{
  compat=1.11,
  legend image code/.code={
    \draw[mark repeat=2,mark phase=2]
      plot coordinates {
        (0cm,0cm)
        (0.15cm,0cm) %% default is (0.3cm,0cm)
        (0.3cm,0cm)  %% default is (0.6cm,0cm)
      };%
  }
}
\begin{figure}
\begin{subfigure}[b]{\textwidth}
\begin{tikzpicture}
\begin{axis}[legend cell align={left},
    yticklabel style={/pgf/number format/fixed,
      /pgf/number format/precision=3},
    tick style={draw=none},
    width=0.975\textwidth, height=0.6\textwidth,
    legend style={at={(0.0125,0.7)}, anchor=north west},
    xlabel={Epoch}, ylabel={Test Accuracy},
    cycle list/Dark2,
    every axis plot/.append style={line width=1.25pt, mark=*, mark size=1pt},
    xtick={1,3,5,7,9,11,13,15,17,19},
    xticklabels={$2$,$4$,$6$,$8$,$10$,$12$,$14$,$16$,$18$,$20$}]
\addplot table
  [x=epoch, y=val_accuracy, col sep=comma] {Figures/Data/GD_01.log};
\addplot table
  [x=epoch, y=val_accuracy, col sep=comma] {Figures/Data/GD_05.log};
\addplot table
  [x=epoch, y=val_accuracy, col sep=comma] {Figures/Data/GD_1.log};
\addplot table
  [x=epoch, y=val_accuracy, col sep=comma] {Figures/Data/SGD_01_b32.log};
\addlegendentry{GD$_{0.01}$}
\addlegendentry{GD$_{0.05}$}
\addlegendentry{GD$_{0.1}$}
\addlegendentry{SGD$_{0.01}$}
\end{axis}
\end{tikzpicture}
\caption{Test accuracy during training.}
\end{subfigure}
% \begin{subfigure}[b]{\textwidth}
% \begin{tikzpicture}
% \begin{axis}[tick style = {draw = none}, width = \textwidth,
% height = 0.6\textwidth,
% ytick = {0, 1, 2, 3, 4},
% yticklabels = {$0$, $1$, $\phantom{0.}2$, $3$, $4$},
% xtick = {1, 3, 5,7,9,11,13,15,17,19},
% xticklabels = {$2$, $4$, $6$, $8$,
% $10$,$12$,$14$,$16$,$18$,$20$},
% xlabel = {training epoch}, ylabel = {error measure\vphantom{fy}}]
% \addplot table
% [x=epoch, y=val_loss, col sep=comma] {Figures/Data/GD_01.log};
% \addplot table
% [x=epoch, y=val_loss, col sep=comma] {Figures/Data/GD_05.log};
% \addplot table
% [x=epoch, y=val_loss, col sep=comma] {Figures/Data/GD_1.log};
% \addplot table
% [x=epoch, y=val_loss, col sep=comma] {Figures/Data/SGD_01_b32.log};
% \addlegendentry{GD$_{0.01}$}
% \addlegendentry{GD$_{0.05}$}
% \addlegendentry{GD$_{0.1}$}
% \addlegendentry{SGD$_{0.01}$}
% \end{axis}
% \end{tikzpicture}
% \caption{Performance metrics during training}
% \end{subfigure}
% \\~\\
\begin{subfigure}[b]{1.0\linewidth}
\begin{tabu} to \textwidth {@{} *4{X[c]}c*4{X[c]} @{}}
\multicolumn{4}{c}{Test Accuracy}
&~&\multicolumn{4}{c}{Test Loss}
\\\cline{1-4}\cline{6-9}
GD$_{0.01}$&GD$_{0.05}$&GD$_{0.1}$&SGD$_{0.01}$&&GD$_{0.01}$&GD$_{0.05}$&GD$_{0.1}$&SGD$_{0.01}$
\\\cline{1-4}\cline{6-9}
0.265&0.633&0.203&0.989&&2.267&1.947&3.911&0.032 \\
\multicolumn{4}{c}{Training Accuracy}
&~&\multicolumn{4}{c}{Training Loss}
\\\cline{1-4}\cline{6-9}
GD$_{0.01}$&GD$_{0.05}$&GD$_{0.1}$&SGD$_{0.01}$&&GD$_{0.01}$&GD$_{0.05}$&GD$_{0.1}$&SGD$_{0.01}$
\\\cline{1-4}\cline{6-9}
0.250&0.599&0.685&0.996&&2.271&1.995&1.089&0.012 \\
\end{tabu}
\caption{Performance metrics after 20 training epochs.}
\label{table:sgd_vs_gd}
\end{subfigure}
\caption[Performance Comparison of SGD and GD]{The neural network
  given in Figure~\ref{fig:mnist_architecture} trained with different
  algorithms on the MNIST handwritten digits data set. For gradient
  descent, learning rates of 0.01, 0.05, and 0.1 are used
  (GD$_{\cdot}$). For stochastic gradient descent, a batch size of 32
  and a learning rate of 0.01 are used (SGD$_{0.01}$).}
\label{fig:sgd_vs_gd}
\end{figure}
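%% A hedged sketch (an addition, not from the original source): the update
%% rules the caption compares, written with loss terms L_i, weights w,
%% learning rate \eta, and a mini-batch B of size 32 for SGD. Left
%% commented out, in the style of the unused subfigure above; uncomment
%% to typeset the equations directly after the figure (requires amsmath).
% \begin{align*}
%   \text{GD:}  \quad w_{t+1} &= w_t - \eta \, \nabla_w \frac{1}{N} \sum_{i=1}^{N} L_i(w_t), \\
%   \text{SGD:} \quad w_{t+1} &= w_t - \eta \, \nabla_w \frac{1}{|B|} \sum_{i \in B} L_i(w_t).
% \end{align*}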
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "../main"
%%% End: