|
|
|
% Global pgfplots configuration for this chapter's plots.
\pgfplotsset{
  compat=1.11,
  % Collapse the legend's sample line to a single plot mark: all three
  % legend coordinates coincide at the origin instead of spanning 0.6cm.
  legend image code/.code={%
    \draw[mark repeat=2,mark phase=2]
      plot coordinates {
        (0cm,0cm)
        (0.0cm,0cm) %% default is (0.3cm,0cm)
        (0.0cm,0cm) %% default is (0.6cm,0cm)
      };%
  }
}
|
|
|
|
\begin{figure}
  \centering
  % Top panel: validation classification accuracy per training epoch.
  % NOTE: subcaption's optional argument is vertical alignment (t/c/b);
  % the former [h!] is not a valid alignment specifier.
  \begin{subfigure}[b]{\textwidth}
    \begin{tikzpicture}
      \begin{axis}[tick style = {draw = none}, width = \textwidth,
          height = 0.65\textwidth,
          xtick = {1, 3, 5,7,9,11,13,15,17,19},
          xticklabels = {$2$, $4$, $6$, $8$,
            $10$,$12$,$14$,$16$,$18$,$20$},
          xlabel = {training epoch}, ylabel = {classification accuracy}]
        \addplot table
          [x=epoch, y=val_accuracy, col sep=comma] {Plots/Data/GD_01.log};
        \addplot table
          [x=epoch, y=val_accuracy, col sep=comma] {Plots/Data/GD_05.log};
        \addplot table
          [x=epoch, y=val_accuracy, col sep=comma] {Plots/Data/GD_1.log};
        \addplot table
          [x=epoch, y=val_accuracy, col sep=comma]
          {Plots/Data/SGD_01_b32.log};

        \addlegendentry{GD$_{0.01}$}
        \addlegendentry{GD$_{0.05}$}
        \addlegendentry{GD$_{0.1}$}
        \addlegendentry{SGD$_{0.01}$}
      \end{axis}
    \end{tikzpicture}
    %\caption{Classification accuracy}
  \end{subfigure}
  % Bottom panel: validation loss per training epoch.
  \begin{subfigure}[b]{\textwidth}
    \begin{tikzpicture}
      \begin{axis}[tick style = {draw = none}, width = \textwidth,
          height = 0.65\textwidth,
          ytick = {0, 1, 2, 3, 4},
          yticklabels = {$0$, $1$, $\phantom{0.}2$, $3$, $4$},
          xtick = {1, 3, 5,7,9,11,13,15,17,19},
          xticklabels = {$2$, $4$, $6$, $8$,
            $10$,$12$,$14$,$16$,$18$,$20$},
          xlabel = {training epoch}, ylabel = {error measure}]
        \addplot table
          [x=epoch, y=val_loss, col sep=comma] {Plots/Data/GD_01.log};
        \addplot table
          [x=epoch, y=val_loss, col sep=comma] {Plots/Data/GD_05.log};
        \addplot table
          [x=epoch, y=val_loss, col sep=comma] {Plots/Data/GD_1.log};
        \addplot table
          [x=epoch, y=val_loss, col sep=comma] {Plots/Data/SGD_01_b32.log};

        \addlegendentry{GD$_{0.01}$}
        \addlegendentry{GD$_{0.05}$}
        \addlegendentry{GD$_{0.1}$}
        \addlegendentry{SGD$_{0.01}$}
      \end{axis}
    \end{tikzpicture}
    \caption{Performance metrics during training}
  \end{subfigure}
  % \\~\\
  % TODO(review): "??" below is an unresolved reference — replace with a
  % \ref to the network definition.
  \caption{The neural network given in ?? trained with different
    algorithms on the MNIST handwritten digits data set. For gradient
    descent the learning rates 0.01, 0.05 and 0.1 are used
    (GD$_{\cdot}$). For stochastic gradient descent a batch size of 32
    and a learning rate of 0.01 is used (SGD$_{0.01}$).}
  \label{fig:sgd_vs_gd}
\end{figure}
|
|
|
|
|
|
|
|
\begin{table}
  \centering
  % Table captions conventionally go above the table body.
  % Cross-reference fixed: the figure is labelled fig:sgd_vs_gd, not
  % ref:sdg_vs_gd.
  \caption{Performance metrics of the networks trained in
    Figure~\ref{fig:sgd_vs_gd} after 20 training epochs.}
  % NOTE(review): consider renaming this label to tab:sgd_vs_gd for the
  % conventional tab: prefix — check existing \ref uses first.
  \label{sgd_vs_gd}
  \begin{tabu} to \textwidth {@{} *4{X[c]}c*4{X[c]} @{}}
    \multicolumn{4}{c}{Classification Accuracy}
    &~&\multicolumn{4}{c}{Error Measure}
    \\\cline{1-4}\cline{6-9}
    GD$_{0.01}$&GD$_{0.05}$&GD$_{0.1}$&SGD$_{0.01}$&&GD$_{0.01}$&GD$_{0.05}$&GD$_{0.1}$&SGD$_{0.01}$
    \\\cline{1-4}\cline{6-9}
    1&1&1&1&&1&1&1&1
  \end{tabu}
\end{table}
|
|
|
|
%%% Local Variables:
|
|
|
|
%%% mode: latex
|
|
|
|
%%% TeX-master: "../main"
|
|
|
|
%%% End:
|