@ -0,0 +1,53 @@
\begin{figure}[h]
\centering
\begin{subfigure}{0.19\textwidth}
\includegraphics[width=\textwidth]{Plots/Data/fashion_mnist0.pdf}
\caption{T-shirt/top}
\end{subfigure}
\begin{subfigure}{0.19\textwidth}
\includegraphics[width=\textwidth]{Plots/Data/fashion_mnist1.pdf}
\caption{Trouser}
\end{subfigure}
\begin{subfigure}{0.19\textwidth}
\includegraphics[width=\textwidth]{Plots/Data/fashion_mnist2.pdf}
\caption{Pullover}
\end{subfigure}
\begin{subfigure}{0.19\textwidth}
\includegraphics[width=\textwidth]{Plots/Data/fashion_mnist3.pdf}
\caption{Dress}
\end{subfigure}
\begin{subfigure}{0.19\textwidth}
\includegraphics[width=\textwidth]{Plots/Data/fashion_mnist4.pdf}
\caption{Coat}
\end{subfigure}\\
\begin{subfigure}{0.19\textwidth}
\includegraphics[width=\textwidth]{Plots/Data/fashion_mnist5.pdf}
\caption{Sandal}
\end{subfigure}
\begin{subfigure}{0.19\textwidth}
\includegraphics[width=\textwidth]{Plots/Data/fashion_mnist6.pdf}
\caption{Shirt}
\end{subfigure}
\begin{subfigure}{0.19\textwidth}
\includegraphics[width=\textwidth]{Plots/Data/fashion_mnist7.pdf}
\caption{Sneaker}
\end{subfigure}
\begin{subfigure}{0.19\textwidth}
\includegraphics[width=\textwidth]{Plots/Data/fashion_mnist8.pdf}
\caption{Bag}
\end{subfigure}
\begin{subfigure}{0.19\textwidth}
\includegraphics[width=\textwidth]{Plots/Data/fashion_mnist9.pdf}
\caption{Ankle boot}
\end{subfigure}
\caption{The Fashion-MNIST data set contains 70,000 preprocessed product
images from Zalando, each labelled as one of ten classes:
T-shirt/top, Trouser, Pullover, Dress, Coat, Sandal, Shirt,
Sneaker, Bag, and Ankle boot. Of these images, 60,000 are used for
training, while the remaining 10,000 are used to validate the trained models.}
\label{fig:MNIST}
\end{figure}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "../main"
%%% End:
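
For context on the split described in the caption above, a minimal sketch of how the data set could be loaded; the use of tf.keras.datasets.fashion_mnist is an assumption about tooling, not something stated in these files:

# Minimal sketch (assumption: a TensorFlow/Keras environment; the source
# above does not say how the data were loaded).
import tensorflow as tf

class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
               "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]

# Keras ships the Zalando Fashion-MNIST set already split 60,000 / 10,000.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
assert x_train.shape == (60000, 28, 28) and x_test.shape == (10000, 28, 28)

# Scale the 8-bit grey-scale images to [0, 1] and add a channel axis.
x_train = x_train[..., None] / 255.0
x_test = x_test[..., None] / 255.0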
@ -0,0 +1,79 @@
\pgfplotsset{
compat=1.11,
legend image code/.code={
\draw[mark repeat=2,mark phase=2]
plot coordinates {
(0cm,0cm)
(0.3cm,0cm) %% default is (0.3cm,0cm)
(0.6cm,0cm) %% default is (0.6cm,0cm)
};%
}
}
\begin{figure}
\begin{subfigure}[t]{\textwidth}
\begin{tikzpicture}
\begin{axis}[legend cell align={left},yticklabel style={/pgf/number format/fixed,
/pgf/number format/precision=3},tick style = {draw = none}, width = \textwidth,
height = 0.6\textwidth, ymin = 0.988, legend style={at={(0.9825,0.0175)},anchor=south east},
xlabel = {epoch}, ylabel = {Classification Accuracy}, cycle list/Dark2]
\addplot table
[x=epoch, y=val_accuracy, col sep=comma, mark = none]
{Plots/Data/adam_datagen_full_mean.log};
\addplot table
[x=epoch, y=val_accuracy, col sep=comma, mark = none]
{Plots/Data/adam_datagen_dropout_02_full_mean.log};
\addplot table
[x=epoch, y=val_accuracy, col sep=comma, mark = none]
{Plots/Data/adam_datagen_dropout_04_full_mean.log};
\addplot table
[x=epoch, y=val_accuracy, col sep=comma, mark = none]
{Plots/Data/adam_dropout_02_full_mean.log};
\addplot table
[x=epoch, y=val_accuracy, col sep=comma, mark = none]
{Plots/Data/adam_dropout_04_full_mean.log};
\addplot [dashed] table
[x=epoch, y=val_accuracy, col sep=comma, mark = none]
{Plots/Data/adam_full_mean.log};

\addlegendentry{\footnotesize{G.}}
\addlegendentry{\footnotesize{G. + D. 0.2}}
\addlegendentry{\footnotesize{G. + D. 0.4}}
\addlegendentry{\footnotesize{D. 0.2}}
\addlegendentry{\footnotesize{D. 0.4}}
\addlegendentry{\footnotesize{Default}}
\end{axis}
\end{tikzpicture}
\caption{Classification accuracy}
\vspace{.25cm}
\end{subfigure}
\begin{subfigure}[t]{1.0\linewidth}
\begin{tabu} to \textwidth {@{} l *6{X[c]} @{}}
\multicolumn{7}{c}{Classification Accuracy}\Bstrut
\\\hline
&\textsc{Adam}&D. 0.2&D. 0.4&G.&G.+D.~0.2&G.+D.~0.4 \Tstrut \Bstrut
\\\hline
mean&0.9914&0.9918&0.9928&0.9937&0.9938&0.9940 \Tstrut \\
max& \\
min& \\
\multicolumn{7}{c}{Training Accuracy}\Bstrut
\\\hline
mean&0.9994&0.9990&0.9989&0.9967&0.9954&0.9926 \Tstrut \\
max& \\
min& \\

\end{tabu}
\caption{Mean, maximum, and minimum accuracy after 48 epochs of training.}
\end{subfigure}
\caption{Accuracy for the net given in ... with Dropout (D.),
data generation (G.), a combination of the two, or neither (Default),
trained with \textsc{Adam}. In each epoch all 60,000 training samples
were used, or, when data generation was enabled, 10,000 steps with
batches of 60 generated data points each. For each configuration the
model was trained 5 times; the average accuracy at each epoch is
shown in (a), and the mean, maximum, and minimum accuracies on the
test and training sets are given in (b).}
\end{figure}
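
The \addplot table commands above read comma-separated logs whose epoch and val_accuracy columns already hold the per-epoch average over the five runs mentioned in the caption. A minimal sketch of how such a *_mean.log file could be produced, assuming pandas and hypothetical per-run file names (only the merged adam_full_mean.log is referenced in the LaTeX source):

# Sketch: average five per-run training logs into the single CSV that
# pgfplots reads. The per-run names (adam_full_run*.log) are assumptions.
import pandas as pd

runs = [pd.read_csv(f"Plots/Data/adam_full_run{i}.log") for i in range(5)]

# Group by epoch and average every numeric column (accuracy, val_accuracy, ...)
# across the five runs.
mean_log = (pd.concat(runs)
              .groupby("epoch", as_index=False)
              .mean(numeric_only=True))

mean_log.to_csv("Plots/Data/adam_full_mean.log", index=False)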
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "../main"
%%% End:
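
For completeness, a sketch of the training configurations compared in the figure above, under stated assumptions: the network is only cited as "..." in the caption, so the small CNN below and the augmentation settings are purely illustrative; Adam, dropout rates of 0.2/0.4, 10,000 steps of 60 generated data points per epoch, 48 epochs, and 5 runs per configuration are taken from the caption.

# Illustrative only: placeholder architecture, augmentation parameters and
# per-run log file names; optimizer, dropout rates, steps/batch size, epochs
# and repetitions follow the figure caption.
import tensorflow as tf
from tensorflow.keras import layers

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
x_train, x_test = x_train[..., None] / 255.0, x_test[..., None] / 255.0

def build_model(dropout_rate=0.0):
    # Placeholder CNN standing in for the net referenced in the caption.
    model = tf.keras.Sequential([
        tf.keras.Input(shape=(28, 28, 1)),
        layers.Conv2D(32, 3, activation="relu"),
        layers.MaxPooling2D(),
        layers.Flatten(),
        layers.Dropout(dropout_rate),          # D. 0.2 or D. 0.4; 0.0 = Default
        layers.Dense(128, activation="relu"),
        layers.Dense(10, activation="softmax"),
    ])
    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    return model

# "Data generation" (G.): augmented batches of 60 samples, 10,000 steps/epoch.
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=10, width_shift_range=0.1,
    height_shift_range=0.1, zoom_range=0.1)

for run in range(5):                           # 5 runs per configuration
    model = build_model(dropout_rate=0.2)      # one of the six configurations
    model.fit(datagen.flow(x_train, y_train, batch_size=60),
              steps_per_epoch=10_000,
              epochs=48,
              validation_data=(x_test, y_test),
              callbacks=[tf.keras.callbacks.CSVLogger(
                  f"Plots/Data/adam_datagen_dropout_02_run{run}.log")])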