% [scraping artifact — Gitea page chrome, not part of the .out file]
% You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

% [scraping artifact — Gitea file statistics, not part of the .out file]
% 26 lines
% 1.8 KiB
% Plaintext

% Auto-generated PDF outline (bookmark) entries written by hyperref into the
% .out file; regenerated on every compile — do not edit by hand.
% Entry format: \BOOKMARK [level][state]{destination}{title}{parent}% index
%   level: 1=section, 2=subsection, 3=subsubsection; [-] = default open state.
\BOOKMARK [1][-]{section.1}{Introduction}{}% 1
\BOOKMARK [1][-]{section.2}{Introduction to Neural Networks}{}% 2
\BOOKMARK [2][-]{subsection.2.1}{Nonlinearity of Neural Networks}{section.2}% 3
\BOOKMARK [2][-]{subsection.2.2}{Training Neural Networks}{section.2}% 4
\BOOKMARK [3][-]{subsubsection.2.2.1}{Nonlinearity in the Last Layer}{subsection.2.2}% 5
\BOOKMARK [3][-]{subsubsection.2.2.2}{Error Measurement}{subsection.2.2}% 6
\BOOKMARK [3][-]{subsubsection.2.2.3}{Gradient Descent Algorithm}{subsection.2.2}% 7
\BOOKMARK [1][-]{section.3}{Shallow Neural Networks}{}% 8
\BOOKMARK [2][-]{subsection.3.1}{Convergence Behavior of One-Dimensional Randomized Shallow Neural Networks}{section.3}% 9
\BOOKMARK [2][-]{subsection.3.2}{Simulations}{section.3}% 10
\BOOKMARK [1][-]{section.4}{Application of Neural Networks to Higher Complexity Problems}{}% 11
\BOOKMARK [2][-]{subsection.4.1}{Convolution}{section.4}% 12
\BOOKMARK [2][-]{subsection.4.2}{Convolutional Neural Networks}{section.4}% 13
\BOOKMARK [2][-]{subsection.4.3}{Stochastic Training Algorithms}{section.4}% 14
\BOOKMARK [2][-]{subsection.4.4}{Modified Stochastic Gradient Descent}{section.4}% 15
\BOOKMARK [2][-]{subsection.4.5}{Combating Overfitting}{section.4}% 16
\BOOKMARK [3][-]{subsubsection.4.5.1}{Dropout}{subsection.4.5}% 17
\BOOKMARK [3][-]{subsubsection.4.5.2}{Manipulation of Input Data}{subsection.4.5}% 18
\BOOKMARK [3][-]{subsubsection.4.5.3}{Comparisons}{subsection.4.5}% 19
\BOOKMARK [3][-]{subsubsection.4.5.4}{Effectiveness for Small Training Sets}{subsection.4.5}% 20
\BOOKMARK [1][-]{section.5}{Summary and Outlook}{}% 21
% [scraping artifact — Gitea "last modified" timestamp, not part of the .out file]
% 4 years ago
% Appendix bookmarks; section*.27 is an unnumbered "Appendices" heading,
% Appendix.1.A–C are entries produced by the appendix package/environment.
\BOOKMARK [1][-]{section*.27}{Appendices}{}% 22
\BOOKMARK [1][-]{Appendix.1.A}{Notes on Proofs of Lemmata in Section 3.1}{}% 23
\BOOKMARK [1][-]{Appendix.1.B}{Implementations}{}% 24
\BOOKMARK [1][-]{Appendix.1.C}{Additional Comparisons}{}% 25