added poincare type inequality and neuron diagram

main
Tobias Arndt 5 years ago
parent a18b06f98d
commit 5a33ed3c8e

@@ -1,8 +1,4 @@
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "main"
%%% End:
\section{Introduction to Neural Networks}
Neural Networks (NN) are mathematical constructs inspired by the
@@ -34,6 +30,8 @@ except for the input layer, which receives the components of the input.
\resizebox{\textwidth}{!}{%
\begin{tikzpicture}[x=1.75cm, y=1.75cm, >=stealth]
\tikzset{myptr/.style={decoration={markings,mark=at position 1 with %
{\arrow[scale=1.5,>=stealth]{>}}},postaction={decorate}}}
\foreach \m/\l [count=\y] in {1,2,3,missing,4}
\node [every neuron/.try, neuron \m/.try] (input-\m) at (0,2.5-\y) {};
@@ -48,7 +46,7 @@ except for the input layer, which receives the components of the input.
\node [every neuron/.try, neuron \m/.try ] (output-\m) at (7,1.5-\y) {};
\foreach \l [count=\i] in {1,2,3,d_i}
\draw [<-] (input-\i) -- ++(-1,0)
\draw [myptr] (input-\i)+(-1,0) -- (input-\i)
node [above, midway] {$x_{\l}$};
\foreach \l [count=\i] in {1,n_1}
@@ -58,20 +56,20 @@ except for the input layer, which receives the components of the input.
\node [above] at (hidden2-\i.north) {$\mathcal{N}_{l,\l}$};
\foreach \l [count=\i] in {1,d_o}
\draw [->] (output-\i) -- ++(1,0)
\draw [myptr] (output-\i) -- ++(1,0)
node [above, midway] {$O_{\l}$};
\foreach \i in {1,...,4}
\foreach \j in {1,...,2}
\draw [->] (input-\i) -- (hidden1-\j);
\draw [myptr] (input-\i) -- (hidden1-\j);
\foreach \i in {1,...,2}
\foreach \j in {1,...,2}
\draw [->] (hidden1-\i) -- (hidden2-\j);
\draw [myptr] (hidden1-\i) -- (hidden2-\j);
\foreach \i in {1,...,2}
\foreach \j in {1,...,2}
\draw [->] (hidden2-\i) -- (output-\j);
\draw [myptr] (hidden2-\i) -- (output-\j);
\node [align=center, above] at (0,2) {Input\\layer};
\node [align=center, above] at (2,2) {Hidden \\layer $1$};
@@ -84,7 +82,10 @@ except for the input layer, which receives the components of the input.
\caption{Fully connected neural network with two hidden layers}
\end{figure}
\begin{tikzpicture}[x=1.5cm, y=1.5cm, >=stealth]
\begin{figure}
\begin{tikzpicture}[x=1.5cm, y=1.5cm]
\tikzset{myptr/.style={decoration={markings,mark=at position 1 with %
{\arrow[scale=1.5,>=stealth]{>}}},postaction={decorate}}}
\foreach \m/\l [count=\y] in {1}
\node [every neuron/.try, neuron \m/.try] (input-\m) at (0,0.5-\y) {};
@@ -96,7 +97,7 @@ except for the input layer, which receives the components of the input.
\node [every neuron/.try, neuron \m/.try ] (output-\m) at (2.5,0.5-\y) {};
\foreach \l [count=\i] in {1}
\draw [<-] (input-\i) -- ++(-1,0)
\draw [myptr] (input-\i)+(-1,0) -- (input-\i)
node [above, midway] {$x$};
\foreach \l [count=\i] in {1,2,n-1,n}
@@ -106,19 +107,127 @@ except for the input layer, which receives the components of the input.
\node [above] at (output-\i.north) {};
\foreach \l [count=\i] in {1}
\draw [->] (output-\i) -- ++(1,0)
\draw [myptr, >=stealth] (output-\i) -- ++(1,0)
node [above, midway] {$y$};
\foreach \i in {1}
\foreach \j in {1,...,4}
\draw [->] (input-\i) -- (hidden-\j);
\draw [myptr, >=stealth] (input-\i) -- (hidden-\j);
\foreach \i in {1,...,4}
\foreach \j in {1}
\draw [->] (hidden-\i) -- (output-\j);
\draw [myptr, >=stealth] (hidden-\i) -- (output-\j);
\node [align=center, above] at (0,1) {Input \\layer};
\node [align=center, above] at (1.25,3) {Hidden layer};
\node [align=center, above] at (2.5,1) {Output \\layer};
\end{tikzpicture}
\caption{Shallow neural network with input and output dimension \(d
= 1\)}
\end{figure}
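In a common parametrisation, which the figure itself does not fix,
such a shallow network with hidden neurons \(\mathcal{N}_1, \dots,
\mathcal{N}_n\) realises the map
\[
x \mapsto y = \sum_{k=1}^{n} v_k \psi(w_k x + b_k),
\]
where \(w_k\) and \(b_k\) denote the weight and bias of neuron
\(\mathcal{N}_k\), \(v_k\) the weight of its connection to the output,
and \(\psi\) the activation function; the names \(v_k, w_k, b_k\) are
chosen here only for illustration.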
\begin{figure}
\begin{tikzpicture}[x=1.5cm, y=1.5cm, >=stealth]
\tikzset{myptr/.style={decoration={markings,mark=at position 1 with %
{\arrow[scale=1.5,>=stealth]{>}}},postaction={decorate}}}
\node [circle, draw, fill=black, inner sep = 0pt, minimum size =
1.5mm, left] (i_1) at (0, 2.5) {};
\node [align=left, left] at (-0.125, 2.5) {\(i_1\)};
\node [circle, draw, fill=black, inner sep = 0pt, minimum size =
1.5mm] (i_2) at (0, 1.25) {};
\node [align=left, left] at (-0.125, 1.25) {\(i_2\)};
\node [neuron missing] (i_3) at (0, 0) {};
\node [circle, draw, fill=black, inner sep = 0pt, minimum size =
1.5mm] (i_4) at (0, -1.25) {};
\node [align=left, left] at (-0.125, -1.25) {\(i_m\)};
\draw[decoration={calligraphic brace,amplitude=5pt, mirror}, decorate, line width=1.25pt]
(-0.6,2.7) -- (-0.6,-1.45) node [black, midway, xshift=-0.6cm, left] {Input};
\node [align = center, above] at (1.25, 3) {Synaptic\\weights};
\node [every neuron] (w_1) at (1.25, 2.5) {\(w_{k, 1}\)};
\node [every neuron] (w_2) at (1.25, 1.25) {\(w_{k, 2}\)};
\node [neuron missing] (w_3) at (1.25, 0) {};
\node [every neuron] (w_4) at (1.25, -1.25) {\(w_{k, m}\)};
\node [circle, draw] (sig) at (3, 0.625) {\Large\(\sum\)};
\node [align = center, below] at (3, 0) {Summing \\junction};
\node [draw, minimum size = 1.25cm] (act) at (4.5, 0.625)
{\(\psi(\cdot)\)};
\node [align = center, above] at (4.5, 1.25) {Activation \\function};
\node [circle, draw, fill=black, inner sep = 0pt, minimum size =
1.5mm] (b) at (3, 2.5) {};
\node [align = center, above] at (3, 2.75) {Bias \\\(b_k\)};
\node [align = center] (out) at (6, 0.625) {Output \\\(o_k\)};
\draw [myptr] (i_1) -- (w_1);
\draw [myptr] (i_2) -- (w_2);
\draw [myptr] (i_4) -- (w_4);
\draw [myptr] (w_1) -- (sig);
\draw [myptr] (w_2) -- (sig);
\draw [myptr] (w_4) -- (sig);
\draw [myptr] (b) -- (sig);
\draw [myptr] (sig) -- (act);
\draw [myptr] (act) -- (out);
\end{tikzpicture}
\caption{Structure of a single neuron}
\end{figure}
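In the notation of this figure, neuron \(k\) computes its output as
\[
o_k = \psi\left(\sum_{j=1}^{m} w_{k,j}\, i_j + b_k\right),
\]
i.e.\ the weighted sum of the inputs is shifted by the bias \(b_k\)
and passed through the activation function \(\psi\).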
\begin{tikzpicture}
\tikzset{myptr/.style={decoration={markings,mark=at position 1 with %
{\arrow[scale=2,>=stealth]{>}}},postaction={decorate}}}
%1: standard TikZ arrow tip
\draw [->,>=stealth] (0,.5) -- (2,.5);
%2: arrow tip drawn as a decoration marking at the end of the path
\draw [myptr] (0,0) -- (2,0);
\end{tikzpicture}
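To compare the two arrow styles in isolation, a minimal standalone
file could look as follows (a sketch, not part of the thesis source;
note the decorations.markings library, which the markings decoration
requires and which the preamble hunk below does not show being
loaded):

\documentclass{standalone}
\usepackage{tikz}
\usetikzlibrary{arrows,decorations.markings}
\tikzset{myptr/.style={decoration={markings,mark=at position 1 with %
    {\arrow[scale=2,>=stealth]{>}}},postaction={decorate}}}
\begin{document}
\begin{tikzpicture}
  % standard arrow tip
  \draw [->,>=stealth] (0,.5) -- (2,.5);
  % arrow tip drawn as a decoration marking
  \draw [myptr] (0,0) -- (2,0);
\end{tikzpicture}
\end{document}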
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "main"
%%% End:

@@ -26,9 +26,10 @@
\usepackage{makecell}
\usepackage{dsfont}
\usepackage{tikz}
\usepackage{nicefrac}
\usetikzlibrary{matrix,chains,positioning,decorations.pathreplacing,arrows}
\usetikzlibrary{positioning,calc}
\usetikzlibrary{positioning,calc,calligraphy}
\usepackage{pgfplots}
\usepgfplotslibrary{colorbrewer}
@@ -55,6 +56,7 @@
\thispagestyle{plain}
\newtheorem{Theorem}{Theorem}[section]
\newtheorem{Definition}[Theorem]{Definition}
\newtheorem{Lemma}[Theorem]{Lemma}
\newtheorem{Algorithm}[Theorem]{Algorithm}
\newtheorem{Example}[Theorem]{Example}

@@ -29,3 +29,70 @@ as defined in ??? and ??? respectively.
\end{Theorem}
In order to prove Theorem~\ref{theo:main1} we first need to prove a
number of auxiliary lemmata.
\begin{Definition}[Sobolev Norm]
\label{def:sobonorm}
The natural norm of the Sobolev space is given by
\[
\norm{f}_{W^{k,p}(K)} =
\begin{cases}
\left(\sum_{\abs{\alpha} \leq k}
\norm{f^{(\alpha)}}^p_{L^p(K)}\right)^{\nicefrac{1}{p}},&
\text{for } 1 \leq p < \infty, \\
\max_{\abs{\alpha} \leq k}\norm{f^{(\alpha)}}_{L^{\infty}(K)},& \text{for
} p = \infty.
\end{cases}
\]
\end{Definition}
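\begin{Example}
For \(f(x) = x^2\) on \(K = [0,1]\) and \(k = 1\) this definition
gives
\[
\norm{f}_{W^{1,2}(K)} = \left(\int_0^1 x^4 dx + \int_0^1 4x^2
dx\right)^{\nicefrac{1}{2}} = \left(\frac{1}{5} +
\frac{4}{3}\right)^{\nicefrac{1}{2}}
\quad \text{and} \quad
\norm{f}_{W^{1,\infty}(K)} = \max\left\{1, 2\right\} = 2.
\]
\end{Example}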
\begin{Lemma}[Poincar\'e type inequality]
Let \(f:\mathbb{R} \to \mathbb{R}\) be differentiable with \(f' :
\mathbb{R} \to \mathbb{R}\) Lebesgue integrable. Then for \(K=[a,b]
\subset \mathbb{R}\) with \(f(a)=0\) it holds that
\begin{equation}
\label{eq:pti1}
\exists C_K^{\infty} \in \mathbb{R}_{>0} :
\norm{f}_{W^{1,\infty}(K)} \leq C_K^{\infty}
\norm{f'}_{L^{\infty}(K)}.
\end{equation}
If additionally \(f'\) is differentiable with \(f'(a) = 0\) and
\(f'': \mathbb{R} \to \mathbb{R}\) Lebesgue integrable, then
\begin{equation}
\label{eq:pti2}
\exists C_K^2 \in \mathbb{R}_{>0} : \norm{f}_{W^{1,\infty}(K)} \leq
C_K^2 \norm{f''}_{L^2(K)}.
\end{equation}
\proof
Since \(f(a) = 0\), the fundamental theorem of calculus yields
\begin{equation}
\label{eq:f_f'}
\norm{f}_{L^{\infty}(K)} = \sup_{x \in K}\abs{\int_a^x f'(s) ds} \leq
\sup_{x \in K}\int_a^x \sup_{y \in K} \abs{f'(y)} ds \leq \abs{b-a}
\sup_{y \in K}\abs{f'(y)}.
\end{equation}
Using this we can bound \(\norm{f}_{W^{1,\infty}(K)}\) by
\[
\norm{f}_{W^{1,\infty}(K)} \stackrel{\text{Def~\ref{def:sobonorm}}}{=}
\max\left\{\norm{f}_{L^{\infty}(K)},
\norm{f'}_{L^{\infty}(K)}\right\}
\stackrel{(\ref{eq:f_f'})}{\leq} \max\left\{\abs{b-a},
1\right\}\norm{f'}_{L^{\infty}(K)}.
\]
With \(C_K^{\infty} \coloneqq \max\left\{\abs{b-a}, 1\right\}\) we
get (\ref{eq:pti1}).
The second claim follows with the Hölder inequality: as \(f'(a) = 0\),
\begin{align*}
\norm{f'}_{L^{\infty}(K)} &= \sup_{x \in K} \abs{\int_a^bf''(y)
\mathds{1}_{[a,x]}(y)dy} \leq \sup_{x \in
K}\norm{f''\mathds{1}_{[a,x]}}_{L^1(K)}\\
&\hspace{-6pt} \stackrel{\text{Hölder}}{\leq} \sup_{x
\in
K}\norm{f''}_{L^2(K)}\norm{\mathds{1}_{[a,x]}}_{L^2(K)}
= \sqrt{\abs{b-a}}\norm{f''}_{L^2(K)}.
\end{align*}
Thus (\ref{eq:pti2}) follows with \(C_K^2 \coloneqq
\sqrt{\abs{b-a}}\, C_K^{\infty}\).
\qed
\end{Lemma}
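\begin{Example}
Both bounds can be checked on \(f(x) = 1 - \cos(x)\) with \(K = [0,
\nicefrac{\pi}{2}]\), which satisfies \(f(0) = 0\) and \(f'(0) =
0\). Here \(\norm{f}_{W^{1,\infty}(K)} = \max\left\{1 -
\cos(\nicefrac{\pi}{2}), \sin(\nicefrac{\pi}{2})\right\} = 1\),
while \(C_K^{\infty} \norm{f'}_{L^{\infty}(K)} = \nicefrac{\pi}{2}\)
and \(C_K^2 \norm{f''}_{L^2(K)} = \sqrt{\nicefrac{\pi}{2}} \cdot
\nicefrac{\pi}{2} \cdot \sqrt{\nicefrac{\pi}{4}} \approx 1.74\), in
accordance with (\ref{eq:pti1}) and (\ref{eq:pti2}).
\end{Example}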