Mammoth commit of the last two months
This commit is contained in:
parent 74113d5060
commit 46031fcd5d
.gitignore (vendored, 14 changes)
@@ -4,12 +4,26 @@
*.toc
*.gz
*.xml
*.el
*.bbl
*.tdo
*.blg
TeX/auto/*
main-blx.bib

# emacs autosaves
*.tex~
*#*.tex*
*~

# no pdfs
*.pdf

# no images
*image*
*.png
*.jpg
*.xcf

# no slurm logs
*slurm*.out
Cluster/mnist.py (Normal file, 26 changes)
@@ -0,0 +1,26 @@
import tensorflow as tf
from tensorflow.keras.callbacks import CSVLogger

# Load MNIST, add a channel axis for the conv layers, and scale to [0, 1]
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_train, x_test = x_train / 255.0, x_test / 255.0

# One-hot labels to match the categorical cross-entropy loss
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)

# Small CNN: two conv/pool blocks followed by two dense layers
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(24, kernel_size=5, padding='same',
                                 activation='relu', input_shape=(28, 28, 1)))
model.add(tf.keras.layers.MaxPool2D())
model.add(tf.keras.layers.Conv2D(64, kernel_size=5, padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPool2D(padding='same'))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.compile(optimizer=tf.keras.optimizers.SGD(),
              loss="categorical_crossentropy", metrics=["accuracy"])

# Log per-epoch metrics to CSV; train with mini-batch SGD (batch size 32)
csv_logger = CSVLogger('SGD_01_b32.log')
history = model.fit(x_train, y_train, validation_data=(x_test, y_test),
                    batch_size=32, epochs=20, callbacks=[csv_logger])
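
Note: CSVLogger appends one CSV row of metrics per epoch to SGD_01_b32.log. A minimal sketch of reading that log back for inspection, assuming pandas and matplotlib are installed (neither is part of this commit):

# Editor's sketch, not part of the commit.
import pandas as pd
import matplotlib.pyplot as plt

log = pd.read_csv('SGD_01_b32.log')  # columns: epoch, accuracy, loss, val_accuracy, val_loss
log.plot(x='epoch', y=['accuracy', 'val_accuracy'])
plt.savefig('SGD_01_b32_accuracy.png')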
Cluster/test.py (Normal file, 22 changes)
@@ -0,0 +1,22 @@
import tensorflow as tf

# Standard TensorFlow MNIST quickstart: flatten, one hidden layer, dropout
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10)
])

# The last layer emits raw logits, so the loss applies softmax internally
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

model.compile(optimizer='adam',
              loss=loss_fn,
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=10)
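
Note: because the final Dense(10) layer returns raw logits and the loss applies the softmax internally (from_logits=True), class probabilities have to be recovered explicitly at inference time. A minimal sketch, reusing model and x_test from the script above:

# Editor's sketch, not part of the commit.
probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
probs = probability_model(x_test[:5])   # shape (5, 10); each row sums to 1
print(tf.argmax(probs, axis=1))         # predicted digit per sample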
Cluster/test/mnist.py (Normal file, 26 changes)
@@ -0,0 +1,26 @@
import tensorflow as tf
from tensorflow.keras.callbacks import CSVLogger

# Same network and preprocessing as Cluster/mnist.py; see there for comments
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_train, x_test = x_train / 255.0, x_test / 255.0

y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)

model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(24, kernel_size=5, padding='same',
                                 activation='relu', input_shape=(28, 28, 1)))
model.add(tf.keras.layers.MaxPool2D())
model.add(tf.keras.layers.Conv2D(64, kernel_size=5, padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPool2D(padding='same'))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
              loss="categorical_crossentropy", metrics=["accuracy"])

# Full-batch gradient descent: the whole training set is one batch
csv_logger = CSVLogger('GD_1.log')
history = model.fit(x_train, y_train, validation_data=(x_test, y_test),
                    batch_size=x_train.shape[0], epochs=20, callbacks=[csv_logger])
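
Note: this file differs from Cluster/mnist.py only in the learning rate (0.1 instead of the SGD() default of 0.01), the log name, and the batch size; batch_size=x_train.shape[0] makes every update use the full training set, i.e. plain gradient descent rather than mini-batch SGD. A hypothetical sketch of folding the near-duplicate scripts into one parameterised run:

# Editor's sketch, not part of the commit.
def run(learning_rate, batch_size, log_name):
    # NB: build a fresh model before each call for a fair comparison,
    # otherwise training continues from the previous run's weights.
    model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=learning_rate),
                  loss="categorical_crossentropy", metrics=["accuracy"])
    return model.fit(x_train, y_train, validation_data=(x_test, y_test),
                     batch_size=batch_size, epochs=20,
                     callbacks=[CSVLogger(log_name)])

# run(0.01, 32, 'SGD_01_b32.log')         # the Cluster/mnist.py configuration
# run(0.1, x_train.shape[0], 'GD_1.log')  # this file's configuration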
Cluster/test/test.py (Normal file, 22 changes)
@@ -0,0 +1,22 @@
# Identical copy of Cluster/test.py
import tensorflow as tf

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10)
])

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

model.compile(optimizer='adam',
              loss=loss_fn,
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=10)
Cluster/test/tf_test.slurm (Normal file, 10 changes)
@@ -0,0 +1,10 @@
#!/bin/bash -l

#SBATCH --job-name="Keras MNIST"
#SBATCH --ntasks=1
#SBATCH --ntasks-per-core=1
#SBATCH --time=0-00:10:00
#SBATCH --nodelist=node18

srun python3 mnist.py
Cluster/tf_test.slurm (Normal file, 10 changes)
@@ -0,0 +1,10 @@
#!/bin/bash -l

#SBATCH --job-name="Keras MNIST"
#SBATCH --ntasks=1
#SBATCH --ntasks-per-core=1
#SBATCH --time=0-00:10:00
#SBATCH --nodelist=node18

srun python3 mnist.py
R/convolution.R (Normal file, 59 changes)
@@ -0,0 +1,59 @@
library(magick)   # image_read, image_convolve, image_write
library(png)      # readPNG, writePNG

# Noisy sine curve: sample sin on [0, 2*pi] (63 points), then add noise
x <- seq(0, 2*pi, 0.1)
y <- sin(x)
plot(x, y)
x_i <- x
y_i <- y
x <- x + rnorm(63, 0, 0.15)
y <- y + rnorm(63, 0, 0.15)
plot(x, y)
x_d <- x
y_d <- y

# Smooth both coordinates with a one-sided weighted moving average over
# the current point and its four predecessors
for (i in 5:63) {
  x[i] <- sum(x_d[(i-4):i] * c(1/20, 1/6, 1/5, 1/4, 1/3))
}
for (i in 5:63) {
  y[i] <- sum(y_d[(i-4):i] * c(1/20, 1/6, 1/5, 1/4, 1/3))
}
#x[1:4] = NA
#y[1:4] = NA

plot(x[-(1:4)], y[-(1:4)])

image <- image_read(path = "~/Masterarbeit/TeX/Plots/Data/klammern60_80.jpg")

# 3x3 Sobel-style kernel for vertical edges
kernel <- matrix(0, ncol = 3, nrow = 3)
kernel[c(1,3), 1] <- -1
kernel[c(1,3), 3] <- 1
kernel[2, 1] <- -2
kernel[2, 3] <- 2
kernel

# 5x5 Gaussian blur kernel, normalised by 273
kernel <- matrix(data = c(1,4,7,4,1,4,16,26,16,4,7,26,41,26,7,4,16,26,16,4,1,4,7,4,1),
                 ncol = 5, nrow = 5)
kernel <- kernel / 273

# 11x11 exponentially decaying kernel (decays from the top-left corner)
n <- 11
s <- 4
kernel <- matrix(0, nrow = n, ncol = n)
for (i in 1:n) {
  for (j in 1:n) {
    kernel[i, j] <- 1/(2*pi*s) * exp(-(i+j)/(2*s))
  }
}

image_con <- image_convolve(image, kernel)
image_con
image_write(image_con, "~/Masterarbeit/TeX/Plots/Data/image_conv11.png", format = "png")
img <- readPNG("~/Masterarbeit/TeX/Plots/Data/image_conv11.png")

# 4x4 max pooling: reduce the 80x60 image to a 20x15 matrix
out <- matrix(0, ncol = 15, nrow = 20)
for (j in 1:15) {
  for (i in 1:20) {
    out[i, j] <- max(img[((i-1)*4 + 1):((i-1)*4 + 4), ((j-1)*4 + 1):((j-1)*4 + 4)])
  }
}

writePNG(out, target = "~/Masterarbeit/TeX/Plots/Data/image_conv12.png")
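
Note: the last kernel loop above decays as exp(-(i+j)/(2s)), i.e. from the top-left corner of the matrix rather than from its centre, and the kernel is not normalised. For comparison, a centred 2-D Gaussian kernel, as an editor's numpy sketch (an illustration, not code from this commit):

# Editor's sketch, not part of the commit.
import numpy as np

def gaussian_kernel(n=11, s=4.0):
    c = (n - 1) / 2.0                   # centre index
    i, j = np.mgrid[0:n, 0:n]
    k = np.exp(-((i - c)**2 + (j - c)**2) / (2 * s**2))
    return k / k.sum()                  # normalise so overall brightness is kept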
TF/test.py (Normal file, 22 changes)
@@ -0,0 +1,22 @@
# Same quickstart as Cluster/test.py, trained for 5 epochs instead of 10
import tensorflow as tf

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10)
])

loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

model.compile(optimizer='adam',
              loss=loss_fn,
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=5)
TeX/Plots/Data/data_sin_d_t.csv (Executable file, 17 changes)
@@ -0,0 +1,17 @@
x,y
-3.141592653589793,0.0802212608585366
-2.722713633111154,-0.3759376368887911
-2.303834612632515,-1.3264180339054117
-1.8849555921538759,-0.8971334213504949
-1.4660765716752369,-0.7724344034354425
-1.0471975511965979,-0.9501497164520739
-0.6283185307179586,-0.6224628757084738
-0.2094395102393194,-0.35622668982623207
0.2094395102393194,-0.18377660088356823
0.6283185307179586,0.7836770998126841
1.0471975511965974,0.5874762732054489
1.4660765716752362,1.0696991264956026
1.8849555921538759,1.1297065441952743
2.3038346126325155,0.7587275382323738
2.7227136331111543,-0.030547103790458163
3.1415926535897922,0.044327111895927106
TeX/Plots/Data/matlab_0.csv (Executable file, 1002 changes)
File diff suppressed because it is too large
TeX/Plots/Data/matlab_1.csv (Executable file, 1002 changes)
File diff suppressed because it is too large
TeX/Plots/Data/matlab_3.csv (Executable file, 1002 changes)
File diff suppressed because it is too large
TeX/Plots/Data/matlab_sin_d_01.csv (Executable file, 1002 changes)
File diff suppressed because it is too large
TeX/Plots/Data/matlab_sin_d_1.csv (Executable file, 1002 changes)
File diff suppressed because it is too large
TeX/Plots/Data/matlab_sin_d_3.csv (Executable file, 1002 changes)
File diff suppressed because it is too large
TeX/Plots/Data/overfit.csv (Normal file, 1202 changes)
File diff suppressed because it is too large
TeX/Plots/Data/overfit_spline.csv (Normal file, 1202 changes)
File diff suppressed because it is too large
TeX/Plots/Data/scala_out_d_1_t.csv (Executable file, 101 changes)
@@ -0,0 +1,101 @@
x_n_5000_tl_0.1,y_n_5000_tl_0.1,x_n_5000_tl_1.0,y_n_5000_tl_1.0,x_n_5000_tl_3.0,y_n_5000_tl_3.0
-5.0,1.794615305950707,-5.0,0.3982406589003759,-5.0,-0.4811539502118497
-4.898989898989899,1.6984389486364895,-4.898989898989899,0.35719218031912614,-4.898989898989899,-0.48887996302459025
-4.797979797979798,1.6014200743009022,-4.797979797979798,0.3160182633093358,-4.797979797979798,-0.4966732473871599
-4.696969696969697,1.5040575427157106,-4.696969696969697,0.27464978660531225,-4.696969696969697,-0.5045073579233731
-4.595959595959596,1.4061194142774731,-4.595959595959596,0.23293440418365288,-4.595959595959596,-0.5123589845230747
-4.494949494949495,1.3072651356075136,-4.494949494949495,0.19100397829173557,-4.494949494949495,-0.5202738824510786
-4.393939393939394,1.2078259346207492,-4.393939393939394,0.1488314515422353,-4.393939393939394,-0.5282281154332915
-4.292929292929293,1.1079271590765678,-4.292929292929293,0.10646618526238515,-4.292929292929293,-0.536250283913464
-4.191919191919192,1.0073183089866045,-4.191919191919192,0.0637511521454329,-4.191919191919192,-0.5443068679044686
-4.090909090909091,0.9064682044248323,-4.090909090909091,0.020965778107027506,-4.090909090909091,-0.5524049731989601
-3.9898989898989896,0.805095064694333,-3.9898989898989896,-0.02200882631350869,-3.9898989898989896,-0.5605562335116703
-3.888888888888889,0.7032463151196859,-3.888888888888889,-0.06548644224881082,-3.888888888888889,-0.5687680272492979
-3.787878787878788,0.6007843964001714,-3.787878787878788,-0.10914135786185346,-3.787878787878788,-0.5770307386196555
-3.686868686868687,0.4978572358270573,-3.686868686868687,-0.15292201515712506,-3.686868686868687,-0.5853131654059709
-3.5858585858585856,0.39465522349482535,-3.5858585858585856,-0.19694472820060063,-3.5858585858585856,-0.593636189078738
-3.484848484848485,0.29091175104318323,-3.484848484848485,-0.24139115547918963,-3.484848484848485,-0.6019914655156898
-3.383838383838384,0.1868284306918275,-3.383838383838384,-0.28617728400089926,-3.383838383838384,-0.6103823599700093
-3.282828282828283,0.0817944681090728,-3.282828282828283,-0.33119615483860937,-3.282828282828283,-0.6188088888423856
-3.1818181818181817,-0.023670753859105602,-3.1818181818181817,-0.3764480559542342,-3.1818181818181817,-0.6272515625106694
-3.080808080808081,-0.1299349094939808,-3.080808080808081,-0.42202262988259276,-3.080808080808081,-0.6357221532633648
-2.9797979797979797,-0.2360705715363967,-2.9797979797979797,-0.467584017465408,-2.9797979797979797,-0.6440454918766952
-2.878787878787879,-0.34125419448980393,-2.878787878787879,-0.5126079284225549,-2.878787878787879,-0.65203614244987
-2.7777777777777777,-0.443504036212927,-2.7777777777777777,-0.5569084060463078,-2.7777777777777777,-0.6594896031012563
-2.676767676767677,-0.5411482698953787,-2.676767676767677,-0.6002683604183435,-2.676767676767677,-0.6661215834468585
-2.5757575757575757,-0.6363089624800997,-2.5757575757575757,-0.6396725440402657,-2.5757575757575757,-0.6715398637661353
-2.474747474747475,-0.725241414197713,-2.474747474747475,-0.6753456416248385,-2.474747474747475,-0.674565545688341
-2.3737373737373737,-0.8010191169999671,-2.3737373737373737,-0.7066964605752718,-2.3737373737373737,-0.6765307025278043
-2.272727272727273,-0.8626605255789729,-2.272727272727273,-0.7348121862404637,-2.272727272727273,-0.6766187567521622
-2.1717171717171717,-0.911435840482434,-2.1717171717171717,-0.7592451818361001,-2.1717171717171717,-0.6747200340049733
-2.070707070707071,-0.9518228090965052,-2.070707070707071,-0.7755022118880182,-2.070707070707071,-0.6711535886166349
-1.9696969696969697,-0.9791642715505677,-1.9696969696969697,-0.7889078495544403,-1.9696969696969697,-0.6653309071624213
-1.868686868686869,-0.9959505678135467,-1.868686868686869,-0.7978655263590677,-1.868686868686869,-0.6574048849245917
-1.7676767676767677,-1.0042572630521163,-1.7676767676767677,-0.8024926242661324,-1.7676767676767677,-0.6465258005011485
-1.6666666666666665,-1.0031374573437621,-1.6666666666666665,-0.8024786300118695,-1.6666666666666665,-0.6326231142587367
-1.5656565656565657,-0.9924082586558415,-1.5656565656565657,-0.7967021619463882,-1.5656565656565657,-0.6166476676023103
-1.4646464646464645,-0.9734669180157094,-1.4646464646464645,-0.7849942222838879,-1.4646464646464645,-0.5979735104135664
-1.3636363636363638,-0.9509454078185711,-1.3636363636363638,-0.7662349774950723,-1.3636363636363638,-0.5774876452737464
-1.2626262626262625,-0.9231872651397443,-1.2626262626262625,-0.7433085627087517,-1.2626262626262625,-0.554712230754877
-1.1616161616161618,-0.8903321986477033,-1.1616161616161618,-0.7150493507052204,-1.1616161616161618,-0.5295933185437713
-1.0606060606060606,-0.8533989447900909,-1.0606060606060606,-0.6814643745239313,-1.0606060606060606,-0.5021785239088743
-0.9595959595959593,-0.8107636317978494,-0.9595959595959593,-0.6421615608115637,-0.9595959595959593,-0.472606158673678
-0.858585858585859,-0.7612745578549842,-0.858585858585859,-0.5973114244123007,-0.858585858585859,-0.4405007246413654
-0.7575757575757578,-0.7079734098301842,-0.7575757575757578,-0.5483264663676062,-0.7575757575757578,-0.4059991890198415
-0.6565656565656566,-0.6488963804386183,-0.6565656565656566,-0.49554278063844803,-0.6565656565656566,-0.3695525928005769
-0.5555555555555554,-0.5859222961089965,-0.5555555555555554,-0.4403758682478846,-0.5555555555555554,-0.33111757514282614
-0.45454545454545503,-0.5162955936688821,-0.45454545454545503,-0.38037108381900747,-0.45454545454545503,-0.28897806883385513
-0.3535353535353538,-0.4413321076045784,-0.3535353535353538,-0.31690399361617216,-0.3535353535353538,-0.24421776219711205
-0.2525252525252526,-0.3616414699818406,-0.2525252525252526,-0.25204481791119354,-0.2525252525252526,-0.19795939679257332
-0.15151515151515138,-0.2780916794094584,-0.15151515151515138,-0.18575713332565263,-0.15151515151515138,-0.15066195015784248
-0.050505050505050164,-0.18977454284683343,-0.050505050505050164,-0.11797643773197505,-0.050505050505050164,-0.10274021898431054
0.050505050505050164,-0.0969321739577506,0.050505050505050164,-0.049351343645831554,0.050505050505050164,-0.05414525935109969
0.15151515151515138,-4.4802289442360816E-4,0.15151515151515138,0.019464788799119597,0.15151515151515138,-0.005354051541524688
0.2525252525252526,0.09918485823776255,0.2525252525252526,0.08804193897553166,0.2525252525252526,0.0433816826222638
0.3535353535353538,0.1998735386668185,0.3535353535353538,0.15569793996298523,0.3535353535353538,0.09176342956997338
0.45454545454545414,0.2999169047201809,0.45454545454545414,0.2218157527002848,0.45454545454545414,0.13952481930457306
0.5555555555555554,0.3978204122760816,0.5555555555555554,0.2846069052305317,0.5555555555555554,0.18668380673527113
0.6565656565656566,0.49120659266814587,0.6565656565656566,0.34467300454040606,0.6565656565656566,0.23277011860523958
0.7575757575757578,0.5777980409414698,0.7575757575757578,0.40208229496894643,0.7575757575757578,0.27613740421328176
0.8585858585858581,0.6568213676446025,0.8585858585858581,0.45705882493784666,0.8585858585858581,0.316305372116494
0.9595959595959593,0.7305067401293432,0.9595959595959593,0.5066458373898202,0.9595959595959593,0.35343427932594923
1.0606060606060606,0.7966609096765547,1.0606060606060606,0.5516149744358979,1.0606060606060606,0.38717949746647334
1.1616161616161618,0.8521200140106753,1.1616161616161618,0.5878017101641295,1.1616161616161618,0.4170777567516486
1.262626262626262,0.8975259277901253,1.262626262626262,0.6168588441570951,1.262626262626262,0.4446516626376453
1.3636363636363633,0.9290861930067627,1.3636363636363633,0.6411836178298306,1.3636363636363633,0.46927636759559477
1.4646464646464645,0.9508521659740165,1.4646464646464645,0.6610795923876176,1.4646464646464645,0.4901812911280025
1.5656565656565657,0.9612143570080512,1.5656565656565657,0.6768219209716341,1.5656565656565657,0.5079918402617868
1.666666666666667,0.9590141254017294,1.666666666666667,0.6878304863477654,1.666666666666667,0.5233400296358803
1.7676767676767673,0.9434050911299104,1.7676767676767673,0.6925040592034013,1.7676767676767673,0.5351552186913862
1.8686868686868685,0.9166484175947194,1.8686868686868685,0.6900246131027935,1.8686868686868685,0.5441567759439713
1.9696969696969697,0.8762489440965586,1.9696969696969697,0.6764843940414706,1.9696969696969697,0.5496025817549586
2.070707070707071,0.821609113516158,2.070707070707071,0.6566284893291617,2.070707070707071,0.5536820874974513
2.1717171717171713,0.7581599898835192,2.1717171717171713,0.6308981649064993,2.1717171717171713,0.5533100035360206
2.2727272727272725,0.6877704486402438,2.2727272727272725,0.6016976467409065,2.2727272727272725,0.550251787575325
2.3737373737373737,0.610815603287697,2.3737373737373737,0.5704721438286479,2.3737373737373737,0.5445865851994449
2.474747474747475,0.5275282181728166,2.474747474747475,0.5362814307290142,2.474747474747475,0.537858723684707
2.5757575757575752,0.44098299617705367,2.5757575757575752,0.5007018478259194,2.5757575757575752,0.5301810557083476
2.6767676767676765,0.3535127269572474,2.6767676767676765,0.4635791072799046,2.6767676767676765,0.5214280506499815
2.7777777777777777,0.2669314340184933,2.7777777777777777,0.4252681214470508,2.7777777777777777,0.5119428002841875
2.878787878787879,0.18244774892195767,2.878787878787879,0.3860805361925665,2.878787878787879,0.5020280103571171
2.9797979797979792,0.10009287374461422,2.9797979797979792,0.34649978327862213,2.9797979797979792,0.4918997465440798
3.0808080808080813,0.01825358803182036,3.0808080808080813,0.3067456416075246,3.0808080808080813,0.48152164248236273
3.1818181818181817,-0.06257603867024951,3.1818181818181817,0.2670556605010131,3.1818181818181817,0.4710506406469346
3.282828282828282,-0.14256250037038515,3.282828282828282,0.22747478740583862,3.282828282828282,0.46061400021772264
3.383838383838384,-0.22183964093761221,3.383838383838384,0.18823442296238005,3.383838383838384,0.4502063176185161
3.4848484848484844,-0.3000530710681483,3.4848484848484844,0.14930923451816047,3.4848484848484844,0.43983195563012295
3.5858585858585865,-0.37715837046834677,3.5858585858585865,0.11064727810620513,3.5858585858585865,0.4294855408707603
3.686868686868687,-0.4535879015098929,3.686868686868687,0.0721761317620166,3.686868686868687,0.41918651120808587
3.787878787878787,-0.5295958753874862,3.787878787878787,0.03385158496402993,3.787878787878787,0.4089211108732785
3.8888888888888893,-0.605341954214415,3.8888888888888893,-0.004196426105451837,3.8888888888888893,0.3986849690078671
3.9898989898989896,-0.6805725256650321,3.9898989898989896,-0.04204424507819378,3.9898989898989896,0.3884698016669201
4.09090909090909,-0.7553382625080638,4.09090909090909,-0.0795288839270637,4.09090909090909,0.37826736472008937
4.191919191919192,-0.8294318073700058,4.191919191919192,-0.11675718948094181,4.191919191919192,0.36808861016948324
4.292929292929292,-0.9025671571505313,4.292929292929292,-0.15379169226972225,4.292929292929292,0.3579396881040081
4.3939393939393945,-0.9751233932017581,4.3939393939393945,-0.19069301489402432,4.3939393939393945,0.3478279422102407
4.494949494949495,-1.0471623188798242,4.494949494949495,-0.227426975503073,4.494949494949495,0.3377388026398381
4.595959595959595,-1.1187532876284094,4.595959595959595,-0.263878605240927,4.595959595959595,0.32767338817749475
4.696969696969697,-1.189660915888889,4.696969696969697,-0.3001960056492053,4.696969696969697,0.3176530967513947
4.797979797979798,-1.2601246569645388,4.797979797979798,-0.3363281464377301,4.797979797979798,0.3076778013243957
4.8989898989899,-1.3303637186847002,4.8989898989899,-0.37225330321499334,4.8989898989899,0.29772768053304777
5.0,-1.4004134094571867,5.0,-0.4080316669473787,5.0,0.2878184725593889
TeX/Plots/Data/scala_out_sin.csv (Executable file, 101 changes)
@@ -0,0 +1,101 @@
x_n_50_tl_0.0,y_n_50_tl_0.0,x_n_500_tl_0.0,y_n_500_tl_0.0,x_n_5000_tl_0.0,y_n_5000_tl_0.0,x_n_50_tl_1.0,y_n_50_tl_1.0,x_n_500_tl_1.0,y_n_500_tl_1.0,x_n_5000_tl_1.0,y_n_5000_tl_1.0,x_n_50_tl_3.0,y_n_50_tl_3.0,x_n_500_tl_3.0,y_n_500_tl_3.0,x_n_5000_tl_3.0,y_n_5000_tl_3.0
-5.0,-0.8599583057554976,-5.0,1.6797068787192495,-5.0,1.7379689606223239,-5.0,-0.42741272499487776,-5.0,0.23661838590976328,-5.0,0.20399386816229978,-5.0,0.13095951218866275,-5.0,-0.46242184829078237,-5.0,-0.41058629664051305
-4.898989898989899,-0.8456047840536887,-4.898989898989899,1.5940442438460278,-4.898989898989899,1.6472202329485999,-4.898989898989899,-0.4276431031893983,-4.898989898989899,0.20862681459226723,-4.898989898989899,0.17824071850107404,-4.898989898989899,0.10539057470765349,-4.898989898989899,-0.4609018322257037,-4.898989898989899,-0.4110599614729015
-4.797979797979798,-0.8312512623518801,-4.797979797979798,1.5066655952530659,-4.797979797979798,1.5560370024912986,-4.797979797979798,-0.42787348138391906,-4.797979797979798,0.18056404254218186,-4.797979797979798,0.1523309553054011,-4.797979797979798,0.07982163722664384,-4.797979797979798,-0.4593800781031771,-4.797979797979798,-0.41155161184122596
-4.696969696969697,-0.8168977406500709,-4.696969696969697,1.4192486056640365,-4.696969696969697,1.4641612521550218,-4.696969696969697,-0.42810385957843955,-4.696969696969697,0.1524990189306639,-4.696969696969697,0.1262143553005724,-4.696969696969697,0.05464380509332076,-4.696969696969697,-0.4578583174084625,-4.696969696969697,-0.41205688060740875
-4.595959595959596,-0.8025442189482614,-4.595959595959596,1.3308076153149195,-4.595959595959596,1.3718747642404912,-4.595959595959596,-0.42833423777296026,-4.595959595959596,0.12443399531914556,-4.595959595959596,0.10000299804643913,-4.595959595959596,0.029720704709016,-4.595959595959596,-0.45633655338498746,-4.595959595959596,-0.4126005212950324
-4.494949494949495,-0.788190697246453,-4.494949494949495,1.2408764237610932,-4.494949494949495,1.2794547935729972,-4.494949494949495,-0.42856461596748074,-4.494949494949495,0.09628036393480953,-4.494949494949495,0.07370213597938947,-4.494949494949495,0.004797604324711557,-4.494949494949495,-0.45481454100468904,-4.494949494949495,-0.41317280828652125
-4.393939393939394,-0.7757194193374484,-4.393939393939394,1.150777108936673,-4.393939393939394,1.1865984175078124,-4.393939393939394,-0.4287949941620015,-4.393939393939394,0.06803799087458409,-4.393939393939394,0.047353868838267546,-4.393939393939394,-0.019952866294811474,-4.393939393939394,-0.4532902682540511,-4.393939393939394,-0.41378088791316736
-4.292929292929293,-0.7635428572249876,-4.292929292929293,1.0606777941122512,-4.292929292929293,1.0935156155193826,-4.292929292929293,-0.42902537235652216,-4.292929292929293,0.039745189354681264,-4.292929292929293,0.020863777423783696,-4.292929292929293,-0.04424719286600705,-4.292929292929293,-0.45176167641583376,-4.292929292929293,-0.41441903123033147
-4.191919191919192,-0.7514991436388702,-4.191919191919192,0.9705784792878309,-4.191919191919192,0.9999451479756023,-4.191919191919192,-0.42925575055104276,-4.191919191919192,0.01144626171509771,-4.191919191919192,-0.005903721047402898,-4.191919191919192,-0.06854151943720274,-4.191919191919192,-0.4502329821869361,-4.191919191919192,-0.415076548381381
-4.090909090909091,-0.7396941691045894,-4.090909090909091,0.8798554638230421,-4.090909090909091,0.9059203084364202,-4.090909090909091,-0.42948612874556336,-4.090909090909091,-0.016952280979816926,-4.090909090909091,-0.03298925765732338,-4.090909090909091,-0.09283584600839848,-4.090909090909091,-0.44869972853751156,-4.090909090909091,-0.4157629995846106
-3.9898989898989896,-0.7279252765177078,-3.9898989898989896,0.7884244803113447,-3.9898989898989896,0.811474387051809,-3.9898989898989896,-0.42971650694008423,-3.9898989898989896,-0.04548036359257723,-3.9898989898989896,-0.06017986522111469,-3.9898989898989896,-0.11713017257959416,-3.9898989898989896,-0.44715472797022665,-3.9898989898989896,-0.41647096691012625
-3.888888888888889,-0.7161580919866168,-3.888888888888889,0.6966140451148786,-3.888888888888889,0.7168906385054419,-3.888888888888889,-0.4299468851346048,-3.888888888888889,-0.07408610945271141,-3.888888888888889,-0.0874709084540591,-3.888888888888889,-0.14142449915078953,-3.888888888888889,-0.4456015995456161,-3.888888888888889,-0.4171930364234525
-3.787878787878788,-0.7043909074555256,-3.787878787878788,0.604803249010758,-3.787878787878788,0.6219712537736367,-3.787878787878788,-0.4301772633291252,-3.787878787878788,-0.10285723661640957,-3.787878787878788,-0.11503695886523099,-3.787878787878788,-0.16571882572198493,-3.787878787878788,-0.4440477592686527,-3.787878787878788,-0.41792735866227004
-3.686868686868687,-0.6926237229244344,-3.686868686868687,0.512070766385858,-3.686868686868687,0.5265347560169878,-3.686868686868687,-0.4304076415236461,-3.686868686868687,-0.13176620357773466,-3.686868686868687,-0.1429497539600965,-3.686868686868687,-0.19001315229318066,-3.686868686868687,-0.44249216926013074,-3.686868686868687,-0.4186788950692494
-3.5858585858585856,-0.680856538393343,-3.5858585858585856,0.418341406261733,-3.5858585858585856,0.43037422799158725,-3.5858585858585856,-0.43063801971816673,-3.5858585858585856,-0.16072772857488207,-3.5858585858585856,-0.17103810603915154,-3.5858585858585856,-0.21430747886437626,-3.5858585858585856,-0.44093657925160834,-3.5858585858585856,-0.41944890491602094
-3.484848484848485,-0.6690893538622519,-3.484848484848485,0.3230008626762439,-3.484848484848485,0.33347359833985296,-3.484848484848485,-0.43086839791268744,-3.484848484848485,-0.189786562504877,-3.484848484848485,-0.1992640699299042,-3.484848484848485,-0.238601805435572,-3.484848484848485,-0.4393809892430859,-3.484848484848485,-0.4202525693559286
-3.383838383838384,-0.6573221693311603,-3.383838383838384,0.22755806300474243,-3.383838383838384,0.23599152727957395,-3.383838383838384,-0.4310987761072079,-3.383838383838384,-0.21885301172451227,-3.383838383838384,-0.22770533404467666,-3.383838383838384,-0.2628961320067672,-3.383838383838384,-0.43781693796746485,-3.383838383838384,-0.4210766722370822
-3.282828282828283,-0.6455549848000697,-3.282828282828283,0.13172938749299176,-3.282828282828283,0.13785071540835,-3.282828282828283,-0.4313291543017285,-3.282828282828283,-0.24792012144222308,-3.282828282828283,-0.25633384693349226,-3.282828282828283,-0.28719045857796294,-3.282828282828283,-0.4362515901030497,-3.282828282828283,-0.42192705020460003
-3.1818181818181817,-0.6337878002689783,-3.1818181818181817,0.03583960513370717,-3.1818181818181817,0.03926297085619488,-3.1818181818181817,-0.43155953249624923,-3.1818181818181817,-0.2770868438988566,-3.1818181818181817,-0.28512064843139634,-3.1818181818181817,-0.3114847851491585,-3.1818181818181817,-0.4346861097486259,-3.1818181818181817,-0.42279043662854426
-3.080808080808081,-0.6219933944673289,-3.080808080808081,-0.06005017722557655,-3.080808080808081,-0.05953650043486377,-3.080808080808081,-0.4317899106907698,-3.080808080808081,-0.30634202732953336,-3.080808080808081,-0.3140197227479732,-3.080808080808081,-0.33577911172035446,-3.080808080808081,-0.4331124443470669,-3.080808080808081,-0.42366980349780375
-2.9797979797979797,-0.6084802589111126,-2.9797979797979797,-0.15590935392992944,-2.9797979797979797,-0.15810366579897028,-2.9797979797979797,-0.4320202888852905,-2.9797979797979797,-0.33549678779642544,-2.9797979797979797,-0.3430021282671825,-2.9797979797979797,-0.3600734382915496,-2.9797979797979797,-0.4315218307109141,-2.9797979797979797,-0.42449207343700956
-2.878787878787879,-0.5891232690738096,-2.878787878787879,-0.24713180817765498,-2.878787878787879,-0.2552003497036097,-2.878787878787879,-0.43225066707981114,-2.878787878787879,-0.36352866123332933,-2.878787878787879,-0.3716002292573769,-2.878787878787879,-0.38436776486274526,-2.878787878787879,-0.42982012082652077,-2.878787878787879,-0.4251380414134998
-2.7777777777777777,-0.5636588831509095,-2.7777777777777777,-0.33701300990207655,-2.7777777777777777,-0.35066910453142525,-2.7777777777777777,-0.4324810452743318,-2.7777777777777777,-0.3911342117000581,-2.7777777777777777,-0.39951657101606874,-2.7777777777777777,-0.4086620914339411,-2.7777777777777777,-0.42794280685642583,-2.7777777777777777,-0.4254095546530059
-2.676767676767677,-0.538194497228009,-2.676767676767677,-0.4265304961947721,-2.676767676767677,-0.4419057912445846,-2.676767676767677,-0.4295143886441945,-2.676767676767677,-0.41758811768544335,-2.676767676767677,-0.4264377612958712,-2.676767676767677,-0.4329564180051365,-2.676767676767677,-0.4251801800597513,-2.676767676767677,-0.42514350551302893
-2.5757575757575757,-0.5127301113051083,-2.5757575757575757,-0.5160338868263108,-2.5757575757575757,-0.530562896182845,-2.5757575757575757,-0.4209813938653777,-2.5757575757575757,-0.4421888684751682,-2.5757575757575757,-0.4521958194404763,-2.5757575757575757,-0.4572507445763323,-2.5757575757575757,-0.4220835438175992,-2.5757575757575757,-0.42424941235712643
-2.474747474747475,-0.48726572538220836,-2.474747474747475,-0.6045443334592155,-2.474747474747475,-0.615529859161848,-2.474747474747475,-0.4124483990865609,-2.474747474747475,-0.4657884717671948,-2.474747474747475,-0.4762840194362591,-2.474747474747475,-0.480179747245649,-2.474747474747475,-0.4184871960008546,-2.474747474747475,-0.4227211360179997
-2.3737373737373737,-0.4618013394593081,-2.3737373737373737,-0.6866461198443653,-2.3737373737373737,-0.6916556206405179,-2.3737373737373737,-0.4039154043077441,-2.3737373737373737,-0.4872175481179362,-2.3737373737373737,-0.49664688375599,-2.3737373737373737,-0.5021327343044837,-2.3737373737373737,-0.4148617786025484,-2.3737373737373737,-0.42058969704823307
-2.272727272727273,-0.4363369535364072,-2.272727272727273,-0.7664221699283893,-2.272727272727273,-0.76211944205629,-2.272727272727273,-0.3953824095289272,-2.272727272727273,-0.5066515567337302,-2.272727272727273,-0.5156479697413601,-2.272727272727273,-0.5240857213633179,-2.272727272727273,-0.4101489198915738,-2.272727272727273,-0.41773244666508813
-2.1717171717171717,-0.41087256761350716,-2.1717171717171717,-0.8294863656303931,-2.1717171717171717,-0.8275864122047706,-2.1717171717171717,-0.38684941475011053,-2.1717171717171717,-0.5248642081767847,-2.1717171717171717,-0.5320776321494358,-2.1717171717171717,-0.5460387084221523,-2.1717171717171717,-0.40386935734460455,-2.1717171717171717,-0.41386532161191136
-2.070707070707071,-0.38540818169060687,-2.070707070707071,-0.8777818560548117,-2.070707070707071,-0.8828614286116081,-2.070707070707071,-0.37790597680581006,-2.070707070707071,-0.5419305295559403,-2.070707070707071,-0.5450192204063132,-2.070707070707071,-0.5535021346303699,-2.070707070707071,-0.3970390682426877,-2.070707070707071,-0.40816135821642785
-1.9696969696969697,-0.3599437957677064,-1.9696969696969697,-0.9240065596308831,-1.9696969696969697,-0.9252381701217932,-1.9696969696969697,-0.3679210297690768,-1.9696969696969697,-0.5515520831674893,-1.9696969696969697,-0.5532507694312989,-1.9696969696969697,-0.5395642887779512,-1.9696969696969697,-0.3899536977126602,-1.9696969696969697,-0.4010221140801823
-1.868686868686869,-0.3344794098448062,-1.868686868686869,-0.9642081153190732,-1.868686868686869,-0.9553319880266173,-1.868686868686869,-0.3579360827323437,-1.868686868686869,-0.5596849243269256,-1.868686868686869,-0.556146459781286,-1.868686868686869,-0.5226399861377664,-1.868686868686869,-0.38238093755017905,-1.868686868686869,-0.3924834151653046
-1.7676767676767677,-0.3090150239219054,-1.7676767676767677,-1.0007396420666628,-1.7676767676767677,-0.9785388909278812,-1.7676767676767677,-0.34795113569561026,-1.7676767676767677,-0.5614467949548656,-1.7676767676767677,-0.556098671354368,-1.7676767676767677,-0.4982759643499402,-1.7676767676767677,-0.37323932215085087,-1.7676767676767677,-0.3822790688909727
-1.6666666666666665,-0.2835506379990052,-1.6666666666666665,-1.0187333297343348,-1.6666666666666665,-0.990642179129256,-1.6666666666666665,-0.3378404050890797,-1.6666666666666665,-0.5581030917440444,-1.6666666666666665,-0.5516597526410076,-1.6666666666666665,-0.47067804898067184,-1.6666666666666665,-0.3614402633008814,-1.6666666666666665,-0.37030436851426224
-1.5656565656565657,-0.2580862520761052,-1.5656565656565657,-1.0247628857811257,-1.5656565656565657,-0.9908786897501635,-1.5656565656565657,-0.32764529263529574,-1.5656565656565657,-0.5521110428952534,-1.5656565656565657,-0.543054168961121,-1.5656565656565657,-0.44308013361140386,-1.5656565656565657,-0.34868249075072216,-1.5656565656565657,-0.35699361568660476
-1.4646464646464645,-0.2326218661532044,-1.4646464646464645,-1.0034906902849632,-1.4646464646464645,-0.9791175953628313,-1.4646464646464645,-0.3174501801815117,-1.4646464646464645,-0.5459322825614802,-1.4646464646464645,-0.5306579767422843,-1.4646464646464645,-0.41548221824213516,-1.4646464646464645,-0.3311832422822113,-1.4646464646464645,-0.3422960409489238
-1.3636363636363638,-0.20715748023030392,-1.3636363636363638,-0.9673348570651019,-1.3636363636363638,-0.9595107779813504,-1.3636363636363638,-0.30725506772772765,-1.3636363636363638,-0.5358046337748493,-1.3636363636363638,-0.5149935986561597,-1.3636363636363638,-0.3878843028728669,-1.3636363636363638,-0.3132121589299601,-1.3636363636363638,-0.32640862478895577
-1.2626262626262625,-0.1816930943074038,-1.2626262626262625,-0.9225014127525308,-1.2626262626262625,-0.9337929369785798,-1.2626262626262625,-0.29705995527394363,-1.2626262626262625,-0.5219865374295057,-1.2626262626262625,-0.49551878203869837,-1.2626262626262625,-0.3602863875035988,-1.2626262626262625,-0.2946441284959401,-1.2626262626262625,-0.3093875165551468
-1.1616161616161618,-0.15622870838450328,-1.1616161616161618,-0.8751043056611054,-1.1616161616161618,-0.8989581380947891,-1.1616161616161618,-0.2868560938657385,-1.1616161616161618,-0.5034750880272445,-1.1616161616161618,-0.47203943335323734,-1.1616161616161618,-0.33268847213433056,-1.1616161616161618,-0.274883632364574,-1.1616161616161618,-0.290930041718859
-1.0606060606060606,-0.13076432246160322,-1.0606060606060606,-0.821606899074672,-1.0606060606060606,-0.8584249497008333,-1.0606060606060606,-0.27660353819390815,-1.0606060606060606,-0.48270847299437897,-1.0606060606060606,-0.44464074915622404,-1.0606060606060606,-0.3050905567650622,-1.0606060606060606,-0.25396600066040825,-1.0606060606060606,-0.27118022111102713
-0.9595959595959593,-0.1052999365387022,-0.9595959595959593,-0.7640740662013277,-0.9595959595959593,-0.8091349495541134,-0.9595959595959593,-0.2663509825220778,-0.9595959595959593,-0.4531496187924299,-0.9595959595959593,-0.4131252245857649,-0.9595959595959593,-0.2774926413957938,-0.9595959595959593,-0.2325608605277687,-0.9595959595959593,-0.24999263682664583
-0.858585858585859,-0.07983555061580246,-0.858585858585859,-0.6997648036121712,-0.858585858585859,-0.7481101580520273,-0.858585858585859,-0.24945014324598108,-0.858585858585859,-0.4128551081137216,-0.858585858585859,-0.3783375004573455,-0.858585858585859,-0.24988890615957382,-0.858585858585859,-0.20970608424200354,-0.858585858585859,-0.22760758480332924
-0.7575757575757578,-0.054371164692902076,-0.7575757575757578,-0.6349094271338603,-0.7575757575757578,-0.6820384544330558,-0.7575757575757578,-0.22976061598357173,-0.7575757575757578,-0.37194755761368214,-0.7575757575757578,-0.34125536540984164,-0.7575757575757578,-0.22211577202959193,-0.7575757575757578,-0.18612295967753525,-0.7575757575757578,-0.20435972492122192
-0.6565656565656566,-0.028906778770001355,-0.6565656565656566,-0.5675463340257147,-0.6565656565656566,-0.6095055279444694,-0.6565656565656566,-0.21007108872116223,-0.6565656565656566,-0.33089771921954814,-0.6565656565656566,-0.3018873155488892,-0.6565656565656566,-0.193901705770251,-0.6565656565656566,-0.16215648653127196,-0.6565656565656566,-0.17931671250996567
-0.5555555555555554,-0.003442392847101086,-0.5555555555555554,-0.4979737843441253,-0.5555555555555554,-0.5294156894319434,-0.5555555555555554,-0.17756203711819088,-0.5555555555555554,-0.28543993548509355,-0.5555555555555554,-0.26041062451302716,-0.5555555555555554,-0.1652647608815763,-0.5555555555555554,-0.13697108727984195,-0.5555555555555554,-0.15330854213602407
-0.45454545454545503,0.022021993075799252,-0.45454545454545503,-0.41446378537016554,-0.45454545454545503,-0.44063136513918405,-0.45454545454545503,-0.14370193132078618,-0.45454545454545503,-0.2395445410097954,-0.45454545454545503,-0.21652789115320525,-0.45454545454545503,-0.13529651419425484,-0.45454545454545503,-0.11162353028803523,-0.45454545454545503,-0.12623393965312618
-0.3535353535353538,0.047486378998699605,-0.3535353535353538,-0.32279891003383887,-0.3535353535353538,-0.3477046435373429,-0.3535353535353538,-0.10934683153775412,-0.3535353535353538,-0.19101529776271153,-0.3535353535353538,-0.17035416577174828,-0.3535353535353538,-0.10509845793132169,-0.3535353535353538,-0.08626013443382194,-0.3535353535353538,-0.0984136402387288
-0.2525252525252526,0.07295076492159988,-0.2525252525252526,-0.2310925448666578,-0.2525252525252526,-0.25069145628093464,-0.2525252525252526,-0.07491795886312486,-0.2525252525252526,-0.14150481827496786,-0.2525252525252526,-0.12255925867115473,-0.2525252525252526,-0.07490040166838845,-0.2525252525252526,-0.060434579838324495,-0.2525252525252526,-0.07006332009798681
-0.15151515151515138,0.09843047923373265,-0.15151515151515138,-0.13636354870852932,-0.15151515151515138,-0.15095910699954188,-0.15151515151515138,-0.040306119685216676,-0.15151515151515138,-0.08982558834407159,-0.15151515151515138,-0.07398207558396772,-0.15151515151515138,-0.044702345405455264,-0.15151515151515138,-0.033631412543263274,-0.15151515151515138,-0.04141233375856603
-0.050505050505050164,0.12391212075429944,-0.050505050505050164,-0.03941345742250633,-0.050505050505050164,-0.04947445191778734,-0.050505050505050164,-0.005694280507308445,-0.050505050505050164,-0.03797674651308919,-0.050505050505050164,-0.025080464074353173,-0.050505050505050164,-0.014504289142522105,-0.050505050505050164,-0.006446181090338347,-0.050505050505050164,-0.012381418678247798
0.050505050505050164,0.14939376227486617,0.050505050505050164,0.056551574802519614,0.050505050505050164,0.0525838784102356,0.050505050505050164,0.028911158365061536,0.050505050505050164,0.013973891774473416,0.050505050505050164,0.023794553267499748,0.050505050505050164,0.01583885016218507,0.050505050505050164,0.021038028372213642,0.050505050505050164,0.016846741994686543
0.15151515151515138,0.17487540379543332,0.15151515151515138,0.15017264202689645,0.15151515151515138,0.15408973105493792,0.15151515151515138,0.062183868537649845,0.15151515151515138,0.06589471730593952,0.15151515151515138,0.07245763138776953,0.15151515151515138,0.046508129166361926,0.15151515151515138,0.04842915541973139,0.15151515151515138,0.04601083462340586
0.2525252525252526,0.2003570453160002,0.2525252525252526,0.24151055338001104,0.2525252525252526,0.2530277286116801,0.2525252525252526,0.09533027991528796,0.2525252525252526,0.11633887943820748,0.2525252525252526,0.11992049316059605,0.2525252525252526,0.07717740817053882,0.2525252525252526,0.07538338916654858,0.2525252525252526,0.07493657104851133
0.3535353535353538,0.22583868683656727,0.3535353535353538,0.3245702345293225,0.3535353535353538,0.3487077570947679,0.3535353535353538,0.12847669129292608,0.3535353535353538,0.1661606781018032,0.3535353535353538,0.16626942811591283,0.3535353535353538,0.10784668717471575,0.3535353535353538,0.10205978943459323,0.3535353535353538,0.10356289911566637
0.45454545454545414,0.25132032835713397,0.45454545454545414,0.4042440047834261,0.45454545454545414,0.4412637068427958,0.45454545454545414,0.15705349698246504,0.45454545454545414,0.21489012650224273,0.45454545454545414,0.21055873443432177,0.45454545454545414,0.1385159661788923,0.45454545454545414,0.12849799626750344,0.45454545454545414,0.13171638145035697
0.5555555555555554,0.2768019698777009,0.5555555555555554,0.48386343064481413,0.5555555555555554,0.5292644209820558,0.5555555555555554,0.1822941322301175,0.5555555555555554,0.26332131026810235,0.5555555555555554,0.25282542280637477,0.5555555555555554,0.16918524518306918,0.5555555555555554,0.1537986605041808,0.5555555555555554,0.15885558014342485
0.6565656565656566,0.30228361139826787,0.6565656565656566,0.5582703975525269,0.6565656565656566,0.6095279265110211,0.6565656565656566,0.20753476747777022,0.6565656565656566,0.311752494033962,0.6565656565656566,0.2926908500466596,0.6565656565656566,0.1998545241872461,0.6565656565656566,0.17827793057103108,0.6565656565656566,0.18425901109338033
0.7575757575757578,0.3276630675001063,0.7575757575757578,0.6240165672599972,0.7575757575757578,0.6817170975194252,0.7575757575757578,0.23277540272542308,0.7575757575757578,0.35585725421977105,0.7575757575757578,0.32990973382338223,0.7575757575757578,0.23052380319142296,0.7575757575757578,0.19993717433313357,0.7575757575757578,0.20856541522380753
0.8585858585858581,0.35294340046326517,0.8585858585858581,0.6832251591090945,0.8585858585858581,0.7490023509530548,0.8585858585858581,0.2580160379730755,0.8585858585858581,0.39411574874383437,0.8585858585858581,0.3646086605463153,0.8585858585858581,0.2611930821955996,0.8585858585858581,0.21959645347898898,0.8585858585858581,0.2319021251050189
0.9595959595959593,0.3782237334264241,0.9595959595959593,0.7379264665053952,0.9595959595959593,0.8101967957597399,0.9595959595959593,0.28325667322072823,0.9595959595959593,0.4295026011065611,0.9595959595959593,0.39755496473819213,0.9595959595959593,0.2918623611997765,0.9595959595959593,0.23923560012200779,0.9595959595959593,0.25414437767202697
1.0606060606060606,0.40350406638958297,1.0606060606060606,0.7921580999576039,1.0606060606060606,0.865038072851208,1.0606060606060606,0.3013557830052828,1.0606060606060606,0.4622992830762259,1.0606060606060606,0.4264742505103137,1.0606060606060606,0.3225316402039533,1.0606060606060606,0.2583177367004956,1.0606060606060606,0.27509011865395333
1.1616161616161618,0.4287843993527419,1.1616161616161618,0.8463753861957045,1.1616161616161618,0.9101530745705552,1.1616161616161618,0.30890687222540525,1.1616161616161618,0.4901351365169132,1.1616161616161618,0.45319808589043276,1.1616161616161618,0.35276108409396234,1.1616161616161618,0.27725858987652097,1.1616161616161618,0.29560030900846
1.262626262626262,0.4540647323159006,1.262626262626262,0.897495878378595,1.262626262626262,0.9417185837581196,1.262626262626262,0.3164579614455276,1.262626262626262,0.5145846409490937,1.262626262626262,0.4780296455205537,1.262626262626262,0.3794404038170447,1.262626262626262,0.2943968389517317,1.262626262626262,0.3152556528081
1.3636363636363633,0.47934506527905946,1.3636363636363633,0.9371746663372353,1.3636363636363633,0.9683350572505884,1.3636363636363633,0.32400905066565,1.3636363636363633,0.5362370529858077,1.3636363636363633,0.49985847015098533,1.3636363636363633,0.4061135157391696,1.3636363636363633,0.31086438420332474,1.3636363636363633,0.33319398921001137
1.4646464646464645,0.5046253982422182,1.4646464646464645,0.9707358108138878,1.4646464646464645,0.9850292043911345,1.4646464646464645,0.3315601398857724,1.4646464646464645,0.5531019255981576,1.4646464646464645,0.5181848921010453,1.4646464646464645,0.43278662766129444,1.4646464646464645,0.32679862313827224,1.4646464646464645,0.34908318351734496
1.5656565656565657,0.519310758600954,1.5656565656565657,0.9906032176938914,1.5656565656565657,0.9918397190961462,1.5656565656565657,0.3391112291058948,1.5656565656565657,0.5659801950328859,1.5656565656565657,0.5323498791465002,1.5656565656565657,0.4511009412793216,1.5656565656565657,0.34162587661768695,1.5656565656565657,0.3628958484057042
1.666666666666667,0.51401635833774,1.666666666666667,1.005715077214144,1.666666666666667,0.9899656142606021,1.666666666666667,0.346662318326017,1.666666666666667,0.575829702298404,1.666666666666667,0.541260149475436,1.666666666666667,0.4674803110925756,1.666666666666667,0.35601920704359724,1.666666666666667,0.37572761649169056
1.7676767676767673,0.5060676795476615,1.7676767676767673,1.0131883048070176,1.7676767676767673,0.9786887428475383,1.7676767676767673,0.35421340754613934,1.7676767676767673,0.5828151703640635,1.7676767676767673,0.5455395874048847,1.7676767676767673,0.4838510153495891,1.7676767676767673,0.3703169330810678,1.7676767676767673,0.38781035405087
1.8686868686868685,0.495017206229559,1.8686868686868685,0.9802541539054102,1.8686868686868685,0.9559310588882513,1.8686868686868685,0.3617644967662619,1.8686868686868685,0.5839088497682434,1.8686868686868685,0.5467157898697311,1.8686868686868685,0.49835864007261943,1.8686868686868685,0.38410765063343066,1.8686868686868685,0.3977196343512365
1.9696969696969697,0.48396673291145637,1.9696969696969697,0.9263388630289161,1.9696969696969697,0.9221166683929235,1.9696969696969697,0.36931558598638414,1.9696969696969697,0.5804936028756624,1.9696969696969697,0.5450082343452209,1.9696969696969697,0.5115510651058692,1.9696969696969697,0.39647206872026003,1.9696969696969697,0.4057110985660076
2.070707070707071,0.4729162595933537,2.070707070707071,0.8698358861835761,2.070707070707071,0.8764481362001709,2.070707070707071,0.3768666752065065,2.070707070707071,0.574716686049867,2.070707070707071,0.5394474878302619,2.070707070707071,0.5097127295818997,2.070707070707071,0.4049032898801099,2.070707070707071,0.41126316053027995
2.1717171717171713,0.46186578627525116,2.1717171717171713,0.8081407617658106,2.1717171717171713,0.8224404974364862,2.1717171717171713,0.38441776442662906,2.1717171717171713,0.5655375705620478,2.1717171717171713,0.5300324428024472,2.1717171717171713,0.49554940844796147,2.1717171717171713,0.4101839304627971,2.1717171717171713,0.4155357725301964
2.2727272727272725,0.4491770446280175,2.2727272727272725,0.7442526428212628,2.2727272727272725,0.7592323649828391,2.2727272727272725,0.391968853646751,2.2727272727272725,0.552350323381661,2.2727272727272725,0.5163813504127768,2.2727272727272725,0.48094925798793925,2.2727272727272725,0.413936941837358,2.2727272727272725,0.41843071308941276
2.3737373737373737,0.43609986761848685,2.3737373737373737,0.675405575107383,2.3737373737373737,0.6874741372997285,2.3737373737373737,0.39951994286687353,2.3737373737373737,0.5335539998256553,2.3737373737373737,0.49865541506871236,2.3737373737373737,0.4655571015656922,2.3737373737373737,0.4173906236056948,2.3737373737373737,0.42027249977934045
2.474747474747475,0.4066895271847391,2.474747474747475,0.5978840366507735,2.474747474747475,0.6073682995880296,2.474747474747475,0.40692119452733155,2.474747474747475,0.5117177142842388,2.474747474747475,0.4784532511364369,2.474747474747475,0.4501649451434452,2.474747474747475,0.4206585025597512,2.474747474747475,0.4213399238172195
2.5757575757575752,0.3749622763477891,2.5757575757575752,0.5099585586540418,2.5757575757575752,0.5223271133442401,2.5757575757575752,0.41415264022012394,2.5757575757575752,0.4850415148130571,2.5757575757575752,0.4567094947730761,2.5757575757575752,0.43458555601387144,2.5757575757575752,0.42158324745022285,2.5757575757575752,0.42181632222498416
2.6767676767676765,0.3432350255108388,2.6767676767676765,0.4205365946887392,2.6767676767676765,0.432906236858961,2.6767676767676765,0.4199131836378292,2.6767676767676765,0.45218830888592937,2.6767676767676765,0.4332394825941561,2.6767676767676765,0.41774264448225407,2.6767676767676765,0.42145613907090707,2.6767676767676765,0.4215504924390677
2.7777777777777777,0.3115077746738885,2.7777777777777777,0.32930350370842715,2.7777777777777777,0.3412321347424227,2.7777777777777777,0.42274639662898705,2.7777777777777777,0.4163402713183856,2.7777777777777777,0.40851950219775013,2.7777777777777777,0.40089973295063663,2.7777777777777777,0.4209228617300304,2.7777777777777777,0.4203590184673923
2.878787878787879,0.27978052383693824,2.878787878787879,0.23807041272811588,2.878787878787879,0.24760314946640188,2.878787878787879,0.42557960962014507,2.878787878787879,0.3802049595409251,2.878787878787879,0.383057999391408,2.878787878787879,0.3840568214190192,2.878787878787879,0.41938009129458526,2.878787878787879,0.41854626446476473
2.9797979797979792,0.24805327299998842,2.9797979797979792,0.14646854757187647,2.9797979797979792,0.15264712621771054,2.9797979797979792,0.428104678899817,2.9797979797979792,0.3432577786602793,2.9797979797979792,0.35694448241628624,2.9797979797979792,0.367213909887402,2.9797979797979792,0.41773298189050795,2.9797979797979792,0.4163510447804036
3.0808080808080813,0.21632602216303798,3.0808080808080813,0.05456143993271787,3.0808080808080813,0.057336396951423035,3.0808080808080813,0.42910204221273207,3.0808080808080813,0.30602019255320434,3.0808080808080813,0.3305660520102483,3.0808080808080813,0.3503709983557844,3.0808080808080813,0.41593157838764133,3.0808080808080813,0.41396474245507225
3.1818181818181817,0.18459877132608776,3.1818181818181817,-0.03733538955626138,3.1818181818181817,-0.03779843888287274,3.1818181818181817,0.4300994055256468,3.1818181818181817,0.26873960102765904,3.1818181818181817,0.30419224859801247,3.1818181818181817,0.3335280868241671,3.1818181818181817,0.41409475876758717,3.1818181818181817,0.41152646064562604
3.282828282828282,0.15287152048913782,3.282828282828282,-0.12920906194738088,3.282828282828282,-0.13249853932321157,3.282828282828282,0.43099899837317435,3.282828282828282,0.2314874157056526,3.282828282828282,0.27788417508140784,3.282828282828282,0.3164995410780566,3.282828282828282,0.4122620364061852,3.282828282828282,0.40912247673587887
3.383838383838384,0.12114426965218736,3.383838383838384,-0.22108273433850145,3.383838383838384,-0.22672866959540386,3.383838383838384,0.4318917322435721,3.383838383838384,0.19424068277399548,3.383838383838384,0.25176947991950477,3.383838383838384,0.2992528546417876,3.383838383838384,0.41043205422405316,3.383838383838384,0.40674183306733336
3.4848484848484844,0.08941701881523752,3.4848484848484844,-0.3129564067296208,3.4848484848484844,-0.3204339220693533,3.4848484848484844,0.43278446611396965,3.4848484848484844,0.15713787053146627,3.4848484848484844,0.22587592408322044,3.4848484848484844,0.2820061682055188,3.4848484848484844,0.4086021011097265,3.4848484848484844,0.4043698847877142
3.5858585858585865,0.058162275193419995,3.5858585858585865,-0.40462815693660914,3.5858585858585865,-0.41324795154433747,3.5858585858585865,0.4336771999843675,3.5858585858585865,0.12019800234358827,3.5858585858585865,0.20009983185318994,3.5858585858585865,0.2647594817692496,3.5858585858585865,0.4067722514909233,3.5858585858585865,0.40203120630187705
3.686868686868687,0.027654025225499562,3.686868686868687,-0.49422269067564845,3.686868686868687,-0.505293720158625,3.686868686868687,0.43456993385476517,3.686868686868687,0.08338176166505175,3.686868686868687,0.17451220690194294,3.686868686868687,0.24694025624429472,3.686868686868687,0.40494401437700783,3.686868686868687,0.39972779600606
3.787878787878787,-0.0028542247424208616,3.787878787878787,-0.5825355853286744,3.787878787878787,-0.5971159649192432,3.787878787878787,0.4354626677251625,3.787878787878787,0.04665044899957155,3.787878787878787,0.14916273839002891,3.787878787878787,0.22899283485249716,3.787878787878787,0.40312179798093106,3.787878787878787,0.39746202764807126
3.8888888888888893,-0.03336247471034154,3.8888888888888893,-0.6703463394238872,3.8888888888888893,-0.68824406601414,3.8888888888888893,0.4363554015955604,3.8888888888888893,0.009919136334091362,3.8888888888888893,0.12414842115967273,3.8888888888888893,0.21104541346069938,3.8888888888888893,0.4013011021954902,3.8888888888888893,0.3952295870367829
3.9898989898989896,-0.06387072467826214,3.9898989898989896,-0.7575928168757736,3.9898989898989896,-0.7784133912470257,3.9898989898989896,0.437248135465958,3.9898989898989896,-0.026722390982327433,3.9898989898989896,0.09939234299162882,3.9898989898989896,0.19309799206890174,3.9898989898989896,0.399484052282032,3.9898989898989896,0.3930265651896393
4.09090909090909,-0.0943789746461824,4.09090909090909,-0.8443788481067765,4.09090909090909,-0.8681309126980375,4.09090909090909,0.43814086933635565,4.09090909090909,-0.06308596529257729,4.09090909090909,0.07491765400345742,4.09090909090909,0.1750164743635475,4.09090909090909,0.39766754707663343,4.09090909090909,0.3908577509521082
4.191919191919192,-0.12488722461410334,4.191919191919192,-0.9297917533069101,4.191919191919192,-0.9573364023412008,4.191919191919192,0.4390336032067535,4.191919191919192,-0.09929539509789244,4.191919191919192,0.05074971564267564,4.191919191919192,0.1568727764842795,4.191919191919192,0.3958543351530404,4.191919191919192,0.38872432233841003
4.292929292929292,-0.15539547458202363,4.292929292929292,-1.0140884125491687,4.292929292929292,-1.0459165238042567,4.292929292929292,0.4399263370771512,4.292929292929292,-0.1349334585206603,4.292929292929292,0.02675516616820918,4.292929292929292,0.13872907860501169,4.292929292929292,0.3940418892740997,4.292929292929292,0.38661923148208605
4.3939393939393945,-0.18590372454994458,4.3939393939393945,-1.0972974392893766,4.3939393939393945,-1.1342383379633272,4.3939393939393945,0.4408190709475487,4.3939393939393945,-0.16982980680843562,4.3939393939393945,0.002964652994963484,4.3939393939393945,0.11796054958424437,4.3939393939393945,0.3922298874756054,4.3939393939393945,0.3845302650106349
4.494949494949495,-0.216411974517865,4.494949494949495,-1.179182894055243,4.494949494949495,-1.2221355458185688,4.494949494949495,0.44032091498508585,4.494949494949495,-0.20469748939648835,4.494949494949495,-0.0206002794035424,4.494949494949495,0.09701325884395126,4.494949494949495,0.39041788567711144,4.494949494949495,0.38248614430609396
4.595959595959595,-0.24692022448578524,4.595959595959595,-1.2601894992373368,4.595959595959595,-1.3091379548259912,4.595959595959595,0.4390119198940737,4.595959595959595,-0.239564339118166,4.595959595959595,-0.044064215802437315,4.595959595959595,0.07606596810365834,4.595959595959595,0.38861853091288373,4.595959595959595,0.3804739406387159
4.696969696969697,-0.2774284744537062,4.696969696969697,-1.3408190143954206,4.696969696969697,-1.395667382198044,4.696969696969697,0.4377029248030613,4.696969696969697,-0.2744311888398445,4.696969696969697,-0.06739710896332894,4.696969696969697,0.05511867736336504,4.696969696969697,0.38683625018149875,4.696969696969697,0.37848669218529357
4.797979797979798,-0.3079367244216266,4.797979797979798,-1.4214485295534998,4.797979797979798,-1.4814148159277154,4.797979797979798,0.436393929712049,4.797979797979798,-0.3092980385615221,4.797979797979798,-0.09057526494106827,4.797979797979798,0.034171386623072064,4.797979797979798,0.3850542123238927,4.797979797979798,0.37652869146057905
4.8989898989899,-0.3384449743895474,4.8989898989899,-1.5019215376311323,4.8989898989899,-1.5662892316768398,4.8989898989899,0.4350560618496009,4.8989898989899,-0.34416306870335767,4.8989898989899,-0.11357143325279366,4.8989898989899,0.013224095882778591,4.8989898989899,0.383272237289863,4.8989898989899,0.37460430584833954
5.0,-0.3689532243574676,5.0,-1.5820215750973248,5.0,-1.6508596672714462,5.0,0.43307940950570034,5.0,-0.37879161071248096,5.0,-0.13636462992911846,5.0,-0.007723194857514326,5.0,0.38149127984729847,5.0,0.37272620912380855
7
TeX/Plots/Data/sin_6.csv
Normal file
@ -0,0 +1,7 @@
x,y
-3.14159265358979 , -1.22464679914735e-16
-1.88495559215388 , -0.951056516295154
-0.628318530717959 , -0.587785252292473
0.628318530717959 , 0.587785252292473
1.88495559215388 , 0.951056516295154
3.14159265358979 , 1.22464679914735e-16
64
TeX/Plots/Data/sin_conv.csv
Normal file
@ -0,0 +1,64 @@
|
||||
,x_i,y_i,x_d,y_d,x,y
|
||||
"1",0,0,-0.251688505259414,-0.109203329280437,-0.0838961684198045,-0.0364011097601456
|
||||
"2",0.1,0.0998334166468282,0.216143831477992,0.112557051753147,0.00912581751114394,0.0102181849309398
|
||||
"3",0.2,0.198669330795061,0.351879533708722,0.52138915851383,0.120991434720523,0.180094983253476
|
||||
"4",0.3,0.29552020666134,-0.0169121548298757,0.0870956013269369,0.0836131805695847,0.163690012207993
|
||||
"5",0.4,0.389418342308651,0.278503661037003,0.464752686490904,0.182421968363305,0.294268636359638
|
||||
"6",0.5,0.479425538604203,0.241783494554983,0.521480762031938,0.216291763003623,0.399960258238722
|
||||
"7",0.6,0.564642473395035,0.67288177436767,0.617435509386938,0.35521581484916,0.469717955748659
|
||||
"8",0.7,0.644217687237691,0.692239292735764,0.395366561077235,0.492895242512842,0.472257444593698
|
||||
"9",0.8,0.717356090899523,0.779946606884677,0.830045203984444,0.621840812496715,0.609161571471379
|
||||
"10",0.9,0.783326909627483,0.796987424421658,0.801263132114778,0.723333122197902,0.682652280249237
|
||||
"11",1,0.841470984807897,1.06821012817873,0.869642838589798,0.860323524382936,0.752971972337735
|
||||
"12",1.1,0.891207360061435,1.50128637982775,0.899079529605641,1.09148187598916,0.835465707990221
|
||||
"13",1.2,0.932039085967226,1.1194263347154,0.906626360727432,1.13393429991233,0.875953352580199
|
||||
"14",1.3,0.963558185417193,1.24675170552299,1.07848030956084,1.2135821540696,0.950969562327306
|
||||
"15",1.4,0.98544972998846,1.32784804980202,0.76685418220594,1.2818141129714,0.899892140468108
|
||||
"16",1.5,0.997494986604054,1.23565831982523,1.07310713979952,1.2548338349408,0.961170357331681
|
||||
"17",1.6,0.999573603041505,1.90289281875567,0.88003153305018,1.47254506382487,0.94006950203764
|
||||
"18",1.7,0.991664810452469,1.68871194985252,1.01829329437246,1.56940444551462,0.955793455192302
|
||||
"19",1.8,0.973847630878195,1.72179983981017,1.02268013575533,1.64902528694529,0.988666907865147
|
||||
"20",1.9,0.946300087687414,2.0758716236832,0.805032560816536,1.83908127693465,0.928000158917177
|
||||
"21",2,0.909297426825682,2.11118945422405,1.0134691646089,1.94365432453739,0.957334347939419
|
||||
"22",2.1,0.863209366648874,2.00475777514698,0.86568986134637,1.9826265174693,0.924298444442167
|
||||
"23",2.2,0.80849640381959,2.40773948766051,0.667018023975934,2.15807575978944,0.826761739840873
|
||||
"24",2.3,0.74570521217672,2.14892522112975,0.872704236332415,2.17485332420928,0.839957045849706
|
||||
"25",2.4,0.675463180551151,2.41696701330131,0.253955021611832,2.26412064248401,0.631186439537074
|
||||
"26",2.5,0.598472144103957,2.4087686184711,0.49450592290142,2.33847747374241,0.557319074033222
|
||||
"27",2.6,0.515501371821464,2.55312145187913,0.343944677655963,2.4151672191424,0.467867318187242
|
||||
"28",2.7,0.42737988023383,2.6585492172135,0.528990826178838,2.51649125567521,0.447178678139147
|
||||
"29",2.8,0.334988150155905,2.86281283456189,0.311400289332401,2.65184232661008,0.399952143417531
|
||||
"30",2.9,0.239249329213982,2.74379162744449,0.501282616227342,2.70796893413474,0.432791852065713
|
||||
"31",3,0.141120008059867,2.95951338295806,0.241385538727577,2.81576254355573,0.373424929745113
|
||||
"32",3.1,0.0415806624332905,2.87268165585702,0.0764217470113609,2.85626015646841,0.264426413128825
|
||||
"33",3.2,-0.0583741434275801,3.29898326143096,-0.272500742891131,3.0101734240017,0.0756660807058224
|
||||
"34",3.3,-0.157745694143249,3.64473302259565,-0.24394459655987,3.24463496592626,-0.0688606479078372
|
||||
"35",3.4,-0.255541102026832,3.46698556586598,-0.184272732807665,3.35339770834784,-0.15210430721581
|
||||
"36",3.5,-0.35078322768962,3.67208160089566,-0.119933071489115,3.51318482264886,-0.176430496141549
|
||||
"37",3.6,-0.442520443294852,3.73738883546162,-0.486197268315415,3.62961845872181,-0.283186040443485
|
||||
"38",3.7,-0.529836140908493,3.77209072631297,-0.70275845349803,3.68619468325631,-0.422698101171958
|
||||
"39",3.8,-0.611857890942719,3.66424718733509,-0.482410535792735,3.69727905622484,-0.462935060857071
|
||||
"40",3.9,-0.687766159183974,3.72257849834575,-0.58477261395861,3.71784166083333,-0.543108060927685
|
||||
"41",4,-0.756802495307928,3.85906293918747,-0.703015362823377,3.76539960460785,-0.618449987254768
|
||||
"42",4.1,-0.818277111064411,4.0131961543859,-0.900410257326814,3.84632588679948,-0.708384794580195
|
||||
"43",4.2,-0.871575772413588,4.0263131749378,-0.906044808231391,3.92085812717095,-0.789303202089581
|
||||
"44",4.3,-0.916165936749455,4.77220075671212,-0.530827398816399,4.22925719163087,-0.729943577630504
|
||||
"45",4.4,-0.951602073889516,4.4795636311648,-1.26672674728111,4.35331987391088,-0.921377204806384
|
||||
"46",4.5,-0.977530117665097,4.5088210845027,-0.886168448505782,4.44898342417679,-0.914264630323723
|
||||
"47",4.6,-0.993691003633465,4.70645816063034,-1.1082213336257,4.58861983576766,-0.97806804633887
|
||||
"48",4.7,-0.999923257564101,4.48408312008838,-0.98352521226689,4.55827710678399,-1.01979325501755
|
||||
"49",4.8,-0.996164608835841,4.97817348334347,-1.03043977928678,4.69715193557134,-1.02203657500247
|
||||
"50",4.9,-0.982452612624332,5.09171179984929,-0.948912592308037,4.8484480091335,-0.999631162740658
|
||||
"51",5,-0.958924274663138,4.87710566000798,-0.825224506141761,4.87693462801326,-0.937722874707385
|
||||
"52",5.1,-0.925814682327732,5.04139294635392,-0.718936957124138,4.97198282698482,-0.856650521199568
|
||||
"53",5.2,-0.883454655720153,4.94893136398377,-0.992753696742329,4.98294046406006,-0.885371127105841
|
||||
"54",5.3,-0.832267442223901,5.38128555915899,-0.717434652733088,5.10670981664685,-0.816103747160468
|
||||
"55",5.4,-0.772764487555987,5.46192736637355,-0.724060934669406,5.2398375587704,-0.780347098915984
|
||||
"56",5.5,-0.705540325570392,5.30834840605735,-0.721772537926303,5.28807996342596,-0.766498807502665
|
||||
"57",5.6,-0.631266637872321,5.53199687756185,-0.583133415115471,5.40779902870202,-0.688843253413245
|
||||
"58",5.7,-0.550685542597638,5.9238064899769,-0.541063721566544,5.59865656961444,-0.627040990301198
|
||||
"59",5.8,-0.464602179413757,5.8067999294844,-0.43156566524513,5.68077207716296,-0.552246304884294
|
||||
"60",5.9,-0.373876664830236,5.93089453525347,-0.604056792592816,5.80084302534748,-0.550733954237757
|
||||
"61",6,-0.279415498198926,6.02965160059402,-0.234452930170458,5.91786841211583,-0.434812265604247
|
||||
"62",6.1,-0.182162504272095,5.88697419016579,-0.135764844759742,5.91990685000071,-0.323660336266941
|
||||
"63",6.2,-0.0830894028174964,5.91445270773648,-0.0073552500992853,5.92798052258888,-0.205537962618181
|
|
138
TeX/Plots/RN_vs_RS.tex
Normal file
@ -0,0 +1,138 @@
|
||||
\pgfplotsset{
|
||||
compat=1.11,
|
||||
legend image code/.code={
|
||||
\draw[mark repeat=2,mark phase=2]
|
||||
plot coordinates {
|
||||
(0cm,0cm)
|
||||
(0.075cm,0cm) %% default is (0.3cm,0cm)
|
||||
(0.15cm,0cm) %% default is (0.6cm,0cm)
|
||||
};%
|
||||
}
|
||||
}
|
||||
\begin{figure}
|
||||
\begin{subfigure}[b]{0.5\textwidth}
|
||||
\begin{subfigure}[b]{\textwidth}
|
||||
\begin{adjustbox}{width=\textwidth, height=0.25\textheight}
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}[
|
||||
ytick = {-1, 0, 1, 2},
|
||||
yticklabels = {$-1$, $\phantom{-0.}0$, $1$, $2$},]
|
||||
\addplot table [x=x, y=y, col sep=comma, only marks,
|
||||
forget plot] {Plots/Data/sin_6.csv};
|
||||
\addplot [black, line width=2pt] table [x=x, y=y, col
|
||||
sep=comma, mark=none] {Plots/Data/matlab_0.csv};
|
||||
\addplot [red, line width = 1.5pt, dashed] table [x=x_n_5000_tl_0.0,
|
||||
y=y_n_5000_tl_0.0, col sep=comma, mark=none] {Plots/Data/scala_out_sin.csv};
|
||||
\addlegendentry{$f_1^{*, 0.1}$};
|
||||
\addlegendentry{$\mathcal{RN}_w^{\tilde{\lambda}}$};
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
\end{adjustbox}
|
||||
\caption{$\lambda = 0.1$}
|
||||
\end{subfigure}\\
|
||||
\begin{subfigure}[b]{\textwidth}
|
||||
\begin{adjustbox}{width=\textwidth, height=0.25\textheight}
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}
|
||||
\addplot table [x=x, y=y, col sep=comma, only marks,
|
||||
forget plot] {Plots/Data/sin_6.csv};
|
||||
\addplot [black, line width=2pt] table [x=x, y=y, col sep=comma, mark=none] {Plots/Data/matlab_1.csv};
|
||||
\addplot [red, line width = 1.5pt, dashed] table [x=x_n_5000_tl_1.0,
|
||||
y=y_n_5000_tl_1.0, col sep=comma, mark=none] {Plots/Data/scala_out_sin.csv};
|
||||
\addlegendentry{$f_1^{*, 1.0}$};
|
||||
\addlegendentry{$\mathcal{RN}_w^{\tilde{\lambda}}$};
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
\end{adjustbox}
|
||||
\caption{$\lambda = 1.0$}
|
||||
\end{subfigure}\\
|
||||
\begin{subfigure}[b]{\textwidth}
|
||||
\begin{adjustbox}{width=\textwidth, height=0.25\textheight}
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}
|
||||
\addplot table [x=x, y=y, col sep=comma, only marks,
|
||||
forget plot] {Plots/Data/sin_6.csv};
|
||||
\addplot [black, line width=2pt] table [x=x, y=y, col sep=comma, mark=none] {Plots/Data/matlab_3.csv};
|
||||
\addplot [red, line width = 1.5pt, dashed] table [x=x_n_5000_tl_3.0,
|
||||
y=y_n_5000_tl_3.0, col sep=comma, mark=none] {Plots/Data/scala_out_sin.csv};
|
||||
\addlegendentry{$f_1^{*, 3.0}$};
|
||||
\addlegendentry{$\mathcal{RN}_w^{\tilde{\lambda}}$};
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
\end{adjustbox}
|
||||
\caption{$\lambda = 3.0$}
|
||||
\end{subfigure}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}[b]{0.5\textwidth}
|
||||
\begin{subfigure}[b]{\textwidth}
|
||||
\begin{adjustbox}{width=\textwidth, height=0.245\textheight}
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}[
|
||||
ytick = {-2,-1, 0, 1, 2},
|
||||
yticklabels = {$-2$,$-1$, $\phantom{-0.}0$, $1$, $2$},]
|
||||
\addplot table [x=x, y=y, col sep=comma, only marks,
|
||||
forget plot] {Plots/Data/data_sin_d_t.csv};
|
||||
\addplot [black, line width=2pt] table [x=x, y=y, col sep=comma, mark=none] {Plots/Data/matlab_sin_d_01.csv};
|
||||
\addplot [red, line width = 1.5pt, dashed] table [x=x_n_5000_tl_0.1,
|
||||
y=y_n_5000_tl_0.1, col sep=comma, mark=none] {Plots/Data/scala_out_d_1_t.csv};
|
||||
\addlegendentry{$f_1^{*, 0.1}$};
|
||||
\addlegendentry{$\mathcal{RN}_w^{\tilde{\lambda}}$};
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
\end{adjustbox}
|
||||
\caption{$\lambda = 0.1$}
|
||||
\end{subfigure}\\
|
||||
\begin{subfigure}[b]{\textwidth}
|
||||
\begin{adjustbox}{width=\textwidth, height=0.25\textheight}
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}
|
||||
\addplot table [x=x, y=y, col sep=comma, only marks,
|
||||
forget plot] {Plots/Data/data_sin_d_t.csv};
|
||||
\addplot [black, line width=2pt] table [x=x, y=y, col sep=comma, mark=none] {Plots/Data/matlab_sin_d_1.csv};
|
||||
\addplot [red, line width = 1.5pt, dashed] table [x=x_n_5000_tl_1.0,
|
||||
y=y_n_5000_tl_1.0, col sep=comma, mark=none] {Plots/Data/scala_out_d_1_t.csv};
|
||||
\addlegendentry{$f_1^{*, 1.0}$};
|
||||
\addlegendentry{$\mathcal{RN}_w^{\tilde{\lambda},*}$};
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
\end{adjustbox}
|
||||
\caption{$\lambda = 1.0$}
|
||||
\end{subfigure}\\
|
||||
\begin{subfigure}[b]{\textwidth}
|
||||
\begin{adjustbox}{width=\textwidth, height=0.25\textheight}
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}
|
||||
\addplot table [x=x, y=y, col sep=comma, only marks,
|
||||
forget plot] {Plots/Data/data_sin_d_t.csv};
|
||||
\addplot [black, line width=2pt] table [x=x, y=y, col sep=comma, mark=none] {Plots/Data/matlab_sin_d_3.csv};
|
||||
\addplot [red, line width = 1.5pt, dashed] table [x=x_n_5000_tl_3.0,
|
||||
y=y_n_5000_tl_3.0, col sep=comma, mark=none] {Plots/Data/scala_out_d_1_t.csv};
|
||||
\addlegendentry{$f_1^{*, 3.0}$};
|
||||
\addlegendentry{$\mathcal{RN}_w^{\tilde{\lambda}}$};
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
\end{adjustbox}
|
||||
\caption{$\lambda = 3.0$}
|
||||
\end{subfigure}
|
||||
\end{subfigure}
|
||||
\caption{% In these figures the behaviour stated in ... is visualized
  % in two examples. For $(a), (b), (c)$ six values of the sine
  % function spaced equidistantly on $[-\pi, \pi]$ have been used as
  % training data. For $(d),(e),(f)$ 15 equidistant values have been
  % used, where $y_i^{train} = \sin(x_i^{train}) + \varepsilon_i$ and
  % $\varepsilon_i \sim \mathcal{N}(0, 0.3)$. For
  % $\mathcal{RN}_w^{\tilde{\lambda}, *}$ the random weights are
  % distributed as follows
  % \begin{align*}
  %   \xi_k &\sim
  % \end{align*}
  Ridge penalized neural network compared to regression spline, with
  both being trained on $\text{data}_A$ in a), b), c) and on
  $\text{data}_B$ in d), e), f).
  The parameters of each are given above.
}
|
||||
\end{figure}
|
||||
%%% Local Variables:
|
||||
%%% mode: latex
|
||||
%%% TeX-master:
|
||||
%%% End:
|
91
TeX/Plots/SGD_vs_GD.tex
Normal file
@ -0,0 +1,91 @@
|
||||
\pgfplotsset{
|
||||
compat=1.11,
|
||||
legend image code/.code={
|
||||
\draw[mark repeat=2,mark phase=2]
|
||||
plot coordinates {
|
||||
(0cm,0cm)
|
||||
(0.0cm,0cm) %% default is (0.3cm,0cm)
|
||||
(0.0cm,0cm) %% default is (0.6cm,0cm)
|
||||
};%
|
||||
}
|
||||
}
|
||||
\begin{figure}
|
||||
\begin{subfigure}[b]{\textwidth}
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}[tick style = {draw = none}, width = \textwidth,
|
||||
height = 0.65\textwidth,
|
||||
xtick = {1, 3, 5,7,9,11,13,15,17,19},
|
||||
xticklabels = {$2$, $4$, $6$, $8$,
|
||||
$10$,$12$,$14$,$16$,$18$,$20$},
|
||||
xlabel = {training epoch}, ylabel = {classification accuracy}]
|
||||
\addplot table
|
||||
[x=epoch, y=val_accuracy, col sep=comma] {Plots/Data/GD_01.log};
|
||||
\addplot table
|
||||
[x=epoch, y=val_accuracy, col sep=comma] {Plots/Data/GD_05.log};
|
||||
\addplot table
|
||||
[x=epoch, y=val_accuracy, col sep=comma] {Plots/Data/GD_1.log};
|
||||
\addplot table
|
||||
[x=epoch, y=val_accuracy, col sep=comma]
|
||||
{Plots/Data/SGD_01_b32.log};
|
||||
|
||||
\addlegendentry{GD$_{0.01}$}
|
||||
\addlegendentry{GD$_{0.05}$}
|
||||
\addlegendentry{GD$_{0.1}$}
|
||||
\addlegendentry{SGD$_{0.01}$}
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
%\caption{Classification accuracy}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}[b]{\textwidth}
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}[tick style = {draw = none}, width = \textwidth,
|
||||
height = 0.65\textwidth,
|
||||
ytick = {0, 1, 2, 3, 4},
|
||||
yticklabels = {$0$, $1$, $\phantom{0.}2$, $3$, $4$},
|
||||
xtick = {1, 3, 5,7,9,11,13,15,17,19},
|
||||
xticklabels = {$2$, $4$, $6$, $8$,
|
||||
$10$,$12$,$14$,$16$,$18$,$20$},
|
||||
xlabel = {training epoch}, ylabel = {error measure}]
|
||||
\addplot table
|
||||
[x=epoch, y=val_loss, col sep=comma] {Plots/Data/GD_01.log};
|
||||
\addplot table
|
||||
[x=epoch, y=val_loss, col sep=comma] {Plots/Data/GD_05.log};
|
||||
\addplot table
|
||||
[x=epoch, y=val_loss, col sep=comma] {Plots/Data/GD_1.log};
|
||||
\addplot table
|
||||
[x=epoch, y=val_loss, col sep=comma] {Plots/Data/SGD_01_b32.log};
|
||||
|
||||
\addlegendentry{GD$_{0.01}$}
|
||||
\addlegendentry{GD$_{0.05}$}
|
||||
\addlegendentry{GD$_{0.1}$}
|
||||
\addlegendentry{SGD$_{0.01}$}
|
||||
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
\caption{Performance metrics during training}
|
||||
\end{subfigure}
|
||||
% \\~\\
|
||||
\caption{The neural network given in ?? trained with different
  algorithms on the MNIST handwritten digits data set. For gradient
  descent the learning rates 0.01, 0.05 and 0.1 are used
  (GD$_{\cdot}$). For stochastic gradient descent a batch size of 32
  and a learning rate of 0.01 is used (SGD$_{0.01}$).}
|
||||
\label{fig:sgd_vs_gd}
|
||||
\end{figure}
|
||||
|
||||
\begin{table}
|
||||
\begin{tabu} to \textwidth {@{} *4{X[c]}c*4{X[c]} @{}}
|
||||
\multicolumn{4}{c}{Classification Accuracy}
|
||||
&~&\multicolumn{4}{c}{Error Measure}
|
||||
\\\cline{1-4}\cline{6-9}
|
||||
GD$_{0.01}$&GD$_{0.05}$&GD$_{0.1}$&SGD$_{0.01}$&&GD$_{0.01}$&GD$_{0.05}$&GD$_{0.1}$&SGD$_{0.01}$
|
||||
\\\cline{1-4}\cline{6-9}
|
||||
1&1&1&1&&1&1&1&1
|
||||
\end{tabu}
|
||||
\caption{Performance metrics of the networks trained in
  Figure~\ref{fig:sgd_vs_gd} after 20 training epochs.}
|
||||
\end{table}
|
||||
%%% Local Variables:
|
||||
%%% mode: latex
|
||||
%%% TeX-master: "../main"
|
||||
%%% End:
|
71
TeX/Plots/_region_.tex
Normal file
@ -0,0 +1,71 @@
|
||||
\message{ !name(pfg_test.tex)}\documentclass{article}
|
||||
\usepackage{pgfplots}
|
||||
\usepackage{filecontents}
|
||||
\usepackage{subcaption}
|
||||
\usepackage{adjustbox}
|
||||
\usepackage{xcolor}
|
||||
\usepackage{graphicx}
|
||||
\usetikzlibrary{calc, 3d}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\message{ !name(pfg_test.tex) !offset(6) }
|
||||
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
\end{adjustbox}
|
||||
\caption{True position (\textcolor{red}{red}), distorted data (black)}
|
||||
\end{figure}
|
||||
\begin{center}
|
||||
\begin{figure}[h]
|
||||
\begin{subfigure}{0.49\textwidth}
|
||||
\includegraphics[width=\textwidth]{Data/klammern.jpg}
|
||||
\caption{Original Picture}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}{0.49\textwidth}
|
||||
\includegraphics[width=\textwidth]{Data/image_conv4.png}
|
||||
\caption{test}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}{0.49\textwidth}
|
||||
\includegraphics[width=\textwidth]{Data/image_conv5.png}
|
||||
\caption{test}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}{0.49\textwidth}
|
||||
\includegraphics[width=\textwidth]{Data/image_conv6.png}
|
||||
\caption{test}
|
||||
\end{subfigure}
|
||||
\end{figure}
|
||||
\end{center}
|
||||
|
||||
\begin{figure}
|
||||
\begin{adjustbox}{width=\textwidth}
|
||||
\begin{tikzpicture}
|
||||
\begin{scope}[x = (0:1cm), y=(90:1cm), z=(15:-0.5cm)]
|
||||
\node[canvas is xy plane at z=0, transform shape] at (0,0)
|
||||
{\includegraphics[width=5cm]{Data/klammern_r.jpg}};
|
||||
\node[canvas is xy plane at z=2, transform shape] at (0,-0.2)
|
||||
{\includegraphics[width=5cm]{Data/klammern_g.jpg}};
|
||||
\node[canvas is xy plane at z=4, transform shape] at (0,-0.4)
|
||||
{\includegraphics[width=5cm]{Data/klammern_b.jpg}};
|
||||
\node[canvas is xy plane at z=4, transform shape] at (-8,-0.2)
|
||||
{\includegraphics[width=5.3cm]{Data/klammern_rgb.jpg}};
|
||||
\end{scope}
|
||||
\end{tikzpicture}
|
||||
\end{adjustbox}
|
||||
\caption{On the right, the red, green and blue channels of the picture
  are displayed. In order to better visualize the color channels, the
  black and white picture of each channel has been colored in the
  respective color. Combining the layers results in the image on the
  left.}
|
||||
\end{figure}
|
||||
|
||||
|
||||
|
||||
\message{ !name(pfg_test.tex) !offset(3) }
|
||||
|
||||
\end{document}
|
||||
|
||||
%%% Local Variables:
|
||||
%%% mode: latex
|
||||
%%% TeX-master: t
|
||||
%%% End:
|
146
TeX/Plots/pfg_test.tex
Normal file
@ -0,0 +1,146 @@
|
||||
\documentclass{article}
|
||||
\usepackage{pgfplots}
|
||||
\usepackage{filecontents}
|
||||
\usepackage{subcaption}
|
||||
\usepackage{adjustbox}
|
||||
\usepackage{xcolor}
|
||||
\usepackage{tabu}
|
||||
\usepackage{graphicx}
|
||||
\usetikzlibrary{calc, 3d}
|
||||
|
||||
\begin{document}
|
||||
\pgfplotsset{
|
||||
compat=1.11,
|
||||
legend image code/.code={
|
||||
\draw[mark repeat=2,mark phase=2]
|
||||
plot coordinates {
|
||||
(0cm,0cm)
|
||||
(0.0cm,0cm) %% default is (0.3cm,0cm)
|
||||
(0.0cm,0cm) %% default is (0.6cm,0cm)
|
||||
};%
|
||||
}
|
||||
}
|
||||
\begin{figure}
|
||||
\begin{subfigure}[b]{\textwidth}
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}[tick style = {draw = none}, width = \textwidth,
|
||||
height = 0.7\textwidth,
|
||||
xtick = {1, 3, 5,7,9,11,13,15,17,19},
|
||||
xticklabels = {$2$, $4$, $6$, $8$,
|
||||
$10$,$12$,$14$,$16$,$18$,$20$},
|
||||
xlabel = {epoch}, ylabel = {Classification Accuracy}]
|
||||
\addplot table
|
||||
[x=epoch, y=val_accuracy, col sep=comma] {Data/GD_01.log};
|
||||
\addplot table
|
||||
[x=epoch, y=val_accuracy, col sep=comma] {Data/GD_05.log};
|
||||
\addplot table
|
||||
[x=epoch, y=val_accuracy, col sep=comma] {Data/GD_1.log};
|
||||
\addplot table
|
||||
[x=epoch, y=val_accuracy, col sep=comma]
|
||||
{Data/SGD_01_b32.log};
|
||||
|
||||
\addlegendentry{GD$_{0.01}$}
|
||||
\addlegendentry{GD$_{0.05}$}
|
||||
\addlegendentry{GD$_{0.1}$}
|
||||
\addlegendentry{SGD$_{0.01}$}
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
%\caption{Classification accuracy}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}[b]{\textwidth}
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}[tick style = {draw = none}, width = \textwidth,
|
||||
height = 0.7\textwidth,
|
||||
ytick = {0, 1, 2, 3, 4},
|
||||
yticklabels = {$0$, $1$, $\phantom{0.}2$, $3$, $4$},
|
||||
xtick = {1, 3, 5,7,9,11,13,15,17,19},
|
||||
xticklabels = {$2$, $4$, $6$, $8$,
|
||||
$10$,$12$,$14$,$16$,$18$,$20$},
|
||||
xlabel = {epoch}, ylabel = {Error Measure}]
|
||||
\addplot table
|
||||
[x=epoch, y=val_loss, col sep=comma] {Data/GD_01.log};
|
||||
\addplot table
|
||||
[x=epoch, y=val_loss, col sep=comma] {Data/GD_05.log};
|
||||
\addplot table
|
||||
[x=epoch, y=val_loss, col sep=comma] {Data/GD_1.log};
|
||||
\addplot table
|
||||
[x=epoch, y=val_loss, col sep=comma] {Data/SGD_01_b32.log};
|
||||
|
||||
\addlegendentry{GD$_{0.01}$}
|
||||
\addlegendentry{GD$_{0.05}$}
|
||||
\addlegendentry{GD$_{0.1}$}
|
||||
\addlegendentry{SGD$_{0.01}$}
|
||||
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
\caption{Performance metrics during training}
|
||||
\end{subfigure}
|
||||
\\~\\
|
||||
\begin{subfigure}[b]{1.0\linewidth}
|
||||
\begin{tabu} to \textwidth {@{} *4{X[c]}c*4{X[c]} @{}}
|
||||
\multicolumn{4}{c}{Classification Accuracy}
|
||||
&~&\multicolumn{4}{c}{Error Measure}
|
||||
\\\cline{1-4}\cline{6-9}
|
||||
GD$_{0.01}$&GD$_{0.05}$&GD$_{0.1}$&SGD$_{0.01}$&&GD$_{0.01}$&GD$_{0.05}$&GD$_{0.1}$&SGD$_{0.01}$
|
||||
\\\cline{1-4}\cline{6-9}
|
||||
1&1&1&1&&1&1&1&1
|
||||
\end{tabu}
|
||||
\caption{Performance metrics after 20 epochs}
|
||||
\end{subfigure}
|
||||
\caption{The neural network given in ?? trained with different
  algorithms on the MNIST handwritten digits data set. For gradient
  descent the learning rates 0.01, 0.05 and 0.1 are used
  (GD$_{\text{rate}}$). For stochastic gradient descent a batch size
  of 32 and a learning rate of 0.01 is used (SGD$_{0.01}$).}
|
||||
\end{figure}
|
||||
|
||||
\begin{center}
|
||||
\begin{figure}[h]
|
||||
\begin{subfigure}{0.49\textwidth}
|
||||
\includegraphics[width=\textwidth]{Data/klammern.jpg}
|
||||
\caption{Original Picture}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}{0.49\textwidth}
|
||||
\includegraphics[width=\textwidth]{Data/image_conv4.png}
|
||||
\caption{test}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}{0.49\textwidth}
|
||||
\includegraphics[width=\textwidth]{Data/image_conv5.png}
|
||||
\caption{test}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}{0.49\textwidth}
|
||||
\includegraphics[width=\textwidth]{Data/image_conv6.png}
|
||||
\caption{test}
|
||||
\end{subfigure}
|
||||
\end{figure}
|
||||
\end{center}
|
||||
|
||||
\begin{figure}
|
||||
\begin{adjustbox}{width=\textwidth}
|
||||
\begin{tikzpicture}
|
||||
\begin{scope}[x = (0:1cm), y=(90:1cm), z=(15:-0.5cm)]
|
||||
\node[canvas is xy plane at z=0, transform shape] at (0,0)
|
||||
{\includegraphics[width=5cm]{Data/klammern_r.jpg}};
|
||||
\node[canvas is xy plane at z=2, transform shape] at (0,-0.2)
|
||||
{\includegraphics[width=5cm]{Data/klammern_g.jpg}};
|
||||
\node[canvas is xy plane at z=4, transform shape] at (0,-0.4)
|
||||
{\includegraphics[width=5cm]{Data/klammern_b.jpg}};
|
||||
\node[canvas is xy plane at z=4, transform shape] at (-8,-0.2)
|
||||
{\includegraphics[width=5.3cm]{Data/klammern_rgb.jpg}};
|
||||
\end{scope}
|
||||
\end{tikzpicture}
|
||||
\end{adjustbox}
|
||||
\caption{On the right, the red, green and blue channels of the picture
  are displayed. In order to better visualize the color channels, the
  black and white picture of each channel has been colored in the
  respective color. Combining the layers results in the image on the
  left.}
|
||||
\end{figure}
|
||||
|
||||
|
||||
\end{document}
|
||||
|
||||
%%% Local Variables:
|
||||
%%% mode: latex
|
||||
%%% TeX-master: t
|
||||
%%% End:
|
64
TeX/Plots/sin_conv.csv
Normal file
@ -0,0 +1,64 @@
|
||||
"","x_i","y_i","x_d","y_d","x","y"
|
||||
"1",0,0,0.0815633019993375,0.095134925029757,0.0815633019993375,0.095134925029757
|
||||
"2",0.1,0.0998334166468282,-0.137539012603596,0.503920419784276,-0.137539012603596,0.503920419784276
|
||||
"3",0.2,0.198669330795061,0.219868163218743,0.32022289024623,0.219868163218743,0.32022289024623
|
||||
"4",0.3,0.29552020666134,0.378332723534869,0.474906286765401,0.378332723534869,0.474906286765401
|
||||
"5",0.4,0.389418342308651,0.286034335293811,0.422891394375764,0.215056588291437,0.412478430748051
|
||||
"6",0.5,0.479425538604203,-0.109871707385461,0.229661026779107,0.122574532557623,0.353221043330047
|
||||
"7",0.6,0.564642473395035,0.91036951450573,0.56079130435097,0.451160317716352,0.452893574072324
|
||||
"8",0.7,0.644217687237691,0.899001194675409,0.714355793051917,0.491731451724399,0.514477919331008
|
||||
"9",0.8,0.717356090899523,0.733791390723896,0.694085383523086,0.488943974889845,0.530054084580656
|
||||
"10",0.9,0.783326909627483,0.893642943873427,0.739792642916928,0.599785378272423,0.575149967162231
|
||||
"11",1,0.841470984807897,0.895913227983752,0.658288213778898,0.650886140047209,0.577618711891772
|
||||
"12",1.1,0.891207360061435,1.01252219752013,0.808981437684505,0.726263244907525,0.643161394030218
|
||||
"13",1.2,0.932039085967226,1.30930912337975,1.04111824066026,0.872590842152803,0.745714536528734
|
||||
"14",1.3,0.963558185417193,1.0448292335495,0.741250429230841,0.850147062957694,0.687171673021914
|
||||
"15",1.4,0.98544972998846,1.57369086195552,1.17277927321094,1.06520673597544,0.847936751231165
|
||||
"16",1.5,0.997494986604054,1.61427415976939,1.3908361301708,1.15616745244604,0.969474391592075
|
||||
"17",1.6,0.999573603041505,1.34409615749122,0.976992098566069,1.13543598207093,0.889434319996364
|
||||
"18",1.7,0.991664810452469,1.79278028030419,1.02939764179765,1.33272772191879,0.935067381106346
|
||||
"19",1.8,0.973847630878195,1.50721559744085,0.903076361857071,1.30862923824728,0.91665506605512
|
||||
"20",1.9,0.946300087687414,1.835014641556,0.830477479204284,1.45242210409837,0.889715842048808
|
||||
"21",2,0.909297426825682,1.98589997236352,0.887302138185342,1.56569111721857,0.901843632635883
|
||||
"22",2.1,0.863209366648874,2.31436634488224,0.890096618924313,1.73810390755555,0.899632162941341
|
||||
"23",2.2,0.80849640381959,2.14663445612581,0.697012453130415,1.77071083163663,0.831732978616874
|
||||
"24",2.3,0.74570521217672,2.17162372560288,0.614243640399509,1.84774268936257,0.787400621584077
|
||||
"25",2.4,0.675463180551151,2.2488591417345,0.447664288915269,1.93366609303299,0.707449056213168
|
||||
"26",2.5,0.598472144103957,2.56271588872389,0.553368843490625,2.08922735802261,0.702402440783529
|
||||
"27",2.6,0.515501371821464,2.60986205081511,0.503762006272682,2.17548673152621,0.657831176057599
|
||||
"28",2.7,0.42737988023383,2.47840649766003,0.215060732402894,2.20251747034638,0.533903400086802
|
||||
"29",2.8,0.334988150155905,2.99861119922542,0.28503285049582,2.43015164462239,0.512492561673074
|
||||
"30",2.9,0.239249329213982,3.09513467852082,0.245355736487949,2.54679545455398,0.461447717313721
|
||||
"31",3,0.141120008059867,2.86247369846558,0.0960140633436418,2.55274767368554,0.371740588261606
|
||||
"32",3.1,0.0415806624332905,2.79458017090243,-0.187923650913249,2.59422388058738,0.234694070506915
|
||||
"33",3.2,-0.0583741434275801,3.6498183243501,-0.186738431858275,2.9216851043241,0.173308072295566
|
||||
"34",3.3,-0.157745694143249,3.19424275971809,-0.221908035274934,2.86681135711315,0.101325637659584
|
||||
"35",3.4,-0.255541102026832,3.53166785156005,-0.295496842654793,3.03827050777863,0.0191967841533109
|
||||
"36",3.5,-0.35078322768962,3.53250700922714,-0.364585027403596,3.12709094619305,-0.0558446366563474
|
||||
"37",3.6,-0.442520443294852,3.52114271616751,-0.363845774016092,3.18702722489489,-0.10585071711408
|
||||
"38",3.7,-0.529836140908493,3.72033580551176,-0.386489608468821,3.31200591645168,-0.158195730190865
|
||||
"39",3.8,-0.611857890942719,4.0803717995796,-0.64779795182054,3.49862620703954,-0.284999326812438
|
||||
"40",3.9,-0.687766159183974,3.88351729419721,-0.604406622894426,3.51908925124143,-0.324791870057922
|
||||
"41",4,-0.756802495307928,3.9941257036697,-0.8061112437715,3.62222513609486,-0.438560071688316
|
||||
"42",4.1,-0.818277111064411,3.81674488816054,-0.548538951165239,3.63032709398802,-0.41285438330036
|
||||
"43",4.2,-0.871575772413588,4.47703348424544,-0.998992385231986,3.88581748102334,-0.592305016590357
|
||||
"44",4.3,-0.916165936749455,4.46179199544059,-0.969288921090897,3.96444243944485,-0.643076376622242
|
||||
"45",4.4,-0.951602073889516,4.15184730382548,-1.11987501275525,3.93838897981045,-0.743258835859858
|
||||
"46",4.5,-0.977530117665097,4.64522916494355,-0.772872365801468,4.15504805602606,-0.691414328153313
|
||||
"47",4.6,-0.993691003633465,4.68087925098283,-0.650422764094352,4.24176417425486,-0.675107584174976
|
||||
"48",4.7,-0.999923257564101,5.00475403211142,-0.922605880059771,4.41432228408005,-0.770625346502085
|
||||
"49",4.8,-0.996164608835841,4.71428836112322,-1.14280193223997,4.41279031790692,-0.861010494025717
|
||||
"50",4.9,-0.982452612624332,5.02115518218406,-0.9819618243158,4.57449352886454,-0.843786948015608
|
||||
"51",5,-0.958924274663138,4.92057344952522,-0.872931430146499,4.61418118503201,-0.836318916150308
|
||||
"52",5.1,-0.925814682327732,5.37277893732831,-0.91444926304078,4.81555148166217,-0.864686555983682
|
||||
"53",5.2,-0.883454655720153,5.19524942845082,-1.41169784739596,4.84152902094499,-1.03768305406186
|
||||
"54",5.3,-0.832267442223901,5.4432222181271,-0.726481337519931,4.98565483155961,-0.856094353978009
|
||||
"55",5.4,-0.772764487555987,4.98285013865449,-0.692803346852181,4.90897053115903,-0.838425020062396
|
||||
"56",5.5,-0.705540325570392,5.33298025214155,-0.343702005257262,5.0497327607228,-0.711573964373115
|
||||
"57",5.6,-0.631266637872321,5.49935694796791,-0.828968673188174,5.15036520204232,-0.816467931201244
|
||||
"58",5.7,-0.550685542597638,5.69204187550805,-0.481580461165225,5.26232964126231,-0.689500817105975
|
||||
"59",5.8,-0.464602179413757,5.84391772412888,-0.20453899468884,5.38069867877875,-0.564365367144995
|
||||
"60",5.9,-0.373876664830236,5.48166674139637,-0.597796931577294,5.3357436834558,-0.649913835818738
|
||||
"61",6,-0.279415498198926,5.77474590863769,-0.280234463056808,5.46956415981143,-0.524503219480344
|
||||
"62",6.1,-0.182162504272095,6.36764321572312,-0.0996286988755344,5.7169871104113,-0.422854073705143
|
||||
"63",6.2,-0.0830894028174964,6.46175133910451,-0.025702847911482,5.83540227044819,-0.355719019286555
|
|
45
TeX/Plots/sin_conv.tex
Normal file
@ -0,0 +1,45 @@
|
||||
\begin{figure}
|
||||
\centering
|
||||
\begin{subfigure}[b]{0.49\textwidth}
|
||||
\centering
|
||||
\begin{adjustbox}{width=\textwidth, height=0.25\textheight}
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}[tick style = {draw = none}, xticklabel = \empty,
|
||||
yticklabel=\empty]
|
||||
\addplot [mark options={scale = 0.7}, mark = o] table
|
||||
[x=x_d,y=y_d, col sep = comma] {Plots/Data/sin_conv.csv};
|
||||
\addplot [red, mark=x] table [x=x_i, y=y_i, col sep=comma] {Plots/Data/sin_conv.csv};
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
\end{adjustbox}
|
||||
\caption{True position (\textcolor{red}{red}), distorted position data (black)}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}[b]{0.49\textwidth}
|
||||
\centering
|
||||
\begin{adjustbox}{width=\textwidth, height=0.25\textheight}
|
||||
\begin{tikzpicture}
|
||||
\begin{axis}[tick style = {draw = none}, xticklabel = \empty,
|
||||
yticklabel=\empty]
|
||||
\addplot [mark options={scale = 0.7}, mark = o] table [x=x,y=y, col
|
||||
sep = comma] {Plots/Data/sin_conv.csv};
|
||||
\addplot [red, mark=x] table [x=x_i, y=y_i, col sep=comma] {Plots/Data/sin_conv.csv};
|
||||
\end{axis}
|
||||
\end{tikzpicture}
|
||||
\end{adjustbox}
|
||||
\caption{True position (\textcolor{red}{red}), filtered position data (black)}
|
||||
\end{subfigure}
|
||||
\caption{Example of noise reduction using convolution on simulated
  positional data. The filter
  $g(i)=\left(\nicefrac{1}{3},\nicefrac{1}{4},\nicefrac{1}{5},\nicefrac{1}{6},\nicefrac{1}{20}\right)_{(i-1)}$
  is chosen and applied to the $x$ and $y$ coordinate
  data separately. The convolution of both signals with $g$
  improves the MSE of the positions from 0.196 to 0.170 and
  visibly smoothes the data.
}
|
||||
\label{fig:sin_conv}
|
||||
\end{figure}
|
||||
|
||||
%%% Local Variables:
|
||||
%%% mode: latex
|
||||
%%% TeX-master: "../main"
|
||||
%%% End:
|
5
TeX/Plots/y.tex
Normal file
@ -0,0 +1,5 @@
|
||||
|
||||
%%% Local Variables:
|
||||
%%% mode: latex
|
||||
%%% TeX-master: "../main"
|
||||
%%% End:
|
33
TeX/appendixA.tex
Normal file
@ -0,0 +1,33 @@
|
||||
|
||||
\newpage
|
||||
\begin{appendices}
|
||||
\section{Proofs for some Lemmata in ...}
In the following there will be proofs for some important Lemmata in
Section~\ref{sec:theo38}. Further proofs not discussed here can be
found in \textcite{heiss2019}.
\begin{Theorem}[Proof of Lemma~\ref{theo38}]
\end{Theorem}

\begin{Lemma}[$\frac{w^{*,\tilde{\lambda}}_k}{v_k}\approx\mathcal{O}(\frac{1}{n})$]
  For any $\lambda > 0$ and training data $(x_i^{\text{train}},
  y_i^{\text{train}}) \in \mathbb{R}^2, \, i \in
  \left\{1,\dots,N\right\}$, we have
  \[
    \max_{k \in \left\{1,\dots,n\right\}}
    \frac{w^{*,\tilde{\lambda}}_k}{v_k} =
    \mathcal{O}\left(\frac{1}{n}\right), \quad n \to \infty.
  \]
\end{Lemma}
|
||||
\end{appendices}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
%%% Local Variables:
|
||||
%%% mode: latex
|
||||
%%% TeX-master: "main"
|
||||
%%% End:
|
||||
|
58
TeX/bibliograpy.bib
Normal file
@ -0,0 +1,58 @@
|
||||
@UNPUBLISHED{heiss2019,
|
||||
series = {arXiv},
|
||||
author = {Heiss, Jakob and Teichmann, Josef and Wutte, Hanna},
|
||||
publisher = {Cornell University},
|
||||
year = {2019},
|
||||
language = {en},
|
||||
copyright = {In Copyright - Non-Commercial Use Permitted},
|
||||
keywords = {early stopping; implicit regularization; machine learning; neural networks; spline; regression; gradient descent; artificial intelligence},
|
||||
size = {53 p.},
|
||||
address = {Ithaca, NY},
|
||||
abstract = {Today, various forms of neural networks are trained to perform approximation tasks in many fields. However, the solutions obtained are not fully understood. Empirical results suggest that typical training algorithms favor regularized solutions.These observations motivate us to analyze properties of the solutions found by gradient descent initialized close to zero, that is frequently employed to perform the training task. As a starting point, we consider one dimensional (shallow) ReLU neural networks in which weights are chosen randomly and only the terminal layer is trained. We show that the resulting solution converges to the smooth spline interpolation of the training data as the number of hidden nodes tends to infinity. Moreover, we derive a correspondence between the early stopped gradient descent and the smoothing spline regression. This might give valuable insight on the properties of the solutions obtained using gradient descent methods in general settings.},
|
||||
DOI = {10.3929/ethz-b-000402003},
|
||||
title = {How Implicit Regularization of Neural Networks Affects the Learned Function – Part I},
|
||||
eprint = {1911.02903}
|
||||
}
|
||||
|
||||
@article{Dropout,
|
||||
author = {Nitish Srivastava and Geoffrey Hinton and Alex Krizhevsky and Ilya Sutskever and Ruslan Salakhutdinov},
|
||||
title = {Dropout: A Simple Way to Prevent Neural Networks from Overfitting},
|
||||
journal = {Journal of Machine Learning Research},
|
||||
year = 2014,
|
||||
volume = 15,
|
||||
number = 56,
|
||||
pages = {1929-1958},
|
||||
url = {http://jmlr.org/papers/v15/srivastava14a.html}
|
||||
}
|
||||
|
||||
@article{ADADELTA,
|
||||
author = {Matthew D. Zeiler},
|
||||
title = {{ADADELTA:} An Adaptive Learning Rate Method},
|
||||
journal = {CoRR},
|
||||
volume = {abs/1212.5701},
|
||||
year = 2012,
|
||||
url = {http://arxiv.org/abs/1212.5701},
|
||||
archivePrefix = {arXiv},
|
||||
eprint = {1212.5701},
|
||||
timestamp = {Mon, 13 Aug 2018 16:45:57 +0200},
|
||||
biburl = {https://dblp.org/rec/journals/corr/abs-1212-5701.bib},
|
||||
bibsource = {dblp computer science bibliography, https://dblp.org}
|
||||
}
|
||||
|
||||
@article{backprop,
|
||||
author={Rumelhart, David E.
|
||||
and Hinton, Geoffrey E.
|
||||
and Williams, Ronald J.},
|
||||
title={Learning representations by back-propagating errors},
|
||||
journal={Nature},
|
||||
year={1986},
|
||||
month={Oct},
|
||||
day={01},
|
||||
volume={323},
|
||||
number={6088},
|
||||
pages={533-536},
|
||||
abstract={We describe a new learning procedure, back-propagation, for networks of neurone-like units. The procedure repeatedly adjusts the weights of the connections in the network so as to minimize a measure of the difference between the actual output vector of the net and the desired output vector. As a result of the weight adjustments, internal `hidden' units which are not part of the input or output come to represent important features of the task domain, and the regularities in the task are captured by the interactions of these units. The ability to create useful new features distinguishes back-propagation from earlier, simpler methods such as the perceptron-convergence procedure1.},
|
||||
issn={1476-4687},
|
||||
doi={10.1038/323533a0},
|
||||
url={https://doi.org/10.1038/323533a0}
|
||||
}
|
329
TeX/further_applications_of_nn.tex
Normal file
@ -0,0 +1,329 @@
|
||||
\section{Application of NN to Higher Complexity Problems}

As neural networks are applied to problems of higher complexity, often
resulting in a higher dimensionality of the input, the number of
parameters in the network rises drastically. For example a network
with ...
A way to combat this ...

\subsection{Convolution}
|
||||
|
||||
Convolution is a mathematical operation where the product of two
functions is integrated after one of them has been reversed and
shifted:
\[
  (f * g)(t) \coloneqq \int_{-\infty}^{\infty} f(t-s) g(s) \,ds.
\]
This operation can be described as a filter function $g$ being applied
to $f$, as the values $f(t)$ are replaced by an average of the values
of $f$ around $t$, weighted by $g$.
|
||||
The convolution operation allows for plentiful manipulation of data; a
simple example is the smoothing of real-time data. Consider a sensor
measuring the location of an object (e.g. via GPS). We expect the
output of the sensor to be noisy as a result of a number of factors
impacting the accuracy. In order to get a better estimate of the
actual location we want to smooth the data to reduce the noise. Using
convolution for this task, we can control the significance we want to
give each data point; for instance, we might want to give a larger
weight to more recent measurements than to older ones. As these
measurements are taken on a discrete timescale, we first need to
introduce discrete convolution. Let $f$, $g: \mathbb{Z} \to
\mathbb{R}$, then
\[
  (f * g)(t) = \sum_{i \in \mathbb{Z}} f(t-i) g(i).
\]
Applying this to the data with a suitably chosen filter $g$, we are
able to improve the accuracy, as can be seen in
Figure~\ref{fig:sin_conv}.
\input{Plots/sin_conv.tex}
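The smoothing described above can be sketched in a few lines of
Python. This is only an illustration, assuming numpy is available; the
simulated data merely stands in for the sensor measurements, while the
filter weights are the ones used for Figure~\ref{fig:sin_conv}.
\begin{verbatim}
import numpy as np

# Filter weights as in Figure sin_conv: more recent measurements
# receive a larger weight than older ones.
g = np.array([1/3, 1/4, 1/5, 1/6, 1/20])

# Simulated noisy positional data (stand-in for the GPS sensor).
rng = np.random.default_rng(0)
t = np.linspace(0, 2 * np.pi, 100)
x_noisy = t + rng.normal(0, 0.2, t.size)
y_noisy = np.sin(t) + rng.normal(0, 0.2, t.size)

# np.convolve computes (f * g)(t) = sum_i f(t - i) g(i);
# mode='same' keeps the output the same length as the input.
x_smooth = np.convolve(x_noisy, g, mode='same')
y_smooth = np.convolve(y_noisy, g, mode='same')
\end{verbatim}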
|
||||
This form of discrete convolution can also be applied to functions
with inputs of higher dimensionality. Let $f$, $g: \mathbb{Z}^d \to
\mathbb{R}$, then
\[
  (f * g)(x_1, \dots, x_d) = \sum_{i \in \mathbb{Z}^d} f(x_1 - i_1,
  \dots, x_d - i_d) g(i_1, \dots, i_d).
\]
This will prove to be a useful framework for image manipulation, but
in order to apply convolution to images we need to discuss the
representation of image data first. Most often images are represented
with each pixel being a mixture of base colors; these base colors
define the color space in which the image is encoded. Commonly used
color spaces are RGB (red, green, blue) and CMYK (cyan, magenta,
yellow, black). An example of an image split into its red, green and
blue channels is given in Figure~\ref{fig:rgb}. Using this encoding of
the image we can define a corresponding discrete function describing
the image, by mapping the coordinates $(x,y)$ of a pixel and the
channel (color) $c$ to the respective value $v$
|
||||
|
||||
\begin{align}
  \begin{split}
    I: \mathbb{N}^3 & \to \mathbb{R}, \\
    (x,y,c) & \mapsto v.
  \end{split}
  \label{def:I}
\end{align}
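In practice an image loaded into a numpy array realizes exactly this
mapping. The following sketch assumes PIL is available and uses a
placeholder file name.
\begin{verbatim}
import numpy as np
from PIL import Image

# The array realizes the function I from (def:I): the first two
# indices are the pixel coordinates (x, y), the third selects the
# channel c.
I = np.asarray(Image.open('image.jpg'))  # shape: (height, width, 3)

v = I[10, 20, 0]          # red value (c = 0) of the pixel at (10, 20)
red_channel = I[:, :, 0]  # the full red channel
\end{verbatim}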
|
||||
|
||||
\begin{figure}
|
||||
\begin{adjustbox}{width=\textwidth}
|
||||
\begin{tikzpicture}
|
||||
\begin{scope}[x = (0:1cm), y=(90:1cm), z=(15:-0.5cm)]
|
||||
\node[canvas is xy plane at z=0, transform shape] at (0,0)
|
||||
{\includegraphics[width=5cm]{Plots/Data/klammern_r.jpg}};
|
||||
\node[canvas is xy plane at z=2, transform shape] at (0,-0.2)
|
||||
{\includegraphics[width=5cm]{Plots/Data/klammern_g.jpg}};
|
||||
\node[canvas is xy plane at z=4, transform shape] at (0,-0.4)
|
||||
{\includegraphics[width=5cm]{Plots/Data/klammern_b.jpg}};
|
||||
\node[canvas is xy plane at z=4, transform shape] at (-8,-0.2)
|
||||
{\includegraphics[width=5.3cm]{Plots/Data/klammern_rgb.jpg}};
|
||||
\end{scope}
|
||||
\end{tikzpicture}
|
||||
\end{adjustbox}
|
||||
\caption{On the right, the red, green and blue channels of the picture
  are displayed. In order to better visualize the color channels, the
  black and white picture of each channel has been colored in the
  respective color. Combining the layers results in the image on the
  left.}
|
||||
\label{fig:rgb}
|
||||
\end{figure}
|
||||
|
||||
With this representation of an image as a function, we can apply
filters to the image using convolution for multidimensional functions
as described above. In order to simplify the notation, we will from
now on write the function $I$ given in (\ref{def:I}), as well as the
filter function $g$, as tensors, resulting in the modified notation of
convolution
\[
  (I * g)_{x,y,c} = \sum_{i,j,l \in \mathbb{Z}} I_{x-i,y-j,c-l}\, g_{i,j,l}.
\]
|
||||
|
||||
Simple examples of image manipulation using convolution are smoothing
operations and rudimentary detection of edges in grayscale images,
i.e. images with only one channel. A popular filter for smoothing
images is the Gauss filter, which for a given $\sigma \in
\mathbb{R}_+$ and size $s \in \mathbb{N}$ is defined as
\[
  G_{x,y} = \frac{1}{2 \pi \sigma^2} e^{-\frac{x^2 + y^2}{2
      \sigma^2}}, \quad x,y \in \left\{1,\dots,s\right\}.
\]
|
||||
|
||||
For edge detection purposes the Sobel operator is widespread. Here two
filters are applied to the image $I$ and then combined. Edges in the
$x$ direction are detected by convolution with
\[
  G =\left[
  \begin{matrix}
    -1 & 0 & 1 \\
    -2 & 0 & 2 \\
    -1 & 0 & 1
  \end{matrix}\right],
\]
and edges in the $y$ direction by convolution with $G^T$; the final
output is given by
\[
  O = \sqrt{(I * G)^2 + (I * G^T)^2},
\]
where $\sqrt{\cdot}$ and $\cdot^2$ are applied componentwise. Examples
of convolution with both kernels are given in
Figure~\ref{fig:img_conv}, and a short sketch of both filters follows
below.
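The sketch uses scipy's ndimage.convolve by assumption (any
two-dimensional convolution routine works), and a synthetic image with
a vertical edge stands in for the pictures of the figure.
\begin{verbatim}
import numpy as np
from scipy.ndimage import convolve

def gauss_kernel(s, sigma):
    # G_{x,y} as defined above; the normalization makes the
    # weights sum to one.
    ax = np.arange(s) - (s - 1) / 2
    xx, yy = np.meshgrid(ax, ax)
    G = np.exp(-(xx ** 2 + yy ** 2) / (2 * sigma ** 2))
    return G / G.sum()

# Sobel kernel G for edges in the x direction; its transpose
# detects edges in the y direction.
G_s = np.array([[-1, 0, 1],
                [-2, 0, 2],
                [-1, 0, 1]], dtype=float)

img = np.zeros((64, 64))  # synthetic grayscale image with a
img[:, 32:] = 1.0         # vertical edge in the middle

blurred = convolve(img, gauss_kernel(11, 1.0))
O = np.sqrt(convolve(img, G_s) ** 2 + convolve(img, G_s.T) ** 2)
\end{verbatim}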
|
||||
|
||||
|
||||
|
||||
\begin{figure}[h]
|
||||
\centering
|
||||
\begin{subfigure}{0.3\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Plots/Data/klammern.jpg}
|
||||
\caption{Original Picture}
|
||||
\label{subf:OrigPicGS}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}{0.3\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Plots/Data/image_conv9.png}
|
||||
\caption{Gaussian Blur $\sigma^2 = 1$}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}{0.3\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Plots/Data/image_conv10.png}
|
||||
\caption{Gaussian Blur $\sigma^2 = 4$}
|
||||
\end{subfigure}\\
|
||||
\begin{subfigure}{0.3\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Plots/Data/image_conv4.png}
|
||||
\caption{Sobel Operator $x$-direction}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}{0.3\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Plots/Data/image_conv5.png}
|
||||
\caption{Sobel Operator $y$-direction}
|
||||
\end{subfigure}
|
||||
\begin{subfigure}{0.3\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Plots/Data/image_conv6.png}
|
||||
\caption{Sobel Operator combined}
|
||||
\end{subfigure}
|
||||
% \begin{subfigure}{0.24\textwidth}
|
||||
% \centering
|
||||
% \includegraphics[width=\textwidth]{Plots/Data/image_conv6.png}
|
||||
% \caption{test}
|
||||
% \end{subfigure}
|
||||
\caption{Convolution of the original grayscale image (a) with
  different kernels. In (b) and (c) Gaussian kernels of size 11 and
  the stated $\sigma^2$ are used. In (d)--(f) the Sobel operator
  kernels defined above are used.}
|
||||
\label{fig:img_conv}
|
||||
\end{figure}
|
||||
\clearpage
|
||||
\newpage
|
||||
\subsection{Convolutional NN}
|
||||
|
||||
In conventional neural networks as described in chapter ... all layers
are fully connected, meaning each output node in a layer is influenced
by all inputs. For $i$ inputs and $o$ output nodes this results in $i
+ 1$ variables at each node (weights and bias) and a total of $o(i +
1)$ variables. For large inputs like image data the number of
variables that have to be trained in order to fit the model can get
excessive and hinder the ability to train the model due to memory and
computational restrictions. By using convolution we can extract
meaningful information, such as edges in an image, with a kernel of
small size $k$ in the tens or hundreds, independent of the size of the
original image. Thus for a large image $k \cdot i$ can be several
orders of magnitude smaller than $o \cdot i$.
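This difference is easy to check using Keras (assumed available, as
elsewhere in this repository); the layer sizes below are chosen only
for illustration.
\begin{verbatim}
import tensorflow as tf

# Fully connected layer: o * (i + 1) parameters.
dense = tf.keras.layers.Dense(64)
dense.build((None, 28 * 28))
print(dense.count_params())  # 64 * (784 + 1) = 50240

# Convolutional layer: the parameter count depends on the kernel
# size only, not on the size of the input image.
conv = tf.keras.layers.Conv2D(64, kernel_size=5)
conv.build((None, 28, 28, 1))
print(conv.count_params())   # 64 * (5 * 5 * 1 + 1) = 1664
\end{verbatim}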
|
||||
|
||||
|
||||
As seen, convolution lends itself to image manipulation. In this
chapter we will explore how we can incorporate convolution into
neural networks, and how that might be beneficial.

Convolutional neural networks as described by ... are made up of
convolutional layers, pooling layers, and fully connected ones. The
fully connected layers are layers in which each input node is
connected to each output node; this is the structure introduced in
chapter ...
|
||||
|
||||
In a convolutional layer, instead of combining all input nodes for
each output node, the input nodes are interpreted as a tensor on which
a kernel is applied via convolution, resulting in the output. Most
often multiple kernels are used, resulting in multiple output
tensors. These kernels are the variables which can be altered in order
to fit the model to the data. Using multiple kernels, it is possible
to extract different features from the image (e.g. edges, as with the
Sobel operator). As this increases the dimensionality even further,
which is undesirable since it increases the number of variables in
later layers of the model, a convolutional layer is often followed by
a pooling one. In a pooling layer the input is reduced in size by
extracting a single value from a neighborhood \todo{moving...}... The
resulting output size depends on the offset of the neighborhoods
used. Popular choices are max-pooling, where the largest value of a
neighborhood is used, and average-pooling, where the mean is taken.

This construct allows for the extraction of features from the input
while using far fewer variables, as the sketch below illustrates.
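A minimal sketch of max-pooling with $2 \times 2$ neighborhoods and
offset 2, assuming the input is given as a plain numpy array:
\begin{verbatim}
import numpy as np

x = np.array([[1, 3, 2, 0],
              [4, 8, 1, 1],
              [0, 2, 9, 5],
              [1, 0, 3, 7]], dtype=float)

# Group the entries into 2x2 blocks and keep the largest value of
# each block; the output has half the width and height of the input.
pooled = x.reshape(2, 2, 2, 2).max(axis=(1, 3))
print(pooled)  # [[8. 2.]
               #  [2. 9.]]
\end{verbatim}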
|
||||
|
||||
... \todo{Example with a small image, ideally the one from above}
|
||||
|
||||
\subsubsection{Parallels to the Visual Cortex in Mammals}
|
||||
|
||||
The choice of convolution for image classification tasks is not
arbitrary. ... eye ... bla bla
|
||||
|
||||
|
||||
\subsection{Limitations of the Gradient Descent Algorithm}
|
||||
|
||||
\begin{itemize}
  \item Hyperparameter guesswork
  \item Problems navigating valleys $\rightarrow$ momentum
  \item Different scale of gradients for variables in different layers
    $\rightarrow$ ADADELTA
\end{itemize}
|
||||
|
||||
\subsection{Stochastic Training Algorithms}
|
||||
|
||||
For many applications in which neural networks are used, such as
image classification or segmentation, large training data sets are
essential to capture the nuances of the data. However, as training
sets get larger, the memory requirement during training grows with
them. In order to update the weights with the gradient descent
algorithm, the derivatives of the network with respect to each
variable need to be calculated for all data points in order to obtain
the full gradient of the error of the network. Thus the amount of
memory and computing power available limits the size of the training
data that can be efficiently used in fitting the network. A class of
algorithms that augment the gradient descent algorithm in order to
lessen this problem are stochastic gradient descent algorithms. Here
the premise is that, instead of using the whole data set, a
(different) subset of the data is chosen in each iteration to compute
the gradient. The number of iterations needed until each data point
has been considered in updating the parameters is commonly called an
``epoch''. This reduces the amount of memory and computing power
required per iteration, which allows the use of very large training
sets. Additionally, the noise introduced on the gradient can improve
the accuracy of the fit, as stochastic gradient descent algorithms are
less likely to get stuck in local extrema.
|
||||
|
||||
\input{Plots/SGD_vs_GD.tex}
|
||||
|
||||
Another benefit of using subsets, even if enough memory is available
to use the whole data set, is that, depending on the size of the
subsets, the gradient can be calculated far quicker, which allows more
steps to be taken in the same time. If the approximated gradient is
close enough to the ``real'' one, this can drastically cut down the
time required for training the model. A sketch of such a training loop
is given below.
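The sketch assumes that grad returns the gradient of the empirical
error on the given batch; the toy usage at the end is only an
illustration.
\begin{verbatim}
import numpy as np

def sgd(grad, theta, data, lr=0.01, batch_size=32, epochs=20):
    # Minibatch stochastic gradient descent; one epoch visits
    # every data point exactly once.
    rng = np.random.default_rng(0)
    n = len(data)
    for _ in range(epochs):
        order = rng.permutation(n)
        for start in range(0, n, batch_size):
            batch = data[order[start:start + batch_size]]
            # The gradient is estimated on the subset only.
            theta = theta - lr * grad(theta, batch)
    return theta

# Toy usage: fitting a constant under squared error, where the
# gradient on a batch is theta minus the batch mean.
data = np.random.default_rng(1).normal(3.0, 1.0, size=1000)
print(sgd(lambda t, b: t - b.mean(), 0.0, data))  # close to 3.0
\end{verbatim}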
|
||||
|
||||
\begin{itemize}
|
||||
\item ADAM
|
||||
\item momentum
|
||||
\item ADADELTA \textcite{ADADELTA}
|
||||
|
||||
|
||||
\end{itemize}
|
||||
|
||||
|
||||
|
||||
% \subsubsubsection{Stochastic Gradient Descent}
|
||||
|
||||
\subsection{Combating Overfitting}
|
||||
|
||||
% As in many machine learning applications if the model is overfit in
|
||||
% the data it can drastically reduce the generalization of the model. In
|
||||
% many machine learning approaches noise introduced in the learning
|
||||
% algorithm in order to reduce overfitting. This results in a higher
|
||||
% bias of the model but the trade off of lower variance of the model is
|
||||
% beneficial in many cases. For example the regression tree model
|
||||
% ... benefits greatly from restricting the training algorithm on
|
||||
% randomly selected features in every iteration and then averaging many
|
||||
% such trained trees inserted of just using a single one. \todo{noch
|
||||
% nicht sicher ob ich das nehmen will} For neural networks similar
|
||||
% strategies exist. A popular approach in regularizing convolutional neural network
|
||||
% is \textit{dropout} which has been first introduced in
|
||||
% \cite{Dropout}
|
||||
|
||||
Similarly to shallow networks, overfitting can still impact the
quality of convolutional neural networks. A popular way to combat this
problem is by introducing noise into the training of the model. This
is a successful strategy for other models as well: a conglomerate of
decision trees grown on bootstrapped training samples benefits greatly
from randomizing the features available for use in each training
iteration (Hastie, Bachelorarbeit??). The way noise is introduced into
the model is by deactivating certain nodes (setting the output of the
node to 0) in the fully connected layers of the convolutional neural
network. The nodes are chosen at random and change in every iteration;
this practice is called Dropout and was introduced by
\textcite{Dropout}.
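In Keras this corresponds to the Dropout layer; the sketch below only
demonstrates the mechanism, with a dropout rate of 0.4 chosen
arbitrarily.
\begin{verbatim}
import tensorflow as tf

# During training each input unit is zeroed with probability 0.4
# and the surviving units are scaled by 1/0.6, so the expected
# output is unchanged; at prediction time the layer is the identity.
drop = tf.keras.layers.Dropout(0.4)
x = tf.ones((1, 10))
print(drop(x, training=True))   # roughly 40% zeros, rest = 1/0.6
print(drop(x, training=False))  # all ones
\end{verbatim}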
|
||||
|
||||
\todo{Compare different dropout rates on MNIST or similar}
|
||||
|
||||
|
||||
|
||||
%%% Local Variables:
|
||||
%%% mode: latex
|
||||
%%% TeX-master: "main"
|
||||
%%% End:
|
10
TeX/introduction.tex
Normal file
@ -0,0 +1,10 @@
|
||||
\section{Introduction}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
%%% Local Variables:
|
||||
%%% mode: latex
|
||||
%%% TeX-master: "main"
|
||||
%%% End:
|
@ -26,107 +26,67 @@ except for the input layer, which receives the components of the input.
|
||||
\begin{figure}[h!]
|
||||
\center
|
||||
|
||||
\fbox{
|
||||
|
||||
\resizebox{\textwidth}{!}{%
|
||||
\begin{tikzpicture}[x=1.75cm, y=1.75cm, >=stealth]
|
||||
\tikzset{myptr/.style={decoration={markings,mark=at position 1 with %
|
||||
{\arrow[scale=1.5,>=stealth]{>}}},postaction={decorate}}}
|
||||
% \fbox{
|
||||
|
||||
\resizebox{\textwidth}{!}{%
|
||||
\begin{tikzpicture}[x=1.75cm, y=1.75cm, >=stealth]
|
||||
\tikzset{myptr/.style={decoration={markings,mark=at position 1 with %
|
||||
{\arrow[scale=1.5,>=stealth]{>}}},postaction={decorate}}}
|
||||
|
||||
\foreach \m/\l [count=\y] in {1,2,3,missing,4}
|
||||
\node [every neuron/.try, neuron \m/.try] (input-\m) at (0,2.5-\y) {};
|
||||
|
||||
\foreach \m [count=\y] in {1,missing,2}
|
||||
\node [every neuron/.try, neuron \m/.try ] (hidden1-\m) at (2,2-\y*1.25) {};
|
||||
|
||||
\foreach \m [count=\y] in {1,missing,2}
|
||||
\node [every neuron/.try, neuron \m/.try ] (hidden2-\m) at (5,2-\y*1.25) {};
|
||||
|
||||
\foreach \m/\l [count=\y] in {1,2,3,missing,4}
|
||||
\node [every neuron/.try, neuron \m/.try] (input-\m) at (0,2.5-\y) {};
|
||||
\foreach \m [count=\y] in {1,missing,2}
|
||||
\node [every neuron/.try, neuron \m/.try ] (output-\m) at (7,1.5-\y) {};
|
||||
|
||||
\foreach \l [count=\i] in {1,2,3,d_i}
|
||||
\draw [myptr] (input-\i)+(-1,0) -- (input-\i)
|
||||
node [above, midway] {$x_{\l}$};
|
||||
|
||||
\foreach \l [count=\i] in {1,n_1}
|
||||
\node [above] at (hidden1-\i.north) {$\mathcal{N}_{1,\l}$};
|
||||
|
||||
\foreach \l [count=\i] in {1,n_l}
|
||||
\node [above] at (hidden2-\i.north) {$\mathcal{N}_{l,\l}$};
|
||||
|
||||
\foreach \l [count=\i] in {1,d_o}
|
||||
\draw [myptr] (output-\i) -- ++(1,0)
|
||||
node [above, midway] {$O_{\l}$};
|
||||
|
||||
\foreach \i in {1,...,4}
|
||||
\foreach \j in {1,...,2}
|
||||
\draw [myptr] (input-\i) -- (hidden1-\j);
|
||||
|
||||
\foreach \i in {1,...,2}
|
||||
\foreach \j in {1,...,2}
|
||||
\draw [myptr] (hidden1-\i) -- (hidden2-\j);
|
||||
|
||||
\foreach \i in {1,...,2}
|
||||
\foreach \j in {1,...,2}
|
||||
\draw [myptr] (hidden2-\i) -- (output-\j);
|
||||
|
||||
\node [align=center, above] at (0,2) {Input\\layer};
|
||||
\node [align=center, above] at (2,2) {Hidden \\layer $1$};
|
||||
\node [align=center, above] at (5,2) {Hidden \\layer $l$};
|
||||
\node [align=center, above] at (7,2) {Output \\layer};
|
||||
|
||||
\node[fill=white,scale=1.5,inner xsep=10pt,inner ysep=10mm] at ($(hidden1-1)!.5!(hidden2-2)$) {$\dots$};
|
||||
|
||||
\foreach \m [count=\y] in {1,missing,2}
|
||||
\node [every neuron/.try, neuron \m/.try ] (hidden1-\m) at (2,2-\y*1.25) {};
|
||||
|
||||
\foreach \m [count=\y] in {1,missing,2}
|
||||
\node [every neuron/.try, neuron \m/.try ] (hidden2-\m) at (5,2-\y*1.25) {};
|
||||
|
||||
\foreach \m [count=\y] in {1,missing,2}
|
||||
\node [every neuron/.try, neuron \m/.try ] (output-\m) at (7,1.5-\y) {};
|
||||
|
||||
\foreach \l [count=\i] in {1,2,3,d_i}
|
||||
\draw [myptr] (input-\i)+(-1,0) -- (input-\i)
|
||||
node [above, midway] {$x_{\l}$};
|
||||
|
||||
\foreach \l [count=\i] in {1,n_1}
|
||||
\node [above] at (hidden1-\i.north) {$\mathcal{N}_{1,\l}$};
|
||||
|
||||
\foreach \l [count=\i] in {1,n_l}
|
||||
\node [above] at (hidden2-\i.north) {$\mathcal{N}_{l,\l}$};
|
||||
|
||||
\foreach \l [count=\i] in {1,d_o}
|
||||
\draw [myptr] (output-\i) -- ++(1,0)
|
||||
node [above, midway] {$O_{\l}$};
|
||||
|
||||
\foreach \i in {1,...,4}
|
||||
\foreach \j in {1,...,2}
|
||||
\draw [myptr] (input-\i) -- (hidden1-\j);
|
||||
|
||||
\foreach \i in {1,...,2}
|
||||
\foreach \j in {1,...,2}
|
||||
\draw [myptr] (hidden1-\i) -- (hidden2-\j);
|
||||
|
||||
\foreach \i in {1,...,2}
|
||||
\foreach \j in {1,...,2}
|
||||
\draw [myptr] (hidden2-\i) -- (output-\j);
|
||||
|
||||
\node [align=center, above] at (0,2) {Input\\layer};
|
||||
\node [align=center, above] at (2,2) {Hidden \\layer $1$};
|
||||
\node [align=center, above] at (5,2) {Hidden \\layer $l$};
|
||||
\node [align=center, above] at (7,2) {Output \\layer};
|
||||
|
||||
\node[fill=white,scale=1.5,inner xsep=10pt,inner ysep=10mm] at ($(hidden1-1)!.5!(hidden2-2)$) {$\dots$};
|
||||
|
||||
\end{tikzpicture}}}
|
||||
\caption{test}
|
||||
\end{tikzpicture}}%}
|
||||
\caption{Illustration of a neural network with $d_i$ inputs, $l$
|
||||
hidden layers with $n_{\cdot}$ nodes in each layer, as well as
|
||||
$d_o$ outputs.
|
||||
}
|
||||
\end{figure}
|
||||
|
||||
\begin{figure}
  \begin{tikzpicture}[x=1.5cm, y=1.5cm]
    \tikzset{myptr/.style={decoration={markings,mark=at position 1 with %
        {\arrow[scale=1.5,>=stealth]{>}}},postaction={decorate}}}

    \foreach \m/\l [count=\y] in {1}
      \node [every neuron/.try, neuron \m/.try] (input-\m) at (0,0.5-\y) {};

    \foreach \m [count=\y] in {1,2,missing,3,4}
      \node [every neuron/.try, neuron \m/.try ] (hidden-\m) at (1.25,3.25-\y*1.25) {};

    \foreach \m [count=\y] in {1}
      \node [every neuron/.try, neuron \m/.try ] (output-\m) at (2.5,0.5-\y) {};

    \foreach \l [count=\i] in {1}
      \draw [myptr] (input-\i)+(-1,0) -- (input-\i)
        node [above, midway] {$x$};

    \foreach \l [count=\i] in {1,2,n-1,n}
      \node [above] at (hidden-\i.north) {$\mathcal{N}_{\l}$};

    \foreach \l [count=\i] in {1,n_l}
      \node [above] at (output-\i.north) {};

    \foreach \l [count=\i] in {1}
      \draw [myptr, >=stealth] (output-\i) -- ++(1,0)
        node [above, midway] {$y$};

    \foreach \i in {1}
      \foreach \j in {1,2,...,3,4}
        \draw [myptr, >=stealth] (input-\i) -- (hidden-\j);

    \foreach \i in {1,2,...,3,4}
      \foreach \j in {1}
        \draw [myptr, >=stealth] (hidden-\i) -- (output-\j);

    \node [align=center, above] at (0,1) {Input \\layer};
    \node [align=center, above] at (1.25,3) {Hidden layer};
    \node [align=center, above] at (2.5,1) {Output \\layer};

  \end{tikzpicture}
  \caption{Shallow Neural Network with input- and output-dimension of \(d
    = 1\)}
\end{figure}

\subsection{Nonlinearity of Neural Networks}

\begin{figure}
@ -159,7 +119,7 @@ except for the input layer, which recieves the components of the input.
  \node [align = center, below] at (3, 0) {Summing \\junction};

  \node [draw, minimum size = 1.25cm] (act) at (4.5, 0.625)
    {\(\sigma(.)\)};
  \node [align = center, above] at (4.5, 1.25) {Activation \\function};

  \node [circle, draw, fill=black, inner sep = 0pt, minimum size =
@ -215,17 +175,125 @@ except for the input layer, which recieves the components of the input.
  \caption{Structure of a single neuron}
\end{figure}

\begin{tikzpicture}
  \tikzset{myptr/.style={decoration={markings,mark=at position 1 with %
      {\arrow[scale=2,>=stealth]{>}}},postaction={decorate}}}
  %1
  \draw [->,>=stealth] (0,.5) -- (2,.5);
  %2
  \draw [myptr] (0,0) -- (2,0);
\end{tikzpicture}
\clearpage
\subsection{Training Neural Networks}

After a neural network model is designed, like most statistical models
it has to be fitted to the data. In the machine learning context this is
often called ``training'', as due to the complexity and number of
variables in these models they are fitted iteratively to the data,
``learning'' the properties of the data better with each iteration.

There are two main categories of machine learning models, supervised
and unsupervised learners. Unsupervised learners learn structure in
the data without guidance from outside (such as labeling the data
beforehand for training); popular examples of this are clustering
algorithms\todo{citation}. Supervised learners on the other hand are, as
the name suggests, supervised during learning. This generally amounts to
using data with the expected response (label) attached to each
data-point in fitting the model, where usually some distance between
the model output and the labels is minimized.

\subsubsection{Interpreting the Output}

In order to properly interpret the output of a neural network and to
train it, depending on the problem it might be advantageous to
transform the output of the last layer. Given the nature of the
neural network the value at each output node is a real number. This is
desirable for applications where the desired output is a real valued
vector (e.g. steering inputs for an autonomous car); however for
classification problems it is desirable to transform this
output. Often classification problems are modeled in such a way that
each output node corresponds to a class. Then the output vector needs
to be normalized in order to give a prediction. The naive approach is
to transform the output vector $o$ into a one-hot vector $p$ with a $0$
entry for all classes except the predicted one:

\[
  p_i =
  \begin{cases}
    1,& i = \min\left\{j : o_j = \max_k o_k\right\}, \\
    0,& \text{else.}
  \end{cases}
\]
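
A minimal sketch of this conversion (assuming NumPy; \texttt{np.argmax} returns the first maximal index, which matches the tie-breaking rule above):

\begin{verbatim}
import numpy as np

def one_hot_prediction(o):
    # np.argmax returns the first index attaining the maximum,
    # matching the tie-breaking rule in the display above.
    p = np.zeros_like(o)
    p[np.argmax(o)] = 1.0
    return p

print(one_hot_prediction(np.array([0.1, 2.3, 2.3])))  # [0. 1. 0.]
\end{verbatim}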

However this imposes difficulties in training the network, as with this
addition the model is no longer differentiable, which limits the
ways the model can be trained. Additionally information about the
``certainty'' for each class in the prediction gets lost. A popular
way to circumvent this problem is to normalize the output vector in
such a way that the entries add up to one; this allows for the
interpretation of probabilities assigned to each class.
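
A common choice for such a normalization is the softmax function; a minimal sketch (assuming NumPy; subtracting the maximum is the usual trick to avoid numerical overflow):

\begin{verbatim}
import numpy as np

def softmax(o):
    # Shifting by the maximum leaves the result unchanged but
    # prevents overflow for large entries.
    e = np.exp(o - np.max(o))
    return e / np.sum(e)

p = softmax(np.array([1.0, 2.0, 3.0]))
print(p, p.sum())  # the entries add up to one
\end{verbatim}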

\subsubsection{Error Measurement}

In order to make assessments about the quality of a network $\mathcal{NN}$ and to train
it, we need to discuss how we measure error. As the output is
continuous for regression problems, in contrast to the class
predictions in a classification problem, we need to discuss these
problems separately.
\paragraph{Regression Problems}

\subsubsection{Gradient Descent Algorithm}

When trying to fit a neural network it is hard
to predict the impact of the single parameters on the accuracy of the
output. Thus applying numeric optimization algorithms is the only
feasible way to fit the model. An attractive algorithm for training
neural networks is gradient descent, where each parameter $\theta_i$ is
iteratively changed according to the gradient regarding the error
measure and a step size $\gamma$. For this all parameters are
initialized (often randomly or close to zero) and then iteratively
updated until a certain criterion is met, mostly either a fixed
number of iterations or a desired upper limit for the error measure.

% For a function $f_\theta$ with parameters $\theta \in \mathbb{R}^n$
% and an error function $L(f_\theta)$ the gradient descent algorithm is
% given in \ref{alg:gd}.

\begin{algorithm}[H]
  \SetAlgoLined
  \KwInput{function $f_\theta$ with parameters $\theta \in
    \mathbb{R}^n$ \newline step size $\gamma$}
  initialize $\theta^0$\;
  $i \leftarrow 1$\;
  \While{termination condition is not met}{
    $\nabla \leftarrow \frac{\mathrm{d}f_\theta}{\mathrm{d} \theta}\vert_{\theta^{i-1}}$\;
    $\theta^i \leftarrow \theta^{i-1} - \gamma \nabla $\;
    $i \leftarrow i +1$\;
  }
  \caption{Gradient Descent}
  \label{alg:gd}
\end{algorithm}

The algorithm for gradient descent is given in
Algorithm~\ref{alg:gd}. In the context of fitting a neural network,
$f_\theta$ corresponds to the error measurement of the network
$L\left(\mathcal{NN}_{\theta}\right)$, where $\theta$ is a vector
containing all the weights and biases of the network.
As can be seen this requires computing the derivative of the network
with regard to each variable. With the number of variables getting
large in networks with multiple layers of high neuron count, naively
computing these derivatives gets expensive in both memory and
computation. But by using the chain rule and exploiting the layered
structure we can compute the gradient much more efficiently using
backpropagation, first introduced by \textcite{backprop}.
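
A minimal sketch of Algorithm~\ref{alg:gd} (assuming NumPy, with a least squares loss standing in for the network error; step size and iteration count are illustrative assumptions):

\begin{verbatim}
import numpy as np

# Gradient descent on the loss L(theta) = mean((X theta - y)^2),
# whose gradient is 2/N * X^T (X theta - y).
rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + rng.normal(scale=0.1, size=50)

theta = np.zeros(3)            # initialize close to zero
gamma = 0.1                    # step size (illustrative)
for i in range(1000):          # fixed iteration count as termination
    grad = 2 / len(y) * X.T @ (X @ theta - y)
    theta = theta - gamma * grad

print(theta)  # close to the least squares solution
\end{verbatim}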

\subsubsection{Backpropagation}

As with an increasing number of layers the derivative of a loss
function with respect to a certain variable becomes more expensive to
compute, there have been efforts in increasing the efficiency of
computing these derivatives. Today the backpropagation algorithm is
widely used to compute the derivatives needed for the optimization
algorithms. Here, instead of naively calculating the derivative for
each variable, the chain rule is used in order to compute the derivatives
layer by layer from the output layer towards the first layer, while only
needing to compute the derivative of each layer once. Writing $a_k$ for
the output of the $k$-th layer, the derivative of the loss $L$ with
respect to the parameters $\theta_l$ of layer $l$ decomposes as

\[
  \frac{\partial L}{\partial \theta_l} =
  \frac{\partial L}{\partial a_L}
  \left(\prod_{k = l+1}^{L} \frac{\partial a_k}{\partial a_{k-1}}\right)
  \frac{\partial a_l}{\partial \theta_l},
\]

where the product over the layers above $l$ can be reused when moving
from layer $l$ to layer $l-1$.
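
A minimal sketch of one such backward pass for a network with a single hidden layer (assuming NumPy; the sigmoid activation and squared error loss are illustrative choices):

\begin{verbatim}
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(0)
x, y = rng.normal(size=3), 1.0               # one training point
V, b = rng.normal(size=(4, 3)), np.zeros(4)  # hidden layer parameters
w, c = rng.normal(size=4), 0.0               # output layer parameters

# forward pass, storing the intermediate results
z = V @ x + b            # hidden pre-activation
a = sigmoid(z)           # hidden output
out = w @ a + c          # network output
loss = (out - y) ** 2    # squared error

# backward pass: derivatives flow from the output towards the
# input; dL_dout and dL_da are computed once and reused
dL_dout = 2 * (out - y)
dL_dw, dL_dc = dL_dout * a, dL_dout
dL_da = dL_dout * w
dL_dz = dL_da * a * (1 - a)   # sigmoid'(z) = sigmoid(z)(1-sigmoid(z))
dL_dV, dL_db = np.outer(dL_dz, x), dL_dz
\end{verbatim}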

%%% Local Variables:
%%% mode: latex
%%% TeX-master: "main"
%%% End:
54
TeX/main.tex
@ -1,6 +1,8 @@
\documentclass[a4paper, 12pt, draft=true]{article}

%\usepackage[margin=1in]{geometry}
%\geometry{a4paper, left=30mm, right=40mm,top=25mm, bottom=20mm}

\usepackage[margin=1in]{geometry}
\usepackage[english]{babel}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
@ -16,8 +18,8 @@
\usepackage{sectsty}
\usepackage{setspace}
\usepackage{booktabs}
\usepackage[justification=RaggedRight, singlelinecheck=false]{caption}
\usepackage[format=plain,
            textfont=it]{caption}
%\usepackage{natbib} %[numbers]
\usepackage{multirow}
\usepackage{threeparttable}
@ -28,9 +30,18 @@
\usepackage{tikz}
\usepackage{nicefrac}
\usepackage{enumitem}
\usepackage[toc, page]{appendix}
\usepackage{todonotes}
\usepackage{lipsum}
\usepackage[ruled,vlined]{algorithm2e}
\usepackage{showframe}
\usepackage[protrusion=true, expansion=true, kerning=true]{microtype}

\captionsetup[sub]{justification=centering}

\usetikzlibrary{matrix,chains,positioning,decorations.pathreplacing,arrows}
\usetikzlibrary{positioning,calc,calligraphy}
\usetikzlibrary{calc, 3d}

\usepackage{pgfplots}
\usepgfplotslibrary{colorbrewer}
@ -42,9 +53,8 @@


\usepackage[style=authoryear, backend=bibtex]{biblatex}
\addbibresource{Literaturverzeichnis.bib}
\urlstyle{same}
\numberwithin{figure}{section}
\numberwithin{table}{section}
\numberwithin{equation}{section}
@ -66,16 +76,22 @@
\DeclareMathOperator*{\plim}{\mathbb{P}\text{-}\lim}
\DeclareMathOperator{\supp}{supp}
\DeclareMathOperator*{\argmin}{arg\,min}
\DeclareMathOperator*{\po}{\mathbb{P}\text{-}\mathcal{O}}
\DeclareMathOperator*{\equals}{=}
\begin{document}



\newcommand{\plimn}[0]{\plim\limits_{n \to \infty}}
\newcommand{\norm}[1]{\left\lVert#1\right\rVert}
\newcommand*\circled[1]{\tikz[baseline=(char.base)]{
  \node[shape=circle,draw,inner sep=2pt] (char) {#1};}}


\newcommand{\abs}[1]{\ensuremath{\left\vert#1\right\vert}}

\SetKwInput{KwInput}{Input}

%\newcommand{\myrightarrow}[1]{\xrightarrow{\makebox[2em][c]{$\scriptstyle#1$}}}

%Arndt Tobias \hfill 21.12.2017\newline
@ -91,7 +107,13 @@
\pagenumbering{gobble}
\newpage
%\setcounter{tocdepth}{4}
\tableofcontents
\listoftodos
\newpage
\pagenumbering{arabic}
% Introduction
\input{introduction}

\newpage

% Introduction Neural Networks
@ -100,7 +122,19 @@
\newpage

% Theorem 3.8
\input{theo_3_8}

\newpage

% Chapter 4
\input{further_applications_of_nn}

\newpage

\printbibliography

% Appendix A
\input{appendixA.tex}

\end{document}

25
TeX/pfg_test.tex
Normal file
@ -0,0 +1,25 @@
\documentclass{article}
\usepackage{pgfplots}
\usepackage{filecontents}


\begin{document}
\begin{tikzpicture}
  \begin{axis}
    \addplot+ [mark options={scale = 0.7}, mark = o] table [x=x,y=y, col sep = comma,
    only marks] {data_sin_d_t.csv};
    \addplot [black, mark=none] table [x=x, y=y, col sep=comma] {matlab_sin_d_01.csv};
  \end{axis}
\end{tikzpicture}
\begin{tikzpicture}
  \begin{axis}
    \addplot table [x=x, y=y, col sep=comma, only marks] {data_sin_d_t.csv};
    \addplot [black, mark=none] table [x=x, y=y, col sep=comma] {matlab_sin_d_01.csv};
  \end{axis}
\end{tikzpicture}
\end{document}

%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End:
720
TeX/theo_3_8.tex
@ -4,15 +4,247 @@
%%% mode: latex
%%% TeX-master: "main"
%%% End:
\section{Shallow Neural Networks}

In order to examine some behavior of neural networks in this chapter
we consider a simple class of networks, the shallow ones. These
networks only contain one hidden layer and have a single output node.

\begin{Definition}[Shallow neural network]
  For an input dimension $d$ and a Lipschitz continuous activation function $\sigma:
  \mathbb{R} \to \mathbb{R}$ we define a shallow neural network with
  $n$ hidden nodes
  $\mathcal{NN}_\vartheta : \mathbb{R}^d \to \mathbb{R}$ as
  \[
    \mathcal{NN}_\vartheta(x) \coloneqq \sum_{k=1}^n w_k \sigma\left(b_k +
      \sum_{j=1}^d v_{k,j} x_j\right) + c ~~ \forall x \in \mathbb{R}^d
  \]
  with
  \begin{itemize}
  \item weights $w_k \in \mathbb{R},~k \in \left\{1,\dots,n\right\}$
  \item biases $b_k \in \mathbb{R},~k \in \left\{1, \dots,n\right\}$
  \item weights $v_k \in \mathbb{R}^d,~k\in\left\{1,\dots,n\right\}$
  \item bias $c \in \mathbb{R}$
  \item these weights and biases collected in
    \[
      \vartheta \coloneqq (w, b, v, c) \in \Theta \coloneqq
      \mathbb{R}^{n} \times \mathbb{R}^{n} \times \mathbb{R}^{n \times d} \times \mathbb{R}
    \]
  \end{itemize}
\end{Definition}
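
A minimal sketch of this definition (assuming NumPy; the ReLU used as $\sigma$ anticipates the activation chosen below):

\begin{verbatim}
import numpy as np

def shallow_network(x, w, b, V, c):
    # NN_theta(x) = sum_k w_k * sigma(b_k + <v_k, x>) + c
    # with sigma the ReLU; w, b of shape (n,), V of shape (n, d).
    return w @ np.maximum(0.0, b + V @ x) + c

rng = np.random.default_rng(0)      # illustrative parameters, d=2, n=3
w, b = rng.normal(size=3), rng.normal(size=3)
V, c = rng.normal(size=(3, 2)), 0.0
print(shallow_network(np.array([0.5, -1.0]), w, b, V, c))
\end{verbatim}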

% \begin{figure}
%   \begin{tikzpicture}[x=1.5cm, y=1.5cm]
%     \tikzset{myptr/.style={decoration={markings,mark=at position 1 with %
%         {\arrow[scale=1.5,>=stealth]{>}}},postaction={decorate}}}

%     \foreach \m/\l [count=\y] in {1}
%       \node [every neuron/.try, neuron \m/.try] (input-\m) at (0,0.5-\y) {};

%     \foreach \m [count=\y] in {1,2,missing,3,4}
%       \node [every neuron/.try, neuron \m/.try ] (hidden-\m) at (1.25,3.25-\y*1.25) {};

%     \foreach \m [count=\y] in {1}
%       \node [every neuron/.try, neuron \m/.try ] (output-\m) at (2.5,0.5-\y) {};

%     \foreach \l [count=\i] in {1}
%       \draw [myptr] (input-\i)+(-1,0) -- (input-\i)
%         node [above, midway] {$x$};

%     \foreach \l [count=\i] in {1,2,n-1,n}
%       \node [above] at (hidden-\i.north) {$\mathcal{N}_{\l}$};

%     \foreach \l [count=\i] in {1,n_l}
%       \node [above] at (output-\i.north) {};

%     \foreach \l [count=\i] in {1}
%       \draw [myptr, >=stealth] (output-\i) -- ++(1,0)
%         node [above, midway] {$y$};

%     \foreach \i in {1}
%       \foreach \j in {1,2,...,3,4}
%         \draw [myptr, >=stealth] (input-\i) -- (hidden-\j);

%     \foreach \i in {1,2,...,3,4}
%       \foreach \j in {1}
%         \draw [myptr, >=stealth] (hidden-\i) -- (output-\j);

%     \node [align=center, above] at (0,1) {Input \\layer};
%     \node [align=center, above] at (1.25,3) {Hidden layer};
%     \node [align=center, above] at (2.5,1) {Output \\layer};

%   \end{tikzpicture}
%   \caption{Shallow Neural Network with input- and output-dimension of \(d
%     = 1\)}
%   \label{fig:shallowNN}
% \end{figure}

As neural networks with a large number of nodes have a large number of
parameters that can be tuned, they can often fit the data quite well. If a ReLU
\[
  \sigma(x) \coloneqq \max{(0, x)}
\]
is chosen as activation function, one can easily prove that if the
number of hidden nodes exceeds the
number of data points in the training data, a shallow network trained
on MSE will perfectly fit the data.
\begin{Theorem}[Shallow network fitting the training data]
  For training data of size $t$,
  \[
    \left(x_i^{\text{train}}, y_i^{\text{train}}\right) \in \mathbb{R}^d
    \times \mathbb{R},~i\in\left\{1,\dots,t\right\},
  \]
  a shallow neural network $\mathcal{NN}_\vartheta$ with $n \geq t$
  hidden nodes will perfectly fit the data when
  minimizing squared error loss.
  \proof
  W.l.o.g. all values $x_{ij}^{\text{train}} \in [0,1],~\forall i \in
  \left\{1,\dots,t\right\}, j \in \left\{1,\dots,d\right\}$. Now we
  choose $v^*$ in order to calculate a unique value for all
  $x_i^{\text{train}}$:
  \[
    v^*_{k,j} = v^*_{j} = 10^{j-1}, ~ \forall k \in \left\{1,\dots,n\right\}.
  \]
  Assuming $x_i^{\text{train}} \neq x_j^{\text{train}},~\forall i\neq
  j$ we get
  \[
    \left(v_k^*\right)^{\mathrm{T}} x_i^{\text{train}} \neq
    \left(v_k^*\right)^{\mathrm{T}} x_j^{\text{train}}, ~ \forall i
    \neq j.
  \]
  W.l.o.g. assume the $x_i^{\text{train}}$ are ordered such that
  $\left(v_k^*\right)^{\mathrm{T}} x_i^{\text{train}} <
  \left(v_k^*\right)^{\mathrm{T}} x_j^{\text{train}}, ~\forall i<j$.
  Then we can choose $b^*_k$ such that neuron $k$ is only active for all
  $x_i^{\text{train}}$ with $i \geq k$:
  \begin{align*}
    b^*_1 &> -\left(v^*\right)^{\mathrm{T}} x_1^{\text{train}},\\
    b^*_k &= -\left(v^*\right)^{\mathrm{T}}
            x_{k-1}^{\text{train}},~\forall k \in \left\{2, \dots,
            t\right\}, \\
    b_k^* &\leq -\left(v^*\right)^{\mathrm{T}}
            x_{t}^{\text{train}},~\forall k > t.
  \end{align*}
  With
  \begin{align*}
    w_k^* &= \frac{y_k^{\text{train}} - \sum_{j =1}^{k-1} w^*_j\left(b^*_j +
            \left(v^*\right)^{\mathrm{T}} x_k^{\text{train}}\right)}{b_k^* + \left(v^*\right)^{\mathrm{T}}
            x_k^{\text{train}}},~\forall k \in \left\{1,\dots,t\right\}\\
    w_k^* &\in \mathbb{R} \text{ arbitrary, } \forall k > t.
  \end{align*}
  and $\vartheta^* = (w^*, b^*, v^*, c = 0)$ we get
  \[
    \mathcal{NN}_{\vartheta^*} (x_i^{\text{train}}) = \sum_{k =
      1}^{i} w_k^*\left(b_k^* + \left(v^*\right)^{\mathrm{T}}
      x_i^{\text{train}}\right) = y_i^{\text{train}}.
  \]
  As the squared error of $\mathcal{NN}_{\vartheta^*}$ is zero, all
  squared error loss minimizing shallow networks with at least $t$ hidden
  nodes will perfectly fit the data.
  \qed
  \label{theo:overfit}
\end{Theorem}
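
A minimal sketch of this construction for input dimension $d = 1$ (assuming NumPy; for $d = 1$ the choice of $v^*$ reduces to $v^* = 1$, and $b_1^*$ is any value satisfying its strict inequality):

\begin{verbatim}
import numpy as np

def interpolating_relu_weights(x, y):
    # Construction from the proof for d = 1 and v* = 1; x must be
    # strictly increasing. Neuron k is active only for i >= k.
    t = len(x)
    b = np.empty(t)
    b[0] = -x[0] + 1.0        # any b_1 > -x_1 works
    b[1:] = -x[:-1]
    w = np.empty(t)
    for k in range(t):
        w[k] = (y[k] - np.sum(w[:k] * (b[:k] + x[k]))) / (b[k] + x[k])
    return w, b

x = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
y = np.sin(2 * np.pi * x)
w, b = interpolating_relu_weights(x, y)
pred = [np.sum(w * np.maximum(0.0, b + xi)) for xi in x]
print(np.allclose(pred, y))   # True: zero training error
\end{verbatim}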

However this behavior is often not desired, as overfit models often
have bad generalization properties, especially if noise is present in
the data. This effect can be seen in
Figure~\ref{fig:overfit}. Here a network that perfectly fits the
training data regarding the MSE is \todo{wording}
constructed and compared to a regression spline
(Definition~\ref{def:wrs}). While the network
fits the data better than the spline, the spline is much closer to the
underlying mechanism that was used to generate the data. The better
generalization of the spline compared to the network is further
illustrated by the better validation error computed on newly generated
test data.
In order to improve the accuracy of the model we want to reduce
overfitting. A possible way to achieve this is by explicitly
regularizing the network through the cost function, as done with
ridge penalized networks
(Definition~\ref{def:rpnn}) where large weights $w$ are punished. In
Theorem~\ref{theo:main1} we will
prove that this will result in the network converging to
regression splines as the number of nodes in the hidden layer is
increased.

\begin{figure}
  \begin{adjustbox}{width = \textwidth}
    \pgfplotsset{
      compat=1.11,
      legend image code/.code={
        \draw[mark repeat=2,mark phase=2]
        plot coordinates {
          (0cm,0cm)
          (0.15cm,0cm)        %% default is (0.3cm,0cm)
          (0.3cm,0cm)         %% default is (0.6cm,0cm)
        };%
      }
    }
    \begin{tikzpicture}
      \begin{axis}[tick style = {draw = none}, width = \textwidth,
        height = 0.6\textwidth]
        \addplot table
        [x=x, y=y, col sep=comma, only marks,mark options={scale =
          0.7}] {Plots/Data/overfit.csv};
        \addplot [red, line width=0.8pt] table [x=x_n, y=s_n, col
        sep=comma, forget plot] {Plots/Data/overfit.csv};
        \addplot [black, line width=0.8pt] table [x=x_n, y=y_n, col
        sep=comma] {Plots/Data/overfit.csv};
        \addplot [black, line width=0.8pt, dashed] table [x=x, y=y, col
        sep=comma] {Plots/Data/overfit_spline.csv};

        \addlegendentry{\footnotesize{data}};
        \addlegendentry{\footnotesize{$\mathcal{NN}_{\vartheta^*}$}};
        \addlegendentry{\footnotesize{spline}};
      \end{axis}
    \end{tikzpicture}
  \end{adjustbox}
  \caption{For data of the form $y=\sin(\frac{x+\pi}{2 \pi}) +
    \varepsilon,~ \varepsilon \sim \mathcal{N}(0,0.4)$
    (\textcolor{blue}{blue dots}), the neural network constructed
    according to the proof of Theorem~\ref{theo:overfit} (black) and the
    underlying signal (\textcolor{red}{red}). While the network is
    unbiased on the training data, a regression spline (black dashed)
    matches the underlying signal much better. For a test set of size 20 with uniformly distributed $x$
    values and responses of the same fashion as the training data, the MSE of the neural network is
    0.30, while the MSE of the spline is only 0.14, thus generalizing
    much better.
  }
  \label{fig:overfit}
\end{figure}

\clearpage
\subsection{Convergence Behaviour of 1-dim. Randomized Shallow Neural
  Networks}

In this section we will analyze the connection of randomized shallow
Neural Networks with one dimensional input and regression splines. We
will see that the punishment of the size of the weights in training
the randomized shallow
Neural Network will result in a function that minimizes the second
derivative as the number of hidden nodes grows to infinity. In order
to properly formulate this relation we will first need to introduce
some definitions.

\begin{Definition}[Randomized shallow neural network]
  For an input dimension $d$, let $n \in \mathbb{N}$ be the number of
  hidden nodes and $v(\omega) \in \mathbb{R}^{n \times d}, b(\omega)
  \in \mathbb{R}^n$ randomly drawn weights. Then for a weight vector
  $w$ the corresponding randomized shallow neural network is given by
  \[
    \mathcal{RN}_{w, \omega} (x) = \sum_{k=1}^n w_k
    \sigma\left(b_k(\omega) + \sum_{j=1}^d v_{k, j}(\omega) x_j\right).
  \]
  \label{def:rsnn}
\end{Definition}

\begin{Definition}[Ridge penalized Neural Network]
  \label{def:rpnn}
  Let $\mathcal{RN}_{w, \omega}$ be a randomized shallow neural
  network, as introduced in Definition~\ref{def:rsnn}. Then the optimal ridge penalized
  network is given by
@ -24,12 +256,11 @@ first need to introduce some definitions.
  \[
    w^{*,\tilde{\lambda}}(\omega) :\in \argmin_{w \in
      \mathbb{R}^n} \underbrace{ \left\{\overbrace{\sum_{i = 1}^N \left(\mathcal{RN}_{w,
              \omega}(x_i^{\text{train}}) -
            y_i^{\text{train}}\right)^2}^{L(\mathcal{RN}_{w, \omega})} +
        \tilde{\lambda} \norm{w}_2^2\right\}}_{\eqqcolon F_n^{\tilde{\lambda}}(\mathcal{RN}_{w,\omega})}.
  \]
\end{Definition}

In the ridge penalized Neural Network large weights are penalized; the
extent of this can be tuned with the parameter $\tilde{\lambda}$. If
$n$ is larger than the amount of training samples $N$ then for
@ -43,14 +274,12 @@ having minimal weights, resulting in the \textit{minimum norm
\[
  w^{\text{min}} \in \argmin_{w \in \mathbb{R}^n} \norm{w}, \text{
    s.t. }
  \mathcal{RN}_{w,\omega}(x_i^{\text{train}}) = y_i^{\text{train}}, \, \forall i \in
  \left\{1,\dots,N\right\}.
\]
For $\tilde{\lambda} \to \infty$ the learned
function will resemble the data less and less, with the weights
approaching $0$.\par
In order to make the notation more convenient, in the following the
$\omega$ used to express the realised random parameters will no longer
be explicitly mentioned.
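
Since only the output weights $w$ are trained, $F_n^{\tilde{\lambda}}$ is an ordinary ridge regression problem over the fixed random features; a minimal sketch of computing $w^{*,\tilde{\lambda}}$ in closed form (assuming NumPy and one dimensional inputs):

\begin{verbatim}
import numpy as np

def ridge_penalized_weights(x_train, y_train, v, b, lam):
    # Phi[i, k] = ReLU(b_k + v_k * x_i) are the fixed random
    # features, so w* = (Phi^T Phi + lam I)^{-1} Phi^T y.
    Phi = np.maximum(0.0, b[None, :] + v[None, :] * x_train[:, None])
    n = Phi.shape[1]
    return np.linalg.solve(Phi.T @ Phi + lam * np.eye(n),
                           Phi.T @ y_train)
\end{verbatim}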

@ -60,10 +289,10 @@ be explizitly mentioned.
Network according to Definition~\ref{def:rsnn}, then kinks depending on the random parameters can
be observed.
\[
  \mathcal{RN}_w(x) = \sum_{k = 1}^n w_k \sigma(b_k + v_kx)
\]
Because we specified $\sigma(y) \coloneqq \max\left\{0, y\right\}$ a
kink in $\sigma$ can be observed at $\sigma(0) = 0$. As $b_k + v_kx = 0$ for $x
= -\frac{b_k}{v_k}$ we define the following:
\begin{enumerate}[label=(\alph*)]
\item Let $\xi_k \coloneqq -\frac{b_k}{v_k}$ be the k-th kink of $\mathcal{RN}_w$.
@ -91,7 +320,7 @@ smooth approximation of the RSNN.

\[
  \kappa_x(s) \coloneqq \mathds{1}_{\left\{\abs{s} \leq \frac{1}{2 \sqrt{n}
        g_{\xi}(x)}\right\}}(s)\sqrt{n} g_{\xi}(x), \, \forall s \in \mathbb{R}
\]

Using this kernel we define a smooth approximation of

@ -113,69 +342,120 @@ that the ridge penalized neural network as defined in
Definition~\ref{def:rpnn} converges to a weighted regression spline, as
the number of hidden nodes grows to infinity.

\begin{Definition}[Adapted Weighted regression spline]
  \label{def:wrs}
  Let $x_i^{\text{train}}, y_i^{\text{train}} \in \mathbb{R}, i \in
  \left\{1,\dots,N\right\}$ be training data. For a given $\lambda \in \mathbb{R}_{>0}$
  and a function $g: \mathbb{R} \to \mathbb{R}_{>0}$ the weighted
  regression spline $f^{*, \lambda}_g$ is given by

  \[
    f^{*, \lambda}_g :\in \argmin_{\substack{f \in \mathcal{C}^2(\mathbb{R})
        \\ \supp(f) \subseteq \supp(g)}} \underbrace{\left\{ \overbrace{\sum_{i =
            1}^N \left(f(x_i^{\text{train}}) - y_i^{\text{train}}\right)^2}^{L(f)} +
        \lambda g(0) \int_{\supp(g)}\frac{\left(f''(x)\right)^2}{g(x)}
        dx\right\}}_{\eqqcolon F^{\lambda, g}(f)}.
  \]
  \todo{Requirement on the derivative of f, or not?}
\end{Definition}

Similarly to ridge weight penalized neural networks, the parameter
$\lambda$ controls a trade-off between accuracy on the training data
and smoothness or low second derivative. For $g \equiv 1$ and $\lambda \to 0$ the
resulting function $f^{*, 0+}$ will interpolate the training data while minimizing
the second derivative. Such a function is known as cubic spline
interpolation.
\todo{cite cubic spline}

\[
  f^{*, 0+} \text{ smooth spline interpolation: }
\]
\[
  f^{*, 0+} \coloneqq \lim_{\lambda \to 0+} f^{*, \lambda}_1 \in
  \argmin_{\substack{f \in \mathcal{C}^2(\mathbb{R}), \\ f(x_i^{\text{train}}) =
      y_i^{\text{train}}}} \left( \int _{\mathbb{R}} (f''(x))^2dx\right).
\]

For $\lambda \to \infty$ on the other hand $f_g^{*,\lambda}$ converges
to linear regression of the data.

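The interpolating limit $f^{*, 0+}$ can be computed directly: among all interpolants, the natural cubic spline minimizes $\int (f''(x))^2 dx$. A minimal sketch (assuming SciPy):

\begin{verbatim}
import numpy as np
from scipy.interpolate import CubicSpline

# Natural cubic spline: the interpolant minimizing the integrated
# squared second derivative, i.e. the lambda -> 0+ limit above.
x_train = np.array([-1.0, -0.5, 0.0, 0.5, 1.0])
y_train = np.sin(np.pi * x_train)
f = CubicSpline(x_train, y_train, bc_type='natural')

print(f(0.25))      # evaluate the spline
print(f(0.25, 2))   # evaluate its second derivative
\end{verbatim}
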
\begin{Definition}[Spline approximating Randomised Shallow Neural
  Network]
  \label{def:sann}
  Let $\mathcal{RN}$ be a randomised shallow Neural Network according
  to Definition~\ref{def:rsnn} and $f^{*, \lambda}_g$ be the weighted
  regression spline as introduced in Definition~\ref{def:wrs}. Then
  the randomised shallow neural network approximating $f^{*,
    \lambda}_g$ is given by
  \[
    \mathcal{RN}_{\tilde{w}}(x) = \sum_{k = 1}^n \tilde{w}_k \sigma(b_k + v_k x),
  \]
  with the weights $\tilde{w}_k$ defined as
  \[
    \tilde{w}_k \coloneqq \frac{h_{k,n} v_k}{\mathbb{E}[v^2 \vert \xi
      = \xi_k]} (f_g^{*, \lambda})''(\xi_k).
  \]
\end{Definition}

The approximating nature of the network in
Definition~\ref{def:sann} can be seen by examining the first
derivative of $\mathcal{RN}_{\tilde{w}}(x)$, which is given by
\begin{align}
  \frac{\partial \mathcal{RN}_{\tilde{w}}}{\partial x}
  \Big{|}_{x} &= \sum_{k=1}^n \tilde{w}_k v_k \mathds{1}_{\left\{b_k + v_k x >
                0\right\}} = \sum_{\substack{k \in \mathbb{N} \\ \xi_k <
                x}} \tilde{w}_k v_k \nonumber \\
              &= \frac{1}{n} \sum_{\substack{k \in \mathbb{N} \\
                \xi_k < x}} \frac{v_k^2}{g_{\xi}(\xi_k) \mathbb{E}[v^2 \vert \xi
                = \xi_k]} (f_g^{*, \lambda})''(\xi_k). \label{eq:derivnn}
\end{align}
\todo{proper derivative notation}
As the expression (\ref{eq:derivnn}) behaves similarly to a
Riemann-sum for $n \to \infty$, it will converge to the first
derivative of $f^{*,\lambda}_g$. A formal proof of this behaviour
is given in Lemma~\ref{lem:s0}.

In order to formulate the theorem describing the convergence of
$\mathcal{RN}_w$ we need to make a couple of assumptions.
\todo{better formulation}

\begin{Assumption}~
  \label{ass:theo38}
  \begin{enumerate}[label=(\alph*)]
  \item The probability density function of the kinks $\xi_k$,
    namely $g_{\xi}$ as defined in Definition~\ref{def:kink}, exists
    and is well defined.
  \item The density function $g_{\xi}$
    has compact support on $\supp(g_{\xi})$.
  \item The density function $g_{\xi}$ is uniformly continuous on $\supp(g_{\xi})$.
  \item $g_{\xi}(0) \neq 0$.
  \item $\frac{1}{g_{\xi}}\Big|_{\supp(g_{\xi})}$ is uniformly
    continuous on $\supp(g_{\xi})$.
  \item The conditional distribution $\mathcal{L}(v_k|\xi_k = x)$
    is uniformly continuous on $\supp(g_{\xi})$.
  \item $\mathbb{E}\left[v_k^2\right] < \infty$.
  \end{enumerate}
\end{Assumption}

As we will prove the proposition in the Sobolev space, we hereby
introduce it and its induced norm.

\begin{Definition}[Sobolev Space]
  For $K \subset \mathbb{R}^n$ open and $1 \leq p \leq \infty$ we
  define the Sobolev space $W^{k,p}(K)$ as the space containing all
  real valued functions $u \in L^p(K)$ such that for every multi-index
  $\alpha \in \mathbb{N}^n$ with $\abs{\alpha} \leq
  k$ the mixed partial derivatives
  \[
    u^{(\alpha)} = \frac{\partial^{\abs{\alpha}} u}{\partial
      x_1^{\alpha_1} \dots \partial x_n^{\alpha_n}}
  \]
  exist in the weak sense and
  \[
    \norm{u^{(\alpha)}}_{L^p} < \infty.
  \]
  \todo{finish}
  \label{def:sobonorm}
  The natural norm of the Sobolev space is given by
  \[
@ -191,7 +471,51 @@ auxiliary Lemmata first.
  \]
\end{Definition}

With these assumptions in place we can formulate the main theorem.
\todo{reference to the space}


\begin{Theorem}[Ridge weight penalty corresponds to weighted regression spline]
  \label{theo:main1}
  For $N \in \mathbb{N}$ arbitrary training data
  \(\left(x_i^{\text{train}}, y_i^{\text{train}}
  \right)\) and $\mathcal{RN}^{*, \tilde{\lambda}}, f_g^{*, \lambda}$
  according to Definition~\ref{def:rpnn} and Definition~\ref{def:wrs}
  respectively with Assumption~\ref{ass:theo38} it holds

  \begin{equation}
    \label{eq:main1}
    \plimn \norm{\mathcal{RN}^{*, \tilde{\lambda}} - f^{*,
        \lambda}_{g}}_{W^{1,\infty}(K)} = 0,
  \end{equation}

  with
  \begin{align*}
    g(x) & \coloneqq g_{\xi}(x)\mathbb{E}\left[ v_k^2 \vert \xi_k = x
           \right], \forall x \in \mathbb{R}, \\
    \tilde{\lambda} & \coloneqq \lambda n g(0).
  \end{align*}
\end{Theorem}

We will prove Theorem~\ref{theo:main1} by showing that
\begin{equation}
  \label{eq:main2}
  \plimn \norm{\mathcal{RN}^{*, \tilde{\lambda}} - f^{w^*}}_{W^{1,
      \infty}(K)} = 0
\end{equation}
and
\begin{equation}
  \label{eq:main3}
  \plimn \norm{f^{w^*} - f_g^{*, \lambda}}_{W^{1,\infty}(K)} = 0
\end{equation}
and then using the triangle inequality to conclude (\ref{eq:main1}). In
order to prove (\ref{eq:main2}) and (\ref{eq:main3}) we will need to
introduce a number of auxiliary lemmata; proofs of these will be
provided in the appendix, as they would go beyond the scope of this work.

\begin{Lemma}[Poincar\'e type inequality]
  \label{lem:pieq}
  Let \(f:\mathbb{R} \to \mathbb{R}\) be differentiable with \(f' :
  \mathbb{R} \to \mathbb{R}\) Lebesgue integrable. Then for \(K=[a,b]
  \subset \mathbb{R}\) with \(f(a)=0\) it holds that
@ -229,20 +553,21 @@
  % get (\ref{eq:pti1}).
  % By using the Hölder inequality, we can proof the second claim.
  % \begin{align*}
  %   \norm{f'}_{L^{\infty}(K)} &= \sup_{x \in K} \abs{\int_a^bf''(y)
  %                               \mathds{1}_{[a,x]}(y)dy} \leq \sup_{x \in
  %                               K}\norm{f''\mathds{1}_{[a,x]}}_{L^1(K)}\\
  %                             &\hspace{-6pt} \stackrel{\text{Hölder}}{\leq} sup_{x
  %                               \in
  %                               K}\norm{f''}_{L^2(K)}\norm{\mathds{1}_{[a,x]}}_{L^2(K)}
  %                               = \abs{b-a}\norm{f''}_{L^2(K)}.
  % \end{align*}
  % Thus (\ref{eq:pti2}) follows with \(C_K^2 \coloneqq
  % \abs{b-a}C_K^{\infty}\).
  % \qed
\end{Lemma}

\begin{Lemma}
  \label{lem:cnvh}
  Let $\mathcal{RN}$ be a shallow Neural network. For \(\varphi :
  \mathbb{R}^2 \to \mathbb{R}\) uniformly continuous such that
  \[
@ -252,68 +577,221 @@ auxiliary Lemmata first.
  it holds, that
  \[
    \plimn \sum_{k \in \kappa : \xi_k < T} \varphi(\xi_k, v_k)
    h_{k,n}
    =\int_{\min\left\{C_{g_{\xi}}^l, T\right\}}^{\min\left\{C_{g_{\xi}}^u,T\right\}}
    \mathbb{E}\left[\varphi(\xi, v) \vert \xi = x \right] dx
  \]
  uniformly in \(T \in K\).
  % \proof
  % For \(T \leq C_{g_{\xi}}^l\) both sides equal 0, so it is sufficient to
  % consider \(T > C_{g_{\xi}}^l\). With \(\varphi\) and
  % \(\nicefrac{1}{g_{\xi}}\) uniformly continuous in \(\xi\),
  % \begin{equation}
  %   \label{eq:psi_stet}
  %   \forall \varepsilon > 0 : \exists \delta(\varepsilon) : \forall
  %   \abs{\xi - \xi'} < \delta(\varepsilon) : \abs{\varphi(\xi, v)
  %   \frac{1}{g_{\xi}(\xi)} - \varphi(\xi', v)
  %   \frac{1}{g_{\xi}(\xi')}} < \varepsilon
  % \end{equation}
  % uniformly in \(v\). In order to
  % save space we use the notation \((a \wedge b) \coloneqq \min\{a,b\}\) for $a$ and $b
  % \in \mathbb{R}$. W.l.o.g. assume \(\sup(g_{\xi})\) in an
  % intervall. By splitting the interval in disjoint strips of length \(\delta
  % \leq \delta(\varepsilon)\) we get:

  % \[
  %   \underbrace{\sum_{k \in \kappa : \xi_k < T} \varphi(\xi_k, v_k)
  %   \frac{\bar{h}_k}{2}}_{\circled{1}} =
  %   \underbrace{\sum_{l \in \mathbb{Z}:
  %   \left[\delta l, \delta (l + 1)\right] \subseteq
  %   \left[C_{g_{\xi}}^l, C_{g_{\xi}}^u \wedge T
  %   \right]}}_{\coloneqq \, l \in I_{\delta}} \left( \, \sum_{\substack{k \in \kappa\\
  %   \xi_k \in \left[\delta l, \delta (l + 1)\right]}}
  %   \varphi\left(\xi_k, v_k\right)\frac{\bar{h}_k}{2} \right)
  % \]
  % Using (\ref{eq:psi_stet}) we can approximate $\circled{1}$ by
  % \begin{align*}
  %   \circled{1} & \approx \sum_{l \in I_{\delta}} \left( \, \sum_{\substack{k \in \kappa\\
  %   \xi_k \in \left[\delta l, \delta (l + 1)\right]}}
  %   \left(\varphi\left(l\delta, v_k\right)\frac{1}{g_{\xi}(l\delta)}
  %   \pm \varepsilon\right)\frac{1}{n} \underbrace{\frac{\abs{\left\{m \in
  %   \kappa : \xi_m \in [\delta l, \delta(l + 1)]\right\}}}{\abs{\left\{m \in
  %   \kappa : \xi_m \in [\delta l, \delta(l + 1)]\right\}}}}_{=
  %   1}\right) \\
  %   % \intertext{}
  %   &= \sum_{l \in I_{\delta}} \left( \frac{ \sum_{ \substack{k \in \kappa\\
  %   \xi_k \in \left[\delta l, \delta (l + 1)\right]}}
  %   \varphi\left(l\delta, v_k\right)}
  %   {\abs{\left\{m \in
  %   \kappa : \xi_m \in [\delta l, \delta(l + 1)]\right\}}}\frac{\abs{\left\{m \in
  %   \kappa : \xi_m \in [\delta l, \delta(l +
  %   1)]\right\}}}{ng_{\xi}(l\delta)}\right) \pm \varepsilon .\\
  %   \intertext{We use the mean to approximate the number of kinks in
  %   each $\delta$-strip, as it follows a binomial distribution this
  %   amounts to
  %   \[
  %     \mathbb{E}\left[\abs{\left\{m \in \kappa : \xi_m \in [\delta l,
  %     \delta(l + 1)]\right\}}\right] = n \int_{[\delta l, \delta (l +
  %     1)]} g_{\xi}(x)dx \approx n (\delta g_{\xi}(l\delta) \pm
  %     \tilde{\varepsilon}).
  %   \]
  %   Bla Bla Bla $v_k$}
  %   \circled{1} & \approx
  % \end{align*}
\end{Lemma}

\begin{Lemma}[Step 0]
  For any $\lambda > 0$, training data $(x_i^{\text{train}},
  y_i^{\text{train}}) \in \mathbb{R}^2$, with $ i \in
  \left\{1,\dots,N\right\}$ and subset $K \subset \mathbb{R}$ the spline approximating randomized
  shallow neural network $\mathcal{RN}_{\tilde{w}}$ converges to the
  regression spline $f^{*, \lambda}_g$ in
  $\norm{.}_{W^{1,\infty}(K)}$ as the node count $n$ increases,
  \begin{equation}
    \label{eq:s0}
    \plimn \norm{\mathcal{RN}_{\tilde{w}} - f^{*, \lambda}_g}_{W^{1,
        \infty}(K)} = 0
  \end{equation}
  \proof
  Using Lemma~\ref{lem:pieq} it is sufficient to show
  \[
    \plimn \norm{\mathcal{RN}_{\tilde{w}}' - (f^{*,
        \lambda}_g)'}_{L^{\infty}} = 0.
  \]
  This can be achieved by using Lemma~\ref{lem:cnvh} with $\varphi(\xi_k,
  v_k) = \frac{v_k^2}{\mathbb{E}[v^2|\xi = \xi_k]} (f^{*, \lambda}_g)''(\xi_k) $
  thus obtaining
  \begin{align*}
    \plimn \frac{\partial \mathcal{RN}_{\tilde{w}}}{\partial x}
    \stackrel{(\ref{eq:derivnn})}{=}
    & \plimn \sum_{\substack{k \in \mathbb{N} \\
        \xi_k < x}} \frac{v_k^2}{\mathbb{E}[v^2 \vert \xi
        = \xi_k]} (f_g^{*, \lambda})''(\xi_k) h_{k,n}
    \stackrel{\text{Lemma}~\ref{lem:cnvh}}{=} \\
    \stackrel{\phantom{(\ref{eq:derivnn})}}{=}
    &
      \int_{\min\left\{C_{g_{\xi}}^l,T\right\}}^{\min\left\{C_{g_{\xi}}^u,T\right\}}
      \mathbb{E}\left[\frac{v^2}{\mathbb{E}[v^2|\xi = x]} (f^{*,
      \lambda}_g)''(\xi) \vert
      \xi = x \right] dx \equals^{\text{Tower-}}_{\text{property}} \\
    \stackrel{\phantom{(\ref{eq:derivnn})}}{=}
    &
      \int_{\min\left\{C_{g_{\xi}}^l,
      T\right\}}^{\min\left\{C_{g_{\xi}}^u,T\right\}}(f^{*,\lambda}_g)''(x)
      dx.
  \end{align*}
  By the fundamental theorem of calculus and $\supp(f') \subset
  \supp(f)$, (\ref{eq:s0}) follows with Lemma~\ref{lem:pieq}.
  \qed
  \label{lem:s0}
\end{Lemma}

\begin{Lemma}[Step 2]
  For any $\lambda > 0$ and training data $(x_i^{\text{train}},
  y_i^{\text{train}}) \in \mathbb{R}^2, \, i \in
  \left\{1,\dots,N\right\}$, we have
  \[
    \plimn F^{\tilde{\lambda}}_n(\mathcal{RN}_{\tilde{w}}) =
    F^{\lambda, g}(f^{*, \lambda}_g) = 0.
  \]
  \proof
  This can be proven by showing
\end{Lemma}

\begin{Lemma}[Step 3]
  For any $\lambda > 0$ and training data $(x_i^{\text{train}},
  y_i^{\text{train}}) \in \mathbb{R}^2, \, i \in
  \left\{1,\dots,N\right\}$, with $w^*$ and $\tilde{\lambda}$ as
  defined in Definition~\ref{def:rpnn} and Theorem~\ref{theo:main1}
  respectively, it holds
  \[
    \plimn \norm{\mathcal{RN}^{*,\tilde{\lambda}} -
      f^{w^*, \tilde{\lambda}}}_{W^{1,\infty}(K)} = 0.
  \]
\end{Lemma}

\begin{Lemma}[Step 4]
  For any $\lambda > 0$ and training data $(x_i^{\text{train}},
  y_i^{\text{train}}) \in \mathbb{R}^2, \, i \in
  \left\{1,\dots,N\right\}$, with $w^*$ and $\tilde{\lambda}$ as
  defined in Definition~\ref{def:rpnn} and Theorem~\ref{theo:main1}
  respectively, it holds
  \[
    \plimn \abs{F_n^{\lambda}(\mathcal{RN}^{*,\tilde{\lambda}}) -
      F^{\lambda, g}(f^{w^*, \tilde{\lambda}})} = 0.
  \]
\end{Lemma}

\begin{Lemma}[Step 7]
  For any $\lambda > 0$ and training data $(x_i^{\text{train}},
  y_i^{\text{train}}) \in \mathbb{R}^2, \, i \in
  \left\{1,\dots,N\right\}$, for any sequence of functions $f^n \in
  W^{2,2}$ with
  \[
    \plimn F^{\lambda, g} (f^n) = F^{\lambda, g}(f^{*, \lambda}),
  \]
  it follows
  \[
    \plimn \norm{f^n - f^{*, \lambda}} = 0.
  \]
\end{Lemma}

\textcite{heiss2019} further show a link between ridge penalized
networks and randomized shallow neural networks which are trained with
gradient descent that is stopped after a certain number of iterations.

\newpage
\subsection{Simulations}
In the following the behaviour described in Theorem~\ref{theo:main1}
is visualized in a simulated example. For this, two sets of training
data have been generated:
\begin{itemize}
\item $\text{data}_A = (x_{i, A}^{\text{train}},
  y_{i,A}^{\text{train}})$ with
  \begin{align*}
    x_{i, A}^{\text{train}} &\coloneqq -\pi + \frac{2 \pi}{5} (i - 1),
                              i \in \left\{1, \dots, 6\right\}, \\
    y_{i, A}^{\text{train}} &\coloneqq \sin( x_{i, A}^{\text{train}}). \phantom{(i - 1),
                              i \in \left\{1, \dots, 6\right\}}
  \end{align*}
\item $\text{data}_B = (x_{i, B}^{\text{train}}, y_{i,
    B}^{\text{train}})$ with
  \begin{align*}
    x_{i, B}^{\text{train}} &\coloneqq \pi\frac{i - 8}{7},
                              i \in \left\{1, \dots, 15\right\}, \\
    y_{i, B}^{\text{train}} &\coloneqq \sin( x_{i, B}^{\text{train}}). \phantom{(i - 1),
                              i \in \left\{1, \dots, 6\right\}}
  \end{align*}
\end{itemize}
For the $\mathcal{RN}$ the random weights are distributed
as follows:
\begin{align*}
  \xi_i &\stackrel{i.i.d.}{\sim} \text{Unif}(-5,5), \\
  v_i &\stackrel{i.i.d.}{\sim} \mathcal{N}(0, 5), \\
  b_i &\coloneqq -\xi_i v_i.
\end{align*}
Note that by the choices for the distributions, $g$ as defined in
Theorem~\ref{theo:main1}
would equate to $g(x) = \frac{\mathbb{E}[v_k^2|\xi_k = x]}{10}$. In
order to utilize the
smoothing spline implemented in Matlab, $g$ has been simplified to $g
\equiv \frac{1}{10}$ instead. For all figures $f_1^{*, \lambda}$ has
been calculated with Matlab's ..... As ... minimizes
\[
  \bar{\lambda} \sum_{i=1}^N(y_i^{\text{train}} - f(x_i^{\text{train}}))^2 + (1 -
  \bar{\lambda}) \int (f''(x))^2 dx
\]
the smoothing parameter used for fitting is $\bar{\lambda} =
\frac{1}{1 + \lambda}$. The parameter $\tilde{\lambda}$ for training
the networks is chosen as defined in Theorem~\ref{theo:main1} and each
one is trained on the full training data for 5000 iterations using
gradient descent. The
results are given in Figure~\ref{blblb}; here it can be seen that in
the interval of the training data $[-\pi, \pi]$ the neural network and
smoothing spline are nearly identical, coinciding with the proposition.
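
A minimal sketch of this training procedure (assuming NumPy; gradient descent on $F_n^{\tilde{\lambda}}$ with respect to the trainable weights $w$ only, with an illustrative, untuned step size):

\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
x = -np.pi + 2 * np.pi / 5 * np.arange(6)   # data_A
y = np.sin(x)

n, lam = 1000, 0.1                  # node count, lambda (illustrative)
xi = rng.uniform(-5, 5, n)
v = rng.normal(0, np.sqrt(5), n)    # N(0, 5) read as variance 5
b = -xi * v

Phi = np.maximum(0.0, b[None, :] + v[None, :] * x[:, None])
lam_t = lam * n * np.mean(v**2) / 10   # tilde lambda, g = E[v^2|.]/10

w = np.zeros(n)
gamma = 1e-6                        # step size (illustrative)
for _ in range(5000):               # 5000 iterations as in the text
    grad = 2 * Phi.T @ (Phi @ w - y) + 2 * lam_t * w
    w -= gamma * grad
\end{verbatim}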

\input{Plots/RN_vs_RS}


%%% Local Variables:
%%% mode: latex
%%% TeX-master: "main"
%%% End: