\section{Code...}
In this ... the implementations of the models used in ... are
given. The randomized shallow neural network used in CHAPTER... is
implemented in Scala from the ground up to ensure that the model
matches ... of Theorem~\ref{theo:main1} exactly.
The neural networks used in CHAPTER are implemented in Python using
the Keras framework provided by TensorFlow. TensorFlow is a library
containing highly efficient GPU implementations of the most important
tensor operations, such as convolution, as well as efficient algorithms
for training neural networks (computing derivatives and updating parameters).
\begin{itemize}
\item Listing~\ref{lst:rsnn}: Scala code for the randomized shallow neural network
\item Listings~\ref{lst:handwriting} and~\ref{lst:fashion}: Keras code for the convolutional neural networks
\end{itemize}
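As a brief illustration of the gradient computations TensorFlow
performs during training, the following minimal sketch (not part of
the models used in this thesis) differentiates a function with
\textit{tf.GradientTape}:
\begin{lstlisting}[language=iPython]
import tensorflow as tf

# record operations on x to compute d(x^2)/dx = 2x
x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = x * x
print(tape.gradient(y, x))  # tf.Tensor(6.0, ...)
\end{lstlisting}
Keras builds on this mechanism: \textit{model.fit} repeatedly computes
such gradients for all trainable parameters and passes them to the
chosen optimizer.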
\begin{lstfloat}
\begin{lstlisting}[language=Scala]
import breeze.stats.distributions.Uniform
import breeze.stats.distributions.Gaussian
import scala.language.postfixOps

// ReLU activation function and its derivative
object Activation {
  def apply(x: Double): Double = math.max(0, x)
  def d(x: Double): Double = if (x > 0) 1 else 0
}

// Randomized shallow neural network with n hidden nodes.
// The hidden weights vs and biases bs are drawn once at random
// and stay fixed; only the output weights ws are trained.
class RSNN(val n: Int, val gamma: Double = 0.001) {
  val g_unif = Uniform(-10, 10)
  val g_gauss = Gaussian(0, 5)

  val xis = g_unif.sample(n)
  val vs = g_gauss.sample(n)
  val bs = xis zip vs map { case (xi, v) => xi * v }

  // activations of the hidden layer for input x
  def computeL1(x: Double) = (bs zip vs) map {
    case (b, v) => Activation(b + v * x) }

  // output layer: weighted sum of the hidden activations
  def computeL2(l1: Seq[Double], ws: Seq[Double]): Double =
    (l1 zip ws) map { case (l, w) => w * l } sum

  def output(ws: Seq[Double])(x: Double): Double =
    computeL2(computeL1(x), ws)

  // one pass over the data: for each data point, a gradient step
  // on the ridge penalized squared error with penalty lam
  def learn(data: Seq[(Double, Double)], ws: Seq[Double],
            lam: Double, gamma: Double): Seq[Double] = {
    lazy val deltas = data.map {
      case (x, y) =>
        val l1 = computeL1(x)
        val out = computeL2(l1, ws)
        (l1 zip ws) map { case (l1, w) =>
          (l1 * 2 * (out - y) + lam * 2 * w) * gamma * -1 }
    }
    deltas.foldRight(ws)(
      (delta, ws) => ws zip (delta) map { case (w, d) => w + d })
  }

  // iterate learn, starting from zero weights
  def train(data: Seq[(Double, Double)], iter: Int, lam: Double,
            gamma: Double = gamma): (Seq[Double], Double => Double) = {
    val ws = (1 to iter).foldRight((1 to n).map(
        _ => 0.0): Seq[Double])((i, w) => {
      println(s"Training iteration $i")
      println(w.sum / w.length) // mean weight, for monitoring
      learn(data, w, lam, gamma / 10)
    })
    (ws, output(ws))
  }
}
\end{lstlisting}
\caption{Scala code used to build and train the ridge penalized
randomized shallow neural network in .... The parameter \textit{lam}
in the \textit{train} function represents the $\lambda$ parameter of
the error function. The parameters \textit{n} and \textit{gamma} set
the number of hidden nodes and the step size for training.}
\label{lst:rsnn}
\end{lstfloat}
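The update performed in \textit{learn} is one explicit gradient step
per data point on the ridge penalized squared error. Writing
$a_k(x) = \max(0, b_k + v_k x)$ for the activation of the $k$-th hidden
node and $f_w(x) = \sum_{k=1}^n w_k a_k(x)$ for the network output, each
step in Listing~\ref{lst:rsnn} updates the output weights as
\[
  w_k \leftarrow w_k - \gamma \left( 2\, a_k(x) \left( f_w(x) - y \right) + 2 \lambda w_k \right),
\]
i.e. it follows the negative derivative of
$\left( f_w(x) - y \right)^2 + \lambda \lVert w \rVert_2^2$ with respect
to $w_k$.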
\clearpage
\begin{lstfloat}
\begin{lstlisting}[language=iPython]
import tensorflow as tf
import numpy as np
from tensorflow.keras.callbacks import CSVLogger
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# load the MNIST data and scale the pixel values to [0, 1]
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_train = x_train / 255.0
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_test = x_test / 255.0
# one-hot encode the labels
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)

# convolutional network: two convolution/pooling blocks followed
# by a dense layer with dropout and a softmax output layer
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(24, kernel_size=5, padding='same',
                                 activation='relu',
                                 input_shape=(28, 28, 1)))
model.add(tf.keras.layers.MaxPool2D())
model.add(tf.keras.layers.Conv2D(64, kernel_size=5, padding='same',
                                 activation='relu'))
model.add(tf.keras.layers.MaxPool2D(padding='same'))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss="categorical_crossentropy",
              metrics=["accuracy"])

# data augmentation: random rotations, zooms, shifts and shears
datagen = ImageDataGenerator(
    rotation_range=30,
    zoom_range=0.15,
    width_shift_range=2,
    height_shift_range=2,
    shear_range=1)

# log the per-epoch metrics to a CSV file
csv_logger = CSVLogger(<Target File>)
history = model.fit(datagen.flow(x_train, y_train, batch_size=50),
                    validation_data=(x_test, y_test),
                    epochs=125, callbacks=[csv_logger],
                    steps_per_epoch=x_train.shape[0] // 50)
\end{lstlisting}
\caption{Python code for the model used in ... for the MNIST
handwritten digits dataset.}
\label{lst:handwriting}
\end{lstfloat}
\clearpage
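The \textit{history} object returned by \textit{model.fit} contains
the per-epoch metrics; the final test performance can also be computed
directly. A minimal sketch, not part of the original training script:
\begin{lstlisting}[language=iPython]
# evaluate the trained model on the held-out test data;
# returns the loss followed by the metrics given to compile()
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print(f"test accuracy: {test_acc:.4f}")
\end{lstlisting}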
\begin{lstfloat}
\begin{lstlisting}[language=iPython]
import tensorflow as tf
import numpy as np
from tensorflow.keras.callbacks import CSVLogger
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# load the fashion MNIST data and scale the pixel values to [0, 1]
mnist = tf.keras.datasets.fashion_mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_train, x_test = x_train / 255.0, x_test / 255.0
# one-hot encode the labels
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)

# convolutional network: two blocks of two convolutions and a
# pooling layer, then a dense layer with dropout and softmax output
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3),
                                 activation='relu',
                                 input_shape=(28, 28, 1),
                                 padding='same'))
model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=(2, 2),
                                 activation='relu', padding='same'))
model.add(tf.keras.layers.MaxPool2D(strides=(2, 2)))
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3),
                                 activation='relu', padding='same'))
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3),
                                 activation='relu', padding='same'))
model.add(tf.keras.layers.MaxPool2D(strides=(2, 2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
              loss="categorical_crossentropy", metrics=["accuracy"])

# data augmentation: random rotations, zooms, shifts and shears;
# pixels created by the transformations are filled with zeros
datagen = ImageDataGenerator(
    rotation_range=15,
    zoom_range=0.1,
    width_shift_range=2,
    height_shift_range=2,
    shear_range=0.5,
    fill_mode='constant',
    cval=0)

# log the per-epoch metrics to a CSV file
csv_logger = CSVLogger(<Target File>)
history = model.fit(datagen.flow(x_train, y_train, batch_size=30),
                    steps_per_epoch=x_train.shape[0] // 30,
                    validation_data=(x_test, y_test),
                    epochs=125, callbacks=[csv_logger],
                    shuffle=True)
\end{lstlisting}
\caption{Python code for the model used in ... for the fashion MNIST
dataset.}
\label{lst:fashion}
\end{lstfloat}
\clearpage
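The CSV files written by the \textit{CSVLogger} callbacks can be used
to plot the training curves. A minimal sketch for reading such a log
with NumPy, assuming the logger was given the path
\textit{training.log} and that the column names follow the Keras
defaults (\textit{epoch}, \textit{accuracy}, \textit{loss},
\textit{val\_accuracy}, \textit{val\_loss}):
\begin{lstlisting}[language=iPython]
import numpy as np

# read the per-epoch metrics logged by CSVLogger;
# names=True takes the field names from the CSV header
log = np.genfromtxt('training.log', delimiter=',', names=True)
print(log['epoch'][-1], log['val_accuracy'][-1])
\end{lstlisting}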
\begin{lstfloat}
\begin{lstlisting}[language=iPython]
def get_random_sample(a, b, number_of_samples=10):
    """Draw number_of_samples random data points per class from
    the data a with (integer) labels b."""
    x = []
    y = []
    for category_number in range(0, 10):
        # get all samples of a category
        train_data_category = a[b == category_number]
        # pick a number of random samples from the category
        train_data_category = train_data_category[np.random.randint(
            train_data_category.shape[0], size=number_of_samples), :]
        x.extend(train_data_category)
        y.append([category_number] * number_of_samples)
    return (np.asarray(x).reshape(-1, 28, 28, 1),
            np.asarray(y).reshape(10 * number_of_samples, 1))
\end{lstlisting}
\caption{Python code used to generate the datasets containing a
given number of random data points per class.}
\end{lstfloat}
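A minimal usage sketch: here \textit{x\_train} and \textit{y\_train}
are assumed to be the raw training data and integer labels, i.e.\
before the one-hot encoding applied in Listing~\ref{lst:handwriting}:
\begin{lstlisting}[language=iPython]
# draw a reduced training set with 10 random samples per class
x_small, y_small = get_random_sample(x_train, y_train,
                                     number_of_samples=10)
# one-hot encode the labels, as for the full training set
y_small = tf.keras.utils.to_categorical(y_small)
\end{lstlisting}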
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "main"
%%% End: