{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Coursework 1\n",
"\n",
"This notebook is intended as a starting point for your experiments. The instructions can be found in MLP2024_25_CW1_Spec.pdf (see Learn, Assignment Submission, Coursework 1). The methods provided here are just helper functions. If you want more complex graphs, such as side-by-side comparisons of different experiments, you should learn more about matplotlib and implement them yourself. Before each experiment, remember to re-initialise the neural network weights and reset the data providers so that the run starts from a properly initialised state (a minimal sketch of this reset step is included in a cell below). For each experiment, try to keep all hyperparameters fixed except the one under investigation, so that you can isolate its effect."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np  # used below for the epoch axes of the plots\n",
"import matplotlib.pyplot as plt\n",
"%matplotlib inline\n",
"plt.style.use('ggplot')\n",
"\n",
"def train_model_and_plot_stats(\n",
"        model, error, learning_rule, train_data, valid_data, num_epochs, stats_interval, notebook=True):\n",
"\n",
"    # As well as monitoring the error over training, also monitor the classification\n",
"    # accuracy, i.e. the proportion of most-probable predicted classes equal to the targets.\n",
"    data_monitors = {'acc': lambda y, t: (y.argmax(-1) == t.argmax(-1)).mean()}\n",
"\n",
"    # Use the created objects to initialise a new Optimiser instance.\n",
"    optimiser = Optimiser(\n",
"        model, error, learning_rule, train_data, valid_data, data_monitors, notebook=notebook)\n",
"\n",
"    # Run the optimiser for num_epochs epochs (full passes through the training set),\n",
"    # printing statistics every stats_interval epochs.\n",
"    stats, keys, run_time = optimiser.train(num_epochs=num_epochs, stats_interval=stats_interval)\n",
"\n",
"    # Plot the change in the validation and training set error over training.\n",
"    fig_1 = plt.figure(figsize=(8, 4))\n",
"    ax_1 = fig_1.add_subplot(111)\n",
"    for k in ['error(train)', 'error(valid)']:\n",
"        ax_1.plot(np.arange(1, stats.shape[0]) * stats_interval,\n",
"                  stats[1:, keys[k]], label=k)\n",
"    ax_1.legend(loc=0)\n",
"    ax_1.set_xlabel('Epoch number')\n",
"    ax_1.set_ylabel('Error')\n",
"\n",
"    # Plot the change in the validation and training set accuracy over training.\n",
"    fig_2 = plt.figure(figsize=(8, 4))\n",
"    ax_2 = fig_2.add_subplot(111)\n",
"    for k in ['acc(train)', 'acc(valid)']:\n",
"        ax_2.plot(np.arange(1, stats.shape[0]) * stats_interval,\n",
"                  stats[1:, keys[k]], label=k)\n",
"    ax_2.legend(loc=0)\n",
"    ax_2.set_xlabel('Epoch number')\n",
"    ax_2.set_ylabel('Accuracy')\n",
"\n",
"    return stats, keys, run_time, fig_1, ax_1, fig_2, ax_2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The code below sets up the data providers, random number\n",
"# generator and logger objects needed for training runs. As\n",
"# loading the data from file takes a little while, you will\n",
"# generally not want to reload the data providers on every\n",
"# training run. If you wish to reset their state you should\n",
"# instead use the .reset() method of the data providers.\n",
"import numpy as np\n",
"import logging\n",
"import sys\n",
"# sys.path.append('/path/to/mlpractical')\n",
"from mlp.data_providers import MNISTDataProvider, EMNISTDataProvider\n",
"\n",
"# Seed a random number generator\n",
"seed = 11102019\n",
"rng = np.random.RandomState(seed)\n",
"batch_size = 100\n",
"# Set up a logger object to print info about the training run to stdout\n",
"logger = logging.getLogger()\n",
"logger.setLevel(logging.INFO)\n",
"logger.handlers = [logging.StreamHandler()]\n",
"\n",
"# Create data provider objects for the EMNIST data set\n",
"train_data = EMNISTDataProvider('train', batch_size=batch_size, rng=rng)\n",
"valid_data = EMNISTDataProvider('valid', batch_size=batch_size, rng=rng)"
]
},
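{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell is a minimal sketch of the reset step described in the introduction: run it before each new experiment so the run starts from a properly initialised state. It only uses the `.reset()` method of the data providers mentioned in the comment above, and re-creates `rng` from `seed` so that any weight initialisers built afterwards are reproducible; adapt it to your own experiment loop as needed."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch: run before each new experiment for a clean start.\n",
"# Reset the data providers to their initial state (cheaper than reloading from file).\n",
"train_data.reset()\n",
"valid_data.reset()\n",
"\n",
"# Re-create the random number generator from the original seed so that\n",
"# weight initialisers built afterwards give reproducible initial weights.\n",
"rng = np.random.RandomState(seed)"
]
},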
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The model set-up code below is provided as a starting point.\n",
"# You will probably want to add further code cells for the\n",
"# different experiments you run.\n",
"\n",
"%pip install tqdm\n",
"\n",
"from mlp.layers import AffineLayer, SoftmaxLayer, SigmoidLayer, ReluLayer\n",
"from mlp.errors import CrossEntropySoftmaxError\n",
"from mlp.models import MultipleLayerModel\n",
"from mlp.initialisers import ConstantInit, GlorotUniformInit\n",
"from mlp.learning_rules import AdamLearningRule\n",
"from mlp.optimisers import Optimiser\n",
"\n",
"# Set up hyperparameters\n",
"learning_rate = 0.001\n",
"num_epochs = 100\n",
"stats_interval = 1\n",
"input_dim, output_dim, hidden_dim = 784, 47, 128\n",
"\n",
"weights_init = GlorotUniformInit(rng=rng)\n",
"biases_init = ConstantInit(0.)\n",
"\n",
"# Create model with ONE hidden layer\n",
"model = MultipleLayerModel([\n",
"    AffineLayer(input_dim, hidden_dim, weights_init, biases_init),  # hidden layer\n",
"    ReluLayer(),\n",
"    AffineLayer(hidden_dim, output_dim, weights_init, biases_init)  # output layer\n",
"])\n",
"\n",
"error = CrossEntropySoftmaxError()\n",
"# Use an Adam learning rule\n",
"learning_rule = AdamLearningRule(learning_rate=learning_rate)\n",
"\n",
"# Remember to use notebook=False when you write a script to be run in a terminal\n",
"_ = train_model_and_plot_stats(\n",
"    model, error, learning_rule, train_data, valid_data, num_epochs, stats_interval, notebook=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Create model with TWO hidden layers\n",
"model = MultipleLayerModel([\n",
"    AffineLayer(input_dim, hidden_dim, weights_init, biases_init),  # first hidden layer\n",
"    ReluLayer(),\n",
"    AffineLayer(hidden_dim, hidden_dim, weights_init, biases_init),  # second hidden layer\n",
"    ReluLayer(),\n",
"    AffineLayer(hidden_dim, output_dim, weights_init, biases_init)  # output layer\n",
"])\n",
"\n",
"error = CrossEntropySoftmaxError()\n",
"# Use an Adam learning rule\n",
"learning_rule = AdamLearningRule(learning_rate=learning_rate)\n",
"\n",
"# Remember to use notebook=False when you write a script to be run in a terminal\n",
"_ = train_model_and_plot_stats(\n",
"    model, error, learning_rule, train_data, valid_data, num_epochs, stats_interval, notebook=True)"
]
}
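,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The cell below is a minimal, hypothetical sketch of the kind of side-by-side comparison plot suggested in the introduction, using only matplotlib calls already used above. The variables `stats_1`, `keys_1`, `stats_2` and `keys_2` are assumptions: to use it, capture the return values of the two `train_model_and_plot_stats` calls above (for example `stats_1, keys_1, *_ = train_model_and_plot_stats(...)`) instead of discarding them with `_ =`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical sketch: assumes stats_1/keys_1 and stats_2/keys_2 hold the\n",
"# return values of the one- and two-hidden-layer runs above.\n",
"fig, (ax_err, ax_acc) = plt.subplots(1, 2, figsize=(12, 4))\n",
"for stats, keys, name in [(stats_1, keys_1, '1 hidden layer'),\n",
"                          (stats_2, keys_2, '2 hidden layers')]:\n",
"    epochs = np.arange(1, stats.shape[0]) * stats_interval\n",
"    ax_err.plot(epochs, stats[1:, keys['error(valid)']], label=name)\n",
"    ax_acc.plot(epochs, stats[1:, keys['acc(valid)']], label=name)\n",
"ax_err.set_xlabel('Epoch number')\n",
"ax_err.set_ylabel('Validation error')\n",
"ax_acc.set_xlabel('Epoch number')\n",
"ax_acc.set_ylabel('Validation accuracy')\n",
"ax_err.legend(loc=0)\n",
"ax_acc.legend(loc=0)"
]
}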
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 4
}