{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Machine Learning Practical: Coursework 1\n",
"\n",
"**Release date: Monday 10th October 2016**  \n",
"**Due date: 16:00 Thursday 27th October 2016**\n",
"\n",
"Instructions for the coursework are [available as a PDF here](http://www.inf.ed.ac.uk/teaching/courses/mlp/2016/coursework_1.pdf)."
]
},
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Part 1: Learning rate schedules"
]
},
|
|
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# The below code will set up the data providers, random number\n",
"# generator and logger objects needed for training runs. As\n",
"# loading the data from file takes a little while you generally\n",
"# will probably not want to reload the data providers on\n",
"# every training run. If you wish to reset their state you\n",
"# should instead use the .reset() method of the data providers.\n",
"import numpy as np\n",
"import logging\n",
"from mlp.data_providers import MNISTDataProvider\n",
"\n",
"# Seed a random number generator\n",
"seed = 10102016 \n",
"rng = np.random.RandomState(seed)\n",
"\n",
"# Set up a logger object to print info about the training run to stdout\n",
"logger = logging.getLogger()\n",
"logger.setLevel(logging.INFO)\n",
"logger.handlers = [logging.StreamHandler()]\n",
"\n",
"# Create data provider objects for the MNIST data set\n",
"train_data = MNISTDataProvider('train', batch_size=50, rng=rng)\n",
"valid_data = MNISTDataProvider('valid', batch_size=50, rng=rng)"
]
},
|
|
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# The model set up code below is provided as a starting point.\n",
"# You will probably want to add further code cells for the\n",
"# different experiments you run.\n",
"\n",
"from mlp.layers import AffineLayer, SoftmaxLayer, SigmoidLayer\n",
"from mlp.errors import CrossEntropySoftmaxError\n",
"from mlp.models import MultipleLayerModel\n",
"from mlp.initialisers import ConstantInit, GlorotUniformInit\n",
"\n",
"input_dim, output_dim, hidden_dim = 784, 10, 100\n",
"\n",
"weights_init = GlorotUniformInit(rng=rng)\n",
"biases_init = ConstantInit(0.)\n",
"\n",
"model = MultipleLayerModel([\n",
"    AffineLayer(input_dim, hidden_dim, weights_init, biases_init), \n",
"    SigmoidLayer(),\n",
"    AffineLayer(hidden_dim, hidden_dim, weights_init, biases_init), \n",
"    SigmoidLayer(),\n",
"    AffineLayer(hidden_dim, output_dim, weights_init, biases_init)\n",
"])\n",
"\n",
"error = CrossEntropySoftmaxError()"
]
},
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Part 2: Momentum learning rule"
]
},
|
|
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# The below code will set up the data providers, random number\n",
"# generator and logger objects needed for training runs. As\n",
"# loading the data from file takes a little while you generally\n",
"# will probably not want to reload the data providers on\n",
"# every training run. If you wish to reset their state you\n",
"# should instead use the .reset() method of the data providers.\n",
"import numpy as np\n",
"import logging\n",
"from mlp.data_providers import MNISTDataProvider\n",
"\n",
"# Seed a random number generator\n",
"seed = 10102016 \n",
"rng = np.random.RandomState(seed)\n",
"\n",
"# Set up a logger object to print info about the training run to stdout\n",
"logger = logging.getLogger()\n",
"logger.setLevel(logging.INFO)\n",
"logger.handlers = [logging.StreamHandler()]\n",
"\n",
"# Create data provider objects for the MNIST data set\n",
"train_data = MNISTDataProvider('train', batch_size=50, rng=rng)\n",
"valid_data = MNISTDataProvider('valid', batch_size=50, rng=rng)"
]
},
|
|
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# The model set up code below is provided as a starting point.\n",
"# You will probably want to add further code cells for the\n",
"# different experiments you run.\n",
"\n",
"from mlp.layers import AffineLayer, SoftmaxLayer, SigmoidLayer\n",
"from mlp.errors import CrossEntropySoftmaxError\n",
"from mlp.models import MultipleLayerModel\n",
"from mlp.initialisers import ConstantInit, GlorotUniformInit\n",
"\n",
"input_dim, output_dim, hidden_dim = 784, 10, 100\n",
"\n",
"weights_init = GlorotUniformInit(rng=rng)\n",
"biases_init = ConstantInit(0.)\n",
"\n",
"model = MultipleLayerModel([\n",
"    AffineLayer(input_dim, hidden_dim, weights_init, biases_init), \n",
"    SigmoidLayer(),\n",
"    AffineLayer(hidden_dim, hidden_dim, weights_init, biases_init), \n",
"    SigmoidLayer(),\n",
"    AffineLayer(hidden_dim, output_dim, weights_init, biases_init)\n",
"])\n",
"\n",
"error = CrossEntropySoftmaxError()"
]
},
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Part 3: Adaptive learning rules"
]
},
|
|
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# The below code will set up the data providers, random number\n",
"# generator and logger objects needed for training runs. As\n",
"# loading the data from file takes a little while you generally\n",
"# will probably not want to reload the data providers on\n",
"# every training run. If you wish to reset their state you\n",
"# should instead use the .reset() method of the data providers.\n",
"import numpy as np\n",
"import logging\n",
"from mlp.data_providers import MNISTDataProvider\n",
"\n",
"# Seed a random number generator\n",
"seed = 10102016 \n",
"rng = np.random.RandomState(seed)\n",
"\n",
"# Set up a logger object to print info about the training run to stdout\n",
"logger = logging.getLogger()\n",
"logger.setLevel(logging.INFO)\n",
"logger.handlers = [logging.StreamHandler()]\n",
"\n",
"# Create data provider objects for the MNIST data set\n",
"train_data = MNISTDataProvider('train', batch_size=50, rng=rng)\n",
"valid_data = MNISTDataProvider('valid', batch_size=50, rng=rng)"
]
},
|
|
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# The model set up code below is provided as a starting point.\n",
"# You will probably want to add further code cells for the\n",
"# different experiments you run.\n",
"\n",
"from mlp.layers import AffineLayer, SoftmaxLayer, SigmoidLayer\n",
"from mlp.errors import CrossEntropySoftmaxError\n",
"from mlp.models import MultipleLayerModel\n",
"from mlp.initialisers import ConstantInit, GlorotUniformInit\n",
"\n",
"input_dim, output_dim, hidden_dim = 784, 10, 100\n",
"\n",
"weights_init = GlorotUniformInit(rng=rng)\n",
"biases_init = ConstantInit(0.)\n",
"\n",
"model = MultipleLayerModel([\n",
"    AffineLayer(input_dim, hidden_dim, weights_init, biases_init), \n",
"    SigmoidLayer(),\n",
"    AffineLayer(hidden_dim, hidden_dim, weights_init, biases_init), \n",
"    SigmoidLayer(),\n",
"    AffineLayer(hidden_dim, output_dim, weights_init, biases_init)\n",
"])\n",
"\n",
"error = CrossEntropySoftmaxError()"
]
}
|
|
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python [conda env:mlp]",
"language": "python",
"name": "conda-env-mlp-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.12"
}
},
"nbformat": 4,
"nbformat_minor": 1
}