Adding notebook with convolutional layer tests.
parent da9ee58609
commit e0114b9aab
460 notebooks/Convolutional layer tests.ipynb Normal file
@@ -0,0 +1,460 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For those who decide to implement and experiment with convolutional layers for the second coursework, a skeleton class and associated test functions for the `fprop`, `bprop` and `grads_wrt_params` methods of the class are included below.\n",
"\n",
"The test functions assume that your `fprop` implementation for the convolutional layer calculates outputs only for 'valid' overlaps of the kernel filters with the input, i.e. without any padding.\n",
"\n",
"It is also assumed that if convolutions with non-unit strides are implemented, the default behaviour is unit strides; the test cases are only correct for unit strides in both directions."
]
},
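{
"cell_type": "markdown",
"metadata": {},
"source": [
"The skeleton class and tests below refer to `np`, `layers` and `init` without importing them. A minimal set of imports is sketched here; the module paths `mlp.layers` and `mlp.initialisers` are assumptions based on the course framework and may need adjusting to match your environment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Assumed imports: adjust the mlp module paths to match your setup.\n",
"import numpy as np\n",
"import mlp.layers as layers\n",
"import mlp.initialisers as init"
]
},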
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"class ConvolutionalLayer(layers.LayerWithParameters):\n",
"    \"\"\"Layer implementing a 2D convolution-based transformation of its inputs.\n",
"\n",
"    The layer is parameterised by a set of 2D convolutional kernels, a four\n",
"    dimensional array of shape\n",
"        (num_output_channels, num_input_channels, kernel_dim_1, kernel_dim_2)\n",
"    and a bias vector, a one dimensional array of shape\n",
"        (num_output_channels,)\n",
"    i.e. one shared bias per output channel.\n",
"\n",
"    Assuming no padding is applied to the inputs, so that outputs are only\n",
"    calculated for positions where the kernel filters fully overlap with the\n",
"    inputs, and that unit strides are used, the outputs will have spatial extent\n",
"        output_dim_1 = input_dim_1 - kernel_dim_1 + 1\n",
"        output_dim_2 = input_dim_2 - kernel_dim_2 + 1\n",
"    \"\"\"\n",
"\n",
"    def __init__(self, num_input_channels, num_output_channels,\n",
"                 input_dim_1, input_dim_2,\n",
"                 kernel_dim_1, kernel_dim_2,\n",
"                 kernels_init=init.UniformInit(-0.01, 0.01),\n",
"                 biases_init=init.ConstantInit(0.),\n",
"                 kernels_penalty=None, biases_penalty=None):\n",
"        \"\"\"Initialises a parameterised convolutional layer.\n",
"\n",
"        Args:\n",
"            num_input_channels (int): Number of channels in inputs to\n",
"                layer (this may be the number of colour channels in the\n",
"                input images if used as the first layer in a model, or the\n",
"                number of output channels, a.k.a. feature maps, from a\n",
"                previous convolutional layer).\n",
"            num_output_channels (int): Number of channels in outputs\n",
"                from the layer, a.k.a. number of feature maps.\n",
"            input_dim_1 (int): Size of first input dimension of each 2D\n",
"                channel of inputs.\n",
"            input_dim_2 (int): Size of second input dimension of each 2D\n",
"                channel of inputs.\n",
"            kernel_dim_1 (int): Size of first dimension of each 2D channel of\n",
"                kernels.\n",
"            kernel_dim_2 (int): Size of second dimension of each 2D channel of\n",
"                kernels.\n",
"            kernels_init: Initialiser for the kernel parameters.\n",
"            biases_init: Initialiser for the bias parameters.\n",
"            kernels_penalty: Kernel-dependent penalty term (regulariser) or\n",
"                None if no regularisation is to be applied to the kernels.\n",
"            biases_penalty: Biases-dependent penalty term (regulariser) or\n",
"                None if no regularisation is to be applied to the biases.\n",
"        \"\"\"\n",
"        self.num_input_channels = num_input_channels\n",
"        self.num_output_channels = num_output_channels\n",
"        self.input_dim_1 = input_dim_1\n",
"        self.input_dim_2 = input_dim_2\n",
"        self.kernel_dim_1 = kernel_dim_1\n",
"        self.kernel_dim_2 = kernel_dim_2\n",
"        self.kernels_init = kernels_init\n",
"        self.biases_init = biases_init\n",
"        self.kernels_shape = (\n",
"            num_output_channels, num_input_channels, kernel_dim_1, kernel_dim_2\n",
"        )\n",
"        self.inputs_shape = (\n",
"            None, num_input_channels, input_dim_1, input_dim_2\n",
"        )\n",
"        self.kernels = self.kernels_init(self.kernels_shape)\n",
"        self.biases = self.biases_init(num_output_channels)\n",
"        self.kernels_penalty = kernels_penalty\n",
"        self.biases_penalty = biases_penalty\n",
"\n",
"    def fprop(self, inputs):\n",
"        \"\"\"Forward propagates activations through the layer transformation.\n",
"\n",
"        For inputs `x`, outputs `y`, kernels `K` and biases `b` the layer\n",
"        corresponds to `y = conv2d(x, K) + b`.\n",
"\n",
"        Args:\n",
"            inputs: Array of layer inputs of shape\n",
"                (batch_size, num_input_channels, input_dim_1, input_dim_2).\n",
"\n",
"        Returns:\n",
"            outputs: Array of layer outputs of shape\n",
"                (batch_size, num_output_channels, output_dim_1, output_dim_2).\n",
"        \"\"\"\n",
"        raise NotImplementedError()\n",
"\n",
"    def bprop(self, inputs, outputs, grads_wrt_outputs):\n",
"        \"\"\"Back propagates gradients through a layer.\n",
"\n",
"        Given gradients with respect to the outputs of the layer, calculates\n",
"        the gradients with respect to the layer inputs.\n",
"\n",
"        Args:\n",
"            inputs: Array of layer inputs of shape\n",
"                (batch_size, num_input_channels, input_dim_1, input_dim_2).\n",
"            outputs: Array of layer outputs calculated in forward pass of\n",
"                shape\n",
"                (batch_size, num_output_channels, output_dim_1, output_dim_2).\n",
"            grads_wrt_outputs: Array of gradients with respect to the layer\n",
"                outputs of shape\n",
"                (batch_size, num_output_channels, output_dim_1, output_dim_2).\n",
"\n",
"        Returns:\n",
"            Array of gradients with respect to the layer inputs of shape\n",
"            (batch_size, num_input_channels, input_dim_1, input_dim_2).\n",
"        \"\"\"\n",
"        raise NotImplementedError()\n",
"\n",
"    def grads_wrt_params(self, inputs, grads_wrt_outputs):\n",
"        \"\"\"Calculates gradients with respect to layer parameters.\n",
"\n",
"        Args:\n",
"            inputs: Array of inputs to layer of shape\n",
"                (batch_size, num_input_channels, input_dim_1, input_dim_2).\n",
"            grads_wrt_outputs: Array of gradients with respect to the layer\n",
"                outputs of shape\n",
"                (batch_size, num_output_channels, output_dim_1, output_dim_2).\n",
"\n",
"        Returns:\n",
"            List of arrays of gradients with respect to the layer parameters\n",
"            `[grads_wrt_kernels, grads_wrt_biases]`.\n",
"        \"\"\"\n",
"        raise NotImplementedError()\n",
"\n",
"    def params_penalty(self):\n",
"        \"\"\"Returns the parameter dependent penalty term for this layer.\n",
"\n",
"        If no parameter-dependent penalty terms are set this returns zero.\n",
"        \"\"\"\n",
"        params_penalty = 0\n",
"        if self.kernels_penalty is not None:\n",
"            params_penalty += self.kernels_penalty(self.kernels)\n",
"        if self.biases_penalty is not None:\n",
"            params_penalty += self.biases_penalty(self.biases)\n",
"        return params_penalty\n",
"\n",
"    @property\n",
"    def params(self):\n",
"        \"\"\"A list of layer parameter values: `[kernels, biases]`.\"\"\"\n",
"        return [self.kernels, self.biases]\n",
"\n",
"    @params.setter\n",
"    def params(self, values):\n",
"        self.kernels = values[0]\n",
"        self.biases = values[1]\n",
"\n",
"    def __repr__(self):\n",
"        return (\n",
"            'ConvolutionalLayer(\\n'\n",
"            '    num_input_channels={0}, num_output_channels={1},\\n'\n",
"            '    input_dim_1={2}, input_dim_2={3},\\n'\n",
"            '    kernel_dim_1={4}, kernel_dim_2={5}\\n'\n",
"            ')'\n",
"            .format(self.num_input_channels, self.num_output_channels,\n",
"                    self.input_dim_1, self.input_dim_2, self.kernel_dim_1,\n",
"                    self.kernel_dim_2)\n",
"        )"
]
},
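{
"cell_type": "markdown",
"metadata": {},
"source": [
"To make the transformation described in the docstring concrete: with unit strides and 'valid' overlaps, a 4x4 input convolved with a 2x2 kernel gives a 3x3 output, matching `output_dim = input_dim - kernel_dim + 1`. The cell below is a minimal NumPy-only sketch of such a forward pass (a naive loop over output positions, with kernels flipped so it is a true convolution rather than a cross-correlation). It is intended only to make the semantics explicit, not as an efficient implementation; the function name `naive_conv_fprop` is ours, not part of the provided skeleton."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def naive_conv_fprop(inputs, kernels, biases):\n",
"    # Naive valid-mode, unit-stride convolution: reference sketch only.\n",
"    batch_size, num_in, in_1, in_2 = inputs.shape\n",
"    num_out, _, k_1, k_2 = kernels.shape\n",
"    out_1, out_2 = in_1 - k_1 + 1, in_2 - k_2 + 1\n",
"    # Flipping the kernels turns the sliding dot products below\n",
"    # (cross-correlation) into a true convolution.\n",
"    flipped = kernels[:, :, ::-1, ::-1]\n",
"    outputs = np.zeros((batch_size, num_out, out_1, out_2))\n",
"    for b in range(batch_size):\n",
"        for o in range(num_out):\n",
"            for p in range(out_1):\n",
"                for q in range(out_2):\n",
"                    # Dot product of the kernel stack with the input patch,\n",
"                    # summed over input channels, plus the shared bias.\n",
"                    outputs[b, o, p, q] = np.sum(\n",
"                        inputs[b, :, p:p + k_1, q:q + k_2] * flipped[o]\n",
"                    ) + biases[o]\n",
"    return outputs"
]
},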
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The three test functions are defined in the cell below. Each function takes as its first argument the *class* corresponding to the convolutional layer implementation to be tested (**not** an instance of the class). It is assumed the class being tested has an `__init__` method with at least all of the arguments defined in the skeleton definition above. A boolean second argument to each function specifies whether the layer implements a cross-correlation or a convolution based operation (see note in [seventh lecture slides](http://www.inf.ed.ac.uk/teaching/courses/mlp/2016/mlp07-cnn.pdf))."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def test_conv_layer_fprop(layer_class, do_cross_correlation=False):\n",
"    \"\"\"Tests `fprop` method of a convolutional layer.\n",
"\n",
"    Checks the outputs of the `fprop` method for a fixed input against\n",
"    known reference values for the outputs and raises an AssertionError\n",
"    if the outputted values are not consistent with the reference values.\n",
"    If all tests pass, returns True.\n",
"\n",
"    Args:\n",
"        layer_class: Convolutional layer implementation following the\n",
"            interface defined in the provided skeleton class.\n",
"        do_cross_correlation: Whether the layer implements an operation\n",
"            corresponding to cross-correlation (True), i.e. kernels are\n",
"            not flipped before sliding over inputs, or convolution\n",
"            (False) with filters being flipped.\n",
"\n",
"    Raises:\n",
"        AssertionError: Raised if output of `layer.fprop` is inconsistent\n",
"            with reference values either in shape or values.\n",
"    \"\"\"\n",
"    inputs = np.arange(96).reshape((2, 3, 4, 4))\n",
"    kernels = np.arange(-12, 12).reshape((2, 3, 2, 2))\n",
"    if do_cross_correlation:\n",
"        kernels = kernels[:, :, ::-1, ::-1]\n",
"    biases = np.arange(2)\n",
"    true_output = np.array(\n",
"        [[[[ -958., -1036., -1114.],\n",
"           [-1270., -1348., -1426.],\n",
"           [-1582., -1660., -1738.]],\n",
"          [[ 1707.,  1773.,  1839.],\n",
"           [ 1971.,  2037.,  2103.],\n",
"           [ 2235.,  2301.,  2367.]]],\n",
"         [[[-4702., -4780., -4858.],\n",
"           [-5014., -5092., -5170.],\n",
"           [-5326., -5404., -5482.]],\n",
"          [[ 4875.,  4941.,  5007.],\n",
"           [ 5139.,  5205.,  5271.],\n",
"           [ 5403.,  5469.,  5535.]]]]\n",
"    )\n",
"    layer = layer_class(\n",
"        num_input_channels=kernels.shape[1],\n",
"        num_output_channels=kernels.shape[0],\n",
"        input_dim_1=inputs.shape[2],\n",
"        input_dim_2=inputs.shape[3],\n",
"        kernel_dim_1=kernels.shape[2],\n",
"        kernel_dim_2=kernels.shape[3]\n",
"    )\n",
"    layer.params = [kernels, biases]\n",
"    layer_output = layer.fprop(inputs)\n",
"    assert layer_output.shape == true_output.shape, (\n",
"        'Layer fprop gives incorrect shaped output. '\n",
"        'Correct shape is \\n\\n{0}\\n\\n but returned shape is \\n\\n{1}.'\n",
"        .format(true_output.shape, layer_output.shape)\n",
"    )\n",
"    assert np.allclose(layer_output, true_output), (\n",
"        'Layer fprop does not give correct output. '\n",
"        'Correct output is \\n\\n{0}\\n\\n but returned output is \\n\\n{1}.'\n",
"        .format(true_output, layer_output)\n",
"    )\n",
"    return True\n",
"\n",
"def test_conv_layer_bprop(layer_class, do_cross_correlation=False):\n",
"    \"\"\"Tests `bprop` method of a convolutional layer.\n",
"\n",
"    Checks the outputs of the `bprop` method for a fixed input against\n",
"    known reference values for the gradients with respect to inputs and\n",
"    raises an AssertionError if the returned values are not consistent\n",
"    with the reference values. If all tests pass, returns True.\n",
"\n",
"    Args:\n",
"        layer_class: Convolutional layer implementation following the\n",
"            interface defined in the provided skeleton class.\n",
"        do_cross_correlation: Whether the layer implements an operation\n",
"            corresponding to cross-correlation (True), i.e. kernels are\n",
"            not flipped before sliding over inputs, or convolution\n",
"            (False) with filters being flipped.\n",
"\n",
"    Raises:\n",
"        AssertionError: Raised if output of `layer.bprop` is inconsistent\n",
"            with reference values either in shape or values.\n",
"    \"\"\"\n",
"    inputs = np.arange(96).reshape((2, 3, 4, 4))\n",
"    kernels = np.arange(-12, 12).reshape((2, 3, 2, 2))\n",
"    if do_cross_correlation:\n",
"        kernels = kernels[:, :, ::-1, ::-1]\n",
"    biases = np.arange(2)\n",
"    grads_wrt_outputs = np.arange(-20, 16).reshape((2, 2, 3, 3))\n",
"    outputs = np.array(\n",
"        [[[[ -958., -1036., -1114.],\n",
"           [-1270., -1348., -1426.],\n",
"           [-1582., -1660., -1738.]],\n",
"          [[ 1707.,  1773.,  1839.],\n",
"           [ 1971.,  2037.,  2103.],\n",
"           [ 2235.,  2301.,  2367.]]],\n",
"         [[[-4702., -4780., -4858.],\n",
"           [-5014., -5092., -5170.],\n",
"           [-5326., -5404., -5482.]],\n",
"          [[ 4875.,  4941.,  5007.],\n",
"           [ 5139.,  5205.,  5271.],\n",
"           [ 5403.,  5469.,  5535.]]]]\n",
"    )\n",
"    true_grads_wrt_inputs = np.array(\n",
"        [[[[ 147.,  319.,  305.,  162.],\n",
"           [ 338.,  716.,  680.,  354.],\n",
"           [ 290.,  608.,  572.,  294.],\n",
"           [ 149.,  307.,  285.,  144.]],\n",
"          [[  23.,   79.,   81.,   54.],\n",
"           [ 114.,  284.,  280.,  162.],\n",
"           [ 114.,  272.,  268.,  150.],\n",
"           [  73.,  163.,  157.,   84.]],\n",
"          [[-101., -161., -143.,  -54.],\n",
"           [-110., -148., -120.,  -30.],\n",
"           [ -62.,  -64.,  -36.,    6.],\n",
"           [  -3.,   19.,   29.,   24.]]],\n",
"         [[[  39.,   67.,   53.,   18.],\n",
"           [  50.,   68.,   32.,   -6.],\n",
"           [   2.,  -40.,  -76.,  -66.],\n",
"           [ -31.,  -89., -111.,  -72.]],\n",
"          [[  59.,  115.,  117.,   54.],\n",
"           [ 114.,  212.,  208.,   90.],\n",
"           [ 114.,  200.,  196.,   78.],\n",
"           [  37.,   55.,   49.,   12.]],\n",
"          [[  79.,  163.,  181.,   90.],\n",
"           [ 178.,  356.,  384.,  186.],\n",
"           [ 226.,  440.,  468.,  222.],\n",
"           [ 105.,  199.,  209.,   96.]]]])\n",
"    layer = layer_class(\n",
"        num_input_channels=kernels.shape[1],\n",
"        num_output_channels=kernels.shape[0],\n",
"        input_dim_1=inputs.shape[2],\n",
"        input_dim_2=inputs.shape[3],\n",
"        kernel_dim_1=kernels.shape[2],\n",
"        kernel_dim_2=kernels.shape[3]\n",
"    )\n",
"    layer.params = [kernels, biases]\n",
"    layer_grads_wrt_inputs = layer.bprop(inputs, outputs, grads_wrt_outputs)\n",
"    assert layer_grads_wrt_inputs.shape == true_grads_wrt_inputs.shape, (\n",
"        'Layer bprop returns incorrect shaped array. '\n",
"        'Correct shape is \\n\\n{0}\\n\\n but returned shape is \\n\\n{1}.'\n",
"        .format(true_grads_wrt_inputs.shape, layer_grads_wrt_inputs.shape)\n",
"    )\n",
"    assert np.allclose(layer_grads_wrt_inputs, true_grads_wrt_inputs), (\n",
"        'Layer bprop does not return correct values. '\n",
"        'Correct output is \\n\\n{0}\\n\\n but returned output is \\n\\n{1}.'\n",
"        .format(true_grads_wrt_inputs, layer_grads_wrt_inputs)\n",
"    )\n",
"    return True\n",
"\n",
"def test_conv_layer_grad_wrt_params(\n",
"        layer_class, do_cross_correlation=False):\n",
"    \"\"\"Tests `grads_wrt_params` method of a convolutional layer.\n",
"\n",
"    Checks the outputs of the `grads_wrt_params` method for fixed inputs\n",
"    against known reference values for the gradients with respect to\n",
"    kernels and biases, and raises an AssertionError if the returned\n",
"    values are not consistent with the reference values. If all tests\n",
"    pass, returns True.\n",
"\n",
"    Args:\n",
"        layer_class: Convolutional layer implementation following the\n",
"            interface defined in the provided skeleton class.\n",
"        do_cross_correlation: Whether the layer implements an operation\n",
"            corresponding to cross-correlation (True), i.e. kernels are\n",
"            not flipped before sliding over inputs, or convolution\n",
"            (False) with filters being flipped.\n",
"\n",
"    Raises:\n",
"        AssertionError: Raised if output of `layer.grads_wrt_params` is\n",
"            inconsistent with reference values either in shape or values.\n",
"    \"\"\"\n",
"    inputs = np.arange(96).reshape((2, 3, 4, 4))\n",
"    kernels = np.arange(-12, 12).reshape((2, 3, 2, 2))\n",
"    biases = np.arange(2)\n",
"    grads_wrt_outputs = np.arange(-20, 16).reshape((2, 2, 3, 3))\n",
"    true_kernel_grads = np.array(\n",
"        [[[[ -240.,  -114.],\n",
"           [  264.,   390.]],\n",
"          [[-2256., -2130.],\n",
"           [-1752., -1626.]],\n",
"          [[-4272., -4146.],\n",
"           [-3768., -3642.]]],\n",
"         [[[ 5268.,  5232.],\n",
"           [ 5124.,  5088.]],\n",
"          [[ 5844.,  5808.],\n",
"           [ 5700.,  5664.]],\n",
"          [[ 6420.,  6384.],\n",
"           [ 6276.,  6240.]]]])\n",
"    if do_cross_correlation:\n",
"        kernels = kernels[:, :, ::-1, ::-1]\n",
"        true_kernel_grads = true_kernel_grads[:, :, ::-1, ::-1]\n",
"    true_bias_grads = np.array([-126.,   36.])\n",
"    layer = layer_class(\n",
"        num_input_channels=kernels.shape[1],\n",
"        num_output_channels=kernels.shape[0],\n",
"        input_dim_1=inputs.shape[2],\n",
"        input_dim_2=inputs.shape[3],\n",
"        kernel_dim_1=kernels.shape[2],\n",
"        kernel_dim_2=kernels.shape[3]\n",
"    )\n",
"    layer.params = [kernels, biases]\n",
"    layer_kernel_grads, layer_bias_grads = (\n",
"        layer.grads_wrt_params(inputs, grads_wrt_outputs))\n",
"    assert layer_kernel_grads.shape == true_kernel_grads.shape, (\n",
"        'grads_wrt_params gives incorrect shaped kernel gradients output. '\n",
"        'Correct shape is \\n\\n{0}\\n\\n but returned shape is \\n\\n{1}.'\n",
"        .format(true_kernel_grads.shape, layer_kernel_grads.shape)\n",
"    )\n",
"    assert np.allclose(layer_kernel_grads, true_kernel_grads), (\n",
"        'grads_wrt_params does not give correct kernel gradients output. '\n",
"        'Correct output is \\n\\n{0}\\n\\n but returned output is \\n\\n{1}.'\n",
"        .format(true_kernel_grads, layer_kernel_grads)\n",
"    )\n",
"    assert layer_bias_grads.shape == true_bias_grads.shape, (\n",
"        'grads_wrt_params gives incorrect shaped bias gradients output. '\n",
"        'Correct shape is \\n\\n{0}\\n\\n but returned shape is \\n\\n{1}.'\n",
"        .format(true_bias_grads.shape, layer_bias_grads.shape)\n",
"    )\n",
"    assert np.allclose(layer_bias_grads, true_bias_grads), (\n",
"        'grads_wrt_params does not give correct bias gradients output. '\n",
"        'Correct output is \\n\\n{0}\\n\\n but returned output is \\n\\n{1}.'\n",
"        .format(true_bias_grads, layer_bias_grads)\n",
"    )\n",
"    return True"
]
},
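{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an optional extra check beyond the fixed reference values above, `bprop` can also be compared against finite differences of `fprop`: with a loss equal to the sum of the outputs, the gradient with respect to the inputs should match `bprop` called with all-ones `grads_wrt_outputs`. The helper below is a sketch of that idea; the name `check_bprop_finite_diff` and the tolerance are ours, not part of the provided tests."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def check_bprop_finite_diff(layer_class, eps=1e-5):\n",
"    # Sanity-check bprop against forward finite differences of fprop.\n",
"    rng = np.random.RandomState(27)\n",
"    inputs = rng.normal(size=(2, 3, 4, 4))\n",
"    layer = layer_class(\n",
"        num_input_channels=3, num_output_channels=2,\n",
"        input_dim_1=4, input_dim_2=4,\n",
"        kernel_dim_1=2, kernel_dim_2=2\n",
"    )\n",
"    outputs = layer.fprop(inputs)\n",
"    # For loss = outputs.sum(), grads_wrt_outputs is all ones.\n",
"    grads = layer.bprop(inputs, outputs, np.ones_like(outputs))\n",
"    fd_grads = np.empty_like(inputs)\n",
"    it = np.nditer(inputs, flags=['multi_index'])\n",
"    while not it.finished:\n",
"        perturbed = inputs.copy()\n",
"        perturbed[it.multi_index] += eps\n",
"        fd_grads[it.multi_index] = (\n",
"            layer.fprop(perturbed).sum() - outputs.sum()) / eps\n",
"        it.iternext()\n",
"    assert np.allclose(grads, fd_grads, atol=1e-4), (\n",
"        'bprop gradients disagree with finite differences')\n",
"    return True"
]
},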
{
"cell_type": "markdown",
"metadata": {},
"source": [
"An example of using the test functions is given in the cell below. This assumes your implementation is of a convolution (rather than cross-correlation) operation. If the implementation is correct, running the cell should print `All tests passed.`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"all_correct = test_conv_layer_fprop(ConvolutionalLayer, False)\n",
"all_correct &= test_conv_layer_bprop(ConvolutionalLayer, False)\n",
"all_correct &= test_conv_layer_grad_wrt_params(ConvolutionalLayer, False)\n",
"if all_correct:\n",
"    print('All tests passed.')"
]
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python [conda env:mlp]",
"language": "python",
"name": "conda-env-mlp-py"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.12"
}
},
"nbformat": 4,
"nbformat_minor": 1
}