mlpractical/notebooks/DropoutandPenalty_tests.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"import sys\n",
"# sys.path.append('/path/to/mlpractical')\n",
"from mlp.test_methods import test_dropout_layer\n",
"import numpy as np\n",
"\n",
"fprop_test, fprop_output, fprop_correct, \\\n",
"bprop_test, bprop_output, bprop_correct = test_dropout_layer()\n",
"\n",
"assert fprop_test == 1.0, (\n",
"'The dropout layer fprop functionality test failed'\n",
"'Correct output is \\n\\n{0}\\n\\n but returned output is \\n\\n{1}\\n\\n difference is \\n\\n{2}'\n",
".format(fprop_correct, fprop_output, fprop_output-fprop_correct)\n",
")\n",
"\n",
"print(\"Dropout Layer Fprop Functionality Test Passed\")\n",
"\n",
"assert bprop_test == 1.0, (\n",
"'The dropout layer bprop functionality test failed'\n",
"'Correct output is \\n\\n{0}\\n\\n but returned output is \\n\\n{1}\\n\\n difference is \\n\\n{2}'\n",
".format(bprop_correct, bprop_output, bprop_output-bprop_correct)\n",
")\n",
"\n",
"print(\"Dropout Layer Bprop Test Passed\")"
]
},
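{
"cell_type": "markdown",
"metadata": {},
"source": [
"The cell above checks the `fprop` and `bprop` methods of your dropout layer implementation in the `mlp` package. As a rough illustration of the behaviour being exercised (not the framework's actual class: the name, constructor arguments and method signatures below are assumptions), dropout can be sketched as follows: `fprop` samples a binary mask with some inclusion probability and multiplies it into the inputs, and `bprop` multiplies the incoming gradients by the same mask.\n",
"\n",
"```python\n",
"import numpy as np\n",
"\n",
"class ToyDropoutLayer(object):\n",
"    \"\"\"Illustrative dropout sketch; the name and signatures are assumptions.\"\"\"\n",
"\n",
"    def __init__(self, rng, incl_prob=0.5):\n",
"        self.rng = rng              # random number generator used to sample the mask\n",
"        self.incl_prob = incl_prob  # probability of keeping each unit\n",
"        self.mask = None\n",
"\n",
"    def fprop(self, inputs):\n",
"        # Sample a binary mask and zero out the dropped units.\n",
"        self.mask = self.rng.uniform(size=inputs.shape) < self.incl_prob\n",
"        return inputs * self.mask\n",
"\n",
"    def bprop(self, inputs, outputs, grads_wrt_outputs):\n",
"        # Gradients flow only through the units kept in fprop.\n",
"        return grads_wrt_outputs * self.mask\n",
"\n",
"# Example usage with a seeded NumPy generator:\n",
"rng = np.random.RandomState(92019)\n",
"layer = ToyDropoutLayer(rng, incl_prob=0.5)\n",
"outputs = layer.fprop(np.ones((2, 4)))\n",
"```\n",
"\n",
"Note that at evaluation time dropout layers typically do not sample a mask; whether activations are rescaled at training or at test time depends on the convention the coursework asks for, so check the specification rather than this sketch."
]
},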
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from mlp.test_methods import test_L1_Penalty\n",
"\n",
"\n",
"call_test, call_output, call_correct, \\\n",
"grad_test, grad_output, grad_correct = test_L1_Penalty()\n",
"\n",
"\n",
"assert call_test == 1.0, (\n",
"'The call function for L1 Penalty test failed'\n",
"'Correct output is \\n\\n{0}\\n\\n but returned output is \\n\\n{1}\\n\\n difference is \\n\\n{2}'\n",
".format(call_correct, call_output, call_output-call_correct)\n",
")\n",
"\n",
"print(\"L1 Penalty Call Functionality Test Passed\")\n",
"\n",
"assert grad_test == 1.0, (\n",
"'The grad function for L1 Penalty test failed'\n",
"'Correct output is \\n\\n{0}\\n\\n but returned output is \\n\\n{1}\\n\\n difference is \\n\\n{2}'\n",
".format(grad_correct, grad_output, grad_output-grad_correct, grad_output/grad_correct)\n",
")\n",
"\n",
"\n",
"\n",
"print(\"L1 Penalty Grad Function Test Passed\")"
]
},
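{
"cell_type": "markdown",
"metadata": {},
"source": [
"The cell above exercises the `__call__` and `grad` methods of your L1 penalty implementation. For reference, an L1 penalty on a parameter tensor is the sum of absolute values scaled by a coefficient, and its gradient is the coefficient times the element-wise sign. A minimal sketch follows; the class name and interface are assumptions, not the `mlp` package's definitions.\n",
"\n",
"```python\n",
"import numpy as np\n",
"\n",
"class ToyL1Penalty(object):\n",
"    \"\"\"Illustrative L1 penalty sketch; interface details are assumptions.\"\"\"\n",
"\n",
"    def __init__(self, coefficient):\n",
"        self.coefficient = coefficient\n",
"\n",
"    def __call__(self, parameter):\n",
"        # Penalty value: lambda * sum_i |w_i|\n",
"        return self.coefficient * np.sum(np.abs(parameter))\n",
"\n",
"    def grad(self, parameter):\n",
"        # Gradient: lambda * sign(w), element-wise\n",
"        return self.coefficient * np.sign(parameter)\n",
"```"
]
},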
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from mlp.test_methods import test_L2_Penalty\n",
"\n",
"\n",
"call_test, call_output, call_correct, \\\n",
"grad_test, grad_output, grad_correct = test_L2_Penalty()\n",
"\n",
"\n",
"assert call_test == 1.0, (\n",
"'The call function for L2 Penalty test failed'\n",
"'Correct output is \\n\\n{0}\\n\\n but returned output is \\n\\n{1}\\n\\n difference is \\n\\n{2}'\n",
".format(call_correct, call_output, call_output-call_correct)\n",
")\n",
"\n",
"print(\"L2 Penalty Call Functionality Test Passed\")\n",
"\n",
"assert grad_test == 1.0, (\n",
"'The grad function for L2 Penalty test failed'\n",
"'Correct output is \\n\\n{0}\\n\\n but returned output is \\n\\n{1}\\n\\n difference is \\n\\n{2}'\n",
".format(grad_correct, grad_output, grad_output-grad_correct, grad_output/grad_correct)\n",
")\n",
"\n",
"\n",
"\n",
"print(\"L2 Penalty Grad Function Test Passed\")"
]
}
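,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The final cell tests the L2 penalty in the same way. By analogy with the L1 sketch above, an L2 penalty is commonly defined as half the coefficient times the sum of squared parameter values, with gradient equal to the coefficient times the parameters themselves; the exact scaling convention expected by `test_L2_Penalty` is set by the `mlp` package, so treat the factor of 0.5 here as an assumption.\n",
"\n",
"```python\n",
"import numpy as np\n",
"\n",
"class ToyL2Penalty(object):\n",
"    \"\"\"Illustrative L2 penalty sketch; interface details are assumptions.\"\"\"\n",
"\n",
"    def __init__(self, coefficient):\n",
"        self.coefficient = coefficient\n",
"\n",
"    def __call__(self, parameter):\n",
"        # Penalty value: 0.5 * lambda * sum_i w_i ** 2\n",
"        return 0.5 * self.coefficient * np.sum(parameter ** 2)\n",
"\n",
"    def grad(self, parameter):\n",
"        # Gradient: lambda * w, element-wise\n",
"        return self.coefficient * parameter\n",
"```"
]
}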
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 1
}