Commit 05c9a892 authored by Jonathan Lee

changed normalization and updated annotations

parent bc9b39a1
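The substantive change in the diff below is to the image-merging cell: the per-channel division by 255 that previously normalized pixel values is dropped, so the max- and average-projection volumes are now concatenated at their raw intensities. A minimal sketch of the updated step, assuming (as in the notebook) that `imgs_max` and `imgs_avg` are lists of NIfTI file paths, is:

```python
import nibabel as nib
import numpy as np

# Hypothetical placeholder paths; in the notebook these lists are built earlier.
imgs_max = ["max_projection_sub01.nii.gz"]
imgs_avg = ["avg_projection_sub01.nii.gz"]

# Load each NIfTI file as a NumPy array.
imgs_max_nib = [nib.load(p).get_fdata() for p in imgs_max]
imgs_avg_nib = [nib.load(p).get_fdata() for p in imgs_avg]

# Updated behaviour: stack the two (128, 184, 1) volumes along the channel
# axis without the previous "/ 255" rescaling.
imgs_merged = [
    np.concatenate((imgs_max_nib[i], imgs_avg_nib[i]), axis=2)
    for i in range(len(imgs_max))
]
# Each merged array has shape (128, 184, 2), matching the notebook output.
```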
@@ -2,9 +2,18 @@
"cells": [
{
"cell_type": "code",
"execution_count": 384,
"execution_count": 1,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/ifshome/jolee/miniconda/lib/python3.8/site-packages/nilearn/datasets/__init__.py:87: FutureWarning: Fetchers from the nilearn.datasets module will be updated in version 0.9 to return python strings instead of bytes and Pandas dataframes instead of Numpy arrays.\n",
" warn(\"Fetchers from the nilearn.datasets module will be \"\n"
]
}
],
"source": [
"# import necessary packages\n",
"from sklearn.preprocessing import LabelBinarizer\n",
@@ -30,7 +39,7 @@
},
{
"cell_type": "code",
"execution_count": 385,
"execution_count": 2,
"metadata": {},
"outputs": [
{
@@ -139,7 +148,7 @@
},
{
"cell_type": "code",
"execution_count": 389,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
@@ -177,7 +186,7 @@
},
{
"cell_type": "code",
"execution_count": 390,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
@@ -195,7 +204,7 @@
},
{
"cell_type": "code",
"execution_count": 391,
"execution_count": 5,
"metadata": {},
"outputs": [
{
@@ -204,7 +213,7 @@
"(128, 184, 1)"
]
},
"execution_count": 391,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -219,7 +228,7 @@
},
{
"cell_type": "code",
"execution_count": 421,
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
@@ -237,14 +246,13 @@
"for i in imgs_avg:\n",
" imgs_avg_nib.append(nib.load(i).get_fdata())\n",
"\n",
"# divide by 255 to normalize pixel values\n",
"for i in range(len(imgs_max)): \n",
" imgs_merged.append(np.concatenate((imgs_max_nib[i] / 255, imgs_avg_nib[i] / 255), axis=2))"
" imgs_merged.append(np.concatenate((imgs_max_nib[i], imgs_avg_nib[i]), axis=2))"
]
},
{
"cell_type": "code",
"execution_count": 422,
"execution_count": 7,
"metadata": {},
"outputs": [
{
@@ -253,7 +261,7 @@
"(128, 184, 2)"
]
},
"execution_count": 422,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -266,14 +274,14 @@
},
{
"cell_type": "code",
"execution_count": 423,
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"<ipython-input-423-acee21f7a78c>:8: MatplotlibDeprecationWarning: Passing non-integers as three-element position specification is deprecated since 3.3 and will be removed two minor releases later.\n",
"<ipython-input-8-acee21f7a78c>:8: MatplotlibDeprecationWarning: Passing non-integers as three-element position specification is deprecated since 3.3 and will be removed two minor releases later.\n",
" plt.subplot(5 / columns + 1, columns, i + 1)\n"
]
},
@@ -304,14 +312,14 @@
},
{
"cell_type": "code",
"execution_count": 424,
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"<ipython-input-424-5117e302ece2>:8: MatplotlibDeprecationWarning: Passing non-integers as three-element position specification is deprecated since 3.3 and will be removed two minor releases later.\n",
"<ipython-input-9-5117e302ece2>:8: MatplotlibDeprecationWarning: Passing non-integers as three-element position specification is deprecated since 3.3 and will be removed two minor releases later.\n",
" plt.subplot(5 / columns + 1, columns, i + 1)\n"
]
},
@@ -342,7 +350,7 @@
},
{
"cell_type": "code",
"execution_count": 445,
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
@@ -356,16 +364,16 @@
},
{
"cell_type": "code",
"execution_count": 446,
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<matplotlib.image.AxesImage at 0x7fe71c447ac0>"
"<matplotlib.image.AxesImage at 0x7f297d00d850>"
]
},
"execution_count": 446,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
},
@@ -391,7 +399,7 @@
},
{
"cell_type": "code",
"execution_count": 458,
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
@@ -419,7 +427,7 @@
},
{
"cell_type": "code",
"execution_count": 451,
"execution_count": 13,
"metadata": {},
"outputs": [
{
@@ -444,7 +452,7 @@
},
{
"cell_type": "code",
"execution_count": 452,
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
@@ -462,6 +470,7 @@
"from tensorflow.keras.layers import Input\n",
"from tensorflow.keras.models import Model\n",
"\n",
"# double check with regress is set to false here \n",
"def create_mlp(dim, regress=False):\n",
" \n",
" # define our MLP network\n",
@@ -497,7 +506,7 @@
},
{
"cell_type": "code",
"execution_count": 453,
"execution_count": 15,
"metadata": {
"scrolled": false
},
@@ -516,88 +525,88 @@
"text": [
"[INFO] training model...\n",
"Epoch 1/40\n",
"23/23 [==============================] - 2s 40ms/step - loss: 5907202.5000 - val_loss: 85.3816\n",
"23/23 [==============================] - 2s 43ms/step - loss: 739162.7500 - val_loss: 80.7780\n",
"Epoch 2/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 9870156.0000 - val_loss: 82.9214\n",
"23/23 [==============================] - 1s 33ms/step - loss: 108194.5469 - val_loss: 77.7209\n",
"Epoch 3/40\n",
"23/23 [==============================] - 1s 32ms/step - loss: 10330770.0000 - val_loss: 89.8360\n",
"23/23 [==============================] - 1s 33ms/step - loss: 4658813.0000 - val_loss: 175.1640\n",
"Epoch 4/40\n",
"23/23 [==============================] - 1s 32ms/step - loss: 1725934.7500 - val_loss: 94.6955\n",
"23/23 [==============================] - 1s 34ms/step - loss: 9749125.0000 - val_loss: 248.0767\n",
"Epoch 5/40\n",
"23/23 [==============================] - 1s 32ms/step - loss: 709866.3750 - val_loss: 94.1317\n",
"23/23 [==============================] - 1s 33ms/step - loss: 4491801.0000 - val_loss: 274.3863\n",
"Epoch 6/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 1453704.2500 - val_loss: 82.3401\n",
"23/23 [==============================] - 1s 33ms/step - loss: 254635.0469 - val_loss: 380.1227\n",
"Epoch 7/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 1854183.1250 - val_loss: 110.8519\n",
"23/23 [==============================] - 1s 33ms/step - loss: 82372.6016 - val_loss: 377.8203\n",
"Epoch 8/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 3644127.0000 - val_loss: 111.8308\n",
"23/23 [==============================] - 1s 33ms/step - loss: 5235496.5000 - val_loss: 294.4142\n",
"Epoch 9/40\n",
"23/23 [==============================] - 1s 36ms/step - loss: 1630926.7500 - val_loss: 112.9966\n",
"23/23 [==============================] - 1s 33ms/step - loss: 499993.4688 - val_loss: 302.4116\n",
"Epoch 10/40\n",
"23/23 [==============================] - 1s 35ms/step - loss: 315047.1875 - val_loss: 81.3186\n",
"23/23 [==============================] - 1s 33ms/step - loss: 3674149.7500 - val_loss: 301.1278\n",
"Epoch 11/40\n",
"23/23 [==============================] - 1s 34ms/step - loss: 799771.1250 - val_loss: 75.0370\n",
"23/23 [==============================] - 1s 33ms/step - loss: 236464.9531 - val_loss: 246.8560\n",
"Epoch 12/40\n",
"23/23 [==============================] - 1s 34ms/step - loss: 1064065.3750 - val_loss: 77.3539\n",
"23/23 [==============================] - 1s 33ms/step - loss: 19876822.0000 - val_loss: 69.6813\n",
"Epoch 13/40\n",
"23/23 [==============================] - 1s 34ms/step - loss: 1303266.5000 - val_loss: 95.6228\n",
"23/23 [==============================] - 1s 33ms/step - loss: 2988911.0000 - val_loss: 80.7738\n",
"Epoch 14/40\n",
"23/23 [==============================] - 1s 34ms/step - loss: 2436036.5000 - val_loss: 98.8011\n",
"23/23 [==============================] - 1s 33ms/step - loss: 9260542.0000 - val_loss: 85.6476\n",
"Epoch 15/40\n",
"23/23 [==============================] - 1s 34ms/step - loss: 9746630.0000 - val_loss: 77.0218\n",
"23/23 [==============================] - 1s 33ms/step - loss: 1247110.8750 - val_loss: 88.1314\n",
"Epoch 16/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 21851116.0000 - val_loss: 80.2659\n",
"23/23 [==============================] - 1s 33ms/step - loss: 2866541.7500 - val_loss: 90.6115\n",
"Epoch 17/40\n",
"23/23 [==============================] - 1s 35ms/step - loss: 10444255.0000 - val_loss: 73.1865\n",
"23/23 [==============================] - 1s 36ms/step - loss: 158247.6875 - val_loss: 88.9790\n",
"Epoch 18/40\n",
"23/23 [==============================] - 1s 35ms/step - loss: 1842511.8750 - val_loss: 130.2612\n",
"23/23 [==============================] - 1s 34ms/step - loss: 410635.1250 - val_loss: 90.1120\n",
"Epoch 19/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 846315.1875 - val_loss: 81.5081\n",
"23/23 [==============================] - 1s 34ms/step - loss: 8200141.0000 - val_loss: 90.3297\n",
"Epoch 20/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 7538889.0000 - val_loss: 79.4730\n",
"23/23 [==============================] - 1s 34ms/step - loss: 2934102.5000 - val_loss: 90.0181\n",
"Epoch 21/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 3532151.0000 - val_loss: 69.8874\n",
"23/23 [==============================] - 1s 33ms/step - loss: 4385727.5000 - val_loss: 91.2359\n",
"Epoch 22/40\n",
"23/23 [==============================] - 1s 34ms/step - loss: 1252445.0000 - val_loss: 114.9249\n",
"23/23 [==============================] - 1s 35ms/step - loss: 7016348.5000 - val_loss: 92.0402\n",
"Epoch 23/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 2978219.5000 - val_loss: 108.4571\n",
"23/23 [==============================] - 1s 37ms/step - loss: 1570839.8750 - val_loss: 92.5150\n",
"Epoch 24/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 922540.4375 - val_loss: 81.8545\n",
"23/23 [==============================] - 1s 35ms/step - loss: 578274.3125 - val_loss: 87.3443\n",
"Epoch 25/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 3633383.0000 - val_loss: 69.3617\n",
"23/23 [==============================] - 1s 35ms/step - loss: 561783.6875 - val_loss: 87.6562\n",
"Epoch 26/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 1673403.2500 - val_loss: 68.7792\n",
"23/23 [==============================] - 1s 41ms/step - loss: 9761388.0000 - val_loss: 88.0854\n",
"Epoch 27/40\n",
"23/23 [==============================] - 1s 34ms/step - loss: 3747849.2500 - val_loss: 66.1937\n",
"23/23 [==============================] - 1s 34ms/step - loss: 671722.1250 - val_loss: 87.9868\n",
"Epoch 28/40\n",
"23/23 [==============================] - 1s 34ms/step - loss: 5839762.0000 - val_loss: 61.7886\n",
"23/23 [==============================] - 1s 35ms/step - loss: 291089.4688 - val_loss: 88.8749\n",
"Epoch 29/40\n",
"23/23 [==============================] - 1s 35ms/step - loss: 6155945.0000 - val_loss: 57.3682\n",
"23/23 [==============================] - 1s 34ms/step - loss: 6084078.0000 - val_loss: 90.0092\n",
"Epoch 30/40\n",
"23/23 [==============================] - 1s 36ms/step - loss: 9463727.0000 - val_loss: 61.3495\n",
"23/23 [==============================] - 1s 34ms/step - loss: 674623.6250 - val_loss: 91.0310\n",
"Epoch 31/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 267309.3750 - val_loss: 77.1904\n",
"23/23 [==============================] - 1s 33ms/step - loss: 3171207.5000 - val_loss: 92.0963\n",
"Epoch 32/40\n",
"23/23 [==============================] - 1s 34ms/step - loss: 5791735.5000 - val_loss: 76.2954\n",
"23/23 [==============================] - 1s 33ms/step - loss: 1611184.8750 - val_loss: 94.2985\n",
"Epoch 33/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 1950277.5000 - val_loss: 143.1821\n",
"23/23 [==============================] - 1s 32ms/step - loss: 3172797.7500 - val_loss: 98.5941\n",
"Epoch 34/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 288345.4375 - val_loss: 111.6001\n",
"23/23 [==============================] - 1s 34ms/step - loss: 423511.1875 - val_loss: 104.0902\n",
"Epoch 35/40\n",
"23/23 [==============================] - 1s 34ms/step - loss: 15917235.0000 - val_loss: 93.8450\n",
"23/23 [==============================] - 1s 35ms/step - loss: 126338.9531 - val_loss: 102.0765\n",
"Epoch 36/40\n",
"23/23 [==============================] - 1s 33ms/step - loss: 6554179.5000 - val_loss: 89.9294\n",
"23/23 [==============================] - 1s 34ms/step - loss: 520707.8750 - val_loss: 101.0857\n",
"Epoch 37/40\n",
"23/23 [==============================] - 1s 34ms/step - loss: 916703.4375 - val_loss: 86.5181\n",
"23/23 [==============================] - 1s 34ms/step - loss: 1672894.8750 - val_loss: 100.0803\n",
"Epoch 38/40\n",
"23/23 [==============================] - 1s 34ms/step - loss: 1636418.8750 - val_loss: 81.6224\n",
"23/23 [==============================] - 1s 34ms/step - loss: 3788303.0000 - val_loss: 100.6476\n",
"Epoch 39/40\n",
"23/23 [==============================] - 1s 34ms/step - loss: 3871.7839 - val_loss: 72.8129\n",
"23/23 [==============================] - 1s 35ms/step - loss: 2577893.5000 - val_loss: 98.6984\n",
"Epoch 40/40\n",
"23/23 [==============================] - 1s 35ms/step - loss: 740887.7500 - val_loss: 73.8067\n",
"23/23 [==============================] - 1s 34ms/step - loss: 1781253.5000 - val_loss: 97.7365\n",
"[INFO] predicting t-score...\n",
"[INFO] average t-score: -1.47828, std t-score: 1.406043864383835\n",
"[INFO] mean absolute percentage error: 73.81%, std: 54.82%\n"
"[INFO] mean absolute percentage error: 97.74%, std: 18.17%\n"
]
}
],
......
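The other change above is an added annotation flagging the `regress` argument of `create_mlp`. The notebook's layer sizes are not visible in this hunk, but a common pattern for such a helper (placeholder sizes below, not necessarily the author's) only appends the single-node linear output when `regress=True`; keeping `regress=False` typically leaves the branch ending in a hidden layer so it can be joined to another branch before a shared regression head:

```python
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

def create_mlp(dim, regress=False):
    """Small fully connected branch; layer sizes here are placeholders."""
    model = Sequential()
    model.add(Dense(8, input_dim=dim, activation="relu"))
    model.add(Dense(4, activation="relu"))
    # The final 1-unit linear layer is only attached when regress=True;
    # with regress=False the caller adds the output head elsewhere.
    if regress:
        model.add(Dense(1, activation="linear"))
    return model
```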