You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
nsw-2016-storm-impact/notebooks/03_dune_to_vs_runup.ipynb

555 lines
17 KiB
Plaintext

{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Investigate how dune toe compares to R_high"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-03T03:38:44.538853Z",
"start_time": "2018-12-03T03:38:44.189514Z"
}
},
"outputs": [],
"source": [
"%matplotlib inline\n",
"%reload_ext autoreload\n",
"%autoreload"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-03T03:38:46.213387Z",
"start_time": "2018-12-03T03:38:44.781382Z"
}
},
"outputs": [],
"source": [
"from IPython.core.debugger import set_trace\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"import os\n",
"\n",
"import plotly\n",
"import plotly.graph_objs as go\n",
"import plotly.plotly as py\n",
"import plotly.tools as tls\n",
"import plotly.figure_factory as ff\n",
"import plotly.io as pio"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load data\n",
"Load data from the `./data/interim/` folder and parse into `pandas` dataframes."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-03T03:38:53.297184Z",
"start_time": "2018-12-03T03:38:46.365829Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Importing profiles.csv\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\z5189959\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\numpy\\lib\\arraysetops.py:472: FutureWarning:\n",
"\n",
"elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Importing profile_features.csv\n",
"Importing impacts_forecasted_foreshore_slope_sto06.csv\n",
"Importing impacts_forecasted_mean_slope_sto06.csv\n",
"Importing impacts_observed.csv\n",
"Importing twl_foreshore_slope_sto06.csv\n",
"Importing twl_mean_slope_sto06.csv\n",
"Done!\n"
]
}
],
"source": [
"def df_from_csv(csv, index_col, data_folder='../data/interim'):\n",
" print('Importing {}'.format(csv))\n",
" return pd.read_csv(os.path.join(data_folder,csv), index_col=index_col)\n",
"\n",
"df_profiles = df_from_csv('profiles.csv', index_col=[0, 1, 2])\n",
"df_profile_features = df_from_csv('profile_features.csv', index_col=[0])\n",
"\n",
"impacts = {\n",
" 'forecasted': {\n",
" 'foreshore_slope_sto06': df_from_csv('impacts_forecasted_foreshore_slope_sto06.csv', index_col=[0]),\n",
" 'mean_slope_sto06': df_from_csv('impacts_forecasted_mean_slope_sto06.csv', index_col=[0]),\n",
" },\n",
" 'observed': df_from_csv('impacts_observed.csv', index_col=[0])\n",
" }\n",
"\n",
"twls = {\n",
" 'forecasted': {\n",
" 'foreshore_slope_sto06': df_from_csv('twl_foreshore_slope_sto06.csv', index_col=[0, 1]),\n",
" 'mean_slope_sto06':df_from_csv('twl_mean_slope_sto06.csv', index_col=[0, 1]),\n",
" }\n",
"}\n",
"\n",
"print('Done!')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Compare underpredicted cases"
]
},
{
"cell_type": "code",
"execution_count": 39,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-03T04:05:30.984007Z",
"start_time": "2018-12-03T04:05:30.805508Z"
}
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>dune_toe_z</th>\n",
" <th>R_high</th>\n",
" <th>diff</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>AVOCAn0005</th>\n",
" <td>3.306</td>\n",
" <td>3.260440</td>\n",
" <td>-0.045560</td>\n",
" </tr>\n",
" <tr>\n",
" <th>AVOCAn0008</th>\n",
" <td>3.507</td>\n",
" <td>3.220084</td>\n",
" <td>-0.286916</td>\n",
" </tr>\n",
" <tr>\n",
" <th>BILG0005</th>\n",
" <td>4.807</td>\n",
" <td>3.293445</td>\n",
" <td>-1.513555</td>\n",
" </tr>\n",
" <tr>\n",
" <th>BLUEYS0001</th>\n",
" <td>3.064</td>\n",
" <td>2.800144</td>\n",
" <td>-0.263856</td>\n",
" </tr>\n",
" <tr>\n",
" <th>BLUEYS0002</th>\n",
" <td>2.929</td>\n",
" <td>2.470641</td>\n",
" <td>-0.458359</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" dune_toe_z R_high diff\n",
"AVOCAn0005 3.306 3.260440 -0.045560\n",
"AVOCAn0008 3.507 3.220084 -0.286916\n",
"BILG0005 4.807 3.293445 -1.513555\n",
"BLUEYS0001 3.064 2.800144 -0.263856\n",
"BLUEYS0002 2.929 2.470641 -0.458359"
]
},
"execution_count": 39,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Find site_ids where the forecast has been underpredicted\n",
"set1 = set(impacts['forecasted']['mean_slope_sto06'].query(\"storm_regime == 'swash'\").index.get_level_values('site_id'))\n",
"set2 = set(impacts['observed'].query(\"storm_regime == 'collision'\").index.get_level_values('site_id'))\n",
"site_ids = list(set1.intersection(set2))\n",
"\n",
"# Get dune toes at these sites and predicted max R_high\n",
"df_toes = df_profile_features.loc[site_ids].query('profile_type==\"prestorm\"').dune_toe_z\n",
"df_R_highs = twls['forecasted']['mean_slope_sto06'].loc[site_ids].groupby('site_id')['R_high'].max()\n",
"\n",
"# Join into one dataframe\n",
"df_twl_toes = pd.concat([df_toes, df_R_highs],axis=1,sort=True)\n",
"df_twl_toes['diff'] = df_twl_toes['R_high'] - df_twl_toes['dune_toe_z']\n",
"df_twl_toes.head()\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now let's plot the comparison between our R_high TWL values and the dune toes to see how far off they were."
]
},
{
"cell_type": "code",
"execution_count": 41,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-03T04:08:15.732169Z",
"start_time": "2018-12-03T04:08:15.656966Z"
}
},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "35b9331242af473dba2f91761c307022",
"version_major": 2,
"version_minor": 0
},
"text/html": [
"<p>Failed to display Jupyter Widget of type <code>FigureWidget</code>.</p>\n",
"<p>\n",
" If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean\n",
" that the widgets JavaScript is still loading. If this message persists, it\n",
" likely means that the widgets JavaScript library is either not installed or\n",
" not enabled. See the <a href=\"https://ipywidgets.readthedocs.io/en/stable/user_install.html\">Jupyter\n",
" Widgets Documentation</a> for setup instructions.\n",
"</p>\n",
"<p>\n",
" If you're reading this message in another frontend (for example, a static\n",
" rendering on GitHub or <a href=\"https://nbviewer.jupyter.org/\">NBViewer</a>),\n",
" it may mean that your frontend doesn't currently support widgets.\n",
"</p>\n"
],
"text/plain": [
"FigureWidget({\n",
" 'data': [{'type': 'histogram',\n",
" 'uid': '75f0d11f-9242-4fc7-b433-1f04e1e37ba6',\n",
" 'y': [-0.045560088746212646, -0.28691603912686325,\n",
" -1.5135547360075963, ..., -0.4613631587476821,\n",
" -0.5212332930925054, -0.3948507473332721]}],\n",
" 'layout': {'bargap': 0.2,\n",
" 'bargroupgap': 0.1,\n",
" 'title': 'R_high - D_low<br>Observed Collision, Forecasted Swash',\n",
" 'xaxis': {'title': 'Count'},\n",
" 'yaxis': {'title': 'z (m AHD)'}}\n",
"})"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"trace1 = go.Histogram(y=df_twl_toes['diff'].tolist())\n",
"\n",
"layout = go.Layout(\n",
"    # diff = R_high - dune_toe_z (i.e. R_high - D_low), so the title must use\n",
"    # that sign convention: negative values = runup underpredicted the dune toe.\n",
"    title='R_high - D_low<br>Observed Collision, Forecasted Swash',\n",
"    # Histogram is fed via y=, so bars are horizontal: counts along x, z along y.\n",
"    yaxis=dict(\n",
"        title='z (m AHD)'\n",
"    ),\n",
"    xaxis=dict(\n",
"        title='Count'\n",
"    ),\n",
"    bargap=0.2,\n",
"    bargroupgap=0.1\n",
")\n",
"\n",
"g_plot = go.FigureWidget(data=[trace1], layout=layout)\n",
"g_plot"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The above plot shows that the R_high value for most of the incorrectly forecasted collision regimes was typically underpredicted by less than 0.5 m."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Compare overpredicted cases"
]
},
{
"cell_type": "code",
"execution_count": 42,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-03T04:08:56.128806Z",
"start_time": "2018-12-03T04:08:55.894182Z"
}
},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>dune_toe_z</th>\n",
" <th>R_high</th>\n",
" <th>diff</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>AVOCAn0004</th>\n",
" <td>3.178</td>\n",
" <td>3.416988</td>\n",
" <td>0.238988</td>\n",
" </tr>\n",
" <tr>\n",
" <th>BOOM0004</th>\n",
" <td>3.065</td>\n",
" <td>3.074980</td>\n",
" <td>0.009980</td>\n",
" </tr>\n",
" <tr>\n",
" <th>BOOM0011</th>\n",
" <td>2.771</td>\n",
" <td>6.491824</td>\n",
" <td>3.720824</td>\n",
" </tr>\n",
" <tr>\n",
" <th>BOOM0012</th>\n",
" <td>2.796</td>\n",
" <td>3.148087</td>\n",
" <td>0.352087</td>\n",
" </tr>\n",
" <tr>\n",
" <th>CATHIE0001</th>\n",
" <td>2.780</td>\n",
" <td>3.522792</td>\n",
" <td>0.742792</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" dune_toe_z R_high diff\n",
"AVOCAn0004 3.178 3.416988 0.238988\n",
"BOOM0004 3.065 3.074980 0.009980\n",
"BOOM0011 2.771 6.491824 3.720824\n",
"BOOM0012 2.796 3.148087 0.352087\n",
"CATHIE0001 2.780 3.522792 0.742792"
]
},
"execution_count": 42,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Find site_ids where the forecast has been overpredicted\n",
"set1 = set(impacts['forecasted']['mean_slope_sto06'].query(\"storm_regime == 'collision'\").index.get_level_values('site_id'))\n",
"set2 = set(impacts['observed'].query(\"storm_regime == 'swash'\").index.get_level_values('site_id'))\n",
"site_ids = list(set1.intersection(set2))\n",
"\n",
"# Get dune toes at these sites and predicted max R_high\n",
"df_toes = df_profile_features.loc[site_ids].query('profile_type==\"prestorm\"').dune_toe_z\n",
"df_R_highs = twls['forecasted']['mean_slope_sto06'].loc[site_ids].groupby('site_id')['R_high'].max()\n",
"\n",
"# Join into one dataframe\n",
"df_twl_toes = pd.concat([df_toes, df_R_highs],axis=1,sort=True)\n",
"df_twl_toes['diff'] = df_twl_toes['R_high'] - df_twl_toes['dune_toe_z']\n",
"df_twl_toes.head()\n"
]
},
{
"cell_type": "code",
"execution_count": 47,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-03T04:14:46.601092Z",
"start_time": "2018-12-03T04:14:46.522883Z"
}
},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "3ea49a4ac07c4ea19bbb4532326ff94c",
"version_major": 2,
"version_minor": 0
},
"text/html": [
"<p>Failed to display Jupyter Widget of type <code>FigureWidget</code>.</p>\n",
"<p>\n",
" If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean\n",
" that the widgets JavaScript is still loading. If this message persists, it\n",
" likely means that the widgets JavaScript library is either not installed or\n",
" not enabled. See the <a href=\"https://ipywidgets.readthedocs.io/en/stable/user_install.html\">Jupyter\n",
" Widgets Documentation</a> for setup instructions.\n",
"</p>\n",
"<p>\n",
" If you're reading this message in another frontend (for example, a static\n",
" rendering on GitHub or <a href=\"https://nbviewer.jupyter.org/\">NBViewer</a>),\n",
" it may mean that your frontend doesn't currently support widgets.\n",
"</p>\n"
],
"text/plain": [
"FigureWidget({\n",
" 'data': [{'type': 'histogram',\n",
" 'uid': '4a284474-2be1-4fd7-87d5-25364cc78df4',\n",
" 'y': [0.23898814460475037, 0.009980312001434566, 3.720823710344608,\n",
" ..., 1.5720238663972683, 0.912998680585452, 1.1419977620500927]}],\n",
" 'layout': {'bargap': 0.2,\n",
" 'bargroupgap': 0.1,\n",
" 'title': 'R_high - D_low<br>Observed Swash, Forecasted Collision',\n",
" 'xaxis': {'title': 'Count'},\n",
" 'yaxis': {'title': 'z (m AHD)'}}\n",
"})"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"trace1 = go.Histogram(y=df_twl_toes['diff'].tolist())\n",
"\n",
"layout = go.Layout(\n",
"    # diff = R_high - dune_toe_z (i.e. R_high - D_low), so the title must use\n",
"    # that sign convention: positive values = runup overpredicted the dune toe.\n",
"    title='R_high - D_low<br>Observed Swash, Forecasted Collision',\n",
"    # Histogram is fed via y=, so bars are horizontal: counts along x, z along y.\n",
"    yaxis=dict(\n",
"        title='z (m AHD)'\n",
"    ),\n",
"    xaxis=dict(\n",
"        title='Count'\n",
"    ),\n",
"    bargap=0.2,\n",
"    bargroupgap=0.1\n",
")\n",
"\n",
"g_plot = go.FigureWidget(data=[trace1], layout=layout)\n",
"g_plot"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The errors when we forecast collision but observe swash are much greater than when we forecast swash and observe collision. For this case, errors in excess of 1.0 m are common. Why is this?"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"toc": {
"base_numbering": 1,
"nav_menu": {},
"number_sections": true,
"sideBar": true,
"skip_h1_title": false,
"title_cell": "Table of Contents",
"title_sidebar": "Contents",
"toc_cell": false,
"toc_position": {},
"toc_section_display": true,
"toc_window_display": false
},
"varInspector": {
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"delete_cmd_postfix": "",
"delete_cmd_prefix": "del ",
"library": "var_list.py",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"delete_cmd_postfix": ") ",
"delete_cmd_prefix": "rm(",
"library": "var_list.r",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
],
"window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 2
}