You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
615 lines
20 KiB
Plaintext
615 lines
20 KiB
Plaintext
{
|
|
"cells": [
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"# Check change in mean slope\n",
|
|
"- Check the effect of changes in prestorm and poststorm mean slope.\n",
|
|
"- If there is a large berm, the prestorm mean slope (between dune toe and MHW) could be too small, and underpredict wave runup and TWL.\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Setup notebook"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Enable autoreloading of our modules. \n",
|
|
"# Most of the code will be located in the /src/ folder, \n",
|
|
"# and then called from the notebook.\n",
|
|
"%matplotlib inline\n",
|
|
"%reload_ext autoreload\n",
|
|
"%autoreload 2"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"from IPython.core.debugger import set_trace\n",
|
|
"\n",
|
|
"import pandas as pd\n",
|
|
"import numpy as np\n",
|
|
"import os\n",
|
|
"\n",
|
|
"import plotly\n",
|
|
"import plotly.graph_objs as go\n",
|
|
"import plotly.plotly as py\n",
|
|
"import plotly.tools as tools\n",
|
|
"import plotly.figure_factory as ff\n",
|
|
"import plotly.io as pio\n",
|
|
"\n",
|
|
"import itertools\n",
|
|
"\n",
|
|
"import matplotlib\n",
|
|
"from matplotlib import cm\n",
|
|
"import colorlover as cl\n",
|
|
"\n",
|
|
"from ipywidgets import widgets, Output\n",
|
|
"from IPython.display import display, clear_output, Image, HTML\n",
|
|
"import matplotlib.pyplot as plt\n",
|
|
"from sklearn.metrics import confusion_matrix"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Matplot lib default settings\n",
|
|
"plt.rcParams[\"figure.figsize\"] = (10,6)\n",
|
|
"plt.rcParams['axes.grid']=True\n",
|
|
"plt.rcParams['grid.alpha'] = 0.5\n",
|
|
"plt.rcParams['grid.color'] = \"grey\"\n",
|
|
"plt.rcParams['grid.linestyle'] = \"--\"\n",
|
|
"plt.rcParams['axes.grid']=True\n",
|
|
"\n",
|
|
"# https://stackoverflow.com/a/20709149\n",
|
|
"matplotlib.rcParams['text.usetex'] = True\n",
|
|
"\n",
|
|
"matplotlib.rcParams['text.latex.preamble'] = [\n",
|
|
" r'\\usepackage{siunitx}', # i need upright \\micro symbols, but you need...\n",
|
|
" r'\\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts\n",
|
|
" r'\\usepackage{helvet}', # set the normal font here\n",
|
|
" r'\\usepackage{amsmath}',\n",
|
|
" r'\\usepackage{sansmath}', # load up the sansmath so that math -> helvet\n",
|
|
" r'\\sansmath', # <- tricky! -- gotta actually tell tex to use!\n",
|
|
"] "
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Import data\n",
|
|
"Import our data into pandas Dataframes for the analysis. Data files are `.csv` files which are stored in the `./data/interim/` folder."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"def df_from_csv(csv, index_col, data_folder='../data/interim'):\n",
|
|
" print('Importing {}'.format(csv))\n",
|
|
" return pd.read_csv(os.path.join(data_folder,csv), index_col=index_col)\n",
|
|
"\n",
|
|
"df_waves = df_from_csv('waves.csv', index_col=[0, 1])\n",
|
|
"df_tides = df_from_csv('tides.csv', index_col=[0, 1])\n",
|
|
"df_profiles = df_from_csv('profiles.csv', index_col=[0, 1, 2])\n",
|
|
"df_sites = df_from_csv('sites.csv', index_col=[0])\n",
|
|
"df_profile_features_crest_toes = df_from_csv('profile_features_crest_toes.csv', index_col=[0,1])\n",
|
|
"\n",
|
|
"# Note that the forecasted data sets should be in the same order for impacts and twls\n",
|
|
"impacts = {\n",
|
|
" 'forecasted': {\n",
|
|
" 'foreshore_slope_sto06': df_from_csv('impacts_forecasted_foreshore_slope_sto06.csv', index_col=[0]),\n",
|
|
" 'mean_slope_sto06': df_from_csv('impacts_forecasted_mean_slope_sto06.csv', index_col=[0]),\n",
|
|
" },\n",
|
|
" 'observed': df_from_csv('impacts_observed.csv', index_col=[0])\n",
|
|
" }\n",
|
|
"\n",
|
|
"\n",
|
|
"twls = {\n",
|
|
" 'forecasted': {\n",
|
|
" 'foreshore_slope_sto06': df_from_csv('twl_foreshore_slope_sto06.csv', index_col=[0, 1]),\n",
|
|
" 'mean_slope_sto06':df_from_csv('twl_mean_slope_sto06.csv', index_col=[0, 1]),\n",
|
|
" }\n",
|
|
"}\n",
|
|
"print('Done!')"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"# Plot prestorm vs poststorm mean slopes\n",
|
|
"Prestorm slopes have already been calculated as part of the TWL forecasting, however we'll need to extract the poststorm mean slopes from our profiles at each site."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Prestorm slopes are easy as we have already calculated this as part of the \n",
|
|
"df_slopes_prestorm = twls['forecasted']['mean_slope_sto06'].groupby('site_id').head(1).reset_index().set_index(['site_id']).beta.to_frame()\n",
|
|
"\n",
|
|
"# Get x and z at mhw (z=0.7m) for each site\n",
|
|
"z_mhw = 0.7\n",
|
|
"mhw_poststorm = []\n",
|
|
"for site, df in df_profiles.xs('poststorm', level='profile_type').groupby('site_id'):\n",
|
|
" df = df.dropna(subset=['z'])\n",
|
|
" df = df.iloc[(df['z']-z_mhw).abs().argsort().head(1)].reset_index()\n",
|
|
" df = df.iloc[0]\n",
|
|
" mhw_poststorm.append({\n",
|
|
" 'site_id': df.site_id,\n",
|
|
" 'x_mhw': df.x,\n",
|
|
" 'z_mhw': df.z\n",
|
|
" })\n",
|
|
"# break\n",
|
|
"df_mhw_poststorm = pd.DataFrame(mhw_poststorm)\n",
|
|
"df_mhw_poststorm = df_mhw_poststorm.set_index('site_id')\n",
|
|
"\n",
|
|
"# Get x and z at poststorm dune toe for each site\n",
|
|
"df_dune_toe_poststorm = df_profile_features_crest_toes.xs('poststorm', level='profile_type')[['dune_toe_x','dune_toe_z']].copy()\n",
|
|
"\n",
|
|
"# If there is no poststorm dune toe defined, use the dune crest\n",
|
|
"df_dune_crest_poststorm = df_profile_features_crest_toes.xs('poststorm', level='profile_type')[['dune_crest_x','dune_crest_z']]\n",
|
|
"df_dune_toe_poststorm.dune_toe_x = df_dune_toe_poststorm.dune_toe_x.fillna(df_dune_crest_poststorm.dune_crest_x)\n",
|
|
"df_dune_toe_poststorm.dune_toe_z = df_dune_toe_poststorm.dune_toe_z.fillna(df_dune_crest_poststorm.dune_crest_z)\n",
|
|
"\n",
|
|
"\n",
|
|
"# Join df for mhw and dune toe\n",
|
|
"df = df_mhw_poststorm.join(df_dune_toe_poststorm)\n",
|
|
"df['beta'] = -(df['dune_toe_z'] - df['z_mhw']) / (df['dune_toe_x'] -df['x_mhw'])\n",
|
|
"df_slopes_poststorm = df['beta'].to_frame()\n",
|
|
"\n",
|
|
"# Count how many nans\n",
|
|
"print('Number of nans: {}'.format(df_slopes_poststorm.beta.isna().sum()))\n",
|
|
"\n",
|
|
"# Display dataframe\n",
|
|
"print('df_slopes_poststorm:')\n",
|
|
"df_slopes_poststorm"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"Now, let's join our post storm slopes, prestorm slopes, observed and forecasted impacts into one data frame to make it easier to plot."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"dfs = [df_slopes_poststorm.rename(columns={'beta':'poststorm_beta'}),\n",
|
|
" df_slopes_prestorm.rename(columns={'beta':'prestorm_beta'}),\n",
|
|
" impacts['observed']['storm_regime'].to_frame().rename(columns={'storm_regime': 'observed_regime'}),\n",
|
|
" impacts['forecasted']['mean_slope_sto06']['storm_regime'].to_frame().rename(columns={'storm_regime': 'forecasted_regime'})\n",
|
|
" ]\n",
|
|
"\n",
|
|
"df = pd.concat(dfs, axis='columns')\n",
|
|
"df"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"We also should add the change in beach width between prestorm and post storm profiles"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"ele = 0.7\n",
|
|
"data = []\n",
|
|
"for site_id, df_site in df_profiles.groupby('site_id'):\n",
|
|
" \n",
|
|
" # Beach width should be measured from dune toe (or crest if doesn't exist) to MHW\n",
|
|
" \n",
|
|
" dune_toe_x = np.nanmax([\n",
|
|
" df_profile_features_crest_toes.loc[(site_id,'prestorm')].dune_crest_x,\n",
|
|
" df_profile_features_crest_toes.loc[(site_id,'prestorm')].dune_toe_x\n",
|
|
" ])\n",
|
|
" \n",
|
|
" \n",
|
|
" # TODO This probably should take the closest value to ele starting from the seaward end of the profile\n",
|
|
" temp = df_site.xs('prestorm',level='profile_type').dropna(subset=['z'])\n",
|
|
" prestorm_width = temp.iloc[(temp.z - ele).abs().argsort()[0]].name[1] - dune_toe_x\n",
|
|
" \n",
|
|
" temp = df_site.xs('poststorm',level='profile_type').dropna(subset=['z'])\n",
|
|
" poststorm_width = temp.iloc[(temp.z - ele).abs().argsort()[0]].name[1] - dune_toe_x\n",
|
|
" \n",
|
|
" width_change = prestorm_width - poststorm_width\n",
|
|
" data.append(\n",
|
|
" {\n",
|
|
" 'site_id': site_id,\n",
|
|
" 'width_change': width_change,\n",
|
|
" 'prestorm_width': prestorm_width,\n",
|
|
" 'poststorm_width': poststorm_width\n",
|
|
" })\n",
|
|
" \n",
|
|
" \n",
|
|
" \n",
|
|
" \n",
|
|
"df_width_change = pd.DataFrame(data)\n",
|
|
"df_width_change = df_width_change.set_index(['site_id'])\n",
|
|
"\n",
|
|
"# Join with the data\n",
|
|
"df = df.merge(df_width_change, left_index=True, right_index=True)\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": []
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"heading_collapsed": true
|
|
},
|
|
"source": [
|
|
"## Plot our data in a confusion matrix\n",
|
|
"Superseded"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {
|
|
"hidden": true
|
|
},
|
|
"outputs": [],
|
|
"source": []
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {
|
|
"hidden": true
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"fig = tools.make_subplots(\n",
|
|
" rows=2,\n",
|
|
" cols=2,\n",
|
|
" specs=[[{}, {}], [{}, {}]],\n",
|
|
" subplot_titles=('Swash/Swash', 'Swash/Collision', \n",
|
|
" 'Collision/Swash', 'Collision/Collision'),\n",
|
|
" shared_xaxes=True, shared_yaxes=True,)\n",
|
|
"\n",
|
|
"\n",
|
|
"# Loop through combinations of observed/forecasted swash/collision\n",
|
|
"data = []\n",
|
|
"for forecasted_regime, observed_regime in itertools.product(['swash','collision'],repeat=2):\n",
|
|
" \n",
|
|
" # Get data for this combination \n",
|
|
" query = 'forecasted_regime==\"{}\" & observed_regime==\"{}\"'.format(forecasted_regime, observed_regime)\n",
|
|
" df_data = df.query(query)\n",
|
|
" print(query)\n",
|
|
" \n",
|
|
" \n",
|
|
" # Determine which subplot to plot results in\n",
|
|
" if forecasted_regime == 'swash' and observed_regime == 'swash':\n",
|
|
" x_col = 1\n",
|
|
" y_col = 1\n",
|
|
" elif forecasted_regime == 'collision' and observed_regime == 'collision':\n",
|
|
" x_col = 2\n",
|
|
" y_col = 2\n",
|
|
" elif forecasted_regime == 'swash' and observed_regime == 'collision':\n",
|
|
" x_col = 2\n",
|
|
" y_col = 1\n",
|
|
" elif forecasted_regime == 'collision' and observed_regime == 'swash':\n",
|
|
" x_col = 1\n",
|
|
" y_col = 2\n",
|
|
" else:\n",
|
|
" print('something went wrong')\n",
|
|
" continue\n",
|
|
"\n",
|
|
" fig.append_trace(\n",
|
|
" go.Scatter(\n",
|
|
" x=df_data.prestorm_beta,\n",
|
|
" y=df_data.poststorm_beta,\n",
|
|
" text = df_data.index.tolist(),\n",
|
|
" hoverinfo = 'text',\n",
|
|
" mode = 'markers',\n",
|
|
" line = dict(\n",
|
|
" color = ('rgba(22, 22, 22, 0.2)'),\n",
|
|
" width = 0.5,)),\n",
|
|
" x_col,\n",
|
|
" y_col)\n",
|
|
"\n",
|
|
"# layout = go.Layout(\n",
|
|
"# xaxis=dict(domain=[0, 0.45]),\n",
|
|
"# yaxis=dict(\n",
|
|
"# domain=[0, 0.45],\n",
|
|
"# type='log',\n",
|
|
"# ),\n",
|
|
"# xaxis2=dict(domain=[0.55, 1]),\n",
|
|
"# xaxis4=dict(domain=[0.55, 1], anchor='y4'),\n",
|
|
"# yaxis3=dict(\n",
|
|
"# domain=[0.55, 1],\n",
|
|
"# type='log',\n",
|
|
"# ),\n",
|
|
"# yaxis4=dict(\n",
|
|
"# domain=[0.55, 1],\n",
|
|
"# anchor='x4',\n",
|
|
"# type='log',\n",
|
|
"# ))\n",
|
|
"\n",
|
|
"fig['layout'].update(showlegend=False, title='Specs with Subplot Title',height=800,)\n",
|
|
"\n",
|
|
"for ax in ['yaxis','yaxis2']:\n",
|
|
"# fig['layout'][ax]['type']='log'\n",
|
|
" fig['layout'][ax]['range']= [0,0.2]\n",
|
|
"\n",
|
|
"for ax in ['xaxis', 'xaxis2']:\n",
|
|
" fig['layout'][ax]['range']= [0,0.2]\n",
|
|
"\n",
|
|
"go.FigureWidget(fig)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {
|
|
"hidden": true
|
|
},
|
|
"source": [
|
|
"Looking at the above plot:\n",
|
|
"- In general, we can see that the prestorm mean slope is flatter than the poststorm mean slope. This can be explained by the presence of prestorm berms, which increase the prestorm mean slope. During the storm, these berms get eroded and decrease the slope.\n",
|
|
"- **Collision/Collision**: Where we observe and predict collision, we see steeper prestorm slopes. This is to be expected since larger slopes will generate more runup and higher TWLs.\n",
|
|
"- **Swash/Collision**: Where we predict collision but observe swash, we can see that the prestorm mean slopes >0.1 generate high TWLs. \n",
|
|
"\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Plot our data in a confusion matrix\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"cc_mask = (df.observed_regime=='collision') & (df.forecasted_regime=='collision')\n",
"df[cc_mask].loc[df[cc_mask].poststorm_beta+0.05< df[cc_mask].prestorm_beta]"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"f, ([ax1, ax2], [ax3, ax4],) = plt.subplots(\n",
|
|
" 2,\n",
|
|
" 2,\n",
|
|
" sharey=True,\n",
|
|
" sharex=True,\n",
|
|
" figsize=(8, 7))\n",
|
|
"\n",
|
|
"\n",
|
|
"ss_mask = (df.observed_regime=='swash') & (df.forecasted_regime=='swash')\n",
|
|
"sc_mask = (df.observed_regime=='swash') & (df.forecasted_regime=='collision')\n",
|
|
"cs_mask = (df.observed_regime=='collision') & (df.forecasted_regime=='swash')\n",
|
|
"cc_mask = (df.observed_regime=='collision') & (df.forecasted_regime=='collision')\n",
|
|
"\n",
|
|
"# Define colormap for our observations\n",
|
|
"cm = plt.cm.get_cmap('plasma')\n",
|
|
"\n",
|
|
"params = {'edgecolors': '#999999',\n",
|
|
" 's': 12,\n",
|
|
" 'linewidth': 0.1, \n",
|
|
" 'cmap':cm,\n",
|
|
" 'vmin':0, \n",
|
|
" 'vmax':60\n",
|
|
" }\n",
|
|
"\n",
|
|
"sc=ax1.scatter(df[ss_mask].prestorm_beta, df[ss_mask].poststorm_beta, c=df[ss_mask].width_change,**params)\n",
|
|
"ax1.set_title('Swash/Swash')\n",
|
|
"ax1.set_ylabel('Observed swash')\n",
|
|
"\n",
|
|
"ax2.scatter(df[sc_mask].prestorm_beta, df[sc_mask].poststorm_beta, c=df[sc_mask].width_change,**params)\n",
|
|
"ax2.set_title('Swash/Collision')\n",
|
|
"\n",
|
|
"ax3.scatter(df[cs_mask].prestorm_beta, df[cs_mask].poststorm_beta, c=df[cs_mask].width_change,**params)\n",
|
|
"ax3.set_title('Collision/Swash')\n",
|
|
"ax3.set_ylabel('Observed collision')\n",
|
|
"ax3.set_xlabel('Predicted swash')\n",
|
|
"\n",
|
|
"ax4.scatter(df[cc_mask].prestorm_beta, df[cc_mask].poststorm_beta, c=df[cc_mask].width_change,**params)\n",
|
|
"ax4.set_title('Collision/Collision')\n",
|
|
"ax4.set_xlabel('Predicted collision')\n",
|
|
"\n",
|
|
"for ax in [ax1,ax2,ax3,ax4]:\n",
|
|
" ax.plot([0,0.2],[0,0.2], 'k--')\n",
|
|
" ax.set_xlim([0,0.2])\n",
|
|
" ax.set_ylim([0,0.2])\n",
|
|
"\n",
|
|
" \n",
|
|
"# Create a big ax so we can use common axis labels\n",
|
|
"# https://stackoverflow.com/a/36542971\n",
|
|
"f.add_subplot(111, frameon=False)\n",
|
|
"plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\n",
|
|
"plt.grid(False)\n",
|
|
"plt.xlabel(\"Prestorm mean slope (-)\", labelpad=25)\n",
|
|
"plt.ylabel(\"Poststorm mean slope (-)\", labelpad=25)\n",
|
|
" \n",
|
|
"# Layout adjustment\n",
|
|
"plt.tight_layout()\n",
|
|
"plt.subplots_adjust(hspace=0.25, bottom=0.1,right=0.9)\n",
|
|
"\n",
|
|
"# Add colorbar\n",
|
|
"cbar_ax = f.add_axes([0.95, 0.15, 0.05, 0.7])\n",
|
|
"cb = f.colorbar(sc, cax=cbar_ax)\n",
|
|
"cb.set_label(r'$\\varDelta$ beach width at MHW (m)')\n",
|
|
"\n",
|
|
"# Save and show figure\n",
|
|
"plt.savefig('06-confusion-change-in-slope.png', dpi=600, bbox_inches='tight') \n",
|
|
"plt.show()\n",
|
|
"plt.close()"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Plot for single beach"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"beach = 'NARRA'\n",
|
|
"\n",
|
|
"df_beach = df.loc[df.index.str.contains(beach)]\n",
|
|
"\n",
|
|
"# Get index\n",
|
|
"n = [x for x in range(len(df_beach))][::-1]\n",
|
|
"n_sites = [x for x in df_beach.index][::-1]\n",
|
|
"\n",
|
|
"f, (ax1,ax2,ax3,ax4) = plt.subplots(1,4, sharey=True,figsize=(10, 8))\n",
|
|
"\n",
|
|
"ax1.plot(df_beach.prestorm_beta,n,label='Prestorm slope',color='#4d9221')\n",
|
|
"ax1.plot(df_beach.poststorm_beta,n,label='Poststorm slope',color='#c51b7d')\n",
|
|
"ax1.set_title('Mean beach slope')\n",
|
|
"ax1.legend(loc='center', bbox_to_anchor=(0.5, -0.15))\n",
|
|
"\n",
|
|
"# Replace yticks with site_ids\n",
|
|
"yticks = ax1.get_yticks().tolist()\n",
|
|
"yticks = [n_sites[int(y)] if 0 <= y < len(n_sites) else y for y in yticks ]\n",
|
|
"ax1.set_yticklabels(yticks)\n",
|
|
"ax1.set_xlabel(r'Slope (-)')\n",
|
|
"\n",
|
|
"ax2.plot(df_beach.prestorm_width,n,label='Prestorm width',color='#4d9221')\n",
|
|
"ax2.plot(df_beach.poststorm_width,n, label='Poststorm width',color='#c51b7d')\n",
|
|
"# ax2.set_xlim([200,300])\n",
|
|
"ax2.set_xlabel(r'Beach width (m)')\n",
|
|
"ax2.set_title('Beach width\\nat MHW')\n",
|
|
"ax2.legend(loc='center', bbox_to_anchor=(0.5, -0.15))\n",
|
|
"\n",
|
|
"ax3.plot(df_beach.width_change,n,color='#999999')\n",
|
|
"ax3.set_xlim([0,75])\n",
|
|
"ax3.set_title('Change in MHW\\nbeach width')\n",
|
|
"ax3.set_xlabel(r'$\\varDelta$ Beach width (m)')\n",
|
|
"\n",
|
|
"\n",
|
|
"ax4.plot(df_beach.poststorm_beta / df_beach.prestorm_beta,n,color='#999999')\n",
|
|
"ax4.set_title('Ratio between pre and\\npost storm mean slopes')\n",
|
|
"\n",
|
|
"plt.tight_layout()\n",
|
|
"f.subplots_adjust(top=0.88)\n",
|
|
"f.suptitle(beach)\n",
|
|
"\n",
|
|
"# Print to figure\n",
|
|
"plt.savefig('06-change-in-slope-{}.png'.format(beach), dpi=600, bbox_inches='tight') \n",
|
|
"plt.show()\n",
|
|
"plt.close()"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"df_beach"
|
|
]
|
|
}
|
|
],
|
|
"metadata": {
|
|
"hide_input": false,
|
|
"kernelspec": {
|
|
"display_name": "Python 3",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.6.6"
|
|
},
|
|
"toc": {
|
|
"base_numbering": 1,
|
|
"nav_menu": {},
|
|
"number_sections": true,
|
|
"sideBar": true,
|
|
"skip_h1_title": false,
|
|
"title_cell": "Table of Contents",
|
|
"title_sidebar": "Contents",
|
|
"toc_cell": false,
|
|
"toc_position": {},
|
|
"toc_section_display": true,
|
|
"toc_window_display": false
|
|
},
|
|
"varInspector": {
|
|
"cols": {
|
|
"lenName": 16,
|
|
"lenType": 16,
|
|
"lenVar": 40
|
|
},
|
|
"kernels_config": {
|
|
"python": {
|
|
"delete_cmd_postfix": "",
|
|
"delete_cmd_prefix": "del ",
|
|
"library": "var_list.py",
|
|
"varRefreshCmd": "print(var_dic_list())"
|
|
},
|
|
"r": {
|
|
"delete_cmd_postfix": ") ",
|
|
"delete_cmd_prefix": "rm(",
|
|
"library": "var_list.r",
|
|
"varRefreshCmd": "cat(var_dic_list()) "
|
|
}
|
|
},
|
|
"types_to_exclude": [
|
|
"module",
|
|
"function",
|
|
"builtin_function_or_method",
|
|
"instance",
|
|
"_Feature"
|
|
],
|
|
"window_display": false
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 2
|
|
}
|