{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# TWL Exceedance" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Setup notebook" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Enable autoreloading of our modules. \n", "# Most of the code will be located in the /src/ folder, \n", "# and then called from the notebook.\n", "%matplotlib inline\n", "%reload_ext autoreload\n", "%autoreload" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from IPython.core.debugger import set_trace\n", "\n", "import pandas as pd\n", "import numpy as np\n", "import os\n", "import decimal\n", "import plotly\n", "import plotly.graph_objs as go\n", "import plotly.plotly as py\n", "import plotly.tools as tls\n", "import plotly.figure_factory as ff\n", "from plotly import tools\n", "import plotly.io as pio\n", "from scipy import stats\n", "import math\n", "import matplotlib\n", "from matplotlib import cm\n", "import colorlover as cl\n", "from tqdm import tqdm_notebook\n", "from ipywidgets import widgets, Output\n", "from IPython.display import display, clear_output, Image, HTML\n", "from scipy import stats\n", "from sklearn.metrics import confusion_matrix\n", "import matplotlib.pyplot as plt\n", "from scipy.interpolate import interp1d" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Matplot lib default settings\n", "plt.rcParams[\"figure.figsize\"] = (10,6)\n", "plt.rcParams['axes.grid']=True\n", "plt.rcParams['grid.alpha'] = 0.5\n", "plt.rcParams['grid.color'] = \"grey\"\n", "plt.rcParams['grid.linestyle'] = \"--\"\n", "\n", "\n", "# https://stackoverflow.com/a/20709149\n", "matplotlib.rcParams['text.usetex'] = True\n", "matplotlib.rcParams['font.family'] = 'sans-serif'\n", "matplotlib.rcParams['font.sans-serif'] = 'Helvetica'\n", "\n", "matplotlib.rcParams['text.latex.preamble'] = [\n", " r'\\usepackage{siunitx}', # i need upright \\micro symbols, but you need...\n", " r'\\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts\n", " r'\\usepackage{helvet}', # set the normal font here\n", " r'\\usepackage{sansmath}', # load up the sansmath so that math -> helvet\n", " r'\\sansmath' # <- tricky! 
-- you actually have to tell TeX to use it!\n", "] " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Import data" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def df_from_csv(csv, index_col, data_folder='../data/interim'):\n", " print('Importing {}'.format(csv))\n", " return pd.read_csv(os.path.join(data_folder, csv), index_col=index_col)\n", "\n", "df_waves = df_from_csv('waves.csv', index_col=[0, 1])\n", "df_tides = df_from_csv('tides.csv', index_col=[0, 1])\n", "df_profiles = df_from_csv('profiles.csv', index_col=[0, 1, 2])\n", "df_sites = df_from_csv('sites.csv', index_col=[0])\n", "df_profile_features_crest_toes = df_from_csv('profile_features_crest_toes.csv', index_col=[0, 1])\n", "\n", "# Note that the forecasted data sets should be in the same order for impacts and twls\n", "impacts = {\n", " 'forecasted': {\n", " 'foreshore_slope_sto06': df_from_csv('impacts_forecasted_foreshore_slope_sto06.csv', index_col=[0]),\n", " 'mean_slope_sto06': df_from_csv('impacts_forecasted_mean_slope_sto06.csv', index_col=[0]),\n", " },\n", " 'observed': df_from_csv('impacts_observed.csv', index_col=[0])\n", " }\n", "\n", "twls = {\n", " 'forecasted': {\n", " 'foreshore_slope_sto06': df_from_csv('twl_foreshore_slope_sto06.csv', index_col=[0, 1]),\n", " 'mean_slope_sto06': df_from_csv('twl_mean_slope_sto06.csv', index_col=[0, 1]),\n", " }\n", "}\n", "print('Done!')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Calculate impact hours\n", "- For each site_id, determine the cumulative frequency distribution of R_high (the R2% exceedance TWL) relative to the dune toe." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Create figure to plot results\n", "fig = tools.make_subplots(\n", " rows=2,\n", " cols=2,\n", " specs=[[{}, {}], [{}, {}]],\n", " subplot_titles=('Swash/Swash', 'Swash/Collision', 'Collision/Swash',\n", " 'Collision/Collision'),\n", " shared_xaxes=True,\n", " shared_yaxes=True,\n", " horizontal_spacing=0.05,\n", " vertical_spacing=0.1,\n", " print_grid=False)\n", "\n", "# Iterate through each site\n", "print('Calculating cumulative frequency of R_high for each site:')\n", "site_ids = twls['forecasted']['mean_slope_sto06'].index.get_level_values(\n", " 'site_id').unique().values\n", "for site_id in tqdm_notebook(site_ids):\n", "\n", " # Put data into a temporary dataframe; the shorter name is easier to work with\n", " df_impacts = impacts['forecasted']['mean_slope_sto06'].loc[site_id]\n", " df_twls = twls['forecasted']['mean_slope_sto06'].loc[site_id]\n", "\n", " D_low = df_impacts.dune_toe_z\n", " if np.isnan(D_low):\n", " continue\n", "\n", " # Get R_high elevations minus dune toe\n", " R_high_ts = df_twls.R_high.dropna().values\n", " R_high_D_low_ts = R_high_ts - D_low\n", "\n", " # Get still water level (SWL) and dynamic SWL (tide + setup) minus dune toe\n", " SWL_D_low_ts = df_twls['tide'].dropna().values - D_low\n", " DSWL_D_low_ts = (df_twls['tide'] + df_twls['setup']).dropna().values - D_low\n", "\n", " # Get cumulative freq\n", " cumfreq = stats.cumfreq(R_high_D_low_ts, numbins=100)\n", "# cumfreq = stats.cumfreq(DSWL_D_low_ts, numbins=100)\n", "\n", " # Calculate space of values for x\n", " bin_vals = cumfreq.lowerlimit + np.linspace(\n", " 0, cumfreq.binsize * cumfreq.cumcount.size, cumfreq.cumcount.size)\n", "\n", " # Check which subplot we should put this site on\n", " forecasted_regime = impacts['forecasted']['mean_slope_sto06'].loc[\n", " site_id].storm_regime\n", " observed_regime = impacts['observed'].loc[site_id].storm_regime\n", "\n", " if forecasted_regime == 'swash' and 
observed_regime == 'swash':\n", " subplot_row = 1\n", " subplot_col = 1\n", " elif forecasted_regime == 'collision' and observed_regime == 'collision':\n", " subplot_row = 2\n", " subplot_col = 2\n", " elif forecasted_regime == 'swash' and observed_regime == 'collision':\n", " subplot_row = 2\n", " subplot_col = 1\n", " elif forecasted_regime == 'collision' and observed_regime == 'swash':\n", " subplot_row = 1\n", " subplot_col = 2\n", " else:\n", " continue\n", "\n", " fig.append_trace(\n", " go.Scattergl(\n", " x=bin_vals,\n", " y=[max(cumfreq.cumcount) - x for x in cumfreq.cumcount],\n", " name=site_id,\n", " line=dict(\n", " color=('rgba(22, 22, 22, 0.2)'),\n", " width=0.5,\n", " )), subplot_row, subplot_col)\n", "\n", "print('Finalizing plot:')\n", "# Change some formatting for the plot\n", "layout = go.Layout(\n", " xaxis=dict(domain=[0, 0.45]),\n", " yaxis=dict(\n", " domain=[0, 0.45],\n", " type='log',\n", " ),\n", " xaxis2=dict(domain=[0.55, 1]),\n", " xaxis4=dict(domain=[0.55, 1], anchor='y4'),\n", " yaxis3=dict(\n", " domain=[0.55, 1],\n", " type='log',\n", " ),\n", " yaxis4=dict(\n", " domain=[0.55, 1],\n", " anchor='x4',\n", " type='log',\n", " ))\n", "\n", "fig['layout'].update(\n", " showlegend=False,\n", " title='Impact hours',\n", " height=800,\n", ")\n", "\n", "for ax in ['yaxis', 'yaxis2']:\n", "# fig['layout'][ax]['range'] = [0, 400]\n", " fig['layout'][ax]['range'] = [0, 15]\n", "\n", "for ax in ['xaxis', 'xaxis2']:\n", "# fig['layout'][ax]['range'] = [-2.5, 2.5]\n", " fig['layout'][ax]['range'] = [-1, 1]\n", "\n", "fig['layout']['xaxis'].update(title='R_high - D_low (m)')\n", "fig['layout']['xaxis2'].update(title='R_high - D_low (m)')\n", "fig['layout']['yaxis'].update(title='No. of Hours')\n", "fig['layout']['yaxis2'].update(title='No. of Hours')\n", "\n", "# pio.write_image(fig, 'fig2.png')\n", "\n", "go.FigureWidget(fig)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This gives an overview of the distribution of impact hours. Next, try to calculate confidence interval bounds for each predicted/observed regime combination.\n\nThe following cell combines all the observations from each CDF into one large CDF and calculates a confidence interval from it, but I'm not sure whether this is a valid method."
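] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Aside: the _conf_set helper used in the next cell draws a Dvoretzky-Kiefer-Wolfowitz (DKW) band around the ECDF. Here is a minimal sketch of that band computed directly from the DKW inequality, to make explicit what the next cell relies on (the name dkw_band is introduced here for illustration only)." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def dkw_band(ecdf_y, n_obs, alpha=0.05):\n", " # Half-width of the DKW band: epsilon = sqrt(ln(2/alpha) / (2n))\n", " epsilon = np.sqrt(np.log(2.0 / alpha) / (2 * n_obs))\n", " # Clip so the band stays within valid probabilities [0, 1]\n", " lower = np.clip(ecdf_y - epsilon, 0, 1)\n", " upper = np.clip(ecdf_y + epsilon, 0, 1)\n", " return lower, upper" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The DKW band assumes independent, identically distributed observations; pooling hourly values across sites (as the next cell does) stretches that assumption, which is another reason to treat the resulting intervals with caution."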
] }, { "cell_type": "code", "execution_count": null, "metadata": { "code_folding": [ 3 ] }, "outputs": [], "source": [ "from statsmodels.distributions.empirical_distribution import ECDF\n", "from statsmodels.distributions.empirical_distribution import _conf_set\n", "\n", "df_twls = twls['forecasted']['mean_slope_sto06']\n", "df_forecasted_impacts = impacts['forecasted']['mean_slope_sto06']\n", "df_observed_impacts = impacts['observed']\n", "\n", "plt.figure(figsize=(6,8))\n", "\n", "# Do some data rearranging and joining to make it easier\n", "df_joined = df_twls.reset_index()\n", "df_joined = df_joined.set_index('site_id')\n", "df_joined = df_joined.merge(\n", " df_observed_impacts[['storm_regime']],\n", " left_on='site_id',\n", " right_on='site_id').rename({\n", " 'storm_regime': 'observed_regime'\n", " },\n", " axis='columns')\n", "df_joined = df_joined.merge(\n", " df_forecasted_impacts[['storm_regime', 'dune_toe_z']],\n", " left_on='site_id',\n", " right_on='site_id').rename({\n", " 'storm_regime': 'forecasted_regime'\n", " },\n", " axis='columns')\n", "\n", "regime_combinations = [\n", " ('swash', 'swash', '#2b83ba'),\n", " ('collision', 'swash', '#abdda4'),\n", " ('swash', 'collision', '#fdae61'),\n", " ('collision', 'collision', '#d7191c'),\n", "]\n", "\n", "for comb in regime_combinations:\n", "\n", " observed_regime = comb[0]\n", " forecasted_regime = comb[1]\n", " color = comb[2]\n", "\n", " # Get values of elevation difference to plot\n", " query = '(observed_regime==\"{}\") & (forecasted_regime==\"{}\")'.format(\n", " observed_regime, forecasted_regime)\n", " df = df_joined.query(query)\n", " R_high_D_low = (df.R_high - df.dune_toe_z).values\n", " R_high_D_low = R_high_D_low[~np.isnan(R_high_D_low)]\n", "\n", " ecdf = ECDF(R_high_D_low)\n", "\n", " y = ecdf.y\n", " lower, upper = _conf_set(y, alpha=0.05)\n", " x = ecdf.x\n", "\n", " avg_hrs = df.groupby('site_id').count().R_high.mean()\n", " y = [avg_hrs - v * avg_hrs for v in y]\n", " lower = [avg_hrs - v * avg_hrs for v in lower]\n", " upper = [avg_hrs - v * avg_hrs for v in upper]\n", "\n", " plt.step(\n", " x,\n", " y,\n", " color=color,\n", " label='Pred={}, Obs={}'.format(forecasted_regime, observed_regime))\n", " plt.fill_between(\n", " x, y, upper, color='grey', alpha=0.2, interpolate=False, step='pre')\n", " plt.fill_between(\n", " x, y, lower, color='grey', alpha=0.2, interpolate=False, step='pre')\n", "\n", "# # Plot for checking\n", "\n", "plt.title('Empirical CDF with 95\\% confidence intervals')\n", "plt.xlabel('$R_{high} - D_{low} (m)$')\n", "plt.ylabel('Hours of Elevation Exceedence')\n", "plt.xlim([-1, 1])\n", "plt.ylim([0, 25])\n", "plt.legend(loc='best')\n", "\n", "# Print to figure\n", "plt.savefig('05-empirical-cdf.png', dpi=600, bbox_inches='tight') \n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The plot above shows:\n", "- collision if R_high - D_low > 0.25 m for 6 hours\n", "- swash if R_high - D_low < -0.8m for 7 hours\n", "\n", "additionaly:\n", "- collision if R_high > D_low for more than 10 hours\n", " \n", "Let's test how these new critera would perform." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Calculate elevation exceedence for each hours we are interested in\n", "ele_exceedence_6hr = twls['forecasted']['mean_slope_sto06'].sort_values(['R_high'],ascending=False).groupby('site_id').R_high.nth(6-1).rename('ele_exceedence_6hr')\n", "\n", "ele_exceedence_7hr = twls['forecasted']['mean_slope_sto06'].sort_values(['R_high'],ascending=False).groupby('site_id').R_high.nth(7-1).rename('ele_exceedence_7hr')\n", "\n", "\n", "ele_exceedence_2hr = twls['forecasted']['mean_slope_sto06'].sort_values(['R_high'],ascending=False).groupby('site_id').R_high.nth(2-1).rename('ele_exceedence_2hr')\n", "\n", "ele_exceedence_1hr = twls['forecasted']['mean_slope_sto06'].sort_values(['R_high'],ascending=False).groupby('site_id').R_high.nth(0).rename('ele_exceedence_1hr')\n", "\n", "\n", "# Get our dune toes\n", "dune_toes = df_profile_features_crest_toes.xs('prestorm',level='profile_type')['dune_toe_z']\n", "\n", "# Get our observed regimes\n", "observed_regime = impacts['observed'].storm_regime.rename('observed_regime')\n", "\n", "# Concat into one data frame\n", "df = pd.concat([dune_toes, ele_exceedence_6hr, ele_exceedence_7hr, ele_exceedence_1hr, ele_exceedence_2hr, observed_regime],axis=1)\n", "\n", "# Get predicted regime based on old criteria\n", "df.loc[df.ele_exceedence_1hr < df.dune_toe_z, 'forecasted_regime'] = 'swash'\n", "df.loc[df.ele_exceedence_1hr > df.dune_toe_z , 'forecasted_regime'] = 'collision'\n", "\n", "\n", "regime_combinations = [\n", " ('swash','swash'),\n", " ('collision','swash'),\n", " ('swash','collision'),\n", " ('collision','collision'),\n", "]\n", "\n", "print('Original')\n", "for comb in regime_combinations:\n", " query = 'forecasted_regime==\"{}\" & observed_regime==\"{}\"'.format(comb[0], comb[1])\n", " print('Forecasted: {}, Observed: {}, Count: {}'.format(comb[0], comb[1], len(df.query(query))))\n", "\n", "\n", "# Get predicted regime based on our new criteria\n", "\n", "adjust_swash_criteria = (df.forecasted_regime == 'swash') & (df.ele_exceedence_7hr - df.dune_toe_z > -0.8)\n", "adjust_collision_criteria = (df.forecasted_regime == 'collision') & (df.ele_exceedence_6hr - df.dune_toe_z < 0.25)\n", "df.loc[adjust_swash_criteria, 'forecasted_regime'] = 'collision'\n", "df.loc[adjust_collision_criteria, 'forecasted_regime'] = 'swash'\n", "\n", "# df.loc[(df.ele_exceedence_1hr - df.dune_toe_z <= -0.15 ),'forecasted_regime'] = 'swash'\n", "# df.loc[(df.ele_exceedence_1hr - df.dune_toe_z > -0.15 ),'forecasted_regime'] = 'collision'\n", "\n", "\n", "print('\\nAfter adjustment')\n", "for comb in regime_combinations:\n", " query = 'forecasted_regime==\"{}\" & observed_regime==\"{}\"'.format(comb[0], comb[1])\n", " print('Forecasted: {}, Observed: {}, Count: {}'.format(comb[0], comb[1], len(df.query(query))))\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Looking at the adjusted values, we can see these criteria actually make it worse. There must be something wrong with the technique - maybe the way of calculating the confidence intervals is wrong? 
Let's try to calculate confidence intervals for each regime combination.\n", "\n", "*I don't think this cell is used...*\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def mean_confidence_interval(data, confidence=0.95):\n", " a = 1.0 * np.array(data)\n", " n = len(a)\n", " m, se = np.mean(a), stats.sem(a)\n", " h = se * stats.t.ppf((1 + confidence) / 2., n-1)\n", " return m, m-h, m+h\n", "\n", "# Rank each site's hourly records by R_high (n_hrs_largest == n means this\n", "# R_high was equalled or exceeded for n hours)\n", "df = twls['forecasted']['mean_slope_sto06'].sort_values(['R_high'], ascending=False)\n", "df['n_hrs_largest'] = df.groupby('site_id').cumcount() + 1\n", "\n", "# Join observed and forecasted impacts and dune toe elevation\n", "observed_regime = impacts['observed'].storm_regime.rename('observed_regime').to_frame()\n", "forecasted_regime = impacts['forecasted']['mean_slope_sto06'].storm_regime.rename('forecasted_regime').to_frame()\n", "dune_info = df_profile_features_crest_toes.xs('prestorm', level='profile_type')\n", "\n", "df['datetime'] = df.index.get_level_values('datetime')\n", "df = df.merge(observed_regime, left_on=['site_id'], right_on='site_id')\n", "df = df.merge(forecasted_regime, left_on=['site_id'], right_on='site_id')\n", "df = df.merge(dune_info, left_on=['site_id'], right_on='site_id')\n", "\n", "# Make new column for R_high minus D_low\n", "df['R_high_D_low_diff'] = df.R_high - df.dune_toe_z\n", "\n", "regime_combinations = [\n", " ('swash','swash'),\n", " ('swash','collision'),\n", " ('collision','swash'),\n", " ('collision','collision'),\n", "]\n", "\n", "print('Calculating exceedance elevations for each combination:')\n", "exceedence_data = []\n", "for hr in tqdm_notebook(range(1, 101)):\n", "\n", " for comb in regime_combinations:\n", "\n", " vals = df.loc[(df.n_hrs_largest==hr) & (df.observed_regime==comb[0]) & (df.forecasted_regime==comb[1])].R_high_D_low_diff.dropna().values\n", "\n", " ci = mean_confidence_interval(vals)\n", "\n", " exceedence_data.append({\n", " 'observed_regime': comb[0],\n", " 'forecasted_regime': comb[1],\n", " 'exceedence_hr': hr,\n", " 'ci_mean': ci[0],\n", " 'ci_lower': ci[1],\n", " 'ci_upper': ci[2],\n", " })" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's try a different approach and split the observed swash and collision regimes at each impact duration hour.
" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from scipy.stats import norm\n", "\n", "best_split = []\n", "exceedence_hrs = []\n", "swash_mean = []\n", "swash_95_lower = []\n", "swash_95_upper = []\n", "collision_mean = []\n", "collision_95_lower = []\n", "collision_95_upper = []\n", "swash_median = []\n", "swash_q1 = []\n", "swash_q3 = []\n", "collision_median = []\n", "collision_q1 = []\n", "collision_q3 = []\n", "\n", "for hr in tqdm_notebook([x for x in range(1,101)]):\n", " \n", " dists = []\n", " plt.figure(figsize=(10,2))\n", " for observed_regime in ['swash','collision']:\n", " \n", " vals = df.loc[(df.n_hrs_largest==hr) &\n", " (df.observed_regime==observed_regime)].R_high_D_low_diff.dropna().values\n", " \n", " if observed_regime =='collision':\n", " color = 'red'\n", " label='collision'\n", " else:\n", " color = 'blue'\n", " label='swash'\n", " \n", " plt.hist(vals, bins='auto',color=color, alpha=0.5,label=label) \n", " plt.title(\"{} hour exceedence TWL\".format(hr))\n", " plt.xlim([-2.5,2.5])\n", " \n", " dists.append(norm.fit(vals))\n", " \n", " # Find which elevation best splits swash and collision\n", "# eles = [x for x in np.linspace(-2,2,1000)]\n", "# total_cdfs = []\n", "# for ele in eles:\n", "# swash_cdf = norm.cdf(ele,*dists[0])\n", "# collision_cdf = 1 - norm.cdf(ele,*dists[1])\n", "# total_cdfs.append(swash_cdf + collision_cdf)\n", "\n", "# i_max = np.argmax(total_cdfs)\n", "# best_ele = eles[i_max]\n", "\n", "# exceedence_hrs.append(hr)\n", "# best_split.append(best_ele)\n", "\n", " # Find which elevation best splits swash and collision\n", " eles = [x for x in np.linspace(-2,2,100)]\n", " total_cdfs = []\n", " swash_vals = df.loc[(df.n_hrs_largest==hr) &\n", " (df.observed_regime=='swash')].R_high_D_low_diff.dropna().values\n", " collision_vals = df.loc[(df.n_hrs_largest==hr) &\n", " (df.observed_regime=='collision')].R_high_D_low_diff.dropna().values\n", " for ele in eles:\n", " swash_samples = np.sum( swash_vals < ele) / len(swash_vals)\n", " collision_samples = np.sum( collision_vals > ele) / len(collision_vals) \n", " total_cdfs.append(swash_samples + collision_samples)\n", " \n", " i_max = np.argmax(total_cdfs)\n", " best_ele = eles[i_max]\n", "\n", " exceedence_hrs.append(hr)\n", " best_split.append(best_ele) \n", " \n", " \n", " # Store stastistics\n", " swash_mean.append(dists[0][0])\n", " swash_95_lower.append(norm.interval(0.5, *dists[0])[0])\n", " swash_95_upper.append(norm.interval(0.5, *dists[0])[1])\n", " collision_mean.append(dists[1][0])\n", " collision_95_lower.append(norm.interval(0.5, *dists[1])[0])\n", " collision_95_upper.append(norm.interval(0.5, *dists[1])[1])\n", " \n", " swash_median.append(np.percentile(swash_vals, 50))\n", " swash_q1.append(np.percentile(swash_vals, 25))\n", " swash_q3.append(np.percentile(swash_vals, 75))\n", " collision_median.append(np.percentile(collision_vals, 50))\n", " collision_q1.append(np.percentile(collision_vals, 25))\n", " collision_q3.append(np.percentile(collision_vals, 75)) \n", " \n", " plt.axvline(best_ele, label='Best split (x={:.2f} m)'.format(best_ele))\n", " plt.legend(loc='upper right', prop={'size': 10} )\n", " plt.xlabel('$R_{high} - D_{low}$')\n", " plt.ylabel('No. of sites')\n", " plt.xlim([-2,2])\n", " if hr == 80 or hr < 5 or hr==90:\n", " plt.show()\n", " \n", " plt.close()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now, let's plot our distributions for swash/collision and the best seperation between them." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "plt.figure(figsize=(5,5))\n", "plt.plot(best_split,exceedence_hrs, label='Best split', color='#000000', linestyle='--')\n", "\n", "# plt.plot(swash_mean, exceedence_hrs, label='Swash', color='#2b83ba')\n", "# plt.fill_betweenx(exceedence_hrs,swash_95_lower,swash_95_upper, color='#2b83ba', alpha=0.2, interpolate=False)\n", "\n", "# plt.plot(collision_mean, exceedence_hrs, label='Collision', color='#d7191c')\n", "# plt.fill_betweenx(exceedence_hrs,collision_95_lower,collision_95_upper, color='#d7191c', alpha=0.2, interpolate=False,label='plus 50')\n", "\n", "\n", "plt.plot(swash_median, exceedence_hrs, label='Swash', color='#2b83ba')\n", "plt.fill_betweenx(exceedence_hrs,swash_q1,swash_q3, color='#2b83ba', alpha=0.2, interpolate=False,label='Swash IQR')\n", "\n", "plt.plot(collision_median, exceedence_hrs, label='Collision', color='#d7191c')\n", "plt.fill_betweenx(exceedence_hrs,collision_q1,collision_q3, color='#d7191c', alpha=0.2, interpolate=False,label='Collision IQR')\n", "\n", "\n", "\n", "\n", "\n", "#===\n", "# # Let's plot one site as well, just to check\n", "# import random\n", "# site_ids = list(impacts['observed'].index.unique().values)\n", "# site_id = random.choice(site_ids)\n", "\n", "\n", "# site_id = 'TREACH0011'\n", "# site_predicted_regime = impacts['forecasted']['mean_slope_sto06'].loc[site_id].storm_regime\n", "# site_observed_regime = impacts['observed'].loc[site_id].storm_regime\n", "# df_site = df.loc[site_id]\n", "# plt.plot(df_site.R_high_D_low_diff, df_site.n_hrs_largest,label='site_id={}\\n(pred={},obs={})'.format(site_id,site_predicted_regime, site_observed_regime),color='#ffffff', linestyle='--')\n", "\n", "\n", "plt.title('Observed Swash/Collision - Best Split')\n", "plt.xlabel('$R_{high} - D_{low}$ (m)')\n", "plt.ylabel('Exceedance hours')\n", "plt.ylim([0,100])\n", "plt.xlim([-2,2])\n", "plt.legend()\n", "\n", "# Print to figure\n", "plt.savefig('05-best-split.png', dpi=600, bbox_inches='tight') \n", "\n", "plt.show()\n", "plt.close()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Plot above shows that if Rhigh = Dlow plus/minus 0.25m, we should say the storm regime is uncertain, rather than trying to make an incorrect prediction." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "df_for = impacts['forecasted']['mean_slope_sto06']\n", "df_obs = impacts['observed']\n", "\n", "# Join forecasted and observed impacts into same dataframe\n", "df_for = df_for.rename(columns={'storm_regime': 'forecasted_regime'})\n", "df_obs = df_obs.rename(columns={'storm_regime': 'observed_regime'})\n", "df_for = df_for.merge(\n", " df_obs.observed_regime.to_frame(), left_index=True, right_index=True)\n", "\n", "# Get wrong forecasts\n", "incorrect_for = df_for.forecasted_regime != df_for.observed_regime\n", "\n", "# How many wrong/correct forecasts\n", "print('There were {} correct forecasts'.format(len(df_for[~incorrect_for])))\n", "print('There were {} incorrect forecasts'.format(len(df_for[incorrect_for])))\n", "print('')\n", "\n", "# How many of these forecasts were where R_high was near D_low?\n", "close_eles = ((df.R_high > df.dune_toe_z - 0.25) &\n", " (df.R_high < df.dune_toe_z + 0.25))\n", "\n", "s = 'R_high and D_low elevations were close at {} correctly forecasted sites'\n", "print(s.format(len(df_for[~incorrect_for & close_eles])))\n", "\n", "s = 'R_high and D_low elevations were close at {} wrongly forecasted sites'\n", "print(s.format(len(df_for[incorrect_for & close_eles])))\n", "\n", "# df[(df.R_high>df.dune_toe_z-0.25)&(df.R_high