Update notebooks

develop
Chris Leaman 6 years ago
parent 1bad6f6dd7
commit 5850871c14

.gitattributes vendored

@ -1,3 +1,5 @@
*.ipynb filter=nbstripout
*.ipynb diff=ipynb

@ -611,7 +611,46 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"df_sites.site_no.to_csv('temp.csv')" "# Get twl information at maximum R_high for a site\n",
"# site_id = 'NARRA0008'\n",
"site_id = 'NARRA0012'\n",
"\n",
"print('TWLs:')\n",
"t = twls['forecasted']['mean_slope_sto06'].xs(site_id,level='site_id').R_high.idxmax()\n",
"print(twls['forecasted']['mean_slope_sto06'].loc[(site_id, t)])\n",
"\n",
"print('\\nforecast regime:')\n",
"print(impacts['forecasted']['mean_slope_sto06'].loc[site_id])\n",
"\n",
"print('\\nobserved regime:')\n",
"print(impacts['observed'].loc[site_id])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"site_id = 'NARRA0010'\n",
"\n",
"print(site_id)\n",
"# Storm duration\n",
"Hs0_storm = 3.0\n",
"df_twls = twls['forecasted']['mean_slope_sto06'].xs(site_id,level='site_id')\n",
"t_start = np.argmax(df_twls.Hs0 > Hs0_storm)\n",
"t_end = np.argmax((df_twls.Hs0 > Hs0_storm)[::-1])\n",
"i_start = df_twls.index.get_loc(t_start)\n",
"i_end = df_twls.index.get_loc(t_end)\n",
"\n",
"df_storm = df_twls.iloc[i_start:i_end]\n",
"print('Storm length: {} hrs'.format(len(df_storm)))\n",
"\n",
"# Get hours above a certain elevation\n",
"z_critical = 2.4\n",
"\n",
"n_impact_hrs = np.sum(df_storm.R_high >z_critical)\n",
"print('Number of hours before peak water level with R_high > {}m: {}hrs'.format(z_critical,n_impact_hrs))\n"
]
},
{
@ -879,10 +918,10 @@
"height": "656px", "height": "656px",
"left": "508px", "left": "508px",
"top": "90px", "top": "90px",
"width": "218.797px" "width": "282.797px"
}, },
"toc_section_display": true, "toc_section_display": true,
"toc_window_display": true "toc_window_display": false
}, },
"varInspector": { "varInspector": {
"cols": { "cols": {

File diff suppressed because it is too large

@ -0,0 +1,242 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Profile picker"
]
},
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true
},
"source": [
"## Setup notebook"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"# Enable autoreloading of our modules. \n",
"# Most of the code will be located in the /src/ folder, \n",
"# and then called from the notebook.\n",
"%matplotlib inline\n",
"%reload_ext autoreload\n",
"%autoreload"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"from IPython.core.debugger import set_trace\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"import os\n",
"import decimal\n",
"import plotly\n",
"import plotly.graph_objs as go\n",
"import plotly.plotly as py\n",
"import plotly.tools as tls\n",
"import plotly.figure_factory as ff\n",
"from plotly import tools\n",
"import plotly.io as pio\n",
"from scipy import stats\n",
"import math\n",
"import matplotlib\n",
"from matplotlib import cm\n",
"import colorlover as cl\n",
"import numpy.ma as ma\n",
"\n",
"from ipywidgets import widgets, Output\n",
"from IPython.display import display, clear_output, Image, HTML\n",
"\n",
"from sklearn.metrics import confusion_matrix\n",
"\n",
"import numpy as np\n",
"from matplotlib import pyplot as plt\n",
"\n",
"from sklearn import linear_model, datasets\n",
"\n",
"from scipy.interpolate import UnivariateSpline\n",
"from scipy.interpolate import interp1d\n",
"from scipy.interpolate import splrep, splev\n",
"from scipy.integrate import simps\n",
"from scipy.stats import linregress\n",
"from scipy.signal import find_peaks\n",
"import json"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"# Matplot lib default settings\n",
"plt.rcParams[\"figure.figsize\"] = (10,6)\n",
"plt.rcParams['axes.grid']=True\n",
"plt.rcParams['grid.alpha'] = 0.5\n",
"plt.rcParams['grid.color'] = \"grey\"\n",
"plt.rcParams['grid.linestyle'] = \"--\"\n",
"plt.rcParams['axes.grid']=True\n",
"\n",
"# https://stackoverflow.com/a/20709149\n",
"matplotlib.rcParams['text.usetex'] = True\n",
"\n",
"matplotlib.rcParams['text.latex.preamble'] = [\n",
" r'\\usepackage{siunitx}', # i need upright \\micro symbols, but you need...\n",
" r'\\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts\n",
" r'\\usepackage{helvet}', # set the normal font here\n",
" r'\\usepackage{amsmath}',\n",
" r'\\usepackage{sansmath}', # load up the sansmath so that math -> helvet\n",
" r'\\sansmath', # <- tricky! -- gotta actually tell tex to use!\n",
"] "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import data\n",
"Let's first import data from our pre-processed interim data folder."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def df_from_csv(csv, index_col, data_folder='../data/interim'):\n",
" print('Importing {}'.format(csv))\n",
" return pd.read_csv(os.path.join(data_folder,csv), index_col=index_col)\n",
"\n",
"df_profiles = df_from_csv('profiles.csv', index_col=[0, 1, 2])\n",
"df_profile_features_crest_toes = df_from_csv('profile_features_crest_toes.csv', index_col=[0,1])\n",
"\n",
"print('Done!')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Manually pick features"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib notebook\n",
"\n",
"sites = df_profiles.index.get_level_values('site_id').unique()\n",
"\n",
"\n",
"fig =plt.figure(figsize=(10, 3))\n",
"\n",
"df_prestorm = df_profiles.xs((sites[0],'prestorm'),level=('site_id','profile_type'))\n",
"df_poststorm = df_profiles.xs((sites[0],'poststorm'),level=('site_id','profile_type'))\n",
"line_prestorm, = plt.plot(df_prestorm.index, df_prestorm.z, label='prestorm')\n",
"line_poststorm, = plt.plot(df_prestorm.index, df_prestorm.z, label='poststorm')\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# df_profiles.xs((sites[0],'prestorm'),level=('site_id','profile_type'))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"hide_input": false,
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
},
"toc": {
"base_numbering": 1,
"nav_menu": {},
"number_sections": true,
"sideBar": true,
"skip_h1_title": false,
"title_cell": "Table of Contents",
"title_sidebar": "Contents",
"toc_cell": false,
"toc_position": {},
"toc_section_display": true,
"toc_window_display": false
},
"varInspector": {
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"delete_cmd_postfix": "",
"delete_cmd_prefix": "del ",
"library": "var_list.py",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"delete_cmd_postfix": ") ",
"delete_cmd_prefix": "rm(",
"library": "var_list.r",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
],
"window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -52,11 +52,41 @@
"import matplotlib\n", "import matplotlib\n",
"from matplotlib import cm\n", "from matplotlib import cm\n",
"import colorlover as cl\n", "import colorlover as cl\n",
"\n", "from tqdm import tqdm_notebook\n",
"from ipywidgets import widgets, Output\n", "from ipywidgets import widgets, Output\n",
"from IPython.display import display, clear_output, Image, HTML\n", "from IPython.display import display, clear_output, Image, HTML\n",
"from scipy import stats\n",
"from sklearn.metrics import confusion_matrix\n",
"import matplotlib.pyplot as plt\n",
"from scipy.interpolate import interp1d"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Matplot lib default settings\n",
"plt.rcParams[\"figure.figsize\"] = (10,6)\n",
"plt.rcParams['axes.grid']=True\n",
"plt.rcParams['grid.alpha'] = 0.5\n",
"plt.rcParams['grid.color'] = \"grey\"\n",
"plt.rcParams['grid.linestyle'] = \"--\"\n",
"\n",
"\n", "\n",
"from sklearn.metrics import confusion_matrix" "# https://stackoverflow.com/a/20709149\n",
"matplotlib.rcParams['text.usetex'] = True\n",
"matplotlib.rcParams['font.family'] = 'sans-serif'\n",
"matplotlib.rcParams['font.sans-serif'] = 'Helvetica'\n",
"\n",
"matplotlib.rcParams['text.latex.preamble'] = [\n",
" r'\\usepackage{siunitx}', # i need upright \\micro symbols, but you need...\n",
" r'\\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts\n",
" r'\\usepackage{helvet}', # set the normal font here\n",
" r'\\usepackage{sansmath}', # load up the sansmath so that math -> helvet\n",
" r'\\sansmath' # <- tricky! -- gotta actually tell tex to use!\n",
"] "
]
},
{
@ -105,8 +135,8 @@
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Calculate vertical distribution of wave count\n", "## Calculate impact hours\n",
"For each site, calculate how many waves reached a certain elevation (store as a binned histogram)." "- For each site_id, determine the R2 elevation."
] ]
}, },
{ {
@ -115,11 +145,306 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"# Helper functions\n", "# Create figure to plot results\n",
"def find_nearest(array, value):\n", "fig = tools.make_subplots(\n",
" array = np.asarray(array)\n", " rows=2,\n",
" idx = np.nanargmin((np.abs(array - value)))\n", " cols=2,\n",
" return array[idx], idx" " specs=[[{}, {}], [{}, {}]],\n",
" subplot_titles=('Swash/Swash', 'Swash/Collision', 'Collision/Swash',\n",
" 'Collision/Collision'),\n",
" shared_xaxes=True,\n",
" shared_yaxes=True,\n",
" horizontal_spacing=0.05,\n",
" vertical_spacing=0.1,\n",
" print_grid=False)\n",
"\n",
"# Iterate through each site\n",
"print('Calculating cumulate frequency of R_high for each site:')\n",
"site_ids = twls['forecasted']['mean_slope_sto06'].index.get_level_values(\n",
" 'site_id').unique().values\n",
"for site_id in tqdm_notebook(site_ids):\n",
"\n",
" # Put data into a temporary dataframe, shorter name is easier to work with\n",
" df_impacts = impacts['forecasted']['mean_slope_sto06'].loc[site_id]\n",
" df_twls = twls['forecasted']['mean_slope_sto06'].loc[site_id]\n",
"\n",
" D_low = df_impacts.dune_toe_z\n",
" if np.isnan(D_low):\n",
" continue\n",
"\n",
" # Get R_high elevations minus dune toe\n",
" R_high_ts = df_twls.R_high.dropna().values\n",
" R_high_D_low_ts = R_high_ts - D_low\n",
"\n",
" # Get SWL minus dune toe\n",
" SWL_D_low_ts = df_twls['tide'].dropna().values - D_low\n",
" DSWL_D_low_ts = (df_twls['tide'] + df_twls['setup']).dropna().values - D_low\n",
"\n",
" # Get cumulative freq\n",
" cumfreq = stats.cumfreq(R_high_D_low_ts, numbins=100)\n",
"# cumfreq = stats.cumfreq(DSWL_D_low_ts, numbins=100)\n",
"\n",
" # Calculate space of values for x\n",
" bin_vals = cumfreq.lowerlimit + np.linspace(\n",
" 0, cumfreq.binsize * cumfreq.cumcount.size, cumfreq.cumcount.size)\n",
"\n",
" # Check which subplot we should put this site on\n",
" forecasted_regime = impacts['forecasted']['mean_slope_sto06'].loc[\n",
" site_id].storm_regime\n",
" observed_regime = impacts['observed'].loc[site_id].storm_regime\n",
"\n",
" if forecasted_regime == 'swash' and observed_regime == 'swash':\n",
" x_col = 1\n",
" y_col = 1\n",
" elif forecasted_regime == 'collision' and observed_regime == 'collision':\n",
" x_col = 2\n",
" y_col = 2\n",
" elif forecasted_regime == 'swash' and observed_regime == 'collision':\n",
" x_col = 2\n",
" y_col = 1\n",
" elif forecasted_regime == 'collision' and observed_regime == 'swash':\n",
" x_col = 1\n",
" y_col = 2\n",
" else:\n",
" continue\n",
"\n",
" fig.append_trace(\n",
" go.Scattergl(\n",
" x=bin_vals,\n",
" y=[max(cumfreq.cumcount) - x for x in cumfreq.cumcount],\n",
" name=site_id,\n",
" line=dict(\n",
" color=('rgba(22, 22, 22, 0.2)'),\n",
" width=0.5,\n",
" )), x_col, y_col)\n",
"\n",
"print('Finalizing plot:')\n",
"# Change some formatting for the plot\n",
"layout = go.Layout(\n",
" xaxis=dict(domain=[0, 0.45]),\n",
" yaxis=dict(\n",
" domain=[0, 0.45],\n",
" type='log',\n",
" ),\n",
" xaxis2=dict(domain=[0.55, 1]),\n",
" xaxis4=dict(domain=[0.55, 1], anchor='y4'),\n",
" yaxis3=dict(\n",
" domain=[0.55, 1],\n",
" type='log',\n",
" ),\n",
" yaxis4=dict(\n",
" domain=[0.55, 1],\n",
" anchor='x4',\n",
" type='log',\n",
" ))\n",
"\n",
"fig['layout'].update(\n",
" showlegend=False,\n",
" title='Impact hours',\n",
" height=800,\n",
")\n",
"\n",
"for ax in ['yaxis', 'yaxis2']:\n",
"# fig['layout'][ax]['range'] = [0, 400]\n",
" fig['layout'][ax]['range'] = [0, 15]\n",
"\n",
"for ax in ['xaxis', 'xaxis2']:\n",
"# fig['layout'][ax]['range'] = [-2.5, 2.5]\n",
" fig['layout'][ax]['range'] = [-1, 1]\n",
"\n",
"fig['layout']['xaxis'].update(title='R_high - D_low')\n",
"fig['layout']['xaxis2'].update(title='R_high - D_low')\n",
"fig['layout']['yaxis'].update(title='No. of Hours')\n",
"fig['layout']['yaxis2'].update(title='No. of Hours')\n",
"\n",
"# pio.write_image(fig, 'fig2.png')\n",
"\n",
"go.FigureWidget(fig)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This gives an overview of the distribution of impact hours. Try to calculate the confidence interval bounds for each prediction/observed combination.\n",
"\n",
"The following cell looks at combining all the observations from each CDF into one large CDF and calculating a confidence interval from it, but I'm not sure if this is a valid method."
]
},
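{
"cell_type": "markdown",
"metadata": {},
"source": [
"`_conf_set` is a private statsmodels helper; the cell below sketches the Dvoretzky-Kiefer-Wolfowitz (DKW) band it is understood to compute. This is an assumption about statsmodels internals, included for reference only - the analysis itself uses `_conf_set` directly."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch (assumption): the DKW inequality gives a (1 - alpha) confidence band\n",
"# around an empirical CDF, with half-width eps = sqrt(ln(2/alpha) / (2n))\n",
"def dkw_band(ecdf_y, alpha=0.05):\n",
"    n = len(ecdf_y)\n",
"    eps = np.sqrt(np.log(2.0 / alpha) / (2 * n))\n",
"    return np.clip(ecdf_y - eps, 0, 1), np.clip(ecdf_y + eps, 0, 1)\n",
"\n",
"# Quick check on synthetic data (hypothetical values, not project data)\n",
"y_ecdf = np.arange(1, 101) / 100\n",
"dkw_lower, dkw_upper = dkw_band(y_ecdf)"
]
},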
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"code_folding": [
3
]
},
"outputs": [],
"source": [
"from statsmodels.distributions.empirical_distribution import ECDF\n",
"from statsmodels.distributions.empirical_distribution import _conf_set\n",
"\n",
"df_twls = twls['forecasted']['mean_slope_sto06']\n",
"df_forecasted_impacts = impacts['forecasted']['mean_slope_sto06']\n",
"df_observed_impacts = impacts['observed']\n",
"\n",
"plt.figure(figsize=(6,8))\n",
"\n",
"# Do some data rearranging and joining to make it easier\n",
"df_joined = df_twls.reset_index()\n",
"df_joined = df_joined.set_index('site_id')\n",
"df_joined = df_joined.merge(\n",
" df_observed_impacts[['storm_regime']],\n",
" left_on='site_id',\n",
" right_on='site_id').rename({\n",
" 'storm_regime': 'observed_regime'\n",
" },\n",
" axis='columns')\n",
"df_joined = df_joined.merge(\n",
" df_forecasted_impacts[['storm_regime', 'dune_toe_z']],\n",
" left_on='site_id',\n",
" right_on='site_id').rename({\n",
" 'storm_regime': 'forecasted_regime'\n",
" },\n",
" axis='columns')\n",
"\n",
"regime_combinations = [\n",
" ('swash', 'swash', '#2b83ba'),\n",
" ('collision', 'swash', '#abdda4'),\n",
" ('swash', 'collision', '#fdae61'),\n",
" ('collision', 'collision', '#d7191c'),\n",
"]\n",
"\n",
"for comb in regime_combinations:\n",
"\n",
" observed_regime = comb[0]\n",
" forecasted_regime = comb[1]\n",
" color = comb[2]\n",
"\n",
" # Get values of elevation difference to plot\n",
" query = '(observed_regime==\"{}\") & (forecasted_regime==\"{}\")'.format(\n",
" observed_regime, forecasted_regime)\n",
" df = df_joined.query(query)\n",
" R_high_D_low = (df.R_high - df.dune_toe_z).values\n",
" R_high_D_low = R_high_D_low[~np.isnan(R_high_D_low)]\n",
"\n",
" ecdf = ECDF(R_high_D_low)\n",
"\n",
" y = ecdf.y\n",
" lower, upper = _conf_set(y, alpha=0.05)\n",
" x = ecdf.x\n",
"\n",
" avg_hrs = df.groupby('site_id').count().R_high.mean()\n",
" y = [avg_hrs - v * avg_hrs for v in y]\n",
" lower = [avg_hrs - v * avg_hrs for v in lower]\n",
" upper = [avg_hrs - v * avg_hrs for v in upper]\n",
"\n",
" plt.step(\n",
" x,\n",
" y,\n",
" color=color,\n",
" label='Pred={}, Obs={}'.format(forecasted_regime, observed_regime))\n",
" plt.fill_between(\n",
" x, y, upper, color='grey', alpha=0.2, interpolate=False, step='pre')\n",
" plt.fill_between(\n",
" x, y, lower, color='grey', alpha=0.2, interpolate=False, step='pre')\n",
"\n",
"# # Plot for checking\n",
"\n",
"plt.title('Empirical CDF with 95\\% confidence intervals')\n",
"plt.xlabel('$R_{high} - D_{low} (m)$')\n",
"plt.ylabel('Hours of Elevation Exceedence')\n",
"plt.xlim([-1, 1])\n",
"plt.ylim([0, 25])\n",
"plt.legend(loc='best')\n",
"\n",
"# Print to figure\n",
"plt.savefig('05-empirical-cdf.png', dpi=600, bbox_inches='tight') \n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The plot above shows:\n",
"- collision if R_high - D_low > 0.25 m for 6 hours\n",
"- swash if R_high - D_low < -0.8m for 7 hours\n",
"\n",
"additionaly:\n",
"- collision if R_high > D_low for more than 10 hours\n",
" \n",
"Let's test how these new critera would perform."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Calculate elevation exceedence for each hours we are interested in\n",
"ele_exceedence_6hr = twls['forecasted']['mean_slope_sto06'].sort_values(['R_high'],ascending=False).groupby('site_id').R_high.nth(6-1).rename('ele_exceedence_6hr')\n",
"\n",
"ele_exceedence_7hr = twls['forecasted']['mean_slope_sto06'].sort_values(['R_high'],ascending=False).groupby('site_id').R_high.nth(7-1).rename('ele_exceedence_7hr')\n",
"\n",
"\n",
"ele_exceedence_2hr = twls['forecasted']['mean_slope_sto06'].sort_values(['R_high'],ascending=False).groupby('site_id').R_high.nth(2-1).rename('ele_exceedence_2hr')\n",
"\n",
"ele_exceedence_1hr = twls['forecasted']['mean_slope_sto06'].sort_values(['R_high'],ascending=False).groupby('site_id').R_high.nth(0).rename('ele_exceedence_1hr')\n",
"\n",
"\n",
"# Get our dune toes\n",
"dune_toes = df_profile_features_crest_toes.xs('prestorm',level='profile_type')['dune_toe_z']\n",
"\n",
"# Get our observed regimes\n",
"observed_regime = impacts['observed'].storm_regime.rename('observed_regime')\n",
"\n",
"# Concat into one data frame\n",
"df = pd.concat([dune_toes, ele_exceedence_6hr, ele_exceedence_7hr, ele_exceedence_1hr, ele_exceedence_2hr, observed_regime],axis=1)\n",
"\n",
"# Get predicted regime based on old criteria\n",
"df.loc[df.ele_exceedence_1hr < df.dune_toe_z, 'forecasted_regime'] = 'swash'\n",
"df.loc[df.ele_exceedence_1hr > df.dune_toe_z , 'forecasted_regime'] = 'collision'\n",
"\n",
"\n",
"regime_combinations = [\n",
" ('swash','swash'),\n",
" ('collision','swash'),\n",
" ('swash','collision'),\n",
" ('collision','collision'),\n",
"]\n",
"\n",
"print('Original')\n",
"for comb in regime_combinations:\n",
" query = 'forecasted_regime==\"{}\" & observed_regime==\"{}\"'.format(comb[0], comb[1])\n",
" print('Forecasted: {}, Observed: {}, Count: {}'.format(comb[0], comb[1], len(df.query(query))))\n",
"\n",
"\n",
"# Get predicted regime based on our new criteria\n",
"\n",
"adjust_swash_criteria = (df.forecasted_regime == 'swash') & (df.ele_exceedence_7hr - df.dune_toe_z > -0.8)\n",
"adjust_collision_criteria = (df.forecasted_regime == 'collision') & (df.ele_exceedence_6hr - df.dune_toe_z < 0.25)\n",
"df.loc[adjust_swash_criteria, 'forecasted_regime'] = 'collision'\n",
"df.loc[adjust_collision_criteria, 'forecasted_regime'] = 'swash'\n",
"\n",
"# df.loc[(df.ele_exceedence_1hr - df.dune_toe_z <= -0.15 ),'forecasted_regime'] = 'swash'\n",
"# df.loc[(df.ele_exceedence_1hr - df.dune_toe_z > -0.15 ),'forecasted_regime'] = 'collision'\n",
"\n",
"\n",
"print('\\nAfter adjustment')\n",
"for comb in regime_combinations:\n",
" query = 'forecasted_regime==\"{}\" & observed_regime==\"{}\"'.format(comb[0], comb[1])\n",
" print('Forecasted: {}, Observed: {}, Count: {}'.format(comb[0], comb[1], len(df.query(query))))\n"
]
},
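{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that the third criterion above (collision if R_high > D_low for more than 10 hours) isn't tested in the cell above. A sketch of how it could be added using the same exceedance approach (an assumption, not part of the original analysis):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch (assumption): elevation exceeded for 10 hours at each site,\n",
"# using the same descending-sort/nth approach as the cell above\n",
"ele_exceedence_10hr = twls['forecasted']['mean_slope_sto06'].sort_values(\n",
"    ['R_high'], ascending=False).groupby('site_id').R_high.nth(10 - 1).rename('ele_exceedence_10hr')\n",
"df = df.join(ele_exceedence_10hr)\n",
"\n",
"# Collision if R_high exceeds the dune toe for more than 10 hours\n",
"df.loc[(df.forecasted_regime == 'swash') &\n",
"       (df.ele_exceedence_10hr > df.dune_toe_z), 'forecasted_regime'] = 'collision'\n",
"df.forecasted_regime.value_counts()"
]
},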
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Looking at the adjusted values, we can see these criteria actually make it worse. There must be something wrong with the technique - maybe the way of calculating the confidence intervals is wrong? Let's try calculate confidence intervals for each regime combination.\n",
"\n",
"*This cell I don't think is used...*\n"
]
},
{
@ -128,7 +453,65 @@
"metadata": {},
"outputs": [],
"source": [
"def mean_confidence_interval(data, confidence=0.95):\n",
" # Standard t-distribution confidence interval for the sample mean\n",
" a = 1.0 * np.array(data)\n",
" n = len(a)\n",
" m, se = np.mean(a), stats.sem(a)\n",
" h = se * stats.t.ppf((1 + confidence) / 2., n-1)\n",
" return m, m-h, m+h\n",
"\n",
"# Add columns indicating how many n hrs was this the largest record\n",
"df = twls['forecasted']['mean_slope_sto06'].sort_values(['R_high'],ascending=False)\n",
"df['n_hrs_largest']= df.groupby('site_id').cumcount()+1\n",
"\n",
"# Join observed and forecast impacts and dune toe elevation\n",
"observed_regime = impacts['observed'].storm_regime.rename('observed_regime').to_frame()\n",
"forecasted_regime = impacts['forecasted']['mean_slope_sto06'].storm_regime.rename('forecasted_regime').to_frame()\n",
"dune_info = df_profile_features_crest_toes.xs('prestorm', level='profile_type')\n",
"\n",
"df['datetime'] = df.index.get_level_values('datetime')\n",
"df = df.merge(observed_regime,left_on=['site_id'],right_on='site_id')\n",
"df = df.merge(forecasted_regime,left_on=['site_id'],right_on='site_id')\n",
"df = df.merge(dune_info,left_on=['site_id'],right_on='site_id')\n",
"\n",
"# Make new column for R_high minus D_low\n",
"df['R_high_D_low_diff'] = df.R_high - df.dune_toe_z\n",
"\n",
"\n",
"regime_combinations = [\n",
" ('swash','swash'),\n",
" ('swash','collision'),\n",
" ('collision','swash'),\n",
" ('collision','collision'),\n",
"]\n",
"\n",
"print('Calculating hr exceedence elevations for each combination:')\n",
"exceedence_data = []\n",
"for hr in tqdm_notebook([x for x in range(1,101)]):\n",
" \n",
" for comb in regime_combinations:\n",
" \n",
" vals = df.loc[(df.n_hrs_largest==hr) & (df.observed_regime==comb[0]) & (df.forecasted_regime==comb[1])].R_high_D_low_diff.dropna().values\n",
" \n",
" ci = mean_confidence_interval(vals)\n",
"\n",
" exceedence_data.append({\n",
" 'observed_regime': comb[0],\n",
" 'forecasted_regime': comb[1],\n",
" 'exceedence_hr': hr,\n",
" 'ci_mean': ci[0],\n",
" 'ci_lower': ci[1],\n",
" 'ci_upper': ci[2],\n",
" })\n",
" \n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's try a different apporach and try split the observed swash and collision regimes at each impact duration hour. "
]
},
{
@ -136,6 +519,443 @@
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from scipy.stats import norm\n",
"\n",
"best_split = []\n",
"exceedence_hrs = []\n",
"swash_mean = []\n",
"swash_95_lower = []\n",
"swash_95_upper = []\n",
"collision_mean = []\n",
"collision_95_lower = []\n",
"collision_95_upper = []\n",
"swash_median = []\n",
"swash_q1 = []\n",
"swash_q3 = []\n",
"collision_median = []\n",
"collision_q1 = []\n",
"collision_q3 = []\n",
"\n",
"for hr in tqdm_notebook([x for x in range(1,101)]):\n",
" \n",
" dists = []\n",
" plt.figure(figsize=(10,2))\n",
" for observed_regime in ['swash','collision']:\n",
" \n",
" vals = df.loc[(df.n_hrs_largest==hr) &\n",
" (df.observed_regime==observed_regime)].R_high_D_low_diff.dropna().values\n",
" \n",
" if observed_regime =='collision':\n",
" color = 'red'\n",
" label='collision'\n",
" else:\n",
" color = 'blue'\n",
" label='swash'\n",
" \n",
" plt.hist(vals, bins='auto',color=color, alpha=0.5,label=label) \n",
" plt.title(\"{} hour exceedence TWL\".format(hr))\n",
" plt.xlim([-2.5,2.5])\n",
" \n",
" dists.append(norm.fit(vals))\n",
" \n",
" # Find which elevation best splits swash and collision\n",
"# eles = [x for x in np.linspace(-2,2,1000)]\n",
"# total_cdfs = []\n",
"# for ele in eles:\n",
"# swash_cdf = norm.cdf(ele,*dists[0])\n",
"# collision_cdf = 1 - norm.cdf(ele,*dists[1])\n",
"# total_cdfs.append(swash_cdf + collision_cdf)\n",
"\n",
"# i_max = np.argmax(total_cdfs)\n",
"# best_ele = eles[i_max]\n",
"\n",
"# exceedence_hrs.append(hr)\n",
"# best_split.append(best_ele)\n",
"\n",
" # Find which elevation best splits swash and collision\n",
" eles = [x for x in np.linspace(-2,2,100)]\n",
" total_cdfs = []\n",
" swash_vals = df.loc[(df.n_hrs_largest==hr) &\n",
" (df.observed_regime=='swash')].R_high_D_low_diff.dropna().values\n",
" collision_vals = df.loc[(df.n_hrs_largest==hr) &\n",
" (df.observed_regime=='collision')].R_high_D_low_diff.dropna().values\n",
" for ele in eles:\n",
" swash_samples = np.sum( swash_vals < ele) / len(swash_vals)\n",
" collision_samples = np.sum( collision_vals > ele) / len(collision_vals) \n",
" total_cdfs.append(swash_samples + collision_samples)\n",
" \n",
" i_max = np.argmax(total_cdfs)\n",
" best_ele = eles[i_max]\n",
"\n",
" exceedence_hrs.append(hr)\n",
" best_split.append(best_ele) \n",
" \n",
" \n",
" # Store stastistics\n",
" swash_mean.append(dists[0][0])\n",
" swash_95_lower.append(norm.interval(0.5, *dists[0])[0])\n",
" swash_95_upper.append(norm.interval(0.5, *dists[0])[1])\n",
" collision_mean.append(dists[1][0])\n",
" collision_95_lower.append(norm.interval(0.5, *dists[1])[0])\n",
" collision_95_upper.append(norm.interval(0.5, *dists[1])[1])\n",
" \n",
" swash_median.append(np.percentile(swash_vals, 50))\n",
" swash_q1.append(np.percentile(swash_vals, 25))\n",
" swash_q3.append(np.percentile(swash_vals, 75))\n",
" collision_median.append(np.percentile(collision_vals, 50))\n",
" collision_q1.append(np.percentile(collision_vals, 25))\n",
" collision_q3.append(np.percentile(collision_vals, 75)) \n",
" \n",
" plt.axvline(best_ele, label='Best split (x={:.2f} m)'.format(best_ele))\n",
" plt.legend(loc='upper right', prop={'size': 10} )\n",
" plt.xlabel('$R_{high} - D_{low}$')\n",
" plt.ylabel('No. of sites')\n",
" plt.xlim([-2,2])\n",
" if hr == 80 or hr < 5 or hr==90:\n",
" plt.show()\n",
" \n",
" plt.close()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now, let's plot our distributions for swash/collision and the best seperation between them."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plt.figure(figsize=(5,5))\n",
"plt.plot(best_split,exceedence_hrs, label='Best split', color='#000000', linestyle='--')\n",
"\n",
"# plt.plot(swash_mean, exceedence_hrs, label='Swash', color='#2b83ba')\n",
"# plt.fill_betweenx(exceedence_hrs,swash_95_lower,swash_95_upper, color='#2b83ba', alpha=0.2, interpolate=False)\n",
"\n",
"# plt.plot(collision_mean, exceedence_hrs, label='Collision', color='#d7191c')\n",
"# plt.fill_betweenx(exceedence_hrs,collision_95_lower,collision_95_upper, color='#d7191c', alpha=0.2, interpolate=False,label='plus 50')\n",
"\n",
"\n",
"plt.plot(swash_median, exceedence_hrs, label='Swash', color='#2b83ba')\n",
"plt.fill_betweenx(exceedence_hrs,swash_q1,swash_q3, color='#2b83ba', alpha=0.2, interpolate=False,label='Swash IQR')\n",
"\n",
"plt.plot(collision_median, exceedence_hrs, label='Collision', color='#d7191c')\n",
"plt.fill_betweenx(exceedence_hrs,collision_q1,collision_q3, color='#d7191c', alpha=0.2, interpolate=False,label='Collision IQR')\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"#===\n",
"# # Let's plot one site as well, just to check\n",
"# import random\n",
"# site_ids = list(impacts['observed'].index.unique().values)\n",
"# site_id = random.choice(site_ids)\n",
"\n",
"\n",
"# site_id = 'TREACH0011'\n",
"# site_predicted_regime = impacts['forecasted']['mean_slope_sto06'].loc[site_id].storm_regime\n",
"# site_observed_regime = impacts['observed'].loc[site_id].storm_regime\n",
"# df_site = df.loc[site_id]\n",
"# plt.plot(df_site.R_high_D_low_diff, df_site.n_hrs_largest,label='site_id={}\\n(pred={},obs={})'.format(site_id,site_predicted_regime, site_observed_regime),color='#ffffff', linestyle='--')\n",
"\n",
"\n",
"plt.title('Observed Swash/Collision - Best Split')\n",
"plt.xlabel('$R_{high} - D_{low}$ (m)')\n",
"plt.ylabel('Exceedance hours')\n",
"plt.ylim([0,100])\n",
"plt.xlim([-2,2])\n",
"plt.legend()\n",
"\n",
"# Print to figure\n",
"plt.savefig('05-best-split.png', dpi=600, bbox_inches='tight') \n",
"\n",
"plt.show()\n",
"plt.close()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Plot above shows that if Rhigh = Dlow plus/minus 0.25m, we should say the storm regime is uncertain, rather than trying to make an incorrect prediction."
]
},
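{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a sketch of how this uncertain band could be applied (an assumption, not part of the original analysis), the following cell flags forecasts as 'uncertain' when R_high falls within 0.25 m of the dune toe. It assumes the forecasted impacts frame carries `R_high` and `dune_toe_z` columns, as used elsewhere in this notebook:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch (assumption): flag forecasts as 'uncertain' when R_high falls\n",
"# within 0.25 m of the dune toe, rather than forcing swash/collision\n",
"df_unc = impacts['forecasted']['mean_slope_sto06'].copy()\n",
"close = (df_unc.R_high - df_unc.dune_toe_z).abs() < 0.25\n",
"df_unc.loc[close, 'storm_regime'] = 'uncertain'\n",
"df_unc.storm_regime.value_counts()"
]
},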
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_for = impacts['forecasted']['mean_slope_sto06']\n",
"df_obs = impacts['observed']\n",
"\n",
"# Join forecasted and observed impacts into same dataframe\n",
"df_for = df_for.rename(columns={'storm_regime': 'forecasted_regime'})\n",
"df_obs = df_obs.rename(columns={'storm_regime': 'observed_regime'})\n",
"df_for = df_for.merge(\n",
" df_obs.observed_regime.to_frame(), left_index=True, right_index=True)\n",
"\n",
"# Get wrong forecasts\n",
"incorrect_for = df_for.forecasted_regime != df_for.observed_regime\n",
"\n",
"# How many wrong/correct forecasts\n",
"print('There were {} correct forecasts'.format(len(df_for[~incorrect_for])))\n",
"print('There were {} incorrect forecasts'.format(len(df_for[incorrect_for])))\n",
"print('')\n",
"\n",
"# How many of these forecasts were where R_high was near D_low?\n",
"close_eles = ((df.R_high > df.dune_toe_z - 0.25) &\n",
" (df.R_high < df.dune_toe_z + 0.25))\n",
"\n",
"s = 'R_high and D_low elevations were close at {} correctly forecasted sites'\n",
"print(s.format(len(df_for[~incorrect_for & close_eles])))\n",
"\n",
"s = 'R_high and D_low elevations were close at {} wrongly forecasted sites'\n",
"print(s.format(len(df_for[incorrect_for & close_eles])))\n",
"\n",
"# df[(df.R_high>df.dune_toe_z-0.25)&(df.R_high<df.dune_toe_z+0.25)]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"So we can see more than half the number of incorrect predictions by saying they're unknown, but a quarter of correct predictions will say they're unknown."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# df_exceedence = pd.DataFrame(exceedence_data)\n",
"# df_exceedence = df_exceedence.set_index(['observed_regime','forecasted_regime','exceedence_hr'])\n",
"\n",
"# import random\n",
"# site_ids = list(impacts['observed'].index.unique().values)\n",
"# for site_id in random.sample(site_ids, 5):\n",
"\n",
"# # Plot mean ele exceedence hours for each combination\n",
"# plt.figure(figsize=(10,4))\n",
"# regime_combinations = [\n",
"# ('swash','swash'),\n",
"# ('swash','collision'),\n",
"# ('collision','swash'),\n",
"# ('collision','collision'),\n",
"# ]\n",
"\n",
"# for comb in regime_combinations:\n",
"# df_plot = df_exceedence.xs((comb[0], comb[1]), level=['observed_regime','forecasted_regime'])\n",
"# plt.plot(df_plot.ci_mean, df_plot.index.values,label='obs={}, pred={}'.format(comb[0],comb[1]))\n",
"# plt.fill_betweenx(df_plot.index.values, df_plot.ci_lower, df_plot.ci_upper, color='grey', alpha=0.2, interpolate=False)\n",
"\n",
"# plt.xlim([-2,1])\n",
"# plt.ylim([0,100])\n",
"\n",
"# # Let's plot one site as well, just to check\n",
"# site_predicted_regime = impacts['forecasted']['mean_slope_sto06'].loc[site_id].storm_regime\n",
"# site_observed_regime = impacts['observed'].loc[site_id].storm_regime\n",
"# df_site = df.loc[site_id]\n",
"# plt.plot(df_site.R_high_D_low_diff, df_site.n_hrs_largest,label='site_id={} (pred={},obs={})'.format(site_id,site_predicted_regime, site_observed_regime))\n",
"\n",
"# plt.legend(loc='upper right', prop={'size': 8})\n",
"# plt.show()\n",
"\n",
"\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true
},
"source": [
"# Other stuff which hasn't been tidied up"
]
},
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true,
"hidden": true
},
"source": [
"### Check the relationship between SWL-Dtoe, DSWL-Dtoe, R_high-Dtoe\n",
"Use 3D scatter plot to check the relationship between SWL-Dtoe, DSWL-Dtoe, R_high-Dtoe.\n",
"\n",
"This is moving away from time dependence..."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"[x[1] for x in df.query('forecasted_regime==\"swash\" & observed_regime==\"swash\"').iterrows()][0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"data = []\n",
"\n",
"# Iterate through each site\n",
"print('Calculating cumulate frequency of R_high for each site:')\n",
"site_ids = twls['forecasted']['mean_slope_sto06'].index.get_level_values(\n",
" 'site_id').unique().values\n",
"for site_id in tqdm_notebook(site_ids):\n",
"\n",
" # Put data into a temporary dataframe, shorter name is easier to work with\n",
" df_impacts = impacts['forecasted']['mean_slope_sto06'].loc[site_id]\n",
" df_twls = twls['forecasted']['mean_slope_sto06'].loc[site_id]\n",
"\n",
" D_low = df_impacts.dune_toe_z\n",
" if np.isnan(D_low):\n",
" continue\n",
"\n",
" # Find time where R_max is the highest\n",
" t = df_twls.R_high.idxmax()\n",
"\n",
" # Get R_high, tide and setup at that time\n",
" R_high = df_twls.loc[t].R_high\n",
" tide = df_twls.loc[t].tide\n",
" setup = df_twls.loc[t].setup\n",
"\n",
" # Calculate differences in elevation\n",
" R_high_D_low = R_high - D_low\n",
" SWL_D_low = tide - D_low\n",
" DSWL_D_low = tide + setup - D_low\n",
"\n",
" # Check which subplot we should put this site on\n",
" forecasted_regime = impacts['forecasted']['mean_slope_sto06'].loc[\n",
" site_id].storm_regime\n",
" observed_regime = impacts['observed'].loc[site_id].storm_regime\n",
"\n",
" data.append({\n",
" 'R_high_D_low': R_high_D_low,\n",
" 'SWL_D_low': SWL_D_low,\n",
" 'DSWL_D_low': DSWL_D_low,\n",
" 'forecasted_regime': forecasted_regime,\n",
" 'observed_regime': observed_regime\n",
" })\n",
"\n",
"# Turn data into a dataframe and plot\n",
"df = pd.DataFrame(data)\n",
"\n",
"# Plot swash/swash\n",
"query='forecasted_regime==\"swash\" & observed_regime==\"swash\"'\n",
"trace1 = go.Scatter3d(\n",
" x=[x[1].R_high_D_low for x in df.query(query).iterrows()],\n",
" y=[x[1].SWL_D_low for x in df.query(query).iterrows()],\n",
" z=[x[1].DSWL_D_low for x in df.query(query).iterrows()],\n",
" name='Swash/Swash',\n",
" mode='markers',\n",
" marker=dict(\n",
" size=6,\n",
" color='rgb(26,150,65)',\n",
" opacity=0.8))\n",
"\n",
"query='forecasted_regime==\"swash\" & observed_regime==\"collision\"'\n",
"trace2 = go.Scatter3d(\n",
" x=[x[1].R_high_D_low for x in df.query(query).iterrows()],\n",
" y=[x[1].SWL_D_low for x in df.query(query).iterrows()],\n",
" z=[x[1].DSWL_D_low for x in df.query(query).iterrows()],\n",
" name='Swash/Collision',\n",
" mode='markers',\n",
" marker=dict(\n",
" size=6,\n",
" color='rgb(253,174,97)',\n",
" opacity=0.8))\n",
"\n",
"query='forecasted_regime==\"collision\" & observed_regime==\"swash\"'\n",
"trace3 = go.Scatter3d(\n",
" x=[x[1].R_high_D_low for x in df.query(query).iterrows()],\n",
" y=[x[1].SWL_D_low for x in df.query(query).iterrows()],\n",
" z=[x[1].DSWL_D_low for x in df.query(query).iterrows()],\n",
" name='Collision/Swash',\n",
" mode='markers',\n",
" marker=dict(\n",
" size=6,\n",
" color='rgb(166,217,106)',\n",
" opacity=0.8))\n",
"\n",
"query='forecasted_regime==\"collision\" & observed_regime==\"collision\"'\n",
"trace4 = go.Scatter3d(\n",
" x=[x[1].R_high_D_low for x in df.query(query).iterrows()],\n",
" y=[x[1].SWL_D_low for x in df.query(query).iterrows()],\n",
" z=[x[1].DSWL_D_low for x in df.query(query).iterrows()],\n",
" name='Collsion/Collision',\n",
" mode='markers',\n",
" marker=dict(\n",
" size=6,\n",
" color='rgb(215,25,28)',\n",
" opacity=0.8))\n",
"\n",
"layout = go.Layout(\n",
" autosize=False,\n",
" width=1000,\n",
" height=700,\n",
" margin=go.layout.Margin(\n",
" l=50,\n",
" r=50,\n",
" b=100,\n",
" t=100,\n",
" pad=4\n",
" ),\n",
")\n",
"\n",
"fig = go.Figure(data=[trace1,trace2,trace3,trace4], layout=layout)\n",
"go.FigureWidget(fig)\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"hidden": true
},
"source": [
"## Calculate vertical distribution of wave count SS\n",
"For each site, calculate how many waves reached a certain elevation (store as a binned histogram)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"# Helper functions\n",
"def find_nearest(array, value):\n",
" array = np.asarray(array)\n",
" idx = np.nanargmin((np.abs(array - value)))\n",
" return array[idx], idx"
]
},
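{
"cell_type": "markdown",
"metadata": {
"hidden": true
},
"source": [
"A quick check of `find_nearest` on hypothetical values, for reference:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"# Hypothetical values: the nearest element to 0.6 is 0.5 at index 2 (NaN ignored)\n",
"find_nearest(np.array([0.1, np.nan, 0.5, 0.9]), 0.6)"
]
},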
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [ "source": [
"data = []\n", "data = []\n",
"for site_id, df_site_twl in twls['forecasted']['mean_slope_sto06'].groupby('site_id'):\n", "for site_id, df_site_twl in twls['forecasted']['mean_slope_sto06'].groupby('site_id'):\n",
@ -194,25 +1014,9 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"fig = tools.make_subplots(\n",
@ -288,7 +1092,9 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [],
"source": [
"fig['layout']['yaxis']" "fig['layout']['yaxis']"

@ -57,10 +57,37 @@
"\n", "\n",
"from ipywidgets import widgets, Output\n", "from ipywidgets import widgets, Output\n",
"from IPython.display import display, clear_output, Image, HTML\n", "from IPython.display import display, clear_output, Image, HTML\n",
"\n", "import matplotlib.pyplot as plt\n",
"from sklearn.metrics import confusion_matrix" "from sklearn.metrics import confusion_matrix"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Matplot lib default settings\n",
"plt.rcParams[\"figure.figsize\"] = (10,6)\n",
"plt.rcParams['axes.grid']=True\n",
"plt.rcParams['grid.alpha'] = 0.5\n",
"plt.rcParams['grid.color'] = \"grey\"\n",
"plt.rcParams['grid.linestyle'] = \"--\"\n",
"plt.rcParams['axes.grid']=True\n",
"\n",
"# https://stackoverflow.com/a/20709149\n",
"matplotlib.rcParams['text.usetex'] = True\n",
"\n",
"matplotlib.rcParams['text.latex.preamble'] = [\n",
" r'\\usepackage{siunitx}', # i need upright \\micro symbols, but you need...\n",
" r'\\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts\n",
" r'\\usepackage{helvet}', # set the normal font here\n",
" r'\\usepackage{amsmath}',\n",
" r'\\usepackage{sansmath}', # load up the sansmath so that math -> helvet\n",
" r'\\sansmath', # <- tricky! -- gotta actually tell tex to use!\n",
"] "
]
},
{
"cell_type": "markdown",
"metadata": {},
@ -140,6 +167,12 @@
"# Get x and z at poststorm dune toe for each site\n", "# Get x and z at poststorm dune toe for each site\n",
"df_dune_toe_poststorm = df_profile_features_crest_toes.xs('poststorm', level='profile_type')[['dune_toe_x','dune_toe_z']]\n", "df_dune_toe_poststorm = df_profile_features_crest_toes.xs('poststorm', level='profile_type')[['dune_toe_x','dune_toe_z']]\n",
"\n", "\n",
"# If there is no poststorm dune toe defined, use the dune crest\n",
"df_dune_crest_poststorm = df_profile_features_crest_toes.xs('poststorm', level='profile_type')[['dune_crest_x','dune_crest_z']]\n",
"df_dune_toe_poststorm.dune_toe_x = df_dune_toe_poststorm.dune_toe_x.fillna(df_dune_crest_poststorm.dune_crest_x)\n",
"df_dune_toe_poststorm.dune_toe_z = df_dune_toe_poststorm.dune_toe_z.fillna(df_dune_crest_poststorm.dune_crest_z)\n",
"\n",
"\n",
"# Join df for mhw and dune toe\n", "# Join df for mhw and dune toe\n",
"df = df_mhw_poststorm.join(df_dune_toe_poststorm)\n", "df = df_mhw_poststorm.join(df_dune_toe_poststorm)\n",
"df['beta'] = -(df['dune_toe_z'] - df['z_mhw']) / (df['dune_toe_x'] -df['x_mhw'])\n", "df['beta'] = -(df['dune_toe_z'] - df['z_mhw']) / (df['dune_toe_x'] -df['x_mhw'])\n",
@ -176,26 +209,89 @@
"df" "df"
] ]
}, },
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We also should add the change in beach width between prestorm and post storm profiles"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ele = 0.7\n",
"data = []\n",
"for site_id, df_site in df_profiles.groupby('site_id'):\n",
" \n",
" # Beach width should be measured from dune toe (or crest if doesn't exist) to MHW\n",
" \n",
" dune_toe_x = np.nanmax([\n",
" df_profile_features_crest_toes.loc[(site_id,'prestorm')].dune_crest_x,\n",
" df_profile_features_crest_toes.loc[(site_id,'prestorm')].dune_toe_x\n",
" ])\n",
" \n",
" \n",
" # TODO: this should probably take the closest value to ele starting from\n",
" # the seaward end of the profile - see the sketch in the cell after this one\n",
" temp = df_site.xs('prestorm',level='profile_type').dropna(subset=['z'])\n",
" prestorm_width = temp.iloc[(temp.z - ele).abs().argsort()[0]].name[1] - dune_toe_x\n",
" \n",
" temp = df_site.xs('poststorm',level='profile_type').dropna(subset=['z'])\n",
" poststorm_width = temp.iloc[(temp.z - ele).abs().argsort()[0]].name[1] - dune_toe_x\n",
" \n",
" width_change = prestorm_width - poststorm_width\n",
" data.append(\n",
" {\n",
" 'site_id': site_id,\n",
" 'width_change': width_change,\n",
" 'prestorm_width': prestorm_width,\n",
" 'poststorm_width': poststorm_width\n",
" })\n",
" \n",
" \n",
" \n",
" \n",
"df_width_change = pd.DataFrame(data)\n",
"df_width_change = df_width_change.set_index(['site_id'])\n",
"\n",
"# Join with the data\n",
"df = df.merge(df_width_change, left_on=['site_id'], right_on=['site_id'])\n"
]
},
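{
"cell_type": "markdown",
"metadata": {},
"source": [
"A sketch of the TODO above: instead of taking the profile point whose elevation is closest to `ele` anywhere along the profile, walk landward from the seaward end and take the first point at or above `ele`. The helper is illustrative only (`profile` is a hypothetical single-profile dataframe with a `z` column and x in the last index level):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch (assumption): x of the first crossing of elevation ele, scanning\n",
"# landward from the seaward (maximum x) end of a single profile\n",
"def seaward_crossing_x(profile, ele):\n",
"    z = profile.z.values\n",
"    x = profile.index.get_level_values(-1).values\n",
"    for i in range(len(z) - 1, -1, -1):\n",
"        if z[i] >= ele:\n",
"            return x[i]\n",
"    return np.nan"
]
},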
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true
},
"source": [ "source": [
"Plot our data" "## Plot our data in a confusion matrix\n",
"Superseded"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"metadata": {}, "metadata": {
"hidden": true
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hidden": true
},
"outputs": [], "outputs": [],
"source": [ "source": [
"fig = tools.make_subplots(\n", "fig = tools.make_subplots(\n",
@ -279,7 +375,9 @@
},
{
"cell_type": "markdown",
"metadata": {
"hidden": true
},
"source": [
"Looking at the above plot:\n",
"- In general, we can see that the prestorm mean slope is flatter than the poststorm mean slope. This can be explained by the presence of prestorm berms, which increase the prestorm mean slope. During the storm, these berms get eroded and decrease the slope.\n",
@ -287,6 +385,166 @@
"- **Swash/Collision**: Where we predict collision but observe swash, we can see that the prestorm mean slopes >0.1 generate high TWLs. \n", "- **Swash/Collision**: Where we predict collision but observe swash, we can see that the prestorm mean slopes >0.1 generate high TWLs. \n",
"\n" "\n"
] ]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Plot our data in a confusion matrix\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df[cc_mask].loc[df[cc_mask].poststorm_beta+0.05< df[cc_mask].prestorm_beta]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"f, ([ax1, ax2], [ax3, ax4],) = plt.subplots(\n",
" 2,\n",
" 2,\n",
" sharey=True,\n",
" sharex=True,\n",
" figsize=(8, 7))\n",
"\n",
"\n",
"ss_mask = (df.observed_regime=='swash') & (df.forecasted_regime=='swash')\n",
"sc_mask = (df.observed_regime=='swash') & (df.forecasted_regime=='collision')\n",
"cs_mask = (df.observed_regime=='collision') & (df.forecasted_regime=='swash')\n",
"cc_mask = (df.observed_regime=='collision') & (df.forecasted_regime=='collision')\n",
"\n",
"# Define colormap for our observations\n",
"cm = plt.cm.get_cmap('plasma')\n",
"\n",
"params = {'edgecolors': '#999999',\n",
" 's': 12,\n",
" 'linewidth': 0.1, \n",
" 'cmap':cm,\n",
" 'vmin':0, \n",
" 'vmax':60\n",
" }\n",
"\n",
"sc=ax1.scatter(df[ss_mask].prestorm_beta, df[ss_mask].poststorm_beta, c=df[ss_mask].width_change,**params)\n",
"ax1.set_title('Swash/Swash')\n",
"ax1.set_ylabel('Observed swash')\n",
"\n",
"ax2.scatter(df[sc_mask].prestorm_beta, df[sc_mask].poststorm_beta, c=df[sc_mask].width_change,**params)\n",
"ax2.set_title('Swash/Collision')\n",
"\n",
"ax3.scatter(df[cs_mask].prestorm_beta, df[cs_mask].poststorm_beta, c=df[cs_mask].width_change,**params)\n",
"ax3.set_title('Collision/Swash')\n",
"ax3.set_ylabel('Observed collision')\n",
"ax3.set_xlabel('Predicted swash')\n",
"\n",
"ax4.scatter(df[cc_mask].prestorm_beta, df[cc_mask].poststorm_beta, c=df[cc_mask].width_change,**params)\n",
"ax4.set_title('Collision/Collision')\n",
"ax4.set_xlabel('Predicted collision')\n",
"\n",
"for ax in [ax1,ax2,ax3,ax4]:\n",
" ax.plot([0,0.2],[0,0.2], 'k--')\n",
" ax.set_xlim([0,0.2])\n",
" ax.set_ylim([0,0.2])\n",
"\n",
" \n",
"# Create a big ax so we can use common axis labels\n",
"# https://stackoverflow.com/a/36542971\n",
"f.add_subplot(111, frameon=False)\n",
"plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\n",
"plt.grid(False)\n",
"plt.xlabel(\"Prestorm mean slope (-)\", labelpad=25)\n",
"plt.ylabel(\"Poststorm mean slope (-)\", labelpad=25)\n",
" \n",
"# Layout adjustment\n",
"plt.tight_layout()\n",
"plt.subplots_adjust(hspace=0.25, bottom=0.1,right=0.9)\n",
"\n",
"# Add colorbar\n",
"cbar_ax = f.add_axes([0.95, 0.15, 0.05, 0.7])\n",
"cb = f.colorbar(sc, cax=cbar_ax)\n",
"cb.set_label(r'$\\varDelta$ beach width at MHW (m)')\n",
"\n",
"# Save and show figure\n",
"plt.savefig('06-confusion-change-in-slope.png'.format(beach), dpi=600, bbox_inches='tight') \n",
"plt.show()\n",
"plt.close()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Plot for single beach"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"beach = 'NARRA'\n",
"\n",
"df_beach = df.loc[df.index.str.contains(beach)]\n",
"\n",
"# Get index\n",
"n = [x for x in range(len(df_beach))][::-1]\n",
"n_sites = [x for x in df_beach.index][::-1]\n",
"\n",
"f, (ax1,ax2,ax3,ax4) = plt.subplots(1,4, sharey=True,figsize=(10, 8))\n",
"\n",
"ax1.plot(df_beach.prestorm_beta,n,label='Prestorm slope',color='#4d9221')\n",
"ax1.plot(df_beach.poststorm_beta,n,label='Poststorm slope',color='#c51b7d')\n",
"ax1.set_title('Mean beach slope')\n",
"ax1.legend(loc='center', bbox_to_anchor=(0.5, -0.15))\n",
"\n",
"# Replace yticks with site_ids\n",
"yticks = ax1.get_yticks().tolist()\n",
"yticks = [n_sites[int(y)] if 0 <= y <= len(n_sites) else y for y in yticks ]\n",
"ax1.set_yticklabels(yticks)\n",
"ax1.set_xlabel(r'Slope (-)')\n",
"\n",
"ax2.plot(df_beach.prestorm_width,n,label='Prestorm width',color='#4d9221')\n",
"ax2.plot(df_beach.poststorm_width,n, label='Poststorm width',color='#c51b7d')\n",
"# ax2.set_xlim([200,300])\n",
"ax2.set_xlabel(r'Beach width (m)')\n",
"ax2.set_title('Beach width\\nat MHW')\n",
"ax2.legend(loc='center', bbox_to_anchor=(0.5, -0.15))\n",
"\n",
"ax3.plot(df_beach.width_change,n,color='#999999')\n",
"ax3.set_xlim([0,75])\n",
"ax3.set_title('Change in MHW\\nbeach width')\n",
"ax3.set_xlabel(r'$\\varDelta$ Beach width (m)')\n",
"\n",
"\n",
"ax4.plot(df_beach.poststorm_beta / df_beach.prestorm_beta,n,color='#999999')\n",
"ax4.set_title('Ratio between pre and\\npost storm mean slopes')\n",
"\n",
"plt.tight_layout()\n",
"f.subplots_adjust(top=0.88)\n",
"f.suptitle(beach)\n",
"\n",
"# Print to figure\n",
"plt.savefig('06-change-in-slope-{}.png'.format(beach), dpi=600, bbox_inches='tight') \n",
"plt.show()\n",
"plt.close()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_beach"
]
}
],
"metadata": {

@ -0,0 +1,348 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Longshore plots of each beach\n",
"- Need to create a longshore plot of each beach to see how the variables change alongshore."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup notebook"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Enable autoreloading of our modules. \n",
"# Most of the code will be located in the /src/ folder, \n",
"# and then called from the notebook.\n",
"%matplotlib inline\n",
"%reload_ext autoreload\n",
"%autoreload"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.core.debugger import set_trace\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"import os\n",
"import decimal\n",
"import plotly\n",
"import plotly.graph_objs as go\n",
"import plotly.plotly as py\n",
"import plotly.tools as tls\n",
"import plotly.figure_factory as ff\n",
"from plotly import tools\n",
"import plotly.io as pio\n",
"from scipy import stats\n",
"import math\n",
"import matplotlib\n",
"from matplotlib import cm\n",
"import colorlover as cl\n",
"from tqdm import tqdm_notebook\n",
"from ipywidgets import widgets, Output\n",
"from IPython.display import display, clear_output, Image, HTML\n",
"from scipy import stats\n",
"from sklearn.metrics import confusion_matrix\n",
"import matplotlib.pyplot as plt\n",
"from scipy.interpolate import interp1d\n",
"from pandas.api.types import CategoricalDtype"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Matplot lib default settings\n",
"plt.rcParams[\"figure.figsize\"] = (10,6)\n",
"plt.rcParams['axes.grid']=True\n",
"plt.rcParams['grid.alpha'] = 0.5\n",
"plt.rcParams['grid.color'] = \"grey\"\n",
"plt.rcParams['grid.linestyle'] = \"--\"\n",
"plt.rcParams['axes.grid']=True\n",
"\n",
"# https://stackoverflow.com/a/20709149\n",
"matplotlib.rcParams['text.usetex'] = True\n",
"\n",
"matplotlib.rcParams['text.latex.preamble'] = [\n",
" r'\\usepackage{siunitx}', # i need upright \\micro symbols, but you need...\n",
" r'\\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts\n",
" r'\\usepackage{helvet}', # set the normal font here\n",
" r'\\usepackage{amsmath}',\n",
" r'\\usepackage{sansmath}', # load up the sansmath so that math -> helvet\n",
" r'\\sansmath', # <- tricky! -- gotta actually tell tex to use!\n",
"] "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def df_from_csv(csv, index_col, data_folder='../data/interim'):\n",
" print('Importing {}'.format(csv))\n",
" return pd.read_csv(os.path.join(data_folder,csv), index_col=index_col)\n",
"\n",
"df_waves = df_from_csv('waves.csv', index_col=[0, 1])\n",
"df_tides = df_from_csv('tides.csv', index_col=[0, 1])\n",
"df_profiles = df_from_csv('profiles.csv', index_col=[0, 1, 2])\n",
"df_sites = df_from_csv('sites.csv', index_col=[0])\n",
"df_profile_features_crest_toes = df_from_csv('profile_features_crest_toes.csv', index_col=[0,1])\n",
"\n",
"# Note that the forecasted data sets should be in the same order for impacts and twls\n",
"impacts = {\n",
" 'forecasted': {\n",
" 'foreshore_slope_sto06': df_from_csv('impacts_forecasted_foreshore_slope_sto06.csv', index_col=[0]),\n",
" 'mean_slope_sto06': df_from_csv('impacts_forecasted_mean_slope_sto06.csv', index_col=[0]),\n",
" },\n",
" 'observed': df_from_csv('impacts_observed.csv', index_col=[0])\n",
" }\n",
"\n",
"\n",
"twls = {\n",
" 'forecasted': {\n",
" 'foreshore_slope_sto06': df_from_csv('twl_foreshore_slope_sto06.csv', index_col=[0, 1]),\n",
" 'mean_slope_sto06':df_from_csv('twl_mean_slope_sto06.csv', index_col=[0, 1]),\n",
" }\n",
"}\n",
"print('Done!')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Generate plot for each beach"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"beach = 'NARRA'\n",
"\n",
"# Get the dataframe\n",
"df = impacts['forecasted']['mean_slope_sto06']\n",
"df = df.rename(columns={'storm_regime': 'forecasted_regime'})\n",
"\n",
"df_beach = df.loc[df.index.str.contains(beach)]\n",
"\n",
"# Add information about hydrodynamics at max(R_high) time\n",
"df_beach = df_beach.merge(\n",
" twls['forecasted']['mean_slope_sto06'].drop(columns=['R_high', 'R_low']),\n",
" left_on=['site_id', 'datetime'],\n",
" right_on=['site_id', 'datetime'])\n",
"\n",
"# Add information about observed impacts\n",
"obs_impacts = impacts['observed'].rename(columns={\n",
" 'storm_regime': 'observed_regime'\n",
"}).observed_regime.to_frame()\n",
"df_beach = df_beach.merge(obs_impacts, left_on='site_id', right_on='site_id')\n",
"\n",
"# Convert storm regimes to categorical datatype\n",
"cat_type = CategoricalDtype(\n",
" categories=['swash', 'collision', 'overwash', 'inundation'], ordered=True)\n",
"df_beach.forecasted_regime = df_beach.forecasted_regime.astype(cat_type)\n",
"df_beach.observed_regime = df_beach.observed_regime.astype(cat_type)\n",
"\n",
"# Get index\n",
"n = [x for x in range(len(df_beach))][::-1]\n",
"n_sites = [x for x in df_beach.index][::-1]\n",
"\n",
"f, (ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8) = plt.subplots(\n",
" 1,\n",
" 8,\n",
" sharey=True,\n",
" figsize=(14, 8),\n",
" gridspec_kw={'width_ratios': [4, 4, 2, 2, 2, 2,2,2]})\n",
"\n",
"# Specify colors for storm regimes\n",
"cmap = {\n",
" 'swash': '#1a9850',\n",
" 'collision': '#fee08b',\n",
" 'overwash': '#d73027'\n",
"}\n",
"\n",
"colors = [cmap.get(x) for x in df_beach.observed_regime]\n",
"colors = ['#d73027' if c is None else c for c in colors]\n",
"\n",
"# Plot forecasted and observed storm regime\n",
"ax1.scatter(\n",
" df_beach.observed_regime.cat.codes.replace(-1,np.NaN),\n",
" n,\n",
" color=colors,\n",
" marker='o',\n",
" label='Observed regime')\n",
"\n",
"ax1.scatter(\n",
" df_beach.forecasted_regime.cat.codes.replace(-1,np.NaN),\n",
" n,\n",
" color='b',\n",
" marker='o',\n",
" edgecolors='black',\n",
" facecolors='none',\n",
" label='Forecasted regime')\n",
"\n",
"ax1.set_title('Storm\\nregime')\n",
"ax1.set_xticks([0,1,2,3])\n",
"ax1.set_xticklabels(['swash','collision','overwash','inundation'])\n",
"ax1.tick_params(axis='x', rotation=45)\n",
"ax1.legend(loc='center', bbox_to_anchor=(0.5, -0.15))\n",
"\n",
"# Replace yticks with site_ids\n",
"yticks = ax1.get_yticks().tolist()\n",
"yticks = [n_sites[int(y)] if 0 <= y <= len(n_sites) else y for y in yticks ]\n",
"ax1.set_yticklabels(yticks)\n",
"\n",
"# Water levels\n",
"ax2.plot(df_beach.R_high, n, color='#2c7bb6')\n",
"ax2.plot(df_beach.R_low, n, color='#2c7bb6')\n",
"ax2.fill_betweenx(\n",
" n, df_beach.R_low, df_beach.R_high, alpha=0.2, color='#2c7bb6', label='$R_{low}$ to $R_{high}$')\n",
"\n",
"# Dune elevations\n",
"ax2.plot(df_beach.dune_crest_z, n, color='#fdae61')\n",
"ax2.plot(df_beach.dune_toe_z, n, color='#fdae61')\n",
"ax2.fill_betweenx(\n",
" n, df_beach.dune_toe_z, df_beach.dune_crest_z, alpha=0.2, color='#fdae61', label='$D_{low}$ to $D_{high}$')\n",
"\n",
"ax2.set_title('TWL \\& Dune\\nElevations')\n",
"ax2.legend(loc='center',bbox_to_anchor=(0.5,-0.15))\n",
"ax2.set_xlabel('Elevation (m AHD)')\n",
"\n",
"# Plot R_high - D_low\n",
"ax3.plot(df_beach.R_high - df_beach.dune_toe_z,n,color='#999999')\n",
"ax3.axvline(x=0,color='black',linestyle=':')\n",
"ax3.set_title('$R_{high}$ - $D_{low}$')\n",
"ax3.set_xlabel('Height (m)')\n",
"ax3.set_xlim([-2,2])\n",
"\n",
"# Wave height, wave period, beach slope\n",
"ax4.plot(df_beach.Hs0, n,color='#377eb8')\n",
"ax4.set_title('$H_{s0}$')\n",
"ax4.set_xlabel('Sig. wave height (m)')\n",
"ax4.set_xlim([3,5])\n",
"\n",
"ax5.plot(df_beach.Tp, n,color='#e41a1c')\n",
"ax5.set_title('$T_{p}$')\n",
"ax5.set_xlabel('Peak wave period (s)')\n",
"ax5.set_xlim([8,14])\n",
"\n",
"ax6.plot(df_beach.tide, n,color='#a6cee3')\n",
"ax6.set_title('Tide')\n",
"ax6.set_xlabel('Elevation (m AHD)')\n",
"ax6.set_xlim([0,2])\n",
"\n",
"ax7.plot(df_beach.beta, n,color='#4daf4a')\n",
"ax7.set_title(r'$\\beta$')\n",
"ax7.set_xlabel('Mean prestorm\\nbeach slope')\n",
"ax7.set_xlim([0,0.15])\n",
"\n",
"ax8.plot(df_beach.R2, n,color='#6a3d9a')\n",
"ax8.set_title(r'$R_{2\\%}$')\n",
"ax8.set_xlabel('Height (m)')\n",
"\n",
"plt.tight_layout()\n",
"f.subplots_adjust(top=0.88)\n",
"f.suptitle(beach)\n",
"\n",
"\n",
"# Print to figure\n",
"plt.savefig('07-{}.png'.format(beach), dpi=600, bbox_inches='tight') \n",
"\n",
"plt.show()\n",
"plt.close()"
]
}
],
"metadata": {
"hide_input": false,
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
},
"toc": {
"base_numbering": 1,
"nav_menu": {},
"number_sections": true,
"sideBar": true,
"skip_h1_title": false,
"title_cell": "Table of Contents",
"title_sidebar": "Contents",
"toc_cell": false,
"toc_position": {},
"toc_section_display": true,
"toc_window_display": false
},
"varInspector": {
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"delete_cmd_postfix": "",
"delete_cmd_prefix": "del ",
"library": "var_list.py",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"delete_cmd_postfix": ") ",
"delete_cmd_prefix": "rm(",
"library": "var_list.r",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
],
"window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -0,0 +1,767 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Narrabeen Slope Test\n",
"With full topo and bathy combined"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup notebook"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Enable autoreloading of our modules. \n",
"# Most of the code will be located in the /src/ folder, \n",
"# and then called from the notebook.\n",
"%matplotlib inline\n",
"%reload_ext autoreload\n",
"%autoreload"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.core.debugger import set_trace\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"import os\n",
"import decimal\n",
"import plotly\n",
"import plotly.graph_objs as go\n",
"import plotly.plotly as py\n",
"import plotly.tools as tls\n",
"import plotly.figure_factory as ff\n",
"from plotly import tools\n",
"import plotly.io as pio\n",
"from scipy import stats\n",
"import math\n",
"import matplotlib\n",
"from matplotlib import cm\n",
"import colorlover as cl\n",
"from tqdm import tqdm_notebook\n",
"from ipywidgets import widgets, Output\n",
"from IPython.display import display, clear_output, Image, HTML\n",
"from scipy import stats\n",
"from sklearn.metrics import confusion_matrix\n",
"import matplotlib.pyplot as plt\n",
"from scipy.interpolate import interp1d\n",
"from pandas.api.types import CategoricalDtype\n",
"from scipy.interpolate import UnivariateSpline\n",
"from shapely.geometry import Point, LineString"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Matplot lib default settings\n",
"plt.rcParams[\"figure.figsize\"] = (10,6)\n",
"plt.rcParams['axes.grid']=True\n",
"plt.rcParams['grid.alpha'] = 0.5\n",
"plt.rcParams['grid.color'] = \"grey\"\n",
"plt.rcParams['grid.linestyle'] = \"--\"\n",
"plt.rcParams['axes.grid']=True\n",
"\n",
"# https://stackoverflow.com/a/20709149\n",
"matplotlib.rcParams['text.usetex'] = True\n",
"\n",
"matplotlib.rcParams['text.latex.preamble'] = [\n",
" r'\\usepackage{siunitx}', # i need upright \\micro symbols, but you need...\n",
" r'\\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts\n",
" r'\\usepackage{helvet}', # set the normal font here\n",
" r'\\usepackage{amsmath}',\n",
" r'\\usepackage{sansmath}', # load up the sansmath so that math -> helvet\n",
" r'\\sansmath', # <- tricky! -- gotta actually tell tex to use!\n",
"] "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import .csv data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"data_filename = '08-narr-topo-bathy-slope-test-full-profiles.csv'\n",
"\n",
"df_profiles = pd.read_csv(data_filename).set_index(['site_id','x'])\n",
"df_profiles = df_profiles[~df_profiles.index.duplicated(keep='first')]\n",
"print('df_profiles:')\n",
"df_profiles.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Manually cut off the prestorm topo \n",
"cuts = {'NARRA0004': {'prestorm_topo_max_x': 330,\n",
" 'poststorm_topo_max_x': 250},\n",
" 'NARRA0008': {'prestorm_topo_max_x': 290,\n",
" 'poststorm_topo_max_x': 250},\n",
" 'NARRA0012': {'prestorm_topo_max_x': 300,\n",
" 'poststorm_topo_max_x': 250},\n",
" 'NARRA0016': {'prestorm_topo_max_x': 300,\n",
" 'poststorm_topo_max_x': 225},\n",
" 'NARRA0021': {'prestorm_topo_max_x': 280,\n",
" 'poststorm_topo_max_x': 225},\n",
" 'NARRA0023': {'prestorm_topo_max_x': 275,\n",
" 'poststorm_topo_max_x': 215},\n",
" 'NARRA0027': {'prestorm_topo_max_x': 260,\n",
" 'poststorm_topo_max_x': 225},\n",
" 'NARRA0031': {'prestorm_topo_max_x': 260,\n",
" 'poststorm_topo_max_x': 225},\n",
" }\n",
"\n",
"for site_id in cuts:\n",
" mask1 = df_profiles.index.get_level_values('site_id') == site_id\n",
" mask2 = df_profiles.index.get_level_values('x') > cuts[site_id]['prestorm_topo_max_x']\n",
" df_profiles.loc[(mask1)&(mask2), 'pre_topo'] = np.nan\n",
" \n",
" mask3 = df_profiles.index.get_level_values('x') > cuts[site_id]['poststorm_topo_max_x']\n",
" df_profiles.loc[(mask1)&(mask3), 'post_topo'] = np.nan\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# for site_id,df_site in df_profiles.groupby('site_id'):\n",
"# f, (ax1) = plt.subplots(1,1, figsize=(6, 3))\n",
"# ax1.set_title(site_id)\n",
" \n",
"# ax1.plot(df_site.index.get_level_values('x'),\n",
"# df_site.pre_topo,\n",
"# label='Pre Topo',\n",
"# color='#2c7bb6')\n",
"# ax1.plot(df_site.index.get_level_values('x'),\n",
"# df_site.pre_bathy,\n",
"# label='Pre Bathy',\n",
"# color='#abd9e9')\n",
"\n",
"# ax1.plot(df_site.index.get_level_values('x'),\n",
"# df_site.post_topo,\n",
"# label='Post Topo',\n",
"# color='#d7191c')\n",
"# ax1.plot(df_site.index.get_level_values('x'),\n",
"# df_site.post_bathy,\n",
"# label='Post Bathy',\n",
"# color='#fdae61')\n",
"\n",
"# ax1.legend()\n",
"# plt.show()\n",
"# plt.close()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_profiles = df_profiles.dropna(\n",
" subset=['post_topo', 'post_bathy', 'pre_bathy', 'pre_topo'], how='all')\n",
"\n",
"df_profiles = df_profiles.reset_index()\n",
"df_profiles = df_profiles.melt(id_vars=['site_id','x','lat','lon'],\n",
" value_vars=['post_topo','post_bathy','pre_bathy','pre_topo']).rename(columns={'variable':'profile_type', 'value':'z'})\n",
"\n",
"df_profiles = df_profiles.dropna(subset=['z'])\n",
"\n",
"df_profiles.loc[df_profiles.profile_type=='post_topo','profile_type']='poststorm'\n",
"df_profiles.loc[df_profiles.profile_type=='post_bathy','profile_type']='poststorm'\n",
"df_profiles.loc[df_profiles.profile_type=='pre_topo','profile_type']='prestorm'\n",
"df_profiles.loc[df_profiles.profile_type=='pre_bathy','profile_type']='prestorm'\n",
"\n",
"df_profiles = df_profiles.set_index(['site_id', 'profile_type', 'x'])\n",
"df_profiles = df_profiles[~df_profiles.index.duplicated(keep='first')]\n",
"\n",
"df_profiles = df_profiles.sort_index()\n",
"\n",
"print('df_profiles:')\n",
"df_profiles.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Just plots each site's x and z values\n",
"for site_id,df_site in df_profiles.groupby('site_id'):\n",
" f, (ax1) = plt.subplots(1,1, figsize=(6, 3))\n",
" ax1.set_title(site_id)\n",
" \n",
" prestorm=df_site.index.get_level_values('profile_type') == 'prestorm'\n",
" ax1.plot(df_site[prestorm].index.get_level_values('x'),\n",
" df_site[prestorm].z,\n",
" label='Pre Topo',\n",
" color='#2c7bb6')\n",
"\n",
" \n",
" poststorm=df_site.index.get_level_values('profile_type') == 'poststorm'\n",
" ax1.plot(df_site[poststorm].index.get_level_values('x'),\n",
" df_site[poststorm].z,\n",
" label='Post Topo',\n",
" color='#d7191c')\n",
"\n",
"\n",
" ax1.legend()\n",
" plt.show()\n",
" plt.close()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Get dune faces"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"code_folding": []
},
"outputs": [],
"source": [
"# Manually define dune x coordinates and work out slope\n",
"\n",
"dune_data = [\n",
" {\n",
" 'site_id': 'NARRA0004',\n",
" 'dune_crest_x': 180,\n",
" 'dune_toe_x': 205\n",
" },\n",
" {\n",
" 'site_id': 'NARRA0008',\n",
" 'dune_crest_x': 180,\n",
" 'dune_toe_x': 205\n",
" },\n",
" {\n",
" 'site_id': 'NARRA0012',\n",
" 'dune_crest_x': 195,\n",
" 'dune_toe_x': 205\n",
" },\n",
" {\n",
" 'site_id': 'NARRA0016',\n",
" 'dune_crest_x': 190,\n",
" 'dune_toe_x': 200\n",
" },\n",
" {\n",
" 'site_id': 'NARRA0021',\n",
" 'dune_crest_x': 205,\n",
" 'dune_toe_x': 210\n",
" },\n",
" {\n",
" 'site_id': 'NARRA0023',\n",
" 'dune_crest_x': 205,\n",
" 'dune_toe_x': 215\n",
" },\n",
" {\n",
" 'site_id': 'NARRA0027',\n",
" 'dune_crest_x': 210,\n",
" 'dune_toe_x': 219\n",
" },\n",
" {\n",
" 'site_id': 'NARRA0031',\n",
" 'dune_crest_x': 210,\n",
" 'dune_toe_x': 218\n",
" },\n",
"]\n",
"\n",
"for site_dune in dune_data:\n",
" df_site = df_profiles.xs(site_dune['site_id'], level='site_id').xs('prestorm',level='profile_type')\n",
" \n",
" dune_crest_x = site_dune['dune_crest_x']\n",
" dune_toe_x = site_dune['dune_toe_x']\n",
" dune_crest_z = df_site.iloc[df_site.index.get_loc(site_dune['dune_crest_x'],method='nearest')].z\n",
" dune_toe_z = df_site.iloc[df_site.index.get_loc(site_dune['dune_toe_x'],method='nearest')].z\n",
"\n",
" dune_slope = (dune_crest_z - dune_toe_z)/(dune_crest_x - dune_toe_x)\n",
" \n",
" site_dune['dune_crest_z'] = dune_crest_z\n",
" site_dune['dune_toe_z'] = dune_toe_z\n",
" site_dune['dune_slope'] = dune_slope\n",
" \n",
" \n",
"# Join back into main data\n",
"df_dunes = pd.DataFrame(dune_data).set_index('site_id')\n",
"print('df_dunes:')\n",
"df_dunes.head()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Just plots each site's x and z values\n",
"# for site_id,df_site in df_profiles.xs('prestorm',level='profile_type').groupby('site_id'):\n",
"# f, (ax1) = plt.subplots(1,1, figsize=(6, 3))\n",
"# ax1.set_title(site_id)\n",
"# ax1.plot(df_site.index.get_level_values('x'),\n",
"# df_site.z)\n",
"# ax1.plot([df_dunes.loc[site_id].dune_crest_x, df_dunes.loc[site_id].dune_toe_x],\n",
"# [df_dunes.loc[site_id].dune_crest_z, df_dunes.loc[site_id].dune_toe_z],\n",
"# 'r.-')\n",
"# ax1.set_xlim([150,250])\n",
"# ax1.set_ylim([0,15])\n",
"# plt.show()\n",
"# plt.close()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Get prestorm slope"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"z_ele = 0.7\n",
"debug=False\n",
"\n",
"def find_nearest_idx(array, value):\n",
" array = np.asarray(array)\n",
" idx = (np.abs(array - value)).argmin()\n",
" return idx\n",
"\n",
"prestorm_slope_data =[]\n",
"for site_id, df_site in df_profiles.xs('prestorm',level='profile_type').groupby('site_id'):\n",
" \n",
" # Find index of our z_ele\n",
" idx = np.where(df_site.z.values>=z_ele)[0][-1]\n",
" \n",
" prestorm_end_x = df_site.iloc[idx].name[1]\n",
" prestorm_end_z = df_site.iloc[idx].z\n",
" \n",
" prestorm_start_x = df_dunes.loc[site_id].dune_toe_x\n",
" prestorm_start_z = df_dunes.loc[site_id].dune_toe_z\n",
" \n",
" prestorm_slope = (prestorm_end_z-prestorm_start_z)/(prestorm_end_x-prestorm_start_x)\n",
" \n",
" prestorm_slope_data.append({\n",
" 'site_id': site_id,\n",
" 'prestorm_end_x': prestorm_end_x,\n",
" 'prestorm_end_z': prestorm_end_z,\n",
" 'prestorm_start_x': prestorm_start_x,\n",
" 'prestorm_start_z': prestorm_start_z,\n",
" 'prestorm_slope': prestorm_slope\n",
" })\n",
" \n",
"df_prestorm_slope = pd.DataFrame(prestorm_slope_data).set_index(['site_id'])\n",
"print('df_prestorm_slope:')\n",
"df_prestorm_slope.head()\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Get shelf slope\n",
"At 10 m contour"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"code_folding": []
},
"outputs": [],
"source": [
"# Elevation to take shelf slope at\n",
"z_ele = -9\n",
"debug=False\n",
"\n",
"def find_nearest_idx(array, value):\n",
" array = np.asarray(array)\n",
" idx = (np.abs(array - value)).argmin()\n",
" return idx\n",
"\n",
"def slope_at_point(x, z, z_ele,debug=False):\n",
" # Smooth profile a bit\n",
" # TODO the smoothing factor will change based on the number of data points\n",
" # Need to fix\n",
" s = UnivariateSpline(x, z, s=50)\n",
" xs = np.linspace(min(x),max(x),1000)\n",
" zs = s(xs)\n",
"\n",
" # Calculate derivates of spline\n",
" dzdx = np.diff(zs)/np.diff(xs)\n",
"\n",
" # Find index of z_ele\n",
" idx = find_nearest_idx(zs, z_ele)\n",
" slope = dzdx[idx]\n",
" shelf_x = xs[idx]\n",
"\n",
"\n",
" \n",
" # For checking how much smoothing is going on\n",
" if debug:\n",
" f, (ax1) = plt.subplots(1,1, figsize=(6, 3))\n",
" ax1.plot(x,z)\n",
" ax1.plot(xs,zs)\n",
" plt.show()\n",
" plt.close()\n",
" \n",
" return slope, shelf_x, z_ele\n",
" \n",
"shelf_data = []\n",
"for site_id, df_site in df_profiles.xs('prestorm',level='profile_type').groupby('site_id'):\n",
" shelf_slope, shelf_x, shelf_z = slope_at_point(df_site.index.get_level_values('x').values,\n",
" df_site.z, \n",
" z_ele, debug=debug)\n",
" shelf_data.append({\n",
" 'site_id': site_id,\n",
" 'shelf_slope': shelf_slope,\n",
" 'shelf_x': shelf_x,\n",
" 'shelf_z': shelf_z\n",
" })\n",
" \n",
"df_shelf = pd.DataFrame(shelf_data).set_index(['site_id'])\n",
"\n",
"df_shelf.loc['NARRA0004','shelf_slope'] = -0.02\n",
"\n",
"print('df_shelf:')\n",
"df_shelf.head()"
]
},
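  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A minimal sketch for the TODO in `slope_at_point` above: scale the spline smoothing factor with the number of profile points rather than hardcoding `s=50`. The heuristic follows the scipy `UnivariateSpline` docs (a good `s` lies near `m +/- sqrt(2*m)` when the data are weighted by inverse standard deviation); it is an assumption here, not calibrated for these profiles."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "def smoothing_factor(z):\n",
    "    # scipy guidance: with weights of 1/std(y), a good s lies in\n",
    "    # m +/- sqrt(2*m), where m is the number of data points\n",
    "    m = len(z)\n",
    "    return m + np.sqrt(2 * m)\n",
    "\n",
    "# e.g. UnivariateSpline(x, z, s=smoothing_factor(z)) instead of s=50"
   ]
  },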
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Do geometry\n"
]
},
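  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A minimal, self-contained sketch of the projection/intersection step used in the loop below: the dune face and shelf slope become shapely `LineString`s and `intersection()` returns their crossing point. The coordinates are illustrative only, not taken from the survey data."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from shapely.geometry import Point, LineString\n",
    "\n",
    "# Dune face: toe at (205, 2.5), projected 1000 m seaward at slope -0.1\n",
    "dune_line = LineString([Point(205, 2.5), Point(1205, -97.5)])\n",
    "\n",
    "# Shelf: -9 m contour at x=600, slope -0.02, projected 1000 m both ways\n",
    "shelf_line = LineString([Point(-400, 11.0), Point(600, -9.0), Point(1600, -29.0)])\n",
    "\n",
    "print(dune_line.intersection(shelf_line))  # expected: POINT (250 -2)"
   ]
  },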
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for site_id, df_site in df_profiles.groupby('site_id'):\n",
"\n",
" # Project the dune face outwards\n",
" dune_face_toe = Point(df_dunes.loc[site_id].dune_toe_x,\n",
" df_dunes.loc[site_id].dune_toe_z)\n",
" dune_face_sea = Point(\n",
" df_dunes.loc[site_id].dune_toe_x + 1000,\n",
" # df_dunes.loc[site_id].dune_toe_z +1000 * -1\n",
" df_dunes.loc[site_id].dune_toe_z +\n",
" 1000 * df_dunes.loc[site_id].dune_slope)\n",
" dune_line = LineString([dune_face_toe, dune_face_sea])\n",
"\n",
" # Project the shelf slope landwards\n",
" shelf_point = Point(df_shelf.loc[site_id].shelf_x,\n",
" df_shelf.loc[site_id].shelf_z)\n",
" shelf_land = Point(\n",
" df_shelf.loc[site_id].shelf_x - 1000, df_shelf.loc[site_id].shelf_z -\n",
" 1000 * df_shelf.loc[site_id].shelf_slope)\n",
" shelf_sea = Point(\n",
" df_shelf.loc[site_id].shelf_x + 1000, df_shelf.loc[site_id].shelf_z +\n",
" 1000 * df_shelf.loc[site_id].shelf_slope)\n",
" shelf_line = LineString([shelf_land, shelf_point, shelf_sea])\n",
"\n",
" # Find intersection between to lines\n",
" dune_shelf_int = dune_line.intersection(shelf_line)\n",
" dist_toe_to_int = dune_face_toe.distance(dune_shelf_int)\n",
"\n",
" # Plots\n",
" f, (ax1) = plt.subplots(1, 1, figsize=(12, 4))\n",
"\n",
" # Raw profile prestorm\n",
" ax1.plot(\n",
" df_site.xs('prestorm',\n",
" level='profile_type').index.get_level_values('x'),\n",
" df_site.xs('prestorm', level='profile_type').z,\n",
" label='Prestorm profile')\n",
"\n",
" # Raw profile poststorm\n",
" ax1.plot(\n",
" df_site.xs('poststorm',\n",
" level='profile_type').index.get_level_values('x'),\n",
" df_site.xs('poststorm', level='profile_type').z,\n",
" label='Poststorm profile')\n",
"\n",
" # Dune face\n",
" ax1.plot(\n",
" [df_dunes.loc[site_id].dune_crest_x, df_dunes.loc[site_id].dune_toe_x],\n",
" [df_dunes.loc[site_id].dune_crest_z, df_dunes.loc[site_id].dune_toe_z],\n",
" linestyle=':',\n",
" color='#999999',\n",
" label='Dune face ({:.2f})'.format(-df_dunes.loc[site_id].dune_slope))\n",
"\n",
" # Projected dune face\n",
" ax1.plot(\n",
" dune_line.xy[0],\n",
" dune_line.xy[1],\n",
" linestyle='--',\n",
" color='#999999',\n",
" label='Dune face (projected)')\n",
"\n",
" # Projected shelf slope\n",
" ax1.plot(\n",
" shelf_line.xy[0],\n",
" shelf_line.xy[1],\n",
" linestyle='--',\n",
" color='#999999',\n",
" label='Shelf slope (projected)')\n",
"\n",
" # Intersection\n",
" ax1.scatter(\n",
" dune_shelf_int.xy[0],\n",
" dune_shelf_int.xy[1],\n",
" marker='x',\n",
" color='#999999',\n",
" label='Dune/shelf projected intersection')\n",
"\n",
" # Prestorm slope\n",
" ax1.plot([\n",
" df_prestorm_slope.loc[site_id].prestorm_start_x,\n",
" df_prestorm_slope.loc[site_id].prestorm_end_x\n",
" ], [\n",
" df_prestorm_slope.loc[site_id].prestorm_start_z,\n",
" df_prestorm_slope.loc[site_id].prestorm_end_z\n",
" ],\n",
" color='violet',\n",
" label='Prestorm slope ({:.2f})'.format(\n",
" -df_prestorm_slope.loc[site_id].prestorm_slope))\n",
"\n",
" # # Find best slope based on distance form toe to intersection?\n",
" # best_slope_toe = shelf_line.interpolate(\n",
" # shelf_line.project(intersection) - 4 * dist_toe_to_int)\n",
" # best_slope = (dune_face_toe.xy[1][0] - best_slope_toe.xy[1][0]) / (\n",
" # dune_face_toe.xy[0][0] - best_slope_toe.xy[0][0])\n",
"\n",
" # # Best slope toe\n",
" # ax1.scatter(\n",
" # best_slope_toe.xy[0], best_slope_toe.xy[1], marker='o', color='g')\n",
"\n",
" # # Best slope\n",
" # ax1.plot([dune_face_toe.xy[0], best_slope_toe.xy[0]],\n",
" # [dune_face_toe.xy[1], best_slope_toe.xy[1]],\n",
" # color='g',\n",
" # label='Best slope ({:.3f})'.format(-best_slope))\n",
"\n",
" # Find best slope based on intersection of prestorm slope and surf zone slope\n",
" prestorm_slope_line = LineString([\n",
" Point(\n",
" df_prestorm_slope.loc[site_id].prestorm_start_x,\n",
" df_prestorm_slope.loc[site_id].prestorm_start_z,\n",
" ),\n",
" Point(\n",
" df_prestorm_slope.loc[site_id].prestorm_start_x + 10000,\n",
" df_prestorm_slope.loc[site_id].prestorm_start_z +\n",
" 10000 * df_prestorm_slope.loc[site_id].prestorm_slope)\n",
" ])\n",
"\n",
" # Where prestorm slope projection intersects shelf line\n",
" prestorm_slope_shelf_int = prestorm_slope_line.intersection(shelf_line)\n",
"\n",
" # Distance between dune/shelf intersection and prestorm/shelf intersection\n",
" dist_shelf_prestorm_ints = prestorm_slope_shelf_int.distance(\n",
" dune_shelf_int)\n",
"\n",
" best_slope_pt = shelf_line.interpolate(\n",
" shelf_line.project(dune_shelf_int) + 0.3 * (shelf_line.project(prestorm_slope_shelf_int) -\n",
" shelf_line.project(dune_shelf_int)))\n",
" \n",
" best_slope =(df_prestorm_slope.loc[site_id].prestorm_start_z-best_slope_pt.xy[1][0])/(df_prestorm_slope.loc[site_id].prestorm_start_x-best_slope_pt.xy[0][0])\n",
" \n",
" if not prestorm_slope_shelf_int.is_empty:\n",
" ax1.plot(\n",
" prestorm_slope_shelf_int.xy[0],\n",
" prestorm_slope_shelf_int.xy[1],\n",
" marker='x',\n",
" color='#999999',\n",
" label='Prestorm slope/shelf\\nprojected intersection')\n",
" ax1.plot(\n",
" prestorm_slope_line.xy[0],\n",
" prestorm_slope_line.xy[1],\n",
" color='#999999',\n",
" linestyle='--',\n",
" label='Prestorm slope projected line')\n",
" ax1.plot(\n",
" [df_prestorm_slope.loc[site_id].prestorm_start_x,\n",
" best_slope_pt.xy[0][0]],\n",
" [df_prestorm_slope.loc[site_id].prestorm_start_z,\n",
" best_slope_pt.xy[1][0]],\n",
" color='red',\n",
" linestyle='--',\n",
" label='Best slope ({:.3f})'.format(-best_slope))\n",
" \n",
" # TEMP Target slopes\n",
" target_slopes = {\n",
" 'NARRA0004': 0.076,\n",
" 'NARRA0008': 0.093,\n",
" 'NARRA0012': 0.060,\n",
" 'NARRA0016': 0.11,\n",
" 'NARRA0021': 0.063,\n",
" 'NARRA0023': 0.061,\n",
" 'NARRA0027': 0.060,\n",
" 'NARRA0031': 0.057,\n",
" }\n",
"\n",
" target_direction = {\n",
" 'NARRA0004': \"flatter\",\n",
" 'NARRA0008': \"steeper\",\n",
" 'NARRA0012': \"flatter\",\n",
" 'NARRA0016': \"flatter\",\n",
" 'NARRA0021': \"steeper\",\n",
" 'NARRA0023': \"steeper\",\n",
" 'NARRA0027': \"steeper\",\n",
" 'NARRA0031': \"steeper\",\n",
" }\n",
" ax1.plot([dune_face_toe.xy[0][0], dune_face_toe.xy[0][0] + 1000], [\n",
" dune_face_toe.xy[1][0],\n",
" dune_face_toe.xy[1][0] - 1000 * target_slopes[site_id]\n",
" ],\n",
" color='red',\n",
" label='Target slope\\n({} than {:.3f})'.format(\n",
" target_direction[site_id], target_slopes[site_id]))\n",
"\n",
" ax1.set_xlim([100, 800])\n",
" ax1.set_ylim([-15, 12])\n",
"# ax1.set_xlim([100, 600])\n",
"# ax1.set_ylim([-10, 12])\n",
"\n",
" # ax1.set_xlim([df_dunes.loc[site_id].dune_crest_x - 50,\n",
" # intersection.xy[0][0] + 50])\n",
" # ax1.set_ylim([intersection.xy[1][0] -3,\n",
" # df_dunes.loc[site_id].dune_crest_z + 3])\n",
"\n",
" ax1.set_title(site_id)\n",
" ax1.legend(loc='upper right', prop={'size': 10})\n",
" f.savefig('08-{}.png'.format(site_id), dpi=600)\n",
" plt.show()\n",
" plt.close()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dune_shelf_int"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"hide_input": false,
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
},
"toc": {
"base_numbering": 1,
"nav_menu": {},
"number_sections": true,
"sideBar": true,
"skip_h1_title": false,
"title_cell": "Table of Contents",
"title_sidebar": "Contents",
"toc_cell": false,
"toc_position": {},
"toc_section_display": true,
"toc_window_display": false
},
"varInspector": {
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"delete_cmd_postfix": "",
"delete_cmd_prefix": "del ",
"library": "var_list.py",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"delete_cmd_postfix": ") ",
"delete_cmd_prefix": "rm(",
"library": "var_list.r",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
],
"window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 2
}