{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# TWL Exceedance" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Setup notebook" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Enable autoreloading of our modules. \n", "# Most of the code will be located in the /src/ folder, \n", "# and then called from the notebook.\n", "%matplotlib inline\n", "%reload_ext autoreload\n", "%autoreload" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from IPython.core.debugger import set_trace\n", "\n", "import pandas as pd\n", "import numpy as np\n", "import os\n", "import decimal\n", "import plotly\n", "import plotly.graph_objs as go\n", "import plotly.plotly as py\n", "import plotly.tools as tls\n", "import plotly.figure_factory as ff\n", "from plotly import tools\n", "import plotly.io as pio\n", "from scipy import stats\n", "import math\n", "import matplotlib\n", "from matplotlib import cm\n", "import colorlover as cl\n", "\n", "from ipywidgets import widgets, Output\n", "from IPython.display import display, clear_output, Image, HTML\n", "\n", "from sklearn.metrics import confusion_matrix" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Import data" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def df_from_csv(csv, index_col, data_folder='../data/interim'):\n", " print('Importing {}'.format(csv))\n", " return pd.read_csv(os.path.join(data_folder,csv), index_col=index_col)\n", "\n", "df_waves = df_from_csv('waves.csv', index_col=[0, 1])\n", "df_tides = df_from_csv('tides.csv', index_col=[0, 1])\n", "df_profiles = df_from_csv('profiles.csv', index_col=[0, 1, 2])\n", "df_sites = df_from_csv('sites.csv', index_col=[0])\n", "df_profile_features_crest_toes = df_from_csv('profile_features_crest_toes.csv', index_col=[0,1])\n", "\n", "# Note that the forecasted data sets should be in 
the same order for impacts and twls\n", "impacts = {\n", "    'forecasted': {\n", "        'foreshore_slope_sto06': df_from_csv('impacts_forecasted_foreshore_slope_sto06.csv', index_col=[0]),\n", "        'mean_slope_sto06': df_from_csv('impacts_forecasted_mean_slope_sto06.csv', index_col=[0]),\n", "    },\n", "    'observed': df_from_csv('impacts_observed.csv', index_col=[0])\n", "}\n", "\n", "\n", "twls = {\n", "    'forecasted': {\n", "        'foreshore_slope_sto06': df_from_csv('twl_foreshore_slope_sto06.csv', index_col=[0, 1]),\n", "        'mean_slope_sto06': df_from_csv('twl_mean_slope_sto06.csv', index_col=[0, 1]),\n", "    }\n", "}\n", "print('Done!')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Calculate vertical distribution of wave count\n", "For each site, calculate how many waves reached a certain elevation (store as a binned histogram)." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Helper functions\n", "def find_nearest(array, value):\n", "    \"\"\"Return (element, index) of the entry of array closest to value, ignoring NaNs.\n", "\n", "    Raises ValueError (from np.nanargmin) if array is empty or all-NaN.\n", "    \"\"\"\n", "    array = np.asarray(array)\n", "    idx = np.nanargmin(np.abs(array - value))\n", "    return array[idx], idx" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "data = []\n", "for site_id, df_site_twl in twls['forecasted']['mean_slope_sto06'].groupby('site_id'):\n", "\n", "    twl_eles_per_wave = []\n", "\n", "    # Iterate through each timestamp and calculate the number of waves at each interval.\n", "    # THIS LOOP IS SLOW\n", "    for row in df_site_twl.itertuples():\n", "\n", "        # Assume individual wave TWLs are normally distributed about tide+setup,\n", "        # with sigma = S_total/4 (so +/-2 sigma spans the total swash excursion)  # CHECK\n", "        distribution = stats.norm(loc=row.tide+row.setup, scale=row.S_total/4)\n", "\n", "        # Total number of waves we expect in this period\n", "        n_waves = int(3600 / row.Tp)  # Check that we have 1 hour\n", "\n", "        # Get z elevation of each wave twl in this hour and append to list\n", "        twl_eles_per_wave.extend([distribution.ppf(1-x/n_waves) for x in range(1,n_waves+1)])\n", "\n", "    # Remove nans and infs # CHECK WHY INF\n", "    twl_eles_per_wave = list(np.asarray(twl_eles_per_wave)[np.isfinite(twl_eles_per_wave)])\n", "\n", "    # Sort wave twl z elevations in descending list\n", "    twl_eles_per_wave.sort(reverse=True)\n", "\n", "    # Get index of closest value to the dune toe. This is the number of waves that exceeded the dune toe.\n", "    try:\n", "        dune_toe_z = df_profile_features_crest_toes.loc[(site_id,'prestorm'),'dune_toe_z']\n", "        _, idx = find_nearest(twl_eles_per_wave, dune_toe_z)\n", "    except (KeyError, ValueError):\n", "        # Site has no prestorm dune toe defined, or no finite TWL elevations to compare against\n", "        continue\n", "\n", "    # Get forecasted and observed impacts\n", "    forecasted_regime = impacts['forecasted']['mean_slope_sto06'].loc[site_id,'storm_regime']\n", "    observed_regime = impacts['observed'].loc[site_id,'storm_regime']\n", "\n", "    counts, bin_edges = np.histogram(twl_eles_per_wave, bins=100)\n", "\n", "    data.append({\n", "        'site_id': site_id,\n", "        'forecasted_regime': forecasted_regime,\n", "        'observed_regime': observed_regime,\n", "        'n_waves_exceeding_dune_toe': idx,\n", "        # Keep only the 500 highest wave TWLs (fewer if the site has fewer finite waves)\n", "        'n_waves': list(range(min(500, len(twl_eles_per_wave)))),\n", "        'truncated_twl_levels': twl_eles_per_wave[:500],\n", "        'truncated_dune_toe_z': dune_toe_z,\n", "        'full_counts': counts,\n", "        'full_bin_edges': bin_edges,\n", "    })\n", "\n", "    print('Done {}'.format(site_id))\n", "\n", "data_twl = data\n", "# df = pd.DataFrame(data)\n", "# df = df.set_index('site_id')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "counts, bin_edges = np.histogram(data_twl[0]['truncated_twl_levels'], bins=50)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Inspect the non-finite TWL elevations removed above (debug check for the INF issue)\n", "list(np.asarray(twl_eles_per_wave)[~np.isfinite(twl_eles_per_wave)])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig = tools.make_subplots(\n", "    rows=2,\n", "    cols=2,\n", "    specs=[[{}, {}], [{}, {}]],\n", "    
subplot_titles=('Swash/Swash', 'Swash/Collision',\n", "                    'Collision/Swash', 'Collision/Collision'),\n", "    shared_xaxes=True, shared_yaxes=True,)\n", "\n", "for site in data_twl:\n", "    # Map the forecasted/observed regime combination onto a subplot panel\n", "    # (titles above are Forecasted/Observed, laid out row-major)\n", "    if site['forecasted_regime'] == 'swash' and site[\n", "            'observed_regime'] == 'swash':\n", "        x_col = 1\n", "        y_col = 1\n", "    elif site['forecasted_regime'] == 'collision' and site[\n", "            'observed_regime'] == 'collision':\n", "        x_col = 2\n", "        y_col = 2\n", "    elif site['forecasted_regime'] == 'swash' and site[\n", "            'observed_regime'] == 'collision':\n", "        x_col = 2\n", "        y_col = 1\n", "    elif site['forecasted_regime'] == 'collision' and site[\n", "            'observed_regime'] == 'swash':\n", "        x_col = 1\n", "        y_col = 2\n", "    else:\n", "        continue\n", "\n", "    # append_trace signature is (trace, row, col): y_col selects the row, x_col the column\n", "    fig.append_trace(\n", "        go.Scattergl(\n", "            # x: wave TWL elevation relative to the dune toe; y: rank (number of waves exceeding)\n", "            x=[x - site['truncated_dune_toe_z'] for x in site['truncated_twl_levels']],\n", "            y=site['n_waves'],\n", "            name=site['site_id'],\n", "            line=dict(\n", "                color=('rgba(22, 22, 22, 0.2)'),\n", "                width=0.5,)),\n", "        y_col,\n", "        x_col)\n", "\n", "fig['layout'].update(showlegend=False, title='TWL exceedance by storm regime (forecasted/observed)',height=800,)\n", "\n", "for ax in ['yaxis','yaxis2']:\n", "    fig['layout'][ax]['range']= [0,100]\n", "\n", "for ax in ['xaxis', 'xaxis2']:\n", "    fig['layout'][ax]['range']= [-1,1]\n", "\n", "go.FigureWidget(fig)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig['layout']['yaxis']" ] } ], "metadata": { "hide_input": false, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, 
"language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.6" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": false, "toc_position": {}, "toc_section_display": true, "toc_window_display": false }, "varInspector": { "cols": { "lenName": 16, "lenType": 16, "lenVar": 40 }, "kernels_config": { "python": { "delete_cmd_postfix": "", "delete_cmd_prefix": "del ", "library": "var_list.py", "varRefreshCmd": "print(var_dic_list())" }, "r": { "delete_cmd_postfix": ") ", "delete_cmd_prefix": "rm(", "library": "var_list.r", "varRefreshCmd": "cat(var_dic_list()) " } }, "types_to_exclude": [ "module", "function", "builtin_function_or_method", "instance", "_Feature" ], "window_display": false } }, "nbformat": 4, "nbformat_minor": 2 }