Compare commits


20 Commits

Author SHA1 Message Date
Chris Leaman f6c43fda38 Merge branch 'bugfix/fix-mean-slopes' into develop 6 years ago
Chris Leaman 5f59c8f8ee Update slope function to accept top x coordinate
Needed to ensure the mean slope is calculated correctly. If only dune toe z coordinates are given, the function can sometimes choose the wrong x coordinate when there are multiple crossings of that z coordinate.
6 years ago
Chris Leaman b8ead25cd5 Disable numpy futurewarnings when running from CLI 6 years ago
Chris Leaman e3b782abbd Update notebooks 6 years ago
Chris Leaman dcadf0bf12 Add jupyter packages to environment.yml 6 years ago
Chris Leaman 494c8d9052 Update README.md 6 years ago
Chris Leaman 174ddce102 Update Makefile 6 years ago
Chris Leaman 4910d103f2 Update .gitignore for images in notebook directory 6 years ago
Chris Leaman 75f372154d Update logging format 6 years ago
Chris Leaman 5b9b4141b3 Fix bugs for forecasting impacts 6 years ago
Chris Leaman 44310e3be4 Fix bugs for forecasting impacts 6 years ago
Chris Leaman c73e2dd254 Update notebooks 6 years ago
Chris Leaman 06cc97ef7e Add functions to parse Tom Beuzen's dune toe and crests 6 years ago
Chris Leaman 23557cdf8e Update notebooks 6 years ago
Chris Leaman 8991657410 Add instructions for adding package 6 years ago
Chris Leaman 2a98dc2b01 Fix environment: add autopep8 for jupyter notebooks 6 years ago
Chris Leaman 64602d5c7d Fix environment 6 years ago
Chris Leaman dac2f9824d Fix environment 6 years ago
Chris Leaman c391edf755 Keep old index number of site when parsing .mat file 6 years ago
Chris Leaman 7e314757ce Fix typos in README.md 6 years ago

.gitignore

@@ -1,5 +1,7 @@
# Jupyter NB Checkpoints
.ipynb_checkpoints/
/notebooks/*.png
# exclude data from source control by default
/data/

@@ -11,20 +11,30 @@ CURRENT_DIR = $(shell pwd)
###############################
# Create python virtual environment
. PHONY: venv_init
.PHONY: venv-init
venv-init: ##@environment Setup virtual environment
conda create -f environment.yml --prefix=.venv python=3.7
conda env create -f environment.yml --prefix=.venv python=3.6
.PHONY: venv-activate
venv-activate: ##@environment Activates the virtual environment
activate $(CURRENT_DIR)/.venv
.PHONY: venv-update
venv-update: ##@environment Updates to latest packages
conda update ipykernel && conda update --prefix .venv --all
.PHONY: venv-requirements-install
venv-requirements-install: ##@environment Ensures environment.yml packages are installed
conda env update
# The environment.yml file should really be created by hand, but
# this provides a good starting point.
.PHONY: venv-requirements-export
venv-requirements-export: ##@environment Exports current environment to environment.yml
conda env export --file environment.yml
# To install new packages: conda install --prefix .venv PACKAGE
###############################
# Get data from network drive
@@ -82,14 +92,22 @@ impacts: ./data/interim/impacts_forecasted_foreshore_slope_sto06.csv ./data/inte
--input-csv "./data/interim/sites.csv" \
--output-shp "./data/interim/sites.shp"
# Creates a .csv of our dune toe and crest profile features
./data/interim/profile_features.csv: ./data/raw/profile_features/dune_crests.shp ./data/raw/profile_features/dune_toes.shp ./data/interim/sites.csv ./data/interim/profiles.csv
# # Creates a .csv of our dune toe and crest profile features from .shp file
# ./data/interim/profile_features.csv: ./data/raw/profile_features/dune_crests.shp ./data/raw/profile_features/dune_toes.shp ./data/interim/sites.csv ./data/interim/profiles.csv
# activate ./.venv && python ./src/cli.py create-profile-features \
# --dune-crest-shp "./data/raw/profile_features/dune_crests.shp" \
# --dune-toe-shp "./data/raw/profile_features/dune_toes.shp" \
# --sites-csv "./data/interim/sites.csv" \
# --profiles-csv "./data/interim/profiles.csv" \
# --output-csv "./data/interim/profile_features.csv"
# Create a .csv of our dune toe and crest profile features from Tom Beuzen's .mat file
./data/interim/profile_features.csv: ./data/raw/profile_features_tom_beuzen/*.mat ./data/interim/sites.csv
activate ./.venv && python ./src/cli.py create-profile-features \
--dune-crest-shp "./data/raw/profile_features/dune_crests.shp" \
--dune-toe-shp "./data/raw/profile_features/dune_toes.shp" \
--crest-mat "./data/raw/profile_features_tom_beuzen/J16_DuneCrest.mat" \
--toe-mat "./data/raw/profile_features_tom_beuzen/J16_DuneToe.mat" \
--sites-csv "./data/interim/sites.csv" \
--profiles-csv "./data/interim/profiles.csv" \
--output-csv "./data/interim/profile_features.csv"
--output-file "./data/interim/profile_features.csv"
# Creates a forecast of twl using sto06 and time-varying prestorm foreshore slope
./data/interim/twl_foreshore_slope_sto06.csv: ./data/interim/waves.csv ./data/interim/tides.csv ./data/interim/profiles.csv ./data/interim/sites.csv ./data/interim/profile_features.csv
@@ -112,6 +130,17 @@ impacts: ./data/interim/impacts_forecasted_foreshore_slope_sto06.csv ./data/inte
--slope "mean" \
--output-file "./data/interim/twl_mean_slope_sto06.csv"
./data/interim/twl_poststorm_mean_slope_sto06.csv: ./data/interim/waves.csv ./data/interim/tides.csv ./data/interim/profiles.csv ./data/interim/sites.csv ./data/interim/profile_features.csv
activate ./.venv && python ./src/cli.py create-twl-forecast \
--waves-csv "./data/interim/waves.csv" \
--tides-csv "./data/interim/tides.csv" \
--profiles-csv "./data/interim/profiles.csv" \
--profile-features-csv "./data/interim/profile_features.csv" \
--runup-function "sto06" \
--slope "mean" \
--profile-type "poststorm" \
--output-file "./data/interim/twl_poststorm_mean_slope_sto06.csv"
./data/interim/impacts_observed.csv: ./data/interim/profiles.csv ./data/interim/profile_features.csv
activate ./.venv && python ./src/cli.py create-observed-impacts \
--profiles-csv "./data/interim/profiles.csv" \
@@ -130,6 +159,12 @@ impacts: ./data/interim/impacts_forecasted_foreshore_slope_sto06.csv ./data/inte
--forecasted-twl-csv "./data/interim/twl_foreshore_slope_sto06.csv" \
--output-file "./data/interim/impacts_forecasted_foreshore_slope_sto06.csv"
./data/interim/impacts_forecasted_poststorm_mean_slope_sto06.csv: ./data/interim/profile_features.csv ./data/interim/twl_poststorm_mean_slope_sto06.csv
activate ./.venv && python ./src/cli.py create-forecasted-impacts \
--profile-features-csv "./data/interim/profile_features.csv" \
--forecasted-twl-csv "./data/interim/twl_poststorm_mean_slope_sto06.csv" \
--output-file "./data/interim/impacts_forecasted_poststorm_mean_slope_sto06.csv"
###############################
# Misc commands
@@ -168,5 +203,3 @@ HELP_FUN = \
help: ##@other Show this help.
@perl -e '$(HELP_FUN)' $(MAKEFILE_LIST)

@@ -1,5 +1,5 @@
# 2016 Narrabeen Storm EWS Performance
This repository investigates whether the storm impacts (i.e. Sallenger, 2000) f the June 2016 Narrabeen Storm could have been forecasted in advance.
This repository investigates whether the storm impacts (i.e. Sallenger, 2000) of the June 2016 Narrabeen Storm could have been forecasted in advance.
## Repository and analysis format
This repository follows the [Cookiecutter Data Science](https://drivendata.github.io/cookiecutter-data-science/)
@@ -69,9 +69,9 @@ been corrected for systematic errors, so actual elevations should be taken from
- [ ] Set up pre-commit hook for automatic code formatting using [black](https://ljvmiranda921.github.io/notebook/2018/06/21/precommits-using-black-and-flake8/). Low priority as black can be run with the command `make format`.
- [ ] Raw tide WLs are interpolated based on location from tide gauges. This probably isn't the most accurate method, but should have a small effect since surge elevation was low during this event. Need to assess the effect of this method.
- [ ] Estimate max TWL from the elevation where pre-storm and post-storm profiles are the same. Need to think more about this, as runup impacting the dune toe will move the dune face back, incorrectly raising the observed TWL. Perhaps this estimation of max TWL is only useful for the swash regime.
- [ ] Implement (bayesian change detection algorithm)[https://github.com/hildensia/bayesian_changepoint_detection] to help detect dune crests and toes from profiles. Probably low priority at the moment since we are doing manual detection.
- [ ] Implement [bayesian change detection algorithm](https://github.com/hildensia/bayesian_changepoint_detection) to help detect dune crests and toes from profiles. Probably low priority at the moment since we are doing manual detection.
- [ ] Implement dune impact calculations as per Palmsten & Holman. Calculation should be done in a new dataframe.
- [ ] Implement data/interim/*.csv file checking using py.test. Check for correct columns, number of nans etc. Testing of code is probably a lower priority than just checking the interim data files at the moment.
- [ ] Implement data/interim/*.csv file checking using py.test. Check for correct columns, number of nans etc. Testing of code is probably a lower priority than just checking the interim data files at the moment. Some functions which should be tested are the slope functions in `forecast_twl.py`, as these can be tricky with different profiles; a minimal sketch of such a test follows this list.
- [ ] Investigate using [modin](https://github.com/modin-project/modin) to help speed up analysis.
- [ ] Need to think about how relative imports are handled, see [here](https://chrisyeh96.github.io/2017/08/08/definitive-guide-python-imports.html). Maybe the click CLI interface should be moved to the `./src/` folder and it can import all the other packages?
- [ ] Simplify runup_models in Stockdon06 - we should really only have one function for each runup model. Need to make it work with individual values or entire dataframe. Use (np.maskedarray)[https://docs.scipy.org/doc/numpy-1.15.0/reference/maskedarray.generic.html]
- [X] Need to think about how relative imports are handled, see [here](https://chrisyeh96.github.io/2017/08/08/definitive-guide-python-imports.html). Maybe the click CLI interface should be moved to the `./src/` folder and it can import all the other packages?
- [ ] Simplify runup_models in Stockdon06 - we should really only have one function for each runup model. Need to make it work with individual values or entire dataframe. Use [np.maskedarray](https://docs.scipy.org/doc/numpy-1.15.0/reference/maskedarray.generic.html)
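A minimal sketch of the slope-function test flagged in the py.test item above. It assumes `slope_from_profile` keeps the signature shown in the `forecast_twl.py` diff further down this page; the profile coordinates and expected values are invented for illustration.

```python
# Hypothetical py.test sketch; slope_from_profile's signature is taken from
# the forecast_twl.py diff below, everything else here is invented.
import pytest

from forecast_twl import slope_from_profile


def test_slope_on_planar_profile():
    # A uniform 1:10 slope from z=5 at x=0 down to z=0 at x=50
    profile_x = [0, 10, 20, 30, 40, 50]
    profile_z = [5, 4, 3, 2, 1, 0]
    beta = slope_from_profile(profile_x, profile_z, top_elevation=3, btm_elevation=1)
    # The sign convention depends on the implementation, so check magnitude only
    assert abs(beta) == pytest.approx(0.1)


def test_bermed_profile_with_top_x_pinned():
    # This profile crosses z=2.0 more than once, so top_x pins the intended
    # crossing instead of letting the function pick one itself
    profile_x = [0, 10, 20, 30, 40]
    profile_z = [3.0, 1.5, 2.5, 2.0, 0.0]
    beta = slope_from_profile(profile_x, profile_z, top_elevation=2.0,
                              btm_elevation=0.5, top_x=30)
    assert beta is not None
```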

@@ -1,149 +1,161 @@
name: C:\Users\z5189959\Desktop\nsw-2016-storm-impact\.venv
channels:
- plotly
- defaults
- conda-forge
dependencies:
- appdirs=1.4.3=py_1
- attrs=18.2.0=py_0
- backcall=0.1.0=py_0
- black=18.9b0=py_0
- bleach=3.0.2=py_0
- boost=1.66.0=py36_vc14_1
- boost-cpp=1.66.0=vc14_1
- ca-certificates=2018.10.15=ha4d7672_0
- certifi=2018.10.15=py36_1000
- colorama=0.4.0=py_0
- colorlover=0.2.1=py_0
- curl=7.60.0=vc14_0
- entrypoints=0.2.3=py36_1002
- expat=2.2.5=vc14_0
- freetype=2.8.1=vc14_0
- freexl=1.0.2=vc14_2
- geotiff=1.4.2=vc14_1
- hdf4=4.2.13=vc14_0
- hdf5=1.10.1=vc14_2
- icu=58.2=vc14_0
- ipykernel=5.1.0=py36h39e3cac_1001
- ipython=7.1.1=py36h39e3cac_1000
- jedi=0.13.1=py36_1000
- jinja2=2.10=py_1
- jpeg=9b=vc14_2
- jupyter_client=5.2.3=py_1
- jupyter_contrib_core=0.3.3=py_2
- jupyter_contrib_nbextensions=0.5.0=py36_1000
- jupyter_highlight_selected_word=0.2.0=py36_1000
- jupyter_latex_envs=1.4.4=py36_1000
- jupyter_nbextensions_configurator=0.4.0=py36_1000
- kealib=1.4.7=vc14_4
- krb5=1.14.6=vc14_0
- libgdal=2.2.4=vc14_5
- libiconv=1.14=vc14_4
- libnetcdf=4.6.1=vc14_2
- libpng=1.6.34=vc14_0
- libpq=9.6.3=vc14_0
- libsodium=1.0.16=vc14_0
- libspatialite=4.3.0a=vc14_19
- libtiff=4.0.9=vc14_0
- libxml2=2.9.5=vc14_1
- libxslt=1.1.32=vc14_0
- lxml=4.2.3=py36heafd4d3_0
- markupsafe=1.1.0=py36hfa6e2cd_1000
- matplotlib=2.2.2=py36_1
- mistune=0.8.4=py36hfa6e2cd_1000
- nbconvert=5.3.1=py_1
- notebook=5.7.2=py36_1000
- openjpeg=2.3.0=vc14_2
- openssl=1.0.2p=hfa6e2cd_1001
- pandoc=2.4=0
- pandocfilters=1.4.2=py_1
- parso=0.3.1=py_0
- pickleshare=0.7.5=py36_1000
- proj4=4.9.3=vc14_5
- prometheus_client=0.4.2=py_0
- prompt_toolkit=2.0.7=py_0
- pygments=2.2.0=py_1
- python=3.6.6=he025d50_0
- pywinpty=0.5.4=py36_1002
- pyzmq=17.1.2=py36hf576995_1001
- qt=5.6.2=vc14_1
- send2trash=1.5.0=py_0
- sqlite=3.20.1=vc14_2
- terminado=0.8.1=py36_1001
- testpath=0.4.2=py36_1000
- tk=8.6.8=vc14_0
- toml=0.10.0=py_0
- vc=14=0
- wcwidth=0.1.7=py_1
- webencodings=0.5.1=py_1
- winpty=0.4.3=4
- xerces-c=3.2.0=vc14_0
- yaml=0.1.7=vc14_0
- zeromq=4.2.5=vc14_2
- zlib=1.2.11=vc14_0
- matplotlib-base=3.0.2=py36h3e3dc42_1001
- appdirs=1.4.3=py36h28b3542_0
- asn1crypto=0.24.0=py36_0
- attrs=18.2.0=py36h28b3542_0
- autopep8=1.4.3=py36_0
- backcall=0.1.0=py36_0
- blas=1.0=mkl
- bleach=3.0.2=py36_0
- boost=1.67.0=py36_4
- boost-cpp=1.67.0=hfa6e2cd_4
- ca-certificates=2018.03.07=0
- certifi=2018.10.15=py36_0
- cffi=1.11.5=py36h74b6da3_1
- chardet=3.0.4=py36_1
- click=7.0=py36_0
- click-plugins=1.0.4=py36_0
- cligj=0.5.0=py36_0
- colorama=0.4.0=py36_0
- cryptography=2.3.1=py36h74b6da3_0
- curl=7.62.0=h2a8f88b_0
- cycler=0.10.0=py36h009560c_0
- decorator=4.3.0=py36_0
- entrypoints=0.2.3=py36_2
- expat=2.2.5=he025d50_0
- fiona=1.7.10=py36h5bf8d1d_0
- freetype=2.9.1=ha9979f8_1
- freexl=1.0.5=hfa6e2cd_0
- gdal=2.2.2=py36hcebd033_1
- geos=3.6.2=h9ef7328_2
- geotiff=1.4.2=hd5bfa41_0
- hdf4=4.2.13=h712560f_2
- hdf5=1.10.1=h98b8871_1
- icc_rt=2017.0.4=h97af966_0
- icu=58.2=ha66f8fd_1
- idna=2.7=py36_0
- intel-openmp=2019.1=144
- ipykernel=5.1.0=py36h39e3cac_0
- ipython=7.2.0=py36h39e3cac_0
- ipython_genutils=0.2.0=py36h3c5d0ee_0
- ipywidgets=7.4.2=py36_0
- jedi=0.13.1=py36_0
- jinja2=2.10=py36_0
- jpeg=9b=hb83a4c4_2
- jsonschema=2.6.0=py36h7636477_0
- jupyter_client=5.2.3=py36_0
- jupyter_core=4.4.0=py36_0
- kealib=1.4.7=ha5b336b_5
- kiwisolver=1.0.1=py36h6538335_0
- libboost=1.67.0=hd9e427e_4
- libcurl=7.61.1=h7602738_0
- krb5=1.16.1=h038dc86_6
- libboost=1.67.0=hfd51bdf_4
- libcurl=7.62.0=h2a8f88b_0
- libgdal=2.2.2=h2727f2b_1
- libiconv=1.15=h1df5818_7
- libkml=1.3.0=he5f2a48_4
- libnetcdf=4.4.1.1=h825a56a_8
- libpng=1.6.35=h2a8f88b_0
- libpq=10.5=h5fe2233_0
- libsodium=1.0.16=h9d3ae62_0
- libspatialite=4.3.0a=h383548d_18
- libssh2=1.8.0=hd619d38_4
- libtiff=4.0.9=h36446d0_2
- libxml2=2.9.8=hadb2253_1
- libxslt=1.1.32=hf6f1972_0
- lxml=4.2.5=py36hef2cd61_0
- m2w64-gcc-libgfortran=5.3.0=6
- m2w64-gcc-libs=5.3.0=7
- m2w64-gcc-libs-core=5.3.0=7
- m2w64-gmp=6.1.0=2
- m2w64-libwinpthread-git=5.0.0.4634.697f757=2
- markupsafe=1.1.0=py36he774522_0
- matplotlib=3.0.1=py36hc8f65d3_0
- mistune=0.8.4=py36he774522_0
- mkl=2018.0.3=1
- mkl_fft=1.0.6=py36hdbbee80_0
- mkl_random=1.0.1=py36h77b88f5_1
- msys2-conda-epoch=20160418=1
- munch=2.3.2=py36_0
- nbconvert=5.3.1=py36_0
- nbformat=4.4.0=py36h3a5bc1b_0
- notebook=5.7.2=py36_0
- numpy=1.15.4=py36ha559c80_0
- numpy-base=1.15.4=py36h8128ebf_0
- openjpeg=2.3.0=h5ec785f_1
- openssl=1.0.2p=hfa6e2cd_0
- pandas=0.23.4=py36h830ac7b_0
- pandoc=2.2.3.2=0
- pandocfilters=1.4.2=py36_1
- parso=0.3.1=py36_0
- pickleshare=0.7.5=py36_0
- pip=18.1=py36_0
- plotly=3.4.1=py36h28b3542_0
- plotly=3.4.2=py36h28b3542_0
- proj4=4.9.3=hcf24537_7
- prometheus_client=0.4.2=py36_0
- prompt_toolkit=2.0.7=py36_0
- psutil=5.4.8=py36he774522_0
- py-boost=1.67.0=py36h8300f20_4
- pycodestyle=2.4.0=py36_0
- pycparser=2.19=py36_0
- pygments=2.2.0=py36hb010967_0
- pyopenssl=18.0.0=py36_0
- pyparsing=2.3.0=py36_0
- pyproj=1.9.5.1=py36_0
- pyqt=5.6.0=py36_2
- pyqt=5.9.2=py36h6538335_2
- pysocks=1.6.8=py36_0
- python=3.6.7=h33f27b4_1
- python-dateutil=2.7.5=py36_0
- pytz=2018.7=py36_0
- pywinpty=0.5.4=py36_0
- pyyaml=3.13=py36hfa6e2cd_0
- pyzmq=17.1.2=py36hfa6e2cd_0
- qt=5.9.6=vc14h1e9a669_2
- requests=2.20.1=py36_0
- retrying=1.3.3=py36_2
- scikit-learn=0.20.1=py36hb854c30_0
- scipy=1.1.0=py36h4f6bf74_1
- send2trash=1.5.0=py36_0
- setuptools=40.6.2=py36_0
- shapely=1.6.4=py36hc90234e_0
- sip=4.19.8=py36h6538335_0
- six=1.11.0=py36_1
- sqlite=3.25.3=he774522_0
- terminado=0.8.1=py36_1
- testpath=0.4.2=py36_0
- tk=8.6.8=hfa6e2cd_0
- toml=0.10.0=py36h28b3542_0
- tornado=5.1.1=py36hfa6e2cd_0
- traitlets=4.3.2=py36h096827d_0
- urllib3=1.23=py36_0
- vc=14.1=h0510ff6_4
- vs2015_runtime=14.15.26706=h3a45250_0
- wcwidth=0.1.7=py36h3d5aa90_0
- webencodings=0.5.1=py36_1
- wheel=0.32.3=py36_0
- widgetsnbextension=3.4.2=py36_0
- win_inet_pton=1.0.1=py36_1
- wincertstore=0.2=py36h7fe50ca_0
- winpty=0.4.3=4
- xerces-c=3.2.2=ha925a31_0
- xz=5.2.4=h2fa13f4_4
- yaml=0.1.7=hc54c509_2
- yapf=0.25.0=py36_0
- zeromq=4.2.5=he025d50_1
- zlib=1.2.11=h62dcd97_3
- plotly-orca=1.1.1=1
- pip:
- blackcellmagic==0.0.1
- mat4py==0.4.1
prefix: C:\Users\z5189959\Desktop\nsw-2016-storm-impact\.venv

File diff suppressed because one or more lines are too long

@@ -0,0 +1,627 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Investigate \"collision protection volume\" concept"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-05T02:45:14.908283Z",
"start_time": "2018-12-05T02:45:14.556163Z"
}
},
"outputs": [],
"source": [
"%matplotlib inline\n",
"%reload_ext autoreload\n",
"%autoreload"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-05T02:45:34.323928Z",
"start_time": "2018-12-05T02:45:14.911088Z"
}
},
"outputs": [],
"source": [
"from IPython.core.debugger import set_trace\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"import os\n",
"\n",
"import plotly\n",
"import plotly.graph_objs as go\n",
"import plotly.plotly as py\n",
"import plotly.tools as tls\n",
"import plotly.figure_factory as ff\n",
"import plotly.io as pio"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load data\n",
"Load data from the `./data/interim/` folder and parse into `pandas` dataframes."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-05T02:45:53.010702Z",
"start_time": "2018-12-05T02:45:34.324930Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Importing profiles.csv\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\z5189959\\Desktop\\nsw-2016-storm-impact\\.venv\\lib\\site-packages\\numpy\\lib\\arraysetops.py:522: FutureWarning:\n",
"\n",
"elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Importing profile_features.csv\n",
"Importing impacts_forecasted_foreshore_slope_sto06.csv\n",
"Importing impacts_forecasted_mean_slope_sto06.csv\n",
"Importing impacts_observed.csv\n",
"Importing twl_foreshore_slope_sto06.csv\n",
"Importing twl_mean_slope_sto06.csv\n",
"Done!\n"
]
}
],
"source": [
"def df_from_csv(csv, index_col, data_folder='../data/interim'):\n",
" print('Importing {}'.format(csv))\n",
" return pd.read_csv(os.path.join(data_folder,csv), index_col=index_col)\n",
"\n",
"df_profiles = df_from_csv('profiles.csv', index_col=[0, 1, 2])\n",
"df_profile_features = df_from_csv('profile_features.csv', index_col=[0])\n",
"\n",
"impacts = {\n",
" 'forecasted': {\n",
" 'foreshore_slope_sto06': df_from_csv('impacts_forecasted_foreshore_slope_sto06.csv', index_col=[0]),\n",
" 'mean_slope_sto06': df_from_csv('impacts_forecasted_mean_slope_sto06.csv', index_col=[0]),\n",
" },\n",
" 'observed': df_from_csv('impacts_observed.csv', index_col=[0])\n",
" }\n",
"\n",
"twls = {\n",
" 'forecasted': {\n",
" 'foreshore_slope_sto06': df_from_csv('twl_foreshore_slope_sto06.csv', index_col=[0, 1]),\n",
" 'mean_slope_sto06':df_from_csv('twl_mean_slope_sto06.csv', index_col=[0, 1]),\n",
" }\n",
"}\n",
"\n",
"print('Done!')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Lets define a function to calculate the \"collision protection volume\" based on prestorm profiles."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Get berm feature functions\n",
"Define a couple of functions which are going to help us get features of our berms."
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-05T03:01:56.646213Z",
"start_time": "2018-12-05T03:01:56.366466Z"
},
"code_folding": []
},
"outputs": [],
"source": [
"from shapely.geometry import Point, LineString, Polygon\n",
"\n",
"\n",
"def collision_protection_vol(x, z, d_low_x, d_low_z, lower_z, angle):\n",
" # First, get the bounding line strings of our protection volume\n",
" lower_line = LineString([Point(min(x), lower_z), Point(max(x), lower_z)])\n",
" profile_line = LineString([Point(x_coord, z_coord) for x_coord, z_coord in zip(x, z)\n",
" if all([not np.isnan(x_coord), not np.isnan(z_coord)])])\n",
" slope_line = LineString([Point(d_low_x, d_low_z),\n",
" Point(max(x), d_low_z - max(x) * np.sin(np.deg2rad(angle)))])\n",
"\n",
" # Work out where our lower line and slope line intersect\n",
" lower_profile_intersection = lower_line.intersection(profile_line)\n",
" d_protected_intersection = lower_line.intersection(slope_line)\n",
"\n",
" # Define the perimeter of the protection area\n",
" profile_protected = LineString([Point(x_coord, z_coord) for x_coord, z_coord\n",
" in zip(profile_line.xy[0], profile_line.xy[1])\n",
" if d_low_x < x_coord < lower_profile_intersection.xy[0][0]]\n",
" + [lower_profile_intersection]\n",
" + [d_protected_intersection]\n",
" + [Point(d_low_x, d_low_z)])\n",
"\n",
" # Convert to polygon and return the area (m3/m)\n",
" protection_area_poly = Polygon(profile_protected)\n",
" protection_area_vol = protection_area_poly.area\n",
" return protection_area_vol\n",
"\n",
"\n",
"def get_berm_width(z, d_low_x):\n",
" \"\"\"\n",
" Returns the width of the berm, defined by the distance between dune toe to z=0\n",
" \"\"\"\n",
" x_seaward_limit = z.dropna().tail(1).reset_index().x[0]\n",
" return x_seaward_limit - d_low_x\n",
"\n",
"\n",
"\n",
"\n",
"site_id = 'NARRA0018'\n",
"profile_type = 'prestorm'\n",
"query = \"site_id == '{}' and profile_type == '{}'\".format(\n",
" site_id, profile_type)\n",
"prestorm_profile = df_profiles.query(query)\n",
"profile_features = df_profile_features.query(query)\n",
"\n",
"x = prestorm_profile.index.get_level_values('x')\n",
"z = prestorm_profile.z\n",
"d_low_x = profile_features.dune_toe_x.tolist()[0]\n",
"d_low_z = profile_features.dune_toe_z.tolist()[0]\n",
"angle = 60 # degrees from the horizontal\n",
"lower_z = 0.5 # from mhw\n",
"\n",
"vol = collision_protection_vol(x, z, d_low_x, d_low_z, lower_z, angle)\n",
"berm_width = get_berm_width(z, d_low_x)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-05T02:45:54.224110Z",
"start_time": "2018-12-05T02:45:54.030142Z"
}
},
"outputs": [],
"source": [
"from datetime import timedelta\n",
"\n",
"def wl_time(t, wl, z_lower, z_upper):\n",
" \"\"\"\n",
" Returns the amount of time the water level is between two elevations.\n",
" \"\"\"\n",
" df_wl = pd.DataFrame.from_records([(t_val, R2_val) for t_val, R2_val in zip(t,R2)], columns=['datetime','wl'])\n",
" df_wl.set_index(pd.DatetimeIndex(df_wl['datetime']),inplace=True)\n",
" df_wl.drop(columns=['datetime'], inplace=True)\n",
" \n",
" # Assumes that each record is one hour... probably need to check this\n",
" hours = len(df_wl.query('{} < wl < {}'.format(z_lower, z_upper)))\n",
" return timedelta(hours=hours)\n",
"\n",
"def wave_power(t, wl, z_lower, z_upper, Hs0, Tp):\n",
" \"\"\"\n",
" Returns the cumulative wave power when the water level is between two elevations.\n",
" \"\"\"\n",
" df_wl = pd.DataFrame.from_records([(t_val, R2_val,Hs0_val,Tp_val) for t_val, R2_val,Hs0_val,Tp_val in zip(t,R2,Hs0,Tp)], columns=['datetime','wl', 'Hs0','Tp'])\n",
" df_wl.set_index(pd.DatetimeIndex(df_wl['datetime']),inplace=True)\n",
" df_wl.drop(columns=['datetime'], inplace=True)\n",
" \n",
" # Assumes that each record is one hour... probably need to check this\n",
" rho = 1025 # kg/m3\n",
" g = 9.8 # m/s2\n",
" df_wl_times = df_wl.query('{} < wl < {}'.format(z_lower, z_upper))\n",
" power = rho * g ** 2 / 64 / np.pi * df_wl_times.Hs0 ** 2 * df_wl_times.Tp\n",
" return power.sum()\n",
"\n",
"df_twl = twls['forecasted']['mean_slope_sto06']\n",
"df_twl_site = df_twl.query(\"site_id == '{}'\".format(site_id))\n",
"\n",
"R2 = df_twl_site.R2.tolist()\n",
"t = df_twl_site.index.get_level_values('datetime')\n",
"z_lower = 0.5\n",
"z_upper = d_low_z\n",
"\n",
"exposed_time = wl_time(t, R2, z_lower,z_upper)\n",
"\n",
"Hs0 = df_twl.Hs0.tolist()\n",
"Tp = df_twl.Tp.tolist()\n",
"wave_p = wave_power(t, R2, z_lower,z_upper,Hs0, Tp)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-05T02:45:54.231129Z",
"start_time": "2018-12-05T02:45:54.225660Z"
}
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 57,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-05T03:37:45.472885Z",
"start_time": "2018-12-05T03:37:45.462857Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"0.96"
]
},
"execution_count": 57,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"def dune_toe_elevation_change(site_id, df_profile_features):\n",
" query = \"site_id == '{}'\".format(site_id)\n",
" profile_features = df_profile_features.query(query)\n",
" prestorm_dune_toe_z = profile_features.query(\"profile_type=='prestorm'\").dune_toe_z.tolist()[0]\n",
" poststorm_dune_toe_z = profile_features.query(\"profile_type=='poststorm'\").dune_toe_z.tolist()[0]\n",
" return prestorm_dune_toe_z - poststorm_dune_toe_z\n",
"\n",
"toe_ele_change = dune_toe_elevation_change(\"MANNING0081\", df_profile_features)\n",
"toe_ele_change"
]
},
{
"cell_type": "code",
"execution_count": 62,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-05T03:45:45.203827Z",
"start_time": "2018-12-05T03:45:13.608478Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0 of 816\n",
"20 of 816\n",
"40 of 816\n",
"60 of 816\n",
"80 of 816\n",
"100 of 816\n"
]
}
],
"source": [
"vols = []\n",
"exposed_times = []\n",
"toe_ele_changes = []\n",
"wave_powers = []\n",
"berm_widths = []\n",
"swash_vol_changes = []\n",
"dune_face_vol_changes = []\n",
"site_ids_to_plot = []\n",
"\n",
"# Get site ids where we observed collision\n",
"observed_site_ids = impacts['observed'].query(\"storm_regime=='collision'\").index.get_level_values('site_id').unique()\n",
"\n",
"# # Get site ids where we forecast swash\n",
"# forecasted_site_ids = impacts['forecasted']['mean_slope_sto06'].query(\"storm_regime=='swash'\").index.get_level_values('site_id').unique()\n",
"\n",
"# site_ids = set(observed_site_ids).intersection(set(forecasted_site_ids))\n",
"\n",
"site_ids = observed_site_ids\n",
"\n",
"# Calculate for each site\n",
"\n",
"for n, site_id in enumerate(site_ids):\n",
" \n",
" if n%20 ==0:\n",
" print('{} of {}'.format(n, len(site_ids)))\n",
" \n",
" try:\n",
" query = \"site_id == '{}' and profile_type == '{}'\".format(site_id, 'prestorm')\n",
" prestorm_profile = df_profiles.query(query)\n",
" profile_features = df_profile_features.query(query)\n",
"\n",
" vol = collision_protection_vol(x = prestorm_profile.index.get_level_values('x'),\n",
" z = prestorm_profile.z,\n",
" d_low_x = profile_features.dune_toe_x.tolist()[0],\n",
" d_low_z = profile_features.dune_toe_z.tolist()[0],\n",
" lower_z = profile_features.dune_toe_z.tolist()[0] - 2, # from mhw\n",
" angle = 60, # degrees from the horizontal\n",
" )\n",
" \n",
" df_twl = twls['forecasted']['mean_slope_sto06']\n",
" df_twl_site = df_twl.query(\"site_id == '{}'\".format(site_id))\n",
" \n",
" berm_width = get_berm_width(z = prestorm_profile.z,\n",
" d_low_x = profile_features.dune_toe_x.tolist()[0]) \n",
" \n",
" exposed_time = wl_time(t = df_twl_site.index.get_level_values('datetime'),\n",
" wl = df_twl_site.R2.tolist(),\n",
" z_lower = profile_features.dune_toe_z.tolist()[0] -2,\n",
" z_upper = profile_features.dune_toe_z.tolist()[0],\n",
" )\n",
" swash_vol_change = impacts['observed'].query(\"site_id == '{}'\".format(site_id)).swash_vol_change.tolist()[0]\n",
" dune_face_vol_change = impacts['observed'].query(\"site_id == '{}'\".format(site_id)).dune_face_vol_change.tolist()[0]\n",
" \n",
" power = wave_power(t = df_twl_site.index.get_level_values('datetime'),\n",
" wl = df_twl_site.R2.tolist(),\n",
" z_lower = profile_features.dune_toe_z.tolist()[0] -2,\n",
" z_upper = profile_features.dune_toe_z.tolist()[0],\n",
" Hs0=df_twl_site.Hs0.tolist(),\n",
" Tp=df_twl_site.Tp.tolist())\n",
" \n",
" toe_ele_change = dune_toe_elevation_change(site_id, df_profile_features)\n",
" except:\n",
" continue\n",
" \n",
"# print(site_id, toe_ele_change)\n",
" vols.append(vol)\n",
" exposed_times.append(exposed_time)\n",
" toe_ele_changes.append(toe_ele_change)\n",
" wave_powers.append(power)\n",
" berm_widths.append(berm_width)\n",
" swash_vol_changes.append(swash_vol_change)\n",
" dune_face_vol_changes.append(dune_face_vol_change)\n",
" site_ids_to_plot.append(site_id)\n",
" \n",
" if n>100:\n",
" break\n",
"\n",
" \n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-03T03:12:11.598150Z",
"start_time": "2018-12-03T03:12:11.590128Z"
}
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 72,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-05T05:03:39.147413Z",
"start_time": "2018-12-05T05:03:39.070207Z"
}
},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "225855bac0d0464d9be74917812c19ac",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"FigureWidget({\n",
" 'data': [{'marker': {'size': 4},\n",
" 'mode': 'markers',\n",
" 'text': [-0…"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"trace1 = go.Scatter(\n",
" x=berm_widths,\n",
" y=dune_face_vol_changes,\n",
" text = ['{}<br>{}'.format(ele, site_id) for ele,site_id in zip(toe_ele_changes,site_ids_to_plot)],\n",
" mode='markers',\n",
" marker=dict(\n",
" size=4,\n",
"# color = [-1 if x<0 else 1 for x in toe_ele_changes],\n",
"# color = toe_ele_changes,\n",
"# color = dune_face_vol_changes,\n",
"# color = [x.total_seconds() / 60 / 60 for x in exposed_times],\n",
"# colorscale='Viridis',\n",
"# showscale=True\n",
" ))\n",
"\n",
"layout = go.Layout(\n",
" title='Dune Collision Protection',\n",
"# height=300,\n",
"# legend=dict(font={'size': 10}),\n",
"# margin=dict(t=50, b=50, l=50, r=20),\n",
" xaxis=dict(\n",
" title='Berm width',\n",
" autorange=True,\n",
" showgrid=True,\n",
" zeroline=True,\n",
" showline=True,\n",
" ),\n",
" yaxis=dict(\n",
" title='Dune face vol change',\n",
" autorange=True,\n",
" showgrid=True,\n",
" zeroline=True,\n",
" showline=True,\n",
" ))\n",
"\n",
"g_plot = go.FigureWidget(data=[trace1], layout=layout)\n",
"g_plot"
]
},
{
"cell_type": "code",
"execution_count": 51,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-05T03:15:46.517975Z",
"start_time": "2018-12-05T03:15:46.512936Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"[64.5799,\n",
" 21.0163,\n",
" 38.106,\n",
" 28.101,\n",
" 58.7247,\n",
" 33.5534,\n",
" 71.1675,\n",
" 52.6043,\n",
" 50.5765,\n",
" 39.9074,\n",
" 67.8385,\n",
" 43.9043,\n",
" 39.8181,\n",
" 37.7153,\n",
" 20.4454,\n",
" 39.7757,\n",
" 42.1843,\n",
" 33.6152,\n",
" 42.9587,\n",
" 39.9773,\n",
" 35.7835,\n",
" 31.2884,\n",
" -0.4618,\n",
" 31.0094,\n",
" 33.3479,\n",
" 47.8394,\n",
" 32.3566,\n",
" 36.5205,\n",
" 45.7109,\n",
" 16.0687,\n",
" 35.4375,\n",
" 43.327,\n",
" 53.5016,\n",
" 31.0357,\n",
" 47.6528,\n",
" 25.5658,\n",
" 41.0514,\n",
" 28.1645,\n",
" 44.5443,\n",
" 42.925,\n",
" 33.9535,\n",
" 36.2626,\n",
" 35.2536]"
]
},
"execution_count": 51,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# impacts['observed']\n",
"swash_vol_changes"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
},
"toc": {
"base_numbering": 1,
"nav_menu": {},
"number_sections": true,
"sideBar": true,
"skip_h1_title": false,
"title_cell": "Table of Contents",
"title_sidebar": "Contents",
"toc_cell": false,
"toc_position": {},
"toc_section_display": true,
"toc_window_display": true
},
"varInspector": {
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"delete_cmd_postfix": "",
"delete_cmd_prefix": "del ",
"library": "var_list.py",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"delete_cmd_postfix": ") ",
"delete_cmd_prefix": "rm(",
"library": "var_list.r",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
],
"window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 2
}
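The `wave_power` cell in the notebook above relies on the deep-water wave energy flux P = ρg²H²T/(64π). Here is a self-contained sketch of that calculation, keeping the notebook's one-record-per-hour assumption; all numbers below are illustrative.

```python
import numpy as np


def cumulative_wave_power(wl, Hs0, Tp, z_lower, z_upper):
    """Sum deep-water wave energy flux P = rho * g**2 / (64 * pi) * H**2 * T
    (W per metre of crest) over the records where the water level sits
    between z_lower and z_upper. Assumes hourly records, as the notebook does.
    """
    rho = 1025  # seawater density (kg/m3)
    g = 9.8     # gravitational acceleration (m/s2)
    wl, Hs0, Tp = map(np.asarray, (wl, Hs0, Tp))
    mask = (z_lower < wl) & (wl < z_upper)
    power = rho * g ** 2 / (64 * np.pi) * Hs0[mask] ** 2 * Tp[mask]
    return power.sum()


# Illustrative values only: three hourly records, one inside the band
print(cumulative_wave_power(wl=[0.4, 1.2, 2.9], Hs0=[4.0, 5.0, 6.0],
                            Tp=[10, 12, 14], z_lower=0.5, z_upper=2.5))
```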

@@ -0,0 +1,782 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Investigate how dune toe compares to R_high"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-03T23:04:57.331037Z",
"start_time": "2018-12-03T23:04:57.006071Z"
}
},
"outputs": [],
"source": [
"%matplotlib inline\n",
"%reload_ext autoreload\n",
"%autoreload"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-03T23:04:58.749827Z",
"start_time": "2018-12-03T23:04:57.333943Z"
}
},
"outputs": [],
"source": [
"from IPython.core.debugger import set_trace\n",
"\n",
"import pandas as pd\n",
"import numpy as np\n",
"import os\n",
"\n",
"import plotly\n",
"import plotly.graph_objs as go\n",
"import plotly.plotly as py\n",
"import plotly.tools as tls\n",
"import plotly.figure_factory as ff\n",
"import plotly.io as pio"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load data\n",
"Load data from the `./data/interim/` folder and parse into `pandas` dataframes."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-03T23:05:05.800496Z",
"start_time": "2018-12-03T23:04:58.751721Z"
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Importing profiles.csv\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\z5189959\\Desktop\\nsw-2016-storm-impact\\.venv\\lib\\site-packages\\numpy\\lib\\arraysetops.py:522: FutureWarning:\n",
"\n",
"elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Importing profile_features.csv\n",
"Importing impacts_forecasted_foreshore_slope_sto06.csv\n",
"Importing impacts_forecasted_mean_slope_sto06.csv\n",
"Importing impacts_observed.csv\n",
"Importing twl_foreshore_slope_sto06.csv\n",
"Importing twl_mean_slope_sto06.csv\n",
"Done!\n"
]
}
],
"source": [
"def df_from_csv(csv, index_col, data_folder='../data/interim'):\n",
" print('Importing {}'.format(csv))\n",
" return pd.read_csv(os.path.join(data_folder,csv), index_col=index_col)\n",
"\n",
"df_profiles = df_from_csv('profiles.csv', index_col=[0, 1, 2])\n",
"df_profile_features = df_from_csv('profile_features.csv', index_col=[0])\n",
"\n",
"impacts = {\n",
" 'forecasted': {\n",
" 'foreshore_slope_sto06': df_from_csv('impacts_forecasted_foreshore_slope_sto06.csv', index_col=[0]),\n",
" 'mean_slope_sto06': df_from_csv('impacts_forecasted_mean_slope_sto06.csv', index_col=[0]),\n",
" },\n",
" 'observed': df_from_csv('impacts_observed.csv', index_col=[0])\n",
" }\n",
"\n",
"twls = {\n",
" 'forecasted': {\n",
" 'foreshore_slope_sto06': df_from_csv('twl_foreshore_slope_sto06.csv', index_col=[0, 1]),\n",
" 'mean_slope_sto06':df_from_csv('twl_mean_slope_sto06.csv', index_col=[0, 1]),\n",
" }\n",
"}\n",
"\n",
"print('Done!')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Compare predicted R_high with D_low\n",
"Let's see what the distribution of R_high is compared with D_low. How far off are the predicted water levels compared with the dune toes?"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-04T02:20:58.446500Z",
"start_time": "2018-12-04T02:20:58.439480Z"
}
},
"outputs": [],
"source": [
"def get_site_ids(df_forecasted, df_observed, forecasted_regime, observed_regime):\n",
" \"\"\"\n",
" Returns list of site_ids which match the given forecasted and observed regime\n",
" \"\"\"\n",
" set1 = set(df_forecasted.query(\"storm_regime == '{}'\".format(\n",
" forecasted_regime)).index.get_level_values('site_id'))\n",
" set2 = set(df_observed.query(\"storm_regime == '{}'\".format(\n",
" observed_regime)).index.get_level_values('site_id'))\n",
" return sorted(list(set1.intersection(set2)))\n",
"\n",
"\n",
"def get_R_high_D_low_diff(site_ids, df_profile_features, df_twls):\n",
" \"\"\"\n",
" Returns a dataframe of the difference between the R_high and D_low differences. \n",
" Positive values indicate R_high is larger than D_low.\n",
" \"\"\"\n",
" # Get dune toes at these sites and predicted max R_high\n",
" df_toes = df_profile_features.loc[site_ids].query(\n",
" 'profile_type==\"prestorm\"').dune_toe_z\n",
" df_R_highs = df_twls.loc[site_ids].groupby('site_id')['R_high'].max()\n",
"\n",
" # Join into one dataframe\n",
" df_twl_toes = pd.concat([df_toes, df_R_highs], axis=1, sort=True)\n",
" df_twl_toes['diff'] = df_twl_toes['R_high'] - df_twl_toes['dune_toe_z']\n",
" return df_twl_toes['diff']\n"
]
},
{
"cell_type": "code",
"execution_count": 53,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-04T03:55:51.858020Z",
"start_time": "2018-12-04T03:55:50.879155Z"
}
},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "94883b85733444528fe8a73379ce4611",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"FigureWidget({\n",
" 'data': [{'marker': {'color': '#ef8a62'},\n",
" 'name': 'Overpredicted',\n",
" …"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"swash_overpredicted_site_ids = get_site_ids(df_forecasted=impacts['forecasted']['mean_slope_sto06'],\n",
" df_observed=impacts['observed'],\n",
" forecasted_regime='collision',\n",
" observed_regime='swash')\n",
"swash_overpredicted_diffs = get_R_high_D_low_diff(site_ids=swash_overpredicted_site_ids,\n",
" df_profile_features=df_profile_features,\n",
" df_twls=twls['forecasted']['mean_slope_sto06'])\n",
"\n",
"swash_correct_site_ids = get_site_ids(df_forecasted=impacts['forecasted']['mean_slope_sto06'],\n",
" df_observed=impacts['observed'],\n",
" forecasted_regime='swash',\n",
" observed_regime='swash')\n",
"swash_correct_diffs = get_R_high_D_low_diff(site_ids=swash_correct_site_ids,\n",
" df_profile_features=df_profile_features,\n",
" df_twls=twls['forecasted']['mean_slope_sto06'])\n",
"\n",
"\n",
"trace1 = go.Histogram(y=swash_correct_diffs.tolist(),\n",
" opacity=0.75,\n",
" name='Correctly predicted',\n",
" marker=dict(\n",
" color='#67a9cf',\n",
" ),\n",
" ybins=dict(\n",
" size=0.1\n",
"),)\n",
"trace2 = go.Histogram(y=swash_overpredicted_diffs.tolist(),\n",
" opacity=0.75,\n",
" name='Overpredicted',\n",
" marker=dict(\n",
" color='#ef8a62',\n",
"),\n",
" ybins=dict(\n",
" size=0.1\n",
"),)\n",
"\n",
"layout = go.Layout(\n",
" title='R_high - D_low<br>Swash Regime',\n",
" barmode='overlay',\n",
" yaxis=dict(\n",
" title='z (m AHD)'\n",
" ),\n",
" xaxis=dict(\n",
" title='Count'\n",
" ),\n",
" bargap=0.2,\n",
" bargroupgap=0.1,\n",
" legend=dict(x=.6, y=1)\n",
")\n",
"\n",
"g_plot_swash = go.FigureWidget(data=[trace2, trace1], layout=layout)\n",
"\n",
"# To output to file\n",
"img_bytes = pio.write_image(g_plot_swash, 'g_plot_swash.png',format='png', width=600, height=400, scale=5)\n",
"\n",
"g_plot_swash\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 54,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-04T04:10:47.339268Z",
"start_time": "2018-12-04T04:10:45.796887Z"
}
},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "3933da9295fe446f9413bca8842100c2",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"FigureWidget({\n",
" 'data': [{'marker': {'color': '#ef8a62'},\n",
" 'name': 'Underpredicted',\n",
" …"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"collision_underpredicted_site_ids = get_site_ids(df_forecasted=impacts['forecasted']['mean_slope_sto06'],\n",
" df_observed=impacts['observed'],\n",
" forecasted_regime='swash',\n",
" observed_regime='collision')\n",
"collision_underpredicted_diffs = get_R_high_D_low_diff(site_ids=collision_underpredicted_site_ids,\n",
" df_profile_features=df_profile_features,\n",
" df_twls=twls['forecasted']['mean_slope_sto06'])\n",
"\n",
"collision_correct_site_ids = get_site_ids(df_forecasted=impacts['forecasted']['mean_slope_sto06'],\n",
" df_observed=impacts['observed'],\n",
" forecasted_regime='collision',\n",
" observed_regime='collision')\n",
"collision_correct_diffs = get_R_high_D_low_diff(site_ids=collision_correct_site_ids,\n",
" df_profile_features=df_profile_features,\n",
" df_twls=twls['forecasted']['mean_slope_sto06'])\n",
"\n",
"\n",
"trace1 = go.Histogram(y=collision_correct_diffs.tolist(),\n",
" opacity=0.75,\n",
" name='Correctly predicted',\n",
" marker=dict(\n",
" color='#67a9cf',\n",
" ),\n",
" ybins=dict(\n",
" size=0.1\n",
"),)\n",
"trace2 = go.Histogram(y=collision_underpredicted_diffs.tolist(),\n",
" opacity=0.75,\n",
" name='Underpredicted',\n",
" marker=dict(\n",
" color='#ef8a62',\n",
" ),\n",
" ybins=dict(\n",
" size=0.1\n",
"),)\n",
"\n",
"layout = go.Layout(\n",
" title='R_high - D_low<br>Collision Regime',\n",
" barmode='overlay',\n",
" yaxis=dict(\n",
" title='z (m AHD)'\n",
" ),\n",
" xaxis=dict(\n",
" title='Count'\n",
" ),\n",
" bargap=0.2,\n",
" bargroupgap=0.1,\n",
" legend=dict(x=.6, y=1)\n",
")\n",
"\n",
"g_plot_collision = go.FigureWidget(data=[trace2, trace1], layout=layout)\n",
"\n",
"# To output to file\n",
"img_bytes = pio.write_image(g_plot_collision, 'g_plot_collision.png',format='png', width=600, height=400, scale=5)\n",
"\n",
"g_plot_collision"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Does dune toe lower?\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {
"heading_collapsed": true
},
"source": [
"### What do over predicted and underpredicted profiles look like?"
]
},
{
"cell_type": "markdown",
"metadata": {
"hidden": true
},
"source": [
"Define a function for getting the average beach profile for a number of given site_ids:"
]
},
{
"cell_type": "code",
"execution_count": 156,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-04T23:11:08.853877Z",
"start_time": "2018-12-04T23:11:08.846876Z"
},
"hidden": true
},
"outputs": [],
"source": [
"def get_avg_profile(site_ids, debug=False):\n",
" rows = []\n",
" for n,site_id in enumerate(site_ids):\n",
" profile = df_profiles.query(\"site_id == '{}' and profile_type == 'prestorm'\".format(site_id))\n",
" profile_z = np.array(profile.z.tolist())\n",
" profile_x = np.array(profile.index.get_level_values('x').tolist())\n",
" \n",
" # Let's center the profile based on the z=0 location\n",
" idx_last_z_val = max(np.argwhere(~np.isnan(profile_z)==True))[0]\n",
" x_last_val = profile_x[idx_last_z_val]\n",
" profile_x = [x - x_last_val for x in profile_x]\n",
" \n",
" # Put values into a dictionary\n",
" for x,z in zip(profile_x, profile_z):\n",
" rows.append({'x':x, 'z': z})\n",
"\n",
" # Return early for debugging\n",
" if debug and n>3:\n",
" break\n",
" \n",
" # Create dataframe from rows\n",
" df = pd.DataFrame(rows)\n",
" avg_profile = df.groupby('x').agg({'z': [np.nanmean, np.nanstd]}).reset_index()\n",
"\n",
" return {\n",
" 'x': avg_profile.x.tolist(),\n",
" 'z': avg_profile.z.nanmean.tolist(),\n",
" 'std': avg_profile.z.nanstd.tolist(),\n",
" 'n': n+1 # number of profiles\n",
" }"
]
},
{
"cell_type": "markdown",
"metadata": {
"hidden": true
},
"source": [
"Now, let's look at whether there is a difference between the average beach profile of correctly forecasted site_ids and incorrectly forecasted site_ids. First, looking at sites where we observed swash regime."
]
},
{
"cell_type": "code",
"execution_count": 161,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-05T02:00:36.853374Z",
"start_time": "2018-12-05T01:58:21.839366Z"
},
"code_folding": [],
"hidden": true
},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "03f2e99d20a347f3922a0e6a36f99ccd",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"FigureWidget({\n",
" 'data': [{'line': {'color': 'rgb(205, 0, 0)', 'width': 2},\n",
" 'mode': 'lines',\n",
" …"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"overpredicted = get_avg_profile(swash_overpredicted_site_ids)\n",
"correct = get_avg_profile(swash_correct_site_ids)\n",
"\n",
"# Add mean profile\n",
"trace_overpredicted_mean = go.Scatter(\n",
" x=overpredicted['x'],\n",
" y=overpredicted['z'],\n",
" opacity=1,\n",
" mode='lines',\n",
" name='Mean overpredicted profile (n={})'.format(overpredicted['n']),\n",
" line=dict(\n",
" color=('rgb(205, 0, 0)'),\n",
" width=2)\n",
")\n",
"\n",
"trace_overpredited_std_top = go.Scatter(\n",
" x=overpredicted['x'],\n",
" y=np.add(overpredicted['z'], overpredicted['std']),\n",
" opacity=1,\n",
" hoverinfo='none',\n",
" showlegend=False,\n",
" mode='lines',\n",
" line=dict(\n",
" color=('rgb(205, 0, 0)'),\n",
" width=0.5,\n",
" dash='dash')\n",
")\n",
"\n",
"trace_overpredited_std_btm = go.Scatter(\n",
" x=overpredicted['x'],\n",
" y=np.subtract(overpredicted['z'], overpredicted['std']),\n",
" opacity=1,\n",
" hoverinfo='none',\n",
" mode='lines',\n",
" showlegend=False,\n",
" line=dict(\n",
" color=('rgb(205, 0, 0)'),\n",
" width=0.5,\n",
" dash='dash')\n",
")\n",
"\n",
"trace_correct_mean = go.Scatter(\n",
" x=avg_correct_x,\n",
" y=avg_correct_z,\n",
" opacity=1,\n",
" mode='lines',\n",
" name='Mean correct profile (n={})'.format(correct['n']),\n",
" line=dict(\n",
" color=('rgb(0, 205, 0)'),\n",
" width=2)\n",
")\n",
"\n",
"trace_correct_std_top = go.Scatter(\n",
" x=avg_correct_x,\n",
" y=np.add(avg_correct_z, avg_correct_std),\n",
" opacity=1,\n",
" hoverinfo='none',\n",
" showlegend=False,\n",
" mode='lines',\n",
" line=dict(\n",
" color=('rgb(0, 205, 0)'),\n",
" width=0.5,\n",
" dash='dash')\n",
")\n",
"\n",
"trace_correct_std_btm = go.Scatter(\n",
" x=avg_correct_x,\n",
" y=np.subtract(avg_correct_z, avg_correct_std),\n",
" opacity=1,\n",
" hoverinfo='none',\n",
" mode='lines',\n",
" showlegend=False,\n",
" line=dict(\n",
" color=('rgb(0, 205, 0)'),\n",
" width=0.5,\n",
" dash='dash')\n",
")\n",
"\n",
"layout = dict(showlegend=True,\n",
" title='Observed Swash Impact Regime',\n",
" legend=dict(x=.6, y=1),\n",
" xaxis=dict(\n",
" range=[-150, 0]),\n",
" yaxis=dict(\n",
" range=[0, 10]))\n",
"\n",
"fig = go.FigureWidget(data=[trace_overpredicted_mean,\n",
" trace_overpredited_std_top,\n",
" trace_overpredited_std_btm,\n",
" trace_correct_mean,\n",
" trace_correct_std_top,\n",
" trace_correct_std_btm],\n",
" layout=layout)\n",
"\n",
"# To output to file\n",
"img_bytes = pio.write_image(\n",
" fig, 'mean_profiles_swash.png', format='png', width=600, height=600, scale=5)\n",
"\n",
"fig"
]
},
{
"cell_type": "markdown",
"metadata": {
"hidden": true
},
"source": [
"We can see that the difference is pretty minimal. For cases where we predicted collision, but observed swash (overprediction), we see that overpredicted profiles are slightly more concave than correctly predicted sites."
]
},
{
"cell_type": "code",
"execution_count": 162,
"metadata": {
"ExecuteTime": {
"end_time": "2018-12-05T02:03:38.394415Z",
"start_time": "2018-12-05T02:00:37.335377Z"
},
"hidden": true
},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "1255bccc024e4690b4b8ff4ccc8e9e35",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"FigureWidget({\n",
" 'data': [{'line': {'color': 'rgb(205, 0, 0)', 'width': 2},\n",
" 'mode': 'lines',\n",
" …"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"underpredicted = get_avg_profile(collision_underpredicted_site_ids)\n",
"correct = get_avg_profile(collision_correct_site_ids)\n",
"\n",
"# Add mean profile\n",
"trace_underpredicted_mean = go.Scatter(\n",
" x = underpredicted['x'],\n",
" y= underpredicted['z'],\n",
" opacity = 1,\n",
" mode='lines',\n",
" name='Mean underpredicted profile (n={})'.format(underpredicted['n']),\n",
" line = dict(\n",
" color = ('rgb(205, 0, 0)'),\n",
" width = 2)\n",
")\n",
"\n",
"trace_underpredicted_std_top = go.Scatter(\n",
" x = underpredicted['x'],\n",
" y= np.add(underpredicted['z'],underpredicted['std']),\n",
" opacity = 1,\n",
" hoverinfo='none',\n",
" showlegend=False,\n",
" mode='lines',\n",
" line = dict(\n",
" color = ('rgb(205, 0, 0)'),\n",
" width = 0.5,\n",
" dash = 'dash')\n",
") \n",
"\n",
"trace_underpredicted_std_btm = go.Scatter(\n",
" x = underpredicted['x'],\n",
" y= np.subtract(underpredicted['z'],underpredicted['std']),\n",
" opacity = 1,\n",
" hoverinfo='none',\n",
" mode='lines',\n",
" showlegend=False,\n",
" line = dict(\n",
" color = ('rgb(205, 0, 0)'),\n",
" width = 0.5,\n",
" dash = 'dash')\n",
") \n",
"\n",
"trace_correct_mean = go.Scatter(\n",
" x = avg_correct_x,\n",
" y= avg_correct_z,\n",
" opacity = 1,\n",
" mode='lines',\n",
" name='Mean correct profile (n={})'.format(correct['n']),\n",
" line = dict(\n",
" color = ('rgb(0, 205, 0)'),\n",
" width = 2)\n",
")\n",
"\n",
"trace_correct_std_top = go.Scatter(\n",
" x = avg_correct_x,\n",
" y= np.add(avg_correct_z, avg_correct_std),\n",
" opacity = 1,\n",
" hoverinfo='none',\n",
" showlegend=False,\n",
" mode='lines',\n",
" line = dict(\n",
" color = ('rgb(0, 205, 0)'),\n",
" width = 0.5,\n",
" dash = 'dash')\n",
") \n",
"\n",
"trace_correct_std_btm = go.Scatter(\n",
" x = avg_correct_x,\n",
" y= np.subtract(avg_correct_z, avg_correct_std),\n",
" opacity = 1,\n",
" hoverinfo='none',\n",
" mode='lines',\n",
" showlegend=False,\n",
" line = dict(\n",
" color = ('rgb(0, 205, 0)'),\n",
" width = 0.5,\n",
" dash = 'dash')\n",
") \n",
" \n",
"layout = dict(showlegend=True,\n",
" title='Observed Collision Impact Regime',\n",
" legend=dict(x=.6, y=1),\n",
" xaxis=dict(\n",
" range=[-150,0]),\n",
" yaxis=dict(\n",
" range=[0,10]))\n",
" \n",
"fig=go.FigureWidget(data=[trace_underpredicted_mean, \n",
" trace_underpredicted_std_top,\n",
" trace_underpredicted_std_btm, \n",
" trace_correct_mean, \n",
" trace_correct_std_top, \n",
" trace_correct_std_btm], \n",
" layout=layout)\n",
"\n",
"# To output to file\n",
"img_bytes = pio.write_image(fig, 'mean_profiles_collision.png',format='png', width=600, height=600, scale=5)\n",
"\n",
"fig\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"hidden": true
},
"source": [
"This plot is a bit more interesting. It shows that we are correctly forecasting collision when the profile is more accreted/convex, but when the profile is more eroded/concave, the water level is underpredicted. Why is this? "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
},
"toc": {
"base_numbering": 1,
"nav_menu": {},
"number_sections": true,
"sideBar": true,
"skip_h1_title": false,
"title_cell": "Table of Contents",
"title_sidebar": "Contents",
"toc_cell": false,
"toc_position": {
"height": "calc(100% - 180px)",
"left": "10px",
"top": "150px",
"width": "232.391px"
},
"toc_section_display": true,
"toc_window_display": true
},
"varInspector": {
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"delete_cmd_postfix": "",
"delete_cmd_prefix": "del ",
"library": "var_list.py",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"delete_cmd_postfix": ") ",
"delete_cmd_prefix": "rm(",
"library": "var_list.r",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
],
"window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 2
}
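The over/under-prediction split used throughout the notebook above is just a cross of forecasted against observed regime. A minimal pandas sketch of the same intersection logic as `get_site_ids` (column and regime names follow the notebook; the data is invented):

```python
import pandas as pd

# Invented data; columns follow the notebook's impacts dataframes
df_forecasted = pd.DataFrame({'storm_regime': ['swash', 'collision', 'swash']},
                             index=pd.Index(['A', 'B', 'C'], name='site_id'))
df_observed = pd.DataFrame({'storm_regime': ['swash', 'swash', 'collision']},
                           index=pd.Index(['A', 'B', 'C'], name='site_id'))


def site_ids_for(forecasted_regime, observed_regime):
    # Same idea as get_site_ids() above: sites matching both regimes
    f = set(df_forecasted.query("storm_regime == @forecasted_regime").index)
    o = set(df_observed.query("storm_regime == @observed_regime").index)
    return sorted(f & o)


print(site_ids_for('collision', 'swash'))  # overpredicted  -> ['B']
print(site_ids_for('swash', 'collision'))  # underpredicted -> ['C']
```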

File diff suppressed because one or more lines are too long

@@ -23,6 +23,7 @@ def forecast_twl(
runup_function,
n_processes=MULTIPROCESS_THREADS,
slope="foreshore",
profile_type='prestorm'
):
# Use df_waves as a base
df_twl = df_waves.copy()
@@ -45,11 +46,14 @@ def forecast_twl(
df_twl["beta"] = pd.concat(results)
elif slope == "mean":
df_temp = df_twl.join(df_profile_features, how="inner")
df_temp = df_twl.join(df_profile_features.query("profile_type=='{}'".format(profile_type)).reset_index(
level='profile_type')
, how="inner")
df_temp["mhw"] = 0.5
with Pool(processes=n_processes) as pool:
results = pool.starmap(
mean_slope_for_site_id, [(site_id, df_temp, df_profiles, "dune_toe_z", "mhw") for site_id in site_ids]
mean_slope_for_site_id, [(site_id, df_temp, df_profiles, "dune_toe_z", "dune_toe_x", "mhw") for
site_id in site_ids]
)
df_twl["beta"] = pd.concat(results)
@@ -70,7 +74,8 @@ def forecast_twl(
return df_twl
def mean_slope_for_site_id(site_id, df_twl, df_profiles, top_elevation_col, btm_elevation_col):
def mean_slope_for_site_id(site_id, df_twl, df_profiles, top_elevation_col, top_x_col, btm_elevation_col,
profile_type='prestorm'):
"""
Calculates the foreshore slope values for a given site_id. Returns a series (with the same indices as df_twl) of
foreshore slopes. This function is used to parallelize getting foreshore slopes as it is computationally
@@ -82,7 +87,7 @@ def mean_slope_for_site_id(site_id, df_twl, df_profiles, top_elevation_col, btm_
"""
# Get the prestorm beach profile
profile = df_profiles.query("site_id =='{}' and profile_type == 'prestorm'".format(site_id))
profile = df_profiles.query("site_id =='{}' and profile_type == '{}'".format(site_id, profile_type))
profile_x = profile.index.get_level_values("x").tolist()
profile_z = profile.z.tolist()
@@ -95,6 +100,7 @@ def mean_slope_for_site_id(site_id, df_twl, df_profiles, top_elevation_col, btm_
top_elevation=row[top_elevation_col],
btm_elevation=row[btm_elevation_col],
method="end_points",
top_x= row[top_x_col]
),
axis=1,
)
@@ -190,7 +196,7 @@ def foreshore_slope_from_profile(profile_x, profile_z, tide, runup_function, **k
iteration_count += 1
def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, method="end_points"):
def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, method="end_points", top_x=None, btm_x=None):
"""
Returns a slope (beta) from a bed profile, given the top and bottom elevations of where the slope should be taken.
:param profile_x: List of x bed profile coordinates
@@ -198,6 +204,9 @@ def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, metho
:param top_elevation: Top elevation of where to take the slope
:param btm_elevation: Bottom elevation of where to take the slope
:param method: Method used to calculate slope (end_points or least_squares)
:param top_x: x-coordinate of the top end point. May be needed, as there may be multiple crossings of the
top_elevation.
:param btm_x: x-coordinate of the bottom end point
:return:
"""
@@ -207,7 +216,18 @@ def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, metho
end_points = {"top": {"z": top_elevation}, "btm": {"z": btm_elevation}}
for end_type in end_points.keys():
# Add x coordinates if they are specified
if top_x is not None and end_type == 'top':
end_points['top']['x'] = top_x
continue
if btm_x is not None and end_type == 'btm':
end_points['btm']['x'] = btm_x
continue
elevation = end_points[end_type]["z"]
intersection_x = crossings(profile_x, profile_z, elevation)
@@ -226,7 +246,12 @@ def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, metho
end_points[end_type]["x"] = intersection_x[-1]
else:
# For bottom elevation, take most landward intersection that is seaward of top elevation
end_points[end_type]["x"] = [x for x in intersection_x if x > end_points["top"]["x"]][0]
end_point_btm = [x for x in intersection_x if x > end_points["top"]["x"]]
if len(end_point_btm) == 0:
# If there doesn't seem to be an intersection seaward of the top elevation, return None.
return None
else:
end_points[end_type]["x"] = end_point_btm[0]
if method == "end_points":
x_top = end_points["top"]["x"]
@@ -279,25 +304,28 @@ def crossings(profile_x, profile_z, constant_z):
@click.option("--profile-features-csv", required=True, help="")
@click.option("--runup-function", required=True, help="", type=click.Choice(["sto06"]))
@click.option("--slope", required=True, help="", type=click.Choice(["foreshore", "mean"]))
@click.option("--profile-type", required=True, help="", type=click.Choice(["prestorm", "poststorm"]))
@click.option("--output-file", required=True, help="")
def create_twl_forecast(waves_csv, tides_csv, profiles_csv, profile_features_csv, runup_function, slope, output_file):
def create_twl_forecast(waves_csv, tides_csv, profiles_csv, profile_features_csv, runup_function, slope,
profile_type, output_file):
logger.info("Creating forecast of total water levels")
logger.info("Importing data")
df_waves = pd.read_csv(waves_csv, index_col=[0, 1])
df_tides = pd.read_csv(tides_csv, index_col=[0, 1])
df_profiles = pd.read_csv(profiles_csv, index_col=[0, 1, 2])
df_profile_features = pd.read_csv(profile_features_csv, index_col=[0])
df_profile_features = pd.read_csv(profile_features_csv, index_col=[0, 1])
logger.info("Forecasting TWL")
df_twl_foreshore_slope_sto06 = forecast_twl(
df_twl = forecast_twl(
df_tides,
df_profiles,
df_waves,
df_profile_features,
runup_function=getattr(runup_models, runup_function),
slope=slope,
profile_type=profile_type
)
df_twl_foreshore_slope_sto06.to_csv(output_file)
df_twl.to_csv(output_file)
logger.info("Saved to %s", output_file)
logger.info("Done!")

@ -20,7 +20,7 @@ def forecasted_impacts(df_profile_features, df_forecasted_twl):
"""
logger.info("Getting forecasted storm impacts")
df_forecasted_impacts = pd.DataFrame(index=df_profile_features.index)
df_forecasted_impacts = pd.DataFrame(index=df_profile_features.index.get_level_values('site_id').unique())
# For each site, find the maximum R_high value and the corresponding R_low value.
idx = df_forecasted_twl.groupby(level=["site_id"])["R_high"].idxmax().dropna()
@ -29,7 +29,7 @@ def forecasted_impacts(df_profile_features, df_forecasted_twl):
# Join with df_profile features to find dune toe and crest elevations
df_forecasted_impacts = df_forecasted_impacts.merge(
df_profile_features[["dune_toe_z", "dune_crest_z"]], how="left", left_index=True, right_index=True
df_profile_features.query("profile_type=='prestorm'")[["dune_toe_z", "dune_crest_z"]], how="left", left_index=True, right_index=True
)
# Compare R_high and R_low with dune crest and toe elevations
@ -73,7 +73,7 @@ def storm_regime(df_forecasted_impacts):
def create_forecasted_impacts(profile_features_csv, forecasted_twl_csv, output_file):
logger.info("Creating observed wave impacts")
logger.info("Importing existing data")
df_profile_features = pd.read_csv(profile_features_csv, index_col=[0])
df_profile_features = pd.read_csv(profile_features_csv, index_col=[0, 1])
df_forecasted_twl = pd.read_csv(forecasted_twl_csv, index_col=[0, 1])
df_forecasted_impacts = forecasted_impacts(df_profile_features, df_forecasted_twl)
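# Illustrative sketch (not part of this diff): the groupby/idxmax above keeps, for each site,
# the forecast timestep at which R_high peaks. With made-up data:
#
# df = pd.DataFrame({"R_high": [1.2, 2.5, 2.1]},
#                   index=pd.MultiIndex.from_tuples(
#                       [("SITE_A", "t1"), ("SITE_A", "t2"), ("SITE_B", "t1")],
#                       names=["site_id", "datetime"]))
# idx = df.groupby(level=["site_id"])["R_high"].idxmax().dropna()
# df.loc[idx]  # rows ("SITE_A", "t2") and ("SITE_B", "t1")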

@ -30,14 +30,15 @@ def volume_change(df_profiles, df_profile_features, zone):
"""
logger.info("Calculating change in beach volume in {} zone".format(zone))
df_vol_changes = pd.DataFrame(index=df_profile_features.index)
df_vol_changes = pd.DataFrame(index=df_profile_features.index.get_level_values('site_id').unique())
df_profiles = df_profiles.sort_index()
sites = df_profiles.groupby(level=["site_id"])
for site_id, df_site in sites:
logger.debug("Calculating change in beach volume at {} in {} zone".format(site_id, zone))
prestorm_dune_toe_x = df_profile_features.loc[df_profile_features.index == site_id].dune_toe_x.tolist()
prestorm_dune_crest_x = df_profile_features.loc[df_profile_features.index == site_id].dune_crest_x.tolist()
query ="site_id=='{}'&profile_type=='prestorm'".format(site_id)
prestorm_dune_toe_x = df_profile_features.query(query).dune_toe_x.tolist()
prestorm_dune_crest_x = df_profile_features.query(query).dune_crest_x.tolist()
# We may not have a dune toe or crest defined, or there may be multiple defined.
prestorm_dune_crest_x = return_first_or_nan(prestorm_dune_crest_x)
@ -61,14 +62,20 @@ def volume_change(df_profiles, df_profile_features, zone):
for profile_type in ["prestorm", "poststorm"]
]
)
x_first_obs = max(
[
min(df_zone.query("profile_type == '{}'".format(profile_type)).index.get_level_values("x"))
for profile_type in ["prestorm", "poststorm"]
]
)
# Where we want to measure pre- and post-storm volume depends on the zone selected
if zone == "swash":
x_min = prestorm_dune_toe_x
x_min = max(prestorm_dune_toe_x, x_first_obs)
x_max = x_last_obs
elif zone == "dune_face":
x_min = prestorm_dune_crest_x
x_max = prestorm_dune_toe_x
x_min = max(prestorm_dune_crest_x, x_first_obs)
x_max = min(prestorm_dune_toe_x, x_last_obs)
else:
logger.warning("Zone argument not properly specified. Please check")
x_min = None
@ -89,13 +96,23 @@ def volume_change(df_profiles, df_profile_features, zone):
x_max=x_max,
)
# No point keeping so many decimal places, let's round them
prestorm_vol = np.round(prestorm_vol, 2)
poststorm_vol = np.round(poststorm_vol, 2)
# Volume change needs to be calculated with a tolerance for LIDAR accuracy. If the difference between
# profiles is less than 20 cm, treat it as zero difference.
prestorm_z = df_zone.query("profile_type=='prestorm'").reset_index('profile_type').z
poststorm_z = df_zone.query("profile_type=='poststorm'").reset_index('profile_type').z
diff_z = prestorm_z - poststorm_z
diff_z[abs(diff_z) < 0.2] = 0
diff_vol = beach_volume(
x=diff_z.index.get_level_values("x"),
z=diff_z,
x_min=x_min,
x_max=x_max,
)
df_vol_changes.loc[site_id, "prestorm_{}_vol".format(zone)] = prestorm_vol
df_vol_changes.loc[site_id, "poststorm_{}_vol".format(zone)] = poststorm_vol
df_vol_changes.loc[site_id, "{}_vol_change".format(zone)] = prestorm_vol - poststorm_vol
df_vol_changes.loc[site_id, "{}_vol_change".format(zone)] = diff_vol
df_vol_changes.loc[site_id, "{}_pct_change".format(zone)] = diff_vol / prestorm_vol * 100
return df_vol_changes
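# Illustrative sketch (not part of this diff): the 0.2 m tolerance zeroes out elevation
# differences within LIDAR accuracy before integrating, so survey noise does not register
# as volume change. With made-up numbers:
#
# import numpy as np
# diff_z = np.array([0.10, 0.05, 0.80])  # prestorm_z - poststorm_z
# diff_z[np.abs(diff_z) < 0.2] = 0       # -> [0.0, 0.0, 0.8]; only real erosion remains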
@ -127,31 +144,14 @@ def storm_regime(df_observed_impacts):
:return:
"""
logger.info("Getting observed storm regimes")
df_observed_impacts.loc[df_observed_impacts.dune_face_vol_change <= 5, "storm_regime"] = "swash"
df_observed_impacts.loc[df_observed_impacts.dune_face_vol_change > 5, "storm_regime"] = "collision"
return df_observed_impacts
swash = (df_observed_impacts.dune_face_pct_change <= 2) & (df_observed_impacts.dune_face_vol_change <= 3)
collision = (df_observed_impacts.dune_face_pct_change >= 2) | (df_observed_impacts.dune_face_vol_change > 3)
df_observed_impacts.loc[swash, "storm_regime"] = "swash"
df_observed_impacts.loc[collision, "storm_regime"] = "collision"
return df_observed_impacts
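# Illustrative sketch (not part of this diff): a site is "swash" only if the dune face lost
# no more than 2% and no more than 3 m3/m; exceeding either threshold flags "collision".
# Note a site sitting exactly on the 2% boundary matches both masks and ends up "collision",
# since that assignment runs last. With made-up values:
#
# df = pd.DataFrame({"dune_face_pct_change": [1.5, 1.5, 2.5],
#                    "dune_face_vol_change": [2.0, 4.0, 1.0]})
# storm_regime(df).storm_regime.tolist()  # -> ['swash', 'collision', 'collision']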
#
# if __name__ == "__main__":
# logger.info("Importing existing data")
# data_folder = "./data/interim"
# df_profiles = pd.read_csv(os.path.join(data_folder, "profiles.csv"), index_col=[0, 1, 2])
# df_profile_features = pd.read_csv(os.path.join(data_folder, "profile_features.csv"), index_col=[0])
#
# logger.info("Creating new dataframe for observed impacts")
# df_observed_impacts = pd.DataFrame(index=df_profile_features.index)
#
# logger.info("Getting pre/post storm volumes")
# df_swash_vol_changes = volume_change(df_profiles, df_profile_features, zone="swash")
# df_dune_face_vol_changes = volume_change(df_profiles, df_profile_features, zone="dune_face")
# df_observed_impacts = df_observed_impacts.join([df_swash_vol_changes, df_dune_face_vol_changes])
#
# # Classify regime based on volume changes
# df_observed_impacts = storm_regime(df_observed_impacts)
#
# # Save dataframe to csv
# df_observed_impacts.to_csv(os.path.join(data_folder, "impacts_observed.csv"))
@click.command()
@ -163,10 +163,10 @@ def create_observed_impacts(profiles_csv, profile_features_csv, output_file):
logger.info("Creating observed wave impacts")
logger.info("Importing data")
df_profiles = pd.read_csv(profiles_csv, index_col=[0, 1, 2])
df_profile_features = pd.read_csv(profile_features_csv, index_col=[0])
df_profile_features = pd.read_csv(profile_features_csv, index_col=[0, 1])
logger.info("Creating new dataframe for observed impacts")
df_observed_impacts = pd.DataFrame(index=df_profile_features.index)
df_observed_impacts = pd.DataFrame(index=df_profile_features.index.get_level_values('site_id').unique())
logger.info("Getting pre/post storm volumes")
df_swash_vol_changes = volume_change(df_profiles, df_profile_features, zone="swash")
@ -177,7 +177,7 @@ def create_observed_impacts(profiles_csv, profile_features_csv, output_file):
df_observed_impacts = storm_regime(df_observed_impacts)
# Save dataframe to csv
df_observed_impacts.to_csv(output_file)
df_observed_impacts.to_csv(output_file, float_format='%.4f')
logger.info("Saved to %s", output_file)
logger.info("Done!")

@ -11,6 +11,9 @@ import analysis.forecast_twl as forecast_twl
import analysis.forecasted_storm_impacts as forecasted_storm_impacts
import analysis.observed_storm_impacts as observed_storm_impacts
# Disable numpy FutureWarnings when running from the CLI
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
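# Illustrative alternative (not part of this diff): if silencing FutureWarning globally is
# too broad, the filter can be scoped to a single call with the standard library:
#
# with warnings.catch_warnings():
#     warnings.simplefilter("ignore", category=FutureWarning)
#     result = some_numpy_heavy_call()  # hypothetical function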
@click.group()
def cli():
@ -21,7 +24,8 @@ if __name__ == "__main__":
cli.add_command(parse_mat.create_waves_csv)
cli.add_command(parse_mat.create_sites_and_profiles_csv)
cli.add_command(parse_mat.create_tides_csv)
cli.add_command(profile_features.create_profile_features)
cli.add_command(parse_mat.create_profile_features)
# cli.add_command(profile_features.create_profile_features)
cli.add_command(csv_to_shp.sites_csv_to_shp)
cli.add_command(forecast_twl.create_twl_forecast)
cli.add_command(forecasted_storm_impacts.create_forecasted_impacts)

@ -45,6 +45,48 @@ def parse_orientations(orientations_mat):
return df
def parse_dune_crest_toes(df_sites, crest_mat, toe_mat):
"""
:param df_sites:
:param crest_mat:
:param toe_mat:
:return:
"""
logger.info("Parsing dune crests and toes")
rows = []
crest_data = loadmat(crest_mat)
toe_data = loadmat(toe_mat)
for n in range(len(crest_data['xc1'])):
rows.extend([{
'dune_crest_x': crest_data['xc1'][n],
'dune_crest_z': crest_data['zc1'][n],
'dune_toe_x': toe_data['xt1'][n],
'dune_toe_z': toe_data['zt1'][n],
'profile_type': 'prestorm',
'site_no': n + 1
},{
'dune_crest_x': crest_data['xc2'][n],
'dune_crest_z': crest_data['zc2'][n],
'dune_toe_x': toe_data['xt2'][n],
'dune_toe_z': toe_data['zt2'][n],
'profile_type': 'poststorm',
'site_no': n + 1
}])
df_profile_features = pd.DataFrame(rows)
# Want the site_id instead of the site_no, so merge in df_sites
df_sites = df_sites.reset_index()  # avoid mutating the caller's dataframe
df_profile_features = df_sites[['site_no', 'site_id']].merge(df_profile_features, how='outer', on=['site_no'])
df_profile_features.drop(columns=['site_no'], inplace=True)
df_profile_features.set_index(['site_id','profile_type'], inplace=True)
df_profile_features.sort_index(inplace=True)
df_profile_features = df_profile_features.round(3)
return df_profile_features
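# Illustrative sketch (not part of this diff): the returned dataframe is indexed by
# (site_id, profile_type), so one site's prestorm features can be pulled with a tuple
# lookup. The site_id below is hypothetical:
#
# df_profile_features = parse_dune_crest_toes(df_sites, crest_mat, toe_mat)
# df_profile_features.loc[("NARRA0001", "prestorm"), "dune_toe_z"]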
def combine_sites_and_orientaions(df_sites, df_orientations):
"""
Replaces beach/lat/lon columns with the unique site_id.
@ -151,6 +193,7 @@ def parse_profiles_and_sites(profiles_mat):
site_counter = 0
for i, site in enumerate(mat_data["site"]):
logger.debug('Processing site {} of {}'.format(i+1, len(mat_data['site'])))
# Give each site a unique id
if len(site_rows) == 0 or site_rows[-1]["beach"] != site:
@ -205,6 +248,7 @@ def parse_profiles_and_sites(profiles_mat):
profile_rows.append(
{
"site_id": site_id,
"lon": lon[0],
"lat": lat[0],
"profile_type": profile_type,
@ -224,6 +268,7 @@ def parse_profiles_and_sites(profiles_mat):
site_rows.append(
{
"site_id": site_id,
"site_no": i + 1,
"beach": site,
"lat": x_200_lat,
"lon": x_200_lon,
@ -330,6 +375,18 @@ def create_waves_csv(waves_mat, sites_csv, output_file):
logger.info("Created %s", output_file)
@click.command(short_help="create profile_features.csv")
@click.option("--crest-mat", required=True, help=".mat file containing wave records")
@click.option("--toe-mat", required=True, help=".mat file containing wave records")
@click.option("--sites-csv", required=True, help=".csv file description of cross section sites")
@click.option("--output-file", required=True, help="where to save waves.csv")
def create_profile_features(crest_mat, toe_mat, sites_csv, output_file):
logger.info("Creating %s", output_file)
df_sites = pd.read_csv(sites_csv, index_col=[0])
df_profile_features = parse_dune_crest_toes(df_sites, crest_mat, toe_mat)
df_profile_features.to_csv(output_file)
logger.info("Created %s", output_file)
@click.command(short_help="create profiles.csv")
@click.option("--profiles-mat", required=True, help=".mat file containing beach profiles")
@click.option("--profiles-output-file", required=True, help="where to save profiles.csv")

@ -3,7 +3,8 @@ version: 1
disable_existing_loggers: False
formatters:
simple:
format: "%(asctime)s - %(filename)s - %(levelname)s - %(message)s"
format: "[%(asctime)s] [%(filename)15.15s:%(lineno)4.4s %(funcName)15.15s] [%(levelname)-4.4s] %(message)s"
datefmt: "%Y-%m-%d %H:%M:%S"
handlers:
console:
@ -46,5 +47,5 @@ loggers:
propagate: no
root:
level: INFO
level: DEBUG
handlers: [console, info_file_handler, error_file_handler, warning_file_handler]
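# With the format above, an emitted record looks roughly like this (illustrative):
# [2018-12-10 09:15:02] [forecast_twl.py: 105    forecast_twl] [INFO] Forecasting TWL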
