Compare commits


No commits in common. '3f2cfa50aadfc773b0e654f257528fbeadc4e27d' and '6c5444fc06c927b26fc029e7b0ef4f7839a1fc81' have entirely different histories.

@@ -13,7 +13,7 @@ CURRENT_DIR = $(shell pwd)
.PHONY: venv-init
venv-init: ##@environment Setup virtual environment
conda env create -f environment.yml --prefix=.venv
conda env create -f environment.yml --prefix=.venv python=3.6
.PHONY: venv-activate
venv-activate: ##@environment Activates the virtual environment
@@ -27,6 +27,12 @@ venv-update: ##@environment Updates to latest packages
venv-requirements-install: ##@environment Ensures environment.yml packages are installed
conda env update
# The environment.yml file should really be created by hand, but
# this provides a good starting point.
.PHONY: venv-requirements-export
venv-requirements-export: ##@environment Exports current environment to environment.yml
conda env export --file environment.yml
# To install new packages: conda install --prefix .venv PACKAGE
@@ -37,8 +43,8 @@ venv-requirements-install: ##@environment Ensures environment.yml packages are i
push-data: ##@data Copies data from ./data/ to data backup directory
rclone copy ./data/ $(DATA_BACKUP_DIR) --exclude "*.las" --progress
# We probably don't want to pull the raw LIDAR .las files, so let's exclude them
pull-data: ##@data Copies data from data backup directory to ./data/
# We probably don't want to pull the raw LIDAR .las files, so let's exclude them
rclone copy $(DATA_BACKUP_DIR) ./data/ --exclude "*.las" --progress
@@ -96,17 +102,12 @@ impacts: ./data/interim/impacts_forecasted_foreshore_slope_sto06.csv ./data/inte
# --output-csv "./data/interim/profile_features.csv"
# Create a .csv of our dune toe and crest profile features from Tom Beuzen's .mat file
# Also apply an overwrite of some values, using an excel sheet
./data/interim/profile_features.csv: ./data/raw/profile_features_tom_beuzen/*.mat ./data/interim/sites.csv
activate ./.venv && python ./src/cli.py create-profile-features \
--crest-mat "./data/raw/profile_features_tom_beuzen/J16_DuneCrest.mat" \
--toe-mat "./data/raw/profile_features_tom_beuzen/J16_DuneToe.mat" \
--sites-csv "./data/interim/sites.csv" \
--output-file "./data/interim/profile_features.csv" \
&& python ./src/cli.py apply-profile-features-overwrite \
--interim_file "./data/interim/profile_features.csv" \
--overwrite_file "./data/raw/profile_features_chris_leaman/profile_features_chris_leaman.xlsx" \
--profile_file "./data/interim/profiles.csv"
--output-file "./data/interim/profile_features.csv"
# Creates a forecast of twl using sto06 and prestorm time varying prestorm foreshore slope
./data/interim/twl_foreshore_slope_sto06.csv: ./data/interim/waves.csv ./data/interim/tides.csv ./data/interim/profiles.csv ./data/interim/sites.csv ./data/interim/profile_features.csv
@@ -131,16 +132,16 @@ impacts: ./data/interim/impacts_forecasted_foreshore_slope_sto06.csv ./data/inte
--profile-type "prestorm" \
--output-file "./data/interim/twl_mean_slope_sto06.csv"
# ./data/interim/twl_poststorm_mean_slope_sto06.csv: ./data/interim/waves.csv ./data/interim/tides.csv ./data/interim/profiles.csv ./data/interim/sites.csv ./data/interim/profile_features.csv
# activate ./.venv && python ./src/cli.py create-twl-forecast \
# --waves-csv "./data/interim/waves.csv" \
# --tides-csv "./data/interim/tides.csv" \
# --profiles-csv "./data/interim/profiles.csv" \
# --profile-features-csv "./data/interim/profile_features.csv" \
# --runup-function "sto06" \
# --slope "mean" \
# --profile-type "poststorm" \
# --output-file "./data/interim/twl_poststorm_mean_slope_sto06.csv"
./data/interim/twl_poststorm_mean_slope_sto06.csv: ./data/interim/waves.csv ./data/interim/tides.csv ./data/interim/profiles.csv ./data/interim/sites.csv ./data/interim/profile_features.csv
activate ./.venv && python ./src/cli.py create-twl-forecast \
--waves-csv "./data/interim/waves.csv" \
--tides-csv "./data/interim/tides.csv" \
--profiles-csv "./data/interim/profiles.csv" \
--profile-features-csv "./data/interim/profile_features.csv" \
--runup-function "sto06" \
--slope "mean" \
--profile-type "poststorm" \
--output-file "./data/interim/twl_poststorm_mean_slope_sto06.csv"
./data/interim/impacts_observed.csv: ./data/interim/profiles.csv ./data/interim/profile_features.csv
activate ./.venv && python ./src/cli.py create-observed-impacts \
@@ -160,11 +161,11 @@ impacts: ./data/interim/impacts_forecasted_foreshore_slope_sto06.csv ./data/inte
--forecasted-twl-csv "./data/interim/twl_foreshore_slope_sto06.csv" \
--output-file "./data/interim/impacts_forecasted_foreshore_slope_sto06.csv"
# ./data/interim/impacts_forecasted_poststorm_mean_slope_sto06.csv: ./data/interim/profile_features.csv ./data/interim/twl_foreshore_slope_sto06.csv
# activate ./.venv && python ./src/cli.py create-forecasted-impacts \
# --profile-features-csv "./data/interim/profile_features.csv" \
# --forecasted-twl-csv "./data/interim/twl_poststorm_mean_slope_sto06.csv" \
# --output-file "./data/interim/impacts_forecasted_poststorm_mean_slope_sto06.csv"
./data/interim/impacts_forecasted_poststorm_mean_slope_sto06.csv: ./data/interim/profile_features.csv ./data/interim/twl_foreshore_slope_sto06.csv
activate ./.venv && python ./src/cli.py create-forecasted-impacts \
--profile-features-csv "./data/interim/profile_features.csv" \
--forecasted-twl-csv "./data/interim/twl_poststorm_mean_slope_sto06.csv" \
--output-file "./data/interim/impacts_forecasted_poststorm_mean_slope_sto06.csv"
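The create-forecasted-impacts step invoked above is not itself shown in this compare; it presumably assigns a storm regime by comparing the forecast water level (R_high) against the dune toe and crest elevations, in the style of the Sallenger storm impact scale. A minimal, hypothetical sketch of that comparison follows; classify_regime is an illustrative helper, not a function from the repository.

import pandas as pd

def classify_regime(R_high, dune_toe_z, dune_crest_z):
    # Hypothetical Sallenger-style classification: swash / collision / overwash
    if pd.isna(R_high) or pd.isna(dune_toe_z):
        return None
    if R_high < dune_toe_z:
        return "swash"
    if not pd.isna(dune_crest_z) and R_high > dune_crest_z:
        return "overwash"
    return "collision"

# e.g. a 3.2 m R_high against a 2.8 m toe and 5.0 m crest -> "collision"
print(classify_regime(3.2, 2.8, 5.0))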
###############################

@@ -59,8 +59,6 @@ aerial LIDAR and were processed by Mitch Harley. Tides and waves (10 m contour a
- `/data/raw/raw_lidar`: This is the raw pre/post storm aerial LIDAR which was taken for the June 2016 storm. `.las` files are the raw files which have been processed into `.tiff` files using `PDAL`. Note that these files have not
been corrected for systematic errors, so actual elevations should be taken from the `processed_shorelines` folder. Obtained November 2018 from Mitch Harley from the black external HDD labeled "UNSW LIDAR".
- `/data/raw/profile_features`: Dune toe and crest locations based on prestorm LIDAR. Refer to `/notebooks/qgis.qgz` as this shows how they were manually extracted. Note that the shapefiles only show the location (lat/lon) of the dune crest and toe. For actual elevations, these locations need to be related to the processed shorelines.
- `/data/raw/profile_features_tom_beuzen`: This `.mat` file contains dune toes and crests that Tom Beuzen picked out for each profile. This is used as a basis for the toe/crest locations, but is overridden by data contained in `/data/raw/profile_features_chris_leaman`.
- `/data/raw/profile_features_chris_leaman`: An Excel file containing dune toes, crests, berms and impacts manually selected by Chris Leaman. The values in this file should take precedence over the values picked by Tom Beuzen.
## Notebooks
- `/notebooks/01_exploration.ipynb`: Shows how to import processed shorelines, waves and tides. An interactive widget plots the location and cross sections.

@@ -1,39 +1,161 @@
name: C:\Users\z5189959\Desktop\nsw-2016-storm-impact\.venv
channels:
- conda-forge
- plotly
- defaults
- conda-forge
dependencies:
- python=3.6
- attrs
- autopep8
- black
- click
- click-plugins
- colorlover
- fiona
- ipykernel
- ipython
- ipywidgets
- matplotlib
- nbformat
- notebook
- numpy
- pandas
- pandoc
- pip
- plotly
- plotly-orca
- proj4
- pyproj
- python-dateutil
- pytz
- pyyaml
- requests
- scikit-learn
- scipy
- setuptools
- shapely
- yaml
- yapf
- black=18.9b0=py_0
- colorlover=0.2.1=py_0
- jupyter_contrib_core=0.3.3=py_2
- jupyter_contrib_nbextensions=0.5.0=py36_1000
- jupyter_highlight_selected_word=0.2.0=py36_1000
- jupyter_latex_envs=1.4.4=py36_1000
- jupyter_nbextensions_configurator=0.4.0=py36_1000
- matplotlib-base=3.0.2=py36h3e3dc42_1001
- appdirs=1.4.3=py36h28b3542_0
- asn1crypto=0.24.0=py36_0
- attrs=18.2.0=py36h28b3542_0
- autopep8=1.4.3=py36_0
- backcall=0.1.0=py36_0
- blas=1.0=mkl
- bleach=3.0.2=py36_0
- boost=1.67.0=py36_4
- boost-cpp=1.67.0=hfa6e2cd_4
- ca-certificates=2018.03.07=0
- certifi=2018.10.15=py36_0
- cffi=1.11.5=py36h74b6da3_1
- chardet=3.0.4=py36_1
- click=7.0=py36_0
- click-plugins=1.0.4=py36_0
- cligj=0.5.0=py36_0
- colorama=0.4.0=py36_0
- cryptography=2.3.1=py36h74b6da3_0
- curl=7.62.0=h2a8f88b_0
- cycler=0.10.0=py36h009560c_0
- decorator=4.3.0=py36_0
- entrypoints=0.2.3=py36_2
- expat=2.2.5=he025d50_0
- fiona=1.7.10=py36h5bf8d1d_0
- freetype=2.9.1=ha9979f8_1
- freexl=1.0.5=hfa6e2cd_0
- gdal=2.2.2=py36hcebd033_1
- geos=3.6.2=h9ef7328_2
- geotiff=1.4.2=hd5bfa41_0
- hdf4=4.2.13=h712560f_2
- hdf5=1.10.1=h98b8871_1
- icc_rt=2017.0.4=h97af966_0
- icu=58.2=ha66f8fd_1
- idna=2.7=py36_0
- intel-openmp=2019.1=144
- ipykernel=5.1.0=py36h39e3cac_0
- ipython=7.2.0=py36h39e3cac_0
- ipython_genutils=0.2.0=py36h3c5d0ee_0
- ipywidgets=7.4.2=py36_0
- jedi=0.13.1=py36_0
- jinja2=2.10=py36_0
- jpeg=9b=hb83a4c4_2
- jsonschema=2.6.0=py36h7636477_0
- jupyter_client=5.2.3=py36_0
- jupyter_core=4.4.0=py36_0
- kealib=1.4.7=ha5b336b_5
- kiwisolver=1.0.1=py36h6538335_0
- krb5=1.16.1=h038dc86_6
- libboost=1.67.0=hfd51bdf_4
- libcurl=7.62.0=h2a8f88b_0
- libgdal=2.2.2=h2727f2b_1
- libiconv=1.15=h1df5818_7
- libkml=1.3.0=he5f2a48_4
- libnetcdf=4.4.1.1=h825a56a_8
- libpng=1.6.35=h2a8f88b_0
- libpq=10.5=h5fe2233_0
- libsodium=1.0.16=h9d3ae62_0
- libspatialite=4.3.0a=h383548d_18
- libssh2=1.8.0=hd619d38_4
- libtiff=4.0.9=h36446d0_2
- libxml2=2.9.8=hadb2253_1
- libxslt=1.1.32=hf6f1972_0
- lxml=4.2.5=py36hef2cd61_0
- m2w64-gcc-libgfortran=5.3.0=6
- m2w64-gcc-libs=5.3.0=7
- m2w64-gcc-libs-core=5.3.0=7
- m2w64-gmp=6.1.0=2
- m2w64-libwinpthread-git=5.0.0.4634.697f757=2
- markupsafe=1.1.0=py36he774522_0
- matplotlib=3.0.1=py36hc8f65d3_0
- mistune=0.8.4=py36he774522_0
- mkl=2018.0.3=1
- mkl_fft=1.0.6=py36hdbbee80_0
- mkl_random=1.0.1=py36h77b88f5_1
- msys2-conda-epoch=20160418=1
- munch=2.3.2=py36_0
- nbconvert=5.3.1=py36_0
- nbformat=4.4.0=py36h3a5bc1b_0
- notebook=5.7.2=py36_0
- numpy=1.15.4=py36ha559c80_0
- numpy-base=1.15.4=py36h8128ebf_0
- openjpeg=2.3.0=h5ec785f_1
- openssl=1.0.2p=hfa6e2cd_0
- pandas=0.23.4=py36h830ac7b_0
- pandoc=2.2.3.2=0
- pandocfilters=1.4.2=py36_1
- parso=0.3.1=py36_0
- pickleshare=0.7.5=py36_0
- pip=18.1=py36_0
- plotly=3.4.2=py36h28b3542_0
- proj4=4.9.3=hcf24537_7
- prometheus_client=0.4.2=py36_0
- prompt_toolkit=2.0.7=py36_0
- psutil=5.4.8=py36he774522_0
- py-boost=1.67.0=py36h8300f20_4
- pycodestyle=2.4.0=py36_0
- pycparser=2.19=py36_0
- pygments=2.2.0=py36hb010967_0
- pyopenssl=18.0.0=py36_0
- pyparsing=2.3.0=py36_0
- pyproj=1.9.5.1=py36_0
- pyqt=5.9.2=py36h6538335_2
- pysocks=1.6.8=py36_0
- python=3.6.7=h33f27b4_1
- python-dateutil=2.7.5=py36_0
- pytz=2018.7=py36_0
- pywinpty=0.5.4=py36_0
- pyyaml=3.13=py36hfa6e2cd_0
- pyzmq=17.1.2=py36hfa6e2cd_0
- qt=5.9.6=vc14h1e9a669_2
- requests=2.20.1=py36_0
- retrying=1.3.3=py36_2
- scikit-learn=0.20.1=py36hb854c30_0
- scipy=1.1.0=py36h4f6bf74_1
- send2trash=1.5.0=py36_0
- setuptools=40.6.2=py36_0
- shapely=1.6.4=py36hc90234e_0
- sip=4.19.8=py36h6538335_0
- six=1.11.0=py36_1
- sqlite=3.25.3=he774522_0
- terminado=0.8.1=py36_1
- testpath=0.4.2=py36_0
- tk=8.6.8=hfa6e2cd_0
- toml=0.10.0=py36h28b3542_0
- tornado=5.1.1=py36hfa6e2cd_0
- traitlets=4.3.2=py36h096827d_0
- urllib3=1.23=py36_0
- vc=14.1=h0510ff6_4
- vs2015_runtime=14.15.26706=h3a45250_0
- wcwidth=0.1.7=py36h3d5aa90_0
- webencodings=0.5.1=py36_1
- wheel=0.32.3=py36_0
- widgetsnbextension=3.4.2=py36_0
- win_inet_pton=1.0.1=py36_1
- wincertstore=0.2=py36h7fe50ca_0
- winpty=0.4.3=4
- xerces-c=3.2.2=ha925a31_0
- xz=5.2.4=h2fa13f4_4
- yaml=0.1.7=hc54c509_2
- yapf=0.25.0=py36_0
- zeromq=4.2.5=he025d50_1
- zlib=1.2.11=h62dcd97_3
- plotly-orca=1.1.1=1
- pip:
- blackcellmagic
- mat4py
- blackcellmagic==0.0.1
- mat4py==0.4.1
prefix: C:\Users\z5189959\Desktop\nsw-2016-storm-impact\.venv

File diff suppressed because it is too large

Binary file not shown.

@@ -59,9 +59,7 @@ def forecast_twl(
df_twl["beta"] = pd.concat(results)
# Estimate runup
R2, setup, S_total, S_inc, S_ig = runup_function(
Hs0=df_twl["Hs0"].tolist(), Tp=df_twl["Tp"].tolist(), beta=df_twl["beta"].tolist()
)
R2, setup, S_total, S_inc, S_ig = runup_function(Hs0=df_twl['Hs0'].tolist(), Tp=df_twl["Tp"].tolist(), beta=df_twl["beta"].tolist())
df_twl["R2"] = R2
df_twl["setup"] = setup

@@ -12,7 +12,7 @@ def sto06(Hs0, Tp, beta):
df = pd.DataFrame({"Hs0": Hs0, "Tp": Tp, "beta": beta}, index=[x for x in range(0, np.size(Hs0))])
df["Lp"] = 9.8 * df["Tp"] ** 2 / 2 / np.pi
df["Lp"] = 9.8 * df['Tp'] ** 2 / 2 / np.pi
# General equation
df["S_ig"] = pd.to_numeric(0.06 * np.sqrt(df["Hs0"] * df["Lp"]), errors="coerce")

@@ -5,12 +5,11 @@ Entry point to run data processing and analysis commands.
import click
import data.parse_mat as parse_mat
import data.parse_shp as profile_features
import data.profile_features as profile_features
import data.csv_to_shp as csv_to_shp
import analysis.forecast_twl as forecast_twl
import analysis.forecasted_storm_impacts as forecasted_storm_impacts
import analysis.observed_storm_impacts as observed_storm_impacts
import data.apply_manual_overwrites as apply_manual_overwrites
# Disable numpy warnings
import warnings
@@ -29,9 +28,8 @@ if __name__ == "__main__":
cli.add_command(parse_mat.create_tides_csv)
cli.add_command(parse_mat.create_profile_features)
# cli.add_command(profile_features.create_profile_features)
cli.add_command(csv_to_shp.sites_csv_to_geojson)
cli.add_command(csv_to_shp.sites_csv_to_shp)
cli.add_command(forecast_twl.create_twl_forecast)
cli.add_command(forecasted_storm_impacts.create_forecasted_impacts)
cli.add_command(observed_storm_impacts.create_observed_impacts)
cli.add_command(apply_manual_overwrites.apply_profile_features_overwrite)
cli()

@@ -1,103 +0,0 @@
"""
After generating interim data files based on raw data, we may need to overwrite some rows with manual data.
"""
import pandas as pd
import numpy as np
import click
from utils import setup_logging
logger = setup_logging()
def overwrite_profile_features(df_interim, df_overwrite, df_profiles, overwrite=True):
"""
Overwrite the interim profile features file with an excel file.
:param interim_file: Should be './data/interim/profile_features.csv'
:param overwrite_file: Should be './data/raw/profile_features_chris_leaman/profile_features_chris_leaman.csv'
:param overwrite: Whether or not to overwrite the original interim_file. If false, file will not be written
:return:
"""
# Merge
df_merged = df_interim.merge(df_overwrite, left_index=True, right_index=True, suffixes=["", "_overwrite"])
# Remove x vals if the overwrite file says "remove"
df_merged.loc[df_merged.dune_crest_x_overwrite == "remove", "dune_crest_x"] = np.nan
df_merged.loc[df_merged.dune_toe_x_overwrite == "remove", "dune_toe_x"] = np.nan
# Put in new x vals. Note that a NaN value in the overwrite column means keep the original value.
idx = (df_merged.dune_crest_x_overwrite.notnull()) & (df_merged.dune_crest_x_overwrite != "remove")
df_merged.loc[idx, "dune_crest_x"] = df_merged.loc[idx, "dune_crest_x_overwrite"]
idx = (df_merged.dune_toe_x_overwrite.notnull()) & (df_merged.dune_toe_x_overwrite != "remove")
df_merged.loc[idx, "dune_toe_x"] = df_merged.loc[idx, "dune_toe_x_overwrite"]
# Recalculate z values from x coordinates
for site_id in df_merged.index.get_level_values("site_id").unique():
logger.info("Overwriting dune crest/toes with manual values: {}".format(site_id))
# Get profiles
df_profile = df_profiles.query('site_id=="{}"'.format(site_id))
for param in ["prestorm", "poststorm"]:
for loc in ["crest", "toe"]:
# Get x value to find corresponding z value
x_val = df_merged.loc[(site_id, param), "dune_{}_x".format(loc)]
if np.isnan(x_val):
df_merged.loc[(site_id, param), "dune_{}_z".format(loc)] = np.nan
continue
# Get the corresponding z value for our x value
query = 'site_id=="{}" & profile_type=="{}" & x=="{}"'.format(site_id, param, x_val)
# Try to get the value from the other profile if we get a NaN or an empty dataframe
if df_profile.query(query).empty:
if param == "prestorm":
query = 'site_id=="{}" & profile_type=="{}" & x=="{}"'.format(site_id, "poststorm", x_val)
elif param == "poststorm":
query = 'site_id=="{}" & profile_type=="{}" & x=="{}"'.format(site_id, "prestorm", x_val)
z_val = df_profile.query(query).iloc[0].z
else:
z_val = df_profile.query(query).iloc[0].z
# Put results back into merged dataframe
df_merged.loc[(site_id, param), "dune_{}_z".format(loc)] = z_val
# Drop columns
df_merged = df_merged.drop(columns=["dune_crest_x_overwrite", "dune_toe_x_overwrite", "comment"], errors="ignore")
# Merge back into interim data frame. Use concat/duplicates since .update will not update nan values
df_final = pd.concat([df_merged, df_interim])
df_final = df_final[~df_final.index.duplicated(keep="first")]
df_final = df_final.sort_index()
# Return the merged dataframe (writing to file is handled by the caller)
return df_final
@click.command(short_help="overwrite profile_features with manual excel sheet")
@click.option("--interim_file", required=True, help="path of profile_features.csv")
@click.option("--overwrite_file", required=True, help="path of excel file with overwrite data")
@click.option("--profile_file", required=True, help="path of profiles.csv")
@click.option("--overwrite/--no-overwrite", default=True)
def apply_profile_features_overwrite(interim_file, overwrite_file, profile_file, overwrite):
logger.info("Overwriting profile features with manual excel file")
# Load files
df_interim = pd.read_csv(interim_file, index_col=[0, 1])
df_overwrite = pd.read_excel(overwrite_file)
df_profiles = pd.read_csv(profile_file, index_col=[0, 1, 2])
if "site_id" in df_overwrite.columns and "profile_type" in df_overwrite.columns:
df_overwrite = df_overwrite.set_index(["site_id", "profile_type"])
# Replace interim values with overwrite values
df_interim = overwrite_profile_features(df_interim, df_overwrite, df_profiles, overwrite)
# Write to csv
df_interim.to_csv(interim_file, float_format="%.3f")
logger.info("Done!")

@@ -1,333 +1,34 @@
"""
Converts .csv files to .shp and .geojson files
"""
import os
import numpy.ma as ma
import click
import fiona
import numpy as np
import pandas as pd
from fiona.crs import from_epsg
from shapely.geometry import Point, mapping, LineString
from collections import OrderedDict
from data.parse_shp import convert_coord_systems
from shapely.geometry import Point, mapping
from utils import setup_logging
logger = setup_logging()
def R_high_to_geojson(sites_csv, profiles_csv, impacts_csv, output_geojson):
sites_csv = './data/interim/sites.csv'
profiles_csv = './data/interim/profiles.csv'
impacts_csv = './data/interim/impacts_forecasted_mean_slope_sto06.csv'
output_geojson = './data/interim/R_high_forecasted_mean_slope_sto06.geojson'
df_sites = pd.read_csv(sites_csv, index_col=[0])
df_profiles = pd.read_csv(profiles_csv, index_col=[0,1,2])
df_impacts = pd.read_csv(impacts_csv, index_col=[0])
# Create geojson file
schema = {
'geometry': 'Point',
'properties': OrderedDict([
('beach', 'str'),
('site_id', 'str'),
('elevation', 'float'),
])
}
with fiona.open(output_geojson, 'w', driver='GeoJSON', crs=from_epsg(4326), schema=schema) as output:
for index, row in df_impacts.iterrows():
site_id = index
beach = index[:-4]
# Find lat/lon of R_high position
R_high_z = row['R_high']
# Get poststorm profile (or should this be prestorm?)
df_profile = df_profiles.query('site_id=="{}" & profile_type=="prestorm"'.format(index))
int_x = crossings(df_profile.index.get_level_values('x').tolist(),
df_profile.z.tolist(),
R_high_z)
# Take the most landward intersection. Continue to the next site if there is no intersection
try:
int_x = max(int_x)
except:
continue
# Get lat/lon on intercept position
site = df_sites.loc[site_id]
center_profile_x = site["profile_x_lat_lon"]
orientation = site["orientation"]
center_lat_lon = Point(site['lon'], site['lat']) # Get lat/lon of center of profile
center_xy = convert_coord_systems(center_lat_lon)
center_x, center_y = center_xy.xy
# Calculate xy position of point and convert to lat/lon
point_x = center_x + (center_profile_x - int_x) * np.cos(np.deg2rad(orientation))
point_y = center_y + (center_profile_x - int_x) * np.sin(np.deg2rad(orientation))
point_xy = Point(point_x, point_y)
point_lat_lon = convert_coord_systems(point_xy,
in_coord_system="EPSG:28356",
out_coord_system="EPSG:4326")
prop = OrderedDict([
('beach', beach),
('site_id', site_id),
('elevation', R_high_z),
])
output.write({"geometry": mapping(point_lat_lon), "properties": prop})
def crossings(profile_x, profile_z, constant_z):
"""
Finds the x coordinate of a z elevation for a beach profile. Much faster than using shapely to calculate
intersections since we are only interested in intersections of a constant value. Will return multiple
intersections if found. Used in calculating beach slope.
Adapted from https://stackoverflow.com/a/34745789
:param profile_x: List of x coordinates for the beach profile section
:param profile_z: List of z coordinates for the beach profile section
:param constant_z: Float of the elevation to find corresponding x coordinates
:return: List of x coordinates which correspond to the constant_z
"""
# Remove nans to suppress warning messages
valid = ~ma.masked_invalid(profile_z).mask
profile_z = np.array(profile_z)[valid]
profile_x = np.array(profile_x)[valid]
# Normalize the 'signal' to zero.
# Use np.subtract rather than a list comprehension for performance reasons
z = np.subtract(profile_z, constant_z)
# Find all indices right before any crossing.
# TODO Sometimes this can give a runtime warning https://stackoverflow.com/a/36489085
indices = np.where(z[:-1] * z[1:] < 0)[0]
# Use linear interpolation to find intersample crossings.
return [profile_x[i] - (profile_x[i] - profile_x[i + 1]) / (z[i] - z[i + 1]) * (z[i]) for i in indices]
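# Example usage of crossings with hypothetical profile values (1 m spacing): the elevations drop
# from 4 m to -1 m, so the profile crosses z = 2.5 m once, between x = 1 and x = 2:
# >>> crossings([0, 1, 2, 3, 4, 5], [4.0, 3.0, 2.0, 1.0, 0.0, -1.0], 2.5)
# [1.5]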
@click.command()
@click.option("--sites-csv", required=True, help=".csv file to convert")
@click.option("--profile-features-csv", required=True, help=".csv file to convert")
@click.option("--output-geojson", required=True, help="where to store .geojson file")
def profile_features_to_geojson(sites_csv, profile_features_csv, output_geojson):
"""
Converts profile_features containing dune toes and crest locations to a geojson we can load into QGIS
:param sites_csv:
:param profiles_csv:
:param profile_features_csv:
:param output_geojson:
:return:
"""
logger.info("Creating profile features geojson")
# Read files from interim folder
df_sites = pd.read_csv(sites_csv, index_col=[0])
df_profile_features = pd.read_csv(profile_features_csv, index_col=[0])
# Create geojson file
schema = {
'geometry': 'Point',
'properties': OrderedDict([
('beach', 'str'),
('site_id', 'str'),
('point_type', 'str'), # prestorm_dune_toe, prestorm_dune_crest, poststorm_dune_toe, poststorm_dune_crest
('profile_type', 'str'),
('elevation', 'float'),
])
}
with fiona.open(output_geojson, 'w', driver='GeoJSON', crs=from_epsg(4326), schema=schema) as output:
for index, row in df_profile_features.iterrows():
beach = index[:-4]
site_id = index
profile_type = row['profile_type']
for point_type in ['crest', 'toe']:
# point_type='crest'
elevation = row['dune_{}_z'.format(point_type)]
x = row['dune_{}_x'.format(point_type)]
if np.isnan(x):
continue
# Geojsons need to use 'null' instead of 'nan'
if np.isnan(elevation):
elevation = None
# Convert x position to lat/lon
site = df_sites.loc[site_id]
center_profile_x = site["profile_x_lat_lon"]
orientation = site["orientation"]
center_lat_lon = Point(site['lon'], site['lat']) # Get lat/lon of center of profile
center_xy = convert_coord_systems(center_lat_lon)
center_x, center_y = center_xy.xy
# Calculate xy position of point and convert to lat/lon
point_x = center_x + (center_profile_x - x) * np.cos(np.deg2rad(orientation))
point_y = center_y + (center_profile_x - x) * np.sin(np.deg2rad(orientation))
point_xy = Point(point_x, point_y)
point_lat_lon = convert_coord_systems(point_xy,
in_coord_system="EPSG:28356",
out_coord_system="EPSG:4326")
prop = OrderedDict([
('beach', beach),
('site_id',site_id),
('point_type', point_type),
('profile_type', profile_type),
('elevation', elevation),
])
output.write({"geometry": mapping(point_lat_lon), "properties": prop})
@click.command()
@click.option("--input-csv", required=True, help=".csv file to convert")
@click.option("--output-geojson", required=True, help="where to store .geojson file")
def sites_csv_to_geojson(input_csv, output_geojson):
@click.option("--output-shp", required=True, help="where to store .shp file")
def sites_csv_to_shp(input_csv, output_shp):
"""
Converts our dataframe of sites to .geojson to load in QGis. Sites are loaded as linestrings of the profile
cross-sections
Converts our dataframe of sites to .shp to load in QGis
:param input_csv:
:param output_geojson:
:param output_shp:
:return:
"""
logger.info("Converting %s to %s", input_csv, output_geojson)
logger.info("Converting %s to %s", input_csv, output_shp)
df_sites = pd.read_csv(input_csv, index_col=[0])
logger.info(os.environ.get("GDAL_DATA", None))
schema = {
'geometry': 'LineString',
'properties': OrderedDict([
('beach','str'),
('site_id', 'str'),
])
}
with fiona.open(output_geojson, 'w', driver='GeoJSON', crs=from_epsg(4326), schema=schema) as output:
schema = {"geometry": "Point", "properties": {"beach": "str", "site_id": "str"}}
with fiona.open(output_shp, "w", crs=from_epsg(4326), driver="ESRI Shapefile", schema=schema) as output:
for index, row in df_sites.iterrows():
# Work out where center of profile is
orientation = row["orientation"]
center_profile_x = row["profile_x_lat_lon"]
center_lon = row["lon"]
center_lat = row["lat"]
center_lat_lon = Point(center_lon, center_lat)
center_xy = convert_coord_systems(center_lat_lon)
center_x, center_y = center_xy.xy
# Work out where landward profile limit is
land_x = center_x + center_profile_x * np.cos(np.deg2rad(orientation))
land_y = center_y + center_profile_x * np.sin(np.deg2rad(orientation))
land_xy = Point(land_x, land_y)
land_lat_lon = convert_coord_systems(land_xy, in_coord_system="EPSG:28356",
out_coord_system="EPSG:4326")
# Work out where seaward profile limit is
sea_x = center_x - center_profile_x * np.cos(np.deg2rad(orientation))
sea_y = center_y - center_profile_x * np.sin(np.deg2rad(orientation))
sea_xy = Point(sea_x, sea_y)
sea_lat_lon = convert_coord_systems(sea_xy, in_coord_system="EPSG:28356", out_coord_system="EPSG:4326")
line_string = LineString([land_lat_lon, center_lat_lon, sea_lat_lon])
prop = OrderedDict([("beach", row["beach"]),
("site_id", index)])
output.write({"geometry": mapping(line_string), "properties": prop})
logger.info("Done!")
@click.command()
@click.option("--sites-csv", required=True, help="sites.csv file to convert")
@click.option("--observed-impacts-csv", required=True, help="impacts-observed.csv file to convert")
@click.option("--forecast-impacts-csv", required=True, help="impacts-forecast.csv file to convert")
@click.option("--output-geojson", required=True, help="where to store .geojson file")
def impacts_to_geojson(sites_csv, observed_impacts_csv, forecast_impacts_csv, output_geojson):
"""
Converts impacts observed and forecasted to a geojson for visualization in QGIS
:param sites_csv:
:param observed_impacts_csv:
:param forecast_impacts_csv:
:param output_geojson:
:return:
"""
# Get information from .csv and read into pandas dataframe
df_sites = pd.read_csv(sites_csv, index_col=[0])
df_observed = pd.read_csv(observed_impacts_csv, index_col=[0])
df_forecast = pd.read_csv(forecast_impacts_csv, index_col=[0]).rename({'storm_regime': 'forecast_storm_regime'})
# Rename columns, so we can distinguish between forecast and observed
df_observed = df_observed.rename(columns={'storm_regime': 'observed_storm_regime'})
df_forecast = df_forecast.rename(columns={'storm_regime': 'forecast_storm_regime'})
# Concat into one big dataframe
df = pd.concat([df_sites, df_observed, df_forecast], sort=True,axis=1)
# Make new column for accuracy of forecast. Use underpredict/correct/overpredict classes
df.loc[df.observed_storm_regime == df.forecast_storm_regime, 'forecast_accuracy'] = 'correct'
# Observed/Forecasted/Class for each combination
classes = [('swash', 'collision', 'overpredict'),
('swash', 'swash', 'correct'),
('swash', 'overwash', 'overpredict'),
('collision', 'swash', 'underpredict'),
('collision', 'collision', 'correct'),
('collision', 'overwash', 'overpredict'),
('overwash', 'swash', 'underpredict'),
('overwash', 'collision', 'underpredict'),
('overwash', 'overwash', 'correct')]
for c in classes:
df.loc[(df.observed_storm_regime == c[0])&(df.forecast_storm_regime == c[1]), 'forecast_accuracy'] = c[2]
schema = {
'geometry': 'Point',
'properties': OrderedDict([
('beach','str'),
('site_id', 'str'),
('forecast_storm_regime', 'str'),
('observed_storm_regime', 'str',),
('forecast_accuracy', 'str')
])
}
# TODO Impact marker location should be at the seaward end of the profile
with fiona.open(output_geojson, 'w', driver='GeoJSON', crs=from_epsg(4326), schema=schema) as output:
for index, row in df.iterrows():
# Locate the marker at the seaward end of the profile to avoid cluttering the coastline.
# Work out where seaward profile limit is
orientation = row["orientation"]
center_profile_x = row["profile_x_lat_lon"]
center_lon = row["lon"]
center_lat = row["lat"]
center_lat_lon = Point(center_lon, center_lat)
center_xy = convert_coord_systems(center_lat_lon)
center_x, center_y = center_xy.xy
sea_x = center_x - center_profile_x * np.cos(np.deg2rad(orientation))
sea_y = center_y - center_profile_x * np.sin(np.deg2rad(orientation))
sea_xy = Point(sea_x, sea_y)
sea_lat_lon = convert_coord_systems(sea_xy, in_coord_system="EPSG:28356", out_coord_system="EPSG:4326")
prop = OrderedDict([
('beach',row["beach"]),
('site_id', index),
('forecast_storm_regime', row["forecast_storm_regime"]),
('observed_storm_regime', row["observed_storm_regime"],),
('forecast_accuracy', row["forecast_accuracy"])
])
output.write({"geometry": mapping(sea_lat_lon), "properties": prop})
point = Point(row["x_200_lon"], row["x_200_lat"])
prop = {"beach": row["beach"], "site_id": index}
output.write({"geometry": mapping(point), "properties": prop})
logger.info("Done!")

@@ -11,7 +11,7 @@ import pandas as pd
from mat4py import loadmat
from shapely.geometry import Point
from data.parse_shp import convert_coord_systems
from data.profile_features import convert_coord_systems
from utils import setup_logging
logger = setup_logging()
@@ -198,17 +198,6 @@ def parse_profiles_and_sites(profiles_mat):
site_rows = []
site_counter = 0
# Our z values can come from these columns, depending on the isgood flag.
# Let's reorganise them into a list of lists
z_names = ["Zpre", "Zpost", "Zrec1", "Zrec2", "Zrec3", "Zrec4"]
z_cols = [mat_data[col] for col in z_names]
z_sites = []
for cols in zip(*z_cols):
z_vals = []
for z_vector in zip(*cols):
z_vals.append([z[0] for z in z_vector])
z_sites.append(z_vals)
for i, site in enumerate(mat_data["site"]):
logger.debug("Processing site {} of {}".format(i + 1, len(mat_data["site"])))
@@ -226,36 +215,28 @@ def parse_profiles_and_sites(profiles_mat):
# Want to calculate the orientation
orientation = {}
for x, lat, lon, z_site, easting, northing in zip(
for x, lat, lon, z_prestorm, z_poststorm, easting, northing in zip(
mat_data["x"][i],
mat_data["lats"][i],
mat_data["lons"][i],
z_sites[i],
mat_data["Zpre"][i],
mat_data["Zpost"][i],
mat_data["eastings"][i],
mat_data["northings"][i],
):
profile_type = None
for j, is_good in enumerate([1] + mat_data["isgood"][i]):
# Assumes the first profile is always good and is the prestorm profile
if j == 0:
profile_type = "prestorm"
z = z_site[j]
land_lim = np.nan
# Only extract the pre and post storm profiles
for j, profile_type in enumerate(["prestorm", "poststorm"]):
# Skips bad profiles
elif is_good == 0:
continue
# Takes the first isgood profile as the post storm profile
else:
profile_type = "poststorm"
z = z_site[j]
if mat_data["isgood"][i][j] == 1:
land_lim = mat_data["landlims"][i][j]
survey_datetime = matlab_datenum_to_datetime(mat_data["surveydates"][i][j])
if profile_type == "prestorm":
z = z_prestorm
else:
z = z_poststorm
# Keep a record of where the center of the profile is located, and the locations of the land
# and sea
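matlab_datenum_to_datetime, called a few lines above on mat_data["surveydates"], is not shown in this compare. The usual conversion from a MATLAB datenum to a Python datetime is sketched below; the repository's implementation may differ in detail.

from datetime import datetime, timedelta

def matlab_datenum_to_datetime_sketch(datenum):
    # MATLAB datenums count days from year 0000; Python ordinals count from year 0001,
    # hence the 366-day offset. The fractional part carries the time of day.
    return (datetime.fromordinal(int(datenum))
            + timedelta(days=datenum % 1)
            - timedelta(days=366))

print(matlab_datenum_to_datetime_sketch(736482.5))  # -> 2016-06-01 12:00:00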
@@ -277,16 +258,12 @@ def parse_profiles_and_sites(profiles_mat):
"lat": lat[0],
"profile_type": profile_type,
"x": x[0],
"z": z,
"z": z[0],
"land_lim": land_lim,
"survey_datetime": survey_datetime,
}
)
# Stop looking at profiles if we've got our post-storm profile
if profile_type == "poststorm":
break
orientation = math.degrees(
math.atan2(
orientation["land_northing"] - orientation["sea_northing"],
@@ -329,7 +306,7 @@ def remove_zeros(df_profiles):
df_profiles.index.get_level_values("profile_type") == key[1]
)
df_profile = df_profiles[idx_site]
x_last_ele = df_profile[df_profile.z == 0].index.get_level_values("x")[0]
x_last_ele = df_profile[df_profile.z != 0].index.get_level_values("x")[-1]
df_profiles.loc[idx_site & (df_profiles.index.get_level_values("x") > x_last_ele), "z"] = np.nan
logger.info("Removed zeros from end of profiles")

@@ -1,6 +1,6 @@
---
version: 1
disable_existing_loggers: True
disable_existing_loggers: False
formatters:
simple:
format: "[%(asctime)s] [%(filename)15.15s:%(lineno)4.4s %(funcName)15.15s] [%(levelname)-4.4s] %(message)s"
