{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Shoreline position vs wave energy\n",
    "This notebook looks at the relationship between shoreline position and wave energy."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Setup notebook"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import datetime\n",
    "import pickle\n",
    "import glob\n",
    "import itertools\n",
    "import urllib.parse\n",
    "import urllib.request\n",
    "\n",
    "import fiona\n",
    "import shapely\n",
    "import matplotlib.pyplot as plt\n",
    "import pandas as pd\n",
    "import geopandas\n",
    "import numpy as np\n",
    "import requests\n",
    "import colorcet as cc\n",
    "from bs4 import BeautifulSoup\n",
    "from scipy.stats import percentileofscore\n",
    "from scipy.interpolate import griddata, SmoothBivariateSpline\n",
    "from scipy.ndimage import gaussian_filter\n",
    "from shapely.geometry import Point\n",
    "from shapely.ops import nearest_points\n",
    "from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Shoreline positions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import Kilian's data\n",
    "shorelines = pickle.load(open(\"14_timeseries_Australia_2.pkl\", \"rb\"))\n",
    "beaches = fiona.open(\"14_beaches_Australia.geojson\")\n",
    "polygons = fiona.open(\"14_polygons_Australia.geojson\")\n",
    "transects = fiona.open(\"14_transects_Australia.geojson\")"
   ]
  },
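  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "*Note:* based on how it is used below, `shorelines` is assumed to be a dict keyed by transect id, where each value holds a `'chainage'` array of shoreline positions and a matching `'dates'` array. A quick way to check one entry:\n",
    "\n",
    "```python\n",
    "first_id = next(iter(shorelines))\n",
    "print(first_id, shorelines[first_id].keys())\n",
    "```"
   ]
  },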
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     0
    ]
   },
   "outputs": [],
   "source": [
    "def df_from_csv(csv, index_col, data_folder='../data/interim'):\n",
    "    print('Importing {}'.format(csv))\n",
    "    return pd.read_csv(os.path.join(data_folder, csv), index_col=index_col)\n",
    "\n",
    "\n",
    "# Import Chris' data\n",
    "df_sites = df_from_csv('sites.csv', index_col=[0])\n",
    "df_obs_impacts = df_from_csv('impacts_observed.csv', index_col=[0])\n",
    "df_waves = df_from_csv('waves.csv', index_col=[0, 1])\n",
    "\n",
    "# Parse the datetime level of the waves index\n",
    "df_waves.index = df_waves.index.set_levels(\n",
    "    [df_waves.index.levels[0],\n",
    "     pd.to_datetime(df_waves.index.levels[1])])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get coordinates of the transects for which Kilian has provided shoreline data\n",
    "transect_data = [x for x in transects if x['properties']['id'] in shorelines.keys()]\n",
    "transect_dict = [{'name': x['properties']['name'],\n",
    "                  'id': x['properties']['id'],\n",
    "                  'orientation': x['properties']['orientation'],\n",
    "                  'start_coords': Point(x['geometry']['coordinates'][0][0],\n",
    "                                        x['geometry']['coordinates'][0][1]),\n",
    "                  'end_coords': Point(x['geometry']['coordinates'][1][0],\n",
    "                                      x['geometry']['coordinates'][1][1])}\n",
    "                 for x in transect_data]\n",
    "df_transects = pd.DataFrame(transect_dict)\n",
    "gdf_transects = geopandas.GeoDataFrame(df_transects,\n",
    "                                       geometry='start_coords',\n",
    "                                       crs='EPSG:4326')\n",
    "\n",
    "# To find the closest of Chris' sites to each of Kilian's transects,\n",
    "# first convert the site coordinates to geometries using geopandas\n",
    "df_sites['coords'] = list(zip(df_sites.lon, df_sites.lat))\n",
    "df_sites['coords'] = df_sites['coords'].apply(Point)\n",
    "gdf_sites = geopandas.GeoDataFrame(df_sites, geometry='coords', crs='EPSG:4326')\n",
    "gdf_sites['site_id'] = gdf_sites.index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Find the nearest of Chris' sites for each of Kilian's transects\n",
    "\n",
    "\n",
    "def nearest(row,\n",
    "            geom_union,\n",
    "            df1,\n",
    "            df2,\n",
    "            geom1_col='geometry',\n",
    "            geom2_col='geometry',\n",
    "            src_column=None):\n",
    "    \"\"\"Find the nearest point and return the corresponding value from the specified column.\"\"\"\n",
    "    # Find the geometry that is closest\n",
    "    nearest = df2[geom2_col] == nearest_points(row[geom1_col], geom_union)[1]\n",
    "    # Get the corresponding value from df2 (matching is based on the geometry)\n",
    "    value = df2[nearest][src_column].values[0]\n",
    "    return value\n",
    "\n",
    "\n",
    "unary_union = gdf_sites.unary_union\n",
    "gdf_transects['chris_site_id'] = gdf_transects.apply(nearest,\n",
    "                                                     geom_union=unary_union,\n",
    "                                                     df1=gdf_transects,\n",
    "                                                     df2=gdf_sites,\n",
    "                                                     geom1_col='start_coords',\n",
    "                                                     geom2_col='coords',\n",
    "                                                     src_column='site_id',\n",
    "                                                     axis=1)"
   ]
  },
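  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "*Aside:* the `nearest` helper above does a brute-force nearest-point match via `shapely.ops.nearest_points`. If a newer geopandas (>= 0.10) is available, a sketch of the same matching using the built-in spatial join would be:\n",
    "\n",
    "```python\n",
    "# Sketch only; assumes geopandas >= 0.10. Joins each transect to its\n",
    "# nearest site and carries the site columns across.\n",
    "matched = geopandas.sjoin_nearest(gdf_transects.set_geometry('start_coords'),\n",
    "                                  gdf_sites.set_geometry('coords'),\n",
    "                                  how='left')\n",
    "```"
   ]
  },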
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# We now have the closest site_id, so check the separation distance. If the\n",
    "# distance between the two is too large, it is probably not a good match.\n",
    "gdf_transects = gdf_transects.merge(gdf_sites[['coords']],\n",
    "                                    left_on='chris_site_id',\n",
    "                                    right_on='site_id')\n",
    "gdf_transects = gdf_transects.rename({'coords': 'chris_coords'}, axis='columns')\n",
    "\n",
    "distances = gdf_transects[['start_coords']].to_crs(epsg=28356).distance(\n",
    "    geopandas.GeoDataFrame(gdf_transects[['chris_coords']],\n",
    "                           geometry='chris_coords',\n",
    "                           crs='EPSG:4326').to_crs(epsg=28356))\n",
    "\n",
    "gdf_transects['transect_to_chris_dist'] = distances"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Keep only transects within 300 m of their matched site\n",
    "gdf_transects = gdf_transects[gdf_transects.transect_to_chris_dist < 300]\n",
    "gdf_transects"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Attach the storm-induced change in beach width. Kilian's shorelines are\n",
    "# given at z=0 (MSL), so the change in MSL beach width gives the change in\n",
    "# shoreline position due to the storm.\n",
    "gdf_transects = gdf_transects.merge(df_obs_impacts.width_msl_change_m,\n",
    "                                    left_on=['chris_site_id'],\n",
    "                                    right_on=['site_id'])\n",
    "gdf_transects"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# At each beach, calculate the percentile of the shoreline position right\n",
    "# before and right after the storm\n",
    "data = []\n",
    "\n",
    "for i, row in gdf_transects.iterrows():\n",
    "\n",
    "    # Get shoreline records\n",
    "    id_shorelines = shorelines[row.id]['chainage']\n",
    "    id_dates = shorelines[row.id]['dates']\n",
    "\n",
    "    # Find the last observation before the June 2016 storm\n",
    "    dt_storm = datetime.datetime(2016, 6, 3)\n",
    "    dt_storm = dt_storm.replace(tzinfo=datetime.timezone.utc)\n",
    "    mask = pd.Series([x < dt_storm for x in id_dates])\n",
    "    i_last_obs = mask[::-1].idxmax()\n",
    "\n",
    "    last_obs_ch = id_shorelines[i_last_obs]\n",
    "    last_obs_date = id_dates[i_last_obs]\n",
    "    post_storm_ch = last_obs_ch + row.width_msl_change_m\n",
    "\n",
    "    # Calculate the shoreline percentile scores\n",
    "    prestorm_shoreline_pctile = percentileofscore(\n",
    "        id_shorelines[~np.isnan(id_shorelines)], last_obs_ch)\n",
    "    poststorm_shoreline_pctile = percentileofscore(\n",
    "        id_shorelines[~np.isnan(id_shorelines)], post_storm_ch)\n",
    "    change_shoreline_pctile = poststorm_shoreline_pctile - prestorm_shoreline_pctile\n",
    "    rel_change_shoreline_pctile = (change_shoreline_pctile /\n",
    "                                   prestorm_shoreline_pctile * 100)\n",
    "\n",
    "    data.append({\n",
    "        'prestorm_shoreline_pctile': prestorm_shoreline_pctile,\n",
    "        'poststorm_shoreline_pctile': poststorm_shoreline_pctile,\n",
    "        'change_shoreline_pctile': change_shoreline_pctile,\n",
    "        'rel_change_shoreline_pctile': rel_change_shoreline_pctile,\n",
    "        'index': i\n",
    "    })\n",
    "\n",
    "data = pd.DataFrame(data).set_index('index')\n",
    "gdf_transects = gdf_transects.join(data)\n",
    "gdf_transects"
   ]
  },
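  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For reference, `scipy.stats.percentileofscore` gives the percentile rank of a score relative to a list of scores (with the default `kind='rank'`, ties are averaged). A minimal example:\n",
    "\n",
    "```python\n",
    "from scipy.stats import percentileofscore\n",
    "percentileofscore([1, 2, 3, 4], 3)  # 75.0\n",
    "```"
   ]
  },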
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Grab data from the NSW Nearshore Wave Transformation tool. This maps\n",
    "# Kilian's ids to Chris' site_ids and the tool's nearshore output point ids.\n",
    "sites = [{\n",
    "    'id': 'way4042355',\n",
    "    'site_id': 'DEEWHYs0003',\n",
    "    'nsw_nearshore_id': 1007832\n",
    "}, {\n",
    "    'id': 'way13858409',\n",
    "    'site_id': 'DEEWHYn0003',\n",
    "    'nsw_nearshore_id': 1007822\n",
    "}, {\n",
    "    'id': 'way13858412',\n",
    "    'site_id': 'MONA0011',\n",
    "    'nsw_nearshore_id': 1007726\n",
    "}, {\n",
    "    'id': 'way14040821',\n",
    "    'site_id': 'NARRA0007',\n",
    "    'nsw_nearshore_id': 1007760\n",
    "}, {\n",
    "    'id': 'way14040977',\n",
    "    'site_id': 'NARRA0018',\n",
    "    'nsw_nearshore_id': 1007770\n",
    "}, {\n",
    "    'id': 'way14041013',\n",
    "    'site_id': 'NARRA0030',\n",
    "    'nsw_nearshore_id': 1007778\n",
    "}, {\n",
    "    'id': 'way25005079',\n",
    "    'site_id': 'MACM0009',\n",
    "    'nsw_nearshore_id': 1007354\n",
    "}, {\n",
    "    'id': 'way54609773',\n",
    "    'site_id': 'WAMBE0005',\n",
    "    'nsw_nearshore_id': 1007264\n",
    "}, {\n",
    "    'id': 'way54667480',\n",
    "    'site_id': 'AVOCAn0005',\n",
    "    'nsw_nearshore_id': 1007306\n",
    "}, {\n",
    "    'id': 'way54669965',\n",
    "    'site_id': 'AVOCAs0004',\n",
    "    'nsw_nearshore_id': 1007312\n",
    "}, {\n",
    "    'id': 'way134627391',\n",
    "    'site_id': 'ONEMILE0007',\n",
    "    'nsw_nearshore_id': 1005098\n",
    "}, {\n",
    "    'id': 'way159040990',\n",
    "    'site_id': 'LHOUSE0004',\n",
    "    'nsw_nearshore_id': 1005448\n",
    "}, {\n",
    "    'id': 'way173070325',\n",
    "    'site_id': 'LHOUSEn0077',\n",
    "    'nsw_nearshore_id': 1004186\n",
    "}, {\n",
    "    'id': 'way182614828',\n",
    "    'site_id': 'TREACH0009',\n",
    "    'nsw_nearshore_id': 1005472\n",
    "}, {\n",
    "    'id': 'way189407637',\n",
    "    'site_id': 'NSHORE_n0063',\n",
    "    'nsw_nearshore_id': 1003994\n",
    "}, {\n",
    "    'id': 'way190929758',\n",
    "    'site_id': 'CRESn0069',\n",
    "    'nsw_nearshore_id': 1003708\n",
    "}, {\n",
    "    'id': 'way222144734',\n",
    "    'site_id': 'BLUEYS0002',\n",
    "    'nsw_nearshore_id': 1005316\n",
    "}, {\n",
    "    'id': 'way222145626',\n",
    "    'site_id': 'BOOM0008',\n",
    "    'nsw_nearshore_id': 1005298\n",
    "}, {\n",
    "    'id': 'way224198013',\n",
    "    'site_id': 'MANNING0048',\n",
    "    'nsw_nearshore_id': 1004712\n",
    "}, {\n",
    "    'id': 'way450323845',\n",
    "    'site_id': 'NAMB0033',\n",
    "    'nsw_nearshore_id': np.nan\n",
    "}, {\n",
    "    'id': 'relation2303044',\n",
    "    'site_id': 'ENTRA0041',\n",
    "    'nsw_nearshore_id': 1007110\n",
    "}, {\n",
    "    'id': 'relation2723197',\n",
    "    'site_id': 'GRANTSn0022',\n",
    "    'nsw_nearshore_id': 1004296\n",
    "}]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def nearshore_wave_csv_url(id, start_date, end_date):\n",
    "    URL = 'http://www.nswaves.com.au/transform.php'\n",
    "    payload = {\n",
    "        'init': '1',\n",
    "        'type': 'Transform-Full',\n",
    "        'startsite': '{}'.format(id),\n",
    "        'endsite': '{}'.format(id),\n",
    "        'timestep': 'null',\n",
    "        'startdate': start_date.strftime('%Y-%m-%d'),\n",
    "        'starthour': '00',\n",
    "        'enddate': end_date.strftime('%Y-%m-%d'),\n",
    "        'endhour': '00',\n",
    "        'sitestep': '1',\n",
    "        'method': 'Parametric',\n",
    "        'source': 'Waverider',\n",
    "        'filename': 'ckl',\n",
    "        'format': 'csv',\n",
    "    }\n",
    "\n",
    "    r = requests.post(URL, data=payload)\n",
    "\n",
    "    soup = BeautifulSoup(r.text, 'html.parser')\n",
    "\n",
    "    # Check if data extraction was successful\n",
    "    if soup.find_all(string=\"OK : Data Extraction Successful - Click filename/s to download data file\"):\n",
    "\n",
    "        # Find the download link\n",
    "        for link in soup.find_all('a'):\n",
    "\n",
    "            href = link.get('href')\n",
    "            if not href or '/data/full' not in href:\n",
    "                continue\n",
    "\n",
    "            # Convert to an absolute url\n",
    "            csv_url = urllib.parse.urljoin(URL, href)\n",
    "\n",
    "            return csv_url\n",
    "    else:\n",
    "        return None\n",
    "\n",
    "\n",
    "def download_csv(url, file_path):\n",
    "    urllib.request.urlretrieve(url, file_path)\n",
    "    print('Downloaded {}'.format(file_path))\n",
    "\n",
    "\n",
    "def daterange(start_date, end_date, delta):\n",
    "    while start_date < end_date:\n",
    "        yield start_date\n",
    "        start_date += delta\n",
    "\n",
    "\n",
    "def download_nearshore_csv(id, site_id, nsw_nearshore_id, start_date,\n",
    "                           end_date, output_folder='./14_nearshore_waves/'):\n",
    "\n",
    "    # Create the output folder if it doesn't already exist\n",
    "    os.makedirs(output_folder, exist_ok=True)\n",
    "\n",
    "    # Output filename\n",
    "    output_filename = '{}_{}_{}_{}_{}.csv'.format(\n",
    "        id,\n",
    "        site_id,\n",
    "        nsw_nearshore_id,\n",
    "        start_date.strftime('%Y%m%d'),\n",
    "        end_date.strftime('%Y%m%d'),\n",
    "    )\n",
    "    output_filepath = os.path.join(output_folder, output_filename)\n",
    "\n",
    "    # Don't download if the file already exists\n",
    "    if os.path.isfile(output_filepath):\n",
    "        return\n",
    "\n",
    "    csv_url = nearshore_wave_csv_url(nsw_nearshore_id, start_date, end_date)\n",
    "\n",
    "    if csv_url:\n",
    "        download_csv(csv_url, output_filepath)\n",
    "    else:\n",
    "        print('No url found')"
   ]
  },
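  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A minimal one-off call of the downloader, assuming the NSW transformation service is still reachable (the ids are taken from the `sites` list above):\n",
    "\n",
    "```python\n",
    "download_nearshore_csv('way4042355', 'DEEWHYs0003', 1007832,\n",
    "                       datetime.datetime(2005, 1, 1),\n",
    "                       datetime.datetime(2005, 12, 31))\n",
    "```"
   ]
  },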
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# start_year = 2005\n",
    "# end_year = 2015\n",
    "# output_folder = './14_nearshore_waves/'\n",
    "\n",
    "# # Create list of start/end dates we want to request\n",
    "# date_ranges = [(datetime.datetime(x, 1, 1), datetime.datetime(x, 12, 31))\n",
    "#                for x in range(start_year, end_year + 1)]\n",
    "\n",
    "# inputs = list(itertools.product(sites, date_ranges))\n",
    "\n",
    "# for inpt in inputs:\n",
    "#     download_nearshore_csv(inpt[0]['id'], inpt[0]['site_id'],\n",
    "#                            inpt[0]['nsw_nearshore_id'], inpt[1][0], inpt[1][1])\n",
    "#     break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # Use a queue to download the data with several worker threads\n",
    "\n",
    "# from queue import Queue\n",
    "# from threading import Thread\n",
    "\n",
    "# q = Queue(maxsize=0)\n",
    "# num_threads = 4\n",
    "\n",
    "# start_year = 2005\n",
    "# end_year = 2015\n",
    "# date_ranges = [(datetime.datetime(x, 1, 1), datetime.datetime(x, 12, 31))\n",
    "#                for x in range(start_year, end_year + 1)]\n",
    "\n",
    "# inputs = list(itertools.product(sites, date_ranges))\n",
    "\n",
    "# results = [{} for x in inputs]\n",
    "\n",
    "# # Populate the queue with tasks: the job inputs and the index of each job (as a tuple)\n",
    "# for i, inpt in enumerate(inputs):\n",
    "#     q.put((i, inpt))\n",
    "\n",
    "\n",
    "# # Threaded function for queue processing\n",
    "# def crawl(q, result):\n",
    "#     while not q.empty():\n",
    "#         work = q.get()  # fetch new work from the queue\n",
    "#         print(work)\n",
    "#         download_nearshore_csv(work[1][0]['id'], work[1][0]['site_id'],\n",
    "#                                work[1][0]['nsw_nearshore_id'], work[1][1][0],\n",
    "#                                work[1][1][1])\n",
    "#         # Signal to the queue that the task has been processed\n",
    "#         q.task_done()\n",
    "#     return True\n",
    "\n",
    "\n",
    "# # Start worker threads on queue processing\n",
    "# for i in range(num_threads):\n",
    "#     print('Starting thread {}'.format(i))\n",
    "#     worker = Thread(target=crawl, args=(q, results))\n",
    "#     # Daemon threads allow the main program to exit even if they don't finish\n",
    "#     worker.daemon = True\n",
    "#     worker.start()\n",
    "\n",
    "# # Now wait until the queue has been processed\n",
    "# q.join()\n",
    "# print('All tasks completed.')"
   ]
  },
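  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The cell below estimates the cross-shore component of nearshore wave power from linear wave theory, at an assumed depth of $d = 10$ m and using the deep-water wavelength approximation:\n",
    "\n",
    "$$L = \\frac{g T_p^2}{2\\pi}, \\quad n = \\frac{1}{2}\\left(1 + \\frac{4\\pi d / L}{\\sinh(4\\pi d / L)}\\right), \\quad E = \\frac{1}{16}\\rho g H_s^2, \\quad C = \\frac{g T_p}{2\\pi}\\tanh\\left(\\frac{2\\pi d}{L}\\right)$$\n",
    "\n",
    "$$P_x = n\\,E\\,C \\cos\\alpha$$\n",
    "\n",
    "where $\\rho = 1025$ kg/m$^3$ and $\\alpha$ is the angle between the shore-normal and the wave direction."
   ]
  },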
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# For each site, compare the June 2016 storm wave energy with the historical\n",
    "# nearshore wave climate and add the results to gdf_transects\n",
    "for site in sites:\n",
    "\n",
    "    # Get shoreline orientation\n",
    "    orientation = df_sites.loc[[site['site_id']]].orientation.iloc[0]\n",
    "\n",
    "    # Get peak hourly wave power from the June 2016 storm\n",
    "    max_hrly_wave_power = df_waves.loc[[site['site_id']]].Pxs.max()\n",
    "\n",
    "    # Load nearshore wave csv files into one dataframe\n",
    "    site_nearshore_wave_files = glob.glob('./14_nearshore_waves/*{}*'.format(\n",
    "        site['site_id']))\n",
    "\n",
    "    if len(site_nearshore_wave_files) == 0:\n",
    "        continue\n",
    "\n",
    "    df_hist_waves = pd.concat((pd.read_csv(f,\n",
    "                                           skiprows=8,\n",
    "                                           index_col=0,\n",
    "                                           names=['Hs', 'Tp', 'dir'],\n",
    "                                           na_values=' NaN')\n",
    "                               for f in site_nearshore_wave_files))\n",
    "    df_hist_waves.index = pd.to_datetime(df_hist_waves.index)\n",
    "\n",
    "    # At each row, calculate the cross-shore component of nearshore wave power\n",
    "    df_hist_waves['d'] = 10\n",
    "    df_hist_waves['L'] = 9.81 * df_hist_waves.Tp**2 / 2 / np.pi\n",
    "    df_hist_waves['n'] = 0.5 * (\n",
    "        1 + (4 * np.pi * df_hist_waves.d / df_hist_waves.L) /\n",
    "        (np.sinh(4 * np.pi * df_hist_waves.d / df_hist_waves.L)))\n",
    "    df_hist_waves['E'] = 1 / 16 * 1025 * 9.81 * df_hist_waves.Hs**2\n",
    "    df_hist_waves['C'] = 9.81 * df_hist_waves.Tp / 2 / np.pi * np.tanh(\n",
    "        2 * np.pi * df_hist_waves.d / df_hist_waves.L)\n",
    "    df_hist_waves['shoreline_tn_angle'] = 270 - orientation\n",
    "    df_hist_waves.loc[\n",
    "        df_hist_waves.shoreline_tn_angle > 360,\n",
    "        'shoreline_tn_angle'] = df_hist_waves.shoreline_tn_angle - 360\n",
    "    df_hist_waves['alpha'] = df_hist_waves.shoreline_tn_angle - df_hist_waves.dir\n",
    "    df_hist_waves['Px'] = (df_hist_waves.n * df_hist_waves.E * df_hist_waves.C *\n",
    "                           np.cos(np.deg2rad(df_hist_waves.alpha)))\n",
    "\n",
    "    # Percentile of the storm's peak hourly wave power within the historical record\n",
    "    storm_Px_hrly_pctile = percentileofscore(df_hist_waves.Px.dropna().values,\n",
    "                                             max_hrly_wave_power,\n",
    "                                             kind='mean')\n",
    "\n",
    "    # Calculate cumulative wave energy from the storm\n",
    "    idx = ((df_waves.index.get_level_values('datetime') > '2016-06-04') &\n",
    "           (df_waves.index.get_level_values('datetime') < '2016-06-07') &\n",
    "           (df_waves.index.get_level_values('site_id') == site['site_id']))\n",
    "    hrs = len(df_waves[idx])\n",
    "    Pxscum_storm = df_waves[idx].Pxs.sum()\n",
    "\n",
    "    # Calculate cumulative wave energy of mean wave conditions over the length of the storm\n",
    "    Pxscum_mean = df_hist_waves['Px'].mean() * hrs\n",
    "    Pxscum_storm_mean_ratio = Pxscum_storm / Pxscum_mean\n",
    "\n",
    "    # Add to the gdf_transects dataframe\n",
    "    idx = gdf_transects[gdf_transects.chris_site_id == site['site_id']].index\n",
    "    gdf_transects.loc[idx, 'storm_Px_hrly_pctile'] = storm_Px_hrly_pctile\n",
    "    gdf_transects.loc[idx, 'Pxscum_storm'] = Pxscum_storm\n",
    "    gdf_transects.loc[idx, 'Pxscum_mean'] = Pxscum_mean\n",
    "    gdf_transects.loc[idx, 'Pxscum_storm_mean_ratio'] = Pxscum_storm_mean_ratio"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "gdf_transects.sort_values(by='Pxscum_storm_mean_ratio', ascending=False).head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "gdf_transects.sort_values(by='rel_change_shoreline_pctile').head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Drop nans\n",
    "gdf_transects = gdf_transects.dropna(axis='index',\n",
    "                                     subset=[\n",
    "                                         'Pxscum_storm_mean_ratio',\n",
    "                                         'prestorm_shoreline_pctile',\n",
    "                                         'change_shoreline_pctile',\n",
    "                                         'rel_change_shoreline_pctile'\n",
    "                                     ],\n",
    "                                     how='any')\n",
    "\n",
    "# Grid results\n",
    "grid_x, grid_y = np.mgrid[0:2:100j, 0:100:100j]\n",
    "\n",
    "x_vals = gdf_transects.Pxscum_storm_mean_ratio.values\n",
    "y_vals = gdf_transects.prestorm_shoreline_pctile.values\n",
    "z_vals = gdf_transects.rel_change_shoreline_pctile.values\n",
    "\n",
    "grid = griddata((x_vals, y_vals), z_vals, (grid_x, grid_y), method='cubic')\n",
    "\n",
    "# Smooth data\n",
    "# https://stackoverflow.com/a/34370291\n",
    "# grid = gaussian_filter(grid, sigma=0.5)"
   ]
  },
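  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Note that `scipy.interpolate.griddata` returns `NaN` outside the convex hull of the sample points for the `'linear'` and `'cubic'` methods, so `contourf` simply leaves those regions blank. A minimal illustration:\n",
    "\n",
    "```python\n",
    "# Query point (2, 2) lies outside the triangle of sample points\n",
    "griddata(np.array([[0., 0.], [1., 0.], [0., 1.]]),\n",
    "         np.array([0., 1., 2.]),\n",
    "         np.array([[2., 2.]]), method='linear')  # -> array([nan])\n",
    "```"
   ]
  },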
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def round_down(num, divisor):\n",
    "    \"\"\"Round num down to the nearest multiple of divisor, e.g. round_down(47, 25) == 25.\"\"\"\n",
    "    return num - (num % divisor)\n",
    "\n",
    "\n",
    "def round_up(x, divisor):\n",
    "    \"\"\"Round x up to the nearest multiple of divisor, e.g. round_up(47, 25) == 50.\"\"\"\n",
    "    return (x + divisor - 1) // divisor * divisor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "gdf_transects[gdf_transects.prestorm_shoreline_pctile < 40].sort_values(\n",
    "    by='change_shoreline_pctile', ascending=True).head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot the storm/mean cumulative wave energy ratio vs the prestorm shoreline\n",
    "# percentile, coloured by the change in shoreline percentile\n",
    "\n",
    "x_col = 'Pxscum_storm_mean_ratio'\n",
    "y_col = 'prestorm_shoreline_pctile'\n",
    "# z_col = 'rel_change_shoreline_pctile'\n",
    "z_col = 'change_shoreline_pctile'\n",
    "\n",
    "# Drop nans\n",
    "gdf_transects = gdf_transects.dropna(axis='index',\n",
    "                                     subset=[x_col, y_col, z_col],\n",
    "                                     how='any')\n",
    "\n",
    "# Grid results\n",
    "grid_x, grid_y = np.mgrid[0:25:100j, 0:100:100j]\n",
    "\n",
    "x_vals = gdf_transects[x_col].values\n",
    "y_vals = gdf_transects[y_col].values\n",
    "z_vals = gdf_transects[z_col].values\n",
    "\n",
    "grid = griddata((x_vals, y_vals), z_vals, (grid_x, grid_y),\n",
    "                method='linear', rescale=True)\n",
    "\n",
    "# Smooth data\n",
    "# https://stackoverflow.com/a/34370291\n",
    "# grid = gaussian_filter(grid, sigma=0.5)\n",
    "\n",
    "# # 2D spline interpolation\n",
    "# s = SmoothBivariateSpline(x_vals, y_vals, z_vals)\n",
    "# spline_x = np.arange(1, 25, 0.1)\n",
    "# spline_y = np.arange(0, 100, 0.5)\n",
    "# spline_z = s(spline_x, spline_y, grid=True)\n",
    "# spline_grid_x, spline_grid_y = np.meshgrid(spline_x, spline_y)\n",
    "\n",
    "# Create figure\n",
    "fig = plt.figure(figsize=(3, 3), dpi=150, facecolor='w', edgecolor='k')\n",
    "ax = fig.add_subplot(111)\n",
    "\n",
    "# Define colors\n",
    "cmap_interval = 25\n",
    "cmap = cc.cm.fire\n",
    "vmin = round_down(np.min(z_vals), cmap_interval)\n",
    "vmax = round_up(np.max(z_vals), cmap_interval)\n",
    "levels = [x * cmap_interval for x in range(-4, 2)]\n",
    "\n",
    "# Plot SPLINE grid surface\n",
    "# cf = ax.contourf(spline_grid_x, spline_grid_y, spline_z.T, levels=levels, cmap=cmap, vmin=vmin, vmax=vmax)\n",
    "\n",
    "# Plot SPLINE contours\n",
    "# cs = plt.contour(grid_x, grid_y, grid, levels=levels, linewidths=0.5, colors='white', vmin=vmin, vmax=vmax)\n",
    "# ax.clabel(cs, inline=1, fontsize=4, fmt='%1.0f%%')\n",
    "\n",
    "# Plot the (linearly) interpolated grid surface\n",
    "cf = plt.contourf(grid_x, grid_y, grid, levels=levels, cmap=cmap,\n",
    "                  vmin=vmin, vmax=vmax)\n",
    "\n",
    "# Plot contours of the interpolated grid\n",
    "cs = plt.contour(grid_x, grid_y, grid, levels=levels, linewidths=0.5,\n",
    "                 colors='white', vmin=vmin, vmax=vmax)\n",
    "ax.clabel(cs, inline=1, fontsize=4, fmt='%1.0f%%')\n",
    "\n",
    "scatter = ax.scatter(\n",
    "    x=x_vals,\n",
    "    y=y_vals,\n",
    "    c=z_vals,\n",
    "    s=1,\n",
    "    cmap=cmap, vmin=vmin, vmax=vmax\n",
    ")\n",
    "\n",
    "ax.set_xlim([1, 25])\n",
    "\n",
    "ax.set_xlabel(x_col)\n",
    "ax.set_ylabel(y_col)\n",
    "\n",
    "cbar = plt.colorbar(cf)\n",
    "cbar.set_label(z_col)\n",
    "\n",
    "ax.grid(True, linestyle=\"--\", alpha=0.2, color='grey', linewidth=1)\n",
    "\n",
    "plt.show()\n",
    "\n",
    "fig.savefig('14_beach_state_vs_wave_energy_{}'.format(z_col),\n",
    "            dpi=600, bbox_inches=\"tight\", pad_inches=0.01)"
   ]
  }
 ],
 "metadata": {
  "hide_input": false,
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {
    "height": "calc(100% - 180px)",
    "left": "10px",
    "top": "150px",
    "width": "297.797px"
   },
   "toc_section_display": true,
   "toc_window_display": true
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}