Compare commits

5 Commits

@@ -0,0 +1,94 @@
+import numpy as np
+import pandas as pd
+from scipy import stats, optimize
+
+WORKBOOK = '../inputs/20220505_Probabilistic_Erosion_Parameters_5th_JTC_REVIEWED_IPCC_SLR_OFFSET.xlsx'  # noqa
+SHEET = 'IPCC AR6 WRL FINAL'
+VERSION = 'WRL V5 corrected to 2020'  # First row of data
+
+
+def get_cdf(x, loc, scale):
+    """Calculate cumulative distribution function, using Cauchy distribution."""
+    return stats.cauchy(loc=loc, scale=scale).cdf(x)
+
+
+def cauchy(n_runs, start_year, end_year):
+    """
+    Use Monte Carlo simulation to generate sea level rise trajectories by
+    fitting IPCC data to a Cauchy distribution.
+
+    Args:
+        n_runs (int): number of runs
+        start_year (int): first year of model
+        end_year (int): last year of model
+
+    Returns:
+        the simulated sea level rise (m)
+    """
+    # Load IPCC SLR data
+    df = pd.read_excel(WORKBOOK, sheet_name=SHEET, index_col='psmsl_id')
+    idx = df.index.get_loc(VERSION)  # First row containing values we want
+    df = df.iloc[idx:idx + 5].set_index('quantile')
+    df = df.drop(columns=['process', 'confidence', 'scenario']).T
+    df.index.name = 'year'
+
+    percentiles = df.columns.values / 100
+
+    for i, row in df.iterrows():
+        values = row.values
+
+        # Set valid range of probability distribution function
+        x_min = row[5] + (row[5] - row[50])
+        x_max = row[95] + (row[95] - row[50])
+
+        # Fit Cauchy distribution
+        loc, scale = optimize.curve_fit(get_cdf, values, percentiles)[0]
+
+        if x_min == x_max:
+            # Hardcode values for start year (when everything is zero)
+            scale = 0.001
+            x_min = -0.001
+            x_max = 0.001
+
+        df.loc[i, 'loc'] = loc
+        df.loc[i, 'scale'] = scale
+        df.loc[i, 'min'] = x_min
+        df.loc[i, 'max'] = x_max
+
+    # Interpolate intermediate values
+    index = np.arange(df.index.min(), df.index.max() + 1)
+    df = df.reindex(index).interpolate(method='linear')
+    df = df.loc[start_year:end_year]  # Trim dataframe to given range
+
+    # Prepare array for SLR values
+    slr = np.zeros([len(df), n_runs], dtype=float)
+
+    for i, (year, row) in enumerate(df.iterrows()):
+        # Get probability distribution for current year
+        dist = stats.cauchy(loc=row['loc'], scale=row['scale'])
+
+        # Generate random samples, oversampling until enough fall in range
+        for factor in range(2, 1000):
+            s_raw = dist.rvs(n_runs * factor)
+
+            # Keep only samples within the valid range
+            s = s_raw[(s_raw > row['min']) & (s_raw < row['max'])]
+
+            if len(s) >= n_runs:
+                break  # Success; otherwise retry with a larger factor
+
+        # Take the required number of samples
+        slr[i] = s[:n_runs]
+
+    # Sort each row to make SLR trajectories smooth
+    slr = np.sort(slr, axis=1)
+
+    # Randomise run order (column-wise)
+    slr = np.random.permutation(slr.T).T
+
+    return slr
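The sampling loop above oversamples the Cauchy distribution and rejects out-of-range draws, retrying with a larger factor until enough remain. An alternative that needs no retries is inverse-CDF sampling, which maps uniform draws between cdf(min) and cdf(max) through the quantile function. A minimal sketch; the helper name sample_truncated is illustrative, not part of this change:

import numpy as np
from scipy import stats

def sample_truncated(dist, lo, hi, size):
    """Draw samples from a frozen scipy distribution restricted to [lo, hi]."""
    # Uniform draws over the CDF interval map back to in-range samples
    u = np.random.uniform(dist.cdf(lo), dist.cdf(hi), size=size)
    return dist.ppf(u)

dist = stats.cauchy(loc=0.5, scale=0.1)
samples = sample_truncated(dist, lo=0.0, hi=1.0, size=10000)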

@@ -169,6 +169,7 @@ def get_ongoing_recession(n_runs, start_year, end_year, sea_level_rise,
             'min' (array_like): minimum value
             'mode' (array_like): most likely value
             'max' (array_like): maximum value
+            'function' (str): optional external function ('package.function')
         bruun_factor (dict):
             'min' (float): minimum value
             'mode' (float): most likely value
@@ -180,6 +181,12 @@ def get_ongoing_recession(n_runs, start_year, end_year, sea_level_rise,
     Returns:
         the simulated recession distance (m)
+
+    Notes:
+        'sea_level_rise' is calculated with a triangular probability
+        distribution by default. Alternatively, 'sea_level_rise' can be
+        calculated using an external function, to which the arguments
+        'n_runs', 'start_year', and 'end_year' are passed.
     """

     # Get time interval from input file
@@ -187,6 +194,15 @@ def get_ongoing_recession(n_runs, start_year, end_year, sea_level_rise,
     n_years = len(years)

     # Interpolate sea level rise projections (m)
-    slr_mode = np.interp(years,
-                         xp=sea_level_rise['year'],
-                         fp=sea_level_rise['mode'])[:, np.newaxis]
+    if sea_level_rise.get('function'):  # Get slr from separate function
+        # Get names of package/script and function
+        pkg, func_name = sea_level_rise['function'].split('.')
+
+        # Import function from package
+        func = getattr(__import__(pkg), func_name)
+        slr = func(n_runs=n_runs, start_year=start_year, end_year=end_year)
+
+    else:  # Use triangular distribution
+        slr_mode = np.interp(years,
+                             xp=sea_level_rise['year'],
+                             fp=sea_level_rise['mode'])[:, np.newaxis]
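The import mechanism above resolves 'package.function' with __import__, so the external module must be importable from the working directory (or installed). A minimal sketch of what such a module could look like; the file name my_slr.py and its contents are hypothetical:

# my_slr.py -- hypothetical external SLR module
import numpy as np

def linear(n_runs, start_year, end_year):
    """Toy SLR model: linear trend plus noise, shaped (n_years, n_runs)."""
    years = np.arange(start_year, end_year + 1)
    trend = 0.005 * (years - start_year)  # 5 mm/year, illustrative only
    noise = np.random.normal(scale=0.01, size=(len(years), n_runs))
    return trend[:, np.newaxis] + noise

# Referenced from the inputs as:
#     sea_level_rise = {'function': 'my_slr.linear'}
# get_ongoing_recession() then calls getattr(__import__('my_slr'), 'linear')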
@@ -222,10 +238,10 @@ def get_ongoing_recession(n_runs, start_year, end_year, sea_level_rise,
         slr[i, :] = np.ones([1, n_runs]) * slr_mode[i]

     # Sort each row, so SLR follows a smooth trajectory for each model run
-    slr.sort(axis=1)
+    slr = np.sort(slr, axis=1)

     # Shuffle columns, so the order of model runs is randomised
-    np.random.shuffle(slr.T)
+    slr = np.random.permutation(slr.T).T

     # Shift sea level so it is zero in the start year
     slr -= slr[0, :].mean(axis=0)
@@ -459,17 +475,18 @@ def process(beach_name, beach_scenario, n_runs, start_year, end_year,
                                 xp=profile_volume[valid_idx],
                                 fp=profile_chainage[valid_idx])

-        fig, ax = plt.subplots(9,
-                               len(output_years),
-                               figsize=(16, 24),
-                               sharey='row')
-
         # Check whether to save probabilistic diagnostics
+        output_diagnostics = False
         for _, bp in pd.DataFrame(diagnostics).iterrows():
             if ((str(prof['block']) == str(bp['block']))
                     and (prof['profile'] == bp['profile'])):
                 output_diagnostics = True
+
+        fig, ax = plt.subplots(9,
+                               len(output_years),
+                               figsize=(16, 24),
+                               sharey='row')

         # Loop through years
         pbar_year = tqdm(output_years, leave=False)
         for j, year in enumerate(pbar_year):
@@ -666,13 +683,15 @@ def process(beach_name, beach_scenario, n_runs, start_year, end_year,
                     'diagnostics',
                     '{} {} {}.csv'.format(beach_scenario, year,
                                           profile_type))
+                dump_df = dump_df[::100]  # Only output every 100th row
                 dump_df.to_csv(csv_name, float_format='%g')

                 for i, c in enumerate(dump_df.columns[3:]):
                     ax[i, j].plot(dump_df.index,
                                   dump_df[c],
                                   '.',
-                                  color='#666666',
+                                  color='#aaaaaa',
+                                  alpha=0.1,
                                   markersize=2)
                     ax[i, j].spines['right'].set_visible(False)
                     ax[i, j].spines['top'].set_visible(False)
@@ -698,6 +717,115 @@ def process(beach_name, beach_scenario, n_runs, start_year, end_year,
             plt.savefig(figname, bbox_inches='tight', dpi=300)
             plt.close(fig)

+        # Plot time series figure
+        fig, ax = plt.subplots(4,
+                               2,
+                               figsize=(12, 16),
+                               sharey='row',
+                               gridspec_kw={
+                                   'wspace': 0.05,
+                                   'width_ratios': [3, 1]
+                               })
+
+        # Sea level rise: sample trajectories and final-year distribution
+        ax[0, 0].plot(years, slr[:, :100], c='#aaaaaa', lw=0.2)
+        ax[0, 0].plot(years,
+                      slr[:, 1],
+                      c='C0',
+                      label='Sample simulation')
+        ax[0, 1].hist(slr[-1, :],
+                      bins=100,
+                      fc='#cccccc',
+                      ec='#aaaaaa',
+                      orientation='horizontal')
+
+        # Bruun recession
+        ax[1, 0].plot(years, (slr * bf)[:, :100], c='#aaaaaa', lw=0.2)
+        ax[1, 0].plot(years, (slr * bf)[:, 1], c='C0')
+        ax[1, 1].hist((slr * bf)[-1, :],
+                      bins=100,
+                      fc='#cccccc',
+                      ec='#aaaaaa',
+                      orientation='horizontal')
+
+        # Underlying recession
+        ax[2, 0].plot(years, ur[:, :100], c='#aaaaaa', lw=0.2)
+        ax[2, 0].plot(years, ur[:, 1], c='C0')
+        ax[2, 1].hist(ur[-1, :],
+                      bins=100,
+                      fc='#cccccc',
+                      ec='#aaaaaa',
+                      orientation='horizontal')
+
+        # Total shoreline displacement
+        ax[3, 0].plot(years, r[:, :100], c='#aaaaaa', lw=0.2)
+        ax[3, 0].plot(years, r[:, 1], c='C0', zorder=3)
+        ax[3, 1].hist(r[-1, :],
+                      bins=100,
+                      fc='#cccccc',
+                      ec='#aaaaaa',
+                      orientation='horizontal')
+
+        # Overlay storm demand on the sample simulation, year by year
+        baseline = r[:, 1]
+        sdd = storm_demand_dist[:, 1]
+        pe = [
+            matplotlib.patheffects.Stroke(linewidth=5,
+                                          foreground='#ffffff',
+                                          capstyle='butt'),
+            matplotlib.patheffects.Normal()
+        ]
+        for i in range(len(slr)):
+            ax[3, 0].plot([years[i], years[i]],
+                          [baseline[i], baseline[i] + sdd[i]],
+                          c='C0',
+                          path_effects=pe)
+
+        # Maximum recession encountered
+        r_max = (baseline + sdd).max()
+        ax[3, 0].axhline(y=r_max, c='C3', linestyle=':')
+
+        for a in ax[:, 0]:
+            a.set_xlim(right=years[-1])
+
+        # Add line at zero
+        for a in ax[:-1, 0]:
+            a.spines['bottom'].set_position(('data', 0))
+            a.set_xticklabels([])
+
+        for a in ax[:, 1]:
+            a.xaxis.set_visible(False)
+            a.spines['bottom'].set_visible(False)
+
+        ax[3, 0].annotate(
+            'Most eroded beach state encountered in planning period',
+            (years[0], r_max),
+            xytext=(0, 20),
+            textcoords='offset pixels')
+
+        ax[0, 0].legend()
+        ax[0, 0].set_title((f'Probabilistic trajectories\n'
+                            f'(first 100 out of {n_runs:,} runs)'))
+        ax[0, 1].set_title(
+            f'Probability\ndistribution\nin year {years[-1]}')
+        ax[0, 0].set_ylabel('Sea level (m)', labelpad=10)
+        ax[1, 0].set_ylabel('Bruun recession (m)', labelpad=10)
+        ax[2, 0].set_ylabel('Underlying recession (m)', labelpad=10)
+        ax[3, 0].set_ylabel('Shoreline displacement (m)', labelpad=10)
+        ax[3, 0].set_title(('Bruun recession '
+                            '+ underlying recession '
+                            '+ storm demand'),
+                           y=0.9)
+
+        for a in ax.ravel():
+            a.spines['top'].set_visible(False)
+            a.spines['right'].set_visible(False)
+
+        figname = os.path.join(
+            'diagnostics',
+            f'{beach_scenario} {profile_type} timeseries.png')
+        plt.savefig(figname, bbox_inches='tight', dpi=300)
+        plt.close(fig)

 def main():
