Compare commits

...

3 Commits

@@ -464,6 +464,12 @@ def process(beach_name, beach_scenario, n_runs, start_year, end_year,
                                   figsize=(16, 24),
                                   sharey='row')
 
+        # Check whether to save probabilistic diagnostics
+        output_diagnostics = False
+        for _, bp in pd.DataFrame(diagnostics).iterrows():
+            if ((str(prof['block']) == str(bp['block']))
+                    and (prof['profile'] == bp['profile'])):
+                output_diagnostics = True
 
         # Loop through years
         pbar_year = tqdm(output_years, leave=False)
         for j, year in enumerate(pbar_year):
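
For context, this hunk assumes `diagnostics` is a table-like collection of block/profile pairs selected for diagnostic output. A hypothetical sketch of its shape (the real value is defined elsewhere in the script and is not part of this diff):

    # Hypothetical shape only; the actual `diagnostics` input is
    # loaded elsewhere (e.g. from the scenario configuration).
    diagnostics = [
        {'block': '10', 'profile': 5},  # dump diagnostics for this profile
        {'block': '12', 'profile': 2},
    ]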
@@ -595,103 +601,102 @@ def process(beach_name, beach_scenario, n_runs, start_year, end_year,
                           header=False,
                           float_format='%g')
 
-            # Check whether to save probabilistic diagnostics
-            for _, bp in pd.DataFrame(diagnostics).iterrows():
-                if not ((str(prof['block']) == str(bp['block'])) and
-                        (prof['profile'] == bp['profile'])):
-                    continue  # Don't save
-
+            if output_diagnostics:
                 # Save probabilistic diagnostics
                 year_idx = year == years
 
                 # Find index where most extreme event occurred
                 event_year_idx = chainage_with_recession.argmin(axis=0)
 
                 # define dummy index
                 ix = np.arange(n_runs)
                 dump_data = {
                     'Sea level rise (m)':
                     slr[event_year_idx, ix].ravel(),
                     'Bruun factor (-)':
                     bf[event_year_idx, ix].ravel(),
                     'Bruun factor x SLR (m)':
                     slr[event_year_idx, ix].ravel() *
                     bf[event_year_idx, ix].ravel(),
                     'Underlying trend rate (m/yr)':
                     ur_rate[year_idx, :].ravel(),
                     'Underlying trend (m)':
                     ur[event_year_idx, ix].ravel(),
                     'Underlying + SLR (m)':
                     r[event_year_idx, ix].ravel(),
                     'Total movement (m)':
                     (storm_demand_dist + r)[event_year_idx, ix].ravel(),
                     'Storm demand distance (m)':
                     storm_demand_dist[event_year_idx, ix].ravel(),
                     'Storm demand volume (m3/m)':
                     storm_demand_volume[event_year_idx, ix].ravel(),
                 }
+
                 dump_df = pd.DataFrame(dump_data)
                 dump_df['Run ID'] = np.arange(len(event_year_idx)) + 1
                 dump_df['Event year'] = years[event_year_idx]
                 dump_df['Years elapsed'] = event_year_idx + 1
 
                 # Reorder columns
                 dump_df = dump_df[[
                     'Run ID',
                     'Event year',
                     'Years elapsed',
                     'Sea level rise (m)',
                     'Bruun factor (-)',
                     'Bruun factor x SLR (m)',
                     'Underlying trend rate (m/yr)',
                     'Underlying trend (m)',
                     'Underlying + SLR (m)',
                     'Total movement (m)',
                     'Storm demand distance (m)',
                     'Storm demand volume (m3/m)',
                 ]]
 
                 # Sort based on maximum movement
                 dump_df = dump_df.sort_values('Total movement (m)',
                                               ascending=False)
+
                 # Add encounter probabilities
-                dump_df['Encounter probability (%)'] = np.linspace(0,
-                                                                   100,
-                                                                   num=n_runs +
-                                                                   2)[1:-1]
+                dump_df['Encounter probability (%)'] = np.linspace(
+                    0, 100, num=n_runs + 2)[1:-1]
                 dump_df = dump_df.set_index('Encounter probability (%)')
 
                 csv_name = os.path.join(
                     'diagnostics',
-                    '{} {} {}.csv'.format(beach_scenario, year, profile_type))
+                    '{} {} {}.csv'.format(beach_scenario, year,
+                                          profile_type))
                 dump_df.to_csv(csv_name, float_format='%g')
 
                 for i, c in enumerate(dump_df.columns[3:]):
                     ax[i, j].plot(dump_df.index,
                                   dump_df[c],
                                   '.',
                                   color='#666666',
                                   markersize=2)
                     ax[i, j].spines['right'].set_visible(False)
                     ax[i, j].spines['top'].set_visible(False)
                     if j == 0:
                         ax[i, 0].yaxis.set_label_coords(-0.4, 0.5)
                         label = c.replace('(', '\n(')
-                        ax[i, 0].set_ylabel(label, va='top', linespacing=1.5)
-                    ax[i, j].set_xlabel('Encounter probability (%)', labelpad=10)
+                        ax[i, 0].set_ylabel(label,
+                                            va='top',
+                                            linespacing=1.5)
+                    ax[i, j].set_xlabel('Encounter probability (%)',
+                                        labelpad=10)
                     ax[0, j].set_title(year)
                 fig.suptitle('{}, block {}, profile {}'.format(
                     beach_scenario, prof['block'], prof['profile']),
                              y=0.92)
-            figname = os.path.join(
-                'diagnostics', '{} {}.png'.format(beach_scenario,
-                                                  profile_type))
-            plt.savefig(figname, bbox_inches='tight', dpi=300)
+        if output_diagnostics:
+            figname = os.path.join(
+                'diagnostics',
+                f'{beach_scenario} {profile_type} scatter.png')
+            plt.savefig(figname, bbox_inches='tight', dpi=300)
+        plt.close(fig)
 
 def main():
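
A note on the encounter-probability step in this hunk: `np.linspace(0, 100, num=n_runs + 2)[1:-1]` generates `n_runs` evenly spaced plotting positions strictly inside (0, 100), so after sorting by total movement the most extreme run maps to the lowest non-zero probability and no run is assigned an impossible 0% or 100%. A minimal sketch, assuming an illustrative n_runs = 4:

    import numpy as np

    n_runs = 4  # illustrative value only
    probs = np.linspace(0, 100, num=n_runs + 2)[1:-1]
    print(probs)  # [20. 40. 60. 80.]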

@@ -0,0 +1,80 @@
+"""Fit probability distributions to IPCC sea level rise forecasts.
+
+Reads:
+    'IPCC AR6.xlsx'
+
+Writes:
+    'png/*.png'
+
+D. Howe
+d.howe@wrl.unsw.edu.au
+2022-05-12
+"""
+import os
+import re
+
+import numpy as np
+import pandas as pd
+from scipy import stats, optimize
+
+import matplotlib.pyplot as plt
+
+# Read data
+df = pd.read_excel('IPCC AR6.xlsx', index_col=[0, 1, 2, 3, 4])
+df = df.sort_index()
+
+# Use all 'medium' confidence scenarios for intermediate quantiles
+scenarios = ['ssp119', 'ssp126', 'ssp245', 'ssp370', 'ssp585']
+dff = df.loc[838, 'total', 'medium', scenarios].groupby('quantile').mean()
+
+# Use ssp119/ssp585 for 5th and 95th quantiles
+dff.loc[5] = df.loc[838, 'total', 'medium', 'ssp119', 5]
+dff.loc[95] = df.loc[838, 'total', 'medium', 'ssp585', 95]
+
+dff = dff.T
+dff.index.name = 'year'
+
+percentiles = dff.columns.values / 100
+values = dff.loc[2150].values
+
+x_min = values.min() - 0.2
+x_max = values.max() + 0.2
+x = np.linspace(x_min, x_max, num=1000)
+
+# Get statistical distributions (all continuous distributions in scipy)
+distributions = [
+    getattr(stats, d) for d in dir(stats)
+    if isinstance(getattr(stats, d), stats.rv_continuous)
+]
+
+for dist in distributions:
+
+    def cdf(x, loc, scale):
+        """Calculate cumulative distribution function."""
+        return dist(loc=loc, scale=scale).cdf(x)
+
+    try:
+        # Fit loc/scale so the CDF passes through the IPCC quantiles
+        loc, scale = optimize.curve_fit(cdf, values, percentiles)[0]
+        p = {'loc': loc, 'scale': scale}
+    except TypeError:
+        continue  # Distribution needs extra shape parameters; skip it
+
+    fig, ax = plt.subplots(1, 2, figsize=(6, 2))
+
+    ax[0].plot(x, 100 * dist.cdf(x, **p))
+    ax[0].plot(values, 100 * percentiles, '.', c='#444444')
+    ax[1].plot(x, 100 * dist.pdf(x, **p))
+
+    ax[0].axhline(y=100, c='#000000', lw=0.8, zorder=-1)
+    ax[0].set_ylim(0, 101)
+    ax[1].set_ylim(bottom=0)
+    ax[0].set_title(dist.name, x=-0.7, y=0.5, ha='left')
+
+    for a in ax.ravel():
+        a.spines['right'].set_visible(False)
+        a.spines['top'].set_visible(False)
+
+    ax[1].set_yticks([])
+    plt.savefig(f'png/{dist.name}.png', bbox_inches='tight', dpi=100)
+    plt.close()
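
Downstream, the fitted `loc`/`scale` pair for whichever distribution matches the IPCC quantiles best would parameterise the sea level rise input of the recession model. A minimal sketch of that use, not part of this commit; the normal distribution and the loc/scale values here are illustrative placeholders:

    import numpy as np
    from scipy import stats

    # Illustrative only; in practice loc/scale come from the
    # optimize.curve_fit result for the chosen distribution.
    loc, scale = 0.9, 0.25
    slr_2150 = stats.norm(loc=loc, scale=scale)

    # Monte Carlo samples of 2150 sea level rise (m)
    rng = np.random.default_rng(12345)
    samples = slr_2150.rvs(size=10_000, random_state=rng)
    print(slr_2150.ppf([0.05, 0.5, 0.95]))  # 5th/50th/95th percentiles (m)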