Compare commits
14 Commits
18d7f448a4
...
19b4aa0ccd
Author | SHA1 | Date |
---|---|---|
Dan Howe | 19b4aa0ccd | 3 years ago |
Dan Howe | 4f81605606 | 3 years ago |
Dan Howe | dabbc11715 | 3 years ago |
Dan Howe | 9e8aec2a6d | 3 years ago |
Dan Howe | c0c130c465 | 3 years ago |
Dan Howe | d506fc6fa4 | 3 years ago |
Dan Howe | 105348fa36 | 3 years ago |
Dan Howe | b3b69c7933 | 3 years ago |
Dan Howe | 68c128ebe2 | 3 years ago |
Dan Howe | b1ea85bd5a | 3 years ago |
Dan Howe | d5e8274061 | 3 years ago |
Dan Howe | c9ac8f1e6a | 3 years ago |
Dan Howe | 674688ad86 | 3 years ago |
Dan Howe | 3619504f36 | 3 years ago |
Binary file not shown.
@ -1,130 +0,0 @@
|
||||
"""Calculate probability distributions for IPCC sea level rise forecasts.
|
||||
|
||||
This will calculate the values required to generate triangular distributions,
|
||||
i.e. 'min', 'mode', and 'max' in the `numpy.random.triang()` function.
|
||||
|
||||
Reads:
|
||||
'IPCC AR6.xlsx'
|
||||
|
||||
Writes:
|
||||
'triang-values.csv'
|
||||
|
||||
D. Howe
|
||||
d.howe@wrl.unsw.edu.au
|
||||
2022-05-05
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from scipy import stats, optimize
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
PLOT = False
|
||||
|
||||
|
||||
def norm_cdf(x, loc, scale):
    """Evaluate the normal-distribution CDF at `x` for the given loc/scale.

    Used as the model function for `scipy.optimize.curve_fit`.
    """
    return stats.norm.cdf(x, loc=loc, scale=scale)
|
||||
|
||||
|
||||
def triang_cdf(x, loc, scale, c):
    """Evaluate the triangular-distribution CDF at `x`.

    The support is [loc, loc + scale]; `c` in [0, 1] places the mode at
    loc + c * scale.  Used as the model function for curve fitting.
    """
    return stats.triang.cdf(x, c, loc=loc, scale=scale)
|
||||
|
||||
|
||||
# Read data.  Index levels are (site id, component, confidence, scenario,
# quantile); 838 is presumably a station/site identifier — TODO confirm.
df = pd.read_excel('IPCC AR6.xlsx', index_col=[0, 1, 2, 3, 4])
df = df.sort_index()
# Years become the row index after transposing; columns are the quantiles.
dff = df.loc[838, 'total', 'medium', 'ssp585'].T
dff.index.name = 'year'
# Quantile labels (e.g. 5, 50, 95) converted to probabilities in [0, 1].
percentiles = dff.columns.values / 100

for i, row in dff.iterrows():
    # SLR values at each percentile for this year.
    values = row.values

    # Fit normal distribution
    loc, scale = optimize.curve_fit(norm_cdf, values, percentiles)[0]
    p_norm = {'loc': loc, 'scale': scale}

    # Fit triangular distribution.  p0 seeds the left edge just below the
    # smallest observed value.
    loc, scale, c = optimize.curve_fit(triang_cdf,
                                       values,
                                       percentiles,
                                       p0=[values[0] - 0.1, 0.5, 0.5])[0]
    p_triang = {'loc': loc, 'scale': scale, 'c': c}

    # Get triangular distribution parameters (support edges and mode),
    # i.e. the 'left', 'mode', 'right' arguments of numpy's triangular
    # random generator.
    left = p_triang['loc']
    centre = p_triang['loc'] + p_triang['scale'] * p_triang['c']
    right = p_triang['loc'] + p_triang['scale']

    # Append the fitted parameters as new columns of the same frame.
    dff.loc[i, 'min'] = left
    dff.loc[i, 'mode'] = centre
    dff.loc[i, 'max'] = right

    if PLOT:
        # Diagnostic plot: fitted CDFs (left) and PDFs (right) vs IPCC data.
        fig, ax = plt.subplots(1, 2, figsize=(10, 3))

        # x range spanning the 1st–99th percentile of the triangular fit.
        x_min = stats.triang.ppf(0.01, **p_triang) - 0.2
        x_max = stats.triang.ppf(0.99, **p_triang) + 0.2
        x = np.linspace(x_min, x_max, num=1000)

        ax[0].plot(x, 100 * stats.norm.cdf(x, **p_norm))
        ax[0].plot(x, 100 * stats.triang.cdf(x, **p_triang))
        ax[0].plot(values, 100 * percentiles, '.', c='#444444')

        ax[1].plot(x, stats.norm.pdf(x, **p_norm), label='Normal')
        ax[1].plot(x, stats.triang.pdf(x, **p_triang), label='Triangular')
        ax[1].plot([], [], '.', c='#444444', label='IPCC data')
        ax[1].legend()

        # Mark the triangular support edges and mode.
        ax[1].axvline(x=left, c='C3')
        ax[1].axvline(x=centre, c='C3')
        ax[1].axvline(x=right, c='C3')

        ax[0].set_ylabel('Percentile', labelpad=10)
        ax[0].set_title('Cumulative distribution')
        ax[1].set_title('Probability density')

        # Label the panel with the year (outside the axes).
        ax[0].annotate(i, (-0.3, 1),
                       xycoords='axes fraction',
                       clip_on=False,
                       size=14)
        for a in ax:
            a.set_xlabel('SLR (m)', labelpad=10)
            a.spines['top'].set_visible(False)
            a.spines['right'].set_visible(False)

        plt.show()

# Make SLR relative to 2020 level (at the 50th percentile)
dff -= dff.loc[2020, 'mode']

# Save distribution parameters
dff[['min', 'mode', 'max']].to_csv('triang-values.csv', float_format='%0.3f')
|
||||
|
||||
if PLOT:
    # Plot all triangular distributions
    fig, ax = plt.subplots(1, 1, figsize=(8, 4))

    # One colour per year, sampled from a diverging map.
    cmap = plt.cm.get_cmap('RdBu_r', len(dff))
    c = list(cmap(range(cmap.N)))

    j = -1
    for i, row in dff.iterrows():
        j += 1
        # Draw each triangle as the polyline (min, 0) -> (mode, 1) -> (max, 0).
        ax.plot(row[['min', 'mode', 'max']], [0, 1, 0], c=c[j])
        # Label every second year at its mode to avoid clutter.
        if j % 2 == 0:
            ax.annotate(f' {i}', (row['mode'], 1),
                        ha='center',
                        va='bottom',
                        rotation=90)

    ax.set_xlabel('SLR (m)', labelpad=10)
    ax.set_ylabel('Probability density (-)', labelpad=10)

    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.show()
|
@ -0,0 +1,106 @@
|
||||
import os
|
||||
import re
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from scipy import stats
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
PLOT = False
START_YEAR = 2020
END_YEAR = 2100

# Number of Monte-Carlo SLR trajectories to generate.
n_runs = 100000

# Fitted Cauchy parameters per year ('loc', 'scale', 'min', 'max' columns,
# produced by the companion fitting script).
df = pd.read_csv('cauchy-values.csv', index_col=0)
years = np.arange(START_YEAR, END_YEAR + 1)

# Squeeze the start-year distribution to (almost) zero width, so every
# trajectory effectively starts from the same level.
df.loc[START_YEAR, 'scale'] = 0.0001
df.loc[START_YEAR, 'min'] = df.loc[START_YEAR, 'loc'] - 0.0001
df.loc[START_YEAR, 'max'] = df.loc[START_YEAR, 'loc'] + 0.0001

# Interpolate intermediate values
df = df.reindex(years).interpolate(method='cubic')

# Prepare array for SLR values
slr = np.zeros([len(years), n_runs], dtype=float)

for i, (year, row) in enumerate(df.iterrows()):
    # Get probability distribution
    dist = stats.cauchy(loc=row['loc'], scale=row['scale'])

    # Rejection sampling: draw progressively larger batches until at least
    # n_runs samples fall inside the valid [min, max] range.  The Cauchy
    # tails are heavy, so a modest oversampling factor usually suffices.
    for factor in range(2, 10):
        s_raw = dist.rvs(n_runs * factor)

        # Keep samples within the valid range.
        s = s_raw[(s_raw > row['min']) & (s_raw < row['max'])]

        # BUG FIX: was `len(s) > n_runs`, which wrongly rejected a batch
        # with exactly n_runs valid samples.
        if len(s) >= n_runs:
            break  # Success
    else:
        # Previously this fell through silently and crashed later with an
        # obscure broadcast error; fail loudly instead.
        raise RuntimeError(
            f'could not draw {n_runs} in-range samples for year {year}')

    # Add the required number of samples
    slr[i] = s[:n_runs]

# Sort each row to make SLR trajectories smooth
slr = np.sort(slr, axis=1)

# Randomise run order (column-wise)
slr = np.random.permutation(slr.T).T

# Pin the first year exactly to its central value.
slr[0, :] = df.loc[START_YEAR, 'loc']
|
||||
|
||||
# Plot first few trajectories
if PLOT:
    # Three panels: trajectories, final-year PDF histogram, final-year CDF
    # histogram with the IPCC quantiles overlaid.
    fig, ax = plt.subplots(1,
                           3,
                           figsize=(12, 5),
                           sharey=True,
                           gridspec_kw={
                               'wspace': 0.05,
                               'width_ratios': [3, 1, 1]
                           })

    # First 100 trajectories only, to keep the plot legible.
    ax[0].plot(years, slr[:, :100], c='#444444', lw=0.2)
    ax[1].hist(slr[-1, :],
               bins=100,
               fc='#cccccc',
               ec='#aaaaaa',
               orientation='horizontal')

    i = len(years) - 1
    # Quantile columns carried through the csv; labels are the strings
    # '5'..'95' — presumably IPCC percentile columns, TODO confirm.
    dff = df.T.loc['5':'95', years[i]]
    ax[2].hist(
        slr[i, :],
        bins=100,
        fc='#cccccc',
        ec='#aaaaaa',
        orientation='horizontal',
        cumulative=True,
    )
    # Overlay IPCC quantiles: percentile/100 * n_runs gives the expected
    # cumulative count at each quantile value.
    ax[2].plot(dff.index.astype(int) / 100 * n_runs,
               dff.values,
               'o',
               c='C3',
               label='IPCC AR6 data')

    ax[0].set_xlim(right=years[i])

    ax[0].set_title(f'SLR trajectories\n(first 100 out of {n_runs:,} runs)')
    ax[1].set_title(f'Probability\ndistribution\nin year {years[i]}')
    ax[2].set_title(f'Cumulative\ndistribution\nin year {years[i]}')
    ax[0].set_ylabel('SLR (m)', labelpad=10)

    ax[2].legend()

    ax[0].spines['top'].set_visible(False)
    ax[0].spines['right'].set_visible(False)

    # Strip the x axes from the histogram panels; only the shape matters.
    for a in ax[1:]:
        a.spines['top'].set_visible(False)
        a.spines['right'].set_visible(False)
        a.spines['bottom'].set_visible(False)
        a.xaxis.set_visible(False)
|
@ -0,0 +1,161 @@
|
||||
"""Calculate probability distributions for IPCC sea level rise forecasts.
|
||||
|
||||
This will calculate the values required to Cauchy Distributions for
|
||||
different SLR projections.
|
||||
|
||||
Reads:
|
||||
'IPCC AR6.xlsx'
|
||||
|
||||
Writes:
|
||||
'cauchy-values.csv'
|
||||
|
||||
D. Howe
|
||||
d.howe@wrl.unsw.edu.au
|
||||
2022-05-12
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from scipy import stats, optimize
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
PLOT = True
|
||||
|
||||
|
||||
def cauchy_cdf(x, loc, scale):
    """Evaluate the Cauchy-distribution CDF at `x` for the given loc/scale.

    Used as the model function for `scipy.optimize.curve_fit`.
    """
    return stats.cauchy.cdf(x, loc=loc, scale=scale)
|
||||
|
||||
|
||||
# Read data.  Index levels are (site id, component, confidence, scenario,
# quantile); 838 is presumably a station/site identifier — TODO confirm.
df = pd.read_excel('IPCC AR6.xlsx', index_col=[0, 1, 2, 3, 4])
df = df.sort_index()

# Use all 'medium' confidence scenarios for intermediate quantiles
scenarios = ['ssp119', 'ssp126', 'ssp245', 'ssp370', 'ssp585']
dff = df.loc[838, 'total', 'medium', scenarios].groupby('quantile').mean()

# Use ssp119/ssp585 for 5th and 95th quantiles, i.e. take the tails from
# the most extreme low/high scenarios rather than the scenario mean.
dff.loc[5] = df.loc[838, 'total', 'medium', 'ssp119', 5]
dff.loc[95] = df.loc[838, 'total', 'medium', 'ssp585', 95]
dff = dff.T

dff.index.name = 'year'
# Quantile labels (e.g. 5, 50, 95) converted to probabilities in [0, 1].
percentiles = dff.columns.values / 100

for i, row in dff.iterrows():
    # SLR values at each percentile for this year.
    values = row.values

    # Hard clipping bounds: reflect P5/P95 about the median, giving a
    # range twice as wide as P5–P95, centred on P50.
    x_min = row[5] + (row[5] - row[50])
    x_max = row[95] + (row[95] - row[50])
    x = np.linspace(x_min, x_max, num=1000)

    # Fit distribution
    loc, scale = optimize.curve_fit(cauchy_cdf, values, percentiles)[0]
    p = {'loc': loc, 'scale': scale}

    # Append fitted parameters and clipping bounds as new columns.
    dff.loc[i, 'loc'] = loc
    dff.loc[i, 'scale'] = scale
    dff.loc[i, 'min'] = x_min
    dff.loc[i, 'max'] = x_max

    if not PLOT:
        continue

    # Diagnostic plot: fitted CDF (left) and PDF (right) vs IPCC data.
    fig, ax = plt.subplots(1, 2, figsize=(10, 3))

    ax[0].plot(x, 100 * stats.cauchy.cdf(x, **p))
    ax[0].plot(values, 100 * percentiles, '.', c='#444444')

    ax[1].plot(x, stats.cauchy.pdf(x, **p), label='Cauchy')
    ax[1].plot([], [], '.', c='#444444', label='IPCC data')
    ax[1].legend()

    ax[0].set_ylabel('Percentile', labelpad=10)
    ax[0].set_title('Cumulative distribution')
    ax[1].set_title('Probability density')

    # Label the panel with the year (outside the axes).
    ax[0].annotate(i, (-0.3, 1),
                   xycoords='axes fraction',
                   clip_on=False,
                   size=14)
    for a in ax:
        a.set_xlabel('SLR (m)', labelpad=10)
        a.spines['top'].set_visible(False)
        a.spines['right'].set_visible(False)

    plt.show()

# Save distribution parameters
dff.to_csv('cauchy-values.csv', float_format='%g')
|
||||
|
||||
if PLOT:
    # Plot all distributions
    fig, ax = plt.subplots(1, 1, figsize=(8, 4))

    # One colour per year, sampled from a diverging map.
    cmap = plt.cm.get_cmap('RdBu_r', len(dff))
    c = list(cmap(range(cmap.N)))

    j = -1
    for i, row in dff.iterrows():
        j += 1

        x = np.linspace(row['min'], row['max'], num=1000)

        y = stats.cauchy(loc=row['loc'], scale=row['scale']).pdf(x)
        # Multiplying by `scale` normalises peak height across years
        # (Cauchy peak density is 1 / (pi * scale)).
        ax.plot(x, y * row['scale'], c=c[j])
        # Label every second year near its peak to avoid clutter.
        if j % 2 == 0:
            ax.annotate(f' {i}', (x[y.argmax()], 0.32),
                        ha='center',
                        va='bottom',
                        clip_on=False,
                        rotation=90)

    ax.set_ylim(bottom=0, top=0.35)
    ax.set_yticks([])
    ax.set_xlabel('SLR (m)', labelpad=10)
    ax.set_ylabel('Normalised probability density (-)', labelpad=10)

    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.show()
|
||||
|
||||
# Show histograms with values clipped to our specific range
if PLOT:
    # Every fourth year only, to limit the number of figures.
    for i, row in dff[::4].iterrows():
        fig, ax = plt.subplots(1, 1, figsize=(10, 2))

        # Generate random samples in Cauchy distribution
        r = stats.cauchy(loc=row['loc'], scale=row['scale']).rvs(1000000)

        # Clip to our range
        rc = np.clip(r, a_min=row['min'], a_max=row['max'])
        f = (r != rc).sum() / len(r)  # Fraction outside range

        ax.hist(rc, 100)

        ym = ax.get_ylim()[1]  # Maximum y value

        # Mark the clipping bounds and the source quantiles.
        ax.axvline(x=row['min'], c='C3')
        ax.axvline(x=row[5], c='C3')
        ax.axvline(x=row[50], c='C3')
        ax.axvline(x=row[95], c='C3')
        ax.axvline(x=row['max'], c='C3')

        ax.annotate(' P_min', (row['min'], ym))
        ax.annotate(' P5', (row[5], ym))
        ax.annotate(' P50', (row[50], ym))
        ax.annotate(' P95', (row[95], ym))
        ax.annotate(' P_max', (row['max'], ym))

        # BUG FIX: previously anchored at the stale module-level `x_max`
        # left over from the fitting loop (always the LAST year's value);
        # use this row's own upper bound instead.
        ax.annotate(f' Samples clipped = {100 * f:0.1f}%',
                    (row['max'], ym / 2))

        ax.set_title(row.name, x=0)
        ax.set_yticks([])
        ax.set_xlabel('SLR (m)', labelpad=10)
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)

        plt.show()
|
Loading…
Reference in New Issue