@@ -464,6 +464,12 @@ def process(beach_name, beach_scenario, n_runs, start_year, end_year,
                            figsize=(16, 24),
                            sharey='row')
 
+    # Check whether to save probabilistic diagnostics
+    for _, bp in pd.DataFrame(diagnostics).iterrows():
+        if ((str(prof['block']) == str(bp['block']))
+                and (prof['profile'] == bp['profile'])):
+            output_diagnostics = True
+
     # Loop through years
     pbar_year = tqdm(output_years, leave=False)
     for j, year in enumerate(pbar_year):
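Note: the block added above computes the `output_diagnostics` flag once, before the year loop, rather than filtering inside it (see the second hunk below). A minimal standalone sketch of the matching logic, with hypothetical `diagnostics` records and `prof` fields assumed from the hunk:

import pandas as pd

# Hypothetical inputs, shapes assumed from the hunk above
diagnostics = [{'block': 12, 'profile': 3}, {'block': 7, 'profile': 1}]
prof = {'block': '12', 'profile': 3}  # block label may be str or int

output_diagnostics = False
for _, bp in pd.DataFrame(diagnostics).iterrows():
    # Compare blocks as strings so mixed int/str labels still match
    if ((str(prof['block']) == str(bp['block']))
            and (prof['profile'] == bp['profile'])):
        output_diagnostics = True

print(output_diagnostics)  # True: diagnostics are saved for this profile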
@@ -595,103 +601,102 @@ def process(beach_name, beach_scenario, n_runs, start_year, end_year,
                           header=False,
                           float_format='%g')
 
-            # Check whether to save probabilistic diagnostics
-            for _, bp in pd.DataFrame(diagnostics).iterrows():
-                if not ((str(prof['block']) == str(bp['block'])) and
-                        (prof['profile'] == bp['profile'])):
-                    continue  # Don't save
-
+            if output_diagnostics:
                 # Save probabilistic diagnostics
                 year_idx = year == years
 
                 # Find index where most extreme event occurred
                 event_year_idx = chainage_with_recession.argmin(axis=0)
 
                 # define dummy index
                 ix = np.arange(n_runs)
 
                 dump_data = {
                     'Sea level rise (m)':
                     slr[event_year_idx, ix].ravel(),
                     'Bruun factor (-)':
                     bf[event_year_idx, ix].ravel(),
                     'Bruun factor x SLR (m)':
                     slr[event_year_idx, ix].ravel() *
                     bf[event_year_idx, ix].ravel(),
                     'Underlying trend rate (m/yr)':
                     ur_rate[year_idx, :].ravel(),
                     'Underlying trend (m)':
                     ur[event_year_idx, ix].ravel(),
                     'Underlying + SLR (m)':
                     r[event_year_idx, ix].ravel(),
                     'Total movement (m)':
                     (storm_demand_dist + r)[event_year_idx, ix].ravel(),
                     'Storm demand distance (m)':
                     storm_demand_dist[event_year_idx, ix].ravel(),
                     'Storm demand volume (m3/m)':
                     storm_demand_volume[event_year_idx, ix].ravel(),
                 }
 
                 dump_df = pd.DataFrame(dump_data)
                 dump_df['Run ID'] = np.arange(len(event_year_idx)) + 1
                 dump_df['Event year'] = years[event_year_idx]
                 dump_df['Years elapsed'] = event_year_idx + 1
 
                 # Reorder columns
                 dump_df = dump_df[[
                     'Run ID',
                     'Event year',
                     'Years elapsed',
                     'Sea level rise (m)',
                     'Bruun factor (-)',
                     'Bruun factor x SLR (m)',
                     'Underlying trend rate (m/yr)',
                     'Underlying trend (m)',
                     'Underlying + SLR (m)',
                     'Total movement (m)',
                     'Storm demand distance (m)',
                     'Storm demand volume (m3/m)',
                 ]]
 
                 # Sort based on maximum movement
                 dump_df = dump_df.sort_values('Total movement (m)',
                                               ascending=False)
 
                 # Add encounter probabilities
-                dump_df['Encounter probability (%)'] = np.linspace(0,
-                                                                   100,
-                                                                   num=n_runs +
-                                                                   2)[1:-1]
+                dump_df['Encounter probability (%)'] = np.linspace(
+                    0, 100, num=n_runs + 2)[1:-1]
                 dump_df = dump_df.set_index('Encounter probability (%)')
 
                 csv_name = os.path.join(
                     'diagnostics',
-                    '{} {} {}.csv'.format(beach_scenario, year, profile_type))
+                    '{} {} {}.csv'.format(beach_scenario, year,
+                                          profile_type))
                 dump_df.to_csv(csv_name, float_format='%g')
 
                 for i, c in enumerate(dump_df.columns[3:]):
                     ax[i, j].plot(dump_df.index,
                                   dump_df[c],
                                   '.',
                                   color='#666666',
                                   markersize=2)
                     ax[i, j].spines['right'].set_visible(False)
                     ax[i, j].spines['top'].set_visible(False)
                     if j == 0:
                         ax[i, 0].yaxis.set_label_coords(-0.4, 0.5)
                         label = c.replace('(', '\n(')
-                        ax[i, 0].set_ylabel(label, va='top', linespacing=1.5)
+                        ax[i, 0].set_ylabel(label,
+                                            va='top',
+                                            linespacing=1.5)
 
-                    ax[i, j].set_xlabel('Encounter probability (%)', labelpad=10)
+                    ax[i, j].set_xlabel('Encounter probability (%)',
+                                        labelpad=10)
                     ax[0, j].set_title(year)
 
                 fig.suptitle('{}, block {}, profile {}'.format(
                     beach_scenario, prof['block'], prof['profile']),
                              y=0.92)
 
-                figname = os.path.join(
-                    'diagnostics', '{} {}.png'.format(beach_scenario,
-                                                      profile_type))
-                plt.savefig(figname, bbox_inches='tight', dpi=300)
+    if output_diagnostics:
+        figname = os.path.join(
+            'diagnostics',
+            f'{beach_scenario} {profile_type} scatter.png')
+        plt.savefig(figname, bbox_inches='tight', dpi=300)
+        plt.close(fig)
 
 
 def main():
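For reference, two NumPy idioms carried through the second hunk, shown standalone with toy shapes and hypothetical values: the argmin-plus-dummy-index pattern that pulls each run's most extreme (minimum-chainage) event out of the (year, run) arrays, and the encounter-probability plotting positions assigned to the sorted runs.

import numpy as np

# Toy data: 3 years x 4 runs of most-landward chainage per year and run
n_years, n_runs = 3, 4
rng = np.random.default_rng(0)
chainage_with_recession = rng.normal(size=(n_years, n_runs))

# For each run, the year index of the minimum (most extreme) chainage
event_year_idx = chainage_with_recession.argmin(axis=0)  # shape (n_runs,)
ix = np.arange(n_runs)                                   # dummy run index

# Fancy indexing pairs each run with its own extreme year
extremes = chainage_with_recession[event_year_idx, ix]
assert np.allclose(extremes, chainage_with_recession.min(axis=0))

# Encounter probabilities: n_runs evenly spaced interior points of (0, 100),
# i.e. plotting positions for the runs sorted by total movement
encounter_prob = np.linspace(0, 100, num=n_runs + 2)[1:-1]
print(encounter_prob)  # [20. 40. 60. 80.] for n_runs = 4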