Export diagnostic data

master
Dan Howe 3 years ago
parent c3c0ba9491
commit c2e3a1a484

@@ -459,9 +459,14 @@ def process(beach_name, beach_scenario, n_runs, start_year, end_year,
     xp=profile_volume[valid_idx],
     fp=profile_chainage[valid_idx])
+    fig, ax = plt.subplots(9,
+                           len(output_years),
+                           figsize=(16, 24),
+                           sharey='row')
+
     # Loop through years
     pbar_year = tqdm(output_years, leave=False)
-    for year in pbar_year:
+    for j, year in enumerate(pbar_year):
         pbar_year.set_description('Year: {}'.format(year))
@@ -589,6 +594,104 @@ def process(beach_name, beach_scenario, n_runs, start_year, end_year,
header=False,
float_format='%g')
# Check whether to save probabilistic diagnostics
for _, bp in pd.DataFrame(diagnostics).iterrows():
if not ((str(prof['block']) == str(bp['block'])) and
(prof['profile'] == bp['profile'])):
continue # Don't save
# Save probabilistic diagnostics
year_idx = year == years
# Find index where most extreme event occurred
event_year_idx = chainage_with_recession.argmin(axis=0)
# Dummy index over runs, paired with event_year_idx for fancy indexing
ix = np.arange(n_runs)
dump_data = {
'Sea level rise (m)':
slr[event_year_idx, ix].ravel(),
'Bruun factor (-)':
bf[event_year_idx, ix].ravel(),
'Bruun factor x SLR (m)':
slr[event_year_idx, ix].ravel() *
bf[event_year_idx, ix].ravel(),
'Underlying trend rate (m/yr)':
ur_rate[year_idx, :].ravel(),
'Underlying trend (m)':
ur[event_year_idx, ix].ravel(),
'Underlying + SLR (m)':
r[event_year_idx, ix].ravel(),
'Total movement (m)':
(storm_demand_dist + r)[event_year_idx, ix].ravel(),
'Storm demand distance (m)':
storm_demand_dist[event_year_idx, ix].ravel(),
'Storm demand volume (m3/m)':
storm_demand_volume[event_year_idx, ix].ravel(),
}
dump_df = pd.DataFrame(dump_data)
dump_df['Run ID'] = np.arange(len(event_year_idx)) + 1
dump_df['Event year'] = years[event_year_idx]
dump_df['Years elapsed'] = event_year_idx + 1
# Reorder columns
dump_df = dump_df[[
'Run ID',
'Event year',
'Years elapsed',
'Sea level rise (m)',
'Bruun factor (-)',
'Bruun factor x SLR (m)',
'Underlying trend rate (m/yr)',
'Underlying trend (m)',
'Underlying + SLR (m)',
'Total movement (m)',
'Storm demand distance (m)',
'Storm demand volume (m3/m)',
]]
# Sort based on maximum movement
dump_df = dump_df.sort_values('Total movement (m)',
ascending=False)
# Add encounter probabilities
dump_df['Encounter probability (%)'] = np.linspace(
    0, 100, num=n_runs + 2)[1:-1]
dump_df = dump_df.set_index('Encounter probability (%)')
csv_name = os.path.join(
'diagnostics',
'{} {} {}.csv'.format(beach_scenario, year, profile_type))
dump_df.to_csv(csv_name, float_format='%g')
for i, c in enumerate(dump_df.columns[3:]):
ax[i, j].plot(dump_df.index,
dump_df[c],
'.',
color='#666666',
markersize=2)
ax[i, j].spines['right'].set_visible(False)
ax[i, j].spines['top'].set_visible(False)
if j == 0:
ax[i, 0].yaxis.set_label_coords(-0.4, 0.5)
label = c.replace('(', '\n(')
ax[i, 0].set_ylabel(label, va='top', linespacing=1.5)
ax[i, j].set_xlabel('Encounter probability (%)', labelpad=10)
ax[0, j].set_title(year)
fig.suptitle('{}, block {}, profile {}'.format(
beach_scenario, prof['block'], prof['profile']),
y=0.92)
figname = os.path.join(
'diagnostics', '{} {}.png'.format(beach_scenario,
profile_type))
plt.savefig(figname, bbox_inches='tight', dpi=300)
def main():
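The core of the export is the argmin / fancy-indexing step: `chainage_with_recession.argmin(axis=0)` gives, for each Monte Carlo run, the index of the year in which the most extreme event occurred, and pairing that with `ix = np.arange(n_runs)` pulls exactly one value per run out of each array. A minimal sketch of that selection on synthetic data (the years-by-runs shape and the array contents here are assumptions inferred from the indexing above, not taken from the repository):

```python
import numpy as np

# Assumed shapes: rows are simulated years, columns are Monte Carlo runs
n_years, n_runs = 5, 4
rng = np.random.default_rng(1)

# Synthetic stand-ins for the (years x runs) arrays used in the commit
chainage_with_recession = rng.normal(loc=50.0, scale=5.0, size=(n_years, n_runs))
slr = np.cumsum(rng.uniform(0.0, 0.02, size=(n_years, n_runs)), axis=0)

# Year index of the most extreme event (minimum chainage) in each run
event_year_idx = chainage_with_recession.argmin(axis=0)

# Pair each run's event year with its own column index to pull one value per run
ix = np.arange(n_runs)
slr_at_event = slr[event_year_idx, ix]

print(event_year_idx)  # one year index per run
print(slr_at_event)    # sea level rise sampled at each run's most extreme year
```

The same indexing pattern is applied to `bf`, `ur`, `r`, `storm_demand_dist` and `storm_demand_volume` before the values are assembled into `dump_df`.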

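After the table is assembled, runs are ranked by 'Total movement (m)' and assigned evenly spaced encounter probabilities from `np.linspace(0, 100, num=n_runs + 2)[1:-1]`, so the most extreme run sits near 0% and the least extreme near 100%. A self-contained sketch of that ranking step, plus one way the exported CSV index could be interrogated afterwards (the synthetic data and the 5% query are illustrative, not part of the commit):

```python
import numpy as np
import pandas as pd

n_runs = 20
rng = np.random.default_rng(0)

# Synthetic "total movement" for each Monte Carlo run (illustrative only)
df = pd.DataFrame({
    'Run ID': np.arange(n_runs) + 1,
    'Total movement (m)': rng.gamma(shape=2.0, scale=20.0, size=n_runs),
})

# Rank runs from most to least extreme, as the diagnostics export does
df = df.sort_values('Total movement (m)', ascending=False)

# Evenly spaced plotting positions, excluding the 0% and 100% endpoints
df['Encounter probability (%)'] = np.linspace(0, 100, num=n_runs + 2)[1:-1]
df = df.set_index('Encounter probability (%)')

# One possible downstream use: movement at a 5% encounter probability
movement_5pct = np.interp(5, df.index, df['Total movement (m)'])
print(df.head())
print('Total movement at 5% encounter probability: {:.1f} m'.format(movement_5pct))
```

In the commit, this index also drives the x axis of the 9-row diagnostic figure and labels the rows of the CSV written to the diagnostics folder.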