diff --git a/probabilistic-analysis/probabilistic_assessment.py b/probabilistic-analysis/probabilistic_assessment.py
index ac67dbe..ee6858d 100644
--- a/probabilistic-analysis/probabilistic_assessment.py
+++ b/probabilistic-analysis/probabilistic_assessment.py
@@ -329,7 +329,7 @@ def get_storm_demand_volume(ref_aep, ref_vol, n, mode='fit'):
 
 
 def process(beach_name, beach_scenario, n_runs, start_year, end_year,
-            output_years, output_ep, zsa_profile_file, zfc_profile_file,
+            output_years, output_ep, zsa_profile_file, zrfc_profile_file,
             output_folder, figure_folder, sea_level_rise, bruun_factor,
             underlying_recession, storm_demand, plot_stats, omit_from_shp,
             min_chainage, segment_gaps, insert_points, append_points):
@@ -345,7 +345,7 @@ def process(beach_name, beach_scenario, n_runs, start_year, end_year,
         output_years (list): years to save profiles
         output_ep (list): EP values for saved profiles
         zsa_profile_file (str): path to storm demand vs chainge data (ZSA)
-        zfc_profile_file (str): path to storm demand vs chainge data (ZFC)
+        zrfc_profile_file (str): path to storm demand vs chainge data (ZRFC)
         output_folder (str): where to save profiles
         sea_level_rise (dict):
             'year' (list): years
@@ -420,14 +420,14 @@ def process(beach_name, beach_scenario, n_runs, start_year, end_year,
                                       np.array(ref_vol)[sort_idx])
 
     # Load profile data for current beach
-    pbar_profiles = tqdm(['ZSA', 'ZFC'], leave=False)
+    pbar_profiles = tqdm(['ZSA', 'ZRFC'], leave=False)
     for profile_type in pbar_profiles:
         pbar_profiles.set_description('{}'.format(profile_type))
 
         if profile_type == 'ZSA':
            df_in = pd.read_csv(zsa_profile_file)
-        if profile_type == 'ZFC':
-            df_in = pd.read_csv(zfc_profile_file)
+        if profile_type == 'ZRFC':
+            df_in = pd.read_csv(zrfc_profile_file)
 
         col_names = [c for c in df_in.columns if c.isdigit()]
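
Because the diff renames the parameter `zfc_profile_file` to `zrfc_profile_file` and the label 'ZFC' to 'ZRFC', any caller that passes this argument by keyword must be updated to match. The sketch below is not code from the repository: it illustrates, under assumed file paths and a hypothetical `profile_files` mapping, how the per-profile-type CSV loading could be expressed with a single dict lookup after the rename.

```python
# Hypothetical sketch only: shows the renamed 'ZRFC' key replacing 'ZFC'.
# The profile_files mapping and CSV paths are assumptions for illustration.
import pandas as pd
from tqdm import tqdm

profile_files = {
    'ZSA': 'zsa_storm_demand.csv',    # assumed path to ZSA storm demand vs chainage data
    'ZRFC': 'zrfc_storm_demand.csv',  # assumed path to ZRFC data (formerly labelled 'ZFC')
}

pbar_profiles = tqdm(profile_files, leave=False)
for profile_type in pbar_profiles:
    pbar_profiles.set_description('{}'.format(profile_type))

    # One lookup covers both profile types instead of chained 'if' blocks
    df_in = pd.read_csv(profile_files[profile_type])

    # Keep only the numeric column names, as in the original loop
    col_names = [c for c in df_in.columns if c.isdigit()]
```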