Merge branch 'feature/twl-exceedence-time' into develop

develop
Chris Leaman 6 years ago
commit a7a3a88a34

@@ -118,6 +118,7 @@ impacts: ./data/interim/impacts_forecasted_foreshore_slope_sto06.csv ./data/inte
 		--profile-features-csv "./data/interim/profile_features.csv" \
 		--runup-function "sto06" \
 		--slope "foreshore" \
+		--profile-type "prestorm" \
 		--output-file "./data/interim/twl_foreshore_slope_sto06.csv"
 
 ./data/interim/twl_mean_slope_sto06.csv: ./data/interim/waves.csv ./data/interim/tides.csv ./data/interim/profiles.csv ./data/interim/sites.csv ./data/interim/profile_features.csv
@@ -128,6 +129,7 @@ impacts: ./data/interim/impacts_forecasted_foreshore_slope_sto06.csv ./data/inte
 		--profile-features-csv "./data/interim/profile_features.csv" \
 		--runup-function "sto06" \
 		--slope "mean" \
+		--profile-type "prestorm" \
 		--output-file "./data/interim/twl_mean_slope_sto06.csv"
 
 ./data/interim/twl_poststorm_mean_slope_sto06.csv: ./data/interim/waves.csv ./data/interim/tides.csv ./data/interim/profiles.csv ./data/interim/sites.csv ./data/interim/profile_features.csv
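
Both TWL targets now pass the new --profile-type flag through to the forecast command. As a quick way to exercise the updated command outside of make, here is a minimal sketch using click's test runner. The forecast_twl module path and the full option list are assumptions inferred from this diff, not confirmed by it:

    from click.testing import CliRunner

    from forecast_twl import create_twl_forecast  # hypothetical module location

    runner = CliRunner()
    result = runner.invoke(
        create_twl_forecast,
        [
            "--waves-csv", "./data/interim/waves.csv",
            "--tides-csv", "./data/interim/tides.csv",
            "--profiles-csv", "./data/interim/profiles.csv",
            "--profile-features-csv", "./data/interim/profile_features.csv",
            "--runup-function", "sto06",
            "--slope", "mean",
            "--profile-type", "prestorm",  # flag added by this commit
            "--output-file", "./data/interim/twl_mean_slope_sto06.csv",
        ],
    )
    assert result.exit_code == 0, result.output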

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -23,7 +23,7 @@ def forecast_twl(
     runup_function,
     n_processes=MULTIPROCESS_THREADS,
     slope="foreshore",
-    profile_type='prestorm'
+    profile_type="prestorm",
 ):
     # Use df_waves as a base
     df_twl = df_waves.copy()
@@ -46,19 +46,20 @@ def forecast_twl(
         df_twl["beta"] = pd.concat(results)
 
     elif slope == "mean":
-        df_temp = df_twl.join(df_profile_features.query("profile_type=='{}'".format(profile_type)).reset_index(
-            level='profile_type')
-            , how="inner")
+        df_temp = df_twl.join(
+            df_profile_features.query("profile_type=='{}'".format(profile_type)).reset_index(level="profile_type"),
+            how="inner",
+        )
         df_temp["mhw"] = 0.5
         with Pool(processes=n_processes) as pool:
             results = pool.starmap(
-                mean_slope_for_site_id, [(site_id, df_temp, df_profiles, "dune_toe_z", "dune_toe_x", "mhw") for
-                                         site_id in site_ids]
+                mean_slope_for_site_id,
+                [(site_id, df_temp, df_profiles, "dune_toe_z", "dune_toe_x", "mhw") for site_id in site_ids],
             )
         df_twl["beta"] = pd.concat(results)
 
     # Estimate runup
-    R2, setup, S_total, S_inc, S_ig = runup_function(df_twl, Hs0_col="Hs0", Tp_col="Tp", beta_col="beta")
+    R2, setup, S_total, S_inc, S_ig = runup_function(Hs0=df_twl["Hs0"].tolist(), Tp=df_twl["Tp"].tolist(), beta=df_twl["beta"].tolist())
 
     df_twl["R2"] = R2
     df_twl["setup"] = setup
@@ -69,13 +70,14 @@ def forecast_twl(
     df_twl["R_low"] = df_twl["tide"] + 1.1 * df_twl["setup"] - 1.1 / 2 * df_twl["S_total"]
 
     # Drop unneeded columns
-    df_twl.drop(columns=["E", "Exs", "P", "Pxs", "dir"], inplace=True, errors="ignore")
+    # df_twl.drop(columns=["E", "Exs", "P", "Pxs", "dir"], inplace=True, errors="ignore")
 
     return df_twl
 
-def mean_slope_for_site_id(site_id, df_twl, df_profiles, top_elevation_col, top_x_col, btm_elevation_col,
-                           profile_type='prestorm'):
+def mean_slope_for_site_id(
+    site_id, df_twl, df_profiles, top_elevation_col, top_x_col, btm_elevation_col, profile_type="prestorm"
+):
     """
     Calculates the foreshore slope values for a given site_id. Returns a series (with the same indices as df_twl) of
     foreshore slopes. This function is used to parallelize getting foreshore slopes as it is computationally
@@ -100,7 +102,7 @@ def mean_slope_for_site_id(site_id, df_twl, df_profiles, top_elevation_col, top_
                 top_elevation=row[top_elevation_col],
                 btm_elevation=row[btm_elevation_col],
                 method="end_points",
-                top_x= row[top_x_col]
+                top_x=row[top_x_col],
             ),
             axis=1,
         )
@@ -130,7 +132,7 @@ def foreshore_slope_for_site_id(site_id, df_twl, df_profiles):
                 profile_x=profile_x,
                 profile_z=profile_z,
                 tide=row.tide,
-                runup_function=runup_models.sto06_individual,
+                runup_function=runup_models.sto06,
                 Hs0=row.Hs0,
                 Tp=row.Tp,
             ),
@@ -216,16 +218,14 @@ def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, metho
     end_points = {"top": {"z": top_elevation}, "btm": {"z": btm_elevation}}
 
     for end_type in end_points.keys():
 
         # Add x coordinates if they are specified
-        if top_x and end_type == 'top':
-            end_points['top']['x'] = top_x
+        if top_x and end_type == "top":
+            end_points["top"]["x"] = top_x
             continue
-        if btm_x and end_type == 'top':
-            end_points['btm']['x'] = btm_x
+        if btm_x and end_type == "btm":
+            end_points["btm"]["x"] = btm_x
             continue
 
         elevation = end_points[end_type]["z"]
@@ -306,8 +306,9 @@ def crossings(profile_x, profile_z, constant_z):
 @click.option("--slope", required=True, help="", type=click.Choice(["foreshore", "mean"]))
 @click.option("--profile-type", required=True, help="", type=click.Choice(["prestorm", "poststorm"]))
 @click.option("--output-file", required=True, help="")
-def create_twl_forecast(waves_csv, tides_csv, profiles_csv, profile_features_csv, runup_function, slope,
-                        profile_type,output_file):
+def create_twl_forecast(
+    waves_csv, tides_csv, profiles_csv, profile_features_csv, runup_function, slope, profile_type, output_file
+):
     logger.info("Creating forecast of total water levels")
     logger.info("Importing data")
     df_waves = pd.read_csv(waves_csv, index_col=[0, 1])
@@ -323,7 +324,7 @@ def create_twl_forecast(waves_csv, tides_csv, profiles_csv, profile_features_csv
         df_profile_features,
         runup_function=getattr(runup_models, runup_function),
         slope=slope,
-        profile_type=profile_type
+        profile_type=profile_type,
     )
 
     df_twl.to_csv(output_file)

@@ -20,7 +20,7 @@ def forecasted_impacts(df_profile_features, df_forecasted_twl):
     """
     logger.info("Getting forecasted storm impacts")
 
-    df_forecasted_impacts = pd.DataFrame(index=df_profile_features.index.get_level_values('site_id').unique())
+    df_forecasted_impacts = pd.DataFrame(index=df_profile_features.index.get_level_values("site_id").unique())
 
     # For each site, find the maximum R_high value and the corresponding R_low value.
     idx = df_forecasted_twl.groupby(level=["site_id"])["R_high"].idxmax().dropna()
@@ -29,7 +29,12 @@ def forecasted_impacts(df_profile_features, df_forecasted_twl):
     # Join with df_profile_features to find dune toe and crest elevations
     df_forecasted_impacts = df_forecasted_impacts.merge(
-        df_profile_features.query("profile_type=='prestorm'")[["dune_toe_z", "dune_crest_z"]], how="left", left_index=True, right_index=True
+        df_profile_features.query("profile_type=='prestorm'").reset_index("profile_type")[
+            ["dune_toe_z", "dune_crest_z"]
+        ],
+        how="left",
+        left_on=["site_id"],
+        right_on=["site_id"],
     )
 
     # Compare R_high and R_low with dune crest and toe elevations
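
Since df_profile_features is indexed by (site_id, profile_type), the profile_type level has to be dropped before the frame can be merged onto the site-indexed impacts table, which is what the reset_index call above does. A toy illustration of the pattern, with hypothetical values:

    import pandas as pd

    features = pd.DataFrame(
        {"dune_toe_z": [3.2, 2.9], "dune_crest_z": [5.0, 4.8]},
        index=pd.MultiIndex.from_tuples(
            [("site_a", "prestorm"), ("site_a", "poststorm")], names=["site_id", "profile_type"]
        ),
    )
    impacts = pd.DataFrame({"R_high": [3.5]}, index=pd.Index(["site_a"], name="site_id"))

    # reset_index("profile_type") leaves site_id as the sole index level to merge on
    merged = impacts.merge(
        features.query("profile_type=='prestorm'").reset_index("profile_type")[["dune_toe_z", "dune_crest_z"]],
        how="left",
        left_on=["site_id"],
        right_on=["site_id"],
    )
    print(merged)  # one prestorm row per site, with dune_toe_z and dune_crest_z attached
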
@@ -66,6 +71,36 @@ def storm_regime(df_forecasted_impacts):
     return df_forecasted_impacts
 
+def twl_exceedence_time(df_profile_features, df_forecasted_twl, z_twl_col="R_high", z_exceedence_col="dune_toe_z"):
+    """
+    Returns a dataframe of the number of hours the twl exceeded a certain z elevation.
+    May need to use this https://stackoverflow.com/a/53656968 if datetimes are not consistent.
+    :param df_profile_features:
+    :param df_forecasted_twl:
+    :param z_twl_col:
+    :param z_exceedence_col:
+    :return:
+    """
+    logger.info("Getting twl exceedence time")
+
+    # Get a dataframe of prestorm dune toes organised by site_id
+    df_dune_toes = (
+        df_profile_features.query('profile_type=="prestorm"').reset_index("profile_type")[z_exceedence_col].to_frame()
+    )
+
+    # Merge dune toes into site_id
+    df_merged = df_forecasted_twl.merge(df_dune_toes, left_on=["site_id"], right_on=["site_id"])
+
+    # Return the sum of hours that twl exceeded the level
+    return (
+        (df_merged[z_twl_col] >= df_merged[z_exceedence_col])
+        .groupby("site_id")
+        .sum()
+        .rename("twl_{}_exceedance_hrs".format(z_exceedence_col))
+        .to_frame()
+    )
+
 @click.command()
 @click.option("--profile-features-csv", required=True, help="")
 @click.option("--forecasted-twl-csv", required=True, help="")
@@ -77,6 +112,11 @@ def create_forecasted_impacts(profile_features_csv, forecasted_twl_csv, output_f
     df_forecasted_twl = pd.read_csv(forecasted_twl_csv, index_col=[0, 1])
 
     df_forecasted_impacts = forecasted_impacts(df_profile_features, df_forecasted_twl)
+
+    df_forecasted_impacts = df_forecasted_impacts.merge(
+        twl_exceedence_time(df_profile_features, df_forecasted_twl), left_on=["site_id"], right_on=["site_id"]
+    )
+
     df_forecasted_impacts.to_csv(output_file)
     logger.info("Saved to %s", output_file)
     logger.info("Done!")

@@ -30,7 +30,7 @@ def volume_change(df_profiles, df_profile_features, zone):
     """
     logger.info("Calculating change in beach volume in {} zone".format(zone))
 
-    df_vol_changes = pd.DataFrame(index=df_profile_features.index.get_level_values('site_id').unique())
+    df_vol_changes = pd.DataFrame(index=df_profile_features.index.get_level_values("site_id").unique())
     df_profiles = df_profiles.sort_index()
     sites = df_profiles.groupby(level=["site_id"])
@@ -98,16 +98,11 @@ def volume_change(df_profiles, df_profile_features, zone):
             # Volume change needs to be calculated including a tolerance for LIDAR accuracy. If difference between
             # profiles is less than 20 cm, consider them as zero difference.
-            prestorm_z = df_zone.query("profile_type=='prestorm'").reset_index('profile_type').z
-            poststorm_z = df_zone.query("profile_type=='poststorm'").reset_index('profile_type').z
+            prestorm_z = df_zone.query("profile_type=='prestorm'").reset_index("profile_type").z
+            poststorm_z = df_zone.query("profile_type=='poststorm'").reset_index("profile_type").z
             diff_z = prestorm_z - poststorm_z
             diff_z[abs(diff_z) < 0.2] = 0
-            diff_vol = beach_volume(
-                x=diff_z.index.get_level_values("x"),
-                z=diff_z,
-                x_min=x_min,
-                x_max=x_max,
-            )
+            diff_vol = beach_volume(x=diff_z.index.get_level_values("x"), z=diff_z, x_min=x_min, x_max=x_max)
 
             df_vol_changes.loc[site_id, "prestorm_{}_vol".format(zone)] = prestorm_vol
             df_vol_changes.loc[site_id, "poststorm_{}_vol".format(zone)] = poststorm_vol
@@ -153,7 +148,6 @@ def storm_regime(df_observed_impacts):
     return df_observed_impacts
 
-
 @click.command()
 @click.option("--profiles-csv", required=True, help="")
 @click.option("--profile-features-csv", required=True, help="")
@@ -166,7 +160,7 @@ def create_observed_impacts(profiles_csv, profile_features_csv, output_file):
     df_profile_features = pd.read_csv(profile_features_csv, index_col=[0, 1])
 
     logger.info("Creating new dataframe for observed impacts")
-    df_observed_impacts = pd.DataFrame(index=df_profile_features.index.get_level_values('site_id').unique())
+    df_observed_impacts = pd.DataFrame(index=df_profile_features.index.get_level_values("site_id").unique())
 
     logger.info("Getting pre/post storm volumes")
     df_swash_vol_changes = volume_change(df_profiles, df_profile_features, zone="swash")
@@ -177,7 +171,7 @@ def create_observed_impacts(profiles_csv, profile_features_csv, output_file):
     df_observed_impacts = storm_regime(df_observed_impacts)
 
     # Save dataframe to csv
-    df_observed_impacts.to_csv(output_file, float_format='%.4f')
+    df_observed_impacts.to_csv(output_file, float_format="%.4f")
 
     logger.info("Saved to %s", output_file)
     logger.info("Done!")

@@ -2,52 +2,51 @@ import numpy as np
 import pandas as pd
 
-def sto06_individual(Hs0, Tp, beta):
+def sto06(Hs0, Tp, beta):
+    """
+    :param Hs0: List or float of offshore significant wave height values
+    :param Tp: List or float of peak wave period
+    :param beta: List or float of beach slope
+    :return: Float or list of R2, setup, S_total, S_inc and S_ig values
+    """
 
-    Lp = 9.8 * Tp ** 2 / 2 / np.pi
-    S_ig = 0.06 * np.sqrt(Hs0 * Lp)
-    S_inc = 0.75 * beta * np.sqrt(Hs0 * Lp)
+    df = pd.DataFrame({"Hs0": Hs0, "Tp": Tp, "beta": beta}, index=[x for x in range(0, np.size(Hs0))])
+    df["Lp"] = 9.8 * df["Tp"] ** 2 / 2 / np.pi
+
+    # General equation
+    df["S_ig"] = pd.to_numeric(0.06 * np.sqrt(df["Hs0"] * df["Lp"]), errors="coerce")
+    df["S_inc"] = pd.to_numeric(0.75 * df["beta"] * np.sqrt(df["Hs0"] * df["Lp"]), errors="coerce")
+    df["setup"] = pd.to_numeric(0.35 * df["beta"] * np.sqrt(df["Hs0"] * df["Lp"]), errors="coerce")
+    df["S_total"] = np.sqrt(df["S_inc"] ** 2 + df["S_ig"] ** 2)
+    df["R2"] = 1.1 * (df["setup"] + df["S_total"] / 2)
 
     # Dissipative conditions
-    if beta / (Hs0 / Lp) ** (0.5) <= 0.3:
-        setup = 0.016 * (Hs0 * Lp) ** 0.5
-        S_total = 0.046 * (Hs0 * Lp) ** 0.5
-        R2 = 0.043 * (Hs0 * Lp) ** 0.5
-    else:
-        setup = 0.35 * beta * (Hs0 * Lp) ** 0.5
-        S_total = np.sqrt(S_inc ** 2 + S_ig ** 2)
-        R2 = 1.1 * (setup + S_total / 2)
+    dissipative = df["beta"] / (df["Hs0"] / df["Lp"]) ** (0.5) <= 0.3
+    df.loc[dissipative, "setup"] = 0.016 * (df["Hs0"] * df["Lp"]) ** (0.5)  # eqn 16
+    df.loc[dissipative, "S_total"] = 0.046 * (df["Hs0"] * df["Lp"]) ** (0.5)  # eqn 17
+    df.loc[dissipative, "R2"] = 0.043 * (df["Hs0"] * df["Lp"]) ** (0.5)  # eqn 18
 
-    return R2, setup, S_total, S_inc, S_ig
+    return (
+        float_or_list(df["R2"].tolist()),
+        float_or_list(df["setup"].tolist()),
+        float_or_list(df["S_total"].tolist()),
+        float_or_list(df["S_inc"].tolist()),
+        float_or_list(df["S_ig"].tolist()),
+    )
 
-def sto06(df, Hs0_col, Tp_col, beta_col):
+def float_or_list(a):
     """
-    Vectorized version of Stockdon06 which can be used with dataframes
-    :param df:
-    :param Hs0_col:
-    :param Tp_col:
-    :param beta_col:
+    If only one value in the array, return the float, else return a list
+    :param a:
     :return:
     """
-
-    Lp = 9.8 * df[Tp_col] ** 2 / 2 / np.pi
-
-    # General equation
-    S_ig = pd.to_numeric(0.06 * np.sqrt(df[Hs0_col] * Lp), errors="coerce")
-    S_inc = pd.to_numeric(0.75 * df[beta_col] * np.sqrt(df[Hs0_col] * Lp), errors="coerce")
-    setup = pd.to_numeric(0.35 * df[beta_col] * np.sqrt(df[Hs0_col] * Lp), errors="coerce")
-    S_total = np.sqrt(S_inc ** 2 + S_ig ** 2)
-    R2 = 1.1 * (setup + S_total / 2)
-
-    # Dissipative conditions
-    dissipative = df[beta_col] / (df[Hs0_col] / Lp) ** (0.5) <= 0.3
-    setup.loc[dissipative, :] = 0.016 * (df[Hs0_col] * Lp) ** (0.5)  # eqn 16
-    S_total.loc[dissipative, :] = 0.046 * (df[Hs0_col] * Lp) ** (0.5)  # eqn 17
-    R2.loc[dissipative, :] = 0.043 * (df[Hs0_col] * Lp) ** (0.5)  # eqn 18
-
-    return R2, setup, S_total, S_inc, S_ig
+    if len(a) == 1:
+        return a[0]
+    else:
+        return list(a)
 
 if __name__ == "__main__":
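
As a sanity check on this refactor, the vectorized branch should reproduce a hand-computed Stockdon et al. (2006) value in the non-dissipative regime. A sketch with illustrative numbers (the analysis.runup_models import path is an assumption):

    import numpy as np

    # Hs0 = 3 m, Tp = 10 s, beta = 0.1; since 0.1 / (3 / 155.97) ** 0.5 ~ 0.72 > 0.3,
    # the general (non-dissipative) equation applies
    Lp = 9.8 * 10 ** 2 / (2 * np.pi)           # deep-water wavelength, ~155.97 m
    setup = 0.35 * 0.1 * np.sqrt(3 * Lp)       # ~0.76 m
    S_inc = 0.75 * 0.1 * np.sqrt(3 * Lp)       # ~1.62 m
    S_ig = 0.06 * np.sqrt(3 * Lp)              # ~1.30 m
    S_total = np.sqrt(S_inc ** 2 + S_ig ** 2)  # ~2.08 m
    R2 = 1.1 * (setup + S_total / 2)           # ~1.97 m

    # The refactored function should agree; single inputs come back as plain floats
    # from analysis import runup_models  # assumed import path
    # assert abs(runup_models.sto06(Hs0=3.0, Tp=10.0, beta=0.1)[0] - R2) < 1e-6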

@@ -13,7 +13,9 @@ import analysis.observed_storm_impacts as observed_storm_impacts
 # Disable numpy warnings
 import warnings
 
-warnings.simplefilter(action='ignore', category=FutureWarning)
+warnings.simplefilter(action="ignore", category=FutureWarning)
 
 @click.group()
 def cli():

@@ -58,35 +58,41 @@ def parse_dune_crest_toes(df_sites, crest_mat, toe_mat):
     crest_data = loadmat(crest_mat)
     toe_data = loadmat(toe_mat)
 
-    for n, _ in enumerate(crest_data['xc1']):
-        rows.extend([{
-            'dune_crest_x': crest_data['xc1'][n],
-            'dune_crest_z': crest_data['zc1'][n],
-            'dune_toe_x': toe_data['xt1'][n],
-            'dune_toe_z': toe_data['zt1'][n],
-            'profile_type': 'prestorm',
-            'site_no': n+1
-        },{
-            'dune_crest_x': crest_data['xc2'][n],
-            'dune_crest_z': crest_data['zc2'][n],
-            'dune_toe_x': toe_data['xt2'][n],
-            'dune_toe_z': toe_data['zt2'][n],
-            'profile_type': 'poststorm',
-            'site_no': n + 1
-        }])
+    for n, _ in enumerate(crest_data["xc1"]):
+        rows.extend(
+            [
+                {
+                    "dune_crest_x": crest_data["xc1"][n],
+                    "dune_crest_z": crest_data["zc1"][n],
+                    "dune_toe_x": toe_data["xt1"][n],
+                    "dune_toe_z": toe_data["zt1"][n],
+                    "profile_type": "prestorm",
+                    "site_no": n + 1,
+                },
+                {
+                    "dune_crest_x": crest_data["xc2"][n],
+                    "dune_crest_z": crest_data["zc2"][n],
+                    "dune_toe_x": toe_data["xt2"][n],
+                    "dune_toe_z": toe_data["zt2"][n],
+                    "profile_type": "poststorm",
+                    "site_no": n + 1,
+                },
+            ]
+        )
 
     df_profile_features = pd.DataFrame(rows)
 
     # Want the site_id instead of the site_no, so merge in df_sites
     df_sites.reset_index(inplace=True)
-    df_profile_features = df_sites[['site_no','site_id']].merge(df_profile_features, how='outer', on=['site_no'])
-    df_profile_features.drop(columns=['site_no'],inplace=True)
-    df_profile_features.set_index(['site_id','profile_type'], inplace=True)
+    df_profile_features = df_sites[["site_no", "site_id"]].merge(df_profile_features, how="outer", on=["site_no"])
+    df_profile_features.drop(columns=["site_no"], inplace=True)
+    df_profile_features.set_index(["site_id", "profile_type"], inplace=True)
     df_profile_features.sort_index(inplace=True)
     df_profile_features = df_profile_features.round(3)
 
     return df_profile_features
 
 def combine_sites_and_orientaions(df_sites, df_orientations):
     """
     Replaces beach/lat/lon columns with the unique site_id.
@@ -193,7 +199,7 @@ def parse_profiles_and_sites(profiles_mat):
     site_counter = 0
 
     for i, site in enumerate(mat_data["site"]):
-        logger.debug('Processing site {} of {}'.format(i+1, len(mat_data['site'])))
+        logger.debug("Processing site {} of {}".format(i + 1, len(mat_data["site"])))
 
         # Give each site a unique id
         if len(site_rows) == 0 or site_rows[-1]["beach"] != site:
@@ -248,7 +254,6 @@ def parse_profiles_and_sites(profiles_mat):
             profile_rows.append(
                 {
                     "site_id": site_id,
                     "lon": lon[0],
                     "lat": lat[0],
                     "profile_type": profile_type,
@@ -387,6 +392,7 @@ def create_profile_features(crest_mat, toe_mat, sites_csv, output_file):
     df_profile_features.to_csv(output_file)
     logger.info("Created %s", output_file)
 
+
 @click.command(short_help="create profiles.csv")
 @click.option("--profiles-mat", required=True, help=".mat file containing beach profiles")
 @click.option("--profiles-output-file", required=True, help="where to save profiles.csv")
