Fix formatting with black

Branch: develop
Chris Leaman committed 6 years ago
parent 6d62a30b2f
commit 8f790cbf1b
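This commit is the output of running black over the repository: no behaviour changes, only quote style, line wrapping, and blank lines. As a rough illustration of the kind of rewrite shown in the hunks below, black can also be driven from Python (a minimal sketch; the format_str/FileMode API has shifted between black releases, so treat this as an assumption about the installed version):

import black  # assumes the black package is installed

src = "data_folder = './data/interim'\n"
print(black.format_str(src, mode=black.FileMode()))
# prints: data_folder = "./data/interim"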

@@ -1,13 +1,15 @@
 import pandas as pd
 import os

+
 def main():
-    data_folder = './data/interim'
-    df_waves = pd.read_csv(os.path.join(data_folder, 'waves.csv'), index_col=[0,1])
-    df_tides = pd.read_csv(os.path.join(data_folder, 'tides.csv'), index_col=[0,1])
-    df_profiles = pd.read_csv(os.path.join(data_folder, 'profiles.csv'), index_col=[0,1,2])
-    df_sites = pd.read_csv(os.path.join(data_folder, 'sites.csv'),index_col=[0])
+    data_folder = "./data/interim"
+    df_waves = pd.read_csv(os.path.join(data_folder, "waves.csv"), index_col=[0, 1])
+    df_tides = pd.read_csv(os.path.join(data_folder, "tides.csv"), index_col=[0, 1])
+    df_profiles = pd.read_csv(os.path.join(data_folder, "profiles.csv"), index_col=[0, 1, 2])
+    df_sites = pd.read_csv(os.path.join(data_folder, "sites.csv"), index_col=[0])

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()

@@ -7,7 +7,7 @@ import os
 import pandas as pd

-logging.config.fileConfig('./src/logging.conf', disable_existing_loggers=False)
+logging.config.fileConfig("./src/logging.conf", disable_existing_loggers=False)
 logger = logging.getLogger(__name__)
@@ -18,15 +18,16 @@ def compare_impacts(df_forecasted, df_observed):
     :param df_observed:
     :return:
     """
-    df_compared = df_forecasted.merge(df_observed, left_index=True, right_index=True,
-                                      suffixes=['_forecasted', '_observed'])
+    df_compared = df_forecasted.merge(
+        df_observed, left_index=True, right_index=True, suffixes=["_forecasted", "_observed"]
+    )
     return df_compared


-if __name__ == '__main__':
-    logger.info('Importing existing data')
-    data_folder = './data/interim'
-    df_forecasted = pd.read_csv(os.path.join(data_folder, 'impacts_forecasted_mean_slope_sto06.csv'), index_col=[0])
-    df_observed = pd.read_csv(os.path.join(data_folder, 'impacts_observed.csv'), index_col=[0])
+if __name__ == "__main__":
+    logger.info("Importing existing data")
+    data_folder = "./data/interim"
+    df_forecasted = pd.read_csv(os.path.join(data_folder, "impacts_forecasted_mean_slope_sto06.csv"), index_col=[0])
+    df_observed = pd.read_csv(os.path.join(data_folder, "impacts_observed.csv"), index_col=[0])
     df_compared = compare_impacts(df_forecasted, df_observed)
-    df_compared.to_csv(os.path.join(data_folder, 'impacts_observed_vs_forecasted_mean_slope_sto06.csv'))
+    df_compared.to_csv(os.path.join(data_folder, "impacts_observed_vs_forecasted_mean_slope_sto06.csv"))
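For reference, the merge above keeps both copies of any overlapping column and tags them with the given suffixes. A toy sketch of what compare_impacts returns, assuming the function above is in scope (the single-site frames are hypothetical):

import pandas as pd

df_forecasted = pd.DataFrame({"storm_regime": ["swash"]}, index=["site_a"])
df_observed = pd.DataFrame({"storm_regime": ["collision"]}, index=["site_a"])
df_compared = compare_impacts(df_forecasted, df_observed)
print(df_compared.columns.tolist())  # ['storm_regime_forecasted', 'storm_regime_observed']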

@@ -9,54 +9,57 @@ from scipy import stats
 from src.analysis.runup_models import sto06_individual, sto06

-logging.config.fileConfig('./src/logging.conf', disable_existing_loggers=False)
+logging.config.fileConfig("./src/logging.conf", disable_existing_loggers=False)
 logger = logging.getLogger(__name__)


-def forecast_twl(df_tides, df_profiles, df_waves, df_profile_features, runup_function, n_processes=4,
-                 slope='foreshore'):
+def forecast_twl(
+    df_tides, df_profiles, df_waves, df_profile_features, runup_function, n_processes=4, slope="foreshore"
+):
     # Use df_waves as a base
     df_twl = df_waves.copy()

     # Merge tides
-    logger.info('Merging tides')
+    logger.info("Merging tides")
     df_twl = df_twl.merge(df_tides, left_index=True, right_index=True)

     # Estimate foreshore slope. Do the analysis per site_id. This is so we only have to query the x and z
     # cross-section profiles once per site.
-    logger.info('Calculating beach slopes')
-    site_ids = df_twl.index.get_level_values('site_id').unique()
+    logger.info("Calculating beach slopes")
+    site_ids = df_twl.index.get_level_values("site_id").unique()
     # site_ids = [x for x in site_ids if 'NARRA' in x] # todo remove this - for testing narrabeen only

-    if slope == 'foreshore':
+    if slope == "foreshore":
         # Process each site_id with a different process and combine results at the end
         with Pool(processes=n_processes) as pool:
-            results = pool.starmap(foreshore_slope_for_site_id,
-                                   [(site_id, df_twl, df_profiles) for site_id in site_ids])
-            df_twl['beta'] = pd.concat(results)
+            results = pool.starmap(
+                foreshore_slope_for_site_id, [(site_id, df_twl, df_profiles) for site_id in site_ids]
+            )
+            df_twl["beta"] = pd.concat(results)

-    elif slope == 'mean':
+    elif slope == "mean":
         # todo mean beach profile
-        df_temp = df_twl.join(df_profile_features, how='inner')
-        df_temp['mhw'] = 0.5
+        df_temp = df_twl.join(df_profile_features, how="inner")
+        df_temp["mhw"] = 0.5
         with Pool(processes=n_processes) as pool:
-            results = pool.starmap(mean_slope_for_site_id,
-                                   [(site_id, df_temp, df_profiles, 'dune_toe_z', 'mhw') for site_id in site_ids])
-            df_twl['beta'] = pd.concat(results)
+            results = pool.starmap(
+                mean_slope_for_site_id, [(site_id, df_temp, df_profiles, "dune_toe_z", "mhw") for site_id in site_ids]
+            )
+            df_twl["beta"] = pd.concat(results)

     # Estimate runup
-    R2, setup, S_total, S_inc, S_ig = runup_function(df_twl, Hs0_col='Hs0', Tp_col='Tp', beta_col='beta')
+    R2, setup, S_total, S_inc, S_ig = runup_function(df_twl, Hs0_col="Hs0", Tp_col="Tp", beta_col="beta")

-    df_twl['R2'] = R2
-    df_twl['setup'] = setup
-    df_twl['S_total'] = S_total
+    df_twl["R2"] = R2
+    df_twl["setup"] = setup
+    df_twl["S_total"] = S_total

     # Estimate TWL
-    df_twl['R_high'] = df_twl['tide'] + df_twl['R2']
-    df_twl['R_low'] = df_twl['tide'] + 1.1 * df_twl['setup'] - 1.1 / 2 * df_twl['S_total']
+    df_twl["R_high"] = df_twl["tide"] + df_twl["R2"]
+    df_twl["R_low"] = df_twl["tide"] + 1.1 * df_twl["setup"] - 1.1 / 2 * df_twl["S_total"]

     # Drop unneeded columns
-    df_twl.drop(columns=['E', 'Exs', 'P', 'Pxs', 'dir'], inplace=True, errors='ignore')
+    df_twl.drop(columns=["E", "Exs", "P", "Pxs", "dir"], inplace=True, errors="ignore")

     return df_twl
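The two total water level lines at the end of this hunk are simple arithmetic on the runup terms: the high level adds the full 2% runup exceedance to the tide, the low level keeps only the setup and half the swash band (scaled by 1.1). A worked sketch with hypothetical values, in metres:

tide, R2, setup, S_total = 0.8, 1.98, 0.76, 2.08  # hypothetical values (m)

R_high = tide + R2                              # 0.8 + 1.98 = 2.78 m
R_low = tide + 1.1 * setup - 1.1 / 2 * S_total  # 0.8 + 0.836 - 1.144 = 0.49 m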
@@ -74,15 +77,21 @@ def mean_slope_for_site_id(site_id, df_twl, df_profiles, top_elevation_col, btm_elevation_col):
     # Get the prestorm beach profile
     profile = df_profiles.query("site_id =='{}' and profile_type == 'prestorm'".format(site_id))
-    profile_x = profile.index.get_level_values('x').tolist()
+    profile_x = profile.index.get_level_values("x").tolist()
     profile_z = profile.z.tolist()

     df_twl_site = df_twl.query("site_id == '{}'".format(site_id))

-    df_beta = df_twl_site.apply(lambda row: slope_from_profile(profile_x=profile_x, profile_z=profile_z,
-                                                               top_elevation=row[top_elevation_col],
-                                                               btm_elevation=row[btm_elevation_col],
-                                                               method='end_points'), axis=1)
+    df_beta = df_twl_site.apply(
+        lambda row: slope_from_profile(
+            profile_x=profile_x,
+            profile_z=profile_z,
+            top_elevation=row[top_elevation_col],
+            btm_elevation=row[btm_elevation_col],
+            method="end_points",
+        ),
+        axis=1,
+    )

     return df_beta
@@ -99,16 +108,22 @@ def foreshore_slope_for_site_id(site_id, df_twl, df_profiles):
     # Get the prestorm beach profile
     profile = df_profiles.query("site_id =='{}' and profile_type == 'prestorm'".format(site_id))
-    profile_x = profile.index.get_level_values('x').tolist()
+    profile_x = profile.index.get_level_values("x").tolist()
     profile_z = profile.z.tolist()

     df_twl_site = df_twl.query("site_id == '{}'".format(site_id))

-    df_beta = df_twl_site.apply(lambda row: foreshore_slope_from_profile(profile_x=profile_x, profile_z=profile_z,
-                                                                         tide=row.tide,
-                                                                         runup_function=sto06_individual,
-                                                                         Hs0=row.Hs0,
-                                                                         Tp=row.Tp), axis=1)
+    df_beta = df_twl_site.apply(
+        lambda row: foreshore_slope_from_profile(
+            profile_x=profile_x,
+            profile_z=profile_z,
+            tide=row.tide,
+            runup_function=sto06_individual,
+            Hs0=row.Hs0,
+            Tp=row.Tp,
+        ),
+        axis=1,
+    )

     return df_beta
@@ -137,9 +152,13 @@ def foreshore_slope_from_profile(profile_x, profile_z, tide, runup_function, **kwargs):
     while True:
         R2, setup, S_total, _, _ = runup_function(beta=beta, **kwargs)

-        beta_new = slope_from_profile(profile_x=profile_x, profile_z=profile_z, method='end_points',
-                                      top_elevation=tide + setup + S_total / 2,
-                                      btm_elevation=tide + setup - S_total / 2)
+        beta_new = slope_from_profile(
+            profile_x=profile_x,
+            profile_z=profile_z,
+            method="end_points",
+            top_elevation=tide + setup + S_total / 2,
+            btm_elevation=tide + setup - S_total / 2,
+        )

         # Return None if we can't find a slope, usually because the elevations we've specified are above/below our
         # profile x and z coordinates.
@@ -158,7 +177,7 @@ def foreshore_slope_from_profile(profile_x, profile_z, tide, runup_function, **kwargs):
         iteration_count += 1


-def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, method='end_points'):
+def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, method="end_points"):
     """
     Returns a slope (beta) from a bed profile, given the top and bottom elevations of where the slope should be taken.
     :param x: List of x bed profile coordinates
@@ -173,16 +192,10 @@ def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, method='end_points'):
     if any([x is None for x in [profile_x, profile_z, top_elevation, btm_elevation]]):
         return None

-    end_points = {
-        'top': {
-            'z': top_elevation,
-        },
-        'btm': {
-            'z': btm_elevation,
-        }}
+    end_points = {"top": {"z": top_elevation}, "btm": {"z": btm_elevation}}

     for end_type in end_points.keys():
-        elevation = end_points[end_type]['z']
+        elevation = end_points[end_type]["z"]
         intersection_x = crossings(profile_x, profile_z, elevation)

         # No intersections found
@@ -191,26 +204,26 @@ def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, method='end_points'):
         # One intersection
         elif len(intersection_x) == 1:
-            end_points[end_type]['x'] = intersection_x[0]
+            end_points[end_type]["x"] = intersection_x[0]

         # More than on intersection
         else:
-            if end_type == 'top':
+            if end_type == "top":
                 # For top elevation, take most seaward intersection
-                end_points[end_type]['x'] = intersection_x[-1]
+                end_points[end_type]["x"] = intersection_x[-1]
             else:
                 # For bottom elevation, take most landward intersection that is seaward of top elevation
-                end_points[end_type]['x'] = [x for x in intersection_x if x > end_points['top']['x']][0]
+                end_points[end_type]["x"] = [x for x in intersection_x if x > end_points["top"]["x"]][0]

-    if method == 'end_points':
-        x_top = end_points['top']['x']
-        x_btm = end_points['btm']['x']
-        z_top = end_points['top']['z']
-        z_btm = end_points['btm']['z']
+    if method == "end_points":
+        x_top = end_points["top"]["x"]
+        x_btm = end_points["btm"]["x"]
+        z_top = end_points["top"]["z"]
+        z_btm = end_points["btm"]["z"]
         return -(z_top - z_btm) / (x_top - x_btm)

-    elif method == 'least_squares':
-        profile_mask = [True if end_points['top']['x'] < pts < end_points['btm']['x'] else False for pts in x]
+    elif method == "least_squares":
+        profile_mask = [True if end_points["top"]["x"] < pts < end_points["btm"]["x"] else False for pts in x]
         slope_x = np.array(profile_x)[profile_mask].tolist()
         slope_z = np.array(profile_z)[profile_mask].tolist()
         slope, _, _, _, _ = stats.linregress(slope_x, slope_z)
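With method="end_points", slope_from_profile reduces to rise over run between the two crossing points, negated so a profile that falls seaward gives a positive beta. A toy check of that formula on a hypothetical profile (not from the dataset):

# Linear profile dropping from +4 m to -1 m over 50 m of chainage
profile_x = [0, 10, 20, 30, 40, 50]
profile_z = [4.0, 3.0, 2.0, 1.0, 0.0, -1.0]

# top_elevation=3.0 crosses at x=10, btm_elevation=0.0 crosses at x=40
beta = -(3.0 - 0.0) / (10 - 40)
print(beta)  # 0.1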
@@ -245,23 +258,25 @@ def crossings(profile_x, profile_z, constant_z):
     return [profile_x[i] - (profile_x[i] - profile_x[i + 1]) / (z[i] - z[i + 1]) * (z[i]) for i in indicies]


-if __name__ == '__main__':
-    logger.info('Importing data')
-    data_folder = './data/interim'
-    df_waves = pd.read_csv(os.path.join(data_folder, 'waves.csv'), index_col=[0, 1])
-    df_tides = pd.read_csv(os.path.join(data_folder, 'tides.csv'), index_col=[0, 1])
-    df_profiles = pd.read_csv(os.path.join(data_folder, 'profiles.csv'), index_col=[0, 1, 2])
-    df_sites = pd.read_csv(os.path.join(data_folder, 'sites.csv'), index_col=[0])
-    df_profile_features = pd.read_csv(os.path.join(data_folder, 'profile_features.csv'), index_col=[0])
+if __name__ == "__main__":
+    logger.info("Importing data")
+    data_folder = "./data/interim"
+    df_waves = pd.read_csv(os.path.join(data_folder, "waves.csv"), index_col=[0, 1])
+    df_tides = pd.read_csv(os.path.join(data_folder, "tides.csv"), index_col=[0, 1])
+    df_profiles = pd.read_csv(os.path.join(data_folder, "profiles.csv"), index_col=[0, 1, 2])
+    df_sites = pd.read_csv(os.path.join(data_folder, "sites.csv"), index_col=[0])
+    df_profile_features = pd.read_csv(os.path.join(data_folder, "profile_features.csv"), index_col=[0])

-    logger.info('Forecasting TWL')
-    df_twl_foreshore_slope_sto06 = forecast_twl(df_tides, df_profiles, df_waves, df_profile_features,
-                                                runup_function=sto06, slope='foreshore')
-    df_twl_foreshore_slope_sto06.to_csv(os.path.join(data_folder, 'twl_foreshore_slope_sto06.csv'))
+    logger.info("Forecasting TWL")
+    df_twl_foreshore_slope_sto06 = forecast_twl(
+        df_tides, df_profiles, df_waves, df_profile_features, runup_function=sto06, slope="foreshore"
+    )
+    df_twl_foreshore_slope_sto06.to_csv(os.path.join(data_folder, "twl_foreshore_slope_sto06.csv"))

-    df_twl_mean_slope_sto06 = forecast_twl(df_tides, df_profiles, df_waves, df_profile_features,
-                                           runup_function=sto06, slope='mean')
-    df_twl_mean_slope_sto06.to_csv(os.path.join(data_folder, 'twl_mean_slope_sto06.csv'))
+    df_twl_mean_slope_sto06 = forecast_twl(
+        df_tides, df_profiles, df_waves, df_profile_features, runup_function=sto06, slope="mean"
+    )
+    df_twl_mean_slope_sto06.to_csv(os.path.join(data_folder, "twl_mean_slope_sto06.csv"))

-    logger.info('Done')
+    logger.info("Done")

@@ -7,7 +7,7 @@ import os
 import pandas as pd

-logging.config.fileConfig('./src/logging.conf', disable_existing_loggers=False)
+logging.config.fileConfig("./src/logging.conf", disable_existing_loggers=False)
 logger = logging.getLogger(__name__)
@@ -19,20 +19,19 @@ def forecasted_impacts(df_profile_features, df_forecasted_twl):
     :param df_forecasted_twl:
     :return:
     """
-    logger.info('Getting forecasted storm regimes')
+    logger.info("Getting forecasted storm regimes")

     df_forecasted_impacts = pd.DataFrame(index=df_profile_features.index)

     # For each site, find the maximum R_high value and the corresponding R_low value.
-    idx = df_forecasted_twl.groupby(level=['site_id'])['R_high'].idxmax().dropna()
-    df_r_vals = df_forecasted_twl.loc[idx, ['R_high', 'R_low']].reset_index(['datetime'])
-    df_forecasted_impacts = df_forecasted_impacts.merge(df_r_vals, how='left', left_index=True, right_index=True)
+    idx = df_forecasted_twl.groupby(level=["site_id"])["R_high"].idxmax().dropna()
+    df_r_vals = df_forecasted_twl.loc[idx, ["R_high", "R_low"]].reset_index(["datetime"])
+    df_forecasted_impacts = df_forecasted_impacts.merge(df_r_vals, how="left", left_index=True, right_index=True)

     # Join with df_profile features to find dune toe and crest elevations
-    df_forecasted_impacts = df_forecasted_impacts.merge(df_profile_features[['dune_toe_z', 'dune_crest_z']],
-                                                        how='left',
-                                                        left_index=True,
-                                                        right_index=True)
+    df_forecasted_impacts = df_forecasted_impacts.merge(
+        df_profile_features[["dune_toe_z", "dune_crest_z"]], how="left", left_index=True, right_index=True
+    )

     # Compare R_high and R_low wirth dune crest and toe elevations
     df_forecasted_impacts = storm_regime(df_forecasted_impacts)
@@ -47,27 +46,33 @@ def storm_regime(df_forecasted_impacts):
     :param df_forecasted_impacts:
     :return:
     """
-    logger.info('Getting forecasted storm regimes')
+    logger.info("Getting forecasted storm regimes")
-    df_forecasted_impacts.loc[
-        df_forecasted_impacts.R_high <= df_forecasted_impacts.dune_toe_z, 'storm_regime'] = 'swash'
-    df_forecasted_impacts.loc[
-        df_forecasted_impacts.dune_toe_z <= df_forecasted_impacts.R_high, 'storm_regime'] = 'collision'
-    df_forecasted_impacts.loc[(df_forecasted_impacts.dune_crest_z <= df_forecasted_impacts.R_high) &
-                              (df_forecasted_impacts.R_low <= df_forecasted_impacts.dune_crest_z),
-                              'storm_regime'] = 'overwash'
-    df_forecasted_impacts.loc[(df_forecasted_impacts.dune_crest_z <= df_forecasted_impacts.R_low) &
-                              (df_forecasted_impacts.dune_crest_z <= df_forecasted_impacts.R_high),
-                              'storm_regime'] = 'inundation'
+    df_forecasted_impacts.loc[
+        df_forecasted_impacts.R_high <= df_forecasted_impacts.dune_toe_z, "storm_regime"
+    ] = "swash"
+    df_forecasted_impacts.loc[
+        df_forecasted_impacts.dune_toe_z <= df_forecasted_impacts.R_high, "storm_regime"
+    ] = "collision"
+    df_forecasted_impacts.loc[
+        (df_forecasted_impacts.dune_crest_z <= df_forecasted_impacts.R_high)
+        & (df_forecasted_impacts.R_low <= df_forecasted_impacts.dune_crest_z),
+        "storm_regime",
+    ] = "overwash"
+    df_forecasted_impacts.loc[
+        (df_forecasted_impacts.dune_crest_z <= df_forecasted_impacts.R_low)
+        & (df_forecasted_impacts.dune_crest_z <= df_forecasted_impacts.R_high),
+        "storm_regime",
+    ] = "inundation"

     return df_forecasted_impacts


-if __name__ == '__main__':
-    logger.info('Importing existing data')
-    data_folder = './data/interim'
-    df_profiles = pd.read_csv(os.path.join(data_folder, 'profiles.csv'), index_col=[0, 1, 2])
-    df_profile_features = pd.read_csv(os.path.join(data_folder, 'profile_features.csv'), index_col=[0])
-    df_forecasted_twl = pd.read_csv(os.path.join(data_folder, 'twl_mean_slope_sto06.csv'), index_col=[0, 1])
+if __name__ == "__main__":
+    logger.info("Importing existing data")
+    data_folder = "./data/interim"
+    df_profiles = pd.read_csv(os.path.join(data_folder, "profiles.csv"), index_col=[0, 1, 2])
+    df_profile_features = pd.read_csv(os.path.join(data_folder, "profile_features.csv"), index_col=[0])
+    df_forecasted_twl = pd.read_csv(os.path.join(data_folder, "twl_mean_slope_sto06.csv"), index_col=[0, 1])

     df_forecasted_impacts = forecasted_impacts(df_profile_features, df_forecasted_twl)
-    df_forecasted_impacts.to_csv(os.path.join(data_folder, 'impacts_forecasted_mean_slope_sto06.csv'))
+    df_forecasted_impacts.to_csv(os.path.join(data_folder, "impacts_forecasted_mean_slope_sto06.csv"))
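The four .loc assignments above classify the swash, collision, overwash, and inundation regimes by comparing the runup levels against the dune toe and crest. A toy sketch, assuming the storm_regime function above is in scope (all elevations hypothetical, in metres):

import pandas as pd

df = pd.DataFrame(
    {
        "R_high": [1.5, 3.2, 4.8, 6.0],
        "R_low": [0.5, 1.0, 3.5, 5.5],
        "dune_toe_z": [2.0, 3.0, 3.0, 3.0],
        "dune_crest_z": [5.0, 5.0, 4.5, 5.0],
    },
    index=["site_a", "site_b", "site_c", "site_d"],
)
print(storm_regime(df).storm_regime.tolist())
# ['swash', 'collision', 'overwash', 'inundation']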

@@ -5,7 +5,7 @@ import numpy as np
 import pandas as pd
 from scipy.integrate import simps

-logging.config.fileConfig('./src/logging.conf', disable_existing_loggers=False)
+logging.config.fileConfig("./src/logging.conf", disable_existing_loggers=False)
 logger = logging.getLogger(__name__)
@@ -29,14 +29,14 @@ def volume_change(df_profiles, df_profile_features, zone):
     :param zone: Either 'swash' or 'dune_face'
     :return:
     """
-    logger.info('Calculating change in beach volume in {} zone'.format(zone))
+    logger.info("Calculating change in beach volume in {} zone".format(zone))

     df_vol_changes = pd.DataFrame(index=df_profile_features.index)
     df_profiles = df_profiles.sort_index()
-    sites = df_profiles.groupby(level=['site_id'])
+    sites = df_profiles.groupby(level=["site_id"])

     for site_id, df_site in sites:
-        logger.debug('Calculating change in beach volume at {} in {} zone'.format(site_id, zone))
+        logger.debug("Calculating change in beach volume at {} in {} zone".format(site_id, zone))

         prestorm_dune_toe_x = df_profile_features.loc[df_profile_features.index == site_id].dune_toe_x.tolist()
         prestorm_dune_crest_x = df_profile_features.loc[df_profile_features.index == site_id].dune_crest_x.tolist()
@@ -50,36 +50,44 @@ def volume_change(df_profiles, df_profile_features, zone):
         # Find last x coordinate where we have both prestorm and poststorm measurements. If we don't do this,
         # the prestorm and poststorm values are going to be calculated over different lengths.
-        df_zone = df_site.dropna(subset=['z'])
-        x_last_obs = min([max(df_zone.query("profile_type == '{}'".format(profile_type)).index.get_level_values('x'))
-                          for profile_type in ['prestorm', 'poststorm']])
+        df_zone = df_site.dropna(subset=["z"])
+        x_last_obs = min(
+            [
+                max(df_zone.query("profile_type == '{}'".format(profile_type)).index.get_level_values("x"))
+                for profile_type in ["prestorm", "poststorm"]
+            ]
+        )

         # Where we want to measure pre and post storm volume is dependant on the zone selected
-        if zone == 'swash':
+        if zone == "swash":
             x_min = prestorm_dune_toe_x
             x_max = x_last_obs
-        elif zone == 'dune_face':
+        elif zone == "dune_face":
             x_min = prestorm_dune_crest_x
             x_max = prestorm_dune_toe_x
         else:
-            logger.warning('Zone argument not properly specified. Please check')
+            logger.warning("Zone argument not properly specified. Please check")
             x_min = None
             x_max = None

         # Now, compute the volume of sand between the x-coordinates prestorm_dune_toe_x and x_swash_last for both prestorm
         # and post storm profiles.
-        prestorm_vol = beach_volume(x=df_zone.query("profile_type=='prestorm'").index.get_level_values('x'),
-                                    z=df_zone.query("profile_type=='prestorm'").z,
-                                    x_min=x_min,
-                                    x_max=x_max)
-        poststorm_vol = beach_volume(x=df_zone.query("profile_type=='poststorm'").index.get_level_values('x'),
-                                    z=df_zone.query("profile_type=='poststorm'").z,
-                                    x_min=x_min,
-                                    x_max=x_max)
+        prestorm_vol = beach_volume(
+            x=df_zone.query("profile_type=='prestorm'").index.get_level_values("x"),
+            z=df_zone.query("profile_type=='prestorm'").z,
+            x_min=x_min,
+            x_max=x_max,
+        )
+        poststorm_vol = beach_volume(
+            x=df_zone.query("profile_type=='poststorm'").index.get_level_values("x"),
+            z=df_zone.query("profile_type=='poststorm'").z,
+            x_min=x_min,
+            x_max=x_max,
+        )

-        df_vol_changes.loc[site_id, 'prestorm_{}_vol'.format(zone)] = prestorm_vol
-        df_vol_changes.loc[site_id, 'poststorm_{}_vol'.format(zone)] = poststorm_vol
-        df_vol_changes.loc[site_id, '{}_vol_change'.format(zone)] = prestorm_vol - poststorm_vol
+        df_vol_changes.loc[site_id, "prestorm_{}_vol".format(zone)] = prestorm_vol
+        df_vol_changes.loc[site_id, "poststorm_{}_vol".format(zone)] = poststorm_vol
+        df_vol_changes.loc[site_id, "{}_vol_change".format(zone)] = prestorm_vol - poststorm_vol

     return df_vol_changes
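beach_volume itself is untouched by this commit, but the file imports simps from scipy.integrate, so presumably the volume is the integral of elevation over chainage between x_min and x_max. A minimal sketch of that idea (hypothetical profile; not the actual beach_volume implementation):

import numpy as np
from scipy.integrate import simps

x = np.arange(0, 11, 1.0)      # chainage (m)
z = np.linspace(3.0, 1.0, 11)  # elevation (m), linear from 3 m down to 1 m

vol = simps(z, x)  # area under the profile = volume per metre alongshore (m^3/m)
print(vol)  # 20.0: mean height 2 m over a 10 m wide slice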
@@ -110,28 +118,28 @@ def storm_regime(df_observed_impacts):
     :param df_observed_impacts:
     :return:
     """
-    logger.info('Getting observed storm regimes')
-    df_observed_impacts.loc[df_observed_impacts.swash_vol_change < 3, 'storm_regime'] = 'swash'
-    df_observed_impacts.loc[df_observed_impacts.dune_face_vol_change > 3, 'storm_regime'] = 'collision'
+    logger.info("Getting observed storm regimes")
+    df_observed_impacts.loc[df_observed_impacts.swash_vol_change < 3, "storm_regime"] = "swash"
+    df_observed_impacts.loc[df_observed_impacts.dune_face_vol_change > 3, "storm_regime"] = "collision"
     return df_observed_impacts


-if __name__ == '__main__':
-    logger.info('Importing existing data')
-    data_folder = './data/interim'
-    df_profiles = pd.read_csv(os.path.join(data_folder, 'profiles.csv'), index_col=[0, 1, 2])
-    df_profile_features = pd.read_csv(os.path.join(data_folder, 'profile_features.csv'), index_col=[0])
+if __name__ == "__main__":
+    logger.info("Importing existing data")
+    data_folder = "./data/interim"
+    df_profiles = pd.read_csv(os.path.join(data_folder, "profiles.csv"), index_col=[0, 1, 2])
+    df_profile_features = pd.read_csv(os.path.join(data_folder, "profile_features.csv"), index_col=[0])

-    logger.info('Creating new dataframe for observed impacts')
+    logger.info("Creating new dataframe for observed impacts")
     df_observed_impacts = pd.DataFrame(index=df_profile_features.index)

-    logger.info('Getting pre/post storm volumes')
-    df_swash_vol_changes = volume_change(df_profiles, df_profile_features, zone='swash')
-    df_dune_face_vol_changes = volume_change(df_profiles, df_profile_features, zone='dune_face')
+    logger.info("Getting pre/post storm volumes")
+    df_swash_vol_changes = volume_change(df_profiles, df_profile_features, zone="swash")
+    df_dune_face_vol_changes = volume_change(df_profiles, df_profile_features, zone="dune_face")
     df_observed_impacts = df_observed_impacts.join([df_swash_vol_changes, df_dune_face_vol_changes])

     # Classify regime based on volume changes
     df_observed_impacts = storm_regime(df_observed_impacts)

     # Save dataframe to csv
-    df_observed_impacts.to_csv(os.path.join(data_folder, 'impacts_observed.csv'))
+    df_observed_impacts.to_csv(os.path.join(data_folder, "impacts_observed.csv"))

@@ -1,6 +1,7 @@
 import numpy as np
 import pandas as pd

+
 def sto06_individual(Hs0, Tp, beta):

     Lp = 9.8 * Tp ** 2 / 2 / np.pi
@@ -9,17 +10,18 @@ def sto06_individual(Hs0, Tp, beta):
     S_inc = 0.75 * beta * np.sqrt(Hs0 * Lp)

     # Dissipative conditions
-    if beta / (Hs0/Lp)**(0.5) <= 0.3:
+    if beta / (Hs0 / Lp) ** (0.5) <= 0.3:
         setup = 0.016 * (Hs0 * Lp) ** 0.5
         S_total = 0.046 * (Hs0 * Lp) ** 0.5
         R2 = 0.043 * (Hs0 * Lp) ** 0.5
     else:
         setup = 0.35 * beta * (Hs0 * Lp) ** 0.5
-        S_total = np.sqrt(S_inc ** 2 + S_ig **2)
+        S_total = np.sqrt(S_inc ** 2 + S_ig ** 2)
         R2 = 1.1 * (setup + S_total / 2)

     return R2, setup, S_total, S_inc, S_ig

+
 def sto06(df, Hs0_col, Tp_col, beta_col):
     """
     Vectorized version of Stockdon06 which can be used with dataframes
@@ -30,22 +32,23 @@ def sto06(df, Hs0_col, Tp_col, beta_col):
     :return:
     """
     Lp = 9.8 * df[Tp_col] ** 2 / 2 / np.pi

     # General equation
-    S_ig = pd.to_numeric(0.06 * np.sqrt(df[Hs0_col] * Lp), errors='coerce')
-    S_inc = pd.to_numeric(0.75 * df[beta_col] * np.sqrt(df[Hs0_col] * Lp), errors='coerce')
-    setup = pd.to_numeric(0.35 * df[beta_col] * np.sqrt(df[Hs0_col] * Lp), errors='coerce')
+    S_ig = pd.to_numeric(0.06 * np.sqrt(df[Hs0_col] * Lp), errors="coerce")
+    S_inc = pd.to_numeric(0.75 * df[beta_col] * np.sqrt(df[Hs0_col] * Lp), errors="coerce")
+    setup = pd.to_numeric(0.35 * df[beta_col] * np.sqrt(df[Hs0_col] * Lp), errors="coerce")
     S_total = np.sqrt(S_inc ** 2 + S_ig ** 2)
     R2 = 1.1 * (setup + S_total / 2)

     # Dissipative conditions
-    dissipative = df[beta_col] / (df[Hs0_col] / Lp)**(0.5) <= 0.3
-    setup.loc[dissipative,:] = 0.016 * (df[Hs0_col] * Lp) ** (0.5)  # eqn 16
-    S_total.loc[dissipative,:] = 0.046 * (df[Hs0_col] * Lp) ** (0.5)  # eqn 17
-    R2.loc[dissipative,:] = 0.043 * (df[Hs0_col] * Lp) ** (0.5)  # eqn 18
+    dissipative = df[beta_col] / (df[Hs0_col] / Lp) ** (0.5) <= 0.3
+    setup.loc[dissipative, :] = 0.016 * (df[Hs0_col] * Lp) ** (0.5)  # eqn 16
+    S_total.loc[dissipative, :] = 0.046 * (df[Hs0_col] * Lp) ** (0.5)  # eqn 17
+    R2.loc[dissipative, :] = 0.043 * (df[Hs0_col] * Lp) ** (0.5)  # eqn 18

     return R2, setup, S_total, S_inc, S_ig

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     pass
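For a sense of the magnitudes sto06_individual produces, a worked sketch with illustrative inputs (3 m offshore significant wave height, 10 s peak period, 0.1 foreshore slope):

R2, setup, S_total, S_inc, S_ig = sto06_individual(Hs0=3.0, Tp=10.0, beta=0.1)
# Lp = 9.8 * 10**2 / (2 * pi) is roughly 156 m, and beta / sqrt(Hs0 / Lp) is roughly 0.72 > 0.3,
# so the non-dissipative branch applies: R2 is about 1.98 m, setup about 0.76 m, S_total about 2.08 m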

@@ -19,14 +19,14 @@ def shapes_from_shp(shp_file):
     shapes = []
     ids = []
     properties = []
-    for feat in fiona.open(shp_file, 'r'):
-        shapes.append(shape(feat['geometry']))
-        ids.append(feat['id'])
-        properties.append(feat['properties'])
+    for feat in fiona.open(shp_file, "r"):
+        shapes.append(shape(feat["geometry"]))
+        ids.append(feat["id"])
+        properties.append(feat["properties"])
     return shapes, ids, properties


-def convert_coord_systems(g1, in_coord_system='EPSG:4326', out_coord_system='EPSG:28356'):
+def convert_coord_systems(g1, in_coord_system="EPSG:4326", out_coord_system="EPSG:28356"):
     """
     Converts coordinates from one coordinates system to another. Needed because shapefiles are usually defined in
     lat/lon but should be converted to GDA to calculated distances.
@@ -38,7 +38,8 @@ def convert_coord_systems(g1, in_coord_system='EPSG:4326', out_coord_system='EPSG:28356'):
     project = partial(
         pyproj.transform,
         pyproj.Proj(init=in_coord_system),  # source coordinate system
-        pyproj.Proj(init=out_coord_system))  # destination coordinate system
+        pyproj.Proj(init=out_coord_system),
+    )  # destination coordinate system

     g2 = transform(project, g1)  # apply projection
     return g2
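A usage sketch for convert_coord_systems as defined above. The point is hypothetical, and the axis order (x=longitude, y=latitude) follows the pyproj 1.x init= API this file uses, which newer pyproj releases deprecate:

from shapely.geometry import Point

pt_lat_lon = Point(151.30, -33.70)  # lon/lat somewhere off the NSW coast
pt_mga = convert_coord_systems(pt_lat_lon)  # defaults: EPSG:4326 -> EPSG:28356 (GDA94 / MGA zone 56)
print(pt_mga.x, pt_mga.y)  # eastings/northings in metres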
@@ -59,15 +60,19 @@ def distance_to_intersection(lat, lon, landward_orientation, beach, line_strings, line_properties):
     start_point = convert_coord_systems(start_point)

     distance = 1000  # m look up to 1000m for an intersection
-    landward_point = Point(start_point.coords.xy[0] + distance * np.cos(np.deg2rad(landward_orientation)),
-                           start_point.coords.xy[1] + distance * np.sin(np.deg2rad(landward_orientation)))
+    landward_point = Point(
+        start_point.coords.xy[0] + distance * np.cos(np.deg2rad(landward_orientation)),
+        start_point.coords.xy[1] + distance * np.sin(np.deg2rad(landward_orientation)),
+    )
     landward_line = LineString([start_point, landward_point])
-    seaward_point = Point(start_point.coords.xy[0] - distance * np.cos(np.deg2rad(landward_orientation)),
-                          start_point.coords.xy[1] - distance * np.sin(np.deg2rad(landward_orientation)))
+    seaward_point = Point(
+        start_point.coords.xy[0] - distance * np.cos(np.deg2rad(landward_orientation)),
+        start_point.coords.xy[1] - distance * np.sin(np.deg2rad(landward_orientation)),
+    )
     seaward_line = LineString([start_point, seaward_point])

     # Look at relevant line_strings which have the same beach property in order to reduce computation time
-    line_strings = [s for s, p in zip(line_strings, line_properties) if p['beach'] == beach]
+    line_strings = [s for s, p in zip(line_strings, line_properties) if p["beach"] == beach]

     # Check whether profile_line intersects with any lines in line_string. If intersection point is landwards,
     # consider this negative, otherwise seawards is positive.
@@ -99,7 +104,7 @@ def beach_profile_elevation(x_coord, df_profiles, profile_type, site_id):
     # Get profile
     df_profile = df_profiles.query('profile_type == "{}" and site_id =="{}"'.format(profile_type, site_id))

-    return np.interp(x_coord, df_profile.index.get_level_values('x'), df_profile['z'])
+    return np.interp(x_coord, df_profile.index.get_level_values("x"), df_profile["z"])


 def parse_profile_features(df_sites, df_profiles, dune_crest_shp, dune_toe_shp):
@@ -111,51 +116,38 @@ def parse_profile_features(df_sites, df_profiles, dune_crest_shp, dune_toe_shp):
     # Get site information. Base our profile features on each site
     df_profile_features = df_sites

-    features = {
-        'dune_crest':
-            {
-                'file': dune_crest_shp
-            },
-        'dune_toe':
-            {
-                'file': dune_toe_shp
-            },
-    }
+    features = {"dune_crest": {"file": dune_crest_shp}, "dune_toe": {"file": dune_toe_shp}}

     # Import our dune crest and toes
     for feat in features.keys():
-        shapes, _, properties = shapes_from_shp(features[feat]['file'])
+        shapes, _, properties = shapes_from_shp(features[feat]["file"])
         shapes = [convert_coord_systems(x) for x in shapes]

         # Figure out the x coordinates of our crest and toes, by looking at where our beach sections intersect our
         # shape files.
-        col_name = '{}_x'.format(feat)
-        df_profile_features[col_name] = df_profile_features['profile_x_lat_lon'] + \
-                                        df_profile_features.apply(lambda row:
-                                                                  distance_to_intersection(
-                                                                      row['lat'], row['lon'], row['orientation'],
-                                                                      row['beach'], shapes, properties),
-                                                                  axis=1)
+        col_name = "{}_x".format(feat)
+        df_profile_features[col_name] = df_profile_features["profile_x_lat_lon"] + df_profile_features.apply(
+            lambda row: distance_to_intersection(
+                row["lat"], row["lon"], row["orientation"], row["beach"], shapes, properties
+            ),
+            axis=1,
+        )

         # Get the elevations of the crest and toe
-        col_name = '{}_z'.format(feat)
-        df_profile_features[col_name] = df_profile_features.apply(lambda row:
-                                                                  beach_profile_elevation(
-                                                                      row['{}_x'.format(feat)],
-                                                                      df_profiles,
-                                                                      'prestorm',
-                                                                      row.name),
-                                                                  axis=1)
+        col_name = "{}_z".format(feat)
+        df_profile_features[col_name] = df_profile_features.apply(
+            lambda row: beach_profile_elevation(row["{}_x".format(feat)], df_profiles, "prestorm", row.name), axis=1
+        )

-    df_profile_features = df_profile_features.drop(columns=['beach', 'lat', 'lon', 'orientation'])
+
+    df_profile_features = df_profile_features.drop(columns=["beach", "lat", "lon", "orientation"])
     return df_profile_features


-if __name__ == '__main__':
-    data_folder = './data/interim'
-    df_sites = pd.read_csv(os.path.join(data_folder, 'sites.csv'), index_col=[0])
-    df_profiles = pd.read_csv(os.path.join(data_folder, 'profiles.csv'), index_col=[0, 1, 2])
+if __name__ == "__main__":
+    data_folder = "./data/interim"
+    df_sites = pd.read_csv(os.path.join(data_folder, "sites.csv"), index_col=[0])
+    df_profiles = pd.read_csv(os.path.join(data_folder, "profiles.csv"), index_col=[0, 1, 2])

-    dune_crest_shp = './data/raw/profile_features/dune_crests.shp'
-    dune_toe_shp = './data/raw/profile_features/dune_toes.shp'
+    dune_crest_shp = "./data/raw/profile_features/dune_crests.shp"
+    dune_toe_shp = "./data/raw/profile_features/dune_toes.shp"

     df_profile_features = parse_profile_features(df_sites, df_profiles, dune_crest_shp, dune_toe_shp)
-    df_profile_features.to_csv('./data/interim/profile_features.csv')
+    df_profile_features.to_csv("./data/interim/profile_features.csv")
