Fix formatting with black

develop
Chris Leaman 6 years ago
parent 6d62a30b2f
commit 8f790cbf1b

@@ -1,13 +1,15 @@
import pandas as pd
import os
def main():
data_folder = './data/interim'
df_waves = pd.read_csv(os.path.join(data_folder, 'waves.csv'), index_col=[0,1])
df_tides = pd.read_csv(os.path.join(data_folder, 'tides.csv'), index_col=[0,1])
df_profiles = pd.read_csv(os.path.join(data_folder, 'profiles.csv'), index_col=[0,1,2])
df_sites = pd.read_csv(os.path.join(data_folder, 'sites.csv'),index_col=[0])
data_folder = "./data/interim"
df_waves = pd.read_csv(os.path.join(data_folder, "waves.csv"), index_col=[0, 1])
df_tides = pd.read_csv(os.path.join(data_folder, "tides.csv"), index_col=[0, 1])
df_profiles = pd.read_csv(os.path.join(data_folder, "profiles.csv"), index_col=[0, 1, 2])
df_sites = pd.read_csv(os.path.join(data_folder, "sites.csv"), index_col=[0])
if __name__ == '__main__':
if __name__ == "__main__":
main()

@@ -7,7 +7,7 @@ import os
import pandas as pd
logging.config.fileConfig('./src/logging.conf', disable_existing_loggers=False)
logging.config.fileConfig("./src/logging.conf", disable_existing_loggers=False)
logger = logging.getLogger(__name__)
@@ -18,15 +18,16 @@ def compare_impacts(df_forecasted, df_observed):
:param df_observed:
:return:
"""
df_compared = df_forecasted.merge(df_observed, left_index=True, right_index=True,
suffixes=['_forecasted', '_observed'])
df_compared = df_forecasted.merge(
df_observed, left_index=True, right_index=True, suffixes=["_forecasted", "_observed"]
)
return df_compared
if __name__ == '__main__':
logger.info('Importing existing data')
data_folder = './data/interim'
df_forecasted = pd.read_csv(os.path.join(data_folder, 'impacts_forecasted_mean_slope_sto06.csv'), index_col=[0])
df_observed = pd.read_csv(os.path.join(data_folder, 'impacts_observed.csv'), index_col=[0])
if __name__ == "__main__":
logger.info("Importing existing data")
data_folder = "./data/interim"
df_forecasted = pd.read_csv(os.path.join(data_folder, "impacts_forecasted_mean_slope_sto06.csv"), index_col=[0])
df_observed = pd.read_csv(os.path.join(data_folder, "impacts_observed.csv"), index_col=[0])
df_compared = compare_impacts(df_forecasted, df_observed)
df_compared.to_csv(os.path.join(data_folder, 'impacts_observed_vs_forecasted_mean_slope_sto06.csv'))
df_compared.to_csv(os.path.join(data_folder, "impacts_observed_vs_forecasted_mean_slope_sto06.csv"))
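Since compare_impacts is just an index-aligned merge, any column present in both frames comes out twice, once per suffix. A minimal sketch of that behaviour (the storm_regime column and the site_id value are assumptions for illustration):

```python
import pandas as pd

# Hypothetical one-row frames sharing a "storm_regime" column
df_forecasted = pd.DataFrame({"storm_regime": ["swash"]}, index=pd.Index(["NARRA0001"], name="site_id"))
df_observed = pd.DataFrame({"storm_regime": ["collision"]}, index=pd.Index(["NARRA0001"], name="site_id"))

df_compared = df_forecasted.merge(
    df_observed, left_index=True, right_index=True, suffixes=["_forecasted", "_observed"]
)
print(df_compared.columns.tolist())  # ['storm_regime_forecasted', 'storm_regime_observed']
```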

@@ -9,54 +9,57 @@ from scipy import stats
from src.analysis.runup_models import sto06_individual, sto06
logging.config.fileConfig('./src/logging.conf', disable_existing_loggers=False)
logging.config.fileConfig("./src/logging.conf", disable_existing_loggers=False)
logger = logging.getLogger(__name__)
def forecast_twl(df_tides, df_profiles, df_waves, df_profile_features, runup_function, n_processes=4,
slope='foreshore'):
def forecast_twl(
df_tides, df_profiles, df_waves, df_profile_features, runup_function, n_processes=4, slope="foreshore"
):
# Use df_waves as a base
df_twl = df_waves.copy()
# Merge tides
logger.info('Merging tides')
logger.info("Merging tides")
df_twl = df_twl.merge(df_tides, left_index=True, right_index=True)
# Estimate foreshore slope. Do the analysis per site_id. This is so we only have to query the x and z
# cross-section profiles once per site.
logger.info('Calculating beach slopes')
site_ids = df_twl.index.get_level_values('site_id').unique()
logger.info("Calculating beach slopes")
site_ids = df_twl.index.get_level_values("site_id").unique()
# site_ids = [x for x in site_ids if 'NARRA' in x] # todo remove this - for testing narrabeen only
if slope == 'foreshore':
if slope == "foreshore":
# Process each site_id with a different process and combine results at the end
with Pool(processes=n_processes) as pool:
results = pool.starmap(foreshore_slope_for_site_id,
[(site_id, df_twl, df_profiles) for site_id in site_ids])
df_twl['beta'] = pd.concat(results)
results = pool.starmap(
foreshore_slope_for_site_id, [(site_id, df_twl, df_profiles) for site_id in site_ids]
)
df_twl["beta"] = pd.concat(results)
elif slope == 'mean':
elif slope == "mean":
# todo mean beach profile
df_temp = df_twl.join(df_profile_features, how='inner')
df_temp['mhw'] = 0.5
df_temp = df_twl.join(df_profile_features, how="inner")
df_temp["mhw"] = 0.5
with Pool(processes=n_processes) as pool:
results = pool.starmap(mean_slope_for_site_id,
[(site_id, df_temp, df_profiles, 'dune_toe_z', 'mhw') for site_id in site_ids])
df_twl['beta'] = pd.concat(results)
results = pool.starmap(
mean_slope_for_site_id, [(site_id, df_temp, df_profiles, "dune_toe_z", "mhw") for site_id in site_ids]
)
df_twl["beta"] = pd.concat(results)
# Estimate runup
R2, setup, S_total, S_inc, S_ig = runup_function(df_twl, Hs0_col='Hs0', Tp_col='Tp', beta_col='beta')
R2, setup, S_total, S_inc, S_ig = runup_function(df_twl, Hs0_col="Hs0", Tp_col="Tp", beta_col="beta")
df_twl['R2'] = R2
df_twl['setup'] = setup
df_twl['S_total'] = S_total
df_twl["R2"] = R2
df_twl["setup"] = setup
df_twl["S_total"] = S_total
# Estimate TWL
df_twl['R_high'] = df_twl['tide'] + df_twl['R2']
df_twl['R_low'] = df_twl['tide'] + 1.1 * df_twl['setup'] - 1.1 / 2 * df_twl['S_total']
df_twl["R_high"] = df_twl["tide"] + df_twl["R2"]
df_twl["R_low"] = df_twl["tide"] + 1.1 * df_twl["setup"] - 1.1 / 2 * df_twl["S_total"]
# Drop unneeded columns
df_twl.drop(columns=['E', 'Exs', 'P', 'Pxs', 'dir'], inplace=True, errors='ignore')
df_twl.drop(columns=["E", "Exs", "P", "Pxs", "dir"], inplace=True, errors="ignore")
return df_twl
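For reference, the two total-water-level bounds set above are simple arithmetic on the Stockdon runup components; a scalar sketch with illustrative numbers:

```python
# Illustrative values only; in forecast_twl these are per-row Series
tide, R2, setup, S_total = 0.8, 1.97, 0.76, 2.08

R_high = tide + R2  # highest elevation reached by wave runup
R_low = tide + 1.1 * setup - 1.1 / 2 * S_total  # lower bound of the swash excursion
print(round(R_high, 2), round(R_low, 2))  # 2.77 0.49
```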
@@ -74,15 +77,21 @@ def mean_slope_for_site_id(site_id, df_twl, df_profiles, top_elevation_col, btm_
# Get the prestorm beach profile
profile = df_profiles.query("site_id =='{}' and profile_type == 'prestorm'".format(site_id))
profile_x = profile.index.get_level_values('x').tolist()
profile_x = profile.index.get_level_values("x").tolist()
profile_z = profile.z.tolist()
df_twl_site = df_twl.query("site_id == '{}'".format(site_id))
df_beta = df_twl_site.apply(lambda row: slope_from_profile(profile_x=profile_x, profile_z=profile_z,
df_beta = df_twl_site.apply(
lambda row: slope_from_profile(
profile_x=profile_x,
profile_z=profile_z,
top_elevation=row[top_elevation_col],
btm_elevation=row[btm_elevation_col],
method='end_points'), axis=1)
method="end_points",
),
axis=1,
)
return df_beta
@@ -99,16 +108,22 @@ def foreshore_slope_for_site_id(site_id, df_twl, df_profiles):
# Get the prestorm beach profile
profile = df_profiles.query("site_id =='{}' and profile_type == 'prestorm'".format(site_id))
profile_x = profile.index.get_level_values('x').tolist()
profile_x = profile.index.get_level_values("x").tolist()
profile_z = profile.z.tolist()
df_twl_site = df_twl.query("site_id == '{}'".format(site_id))
df_beta = df_twl_site.apply(lambda row: foreshore_slope_from_profile(profile_x=profile_x, profile_z=profile_z,
df_beta = df_twl_site.apply(
lambda row: foreshore_slope_from_profile(
profile_x=profile_x,
profile_z=profile_z,
tide=row.tide,
runup_function=sto06_individual,
Hs0=row.Hs0,
Tp=row.Tp), axis=1)
Tp=row.Tp,
),
axis=1,
)
return df_beta
@@ -137,9 +152,13 @@ def foreshore_slope_from_profile(profile_x, profile_z, tide, runup_function, **k
while True:
R2, setup, S_total, _, _ = runup_function(beta=beta, **kwargs)
beta_new = slope_from_profile(profile_x=profile_x, profile_z=profile_z, method='end_points',
beta_new = slope_from_profile(
profile_x=profile_x,
profile_z=profile_z,
method="end_points",
top_elevation=tide + setup + S_total / 2,
btm_elevation=tide + setup - S_total / 2)
btm_elevation=tide + setup - S_total / 2,
)
# Return None if we can't find a slope, usually because the elevations we've specified are above/below our
# profile x and z coordinates.
@@ -158,7 +177,7 @@ def foreshore_slope_from_profile(profile_x, profile_z, tide, runup_function, **k
iteration_count += 1
def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, method='end_points'):
def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, method="end_points"):
"""
Returns a slope (beta) from a bed profile, given the top and bottom elevations of where the slope should be taken.
:param profile_x: List of x bed profile coordinates
@@ -173,16 +192,10 @@ def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, metho
if any([x is None for x in [profile_x, profile_z, top_elevation, btm_elevation]]):
return None
end_points = {
'top': {
'z': top_elevation,
},
'btm': {
'z': btm_elevation,
}}
end_points = {"top": {"z": top_elevation}, "btm": {"z": btm_elevation}}
for end_type in end_points.keys():
elevation = end_points[end_type]['z']
elevation = end_points[end_type]["z"]
intersection_x = crossings(profile_x, profile_z, elevation)
# No intersections found
@@ -191,26 +204,26 @@ def slope_from_profile(profile_x, profile_z, top_elevation, btm_elevation, metho
# One intersection
elif len(intersection_x) == 1:
end_points[end_type]['x'] = intersection_x[0]
end_points[end_type]["x"] = intersection_x[0]
# More than one intersection
else:
if end_type == 'top':
if end_type == "top":
# For top elevation, take most seaward intersection
end_points[end_type]['x'] = intersection_x[-1]
end_points[end_type]["x"] = intersection_x[-1]
else:
# For bottom elevation, take most landward intersection that is seaward of top elevation
end_points[end_type]['x'] = [x for x in intersection_x if x > end_points['top']['x']][0]
end_points[end_type]["x"] = [x for x in intersection_x if x > end_points["top"]["x"]][0]
if method == 'end_points':
x_top = end_points['top']['x']
x_btm = end_points['btm']['x']
z_top = end_points['top']['z']
z_btm = end_points['btm']['z']
if method == "end_points":
x_top = end_points["top"]["x"]
x_btm = end_points["btm"]["x"]
z_top = end_points["top"]["z"]
z_btm = end_points["btm"]["z"]
return -(z_top - z_btm) / (x_top - x_btm)
elif method == 'least_squares':
profile_mask = [True if end_points['top']['x'] < pts < end_points['btm']['x'] else False for pts in profile_x]
elif method == "least_squares":
profile_mask = [True if end_points["top"]["x"] < pts < end_points["btm"]["x"] else False for pts in profile_x]
slope_x = np.array(profile_x)[profile_mask].tolist()
slope_z = np.array(profile_z)[profile_mask].tolist()
slope, _, _, _, _ = stats.linregress(slope_x, slope_z)
@@ -245,23 +258,25 @@ def crossings(profile_x, profile_z, constant_z):
return [profile_x[i] - (profile_x[i] - profile_x[i + 1]) / (z[i] - z[i + 1]) * (z[i]) for i in indicies]
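With method="end_points", slope_from_profile reduces to rise over run between the two crossing points, with the sign flipped so a profile that falls seaward yields a positive beta. A worked example on a synthetic planar profile:

```python
# A planar beach dropping from z=4 m at x=0 to z=0 m at x=40 crosses
# z=3 m at x=10 and z=1 m at x=30 (synthetic numbers for illustration).
x_top, z_top = 10.0, 3.0
x_btm, z_btm = 30.0, 1.0
beta = -(z_top - z_btm) / (x_top - x_btm)
print(beta)  # 0.1, i.e. a 1:10 slope
```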
if __name__ == '__main__':
logger.info('Importing data')
data_folder = './data/interim'
df_waves = pd.read_csv(os.path.join(data_folder, 'waves.csv'), index_col=[0, 1])
df_tides = pd.read_csv(os.path.join(data_folder, 'tides.csv'), index_col=[0, 1])
df_profiles = pd.read_csv(os.path.join(data_folder, 'profiles.csv'), index_col=[0, 1, 2])
df_sites = pd.read_csv(os.path.join(data_folder, 'sites.csv'), index_col=[0])
df_profile_features = pd.read_csv(os.path.join(data_folder, 'profile_features.csv'), index_col=[0])
if __name__ == "__main__":
logger.info("Importing data")
data_folder = "./data/interim"
df_waves = pd.read_csv(os.path.join(data_folder, "waves.csv"), index_col=[0, 1])
df_tides = pd.read_csv(os.path.join(data_folder, "tides.csv"), index_col=[0, 1])
df_profiles = pd.read_csv(os.path.join(data_folder, "profiles.csv"), index_col=[0, 1, 2])
df_sites = pd.read_csv(os.path.join(data_folder, "sites.csv"), index_col=[0])
df_profile_features = pd.read_csv(os.path.join(data_folder, "profile_features.csv"), index_col=[0])
logger.info('Forecasting TWL')
logger.info("Forecasting TWL")
df_twl_foreshore_slope_sto06 = forecast_twl(df_tides, df_profiles, df_waves, df_profile_features,
runup_function=sto06, slope='foreshore')
df_twl_foreshore_slope_sto06.to_csv(os.path.join(data_folder, 'twl_foreshore_slope_sto06.csv'))
df_twl_foreshore_slope_sto06 = forecast_twl(
df_tides, df_profiles, df_waves, df_profile_features, runup_function=sto06, slope="foreshore"
)
df_twl_foreshore_slope_sto06.to_csv(os.path.join(data_folder, "twl_foreshore_slope_sto06.csv"))
df_twl_mean_slope_sto06 = forecast_twl(df_tides, df_profiles, df_waves, df_profile_features,
runup_function=sto06, slope='mean')
df_twl_mean_slope_sto06.to_csv(os.path.join(data_folder, 'twl_mean_slope_sto06.csv'))
df_twl_mean_slope_sto06 = forecast_twl(
df_tides, df_profiles, df_waves, df_profile_features, runup_function=sto06, slope="mean"
)
df_twl_mean_slope_sto06.to_csv(os.path.join(data_folder, "twl_mean_slope_sto06.csv"))
logger.info('Done')
logger.info("Done")

@@ -7,7 +7,7 @@ import os
import pandas as pd
logging.config.fileConfig('./src/logging.conf', disable_existing_loggers=False)
logging.config.fileConfig("./src/logging.conf", disable_existing_loggers=False)
logger = logging.getLogger(__name__)
@@ -19,20 +19,19 @@ def forecasted_impacts(df_profile_features, df_forecasted_twl):
:param df_forecasted_twl:
:return:
"""
logger.info('Getting forecasted storm regimes')
logger.info("Getting forecasted storm regimes")
df_forecasted_impacts = pd.DataFrame(index=df_profile_features.index)
# For each site, find the maximum R_high value and the corresponding R_low value.
idx = df_forecasted_twl.groupby(level=['site_id'])['R_high'].idxmax().dropna()
df_r_vals = df_forecasted_twl.loc[idx, ['R_high', 'R_low']].reset_index(['datetime'])
df_forecasted_impacts = df_forecasted_impacts.merge(df_r_vals, how='left', left_index=True, right_index=True)
idx = df_forecasted_twl.groupby(level=["site_id"])["R_high"].idxmax().dropna()
df_r_vals = df_forecasted_twl.loc[idx, ["R_high", "R_low"]].reset_index(["datetime"])
df_forecasted_impacts = df_forecasted_impacts.merge(df_r_vals, how="left", left_index=True, right_index=True)
# Join with df_profile_features to find dune toe and crest elevations
df_forecasted_impacts = df_forecasted_impacts.merge(df_profile_features[['dune_toe_z', 'dune_crest_z']],
how='left',
left_index=True,
right_index=True)
df_forecasted_impacts = df_forecasted_impacts.merge(
df_profile_features[["dune_toe_z", "dune_crest_z"]], how="left", left_index=True, right_index=True
)
# Compare R_high and R_low with dune crest and toe elevations
df_forecasted_impacts = storm_regime(df_forecasted_impacts)
@@ -47,27 +46,33 @@ def storm_regime(df_forecasted_impacts):
:param df_forecasted_impacts:
:return:
"""
logger.info('Getting forecasted storm regimes')
logger.info("Getting forecasted storm regimes")
df_forecasted_impacts.loc[
df_forecasted_impacts.R_high <= df_forecasted_impacts.dune_toe_z, 'storm_regime'] = 'swash'
df_forecasted_impacts.R_high <= df_forecasted_impacts.dune_toe_z, "storm_regime"
] = "swash"
df_forecasted_impacts.loc[
df_forecasted_impacts.dune_toe_z <= df_forecasted_impacts.R_high, 'storm_regime'] = 'collision'
df_forecasted_impacts.loc[(df_forecasted_impacts.dune_crest_z <= df_forecasted_impacts.R_high) &
(df_forecasted_impacts.R_low <= df_forecasted_impacts.dune_crest_z),
'storm_regime'] = 'overwash'
df_forecasted_impacts.loc[(df_forecasted_impacts.dune_crest_z <= df_forecasted_impacts.R_low) &
(df_forecasted_impacts.dune_crest_z <= df_forecasted_impacts.R_high),
'storm_regime'] = 'inundation'
df_forecasted_impacts.dune_toe_z <= df_forecasted_impacts.R_high, "storm_regime"
] = "collision"
df_forecasted_impacts.loc[
(df_forecasted_impacts.dune_crest_z <= df_forecasted_impacts.R_high)
& (df_forecasted_impacts.R_low <= df_forecasted_impacts.dune_crest_z),
"storm_regime",
] = "overwash"
df_forecasted_impacts.loc[
(df_forecasted_impacts.dune_crest_z <= df_forecasted_impacts.R_low)
& (df_forecasted_impacts.dune_crest_z <= df_forecasted_impacts.R_high),
"storm_regime",
] = "inundation"
return df_forecasted_impacts
if __name__ == '__main__':
logger.info('Importing existing data')
data_folder = './data/interim'
df_profiles = pd.read_csv(os.path.join(data_folder, 'profiles.csv'), index_col=[0, 1, 2])
df_profile_features = pd.read_csv(os.path.join(data_folder, 'profile_features.csv'), index_col=[0])
df_forecasted_twl = pd.read_csv(os.path.join(data_folder, 'twl_mean_slope_sto06.csv'), index_col=[0, 1])
if __name__ == "__main__":
logger.info("Importing existing data")
data_folder = "./data/interim"
df_profiles = pd.read_csv(os.path.join(data_folder, "profiles.csv"), index_col=[0, 1, 2])
df_profile_features = pd.read_csv(os.path.join(data_folder, "profile_features.csv"), index_col=[0])
df_forecasted_twl = pd.read_csv(os.path.join(data_folder, "twl_mean_slope_sto06.csv"), index_col=[0, 1])
df_forecasted_impacts = forecasted_impacts(df_profile_features, df_forecasted_twl)
df_forecasted_impacts.to_csv(os.path.join(data_folder, 'impacts_forecasted_mean_slope_sto06.csv'))
df_forecasted_impacts.to_csv(os.path.join(data_folder, "impacts_forecasted_mean_slope_sto06.csv"))
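Note that the .loc assignments in storm_regime run in order and later matches overwrite earlier ones, so a site meeting both the collision and inundation conditions ends up classified as inundation; the four labels follow the usual swash/collision/overwash/inundation storm-impact scale. A sketch with illustrative numbers:

```python
import pandas as pd

# Three hypothetical sites: runup below the toe, above the toe, and above the crest
df = pd.DataFrame({
    "R_high": [1.0, 3.2, 5.5],
    "R_low": [0.2, 1.0, 5.0],
    "dune_toe_z": [2.0, 2.0, 2.0],
    "dune_crest_z": [4.5, 4.5, 4.5],
})

df.loc[df.R_high <= df.dune_toe_z, "storm_regime"] = "swash"
df.loc[df.dune_toe_z <= df.R_high, "storm_regime"] = "collision"
df.loc[(df.dune_crest_z <= df.R_high) & (df.R_low <= df.dune_crest_z), "storm_regime"] = "overwash"
df.loc[(df.dune_crest_z <= df.R_low) & (df.dune_crest_z <= df.R_high), "storm_regime"] = "inundation"
print(df.storm_regime.tolist())  # ['swash', 'collision', 'inundation']
```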

@@ -5,7 +5,7 @@ import numpy as np
import pandas as pd
from scipy.integrate import simps
logging.config.fileConfig('./src/logging.conf', disable_existing_loggers=False)
logging.config.fileConfig("./src/logging.conf", disable_existing_loggers=False)
logger = logging.getLogger(__name__)
@@ -29,14 +29,14 @@ def volume_change(df_profiles, df_profile_features, zone):
:param zone: Either 'swash' or 'dune_face'
:return:
"""
logger.info('Calculating change in beach volume in {} zone'.format(zone))
logger.info("Calculating change in beach volume in {} zone".format(zone))
df_vol_changes = pd.DataFrame(index=df_profile_features.index)
df_profiles = df_profiles.sort_index()
sites = df_profiles.groupby(level=['site_id'])
sites = df_profiles.groupby(level=["site_id"])
for site_id, df_site in sites:
logger.debug('Calculating change in beach volume at {} in {} zone'.format(site_id, zone))
logger.debug("Calculating change in beach volume at {} in {} zone".format(site_id, zone))
prestorm_dune_toe_x = df_profile_features.loc[df_profile_features.index == site_id].dune_toe_x.tolist()
prestorm_dune_crest_x = df_profile_features.loc[df_profile_features.index == site_id].dune_crest_x.tolist()
@@ -50,36 +50,44 @@ def volume_change(df_profiles, df_profile_features, zone):
# Find last x coordinate where we have both prestorm and poststorm measurements. If we don't do this,
# the prestorm and poststorm values are going to be calculated over different lengths.
df_zone = df_site.dropna(subset=['z'])
x_last_obs = min([max(df_zone.query("profile_type == '{}'".format(profile_type)).index.get_level_values('x'))
for profile_type in ['prestorm', 'poststorm']])
df_zone = df_site.dropna(subset=["z"])
x_last_obs = min(
[
max(df_zone.query("profile_type == '{}'".format(profile_type)).index.get_level_values("x"))
for profile_type in ["prestorm", "poststorm"]
]
)
# Where we want to measure pre and post storm volume is dependent on the zone selected
if zone == 'swash':
if zone == "swash":
x_min = prestorm_dune_toe_x
x_max = x_last_obs
elif zone == 'dune_face':
elif zone == "dune_face":
x_min = prestorm_dune_crest_x
x_max = prestorm_dune_toe_x
else:
logger.warning('Zone argument not properly specified. Please check')
logger.warning("Zone argument not properly specified. Please check")
x_min = None
x_max = None
# Now, compute the volume of sand between the x-coordinates x_min and x_max for both prestorm
# and poststorm profiles.
prestorm_vol = beach_volume(x=df_zone.query("profile_type=='prestorm'").index.get_level_values('x'),
prestorm_vol = beach_volume(
x=df_zone.query("profile_type=='prestorm'").index.get_level_values("x"),
z=df_zone.query("profile_type=='prestorm'").z,
x_min=x_min,
x_max=x_max)
poststorm_vol = beach_volume(x=df_zone.query("profile_type=='poststorm'").index.get_level_values('x'),
x_max=x_max,
)
poststorm_vol = beach_volume(
x=df_zone.query("profile_type=='poststorm'").index.get_level_values("x"),
z=df_zone.query("profile_type=='poststorm'").z,
x_min=x_min,
x_max=x_max)
x_max=x_max,
)
df_vol_changes.loc[site_id, 'prestorm_{}_vol'.format(zone)] = prestorm_vol
df_vol_changes.loc[site_id, 'poststorm_{}_vol'.format(zone)] = poststorm_vol
df_vol_changes.loc[site_id, '{}_vol_change'.format(zone)] = prestorm_vol - poststorm_vol
df_vol_changes.loc[site_id, "prestorm_{}_vol".format(zone)] = prestorm_vol
df_vol_changes.loc[site_id, "poststorm_{}_vol".format(zone)] = poststorm_vol
df_vol_changes.loc[site_id, "{}_vol_change".format(zone)] = prestorm_vol - poststorm_vol
return df_vol_changes
@@ -110,28 +118,28 @@ def storm_regime(df_observed_impacts):
:param df_observed_impacts:
:return:
"""
logger.info('Getting observed storm regimes')
df_observed_impacts.loc[df_observed_impacts.swash_vol_change < 3, 'storm_regime'] = 'swash'
df_observed_impacts.loc[df_observed_impacts.dune_face_vol_change > 3, 'storm_regime'] = 'collision'
logger.info("Getting observed storm regimes")
df_observed_impacts.loc[df_observed_impacts.swash_vol_change < 3, "storm_regime"] = "swash"
df_observed_impacts.loc[df_observed_impacts.dune_face_vol_change > 3, "storm_regime"] = "collision"
return df_observed_impacts
if __name__ == '__main__':
logger.info('Importing existing data')
data_folder = './data/interim'
df_profiles = pd.read_csv(os.path.join(data_folder, 'profiles.csv'), index_col=[0, 1, 2])
df_profile_features = pd.read_csv(os.path.join(data_folder, 'profile_features.csv'), index_col=[0])
if __name__ == "__main__":
logger.info("Importing existing data")
data_folder = "./data/interim"
df_profiles = pd.read_csv(os.path.join(data_folder, "profiles.csv"), index_col=[0, 1, 2])
df_profile_features = pd.read_csv(os.path.join(data_folder, "profile_features.csv"), index_col=[0])
logger.info('Creating new dataframe for observed impacts')
logger.info("Creating new dataframe for observed impacts")
df_observed_impacts = pd.DataFrame(index=df_profile_features.index)
logger.info('Getting pre/post storm volumes')
df_swash_vol_changes = volume_change(df_profiles, df_profile_features, zone='swash')
df_dune_face_vol_changes = volume_change(df_profiles, df_profile_features, zone='dune_face')
logger.info("Getting pre/post storm volumes")
df_swash_vol_changes = volume_change(df_profiles, df_profile_features, zone="swash")
df_dune_face_vol_changes = volume_change(df_profiles, df_profile_features, zone="dune_face")
df_observed_impacts = df_observed_impacts.join([df_swash_vol_changes, df_dune_face_vol_changes])
# Classify regime based on volume changes
df_observed_impacts = storm_regime(df_observed_impacts)
# Save dataframe to csv
df_observed_impacts.to_csv(os.path.join(data_folder, 'impacts_observed.csv'))
df_observed_impacts.to_csv(os.path.join(data_folder, "impacts_observed.csv"))
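beach_volume itself is outside this diff. The sketch below is one plausible implementation consistent with the x/z/x_min/x_max call signature and the simps import above; it is an assumption, not the author's code:

```python
import numpy as np
from scipy.integrate import simps

def beach_volume(x, z, x_min, x_max):
    # Hypothetical reconstruction: integrate elevation over the selected
    # x-range, giving a volume per metre of beach width (m^3/m).
    # Assumes scalar x_min/x_max bounds.
    if x_min is None or x_max is None:
        return np.nan
    x = np.asarray(x, dtype=float)
    z = np.asarray(z, dtype=float)
    mask = (x_min <= x) & (x <= x_max)
    if mask.sum() < 2:  # need at least two points to integrate
        return np.nan
    return simps(z[mask], x[mask])
```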

@@ -1,6 +1,7 @@
import numpy as np
import pandas as pd
def sto06_individual(Hs0, Tp, beta):
Lp = 9.8 * Tp ** 2 / 2 / np.pi
@@ -9,17 +10,18 @@ def sto06_individual(Hs0, Tp, beta):
S_inc = 0.75 * beta * np.sqrt(Hs0 * Lp)
# Dissipative conditions
if beta / (Hs0/Lp)**(0.5) <= 0.3:
if beta / (Hs0 / Lp) ** (0.5) <= 0.3:
setup = 0.016 * (Hs0 * Lp) ** 0.5
S_total = 0.046 * (Hs0 * Lp) ** 0.5
R2 = 0.043 * (Hs0 * Lp) ** 0.5
else:
setup = 0.35 * beta * (Hs0 * Lp) ** 0.5
S_total = np.sqrt(S_inc ** 2 + S_ig **2)
S_total = np.sqrt(S_inc ** 2 + S_ig ** 2)
R2 = 1.1 * (setup + S_total / 2)
return R2, setup, S_total, S_inc, S_ig
def sto06(df, Hs0_col, Tp_col, beta_col):
"""
Vectorized version of Stockdon06 which can be used with dataframes
@@ -33,19 +35,20 @@ def sto06(df, Hs0_col, Tp_col, beta_col):
Lp = 9.8 * df[Tp_col] ** 2 / 2 / np.pi
# General equation
S_ig = pd.to_numeric(0.06 * np.sqrt(df[Hs0_col] * Lp), errors='coerce')
S_inc = pd.to_numeric(0.75 * df[beta_col] * np.sqrt(df[Hs0_col] * Lp), errors='coerce')
setup = pd.to_numeric(0.35 * df[beta_col] * np.sqrt(df[Hs0_col] * Lp), errors='coerce')
S_ig = pd.to_numeric(0.06 * np.sqrt(df[Hs0_col] * Lp), errors="coerce")
S_inc = pd.to_numeric(0.75 * df[beta_col] * np.sqrt(df[Hs0_col] * Lp), errors="coerce")
setup = pd.to_numeric(0.35 * df[beta_col] * np.sqrt(df[Hs0_col] * Lp), errors="coerce")
S_total = np.sqrt(S_inc ** 2 + S_ig ** 2)
R2 = 1.1 * (setup + S_total / 2)
# Dissipative conditions
dissipative = df[beta_col] / (df[Hs0_col] / Lp)**(0.5) <= 0.3
setup.loc[dissipative] = 0.016 * (df[Hs0_col] * Lp) ** (0.5) # eqn 16
S_total.loc[dissipative] = 0.046 * (df[Hs0_col] * Lp) ** (0.5) # eqn 17
R2.loc[dissipative] = 0.043 * (df[Hs0_col] * Lp) ** (0.5) # eqn 18
dissipative = df[beta_col] / (df[Hs0_col] / Lp) ** (0.5) <= 0.3
setup.loc[dissipative] = 0.016 * (df[Hs0_col] * Lp) ** (0.5)  # eqn 16
S_total.loc[dissipative] = 0.046 * (df[Hs0_col] * Lp) ** (0.5)  # eqn 17
R2.loc[dissipative] = 0.043 * (df[Hs0_col] * Lp) ** (0.5)  # eqn 18
return R2, setup, S_total, S_inc, S_ig
if __name__ == '__main__':
if __name__ == "__main__":
pass
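A worked call of sto06_individual with illustrative inputs, to make the branch logic concrete: with Hs0 = 3 m, Tp = 10 s and beta = 0.1, Lp = 9.8 * Tp**2 / 2 / pi is about 156 m, so beta / (Hs0 / Lp) ** 0.5 is roughly 0.72 > 0.3 and the non-dissipative branch applies.

```python
# Illustrative inputs, not values from the dataset
R2, setup, S_total, S_inc, S_ig = sto06_individual(Hs0=3.0, Tp=10.0, beta=0.1)
print(round(R2, 2), round(setup, 2), round(S_total, 2))  # roughly 1.98 0.76 2.08
```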

@@ -19,14 +19,14 @@ def shapes_from_shp(shp_file):
shapes = []
ids = []
properties = []
for feat in fiona.open(shp_file, 'r'):
shapes.append(shape(feat['geometry']))
ids.append(feat['id'])
properties.append(feat['properties'])
for feat in fiona.open(shp_file, "r"):
shapes.append(shape(feat["geometry"]))
ids.append(feat["id"])
properties.append(feat["properties"])
return shapes, ids, properties
def convert_coord_systems(g1, in_coord_system='EPSG:4326', out_coord_system='EPSG:28356'):
def convert_coord_systems(g1, in_coord_system="EPSG:4326", out_coord_system="EPSG:28356"):
"""
Converts coordinates from one coordinate system to another. Needed because shapefiles are usually defined in
lat/lon but should be converted to GDA to calculate distances.
@@ -38,7 +38,8 @@ def convert_coord_systems(g1, in_coord_system='EPSG:4326', out_coord_system='EPS
project = partial(
pyproj.transform,
pyproj.Proj(init=in_coord_system), # source coordinate system
pyproj.Proj(init=out_coord_system)) # destination coordinate system
pyproj.Proj(init=out_coord_system),
) # destination coordinate system
g2 = transform(project, g1) # apply projection
return g2
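A usage sketch for convert_coord_systems (coordinates illustrative): shapely points go in as (lon, lat) in EPSG:4326 and come back as eastings/northings in metres, since EPSG:28356 is GDA94 / MGA zone 56:

```python
from shapely.geometry import Point

p_wgs84 = Point(151.30, -33.71)           # lon, lat on the NSW coast
p_mga56 = convert_coord_systems(p_wgs84)  # metres east/north in MGA zone 56
print(p_mga56.x, p_mga56.y)
```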
@@ -59,15 +60,19 @@ def distance_to_intersection(lat, lon, landward_orientation, beach, line_strings
start_point = convert_coord_systems(start_point)
distance = 1000  # m; look up to 1000 m for an intersection
landward_point = Point(start_point.coords.xy[0] + distance * np.cos(np.deg2rad(landward_orientation)),
start_point.coords.xy[1] + distance * np.sin(np.deg2rad(landward_orientation)))
landward_point = Point(
start_point.coords.xy[0] + distance * np.cos(np.deg2rad(landward_orientation)),
start_point.coords.xy[1] + distance * np.sin(np.deg2rad(landward_orientation)),
)
landward_line = LineString([start_point, landward_point])
seaward_point = Point(start_point.coords.xy[0] - distance * np.cos(np.deg2rad(landward_orientation)),
start_point.coords.xy[1] - distance * np.sin(np.deg2rad(landward_orientation)))
seaward_point = Point(
start_point.coords.xy[0] - distance * np.cos(np.deg2rad(landward_orientation)),
start_point.coords.xy[1] - distance * np.sin(np.deg2rad(landward_orientation)),
)
seaward_line = LineString([start_point, seaward_point])
# Look at relevant line_strings which have the same beach property in order to reduce computation time
line_strings = [s for s, p in zip(line_strings, line_properties) if p['beach'] == beach]
line_strings = [s for s, p in zip(line_strings, line_properties) if p["beach"] == beach]
# Check whether profile_line intersects with any lines in line_strings. If the intersection point is landwards,
# consider the distance negative; if seawards, positive.
@@ -99,7 +104,7 @@ def beach_profile_elevation(x_coord, df_profiles, profile_type, site_id):
# Get profile
df_profile = df_profiles.query('profile_type == "{}" and site_id =="{}"'.format(profile_type, site_id))
return np.interp(x_coord, df_profile.index.get_level_values('x'), df_profile['z'])
return np.interp(x_coord, df_profile.index.get_level_values("x"), df_profile["z"])
def parse_profile_features(df_sites, df_profiles, dune_crest_shp, dune_toe_shp):
@@ -111,51 +116,38 @@ def parse_profile_features(df_sites, df_profiles, dune_crest_shp, dune_toe_shp):
# Get site information. Base our profile features on each site
df_profile_features = df_sites
features = {
'dune_crest':
{
'file': dune_crest_shp
},
'dune_toe':
{
'file': dune_toe_shp
},
}
features = {"dune_crest": {"file": dune_crest_shp}, "dune_toe": {"file": dune_toe_shp}}
# Import our dune crests and toes
for feat in features.keys():
shapes, _, properties = shapes_from_shp(features[feat]['file'])
shapes, _, properties = shapes_from_shp(features[feat]["file"])
shapes = [convert_coord_systems(x) for x in shapes]
# Figure out the x coordinates of our crest and toes, by looking at where our beach sections intersect our
# shape files.
col_name = '{}_x'.format(feat)
df_profile_features[col_name] = df_profile_features['profile_x_lat_lon'] + \
df_profile_features.apply(lambda row:
distance_to_intersection(
row['lat'], row['lon'], row['orientation'],
row['beach'], shapes, properties),
axis=1)
col_name = "{}_x".format(feat)
df_profile_features[col_name] = df_profile_features["profile_x_lat_lon"] + df_profile_features.apply(
lambda row: distance_to_intersection(
row["lat"], row["lon"], row["orientation"], row["beach"], shapes, properties
),
axis=1,
)
# Get the elevations of the crest and toe
col_name = '{}_z'.format(feat)
df_profile_features[col_name] = df_profile_features.apply(lambda row:
beach_profile_elevation(
row['{}_x'.format(feat)],
df_profiles,
'prestorm',
row.name),
axis=1)
df_profile_features = df_profile_features.drop(columns=['beach', 'lat', 'lon', 'orientation'])
col_name = "{}_z".format(feat)
df_profile_features[col_name] = df_profile_features.apply(
lambda row: beach_profile_elevation(row["{}_x".format(feat)], df_profiles, "prestorm", row.name), axis=1
)
df_profile_features = df_profile_features.drop(columns=["beach", "lat", "lon", "orientation"])
return df_profile_features
if __name__ == '__main__':
data_folder = './data/interim'
df_sites = pd.read_csv(os.path.join(data_folder, 'sites.csv'), index_col=[0])
df_profiles = pd.read_csv(os.path.join(data_folder, 'profiles.csv'), index_col=[0, 1, 2])
if __name__ == "__main__":
data_folder = "./data/interim"
df_sites = pd.read_csv(os.path.join(data_folder, "sites.csv"), index_col=[0])
df_profiles = pd.read_csv(os.path.join(data_folder, "profiles.csv"), index_col=[0, 1, 2])
dune_crest_shp = './data/raw/profile_features/dune_crests.shp'
dune_toe_shp = './data/raw/profile_features/dune_toes.shp'
dune_crest_shp = "./data/raw/profile_features/dune_crests.shp"
dune_toe_shp = "./data/raw/profile_features/dune_toes.shp"
df_profile_features = parse_profile_features(df_sites, df_profiles, dune_crest_shp, dune_toe_shp)
df_profile_features.to_csv('./data/interim/profile_features.csv')
df_profile_features.to_csv("./data/interim/profile_features.csv")
