initial commit to setup the repo

commit 66e671e4dc on master by tinoheimhuber (5 years ago)

@@ -0,0 +1,60 @@
%convert a .t3s triangular mesh file into an RMA .rm1 geometry file
clear all
close all
t3sFilename = 'Daly_5m_Mesh.t3s'; %input mesh file
BathyIndex = 1; %index of the attribute column that holds the bathymetry
fid = fopen(t3sFilename,'r');
tline = fgets(fid);
%scan the header for the node and element counts until the :EndHeader line
while strcmp(tline(1:min(length(tline),10)),':EndHeader')== 0
if strcmp(tline(1:min(length(tline),10)),':NodeCount')== 1
nodeCount=str2num(tline(11:end));
end
if strcmp(tline(1:min(length(tline),13)),':ElementCount')== 1
elementCount=str2num(tline(14:end));
end
tline = fgets(fid);
end
tline = fgets(fid);
%%%%Read T3S file: node coordinates and bathymetry
for i = 1 : nodeCount
tempLine=strsplit(tline,' ');
node(i).X = str2num(tempLine{1});
node(i).Y = str2num(tempLine{2});
node(i).Z = str2num(tempLine{2+BathyIndex});
tline = fgets(fid);
end
%read element connectivity (three corner nodes per triangular element)
for i = 1 : elementCount
tempLine=strsplit(tline,' ');
element(i).p1 = str2num(tempLine{1});
element(i).p2 = str2num(tempLine{2});
element(i).p3 = str2num(tempLine{3});
tline = fgets(fid);
end
fclose(fid);
rmaFilename = [t3sFilename(1:end-3) , 'rm1'];
fid1 = fopen(rmaFilename,'w');
%write the .rm1 header lines
fprintf(fid1,'\n');
fprintf(fid1,' 0 1 1 1 1 1 0 0 0 0 1 0 0 1 0 0.0 0.0 2\n');
fprintf(fid1,' 0. 0. 0.0000 0.0000 0.0000 0.0000 0. 0.\n');
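%The loop below writes one line per element: the element number, its three corner node
%numbers (with the mid-side node slots left as 0), fixed flag fields, a 0.000 angle and
%the element number again; the meaning of the flag fields is assumed from the format string.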
for i = 1 : elementCount
fprintf(fid1,'%5.0f%5.0f 0%5.0f 0%5.0f 0 0 0 1 0.000%5.0f\n', ...
i,element(i).p1,element(i).p2,element(i).p3,i);
end
fprintf(fid1,' 9999\n');
%write the node block: node number, X, Y, bed elevation and trailing zero fields
for i = 1 : nodeCount
fprintf(fid1,'%10.0f%16.7f%20.7f%14.4f%70.0f 0.0000\n', ...
i,node(i).X,node(i).Y,node(i).Z,0);
end
fprintf(fid1,' 9999\n');
fprintf(fid1,' 9999\n');
fprintf(fid1,' 0 NENTRY\n');
fprintf(fid1,' 0 NCLM\n');
fprintf(fid1,'ENDDATA\n');

@@ -0,0 +1,159 @@
# -*- coding: utf-8 -*-
#==========================================================#
#Last Updated - June 2018
#@author: z5025317 Valentin Heimhuber
#code for combining all extracted RMA2 and RMA11 results into a single data frame and saving it to CSV
#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
#==========================================================#
#==========================================================#
#Input parameters and directories
#==========================================================#
# Set working directory (where postprocessed NARCliM data is located)
#set beginning and end years and corresponding scenario code
fs=['Hwq003', 'Hwq005']
HDvariables = ['depth', 'elev','vel']
WQvariables = ['sal']
Subset_years = False
startyear=1999 #years need to be adjusted based on the time period of the model runs
endyear=2004
#year=range(startyear, endyear+1)
#set directory path for output files
output_directory = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Output/Postprocessed/Compound_data/'
nodes_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Chainages/Hunter_nodes.csv'
#read csv file with nodes and chainages to extract data from
node = pd.read_csv(nodes_csv)['Hunter'].values
chainages = pd.read_csv(nodes_csv)['x_km'].values
#==========================================================#
#==========================================================#
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
#==========================================================#
#==========================================================#
#data extraction for RMA11 Variables
#==========================================================#
WQ_Summary_df = pd.DataFrame()
for variable in WQvariables:
for f in fs:
#f = 'Hwq003' #debugging override disabled so that the loop actually iterates over all runs in fs
input_directory = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Output2/'+ f
# Set working directory (where the extracted RMA results are located)
os.chdir(input_directory)
Summary_df = pd.DataFrame()
df = pd.DataFrame()
for NODE in node:
NODE = str(NODE)
#set input and output directories
#==========================================================#
#Load data file
Clim_Var_CSVs = glob.glob('*_'+ NODE + '_*WQ*')
print(Clim_Var_CSVs)
print(NODE)
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ')
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
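#note: the extracted RMA text files store time as a Year column plus decimal hours since the
#start of that year (assumed), e.g. Year=1999, Hour=26.5 maps to 1999-01-02 02:30:00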
df= df.drop(columns=['Year', 'Hour'])
#df.columns = [NODE+'_Sal'] #, NODE+'_Tem']
df.columns = [NODE + '_'+ variable + '_'+ f]
#df = df.loc[~df.index.duplicated(keep='first')]
Summary_df = pd.concat([Summary_df, df], axis=1)
out_path = input_directory + '/' + f + '_' + variable + '4.csv'
print('writing ' + out_path)
Summary_df.to_csv(out_path)
#Optionally cut down the summary df to common years
if Subset_years:
Summary_df = Summary_df[datetime.strptime(str(startyear) + ' 01 01', '%Y %m %d').date():datetime.strptime(str(endyear) + ' 06 30', '%Y %m %d').date()]
WQ_Summary_df = pd.concat([WQ_Summary_df ,Summary_df], axis=1, join='outer')
#==========================================================#
##==========================================================#
##data extraction for RMA2 variables
##==========================================================#
#HD_Summary_df = pd.DataFrame()
#for variable in HDvariables:
# Summary_df = pd.DataFrame()
# df = pd.DataFrame()
# for f in fs:
# #set input and output directories
# input_directory = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Output/' + f
# # Set working directory (where postprocessed NARCliM data is located)
# os.chdir(input_directory)
# #==========================================================#
# #Load data file
# if variable == 'depth' or variable == 'elev':
# Clim_Var_CSVs = glob.glob('*' + variable + '*')
# clim_var_csv_path = Clim_Var_CSVs[0]
# df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ')
# df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
# df= df.drop(columns=['Year', 'Hour'])
# a=len(df.columns)-1
# df=df.iloc[:,:a]
# if variable == 'vel':
# #x velocity
# Clim_Var_CSVs = glob.glob('*' +'x'+ variable + '*')
# clim_var_csv_path = Clim_Var_CSVs[0]
# df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ')
# df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
# dfx= df.drop(columns=['Year', 'Hour','1'])
# #y velocity
# Clim_Var_CSVs = glob.glob('*' +'y'+ variable + '*')
# clim_var_csv_path = Clim_Var_CSVs[0]
# df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ')
# df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
# dfy= df.drop(columns=['Year', 'Hour','1'])
# df = np.sqrt(dfx*dfx + dfy*dfy)
#
# df.columns = df.columns + '_'+ variable + '_'+ f
# Summary_df = pd.concat([Summary_df, df], axis=1, join='outer')
# #Optionally cut down the summary df to common years
# if Subset_years:
# Summary_df = Summary_df[datetime.strptime(str(startyear) + ' 01 01', '%Y %m %d').date():datetime.strptime(str(endyear) + ' 06 30', '%Y %m %d').date()]
# HD_Summary_df = pd.concat([HD_Summary_df , Summary_df], axis=1, join='outer')
##==========================================================#
#
#
#
#
##==========================================================#
##generate and save the final data frame as csv
##==========================================================#
#Compound_df = pd.concat([WQ_Summary_df , HD_Summary_df], axis=1, join='outer')
#var = 'Scn_'
#for f in fs:
# var = var+ '_' + f
#WQvars = 'WQ'
#for variabs in WQvariables:
# WQvars = WQvars + '_' + variabs
#out_path = output_directory + var + '_' + WQvars + '_' + str(startyear) + '_' + str(endyear) + '_compound.csv'
#Compound_df.to_csv(out_path)
# #==========================================================#
#
#

@@ -0,0 +1,221 @@
# -*- coding: utf-8 -*-
#==========================================================#
#Last Updated - June 2018
#@author: z5025317 Valentin Heimhuber
#code for creating comparison time series and exceedance plots of extracted RMA water quality results at selected estuary nodes
#Inputs: uses the per-node CSV files of extracted RMA11 results (created with the companion extraction scripts)
#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from matplotlib.backends.backend_pdf import PdfPages
from ggplot import *
matplotlib.style.use('ggplot')
#==========================================================#
#Input parameters
#==========================================================#
# Set working directory (where postprocessed NARCliM data is located)
#set beginning and end years and corresponding scenario code
fs=['Hwq003', 'Hwq005']
comparison_code = '003v005'
startyear=1999
endyear=1999
year=range(startyear, endyear+1)
#set directory path for in and output files
output_directory = 'C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/Output/Postprocessed' + comparison_code +'/'
nodes_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Chainages/Hunter_nodes.csv'
Flow_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/01_Input/BCGeneration/Scenarios/Calibration/Calibration_Greta.csv'
png_output_directory = output_directory + '/Figures/'
variables = ['Sal'] #Tem or Sal
#read csv file with nodes to extract data from
#node=[837, 666, 981, 59,527, 34, 1, 391]
node = pd.read_csv(nodes_csv)['Hunter'].values
chainages = pd.read_csv(nodes_csv)['x_km'].values
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
#==========================================================#
#==========================================================#
#100% automated part of the code doing the data extraction
#==========================================================
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(png_output_directory):
os.makedirs(png_output_directory)
Summary_df = pd.DataFrame()
for f in fs:
for NODE in node:
NODE = str(NODE)
#set input and output directories
input_directory = 'C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/Output/'+ f
#set directory path for individual png files
png_output_directory = output_directory + '/Figures/'
# Set working directory (where the extracted RMA results are located)
os.chdir(input_directory)
#==========================================================#
#Load data file
Clim_Var_CSVs = glob.glob('*'+ NODE + '*WQ*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ')
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
df= df.drop(columns=['Year', 'Hour'])
df.columns = [NODE+'_Sal', NODE+'_Tem']
#append to summary data frame
#map the Hwq run codes used here to scenario names (consistent with the companion scripts); the HCC cases below are kept for older runs
if f == 'Hwq003':
scname = 'Present'
if f == 'Hwq005':
scname = 'Far_Future'
if f in ['HCC010', 'HCC042']:
scname = 'Present'
if f == 'HCC011':
scname = 'Near_Future'
df.index = df.index - (datetime.strptime('2025 01 01', '%Y %m %d').date() - datetime.strptime('1995 01 01', '%Y %m %d').date())
if f == 'HCC012':
scname = 'Far_Future'
df.index = df.index - (datetime.strptime('2065 01 01', '%Y %m %d').date() - datetime.strptime('1995 01 01', '%Y %m %d').date())
if f in ['HCC040', 'HCC041']:
scname = 'Present_Resto'
df.columns = df.columns+'_'+ scname
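#note: the HCC011/HCC012 branches above shift the future-run timestamps back by 30/70 years
#(assuming 2025/2065 boundary forcing) so that all scenarios share the same 1995-based time axis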
Summary_df = pd.concat([Summary_df, df], axis=1, join='outer')
#cut down the summary df to common years
Summary_df = Summary_df[datetime.strptime(str(startyear) + ' 01 05', '%Y %m %d').date():datetime.strptime(str(endyear) + ' 12 31', '%Y %m %d').date()]
Summary_df.tail()
for variable in variables:
ii=1
for NODE in node:
NODE = str(NODE)
#=========#
#comparison time series plot at node
#=========#
#out name
png_out_file_name = comparison_code + '_' + variable + '_' + NODE + '_time_series.png'
png_out_path = png_output_directory + '/' + png_out_file_name
#
plotin_df = Summary_df.filter(regex=variable)
plotin_df = plotin_df.filter(regex=NODE)
#prepare data frame for plot
fig = plt.figure(figsize=(14,18))
ax=plt.subplot(4,1,1)
#start and end date of plots
plt.title(variable + '_' + NODE + ' - time series')
start_date = datetime(1996, 2, 1) #note: these plot window dates should fall within the startyear-endyear period defined above
end_date = datetime(1996, 2, 5)
plotin_df1=plotin_df[start_date:end_date]
ymin = min(plotin_df1.min()) - 0.01 *min(plotin_df1.min())
ymax = max(plotin_df1.max()) + 0.01 *max(plotin_df1.max())
plotin_df1.plot(ylim=(ymin,ymax) ,legend=False, ax=ax)
#plt.legend( loc=1)
plt.xticks([])
ax=plt.subplot(4,1,2)
start_date = datetime(1996, 2, 1)
end_date = datetime(1996, 2, 3)
plotin_df2=plotin_df[start_date:end_date]
ymin = min(plotin_df2.min()) - 0.1 *min(plotin_df2.min())
ymax = max(plotin_df2.max()) + 0.1 *max(plotin_df2.max())
plotin_df2.plot(ylim=(ymin,ymax),legend=False, ax=ax)
#plt.legend( loc=1)
ax.xaxis.grid(False)
ax=plt.subplot(4,1,3)
plt.title(variable + '_' + NODE + ' - monthly maximum')
Monthly_max = plotin_df.resample('M').max()
Monthly_max.plot(ax=ax,legend=False)
plt.legend( loc=1)
ax.xaxis.grid(False)
ax=plt.subplot(4,1,4)
plt.title(variable + '_' + NODE + ' - monthly mean')
Monthly_mean = plotin_df.resample('M').mean()
Monthly_mean.plot(ax=ax)
#plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
# ncol=2, mode="expand", borderaxespad=0.)
#plt.legend( loc=1)
ax.xaxis.grid(False)
#ax.patch.set_alpha(ALPHA_figs)
fig.patch.set_alpha(ALPHA_figs)
fig.tight_layout()
fig.savefig(png_out_path)
plt.close()
#=========#
png_out_file_name = comparison_code + '_' + variable + '_' + NODE + '_exceedance.png'
png_out_path = png_output_directory + '/' + png_out_file_name
fig = plt.figure(figsize=(10,5))
ax=plt.subplot(1,1,1)
start_date = datetime(1995, 2, 1)
end_date = datetime(1998, 12, 31)
plotin_df3=plotin_df[start_date:end_date]
Qhist = plotin_df3.filter(regex='Present')
col = Qhist.columns[0]
Qhist = Qhist.sort_values(by=[col],ascending=False).values
Qnearf = plotin_df3.filter(regex='Near_Future')
col = Qnearf.columns[0]
Qnearf = Qnearf.sort_values(by=[col],ascending=False).values
Qmodern = plotin_df3.filter(regex='Far_Future')
col = Qmodern.columns[0]
Qmodern = Qmodern.sort_values(by=[col],ascending=False).values
Nh = len(Qhist) # number of data points
Nm = len(Qmodern)
# note: divide by float(N) so Python 2 integer division does not truncate the exceedance percentages
plt.plot(np.arange(Nh)/float(Nh)*100, Qhist)
plt.plot(np.arange(Nm)/float(Nm)*100, Qnearf)
plt.plot(np.arange(Nm)/float(Nm)*100, Qmodern)
#plt.plot(np.sort(Qmodern)[::-1])
plt.xlabel('% Exceedance')
if variable == 'Sal':
plt.ylabel('Salinity [PSU]')
if variable == 'Tem':
plt.ylabel('Temperature [C]')
plt.legend(['Present', 'Near Future', 'Far Future'])
ax.xaxis.grid(False)
#ax.patch.set_alpha(ALPHA_figs)
fig.patch.set_alpha(ALPHA_figs)
fig.tight_layout()
fig.savefig(png_out_path)
plt.close()

@@ -0,0 +1,385 @@
# -*- coding: utf-8 -*-
#==========================================================#
#Last Updated - June 2018
#@author: z5025317 Valentin Heimhuber
#code for tidal and longitudinal analysis of extracted RMA water level results along the estuary (high/low tide profiles, tidal cycle animations and per-node plots)
#Inputs: uses the compound CSV of extracted RMA results plus the tide and flow boundary CSVs defined below
#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from matplotlib.backends.backend_pdf import PdfPages
from ggplot import *
matplotlib.style.use('ggplot')
#==========================================================#
#Input parameters
#==========================================================#
#set scenario codes and beginning and end years and corresponding scenario code
fs=['Hwq003', 'Hwq005']
scenario_real_names = ['Present', 'Far_Future']
comparison_code = '003v005' #label used in output figure names; assumed to match the companion node plotting script
variable = 'elev' #'elev', 'depth' or 'vel'
startyear=1999
endyear=1999
#year=range(startyear, endyear+1)
#set directory path for output files
output_directory = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Output/Postprocessed/Figures'
png_output_directory = output_directory + '/' #per-node figures are written into the output directory above (assumed); used further below
#set input directories and data
#postprocessed RMA results
RMA_data_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Output/Postprocessed/Compound_data/Scn__Hwq003_Hwq005_WQ_sal_1999_1999compound.csv'
#tidal boundary data
tides_csv='C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/07_Modelling/01_Input/Tide Generator/Tidal Simulation Python Script (BMM)/Tides_1990_2010.csv'
#CSV file of mesh nodes and corresponding chainages
nodes_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Chainages/Hunter_nodes.csv'
#River flow boundary
Flow_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/01_Input/BCGeneration/Scenarios/Calibration/Calibration_Greta.csv'
#==========================================================#
#==========================================================#
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
#==========================================================#
#========================================================#
#automated part of the code doing the data extraction
#==========================================================
#read csv file with extracted RMA data
RMA_df = pd.read_csv(RMA_data_csv, parse_dates=True, index_col=0)
Summary_df = RMA_df #the extracted RMA results are referred to as Summary_df throughout the analysis below
#read csv file with nodes to extract data from
node = pd.read_csv(nodes_csv)['Hunter'].values
chainages = pd.read_csv(nodes_csv)['x_km'].values
#Load boundary flow data
Flow_df = pd.read_csv(Flow_csv, parse_dates=True, index_col=0)['Q[ML/d]']
Flow_df.columns = ['Q[ML/d]']
Flow_df = Flow_df[datetime.strptime(str(startyear) + ' 07 13', '%Y %m %d').date():datetime.strptime(str(2005) + ' 07 19', '%Y %m %d').date()]
flowannual = Flow_df.resample('A').mean()
Flow_df.resample('A').mean().plot()
#Load tide boundary data
tides_df = pd.read_csv(tides_csv, parse_dates=True, index_col=0)
tides_df.columns = ['tide']
#append to summary data frame
#note: the block below is leftover from the per-node extraction script (f and df are not defined in this script) and has been commented out
#if f in ['HCC010', 'HCC042']:
#scname = 'Present'
#if f == 'HCC011':
#scname = 'Near_Future'
#df.index = df.index - (datetime.strptime('2025 01 01', '%Y %m %d').date() - datetime.strptime('1995 01 01', '%Y %m %d').date())
#if f == 'HCC012':
#scname = 'Far_Future'
#df.index = df.index - (datetime.strptime('2065 01 01', '%Y %m %d').date() - datetime.strptime('1995 01 01', '%Y %m %d').date())
#if f in ['HCC040', 'HCC041']:
#scname = 'Present_Resto'
#df.columns = df.columns+'_'+ scname
#cut down the summary df to common years
Summary_df = Summary_df[datetime.strptime(str(startyear) + ' 07 13', '%Y %m %d').date():datetime.strptime(str(endyear) + ' 07 19', '%Y %m %d').date()]
Summary_df.tail()
#Summary_df.columns = chainages
#type(Summary_df.index)
#cut down the summary df to common years
Summary_df = Summary_df[datetime.strptime(str(startyear) + ' 01 05', '%Y %m %d').date():datetime.strptime(str(endyear) + ' 12 31', '%Y %m %d').date()]
Summary_df.tail()
variables = ['Sal']
Present_df = Summary_df.filter(regex='Present')
Present_df.columns
Far_future_df = Summary_df.filter(regex='Far')
Far_future_df.columns
#==========================================================#
#Tidal analysis
#==========================================================#
#read in tide data
Plot_scenario = 'Flood'
ylims = [-1.1,8]
tides_df = tides_df[datetime.strptime(str(startyear) + ' 07 13', '%Y %m %d').date():datetime.strptime(str(endyear) + ' 07 19', '%Y %m %d').date()]
central_tides_df = tides_df[datetime.strptime(str(startyear) + ' 07 16', '%Y %m %d').date():datetime.strptime(str(endyear) + ' 07 16', '%Y %m %d').date()]
maxtide_index = central_tides_df['tide'].idxmax()
tides_df['tide'].max()
end_maxtide_index = maxtide_index + timedelta(days=0.4)
beg_maxtide_index = maxtide_index - timedelta(days=0.2)
short_tides_df = tides_df[beg_maxtide_index: end_maxtide_index]
short_tides_df.plot(cmap='viridis', legend=True)
mintide_index = short_tides_df['tide'].idxmin()
#plot the minimum and maximum elevation along the estuary occurring within a single tidal cycle
Short_tide_elev_df = Summary_df[min(short_tides_df.index):max(short_tides_df.index)]
max_min_1cycle_elev_0slr = pd.concat([Short_tide_elev_df.filter(regex='Present').max(), Short_tide_elev_df.filter(regex='Present').min()], axis=1, join='inner')
max_min_1cycle_elev_09slr = pd.concat([Short_tide_elev_df.filter(regex='Far').max(), Short_tide_elev_df.filter(regex='Far').min()], axis=1, join='inner')
max_min_1cycle_elev_0slr.index = chainages
max_min_1cycle_elev_09slr.index = chainages
max_min_1cycle_elev = pd.concat([max_min_1cycle_elev_0slr, max_min_1cycle_elev_09slr], axis=1, join='outer')
max_min_1cycle_elev.columns = ['MaxElev 0 SLR','MinElev 0 SLR','MaxElev 0.9 SLR','MinElev 0.9 SLR' ]
max_min_1cycle_elev.plot(cmap='viridis', legend=True)
#plot the elevation along the estuary at exactly the time of high and low tide
max_min_extime_elev_0slr = pd.concat([Summary_df.filter(regex='Present').loc[maxtide_index], Summary_df.filter(regex='Present').loc[mintide_index]], axis=1, join='outer')
max_min_extime_elev_09slr = pd.concat([Summary_df.filter(regex='Far').loc[maxtide_index], Summary_df.filter(regex='Far').loc[mintide_index]], axis=1, join='outer')
max_min_extime_elev_0slr.index = chainages
max_min_extime_elev_09slr.index = chainages
max_min_extime_elev = pd.concat([max_min_extime_elev_0slr, max_min_extime_elev_09slr], axis=1, join='outer')
max_min_extime_elev.columns = ['HigT Elev 0SLR','LowT Elev 0SLR','HigT Elev 0.9 SLR','LowT Elev 0.9 SLR' ]
max_min_extime_elev.plot(cmap='viridis', legend=True)
png_out_name = png_output_directory + variable + '_high_low_tide_analysis_' + Plot_scenario + '.pdf'
fig = plt.figure(figsize=(17,20))
ax=plt.subplot(3,1,1)
plt.title('Max and min water surface elevation along the estuary over one tidal cycle')
ax.set_prop_cycle(color=['slateblue', 'slateblue', 'tomato', 'tomato'], linestyle=['--', '-', '--', '-'], lw=[2,2,2,2])
max_min_1cycle_elev.plot( legend=False, ax=ax)
plt.axhline(y=0, color='slateblue', linestyle='--', lw=1, alpha=0.5)
plt.axhline(y=0.9, color='tomato', linestyle='--', lw=1, alpha=0.5)
plt.ylim(ylims)
plt.ylabel("Water level [m AHD]")
plt.xlabel("Distance from ocean boundary [km]")
# Put a legend to the right of the current axis
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
#plot tides
ax=plt.subplot(3,1,2)
plt.title('Max and min water surface elevation along the estuary at time of high & low tide')
ax.set_prop_cycle(color=['slateblue', 'slateblue', 'tomato', 'tomato'], linestyle=['--', '-', '--', '-'], lw=[2,2,2,2])
max_min_extime_elev.plot(legend=False, ax=ax)
plt.ylabel("Water level [m AHD]")
plt.ylim(ylims)
plt.xlabel("Distance from ocean boundary [km]")
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
#plot tides
ax=plt.subplot(3,1,3)
plt.ylim(ylims)
plt.title('Tide at open ocean boundary')
short_tides_df.plot(cmap='viridis', legend=False, ax=ax)
ax.axvline(maxtide_index, color='black', linestyle='--')
ax.axvline(mintide_index, color='black', linestyle='-')
plt.xlabel("Time")
plt.ylabel("Water level [m AHD]")
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
#==========================================================#
#animate 2 tidal cycles baseline scenario:
#==========================================================#
Six_day_tide_elev_df = Summary_df.filter(regex='Present')[maxtide_index - timedelta(days=2): maxtide_index + timedelta(days=3)]
Six_day_tide_elev_df.columns = chainages
Six_day_tides_df = tides_df[maxtide_index - timedelta(days=2): maxtide_index + timedelta(days=3)]
Six_day_tides_df.plot(cmap='viridis', legend=True)
Six_day_flow_df = Flow_df[maxtide_index - timedelta(days=2): maxtide_index + timedelta(days=3)]
Six_day_flow_df = pd.concat([Six_day_tides_df, Six_day_flow_df], axis=1, join='outer')['Q[ML/d]'].interpolate()
Six_day_flow_df.plot(cmap='viridis', legend=True)
ii=1
for index in Six_day_tide_elev_df.index:
#index = Six_day_tide_elev_df.index[1]
png_out_path = output_directory + '/TidalCycle_' + variable + '_' + Plot_scenario + '3/'
if not os.path.exists(png_out_path):
os.makedirs(png_out_path)
#png_out_name = png_out_path + str(index) + 'longitudinalXS.png'
png_out_name = png_out_path + str(ii) + variable + '_longitudinalXS.png'
fig = plt.figure(figsize=(17,20))
ax=plt.subplot(3,1,1)
plt.title('Water surface elevation along the estuary')
Six_day_tide_elev_df.loc[index].plot(color='blue', legend=False)
#plt.axhline(y=0, color='r', linestyle='--', lw=1, alpha=0.5)
plt.axhline(tides_df.loc[index].values, color='black',lw=0.5, linestyle='--')
plt.ylim(ylims)
plt.ylabel("Water level [m AHD]")
plt.xlabel("Distance from ocean boundary [km]")
#plot tides
ax=plt.subplot(3,1,2)
plt.ylim(-1.1,1.3)
plt.title('Tide at open ocean boundary')
Six_day_tides_df.plot(cmap='viridis', legend=False, ax=ax)
ax.axvline(index, color='green', linestyle='--')
plt.xlabel("Time")
plt.ylabel("Water level [m AHD]")
ax=plt.subplot(3,1,3)
#plt.ylim(ylims)
plt.title('Tide at open ocean boundary')
Six_day_flow_df.plot(color='black', legend=False, ax=ax)
ax.axvline(index, color='green', linestyle='--')
plt.xlabel("Time")
plt.ylabel("Hunter Inflow [ML/day]")
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
ii=ii+1
#==========================================================#
#animate 2 tidal cycles baseline + SLR scenario:
#==========================================================#
Six_day_tide_elev_df_0slr = Summary_df.filter(regex='Present')[maxtide_index - timedelta(days=1): maxtide_index + timedelta(days=1)]
Six_day_tide_elev_df_0slr.columns = chainages
Six_day_tide_elev_df_09slr = Summary_df.filter(regex='Far')[maxtide_index - timedelta(days=1): maxtide_index + timedelta(days=1)]
Six_day_tide_elev_df_09slr.columns = chainages
Six_day_tides_df = tides_df[maxtide_index - timedelta(days=1): maxtide_index + timedelta(days=1.5)]
Six_day_tides_df.plot(cmap='viridis', legend=True)
ii=0
for index in Six_day_tide_elev_df_0slr.index:
ii=ii+1
#index = Six_day_tide_elev_df.index[1]
png_out_path = output_directory + '/TidalCycle_SLR_' + variable + '_' + Plot_scenario + '/'
if not os.path.exists(png_out_path):
os.makedirs(png_out_path)
#png_out_name = png_out_path + str(index) + 'longitudinalXS.png'
png_out_name = png_out_path + str(ii) + variable + '_longitudinalXS.png'
max_min_extime_elev_both = pd.concat([Six_day_tide_elev_df_0slr.loc[index], Six_day_tide_elev_df_09slr.loc[index]], axis=1, join='outer')
max_min_extime_elev_both.columns = ['Present Day','0.9m SLR']
fig = plt.figure(figsize=(17,12))
ax=plt.subplot(2,1,1)
plt.ylim(ylims)
plt.title('Water surface elevation along the estuary')
ax.set_prop_cycle(color=['slateblue','tomato'], linestyle=['-', '-'], lw=[2,2])
max_min_extime_elev_both.plot(ax=ax)
ax.legend(loc=1)
plt.axhline(y=0, color='r', linestyle='--', lw=1, alpha=0.5)
plt.ylim(-1.1,2.5)
plt.ylabel("Water level [m AHD]")
plt.xlabel("Distance from ocean boundary [km]")
#plot tides
ax=plt.subplot(2,1,2)
plt.ylim(-1.1,1.5)
plt.title('Tide at open ocean boundary')
Six_day_tides_df.plot(cmap='viridis', legend=False, ax=ax)
ax.axvline(index, color='green', linestyle='--')
plt.xlabel("Time")
plt.ylabel("Water level [m AHD]")
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
for NODE in node:
NODE = str(NODE)
#=========#
#comparison time series plot at node
#=========#
#out name
png_out_file_name = comparison_code + '_' + variable + '_' + NODE + '_time_series.png'
png_out_path = png_output_directory + '/' + png_out_file_name
#
plotin_df = Summary_df.filter(regex=NODE)
#prepare data frame for plot
fig = plt.figure(figsize=(14,18))
ax=plt.subplot(4,1,1)
#start and end date of plots
plt.title(variable + '_' + NODE + ' - time series')
start_date = datetime(1996, 2, 1)
end_date = datetime(1996, 2, 5)
plotin_df1=plotin_df[start_date:end_date]
ymin = min(plotin_df1.min()) - 0.01 *min(plotin_df1.min())
ymax = max(plotin_df1.max()) + 0.01 *max(plotin_df1.max())
plotin_df1.plot(ylim=(ymin,ymax) ,legend=False, ax=ax)
#plt.legend( loc=1)
plt.xticks([])
ax=plt.subplot(4,1,2)
start_date = datetime(1996, 2, 1)
end_date = datetime(1996, 2, 3)
plotin_df2=plotin_df[start_date:end_date]
ymin = min(plotin_df2.min()) - 0.1 *min(plotin_df2.min())
ymax = max(plotin_df2.max()) + 0.1 *max(plotin_df2.max())
plotin_df2.plot(ylim=(ymin,ymax),legend=False, ax=ax)
#plt.legend( loc=1)
ax.xaxis.grid(False)
ax=plt.subplot(4,1,3)
plt.title(variable + '_' + NODE + ' - monthly maximum')
Monthly_max = plotin_df.resample('M').max()
Monthly_max.plot(ax=ax,legend=False)
plt.legend( loc=1)
ax.xaxis.grid(False)
ax=plt.subplot(4,1,4)
plt.title(variable + '_' + NODE + ' - monthly mean')
Monthly_mean = plotin_df.resample('M').mean()
Monthly_mean.plot(ax=ax)
#plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
# ncol=2, mode="expand", borderaxespad=0.)
#plt.legend( loc=1)
ax.xaxis.grid(False)
#ax.patch.set_alpha(ALPHA_figs)
fig.patch.set_alpha(ALPHA_figs)
fig.tight_layout()
fig.savefig(png_out_path)
plt.close()
#=========#

@@ -0,0 +1,107 @@
# Set working directory where the python code is located and results are to be stored
import os
os.chdir('H:/WRL_Projects/Hunter_CC_Modeling/Code/RMA_result_explorer')
from pylab import *
import sys
from py_rmatools import rma
import pandas as pd
#==========================================================#
#Input parameters
#==========================================================#
#print "Enter the RMA filename (without extension):"
#f=raw_input()
#set beginning and end years and corresponding scenario code
run='Hwq003'
startyear=1999
endyear=1999
year=range(startyear, endyear+1)
print('extracting WQ data for ' + run)
#set run directory where RMA results are located
run_directory = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/02_Simulations/' + run +'/'
#set directory path for output files
output_directory = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Output_gridded1/'+ run + '/'
#set model element nodes from chainage file
#node=[666, 59,527]
#read csv file with nodes to extract data from
nodes_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Chainages/Hunter_nodes_gridded.csv'
node = pd.read_csv(nodes_csv)['Hunter'].values
#==========================================================#
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
time=[]
files=[]
for ii in node:
files.append(output_directory + run + '_%d_WQ.txt' %ii)
I_const=[1]
for kk in list(enumerate(node)):
target = open(files[kk[0]], 'w')
target.write("Year Hour")
for ii in I_const:
target.write(" %d" %(ii))
target.write("\n")
target.close()
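#each per-node output file written above and appended below is a small space-separated table:
#a 'Year Hour 1' header row followed by one '<year> <decimal hour> <constituent value>' row per saved timestep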
for jj in year:
print(jj)
f=run_directory + run +'_%d_SAL' %jj
R=rma()
R.open(f)
while R.next():
time.append(R.time)
for kk in list(enumerate(node)):
target = open(files[kk[0]], 'a')
target.write("%i %f" %(jj,time[-1]))
for ii in I_const:
target.write(" %f" %(R.constit[ii][kk[1]]))
target.write("\n")
target.close()
###########################
#I_const=[1,2]
#filename1= output_directory + run + '_SAL.txt'
#target = open(filename1, 'w')
#target.write("Year Hour ")
#for inode in node:
# target.write("%i " % inode)
#target.write('\n')
#print (filename1)
#for jj in year:
# f1=run_directory + run + '_%d' %jj
# R=rma()
# print(f1)
# R.open(f1)
# print (jj)
# while R.next():
# time.append(R.time)
# for kk in list(enumerate(node)):
# target.write("%i %f" %(jj,time[-1]))
# target.write(" %f" %(R.constit[I_const[ii]][kk[1]]))
# target.write('\n')
#target.close()
#
#
#
#for kk in node:
#
# filename1='DataCR046_%d_.txt' %kk
#

@@ -0,0 +1,291 @@
# -*- coding: utf-8 -*-
#==========================================================#
#Last Updated - June 2018
#@author: z5025317 Valentin Heimhuber
#code for processing RMA11 water quality results - using gridded approach instead of a chainage line (estuary longitudinal line)
#==========================================================#
#Load packages
#==========================================================#
import geopandas as gpd
import numpy as np
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from matplotlib.backends.backend_pdf import PdfPages
from ggplot import *
matplotlib.style.use('ggplot')
#==========================================================#
#Input parameters
#==========================================================#
#set scenario codes and beginning and end years and corresponding scenario code
fs=['Hwq003', 'Hwq005']
scenario_real_names = ['Present', '0.9m SLR']
WQvariables = ['SALINITY']
variable = 'elev' #'elev', 'depth' or 'vel'
maxrowsincsv = 15000
#name of scenario
Plot_scenario = 'HD_grid_V1'
ylims = [0,40]
#time frames for analysis
startdate = '1999 01 01'
enddate = '2000 12 30'
tideanalysis_day = '1999 05 26'
#year=range(startyear, endyear+1)
#load and refine node vs mesh shapefile to store the results of the statistics
Node_mesh = gpd.read_file('H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/GIS_data/Mesh_Node_GIS_Setup/hcc002_fishnet_500m_V3_joinXY.shp')
Node_mesh.index = Node_mesh['Field1']
Node_mesh = Node_mesh.iloc[:,-4:]
Node_mesh.columns = ['Node', 'X', 'Y', 'geometry']
node = Node_mesh['Node'].values
chainages = node
Node_mesh.plot()
#set directory path for output files
output_directory = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Output/Postprocessed/' + Plot_scenario + '/Figures/'
#set input directories and data
#tidal boundary data
tides_csv='C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/07_Modelling/01_Input/Tide Generator/Tidal Simulation Python Script (BMM)/Tides_1990_2010.csv'
#CSV file of mesh nodes and corresponding chainages
nodes_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Chainages/Hunter_nodes.csv'
#River flow boundary
Flow_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/01_Input/BCGeneration/Scenarios/Calibration/Calibration_Greta.csv'
#River bathy
bathy_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Chainages/Hunter_nodes_bathy.csv'
#input csv with species and their salinity optima and thresholds
salinity_eco_csv = "H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Eco_thresholds/Salintiy_Hunter_physiology_V1.csv"
salinity_eco_df = pd.read_csv(salinity_eco_csv, index_col=2)
salinity_eco_df = salinity_eco_df[salinity_eco_df.index.notnull()]
#==========================================================#
#==========================================================#
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
#==========================================================#
#==========================================================#
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
#==========================================================#
#========================================================#
#automated part of the code doing the data extraction
#==========================================================
#Load boundary flow data
Flow_df = pd.read_csv(Flow_csv, parse_dates=True, dayfirst=True, index_col=0)['Q[ML/d]']
Flow_df.columns = ['Q[ML/d]']
Flow_df = Flow_df[datetime.strptime(startdate , '%Y %m %d').date():datetime.strptime(enddate, '%Y %m %d').date()]
#Load tide boundary data
tides_dfm = pd.read_csv(tides_csv, parse_dates=True, index_col=0)
tides_dfm.columns = ['tide']
#Load bathymetry data
bathy_dfm = pd.read_csv(bathy_csv, index_col=0)
bathy_dfm.plot()
#read csv file with extracted RMA data
#RMA_df = pd.read_csv(RMA_data_csv, parse_dates=True, index_col=0)
#load RMA2 data
HD_Summary_df = pd.DataFrame()
for variable in WQvariables:
Summary_df = pd.DataFrame()
df = pd.DataFrame()
for f in fs:
#set input and output directories
input_directory = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Output/Raw/Output_gridded1/' + f
# Set working directory (where the extracted RMA results are located)
os.chdir(input_directory)
#==========================================================#
#Load data file
Clim_Var_CSVs = glob.glob('*' + variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
df= df.drop(columns=['Year', 'Hour'])
#df.columns = [NODE+'_Sal'] #, NODE+'_Tem']
df.columns = [s + variable + '_'+ f for s in df.columns]
#cut down the df to start and end date
df = df[datetime.strptime(startdate , '%Y %m %d').date():datetime.strptime(enddate, '%Y %m %d').date()]
Summary_df = pd.concat([Summary_df, df], axis=1, join='outer')
HD_Summary_df = pd.concat([HD_Summary_df , Summary_df], axis=1, join='outer')
#subset the input data frame
Present_df = HD_Summary_df.filter(regex='Hwq003')
Present_df.columns = chainages
Far_future_df = HD_Summary_df.filter(regex='Hwq005')
Far_future_df.columns = chainages
#do statistics to summarize the time series based on optimum, min and max thresholds
salinity_eco_df.columns
variable = 'Sal'
for index, row in salinity_eco_df.iterrows():
print(row["Minimum optimal"])
print(row["Maximum optimal"])
print(row['# species'])
print(row['Lower treshold'])
print(row['Upper treshold'])
Pres_Sal_median_df = pd.DataFrame(Present_df.median())
Pres_Sal_median_df.columns = ['med']
Present_df_95 = pd.DataFrame(Present_df.quantile(q=0.95, axis=0))
Present_df_05 = pd.DataFrame(Present_df.quantile(q=0.05, axis=0))
Present_stats = pd.concat([Present_df_05, Pres_Sal_median_df, Present_df_95], axis=1, join='outer')
Present_stats.columns = ['0_' + str(s) for s in Present_stats.columns]
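#the blocks below count, per calendar year, how many timesteps exceed (or fall below) each species
#threshold and convert that count to a percentage of the year; the divisor 365*24*2 assumes half-hourly output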
agg = ('>'+ str(row["Maximum optimal"]) + '_count', lambda x: x.gt(row["Maximum optimal"]).sum()),
Present_df_gtTHS = pd.DataFrame(Present_df.resample('A').agg(agg)*100/(365*24*2))
Present_df_gtTHS = pd.DataFrame(Present_df_gtTHS.transpose().mean(axis=1))
Present_df_gtTHS.columns = [str(int(row['# species'])) + '_0>' + 'MaOp']
Present_df_gtTHS.index = chainages
agg = ('>'+ str(row["Minimum optimal"]) + '_count', lambda x: x.lt(row["Minimum optimal"]).sum()),
Present_df_gtTHS_miop = pd.DataFrame(Present_df.resample('A').agg(agg)*100/(365*24*2))
Present_df_gtTHS_miop = pd.DataFrame(Present_df_gtTHS_miop.transpose().mean(axis=1))
Present_df_gtTHS_miop.columns = [str(int(row['# species'])) + '_0<' + 'MiOp']
Present_df_gtTHS_miop.index = chainages
agg = ('>'+ str(row["Upper treshold"]) + '_count', lambda x: x.gt(row["Upper treshold"]).sum()),
Present_df_gtTHS_malim = pd.DataFrame(Present_df.resample('A').agg(agg)*100/(365*24*2))
Present_df_gtTHS_malim = pd.DataFrame(Present_df_gtTHS_malim.transpose().mean(axis=1))
Present_df_gtTHS_malim.columns = [str(int(row['# species'])) + '_0>' + 'Malim']
Present_df_gtTHS_malim.index = chainages
agg = ('>'+ str(row["Lower treshold"]) + '_count', lambda x: x.lt(row["Lower treshold"]).sum()),
Present_df_gtTHS_lolim = pd.DataFrame(Present_df.resample('A').agg(agg)*100/(365*24*2))
Present_df_gtTHS_lolim = pd.DataFrame(Present_df_gtTHS_lolim.transpose().mean(axis=1))
Present_df_gtTHS_lolim.columns = [str(int(row['# species'])) + '_0<' + 'LoLim']
Present_df_gtTHS_lolim.index = chainages
#future
Far_future_Sal_median_df = pd.DataFrame(Far_future_df.median())
Far_future_Sal_median_df.columns = ['med']
Far_future_df_95 = pd.DataFrame(Far_future_df.quantile(q=0.95, axis=0))
Far_future_df_05 = pd.DataFrame(Far_future_df.quantile(q=0.05, axis=0))
Far_future_stats = pd.concat([Far_future_df_05, Far_future_Sal_median_df, Far_future_df_95], axis=1, join='outer')
Far_future_stats.columns = ['9_' + str(s) for s in Far_future_stats.columns]
agg = ('>'+ str(row["Maximum optimal"]) + '_count', lambda x: x.gt(row["Maximum optimal"]).sum()),
Far_future_df_gtTHS = pd.DataFrame(Far_future_df.resample('A').agg(agg)*100/(365*24*2))
Far_future_df_gtTHS = pd.DataFrame(Far_future_df_gtTHS.transpose().mean(axis=1))
Far_future_df_gtTHS.columns = [str(int(row['# species'])) + '_9>' + 'MaOp']
Far_future_df_gtTHS.index = chainages
agg = ('>'+ str(row["Minimum optimal"]) + '_count', lambda x: x.lt(row["Minimum optimal"]).sum()),
Far_future_df_gtTHS_miop = pd.DataFrame(Far_future_df.resample('A').agg(agg)*100/(365*24*2))
Far_future_df_gtTHS_miop = pd.DataFrame(Far_future_df_gtTHS_miop.transpose().mean(axis=1))
Far_future_df_gtTHS_miop.columns = [str(int(row['# species'])) + '_9<' + 'MiOp']
Far_future_df_gtTHS_miop.index = chainages
agg = ('>'+ str(row["Upper treshold"]) + '_count', lambda x: x.gt(row["Upper treshold"]).sum()),
Far_future_df_gtTHS_malim = pd.DataFrame(Far_future_df.resample('A').agg(agg)*100/(365*24*2))
Far_future_df_gtTHS_malim = pd.DataFrame(Far_future_df_gtTHS_malim.transpose().mean(axis=1))
Far_future_df_gtTHS_malim.columns = [str(int(row['# species'])) + '_9>' + 'Malim']
Far_future_df_gtTHS_malim.index = chainages
agg = ('>'+ str(row["Lower treshold"]) + '_count', lambda x: x.lt(row["Lower treshold"]).sum()),
Far_future_df_gtTHS_lolim = pd.DataFrame(Far_future_df.resample('A').agg(agg)*100/(365*24*2))
Far_future_df_gtTHS_lolim = pd.DataFrame(Far_future_df_gtTHS_lolim.transpose().mean(axis=1))
Far_future_df_gtTHS_lolim.columns = [str(int(row['# species'])) + '_9<' + 'LoLim']
Far_future_df_gtTHS_lolim.index = chainages
#combine statistics into single df
Shapefile_df = pd.concat([Node_mesh, Present_stats, Present_df_gtTHS, Present_df_gtTHS_miop,
Present_df_gtTHS_malim, Present_df_gtTHS_lolim, Far_future_stats, Far_future_df_gtTHS,
Far_future_df_gtTHS_miop, Far_future_df_gtTHS_malim, Far_future_df_gtTHS_lolim], axis=1, join='outer')
Shapefile_df['Ch%_MaOp'] = Shapefile_df[str(int(row['# species'])) + '_9>' + 'MaOp'] - Shapefile_df[str(int(row['# species'])) + '_0>' + 'MaOp']
Shapefile_df['Ch%_MiOp'] = Shapefile_df[str(int(row['# species'])) + '_9<' + 'MiOp'] - Shapefile_df[str(int(row['# species'])) + '_0<' + 'MiOp']
Shapefile_df['Ch%_Malim'] = Shapefile_df[str(int(row['# species'])) + '_9>' + 'Malim'] - Shapefile_df[str(int(row['# species'])) + '_0>' + 'Malim']
Shapefile_df['Ch%_LoLim'] = Shapefile_df[str(int(row['# species'])) + '_9<' + 'LoLim'] - Shapefile_df[str(int(row['# species'])) + '_0<' + 'LoLim']
#Shapefile_df.columns
#Combined_df_gtTHS.columns = [s + '_gtTHS' for s in scenario_real_names]
#Combined_df_gtTHS.plot(ylim=[0,500])
Shape_out_path = output_directory + 'Hunter_salinity_stats_V3.shp'
Shapefile_df.to_file(Shape_out_path, driver='ESRI Shapefile')
#==========================================================#
#Plot salinity analysis
#==========================================================#
png_out_name = output_directory + variable + '_sea_level_rise_analysis_' + Plot_scenario + '.pdf'
fig = plt.figure(figsize=(40,20))
ax=plt.subplot(1,3,1)
plt.title('median '+ variable + ' present day')
Shapefile_df.plot(cmap='viridis_r', column='0_med' , legend=True,ax=ax) #'0_med' is the present-day median column created above
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
ax=plt.subplot(1,3,2)
plt.title('Change in median of '+ variable + ' under 0.9m SLR')
Shapefile_df['Dif_med'] = Shapefile_df['9_med'] - Shapefile_df['0_med'] #assumed: change in median = far future minus present day
Shapefile_df.plot(cmap='viridis_r', column='Dif_med' , legend=True,ax=ax)
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
#plot tides
ax=plt.subplot(1,3,3)
plt.title('Change in time where '+ variable + ' > '+ str(row["Upper treshold"]) + ' under 0.9m SLR')
#max_min_extime_elev.plot(cmap='viridis_r', column='HigT Elev 0SLR' , legend=True, vmin=ylims[0], vmax=ylims[1],ax=ax)
Shapefile_df.plot(cmap='viridis_r', column='Ch%_Malim' , legend=True,ax=ax) #assumed: 'Ch%_Malim' (change in % of time above the upper threshold) computed above
ax.legend()
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()

@@ -0,0 +1,92 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 23 17:09:03 2019
@author: z5025317
"""
#==========================================================#
#Load packages
#==========================================================#
import geopandas as gpd
import numpy as np
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from matplotlib.backends.backend_pdf import PdfPages
from ggplot import *
#name of scenario
Plot_scenario = 'HD_grid_V1'
ylims = [0,40]
variable = 'salinity'
#set directory path for output files
output_directory = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Output/Postprocessed/' + Plot_scenario + '/Figures/'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
#input csv with species and their salinity optima and thresholds
salinity_eco_csv = "H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Eco_thresholds/Salintiy_Hunter_physiology_V1.csv"
salinity_eco_df = pd.read_csv(salinity_eco_csv, index_col=2)
salinity_eco_df = salinity_eco_df[salinity_eco_df.index.notnull()]
#load and refine node vs mesh shapefile to store the results of the statistics
test = gpd.read_file('H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/GIS_output/Hunter_Saltmarsh_salinity_stats_V2_clipped_estuary.shp')
tost = test.copy()
tost= tost.to_crs({'init': 'epsg:3857'})
print(tost.crs)
tost.head(2)
#Now the area in square kilometers
tost["area"] = tost['geometry'].area/ 10**6
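#geometry.area is returned in square metres for the projected CRS (EPSG:3857), hence the division
#by 1e6 to get km2; note that areas computed in Web Mercator are only approximate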
#calculate the wetted area falling into each salinity interval (Nr_of_intervs equal bins across the median salinity range)
Area_df = {}
Area_f_df = {}
incl_bool=False
Nr_of_intervs = 100
for i in range(1,Nr_of_intervs+1,1):
Interval = ((tost['0_med'].max() - tost['0_med'].min())/Nr_of_intervs)
if i == 1:
Area_df[round(Interval*i - Interval/2 ,2)] = round(tost[tost['0_med'].between(0,Interval, inclusive=incl_bool)]['area'].sum(),2)
Area_f_df[round(Interval*i - Interval/2 ,2)] = round(tost[tost['9_med'].between(0,Interval, inclusive=incl_bool)]['area'].sum(),2)
else:
Area_df[round(Interval*i - Interval/2 ,2)] = round(tost[tost['0_med'].between(Interval*(i-1), i*Interval, inclusive=incl_bool)]['area'].sum(),2)
Area_f_df[round(Interval*i - Interval/2 ,2)] = round(tost[tost['9_med'].between(Interval*(i-1), i*Interval, inclusive=incl_bool)]['area'].sum(),2)
Area_df = pd.DataFrame.from_dict(Area_df, orient='index')
Area_df.columns = ['0_med']
Area_f_df2 = pd.DataFrame.from_dict(Area_f_df, orient='index')
Area_f_df2.columns = ['0.9_med']
SLR_stats = pd.concat([Area_df, Area_f_df2], axis=1, join='outer')
SLR_stats = SLR_stats.sort_index(axis=0, level=None, ascending=True)
SLR_stats.plot()
png_out_name = output_directory +'/' + variable + '_salinity_habitat_analysis_' + Plot_scenario + '.pdf'
fig = plt.figure(figsize=(15,8))
ax=plt.subplot(1,1,1)
plt.title('Salinity range habitat size for Hunter River 0 and 0.9m sea level rise')
SLR_stats.plot( legend=False, ax=ax)
plt.ylabel("Area [square km]")
plt.xlabel("Salinity [PSU]")
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()

@@ -0,0 +1,445 @@
# -*- coding: utf-8 -*-
#==========================================================#
#Last Updated - June 2018
#@author: z5025317 Valentin Heimhuber
#code for gridded analysis of RMA2 hydrodynamic results (bathymetry, tidal analysis and tidal cycle animations) on a mesh-node grid
#Inputs: uses the gridded RMA2 result files extracted with the companion extraction scripts plus the tide and flow boundary CSVs defined below
#==========================================================#
#Load packages
#==========================================================#
import geopandas as gpd
import numpy as np
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from matplotlib.backends.backend_pdf import PdfPages
from ggplot import *
matplotlib.style.use('ggplot')
#==========================================================#
#Input parameters
#==========================================================#
#set scenario codes and beginning and end years and corresponding scenario code
fs=['Hwq003', 'Hwq005']
scenario_real_names = ['Present', 'Far_Future']
HDvariables = ['depth', 'elev']
variable = 'elev' #'elev', 'depth' or 'vel'
#Save bathyfile?
save_bathyfile = False
#set max rows to be read in from the RMA2 csv files
maxrowsincsv = 15000
#name of scenario
Plot_scenario = 'HD_grid_V1'
ylims = [-1.1,8]
#time frames for analysis
startdate = '1999 01 01'
enddate = '1999 12 01'
tideanalysis_day = '1999 04 26'
#year=range(startyear, endyear+1)
#load and refine node vs mesh shapefile to store the results of the statistics
Node_mesh = gpd.read_file('H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/GIS_data/Mesh_Node_GIS_Setup/hcc002_fishnet_500m_V3_joinXY.shp')
Node_mesh.index = Node_mesh['Field1']
Node_mesh = Node_mesh.iloc[:,-4:]
Node_mesh.columns = ['Node', 'X', 'Y', 'geometry']
node = Node_mesh['Node'].values
chainages = node
#load actual mesh file
Actual_mesh = gpd.read_file('H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/GIS_data/Mesh_Node_GIS_Setup/hcc002.shp')
#set directory path for output files
output_directory = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Output/Postprocessed/' + Plot_scenario + '/Figures/'
#set input directories and data
#tidal boundary data
tides_csv='C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/07_Modelling/01_Input/Tide Generator/Tidal Simulation Python Script (BMM)/Tides_1990_2010.csv'
#CSV file of mesh nodes and corresponding chainages
nodes_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Chainages/Hunter_nodes.csv'
#River flow boundary
Flow_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/01_Input/BCGeneration/Scenarios/Calibration/Calibration_Greta.csv'
#==========================================================#
#==========================================================#
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
#==========================================================#
#==========================================================#
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print("output directory folder didn't exist and was generated")
#==========================================================#
#========================================================#
#automated part of the code doing the data extraction
#==========================================================
#Load boundary flow data
Flow_df = pd.read_csv(Flow_csv, parse_dates=True, dayfirst=True, index_col=0)['Q[ML/d]']
Flow_df.columns = ['Q[ML/d]']
Flow_df = Flow_df[datetime.strptime(startdate , '%Y %m %d').date():datetime.strptime(enddate, '%Y %m %d').date()]
Flow_df.plot()
#Load tide boundary data
tides_dfm = pd.read_csv(tides_csv, parse_dates=True, index_col=0)
tides_dfm.columns = ['tide']
#read csv file with extracted RMA data
#RMA_df = pd.read_csv(RMA_data_csv, parse_dates=True, index_col=0)
#load RMA2 data
HD_Summary_df = pd.DataFrame()
for variable in HDvariables:
Summary_df = pd.DataFrame()
df = pd.DataFrame()
for f in fs:
#set input and output directories
input_directory = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Output/' + f
# Set working directory (where the extracted RMA results are located)
os.chdir(input_directory)
#==========================================================#
#Load data file
if variable == 'depth' or variable == 'elev':
Clim_Var_CSVs = glob.glob('*' + variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ', nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
df= df.drop(columns=['Year', 'Hour'])
a=len(df.columns)-1
df=df.iloc[:,:a]
if variable == 'vel':
#x velocity
Clim_Var_CSVs = glob.glob('*' +'x'+ variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ', nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
dfx= df.drop(columns=['Year', 'Hour','1'])
#y velocity
Clim_Var_CSVs = glob.glob('*' +'y'+ variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ',nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
dfy= df.drop(columns=['Year', 'Hour','1'])
df = np.sqrt(dfx*dfx + dfy*dfy)
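#resultant velocity magnitude computed from the x and y component files read above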
df.columns = df.columns + '_'+ f + '_'+ variable
#cut down the df to start and end date
df = df[datetime.strptime(startdate , '%Y %m %d').date():datetime.strptime(enddate, '%Y %m %d').date()]
Summary_df = pd.concat([Summary_df, df], axis=1, join='outer')
HD_Summary_df = pd.concat([HD_Summary_df , Summary_df], axis=1, join='outer')
#Summary_df.columns = chainages
#subset the input data frame
Present_df = HD_Summary_df.filter(regex='Hwq003').filter(regex='elev')
Far_future_df = HD_Summary_df.filter(regex='Hwq005').filter(regex='elev')
#Create Bathymetry along the nodes and chainage
Elev_df = HD_Summary_df.filter(regex='Hwq003').filter(regex='elev')
Elev_df.columns = node
Depth_df = HD_Summary_df.filter(regex='Hwq003').filter(regex='depth')
Depth_df.columns = node
Bathy_df = pd.DataFrame(Elev_df.loc[Elev_df.index[0]] - Depth_df.loc[Elev_df.index[0]])
Bathy_df.columns = ['Bathymetry']
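#the bed level at each node is approximated as water surface elevation minus water depth at the first output timestep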
#combine the node mesh with the bathymetry estimate and optionally save it as a shapefile
Node_mesh_df = pd.concat([Node_mesh , Bathy_df], axis=1, join='outer')
if save_bathyfile:
bathy_out_path = output_directory + 'Hunter_nodes_bathy.shp'
Node_mesh_df.to_file(bathy_out_path, driver='ESRI Shapefile')
#plot bathy
png_out_name = output_directory + 'bathy.pdf'
#plot figure
fig = plt.figure(figsize=(12,17))
ax=plt.subplot(1,1,1)
Node_mesh_df.plot(column='Bathymetry',
legend=True,
cmap='viridis',
ax=ax,
vmin=-20, vmax=0);
Actual_mesh.plot(ax=ax, alpha=1, edgecolor='grey',facecolor="None")
plt.title('Bathymetry of the Hunter River estuary')
ax.legend()
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
#==========================================================#
#Tidal analysis
#==========================================================#
#read in tide data
tides_df = tides_dfm[datetime.strptime(tideanalysis_day , '%Y %m %d').date()- timedelta(days=2) : datetime.strptime(tideanalysis_day , '%Y %m %d').date() + timedelta(days=3) ]
central_tides_df = tides_dfm[datetime.strptime(tideanalysis_day , '%Y %m %d').date():datetime.strptime(tideanalysis_day , '%Y %m %d').date()+ timedelta(days=1) ]
maxtide_index = central_tides_df['tide'].idxmax()
end_maxtide_index = maxtide_index + timedelta(days=0.4)
beg_maxtide_index = maxtide_index - timedelta(days=0.2)
short_tides_df = tides_df[beg_maxtide_index: end_maxtide_index]
short_tides_df.plot(cmap='viridis', legend=True)
mintide_index = short_tides_df['tide'].idxmin()
#plot the minimum and maximum elevation along the estuary occurring within a single tidal cycle
Short_tide_elev_df = HD_Summary_df.filter(regex='elev')[min(short_tides_df.index):max(short_tides_df.index)]
max_min_1cycle_elev_0slr = pd.concat([Short_tide_elev_df.filter(regex=fs[0]).max(), Short_tide_elev_df.filter(regex=fs[0]).min()], axis=1, join='inner')
max_min_1cycle_elev_09slr = pd.concat([Short_tide_elev_df.filter(regex=fs[1]).max(), Short_tide_elev_df.filter(regex=fs[1]).min()], axis=1, join='inner')
max_min_1cycle_elev_0slr.index = chainages
max_min_1cycle_elev_09slr.index = chainages
max_min_1cycle_elev = pd.concat([max_min_1cycle_elev_0slr, max_min_1cycle_elev_09slr], axis=1, join='outer')
max_min_1cycle_elev.columns = ['MaxElev 0 SLR','MinElev 0 SLR','MaxElev 0.9 SLR','MinElev 0.9 SLR' ]
max_min_1cycle_elev = pd.concat([Node_mesh , max_min_1cycle_elev], axis=1, join='outer')
#max_min_1cycle_elev.plot(cmap='viridis_r', column='MaxElev 0.9 SLR' , legend=True, vmin=-1, vmax=2)
#plot the elevation along the estuary at exactly the time of high and low tide
max_min_extime_elev_0slr = pd.concat([HD_Summary_df.filter(regex=fs[0]).filter(regex='elev').loc[maxtide_index], Summary_df.filter(regex=fs[0]).loc[mintide_index]], axis=1, join='outer')
max_min_extime_elev_09slr = pd.concat([HD_Summary_df.filter(regex=fs[1]).filter(regex='elev').loc[maxtide_index], Summary_df.filter(regex=fs[1]).loc[mintide_index]], axis=1, join='outer')
max_min_extime_elev_0slr.index = chainages
max_min_extime_elev_09slr.index = chainages
max_min_extime_elev = pd.concat([max_min_extime_elev_0slr, max_min_extime_elev_09slr], axis=1, join='outer')
max_min_extime_elev.columns = ['HigT Elev 0SLR','LowT Elev 0SLR','HigT Elev 0.9 SLR','LowT Elev 0.9 SLR' ]
max_min_extime_elev = pd.concat([Node_mesh , max_min_extime_elev], axis=1, join='outer')
#max_min_extime_elev.plot(cmap='viridis_r', column='HigT Elev 0.9 SLR' , legend=True, vmin=-1, vmax=2)
#==========================================================#
#End of Tidal analysis
#==========================================================#
#==========================================================#
#Plot Tidal analysis
#==========================================================#
ylims = [-0.6, 1]
png_out_name = output_directory + variable + '_high_low_tide_analysis_' + Plot_scenario + '.pdf'
fig = plt.figure(figsize=(23,20))
ax=plt.subplot(2,2,1)
plt.title('High tide water surface elevation along the estuary')
#max_min_extime_elev.plot(cmap='viridis_r', column='HigT Elev 0SLR' , legend=True, vmin=ylims[0], vmax=ylims[1],ax=ax)
max_min_extime_elev.plot(cmap='viridis_r', column='HigT Elev 0SLR' , legend=True,ax=ax)
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
#plot tides
ax=plt.subplot(2,2,2)
plt.title('Low tide water surface elevation along the estuary')
max_min_extime_elev.plot(cmap='viridis_r', column='LowT Elev 0SLR' , legend=True,ax=ax)
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
#plot tides
ax=plt.subplot(2,1,2)
plt.ylim(ylims)
plt.title('Tide at open ocean boundary')
short_tides_df.plot(cmap='viridis', legend=False, ax=ax)
ax.axvline(maxtide_index, color='black', linestyle='--')
ax.axvline(mintide_index, color='black', linestyle='-')
plt.xlabel("Time")
plt.ylabel("Water level [m AHD]")
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
#==========================================================#
#animate 2 tidal cycles baseline scenario:
#==========================================================#
Six_day_tide_elev_df = HD_Summary_df.filter(regex=fs[0]).filter(regex='elev')[maxtide_index - timedelta(days=2): maxtide_index + timedelta(days=2)]
Six_day_tide_elev_df.columns = chainages
Six_day_tides_df = tides_df[maxtide_index - timedelta(days=2): maxtide_index + timedelta(days=2)]
Six_day_tides_df.plot(cmap='viridis', legend=True)
Six_day_flow_df = Flow_df[maxtide_index - timedelta(days=2): maxtide_index + timedelta(days=2)]
Six_day_flow_df = pd.concat([Six_day_tides_df, Six_day_flow_df], axis=1, join='outer')['Q[ML/d]'].interpolate()
Six_day_flow_df.plot(cmap='viridis', legend=True)
ylims = [-0.6, 1]
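#loop over every 4th output time and write one longitudinal water level profile per time step; the numbered PNG frames can later be combined into an animation of the tidal cycle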
ii=0
for index in Six_day_tide_elev_df.index[0::4]:
ii=ii+1
#index = Six_day_tide_elev_df.index[1]
png_out_path = output_directory + '/TidalCycle_' + variable + '_' + Plot_scenario + '/'
if not os.path.exists(png_out_path):
os.makedirs(png_out_path)
#png_out_name = png_out_path + str(index) + 'longitudinalXS.png'
png_out_name = png_out_path + str(ii) + variable + '_longitudinalXS.png'
fig = plt.figure(figsize=(17,20))
ax=plt.subplot(2,1,1)
plt.title('Water surface elevation along the estuary')
df1 = pd.DataFrame(Six_day_tide_elev_df.loc[index])
df1.columns = ['Instant']
Six_day_tide_elev_df_instant = pd.concat([Node_mesh , df1], axis=1, join='outer')
Six_day_tide_elev_df_instant.columns
Six_day_tide_elev_df_instant.plot(cmap='viridis_r', column='Instant' ,vmin=ylims[0], vmax=ylims[1], legend=True,ax=ax)
#plot tides
ax=plt.subplot(2,1,2)
plt.ylim(ylims)
plt.title('Tide at open ocean boundary')
Six_day_tides_df.plot(cmap='viridis', legend=False, ax=ax)
ax.axvline(index, color='green', linestyle='--')
plt.xlabel("Time")
plt.ylabel("Water level [m AHD]")
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
#==========================================================#
#animate 2 tidal cycles baseline + SLR scenario:
#==========================================================#
Six_day_tide_elev_df_0slr = HD_Summary_df.filter(regex=fs[0]).filter(regex='elev')[maxtide_index - timedelta(days=1): maxtide_index + timedelta(days=1)]
Six_day_tide_elev_df_0slr.columns = chainages
Six_day_tide_elev_df_09slr = HD_Summary_df.filter(regex=fs[1]).filter(regex='elev')[maxtide_index - timedelta(days=1): maxtide_index + timedelta(days=1)]
Six_day_tide_elev_df_09slr.columns = chainages
Six_day_tides_df = tides_df[maxtide_index - timedelta(days=1): maxtide_index + timedelta(days=1.5)]
Six_day_tides_df.plot(cmap='viridis', legend=True)
ii=0
for index in Six_day_tide_elev_df_0slr.index:
ii=ii+1
#index = Six_day_tide_elev_df_0slr.index[1]
png_out_path = output_directory + '/' + Plot_scenario + '/TidalCycle_SLR_' + variable + '_' + Plot_scenario + '/'
if not os.path.exists(png_out_path):
os.makedirs(png_out_path)
#png_out_name = png_out_path + str(index) + 'longitudinalXS.png'
png_out_name = png_out_path + str(ii) + variable + '_longitudinalXS.png'
max_min_extime_elev_both = pd.concat([Six_day_tide_elev_df_0slr.loc[index], Six_day_tide_elev_df_09slr.loc[index]], axis=1, join='outer')
max_min_extime_elev_both.columns = ['Present Day','0.9m SLR']
fig = plt.figure(figsize=(17,12))
ax=plt.subplot(2,1,1)
plt.ylim(ylims)
plt.title('Water surface elevation along the estuary')
ax.set_prop_cycle(color=['slateblue','tomato'], linestyle=['-', '-'], lw=[2,2])
max_min_extime_elev_both.plot(ax=ax)
ax.legend(loc=1)
plt.axhline(y=0, color='r', linestyle='--', lw=1, alpha=0.5)
plt.ylim(-1.1,2.5)
plt.ylabel("Water level [m AHD]")
plt.xlabel("Distance from ocean boundary [km]")
#plot tides
ax=plt.subplot(2,1,2)
plt.ylim(-1.1,1.5)
plt.title('Tide at open ocean boundary')
Six_day_tides_df.plot(cmap='viridis', legend=False, ax=ax)
ax.axvline(index, color='green', linestyle='--')
plt.xlabel("Time")
plt.ylabel("Water level [m AHD]")
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
#for NODE in node:
# NODE = str(NODE)
# #=========#
# #comparison time series plot at node
# #=========#
# #out name
# png_out_file_name = comparison_code + '_' + variable + '_' + NODE + '_time_series.png'
# png_out_path = png_output_directory + '/' + png_out_file_name
# #
# plotin_df = Summary_df.filter(regex=NODE)
# #prepare data frame for plot
# fig = plt.figure(figsize=(14,18))
# ax=plt.subplot(4,1,1)
# #start and end dat of plots
# plt.title(variable + '_' + NODE + ' - time series')
# start_date = datetime(int('1996'), int('02'), int('01'))
# end_date = datetime(int('1996'), int('02'), int('5'))
# plotin_df1=plotin_df[start_date:end_date]
# ymin = min(plotin_df1.min()) - 0.01 *min(plotin_df1.min())
# ymax = max(plotin_df1.max()) + 0.01 *max(plotin_df1.max())
# plotin_df1.plot(ylim=(ymin,ymax) ,legend=False, ax=ax)
# #plt.legend( loc=1)
# plt.xticks([])
#
# ax=plt.subplot(4,1,2)
# start_date = datetime(int('1996'), int('02'), int('01'))
# end_date = datetime(int('1996'), int('02'), int('3'))
# plotin_df2=plotin_df[start_date:end_date]
# ymin = min(plotin_df2.min()) - 0.1 *min(plotin_df2.min())
# ymax = max(plotin_df2.max()) + 0.1 *max(plotin_df2.max())
# plotin_df2.plot(ylim=(ymin,ymax),legend=False, ax=ax)
# #plt.legend( loc=1)
# ax.xaxis.grid(False)
#
# ax=plt.subplot(4,1,3)
# plt.title(variable + '_' + NODE + ' - monthly maximum')
# Monthly_max = plotin_df.resample('M').max()
# Monthly_max.plot(ax=ax,legend=False)
# plt.legend( loc=1)
# ax.xaxis.grid(False)
#
# ax=plt.subplot(4,1,4)
# plt.title(variable + '_' + NODE + ' - monthly mean')
# Monthly_mean = plotin_df.resample('M').mean()
# Monthly_mean.plot(ax=ax)
# #plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
# # ncol=2, mode="expand", borderaxespad=0.)
# #plt.legend( loc=1)
# ax.xaxis.grid(False)
#
# #ax.patch.set_alpha(ALPHA_figs)
# fig.patch.set_alpha(ALPHA_figs)
# fig.tight_layout()
# fig.savefig(png_out_path)
# plt.close()
# #=========#

@ -0,0 +1,425 @@
# -*- coding: utf-8 -*-
#==========================================================#
#Last Updated - June 2018
#@author: z5025317 Valentin Heimhuber
#code for creating climate prioritization plots for NARCLIM variables.
#Inputs: Uses CSV files that contain all 12 NARCLIM model runs time series for 1 grid cell created with: P1_NARCliM_NC_to_CSV_CCRC_SS.py
#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from matplotlib.backends.backend_pdf import PdfPages
#from ggplot import * #not used below; only the matplotlib 'ggplot' style is needed
matplotlib.style.use('ggplot')
#==========================================================#
#Input parameters
#==========================================================#
#set scenario codes and beginning and end years and corresponding scenario code
fs=['Hwq003', 'Hwq005']
scenario_real_names = ['Present', 'Far_Future']
HDvariables = ['depth', 'elev']
variable = 'elev' #depth or vel
maxrowsincsv = 15000
#name of scenario
Plot_scenario = 'Flood2'
ylims = [-1.1,8]
#time frames for analysis
startdate = '2000 01 01'
enddate = '2000 12 01'
tideanalysis_day = '1999 01 26'
#year=range(startyear, endyear+1)
#set directory path for output files
output_directory = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Output/Postprocessed/Figures'
#set input directories and data
#tidal boundary data
tides_csv='C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/07_Modelling/01_Input/Tide Generator/Tidal Simulation Python Script (BMM)/Tides_1990_2010.csv'
#CSV file of mesh nodes and corresponding chainages
nodes_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Chainages/Hunter_nodes.csv'
#River flow boundary
Flow_csv = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/01_Input/BCGeneration/Scenarios/Calibration/Calibration_Greta.csv'
#==========================================================#
#==========================================================#
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
#==========================================================#
#==========================================================#
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory + '/' + Plot_scenario):
os.makedirs(output_directory+ '/' + Plot_scenario)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
#==========================================================#
#========================================================#
#automated part of the code doing the data extraction
#==========================================================
#read csv file with nodes to extract data from
node = pd.read_csv(nodes_csv)['Hunter'].values
chainages = pd.read_csv(nodes_csv)['x_km'].values
#Load boundary flow data
Flow_df = pd.read_csv(Flow_csv, parse_dates=True, dayfirst=True, index_col=0)['Q[ML/d]'] #selecting a single column returns a Series that already carries the name 'Q[ML/d]'
Flow_df = Flow_df[datetime.strptime(startdate , '%Y %m %d').date():datetime.strptime(enddate, '%Y %m %d').date()]
Flow_df.plot()
#Load tide boundary data
tides_dfm = pd.read_csv(tides_csv, parse_dates=True, index_col=0)
tides_dfm.columns = ['tide']
#read csv file with extracted RMA data
#RMA_df = pd.read_csv(RMA_data_csv, parse_dates=True, index_col=0)
#load RMA2 data
HD_Summary_df = pd.DataFrame()
for variable in HDvariables:
Summary_df = pd.DataFrame()
df = pd.DataFrame()
for f in fs:
#set input and output directories
input_directory = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Output/' + f
# Set working directory (where postprocessed NARCliM data is located)
os.chdir(input_directory)
#==========================================================#
#Load data file
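#the extracted RMA CSVs are space delimited with Year and Hour columns (Hour appears to be hours since the start of the model year); these are combined into a datetime index and the trailing empty column created by the space delimiter is dropped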
if variable == 'depth' or variable == 'elev':
Clim_Var_CSVs = glob.glob('*' + variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ', nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
df= df.drop(columns=['Year', 'Hour'])
a=len(df.columns)-1
df=df.iloc[:,:a]
if variable == 'vel':
#x velocity
Clim_Var_CSVs = glob.glob('*' +'x'+ variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ', nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
dfx= df.drop(columns=['Year', 'Hour','1'])
#y velocity
Clim_Var_CSVs = glob.glob('*' +'y'+ variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ',nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
dfy= df.drop(columns=['Year', 'Hour','1'])
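#resultant current speed from the x and y velocity components (speed = sqrt(u^2 + v^2))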
df = np.sqrt(dfx*dfx + dfy*dfy)
df.columns = df.columns + '_'+ f + '_'+ variable
#cut down the df to start and end date
df = df[datetime.strptime(startdate , '%Y %m %d').date():datetime.strptime(enddate, '%Y %m %d').date()]
Summary_df = pd.concat([Summary_df, df], axis=1, join='outer')
HD_Summary_df = pd.concat([HD_Summary_df , Summary_df], axis=1, join='outer')
#Summary_df.columns = chainages
#subset the input data frame
Present_df = HD_Summary_df.filter(regex='Hwq003').filter(regex='elev')
Far_future_df = HD_Summary_df.filter(regex='Hwq005').filter(regex='elev')
#Create Bathymetry along the nodes and chainages
Elev_df = HD_Summary_df.filter(regex='Hwq003').filter(regex='elev')
Elev_df.columns = chainages
Depth_df = HD_Summary_df.filter(regex='Hwq003').filter(regex='depth')
Depth_df.columns = chainages
Bathy_df = pd.DataFrame(Elev_df.loc[Elev_df.index[0]] - Depth_df.loc[Elev_df.index[0]]) #bed level = elevation minus depth; use the first output time here since maxtide_index is only defined in the tidal analysis further below
Bathy_df.columns = ['Bathymetry']
Bathy_df.plot()
bathy_out_path = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Chainages/Hunter_nodes_bathy.csv'
Bathy_df.to_csv(bathy_out_path)
#plt.show()
#==========================================================#
#Tidal analysis
#==========================================================#
#read in tide data
tides_df = tides_dfm[datetime.strptime(tideanalysis_day , '%Y %m %d').date()- timedelta(days=2) : datetime.strptime(tideanalysis_day , '%Y %m %d').date() + timedelta(days=3) ]
central_tides_df = tides_dfm[datetime.strptime(tideanalysis_day , '%Y %m %d').date():datetime.strptime(tideanalysis_day , '%Y %m %d').date()+ timedelta(days=1) ]
maxtide_index = central_tides_df['tide'].idxmax()
end_maxtide_index = maxtide_index + timedelta(days=0.4)
beg_maxtide_index = maxtide_index - timedelta(days=0.2)
short_tides_df = tides_df[beg_maxtide_index: end_maxtide_index]
short_tides_df.plot(cmap='viridis', legend=True)
mintide_index = short_tides_df['tide'].idxmin()
#plot the minimum and maximum elevation along the estuary occurring within a single tidal cycle
Short_tide_elev_df = HD_Summary_df.filter(regex='elev')[min(short_tides_df.index):max(short_tides_df.index)]
max_min_1cycle_elev_0slr = pd.concat([Short_tide_elev_df.filter(regex=fs[0]).max(), Short_tide_elev_df.filter(regex=fs[0]).min()], axis=1, join='inner')
max_min_1cycle_elev_09slr = pd.concat([Short_tide_elev_df.filter(regex=fs[1]).max(), Short_tide_elev_df.filter(regex=fs[1]).min()], axis=1, join='inner')
max_min_1cycle_elev_0slr.index = chainages
max_min_1cycle_elev_09slr.index = chainages
max_min_1cycle_elev = pd.concat([max_min_1cycle_elev_0slr, max_min_1cycle_elev_09slr], axis=1, join='outer')
max_min_1cycle_elev.columns = ['MaxElev 0 SLR','MinElev 0 SLR','MaxElev 0.9 SLR','MinElev 0.9 SLR' ]
max_min_1cycle_elev.plot(cmap='viridis', legend=True)
#plot the elevation along the estuary at exactly the time of high and low tide
max_min_extime_elev_0slr = pd.concat([HD_Summary_df.filter(regex=fs[0]).filter(regex='elev').loc[maxtide_index], HD_Summary_df.filter(regex=fs[0]).filter(regex='elev').loc[mintide_index]], axis=1, join='outer')
max_min_extime_elev_09slr = pd.concat([HD_Summary_df.filter(regex=fs[1]).filter(regex='elev').loc[maxtide_index], HD_Summary_df.filter(regex=fs[1]).filter(regex='elev').loc[mintide_index]], axis=1, join='outer')
max_min_extime_elev_0slr.index = chainages
max_min_extime_elev_09slr.index = chainages
max_min_extime_elev = pd.concat([max_min_extime_elev_0slr, max_min_extime_elev_09slr], axis=1, join='outer')
max_min_extime_elev.columns = ['HigT Elev 0SLR','LowT Elev 0SLR','HigT Elev 0.9 SLR','LowT Elev 0.9 SLR' ]
max_min_extime_elev.plot(cmap='viridis', legend=True)
png_out_name = output_directory +'/' + Plot_scenario + '/' + variable + '_high_low_tide_analysis_' + Plot_scenario + '.pdf'
fig = plt.figure(figsize=(17,20))
ax=plt.subplot(3,1,1)
plt.title('Max and min water surface elevation along the estuary over one tidal cycle')
ax.set_prop_cycle(color=['slateblue', 'slateblue', 'tomato', 'tomato'], linestyle=['--', '-', '--', '-'], lw=[2,2,2,2])
max_min_1cycle_elev.plot( legend=False, ax=ax)
plt.axhline(y=0, color='slateblue', linestyle='--', lw=1, alpha=0.5)
plt.axhline(y=0.9, color='tomato', linestyle='--', lw=1, alpha=0.5)
plt.ylim(ylims)
plt.ylabel("Water level [m AHD]")
plt.xlabel("Distance from ocean boundary [km]")
# Put a legend to the right of the current axis
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
#plot tides
ax=plt.subplot(3,1,2)
plt.title('Max and min water surface elevation along the estuary at time of high & low tide')
ax.set_prop_cycle(color=['slateblue', 'slateblue', 'tomato', 'tomato'], linestyle=['--', '-', '--', '-'], lw=[2,2,2,2])
max_min_extime_elev.plot(legend=False, ax=ax)
plt.ylabel("Water level [m AHD]")
plt.ylim(ylims)
plt.xlabel("Distance from ocean boundary [km]")
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
#plot tides
ax=plt.subplot(3,1,3)
plt.ylim(ylims)
plt.title('Tide at open ocean boundary')
short_tides_df.plot(cmap='viridis', legend=False, ax=ax)
ax.axvline(maxtide_index, color='black', linestyle='--')
ax.axvline(mintide_index, color='black', linestyle='-')
plt.xlabel("Time")
plt.ylabel("Water level [m AHD]")
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
#==========================================================#
#animate 2 tidal cycles baseline scenario:
#==========================================================#
Six_day_tide_elev_df = HD_Summary_df.filter(regex=fs[0]).filter(regex='elev')[maxtide_index - timedelta(days=2): maxtide_index + timedelta(days=3)]
Six_day_tide_elev_df.columns = chainages
Six_day_tides_df = tides_df[maxtide_index - timedelta(days=2): maxtide_index + timedelta(days=3)]
Six_day_tides_df.plot(cmap='viridis', legend=True)
Six_day_flow_df = Flow_df[maxtide_index - timedelta(days=2): maxtide_index + timedelta(days=3)]
Six_day_flow_df = pd.concat([Six_day_tides_df, Six_day_flow_df], axis=1, join='outer')['Q[ML/d]'].interpolate()
Six_day_flow_df.plot(cmap='viridis', legend=True)
ii=0
for index in Six_day_tide_elev_df.index[0::4]:
ii=ii+1
#index = Six_day_tide_elev_df.index[1]
png_out_path = output_directory + '/' + Plot_scenario + '/TidalCycle_' + variable + '_' + Plot_scenario + '/'
if not os.path.exists(png_out_path):
os.makedirs(png_out_path)
#png_out_name = png_out_path + str(index) + 'longitudinalXS.png'
png_out_name = png_out_path + str(ii) + variable + '_longitudinalXS.png'
fig = plt.figure(figsize=(17,20))
ax=plt.subplot(3,1,1)
plt.title('Water surface elevation along the estuary')
Six_day_tide_elev_df.loc[index].plot(color='blue', legend=False)
#plt.axhline(y=0, color='r', linestyle='--', lw=1, alpha=0.5)
plt.axhline(tides_df.loc[index].values, color='black',lw=0.5, linestyle='--')
plt.ylim(ylims)
plt.ylabel("Water level [m AHD]")
plt.xlabel("Distance from ocean boundary [km]")
#plot tides
ax=plt.subplot(3,1,2)
plt.ylim(-1.1,1.3)
plt.title('Tide at open ocean boundary')
Six_day_tides_df.plot(cmap='viridis', legend=False, ax=ax)
ax.axvline(index, color='green', linestyle='--')
plt.xlabel("Time")
plt.ylabel("Water level [m AHD]")
ax=plt.subplot(3,1,3)
#plt.ylim(ylims)
plt.title('Flow at the upstream river boundary')
Six_day_flow_df.plot(color='black', legend=False, ax=ax)
ax.axvline(index, color='green', linestyle='--')
plt.xlabel("Time")
plt.ylabel("Hunter Inflow [ML/day]")
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
#==========================================================#
#animate 2 tidal cycles baseline + SLR scenario:
#==========================================================#
Six_day_tide_elev_df_0slr = HD_Summary_df.filter(regex=fs[0]).filter(regex='elev')[maxtide_index - timedelta(days=1): maxtide_index + timedelta(days=1)]
Six_day_tide_elev_df_0slr.columns = chainages
Six_day_tide_elev_df_09slr = HD_Summary_df.filter(regex=fs[1]).filter(regex='elev')[maxtide_index - timedelta(days=1): maxtide_index + timedelta(days=1)]
Six_day_tide_elev_df_09slr.columns = chainages
Six_day_tides_df = tides_df[maxtide_index - timedelta(days=1): maxtide_index + timedelta(days=1.5)]
Six_day_tides_df.plot(cmap='viridis', legend=True)
ii=0
for index in Six_day_tide_elev_df_0slr.index:
ii=ii+1
#index = Six_day_tide_elev_df_0slr.index[1]
png_out_path = output_directory + '/' + Plot_scenario + '/TidalCycle_SLR_' + variable + '_' + Plot_scenario + '/'
if not os.path.exists(png_out_path):
os.makedirs(png_out_path)
#png_out_name = png_out_path + str(index) + 'longitudinalXS.png'
png_out_name = png_out_path + str(ii) + variable + '_longitudinalXS.png'
max_min_extime_elev_both = pd.concat([Six_day_tide_elev_df_0slr.loc[index], Six_day_tide_elev_df_09slr.loc[index]], axis=1, join='outer')
max_min_extime_elev_both.columns = ['Present Day','0.9m SLR']
fig = plt.figure(figsize=(17,12))
ax=plt.subplot(2,1,1)
plt.ylim(ylims)
plt.title('Water surface elevation along the estuary')
ax.set_prop_cycle(color=['slateblue','tomato'], linestyle=['-', '-'], lw=[2,2])
max_min_extime_elev_both.plot(ax=ax)
ax.legend(loc=1)
plt.axhline(y=0, color='r', linestyle='--', lw=1, alpha=0.5)
plt.ylim(-1.1,2.5)
plt.ylabel("Water level [m AHD]")
plt.xlabel("Distance from ocean boundary [km]")
#plot tides
ax=plt.subplot(2,1,2)
plt.ylim(-1.1,1.5)
plt.title('Tide at open ocean boundary')
Six_day_tides_df.plot(cmap='viridis', legend=False, ax=ax)
ax.axvline(index, color='green', linestyle='--')
plt.xlabel("Time")
plt.ylabel("Water level [m AHD]")
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
#for NODE in node:
# NODE = str(NODE)
# #=========#
# #comparison time series plot at node
# #=========#
# #out name
# png_out_file_name = comparison_code + '_' + variable + '_' + NODE + '_time_series.png'
# png_out_path = png_output_directory + '/' + png_out_file_name
# #
# plotin_df = Summary_df.filter(regex=NODE)
# #prepare data frame for plot
# fig = plt.figure(figsize=(14,18))
# ax=plt.subplot(4,1,1)
# #start and end dat of plots
# plt.title(variable + '_' + NODE + ' - time series')
# start_date = datetime(int('1996'), int('02'), int('01'))
# end_date = datetime(int('1996'), int('02'), int('5'))
# plotin_df1=plotin_df[start_date:end_date]
# ymin = min(plotin_df1.min()) - 0.01 *min(plotin_df1.min())
# ymax = max(plotin_df1.max()) + 0.01 *max(plotin_df1.max())
# plotin_df1.plot(ylim=(ymin,ymax) ,legend=False, ax=ax)
# #plt.legend( loc=1)
# plt.xticks([])
#
# ax=plt.subplot(4,1,2)
# start_date = datetime(int('1996'), int('02'), int('01'))
# end_date = datetime(int('1996'), int('02'), int('3'))
# plotin_df2=plotin_df[start_date:end_date]
# ymin = min(plotin_df2.min()) - 0.1 *min(plotin_df2.min())
# ymax = max(plotin_df2.max()) + 0.1 *max(plotin_df2.max())
# plotin_df2.plot(ylim=(ymin,ymax),legend=False, ax=ax)
# #plt.legend( loc=1)
# ax.xaxis.grid(False)
#
# ax=plt.subplot(4,1,3)
# plt.title(variable + '_' + NODE + ' - monthly maximum')
# Monthly_max = plotin_df.resample('M').max()
# Monthly_max.plot(ax=ax,legend=False)
# plt.legend( loc=1)
# ax.xaxis.grid(False)
#
# ax=plt.subplot(4,1,4)
# plt.title(variable + '_' + NODE + ' - monthly mean')
# Monthly_mean = plotin_df.resample('M').mean()
# Monthly_mean.plot(ax=ax)
# #plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
# # ncol=2, mode="expand", borderaxespad=0.)
# #plt.legend( loc=1)
# ax.xaxis.grid(False)
#
# #ax.patch.set_alpha(ALPHA_figs)
# fig.patch.set_alpha(ALPHA_figs)
# fig.tight_layout()
# fig.savefig(png_out_path)
# plt.close()
# #=========#
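#----------------------------------------------------------#
#The depth/elev/vel loading blocks above repeat the same CSV-parsing steps for
#every scenario. A minimal sketch of how they could be factored into a reusable
#helper is given below (not called anywhere; the column layout of the extracted
#RMA CSVs - Year, Hour, then one column per node - is assumed from the code above).
def load_rma_variable(input_directory, variable, maxrows):
    """Return one extracted RMA result variable as a DataFrame with a datetime index (sketch only)."""
    csv_path = glob.glob(os.path.join(input_directory, '*' + variable + '*'))[0]
    df = pd.read_csv(csv_path, index_col=False, sep=' ', nrows=maxrows)
    df.index = pd.to_datetime(df.Year, format='%Y') + pd.to_timedelta(df.Hour, unit='h')
    df = df.drop(columns=['Year', 'Hour'])
    return df.iloc[:, :len(df.columns) - 1] #drop the trailing empty column created by the space-delimited export
#----------------------------------------------------------#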

@ -0,0 +1,594 @@
# -*- coding: utf-8 -*-
#==========================================================#
#Last Updated - June 2018
#@author: z5025317 Valentin Heimhuber
#code for creating climate prioritization plots for NARCLIM variables.
#Inputs: Uses CSV files that contain all 12 NARCLIM model runs time series for 1 grid cell created with: P1_NARCliM_NC_to_CSV_CCRC_SS.py
#Datum offset factors can be found at https://www.mhl.nsw.gov.au/docs/Data%20Conditions%20and%20Fit%20for%20Purpose.pdf
#==========================================================#
#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import geopandas as gpd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
#from matplotlib.backends.backend_pdf import PdfPages
import seaborn
matplotlib.style.use('ggplot')
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
#==========================================================#
#==========================================================#
#Set Input parameters
#==========================================================#
River = 'Macleay'
Order_ID = 'Ocean_bdr' #choose order column name
startdate = '2007 04 13'
enddate = '2007 04 21'
tideanalysis_day = '2018 06 01'
#usually the min and max observed water levels in the estuary in AHD used for plot ylims
#========================================================#
#========================================================#
#mostly automated part of the code, but it may need to be double-checked and debugged
#========================================================#
#set directory path for input and output files
main_input_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Global_Data/MHL_Gauge_Data/Analysis/'
output_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Global_Data/MHL_Gauge_Data/Analysis/Postprocessed'
if not os.path.exists(output_directory + '/' ):
os.makedirs(output_directory+ '/' )
#load MHL gauge shapefile to store the results of the statistics
MHL_gauges_df = gpd.read_file('J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Global_Data/GIS/MHL_gauges_received_MGA56.shp')
#MHL_gauges_df = MHL_gauges_df.loc[MHL_gauges_df['River'] == River]
MHL_gauges_df = MHL_gauges_df.loc[MHL_gauges_df[Order_ID].notnull()]
MHL_gauges_df = pd.DataFrame(MHL_gauges_df)
MHL_gauges_df = MHL_gauges_df.reset_index()
gauges = MHL_gauges_df['Station Na'].values
gauge_type = MHL_gauges_df[Order_ID].values
#rivers = MHL_gauges_df['River'].values
order_dict = dict(zip(gauges, gauge_type))
#Load in situ gauge data
WL_df = pd.DataFrame()
for gauge in gauges:
#gauge = gauges[1]
#gauge = 'Harrington'
print('loading MHL gauge data for ' + str(gauge))
CSVpath = glob.glob('J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Global_Data/MHL_Gauge_Data/20200409_Recieved/*/' + '*' + gauge.replace(" ", "") + '*' )
Full_df = pd.read_csv(CSVpath[0] ,header= 0, skiprows= 30, parse_dates = True)
Full_df.index = pd.to_datetime(Full_df.Date, format = '%d/%m/%Y') + pd.to_timedelta(Full_df.Time, unit='h')
#Full_df = Full_df.loc[datetime.strptime(startdate , '%Y %m %d').date() : datetime.strptime(enddate , '%Y %m %d').date()]
Full_df = Full_df.iloc[:,[-2]]
Full_df.columns = [gauge]
Full_df = Full_df.loc[~Full_df.index.duplicated(keep='first')] #get rid of possible duplicate dates
Full_df = Full_df.replace(to_replace ='---', value =np.NaN) #--- is used as the no data value in the MHL data provided
Full_df = pd.DataFrame(pd.to_numeric(Full_df[gauge]))
Full_df = Full_df - np.float(MHL_gauges_df['AHDadjust'][MHL_gauges_df['Station Na']==gauge]) #subtract the datum adjustment factor provided by MHL as written in the shapefile
WL_df = pd.concat([WL_df, Full_df], axis=1)
#postprocess the in situ gauge dataframe
#WL_df = WL_df.replace(to_replace ='---', value =np.NaN) #--- is used as the no data value in the MHL data provided
#WL_df = WL_df.convert_objects(convert_numeric=True)
#WL_df = WL_df - datum_offset
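#prefix each gauge column with its Order_ID value so that sorting the column names runs the gauges from downstream (ocean) to upstream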
order_colnames=[]
for colname in WL_df.columns:
order_colnames.append(str(order_dict[colname]) + '_' + colname)
#WL_df.columns = order_colnames
WL_df.columns = [str(s) + '_MHL' for s in order_colnames]
order_colnames.sort() #this will later be used to loop through the MHL gauges in a downstream to upstream order
order_colnames = pd.Series(order_colnames).str.replace("(",'')
order_colnames = pd.Series(order_colnames).str.replace(")",'')
order_colnames = order_colnames[~order_colnames.duplicated(keep='first')]
print('Performing analysis in the following order of locations')
print(order_colnames)
#========================================================#
#plot annual MSL for all open ocean gauges
ylims = [-0.2,0.2]
Open_ocean_only_df = WL_df.filter(regex='1.0')
Open_ocean_only_df.columns
Open_ocean_only_df_annual = Open_ocean_only_df.resample('A').mean()
png_out_name = output_directory + '/Annual mean sea levels.pdf'
fig = plt.figure(figsize=(40,18))
ax=plt.subplot(1,1,1)
plt.title('Annual mean sea levels at MHL open ocean gauges')
ax.set_prop_cycle(color=['steelblue', 'coral', 'red', 'deeppink'],linestyle=[ '-', '-','--','-'], lw=[2,2,2,2])
Open_ocean_only_df_annual.iloc[:,[0,1,2,3]].plot( legend=False,style=['+-','o-','.--','s:'], ax=ax)
plt.axhline(y=0, color='slateblue', linestyle='--', lw=1, alpha=0.5)
#plt.axhline(y=0.9, color='tomato', linestyle='--', lw=1, alpha=0.5)
plt.ylim(ylims)
plt.ylabel("Water level [m AHD]")
plt.xlabel("Time")
# Put a legend to the right of the current axis
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
#plot tides
#ax=plt.subplot(2,1,2)
#plt.title('Max and min water surface elevation along the estuary at time of high & low tide')
#ax.set_prop_cycle(color=['slateblue', 'tomato', ], linestyle=[ '-', '-'], lw=[2,2])
#WL_df.iloc[:,[2,3]].plot(legend=False, ax=ax)
#plt.ylabel("Water level [m AHD]")
#plt.ylim(ylims)
#plt.xlabel("Time")
##lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#ax.legend()
#plot tides
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
#plot monthly MSL for all open ocean gauges
ylims = [-0.5,0.5]
Open_ocean_only_df = WL_df.filter(regex='1.0')
Open_ocean_only_df_annual = Open_ocean_only_df.resample('M').mean()
Plot_Title = 'Monthly mean sea levels at MHL open ocean gauges'
fig = plt.figure(figsize=(40,18))
ax=plt.subplot(1,1,1)
plt.title(Plot_Title)
png_out_name = output_directory + '/' + Plot_Title + '.pdf'
ax.set_prop_cycle(color=['steelblue', 'coral', 'red', 'deeppink'],linestyle=[ '-', '-','--','-'], lw=[2,2,2,2])
Open_ocean_only_df_annual.iloc[:,[0,1,2,3]].plot( legend=False,style=['+-','o-','.--','s:'], ax=ax)
plt.axhline(y=0, color='slateblue', linestyle='--', lw=1, alpha=0.5)
#plt.axhline(y=0.9, color='tomato', linestyle='--', lw=1, alpha=0.5)
plt.ylim(ylims)
plt.ylabel("Water level [m relative]")
plt.xlabel("Time")
# Put a legend to the right of the current axis
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
#plot tides
#ax=plt.subplot(2,1,2)
#plt.title('Max and min water surface elevation along the estuary at time of high & low tide')
#ax.set_prop_cycle(color=['slateblue', 'tomato', ], linestyle=[ '-', '-'], lw=[2,2])
#WL_df.iloc[:,[2,3]].plot(legend=False, ax=ax)
#plt.ylabel("Water level [m AHD]")
#plt.ylim(ylims)
#plt.xlabel("Time")
##lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#ax.legend()
#plot tides
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
#plot time series for a few windows
ylims = [-1,1.5]
Open_ocean_only_df = WL_df.filter(regex='1.0')
halfwindow = 15
Open_ocean_only_df = Open_ocean_only_df[datetime.strptime(tideanalysis_day , '%Y %m %d').date()- timedelta(days=halfwindow) : datetime.strptime(tideanalysis_day , '%Y %m %d').date() + timedelta(days=halfwindow) ]
#Open_ocean_only_df = Open_ocean_only_df.iloc[:,[0,1,2,3]][Open_ocean_only_df.iloc[:,0].notnull()]
Plot_Title = 'water level time series '+ tideanalysis_day + ' _window_of_' + str(halfwindow*2) +'_days'
fig = plt.figure(figsize=(40,18))
ax=plt.subplot(1,1,1)
plt.title(Plot_Title)
png_out_name = output_directory + '/' + Plot_Title + '.pdf'
ax.set_prop_cycle(color=['steelblue', 'coral', 'green', 'deeppink'],linestyle=[ '-', '-','--','-'], lw=[2,2,2,2])
Open_ocean_only_df.iloc[:,[0,1,2,3]].interpolate(method='linear').plot( legend=False, ax=ax)
plt.axhline(y=0, color='slateblue', linestyle='--', lw=1, alpha=0.5)
#plt.axhline(y=0.9, color='tomato', linestyle='--', lw=1, alpha=0.5)
plt.ylim(ylims)
plt.ylabel("Water level [m relative]")
plt.xlabel("Time")
# Put a legend to the right of the current axis
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
#plot tides
#ax=plt.subplot(2,1,2)
#plt.title('Max and min water surface elevation along the estuary at time of high & low tide')
#ax.set_prop_cycle(color=['slateblue', 'tomato', ], linestyle=[ '-', '-'], lw=[2,2])
#WL_df.iloc[:,[2,3]].plot(legend=False, ax=ax)
#plt.ylabel("Water level [m AHD]")
#plt.ylim(ylims)
#plt.xlabel("Time")
##lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#ax.legend()
#plot tides
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
#plot time series for a few windows
ylims = [-1,1.5]
xlocation = datetime.strptime(tideanalysis_day , '%Y %m %d').date()
Regex = 'Port Macquarie'
Regex = 'Tweed'
#Regex = 'haven'
#'Crowdy', 'Harrington'
Open_ocean_only_df = WL_df.filter(regex=Regex )
halfwindow = 200
Open_ocean_only_df = Open_ocean_only_df[datetime.strptime(tideanalysis_day , '%Y %m %d').date()- timedelta(days=halfwindow) : datetime.strptime(tideanalysis_day , '%Y %m %d').date() + timedelta(days=halfwindow) ]
#Open_ocean_only_df = Open_ocean_only_df.iloc[:,[0,1,2,3]][Open_ocean_only_df.iloc[:,0].notnull()]
Plot_Title = 'water level time series ocean vs entrance '+ tideanalysis_day + ' _window_of_' + str(halfwindow*2) +'_days_' + Regex + 'V2'
fig = plt.figure(figsize=(40,18))
ax=plt.subplot(1,1,1)
plt.title(Plot_Title)
png_out_name = output_directory + '/' + Plot_Title + '.pdf'
ax.set_prop_cycle(color=['steelblue', 'coral', 'green', 'deeppink'],linestyle=[ '-', '-','--','-'], lw=[2,2,2,2])
Open_ocean_only_df.iloc[:,[0,1]].interpolate(method='linear').plot( legend=False, ax=ax)
plt.axhline(y=0, color='slateblue', linestyle='--', lw=1, alpha=0.5)
plt.axhline(y=np.nanpercentile(Open_ocean_only_df.iloc[:,[0]],50), color='steelblue', linestyle='solid', lw=2, alpha=1)
plt.text(xlocation, (np.nanpercentile(Open_ocean_only_df.iloc[:,[0]],50)), 'MSL= ' + str(np.round(np.nanpercentile(Open_ocean_only_df.iloc[:,[0]],50),2)) , ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(Open_ocean_only_df.iloc[:,[1]],50), color='red', linestyle='solid', lw=2, alpha=1)
plt.text(xlocation , (np.nanpercentile(Open_ocean_only_df.iloc[:,[1]],50)), 'MSL= ' + str(np.round(np.nanpercentile(Open_ocean_only_df.iloc[:,[1]],50),2)), ha='left', va='bottom')
#plt.axhline(y=0.9, color='tomato', linestyle='--', lw=1, alpha=0.5)
plt.ylim(ylims)
plt.ylabel("Water level [m relative]")
plt.xlabel("Time")
# Put a legend to the right of the current axis
#lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend()
#plot tides
#ax=plt.subplot(2,1,2)
#plt.title('Max and min water surface elevation along the estuary at time of high & low tide')
#ax.set_prop_cycle(color=['slateblue', 'tomato', ], linestyle=[ '-', '-'], lw=[2,2])
#WL_df.iloc[:,[2,3]].plot(legend=False, ax=ax)
#plt.ylabel("Water level [m AHD]")
#plt.ylim(ylims)
#plt.xlabel("Time")
##lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#ax.legend()
#plot tides
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
#========================================================#
#fully automated part of the code doing the data extraction
#========================================================#
#load RMA2 data
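#NOTE: fs, variable, maxrowsincsv, Plot_scenario, node_dict and scenario_dict are used below but are not set in this script; they are assumed to be defined in the input-parameters section (as in the Hunter post-processing script)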
Summary_df = pd.DataFrame()
df = pd.DataFrame()
for f in fs:
#set input and output directories
input_directory = main_input_directory + f + '_' + Order_ID
# Set working directory (where postprocessed NARCliM data is located)
os.chdir(input_directory)
#Load data file
if variable == 'depth' or variable == 'elev':
Clim_Var_CSVs = glob.glob('*' + variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ', nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
df= df.drop(columns=['Year', 'Hour'])
a=len(df.columns)-1
df=df.iloc[:,:a]
if variable == 'vel':
#x velocity
Clim_Var_CSVs = glob.glob('*' +'x'+ variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ', nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
dfx= df.drop(columns=['Year', 'Hour','1'])
#y velocity
Clim_Var_CSVs = glob.glob('*' +'y'+ variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ',nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
dfy= df.drop(columns=['Year', 'Hour','1'])
df = np.sqrt(dfx*dfx + dfy*dfy)
df.columns = df.columns + '_'+ f + '_'+ variable
#cut down the df to start and end date
df = df[datetime.strptime(startdate , '%Y %m %d').date():datetime.strptime(enddate, '%Y %m %d').date()]
Summary_df = pd.concat([Summary_df, df], axis=1, join='outer')
HD_Summary_df = Summary_df
#========================================================#
#========================================================#
#interactive data analysis part of the code - alternatively, the dataframe could be written to csv at this stage and analysed with a separate code.
#========================================================#
#Summary_df.columns = chainages
#subset the input data frame
Summary_df_nodes = pd.DataFrame()
#for node, gauges in nodes:
for key,value in node_dict.items():
df1 = HD_Summary_df.filter(regex='^'+ str(key)+'_').filter(regex=variable)
df1.columns = [value + '_' + str(s) for s in df1.columns]
Summary_df_nodes = pd.concat([Summary_df_nodes, df1], axis=1, join='outer')
for key,value in scenario_dict.items():
Summary_df_nodes.columns = pd.Series(Summary_df_nodes.columns).str.replace(key, value)
combined_df = pd.DataFrame()
combined_df = pd.concat([Summary_df_nodes , WL_df], axis=1, join='outer')
combined_df.columns = pd.Series(combined_df.columns).str.replace("(",'') #brackets mess up the regex commands below
combined_df.columns = pd.Series(combined_df.columns).str.replace(")",'')
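#aggregate the combined model and gauge series to hourly maxima so both share a common hourly time base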
combined_df_hourly = combined_df.resample('1H').max()
#this needs to be fixed to be more generic!
Histogram_df = combined_df.loc[datetime.strptime(startdate , '%Y %m %d').date() : datetime.strptime(enddate , '%Y %m %d').date()] #dataframe for histogram comparison between RMA and MHL
#========================================================#
#drop first gauge which doesn't have any data in 2006
#gauges = gauges[1:]
#========================================================#
# Plot water level histograms at control points
#========================================================#
ylims = [-0.9,2.5]
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 5}
matplotlib.rc('font', **font)
lwd_hist = 2 #line width of histograms
i=1
png_out_name = output_directory + '/' + Plot_scenario + '/RMA_vs_MHL_gauges_water level histogram and exceedance planes '+ startdate + '_' + enddate + '2.pdf'
fig = plt.figure(figsize=(len(gauges)*7,12))
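#NOTE: the exceedance probabilities quoted in the labels below are drawn as percentiles, with percentile = 100 - exceedance (e.g. the 13% exceedance level is the 87th percentile); these appear to be the tidal-plane exceedance levels commonly used for NSW estuaries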
for Gauge in order_colnames:
#Gauge = order_colnames[0]
ax=plt.subplot(1,len(gauges),i)
plt.title( Gauge + ' WL '+ startdate + '-' + enddate)
xlocation = 0.6
#Gauge_only_df = Histogram_df.filter(regex=Gauge[3:])
Gauge_only_df = combined_df.filter(regex=Gauge[3:])
#scenario 1
TS_1 = Gauge_only_df[Gauge+'_MHL']
TS_1.name = 'MHL full period' + str(TS_1.index[0].date()) + '_' + str(TS_1.index[-1].date())
if len(TS_1[TS_1.notnull()])>0:
color_1 = 'blue'
width = 1
#full period density comparison for individual gauges overlaid by tidal planes
seaborn.kdeplot(TS_1, vertical=True, color=color_1, lw=1, linestyle='dotted', ax=ax)
Gauge_only_df = Gauge_only_df[Gauge_only_df.iloc[:,0].notnull()]
TS_1 = Gauge_only_df[Gauge+'_MHL']
TS_1 = TS_1.loc[datetime.strptime(startdate , '%Y %m %d').date() : datetime.strptime(enddate , '%Y %m %d').date()]
TS_1.name = 'MHL period from ' + startdate + '_' + enddate
if len(TS_1[TS_1.notnull()])>0:
color_1 = 'royalblue'
width = 1
#full period density comparison for individual gauges overlaid by tidal planes
seaborn.kdeplot(TS_1, vertical=True, color=color_1, lw=lwd_hist, ax=ax)
#seaborn.kdeplot(Gauge_only_df.iloc[:,2], vertical=True, color=color_1, lw=3)
plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#scenario 2
#TS_1 = Gauge_only_df.filter(regex='elev')
TS_1 = Gauge_only_df.iloc[:,0][Gauge_only_df.iloc[:,0].notnull()]
color_1 = 'red'
width = 1
seaborn.kdeplot(TS_1, vertical=True, color=color_1, lw=lwd_hist, ax=ax)
#plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
#plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
#plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
#plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
#plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
#plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
#plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
#plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
#plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
#plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#scenario 3
TS_1 = Gauge_only_df.iloc[:,1][Gauge_only_df.iloc[:,1].notnull()]
color_1 = 'limegreen'
width = 1
seaborn.kdeplot(TS_1, vertical=True, color=color_1, lw=lwd_hist, ax=ax)
plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
#plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
#plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
#plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
#plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
#plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#finish the plot
plt.ylim(ylims)
if i == 1:
plt.ylabel("Water level [m AHD]")
plt.xlabel("Probability/density")
fig.tight_layout()
i=i+1
fig.savefig(png_out_name)
plt.close()
#========================================================#
#========================================================#
# Plot MHL water levels only
#========================================================#
ylims = [-1,1.5]
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 6}
matplotlib.rc('font', **font)
i=1
png_out_name = output_directory + '/' + Plot_scenario + '/MHL_gauges_water level histograms and exceedance planes '+ startdate + '_' + enddate + '.pdf'
fig = plt.figure(figsize=(len(gauges)*5,11))
for Gauge in order_colnames:
#Gauge = order_colnames[0]
ax=plt.subplot(1,len(gauges),i)
plt.title( Gauge + ' WL '+ startdate + '-' + enddate)
xlocation = 0.6
TS_1 = combined_df[Gauge+'_MHL']
TS_1 = TS_1[TS_1.notnull()]
#scenario 1
TS_1.name = 'Full Period ' + str(TS_1.index[0].date()) + '_' + str(TS_1.index[-1].date())
color_1 = 'royalblue'
width = 1
#full period density comparison for individual gauges overlaid by tidal planes
seaborn.kdeplot(TS_1, vertical=True, color=color_1, lw=2, ax=ax)
#seaborn.kdeplot(Gauge_only_df.iloc[:,2], vertical=True, color=color_1, lw=3)
plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#scenario 2
#TS_1 = Gauge_only_df.filter(regex='elev')
TS_1 = TS_1.loc[datetime.strptime(startdate , '%Y %m %d').date() : datetime.strptime(enddate , '%Y %m %d').date()]
if len(TS_1[TS_1.notnull()])>0:
TS_1.name = 'Period from ' + startdate + '_' + enddate
color_1 = 'red'
width = 1
seaborn.kdeplot(TS_1, vertical=True, color=color_1, lw=2, ax=ax)
plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#finish the plot
plt.ylim(ylims)
if i == 1:
plt.ylabel("Water level [m AHD]")
plt.xlabel("Probability/density")
fig.tight_layout()
i=i+1
fig.savefig(png_out_name)
plt.close()
#========================================================#
#========================================================#
# Plot time series over various intervals at control points
#========================================================#
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 5}
matplotlib.rc('font', **font)
halfwindows = [1,2,5,10,15]
for halfwindow in halfwindows:
#halfwindow = 10
png_out_name = output_directory + '/' + Plot_scenario + '/water level time series '+ startdate + '_' + enddate + ' _window_of_' + str(halfwindow*2) +'_days.pdf'
fig = plt.figure(figsize=(10,len(gauges)*3))
i=1
for Gauge in order_colnames:
Gauge_only_df = combined_df.filter(regex=Gauge[3:])
combined_df.columns
#Gauge_only_df = df1.filter(regex=Gauge)
#Gauge_only_df_1 = Gauge_only_df.loc[(Gauge_only_df.index >= '2008-05-09 12:00:00') & (Gauge_only_df.index <= '2008-05-20 12:00:00')]
Gauge_only_df_1 = Gauge_only_df[datetime.strptime(tideanalysis_day , '%Y %m %d').date()- timedelta(days=halfwindow) : datetime.strptime(tideanalysis_day , '%Y %m %d').date() + timedelta(days=halfwindow) ]
Gauge_only_df_1 = Gauge_only_df_1[Gauge_only_df_1.iloc[:,0].notnull()]
ax=plt.subplot(len(gauges),1,i)
plt.title('Tidal water levels at ' + Gauge + ' | ' + River +' Estuary')
ax.set_prop_cycle(color=['steelblue', 'coral', 'steelblue', 'deeppink'],linestyle=[ '-', '-','--','-'], lw=[0.8,0.8,0.8,0.8])
Gauge_only_df_1.iloc[:,[0,1,2]].plot( legend=False, ax=ax)
plt.axhline(y=0, color='black', linestyle='-', lw=0.8, alpha=0.5)
#plt.axhline(y=0.9, color='tomato', linestyle='--', lw=1, alpha=0.5)
plt.ylim(ylims)
plt.ylabel("Water level [m AHD]")
plt.xlabel("Time")
ax.legend()
fig.tight_layout()
i=i+1
fig.savefig(png_out_name)
plt.close()
#========================================================#
##========================================================#
##write out exceedance data to csv
##========================================================#
#Summary={}
#Summary['13% exceedance'] = Wollow_13ExPr
#Summary['6.4% exceedance'] = Wollow_6p4ExPr
#Summary['1.3% exceedance'] = Wollow_1_3ExPr
#Summary['0.01% exceedance'] = Wollow_0p01ExPr
#Summary['50% exceedance'] = Wollow_50ExPr
#pdf1 = pd.DataFrame(Summary, index=[0])
#pdf1.to_csv(os.path.join( Out_Folder + Gauge + '_entrance_stats.csv'))
##========================================================#

@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 19:02:20 2020
@author: z5025317
"""
#this code opens all the MHL tide gauge csv files and writes the key metadata they contain into a csv file for GIS import
#==========================================================#
#Load packages
#==========================================================#
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn
os.chdir('C:/Users/z5025317/UNSW/Danial Khojasteh - HEMIP/RMA_result_explorer_code')
year = '2015'
Ocean_gauges_folder = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Global_HEMIP_Data/MHL_Gauge_Data/20200409_Recieved/'
filenames = glob.glob(Ocean_gauges_folder +'*/' + '*.csv')
WL_df = pd.DataFrame()
for tgr_csv_name in filenames:
print (os.path.basename(tgr_csv_name)[:-11])
#tgr_csv_name = filenames[3]
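#the MHL export files appear to contain a small metadata block (read here with skiprows=17, nrows=7) followed by the time series data starting after row 30, with the water level in the second-to-last column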
Full_df = pd.read_csv(tgr_csv_name ,nrows=7, skiprows= 17, parse_dates = True)
Full_df = Full_df.transpose()
Full_df['River'] = os.path.basename(os.path.dirname(tgr_csv_name))
Full_df2 = pd.read_csv(tgr_csv_name ,header= 0, skiprows= 30, parse_dates = True)
Full_df['Start_Date'] =Full_df2.Date[0]
Full_df['End_Date'] =Full_df2.Date[len(Full_df2.Date)-1]
Full_df2.index = pd.to_datetime(Full_df2.Date, format = '%d/%m/%Y') + pd.to_timedelta(Full_df2.Time, unit='h')
Full_df2 = Full_df2.iloc[:,[-2]]
Full_df2.columns = ['WL[AHD]']
WL_df = pd.concat([WL_df, Full_df], axis=0)
#WL_df.to_csv(Ocean_gauges_folder + 'summary_dataframe_of_all_gauges4.csv')
#create a histogram for each gauge if desired
#this isn't working yet due to gaps and weird stuff in the data
ylims = [-1.5,2.2]
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 6}
matplotlib.rc('font', **font)
pdf_out_name = os.path.join(os.path.dirname(tgr_csv_name), os.path.basename(tgr_csv_name)[:-11] + '_histogram.pdf')
fig = plt.figure(figsize=(4,8))
#np.array(np.float16(Full_df2['WL[AHD]'])) #no-op leftover from interactive testing (parenthesis balanced and line commented out)
#NOTE: the plotting below still references i, Gauge and xlocation, which are not defined in this script
#full period density comparison for individual gauges overlaid by tidal planes
Wollow_13ExPr = np.nanpercentile(np.array(Full_df2['WL[AHD]']),87)
np.percentile(Full_df2['WL[AHD]'].astype(float), 50)
Wollow_6p4ExPr = np.nanpercentile(Full_df2['WL[AHD]'].astype(float),93.6)
Wollow_1_3ExPr = np.nanpercentile(Full_df2,98.7)
Wollow_0p01ExPr = np.nanpercentile(Full_df2,99.99)
Wollow_50ExPr = np.nanpercentile(Full_df2,50)
ax=plt.subplot(4,1,i)
plt.title( Gauge + ' water level density comparison - Jan 2016- Feb 2020')
#WL_df[Gauge].plot.kde(xlim=(-0.5,1),vertical=True, lw=3, ax=ax)
seaborn.kdeplot(Full_df2['WL[AHD]'], vertical=True, color='royalblue', lw=3)
plt.axhline(y=Wollow_13ExPr, color='firebrick', linestyle='--', lw=3, alpha=1)
plt.text( xlocation , (Wollow_13ExPr), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=Wollow_6p4ExPr, color='orange', linestyle='--', lw=3, alpha=1)
plt.text(xlocation , (Wollow_6p4ExPr), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=Wollow_1_3ExPr, color='yellow', linestyle='--', lw=3, alpha=1)
plt.text( xlocation ,Wollow_1_3ExPr , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=Wollow_0p01ExPr, color='limegreen', linestyle='--', lw=3, alpha=1)
plt.text( xlocation , Wollow_0p01ExPr , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=Wollow_50ExPr, color='cyan', linestyle='--', lw=3, alpha=1)
plt.text( xlocation , Wollow_50ExPr , '50% exceedance', ha='left', va='bottom')
plt.ylim(-1,1.7)
plt.xlabel("Probability/density")
plt.ylabel("Water level [m AHD]")
fig.tight_layout()
fig.savefig(pdf_out_name) #save to the pdf path defined above (png_out_name is not defined in this script)
plt.close()

@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 19:02:20 2020
@author: z5025317
"""
#creates a tide boundary time series in RMA format based on MHL tide data as provided for the RAP
#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import glob
os.chdir('C:/Users/z5025317/UNSW/Danial Khojasteh - HEMIP/RMA_result_explorer_code')
year = '2015'
Ocean_gauges_folder = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Global_HEMIP_Data/MHL_Gauge_Data/20200409_Recieved/Open_ocean/'
filenames = glob.glob(Ocean_gauges_folder + '*.csv')
filenames = [filenames[0],filenames[1], filenames[3]]
timestep = 0
for tgr_csv_name in filenames:
print (tgr_csv_name)
#tgr_csv_name = filenames[2] #debug leftover; would override the loop variable if left active
tgr_outname = Ocean_gauges_folder + os.path.basename(tgr_csv_name)[:-4] + '_2' + year
Full_df = pd.read_csv(tgr_csv_name ,header= 0, skiprows= 30, parse_dates = True)
Full_df.index = pd.to_datetime(Full_df.Date, format = '%d/%m/%Y') + pd.to_timedelta(Full_df.Time, unit='h')
Full_df = Full_df.iloc[:,[-2]]
Full_df.columns = ['WL[AHD]']
Full_df = Full_df.loc[(Full_df.index >= year + '-01-01 00:00:00') & (Full_df.index <= str(np.int(year)+1) + '-01-01 00:00:00')]
# str(df['2'][1500].astype(int))
# RMA2: create input file
fq = open(
os.path.join(Ocean_gauges_folder, tgr_outname + '.tgr'),
'w')
fq.write('TT MHL tide for ' + os.path.basename(tgr_csv_name)+ ' ' + year + '\n')
fq.write('{:<5}{:>3}{:>8}{:>8}\n'.format('CL', '', '2', year))
#fq.write('{:<5}{:>3}{:>8}{:>8}\n'.format('CL', '', int(df['3'][0]), int(df['4'][0])))
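#each HD record written below holds the day of year, the decimal hour within that day and the water level in m AHD, in fixed-width columns (5/3/8/8 characters) to suit the RMA2 .tgr boundary format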
for i in range(1,len(Full_df),1):
# For each new element
fq.write('{:<5}{:>3}{:>8}{:>8}\n'.format('HD', str(np.int(Full_df.index[i-1].strftime('%j'))), str(np.round((np.float(Full_df.index[i-1].strftime('%M'))+ np.float(Full_df.index[i-1].strftime('%H'))*60) /60,2)), str(np.round(np.float(Full_df['WL[AHD]'][i-1]),3))))
fq.write('ENDDATA')
fq.close()

@ -0,0 +1,214 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 25 10:56:21 2019
@author: z5025317
"""
#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
import seaborn
matplotlib.style.use('ggplot')
#========================================================#
#========================================================#
#set input parameters and paths
#========================================================#
Gauges = [ 'Stockton', 'Raymond', 'Oakhampton', 'Paterson'] #name of all gauges or IDs in a given river
River = 'Clarence'
#name of plot scenario/experiment . This is a unique identifier for this particular scenario comparison
Plot_scenario = 'Clarence_gauge_analysis_V1'
#time frames for analysis
tideanalysis_day = '2006 06 07' # the time series plots will be centred around this day.
year = 2006
startdate = '2006 01 01' #start and end of the analysis period; assumed values that are only used to label output file names below - adjust as needed
enddate = '2006 12 30'
#set directories
Folder = 'C:/Users/z5025317/OneDrive - UNSW/WRL_Postdoc_Manual_Backup/WRL_Postdoc/Projects/Paper#2_HD_modeling/01_Data/Tides/CSV/' #needs to be adjusted - should be a folder with all the MHL data in individual CSV files
Out_Folder = 'C:/Users/z5025317/OneDrive - UNSW/WRL_Postdoc_Manual_Backup/WRL_Postdoc/Projects/Paper#2_HD_modeling/02_Analysis/Tides/' #needs to be adjusted
#create the Plot_scenario subfolder used for the output figures below if it doesn't exist yet
if not os.path.exists(Out_Folder + '/' + Plot_scenario):
os.makedirs(Out_Folder + '/' + Plot_scenario)
#========================================================#
#========================================================#
#load MHL data and store in a single dataframe
#========================================================#
WL_df = pd.DataFrame()
for gauge in Gauges:
# gauge = 'Oyster'
print('loading data for ' + gauge)
CSVpath = glob.glob(Folder + '*' + gauge + '*' ) # will find the csv file corresponding to the gauge name or ID
print(CSVpath)
Full_df = pd.read_csv(CSVpath[0] ,header= 0, skiprows= 30, parse_dates = True) #needs to be adjusted to fit the MHL format
#Full_df.index = pd.to_datetime(Full_df.Date, format = '%d/%m/%Y') + pd.to_timedelta(Full_df.Time, unit='h') #time series index may have to be adjusted, depending on the outcome of parse_dates
Full_df.index = pd.to_datetime(Full_df.Date, format = '%d/%m/%Y') + pd.to_timedelta(Full_df.Time, unit='h')
Full_df = Full_df.iloc[:,[-2]]
Full_df.columns = ['WL[AHD]']
Full_df = Full_df.loc[(Full_df.index >= str(year) + '-01-01 00:00:00') & (Full_df.index <= str(np.int(year)+1) + '-01-01 00:00:00')]
Full_df = Full_df.loc[~Full_df.index.duplicated(keep='first')] #get rid of possible duplicate datetimes
WL_df = pd.concat([WL_df, Full_df], axis=1)
WL_df.columns = Gauges
#subset the data in time if desired (e.g. to a given year)
WL_df_1 = WL_df.loc[(WL_df.index >= '2017-01-10 00:00:00') & (WL_df.index <= '2017-01-18 00:00:00')]
#better
#WL_df_1 = WL_df.loc[(WL_df.index >= datetime.strptime(startdate , '%Y %m %d').date()) & (WL_df.index <= datetime.strptime(enddate , '%Y %m %d').date())] #may need to be adjusted
#export dataframe to CSV if desired
#export the pandas data frame as a CSV file within the output directory is desired
#WL_df.to_csv(Folder + 'data_combined.csv')
#========================================================#
#========================================================#
# Plot water level histogram at all gauges on a single plot
#========================================================#
#full period density comparison all four gauges
png_out_name = Out_Folder + River + '_water level histograms.pdf'
fig = plt.figure(figsize=(40,18))
ax=plt.subplot(1,1,1)
plt.title(River + ' water level density comparison')
WL_df.iloc[:,[0,1,2,3]].plot.kde(xlim=(-1.5,1.5), ax=ax)
fig.tight_layout()
fig.savefig(png_out_name)
plt.close()
#========================================================#
#========================================================#
# Plot water level histogram at all gauges
#========================================================#
ylims = [-0.9,2.2]
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 6}
matplotlib.rc('font', **font)
#plot
i=1
png_out_name = Out_Folder + '/' + Plot_scenario + '/' + River + 'B_water level histogram and exceedance planes '+ startdate + '_' + enddate + '3.pdf'
fig = plt.figure(figsize=(len(Gauges)*4,8))
for Gauge in Gauges:
#Gauge = 'Yamba'
xlocation = 0.6
#full period density comparison for individual gauges overlaid by tidal planes
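#note: an X% exceedance level is the water level exceeded X% of the time, i.e. the (100 - X)th
#percentile of the record - hence the 13% plane is computed as the 87th percentile below, and so on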
Wollow_13ExPr = np.nanpercentile(WL_df[Gauge],87)
Wollow_6p4ExPr = np.nanpercentile(WL_df[Gauge],93.6)
Wollow_1_3ExPr = np.nanpercentile(WL_df[Gauge],98.7)
Wollow_0p01ExPr = np.nanpercentile(WL_df[Gauge],99.99)
Wollow_50ExPr = np.nanpercentile(WL_df[Gauge],50)
ax=plt.subplot(4,1,i)
plt.title( Gauge + ' water level density comparison - Jan 2016- Feb 2020')
#WL_df[Gauge].plot.kde(xlim=(-0.5,1),vertical=True, lw=3, ax=ax)
seaborn.kdeplot(WL_df[Gauge], vertical=True, color='royalblue', lw=3, ax=ax)
plt.axhline(y=Wollow_13ExPr, color='firebrick', linestyle='--', lw=3, alpha=1)
plt.text( xlocation , (Wollow_13ExPr), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=Wollow_6p4ExPr, color='orange', linestyle='--', lw=3, alpha=1)
plt.text(xlocation , (Wollow_6p4ExPr), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=Wollow_1_3ExPr, color='yellow', linestyle='--', lw=3, alpha=1)
plt.text( xlocation ,Wollow_1_3ExPr , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=Wollow_0p01ExPr, color='limegreen', linestyle='--', lw=3, alpha=1)
plt.text( xlocation , Wollow_0p01ExPr , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=Wollow_50ExPr, color='cyan', linestyle='--', lw=3, alpha=1)
plt.text( xlocation , Wollow_50ExPr , '50% exceedance', ha='left', va='bottom')
plt.ylim(-1,1.7)
plt.xlabel("Probability/density")
plt.ylabel("Water level [m AHD]")
fig.tight_layout()
i=i+1
fig.savefig(png_out_name)
plt.close()
#========================================================#
#========================================================#
# Save exceedance planes to csv
#========================================================#
Summary={}
Summary['13% exceedance'] = Wollow_13ExPr
Summary['6.4% exceedance'] = Wollow_6p4ExPr
Summary['1.3% exceedance'] = Wollow_1_3ExPr
Summary['0.01% exceedance'] = Wollow_0p01ExPr
Summary['50% exceedance'] = Wollow_50ExPr
pdf1 = pd.DataFrame(Summary, index=[0])
pdf1.to_csv(os.path.join(Out_Folder, Gauge + '_entrance_stats.csv'))
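#optional sketch (not part of the original workflow): the Summary dict above only holds the planes
#computed for the last gauge of the plotting loop; the lines below (re-using WL_df and Gauges as
#defined earlier, with a hypothetical output file name) write one row of exceedance planes per gauge
Summary_all = {}
for g in Gauges:
    Summary_all[g] = {'50% exceedance': np.nanpercentile(WL_df[g], 50),
                      '13% exceedance': np.nanpercentile(WL_df[g], 87),
                      '6.4% exceedance': np.nanpercentile(WL_df[g], 93.6),
                      '1.3% exceedance': np.nanpercentile(WL_df[g], 98.7),
                      '0.01% exceedance': np.nanpercentile(WL_df[g], 99.99)}
pd.DataFrame(Summary_all).transpose().to_csv(os.path.join(Out_Folder, River + '_exceedance_stats_all_gauges.csv'))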
#========================================================#
#========================================================#
# Plot water level time series at control points
#========================================================#
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 5}
matplotlib.rc('font', **font)
halfwindows = [1,2,5] #Days to add before and after central date chosen for analysis
for halfwindow in halfwindows:
png_out_name = Out_Folder + '/' + Plot_scenario + '/' +River+ 'water level time series '+ startdate + '_' + enddate + ' _window_of_' + str(halfwindow*2) +'_days.pdf'
fig = plt.figure(figsize=(10,len(Gauges)*3))
i=1
for Gauge in Gauges:
Gauge_only_df = WL_df.filter(regex=Gauge)
#Gauge_only_df_1 = Gauge_only_df.loc[(Gauge_only_df.index >= '2008-05-09 12:00:00') & (Gauge_only_df.index <= '2008-05-20 12:00:00')]
Gauge_only_df_1 = Gauge_only_df[datetime.strptime(tideanalysis_day , '%Y %m %d').date()- timedelta(days=halfwindow) : datetime.strptime(tideanalysis_day , '%Y %m %d').date() + timedelta(days=halfwindow) ]
ax=plt.subplot(len(Gauges),1,i)
plt.title('Tidal water levels at 4 locations in the ' + River +' Estuary')
ax.set_prop_cycle(color=['steelblue', 'coral', 'steelblue', 'deeppink'],linestyle=[ '-', '-','--','-'], lw=[1,1,1,2])
Gauge_only_df_1.iloc[:,[0,1,2]].plot( legend=False, ax=ax)
plt.axhline(y=0, color='slateblue', linestyle='--', lw=1, alpha=0.5)
#plt.axhline(y=0.9, color='tomato', linestyle='--', lw=1, alpha=0.5)
plt.ylim(ylims)
plt.ylabel("Water level [m AHD]")
plt.xlabel("Time")
ax.legend()
fig.tight_layout()
i=i+1
fig.savefig(png_out_name)
plt.close()
#========================================================#
# Plot water level histograms at control points
#========================================================#

@ -0,0 +1,186 @@
#==========================================================#
#Load packages
#==========================================================#
# Set working directory where python code is located and results are to be stored
import os
os.chdir('H:/WRL_Projects/Hunter_CC_Modeling/Code/RMA_result_explorer')
from py_rmatools_v02 import rma
import pandas as pd
import geopandas as gpd
import argparse
import glob
#==========================================================#
#==========================================================#
#Input parameters
#==========================================================#
River = 'Clarence'
foldernames =['046_CLA_CAL_01' , 'basecase_2008'] #['TWD_HYD_CAL_05', 'TWD_HYD_CAL_06'] #'base_case_2005' #SLR_1m_2005
foldername = '046_CLA_CAL_01'
Fishnet_ID = 'FN1'
startyear=2008 #it's critical that this matches the year of the run since the code will overwrite the year from the rma file with this one
endyear=2008 #this is technically only needed when multiple years have been run (for which the code needs to be adjusted below)
year=range(startyear, endyear+1)
#==========================================================#
#==========================================================#
#Set paths to file locations - largely automated
#==========================================================#
#load and refine node vs mesh shapefile to store the results of the statistics
Fishnet_path = glob.glob('J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Models/' + River + '/04_Results/Chainages/*' + Fishnet_ID + '*.shp')
Node_mesh = gpd.read_file(Fishnet_path[0])
Node_mesh = Node_mesh.loc[Node_mesh['Node'].notnull()]
Node_mesh = pd.DataFrame(Node_mesh)
Node_mesh = Node_mesh.reset_index()
#Node_mesh.index = Node_mesh['Field1']
#Node_mesh = Node_mesh.iloc[:,-4:]
#Node_mesh.columns = ['Node', 'X', 'Y', 'geometry']
node = Node_mesh['Node'].astype(int).values
run_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Models/' + River + '/03_Simulations/RAP_SLR/' + foldername +'/'
#set directory path for output files
output_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Models/' + River + '/04_Results/Output/'+ foldername + '_' + Fishnet_ID + '/'
#==========================================================#
#==========================================================#
#100% automated part of the code doing the data extraction
#==========================================================#
f = os.path.basename(glob.glob(run_directory + '*'+ str(startyear) + '*.rma')[0])[:-4] #rma file name without .rma ending
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
time=[]
xvel=[]
yvel=[]
totvel=[]
elevation=[]
depth=[]
#
#==========================================================#
#extract elevation
#==========================================================#
print('extracting RMA2 results for ' + f)
filename1= output_directory + foldername +'_elev'+ '_' + Fishnet_ID + '.txt'
print(filename1)
target = open(filename1, 'w')
target.write("Year Hour ")
for inode in node:
target.write("%i " % inode)
target.write('\n')
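#the resulting text file is space separated: a header row of node IDs (written above), then one row
#per output time step holding the run year, the simulation hour and the elevation at each of the nodes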
for jj in year:
f1=run_directory + f #+ '_%d' %jj
R=rma()
print(f1)
R.open(f1)
print (jj)
while R.next():
time.append(R.time)
target.write("%.0f %r " %(jj,R.time))
for inode in node:
target.write("%f " % R.elevation[inode])
target.write('\n')
#print (" Press any ENTER to exit")
target.close()
#f=input()
#==========================================================#
#==========================================================#
#extract depth
#==========================================================#
filename1= output_directory + f+'_depth.txt'
print(filename1)
target = open(filename1, 'w')
target.write("Year Hour ")
for inode in node:
target.write("%i " % inode)
target.write('\n')
for jj in year:
print(jj)
f1=run_directory + f #+ '_%d' %jj
R=rma()
print(f1)
R.open(f1)
print (jj)
while R.next():
time.append(R.time)
target.write("%.0f %r " %(jj,R.time))
for inode in node:
target.write("%f " % R.depth[inode])
target.write('\n')
#print (" Press any ENTER to exit")
target.close()
#f=input()
#==========================================================#
#==========================================================#
#extract xvel
#==========================================================#
filename1= output_directory + f+'_xvel.txt'
print(filename1)
target = open(filename1, 'w')
target.write("Year Hour ")
for inode in node:
target.write("%i " % inode)
target.write('\n')
print (filename1)
for jj in year:
f1=run_directory + f #+ '_%d' %jj
R=rma()
print(f1)
R.open(f1)
print (jj)
while R.next():
time.append(R.time)
target.write("%.0f %r " %(jj,R.time))
for inode in node:
target.write("%f " % R.xvel[inode])
target.write('\n')
#print (" Press any ENTER to exit")
target.close()
#f=input()
#==========================================================#
#==========================================================#
#yvel
#==========================================================#
filename1= output_directory + f+'_yvel.txt'
print(filename1)
target = open(filename1, 'w')
target.write("Year Hour ")
for inode in node:
target.write("%i " % inode)
target.write('\n')
print (filename1)
for jj in year:
f1=run_directory + f #+ '_%d' %jj
R=rma()
print(f1)
R.open(f1)
print (jj)
while R.next():
time.append(R.time)
target.write("%.0f %r " %(jj,R.time))
for inode in node:
target.write("%f " % R.yvel[inode])
target.write('\n')
#print (" Press any ENTER to exit")
target.close()
#f=input()
#==========================================================#

@ -0,0 +1,232 @@
#==========================================================#
#Load packages
#==========================================================#
# Set working directory where python code is located and results are to be stored
import os
os.chdir('J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/RMA_result_explorer_code')
from py_rmatools_v02 import rma
import geopandas as gpd
import pandas as pd
import glob
from pyrma import pyrma
#==========================================================#
#==========================================================#
#Input parameters
#==========================================================#
#set beginning and end years and corresponding scenario code
#River = 'Shoalhaven'
#foldernames = ['base_case_2005']# , 'SLR_1m_2005'] #['TWD_HYD_CAL_05', 'TWD_HYD_CAL_06'] #'base_case_2005' #SLR_1m_2005
#Order_ID = 'Core_order' #choose order column name
#
#startyear=2005 #it's critical that this matches the year of the run since the code will overwrite the year from the rma file with this one
#endyear=2005 #this is technically only needed when multiple years have been run (for which the code needs to be adusted below)
#year=range(startyear, endyear+1)
#set beginning and end years and corresponding scenario code
River = 'Clarence'
foldernames =['046_CLA_CAL_01' , 'basecase_2008'] #['TWD_HYD_CAL_05', 'TWD_HYD_CAL_06'] #'base_case_2005' #SLR_1m_2005
Order_ID = 'Core_ord_2' #choose order column name
startyear=2008 #it's critical that this matches the year of the run since the code will overwrite the year from the rma file with this one
endyear=2008 #this is technically only needed when multiple years have been run (for which the code needs to be adjusted below)
year=range(startyear, endyear+1)
#==========================================================#
#==========================================================#
#After here should be mostly automated
#==========================================================#
for foldername in foldernames:
#foldername = foldernames[0]
#set run directory where RMA results are located
run_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Models/' + River + '/03_Simulations/RAP_SLR/' + foldername +'/'
#set directory path for output files
output_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Models/' + River + '/04_Results/Output/'+ foldername + '_' + Order_ID + '/'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
#load MHL gauge shapefile to store the results of the statistics
MHL_gauges_df = gpd.read_file('J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Global_Data/GIS/MHL_gauges_received_MGA56.shp')
MHL_gauges_df = MHL_gauges_df.loc[MHL_gauges_df['River'] == River]
MHL_gauges_df = MHL_gauges_df.loc[MHL_gauges_df[Order_ID].notnull()]
MHL_gauges_df = pd.DataFrame(MHL_gauges_df)
MHL_gauges_df = MHL_gauges_df.reset_index()
MHL_gauges_df['NrstNode'] = 0
# Load RMA mesh and find nearest node for each MHL gauge as basis for the data extraction
print('Reading RMA mesh file and finding the nearest node for the following XY locations of input points')
meshpath = glob.glob(run_directory + '*v2.rm1')
nodes, elements = pyrma.loadMesh(meshpath[0])
mesh = pd.DataFrame(elements, index=[0]).transpose()
mesh.columns = ['name']
mesh['centroid'] = [e.centroid for e in elements.values()]
mesh['nodeIDs'] = [e.nodes for e in elements.values()]
#trying to make it work for nodes rather than elements (not working yet)
# elements.values()[0].centroid
# nodedf['centroid'] =
# [e.location() for e in elements.values()]
# nodedf = pd.DataFrame(elements, index=[0]).transpose()
# nodes1 = pyrma.loadNodes(meshpath[0])
# nodes1.values()[0].location
# type(elements.values()[0])
# nodes.keys().location()
# elements.values()[0].nodes
# nodes.values().location()
# nodes.values()[0].location()
node = []
for i in range(0,len(MHL_gauges_df),1):
# Find nearest element in RMA mesh
x = MHL_gauges_df['Easting'][i]
y = MHL_gauges_df['Northing'][i]
print(x,y)
mesh['calc'] = [pyrma.point(x, y).dist(c) for c in mesh['centroid']]
#idx = mesh['calc'].idxmin()
#node.append(idx)
node.append(mesh['nodeIDs'][mesh['calc'].idxmin()][0].ID)
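# the line above takes the element whose centroid is closest to the gauge coordinates and stores the
# ID of the first node of that element, i.e. a nearest-element approximation rather than a true
# nearest-node search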
MHL_gauges_df['NrstNode'] = node
MHL_gauges_df.to_csv(output_directory + '_Gauges_with_nodes.csv')
#==========================================================#
#==========================================================#
#100% automated part of the code doing the data extraction
#==========================================================#
#define rma file from which data will be extracted
f = os.path.basename(glob.glob(run_directory + '*'+ str(startyear) + '*.rma')[0])[:-4] #rma file name without .rma ending
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
time=[]
xvel=[]
yvel=[]
totvel=[]
elevation=[]
depth=[]
#
#==========================================================#
#extract elevation
#==========================================================#
print('Extracting RMA2 results for ' + f)
filename1= output_directory + foldername +'_elev'+ '_' + Order_ID + '.txt'
print(filename1)
target = open(filename1, 'w')
target.write("Year Hour ")
for inode in node:
target.write("%i " % inode)
target.write('\n')
for jj in year:
f1=run_directory + f #+ '_%d' %jj
R=rma()
print(f1)
R.open(f1)
print (jj)
while R.next():
time.append(R.time)
target.write("%.0f %r " %(jj,R.time))
for inode in node:
target.write("%f " % R.elevation[inode])
target.write('\n')
#print (" Press any ENTER to exit")
target.close()
#f=input()
#==========================================================#
# #==========================================================#
# #exctract depth
# #==========================================================#
# filename1= output_directory + foldername+'_depth'+ '_' + Order_ID + '.txt'
# print(filename1)
# target = open(filename1, 'w')
# target.write("Year Hour ")
# for inode in node:
# target.write("%i " % inode)
# target.write('\n')
# for jj in year:
# print(jj)
# f1=run_directory + f #+ '_%d' %jj
# R=rma()
# print(f1)
# R.open(f1)
# print (jj)
#
# while R.next():
# time.append(R.time)
# target.write("%.0f %r " %(jj,R.time))
# for inode in node:
# target.write("%f " % R.depth[inode])
# target.write('\n')
#
# #print (" Press any ENTER to exit")
# target.close()
# #f=input()
# #==========================================================#
# #==========================================================#
# #exctract xvel
# #==========================================================#
# filename1= output_directory + foldername+'_xvel'+ '_' + Order_ID + '.txt'
# print(filename1)
# target = open(filename1, 'w')
#
# target.write("Year Hour ")
# for inode in node:
# target.write("%i " % inode)
# target.write('\n')
# print (filename1)
# for jj in year:
# f1=run_directory + f #+ '_%d' %jj
# R=rma()
# print(f1)
# R.open(f1)
# print (jj)
#
# while R.next():
# time.append(R.time)
# target.write("%.0f %r " %(jj,R.time))
# for inode in node:
# target.write("%f " % R.xvel[inode])
# target.write('\n')
# #print (" Press any ENTER to exit")
# target.close()
# #f=input()
# #==========================================================#
#
# #==========================================================#
# #yvel
# #==========================================================#
# filename1= output_directory + foldername+'_yvel'+ '_' + Order_ID + '.txt'
# print(filename1)
# target = open(filename1, 'w')
#
# target.write("Year Hour ")
# for inode in node:
# target.write("%i " % inode)
# target.write('\n')
# print (filename1)
# for jj in year:
# f1=run_directory + f #+ '_%d' %jj
# R=rma()
# print(f1)
# R.open(f1)
# print (jj)
#
# while R.next():
# time.append(R.time)
# target.write("%.0f %r " %(jj,R.time))
# for inode in node:
# target.write("%f " % R.yvel[inode])
# target.write('\n')
#
# #print (" Press any ENTER to exit")
# target.close()
# #f=input()
# #==========================================================#

@ -0,0 +1,209 @@
#==========================================================#
#Load packages
#==========================================================#
# Set working directory where python code is located and results are to be stored
import os
os.chdir('J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/RMA_result_explorer_code')
from py_rmatools_v02 import rma
import geopandas as gpd
import pandas as pd
import glob
from pyrma import pyrma
#==========================================================#
#==========================================================#
#Input parameters
#==========================================================#
#set beginning and end years and corresponding scenario code
River = 'Tweed'
foldernames =['TWD_HYD_CAL_03','TWD_HYD_CAL_04' , 'TWD_HYD_CAL_05', 'TWD_HYD_CAL_06'] #'base_case_2005' #SLR_1m_2005
Order_ID = 'Core_order' #choose order column name
startyear=2017 #it's critical that this matches the year of the run since the code will overwrite the year from the rma file with this one
endyear=2017 #this is technically only needed when multiple years have been run (for which the code needs to be adjusted below)
year=range(startyear, endyear+1)
#==========================================================#
#==========================================================#
#After here should be mostly automated
#==========================================================#
for foldername in foldernames:
#set run directory where RMA results are located
run_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Models/' + River + '/03_Simulations/RAP_SLR/' + foldername +'/'
#set directory path for output files
output_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Models/' + River + '/04_Results/Output/'+ foldername + '_' + Order_ID + '/'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
#load MHL gauge shapefile to store the results of the statistics
MHL_gauges_df = gpd.read_file('J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Global_Data/GIS/MHL_gauges_received_MGA56.shp')
MHL_gauges_df = MHL_gauges_df.loc[MHL_gauges_df['River'] == River]
MHL_gauges_df = MHL_gauges_df.loc[MHL_gauges_df[Order_ID].notnull()]
MHL_gauges_df = pd.DataFrame(MHL_gauges_df )
MHL_gauges_df = MHL_gauges_df.reset_index()
MHL_gauges_df['NrstNode'] = 0
# Load RMA mesh and find nearest node for each MHL gauge as basis for the data extraction
print('Reading RMA mesh file and finding the nearest node for the following XY locations')
meshpath = glob.glob(run_directory + '*.rm1')
nodes, elements = pyrma.loadMesh(meshpath[0])
mesh = pd.DataFrame(elements, index=[0]).transpose()
mesh.columns = ['name']
mesh['centroid'] = [e.centroid for e in elements.values()]
node = []
for i in range(0,len(MHL_gauges_df),1):
# Find nearest element in RMA mesh
x = MHL_gauges_df['Easting'][i]
y = MHL_gauges_df['Northing'][i]
print(x,y)
mesh['calc'] = [pyrma.point(x, y).dist(c) for c in mesh['centroid']]
idx = mesh['calc'].idxmin()
node.append(idx)
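# idxmin returns the index of the mesh element whose centroid is closest to the gauge, so the value
# stored in 'NrstNode' here is an element index (not a node ID of that element)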
MHL_gauges_df['NrstNode'] = node
MHL_gauges_df.to_csv(output_directory + '_Gauges_with_nodes.csv')
#==========================================================#
#==========================================================#
#100% automated part of the code doing the data extraction
#==========================================================#
#define rma file from which data will be extracted
f = os.path.basename(glob.glob(run_directory + '*'+ str(startyear) + '*.rma')[0])[:-4] #rma file name without .rma ending
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
time=[]
xvel=[]
yvel=[]
totvel=[]
elevation=[]
depth=[]
#
#==========================================================#
#extract elevation
#==========================================================#
print('Extracting RMA2 results for ' + f)
filename1= output_directory + foldername +'_elev'+ '_' + Order_ID + '.txt'
print(filename1)
target = open(filename1, 'w')
target.write("Year Hour ")
for inode in node:
target.write("%i " % inode)
target.write('\n')
for jj in year:
f1=run_directory + f #+ '_%d' %jj
R=rma()
print(f1)
R.open(f1)
print (jj)
while R.next():
time.append(R.time)
target.write("%.0f %r " %(jj,R.time))
for inode in node:
target.write("%f " % R.elevation[inode])
target.write('\n')
#print (" Press any ENTER to exit")
target.close()
#f=input()
#==========================================================#
#==========================================================#
#extract depth
#==========================================================#
filename1= output_directory + foldername+'_depth'+ '_' + Order_ID + '.txt'
print(filename1)
target = open(filename1, 'w')
target.write("Year Hour ")
for inode in node:
target.write("%i " % inode)
target.write('\n')
for jj in year:
print(jj)
f1=run_directory + f #+ '_%d' %jj
R=rma()
print(f1)
R.open(f1)
print (jj)
while R.next():
time.append(R.time)
target.write("%.0f %r " %(jj,R.time))
for inode in node:
target.write("%f " % R.depth[inode])
target.write('\n')
#print (" Press any ENTER to exit")
target.close()
#f=input()
#==========================================================#
#==========================================================#
#extract xvel
#==========================================================#
filename1= output_directory + foldername+'_xvel'+ '_' + Order_ID + '.txt'
print(filename1)
target = open(filename1, 'w')
target.write("Year Hour ")
for inode in node:
target.write("%i " % inode)
target.write('\n')
print (filename1)
for jj in year:
f1=run_directory + f #+ '_%d' %jj
R=rma()
print(f1)
R.open(f1)
print (jj)
while R.next():
time.append(R.time)
target.write("%.0f %r " %(jj,R.time))
for inode in node:
target.write("%f " % R.xvel[inode])
target.write('\n')
#print (" Press any ENTER to exit")
target.close()
#f=input()
#==========================================================#
#==========================================================#
#yvel
#==========================================================#
filename1= output_directory + foldername+'_yvel'+ '_' + Order_ID + '.txt'
print(filename1)
target = open(filename1, 'w')
target.write("Year Hour ")
for inode in node:
target.write("%i " % inode)
target.write('\n')
print (filename1)
for jj in year:
f1=run_directory + f #+ '_%d' %jj
R=rma()
print(f1)
R.open(f1)
print (jj)
while R.next():
time.append(R.time)
target.write("%.0f %r " %(jj,R.time))
for inode in node:
target.write("%f " % R.yvel[inode])
target.write('\n')
#print (" Press any ENTER to exit")
target.close()
#f=input()
#==========================================================#

@ -0,0 +1,392 @@
# coding: utf-8
# In[64]:
# Set working directory where python code is located and results are to be stored
import os
os.chdir('J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/RMA_result_explorer_code')
import struct
import matplotlib.pyplot as plt
import math
from py_rmatools import rma
import glob
get_ipython().magic('matplotlib qt')
plt.rcParams.update({'figure.max_open_warning': 0})
River ='Tweed' #'Clarence'
foldername ='TWD_HYD_CAL_28' #'046_CLA_CAL_01'
run_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Models/' + River + '/03_Simulations/RAP_SLR/' + foldername +'/'
#set directory path for output files
output_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Models/' + River + '/04_Results/Output/'+ foldername + '_SERAPHIN/'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# In[65]:
meshpath = glob.glob(run_directory + '*.rm1')
meshFilename = meshpath[0]
#meshFilename = run_directory + 'woolo_v8.rm1'
channelWidth = 100
#rmapath = glob.glob(run_directory + '*.rma')
#RMAfilename = meshpath[0] #leftover from testing; overridden by the hard-coded .rma path below
RMAfilename = run_directory + 'Tweed_2017.rma' #'yamba_iluka_2008_044.rma'
#If RMA11
constNum = [0]
os.chdir(run_directory)
# In[66]:
def isElementOneD(nodelist):
if len(nodelist) == 2:
return True
return False
def isElementSquare(nodelist):
if len(nodelist) == 4:
return True
return False
def square2Triangle(ElementNum):
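# split a quadrilateral element into two triangles sharing the node0-node2 diagonal: the original
# element is reduced to [n0, n1, n2] and a new element [n0, n2, n3] is appended to the element list,
# so that the Serafin output (which holds triangles only) can represent the quad mesh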
nodelist = ElementDict[ElementNum]
if isElementSquare(nodelist):
ElementDict[ElementNum] = [nodelist[0], nodelist[1], nodelist[2]]
ElementList.append(max(ElementList) + 1)
ElementDict[ElementList[-1]]= [nodelist[0], nodelist[2], nodelist[3]]
def oneD2triangle(ElementNum):
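# a 1D channel element (two nodes) has no area to draw, so it is widened into two triangles:
# both end nodes are duplicated and offset by channelWidth along the unit normal of the segment,
# and node2nodevalue maps each duplicate back to its original node so it inherits that node's results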
if isElementOneD(ElementDict[ElementNum]):
nAe = ElementDict[ElementNum][0] #nAe Node A existing
nBe = ElementDict[ElementNum][1]
if not nAe in node1Dduplicate: node1Dduplicate[nAe] = []
if not nBe in node1Dduplicate: node1Dduplicate[nBe] = []
xA = nodeDict[nAe][0]
xB = nodeDict[nBe][0]
yA = nodeDict[nAe][1]
yB = nodeDict[nBe][1]
normalVec = [-(yB - yA),(xB - xA)]
dist = math.sqrt(normalVec[0]**2 + normalVec[1]**2)
normalVec[0] = normalVec[0] / dist
normalVec[1] = normalVec[1] / dist
xA2 = xA + channelWidth * normalVec[0]
xB2 = xB + channelWidth * normalVec[0]
yA2 = yA + channelWidth * normalVec[1]
yB2 = yB + channelWidth * normalVec[1]
nA = max(NodeList) + 1
nB = max(NodeList) + 2
node1Dduplicate[nAe].append(nA)
node1Dduplicate[nBe].append(nB)
node2nodevalue[nA] = nAe
node2nodevalue[nB] = nBe
NodeList.append(nA)
NodeList.append(nB)
nodeDict[nA] = [xA2, yA2, -1.01]
nodeDict[nB] = [xB2, yB2, -1.01]
newEle = max(ElementList) + 1
ElementList.append(newEle)
ElementDict[ElementNum] = [nAe, nA, nBe]
ElementDict[newEle] = [nA, nB, nBe]
def RMA11toSerafin():
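# writes a Serafin/SELAFIN file (the Telemac result format): it is a Fortran 'unformatted' binary,
# so every logical record is framed by its byte length as a big-endian 4-byte integer, which is why
# each block of data below is wrapped in a pair of struct.pack(">l",...) calls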
f = open('{}.slf'.format(RMAfilename), 'wb')
f.write(struct.pack(">l",80))
str='{0: >80}'.format('SERAFIN ')
f.write(str.encode('ascii'))
f.write(struct.pack(">l",80))
f.write(struct.pack(">l",8))
f.write(struct.pack(">l",len(constNum)))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",8))
for c in constName:
f.write(struct.pack(">l",32))
str='{0: <32}'.format(c)
f.write(str.encode('ascii'))
f.write(struct.pack(">l",32))
f.write(struct.pack(">l",40))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",40))
f.write(struct.pack(">l",24))
f.write(struct.pack(">l",R.year))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",24))
f.write(struct.pack(">l",16))
f.write(struct.pack(">l",len(ElementList)))
f.write(struct.pack(">l",len(NodeList)))
f.write(struct.pack(">l",3))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",16))
f.write(struct.pack(">l",len(ElementList)*3*4))
for el in ElementList:
for nd in ElementDict[el]:
f.write(struct.pack(">l",nodeOrdered[nd]))
f.write(struct.pack(">l",len(ElementList)*3*4))
f.write(struct.pack(">l",len(NodeList)))
for i in range(0,len(NodeList)):
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",len(NodeList)))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
f.write(struct.pack(">f",value[0]))
f.write(struct.pack(">l",len(NodeList)*4))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
f.write(struct.pack(">f",value[1]))
f.write(struct.pack(">l",len(NodeList)*4))
while R.next():
#for i in range(3):
f.write(struct.pack(">l",4))
f.write(struct.pack(">f",R.time * 3600))
f.write(struct.pack(">l",4))
for c in constNum:
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
if key in node2nodevalue.keys():
f.write(struct.pack(">f",R.constit[c][node2nodevalue[key]]))
else:
f.write(struct.pack(">f",R.constit[c][key]))
f.write(struct.pack(">l",len(NodeList)*4))
R.next()
f.close()
def RMA2toSerafin():
f = open('{}.slf'.format(RMAfilename), 'wb')
f.write(struct.pack(">l",80))
str='{0: >80}'.format('SERAFIN ')
f.write(str.encode('ascii'))
f.write(struct.pack(">l",80))
f.write(struct.pack(">l",8))
f.write(struct.pack(">l",len(constName)))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",8))
for c in constName:
f.write(struct.pack(">l",32))
str='{0: <32}'.format(c)
f.write(str.encode('ascii'))
f.write(struct.pack(">l",32))
f.write(struct.pack(">l",40))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",40))
f.write(struct.pack(">l",24))
f.write(struct.pack(">l",R.year))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",24))
f.write(struct.pack(">l",16))
f.write(struct.pack(">l",len(ElementList)))
f.write(struct.pack(">l",len(NodeList)))
f.write(struct.pack(">l",3))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",16))
f.write(struct.pack(">l",len(ElementList)*3*4))
for el in ElementList:
for nd in ElementDict[el]:
f.write(struct.pack(">l",nodeOrdered[nd]))
f.write(struct.pack(">l",len(ElementList)*3*4))
f.write(struct.pack(">l",len(NodeList)))
for i in range(0,len(NodeList)):
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",len(NodeList)))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
f.write(struct.pack(">f",value[0]))
f.write(struct.pack(">l",len(NodeList)*4))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
f.write(struct.pack(">f",value[1]))
f.write(struct.pack(">l",len(NodeList)*4))
while R.next():
f.write(struct.pack(">l",4))
f.write(struct.pack(">f",R.time * 3600))
f.write(struct.pack(">l",4))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
if key in node2nodevalue.keys():
f.write(struct.pack(">f",R.xvel[node2nodevalue[key]]))
else:
f.write(struct.pack(">f",R.xvel[key]))
f.write(struct.pack(">l",len(NodeList)*4))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
if key in node2nodevalue.keys():
f.write(struct.pack(">f",R.yvel[node2nodevalue[key]]))
else:
f.write(struct.pack(">f",R.yvel[key]))
f.write(struct.pack(">l",len(NodeList)*4))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
if key in node2nodevalue.keys():
f.write(struct.pack(">f",R.depth[node2nodevalue[key]]))
else:
f.write(struct.pack(">f",R.depth[key]))
f.write(struct.pack(">l",len(NodeList)*4))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
if key in node2nodevalue.keys():
f.write(struct.pack(">f",R.elevation[node2nodevalue[key]]))
else:
f.write(struct.pack(">f",R.elevation[key]))
f.write(struct.pack(">l",len(NodeList)*4))
R.next()
f.close()
# In[67]:
#Read mesh file and extract nodes (except mid-side nodes) and elements - plus convert 1D elements to 2D for visualisation
NodeList = []
ElementList = []
ElementDict = {}
nodeDict = {}
node1Dduplicate = {} #Original Number: List of Duplicates
node2nodevalue = {} #link between the node number and the node value to use
#(e.g. if node 10 is a 1D node: 10 is not duplicate so {1:1},
#but node 2050 (duplicate of 10) (1D to 2D) the value of the duplicated
#node will be the same as the original so we might have {2050: 10})
with open(meshFilename) as f:
line = f.readline()
line = f.readline()
line = f.readline()
line = f.readline()
cpt = 1
while line and line != ' 9999\n':
temp = line.split()
ElementDict[int(temp[0])] = [int(temp[i]) for i in range(1,9,2) if temp[i] != '0' and int(temp[9]) < 100]
ElementList.append(int(temp[0]))
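#on each element line, columns 1,3,5,7 hold the corner node numbers (columns 2,4,6,8 are mid-side
#nodes and are skipped), and column 9 appears to be the element/material type; types >= 100 flag
#special elements, which end up with an empty node list and are removed further below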
line = f.readline()
for key, value in ElementDict.items():
NodeList.extend(value)
NodeList = list(set(NodeList))
line = f.readline()
while line and line != ' 9999\n':
temp = line.split()
if int(temp[0]) in NodeList:
nodeDict[int(temp[0])] = [float(temp[1]),float(temp[2]),float(temp[3])]
line = f.readline()
for e in ElementList:
oneD2triangle(e)
square2Triangle(e)
for key in list(ElementDict): #Remove Special Element 902.....
if len(ElementDict[key]) != 3:
print(key, ElementDict[key])
ElementDict.pop(key)
ElementList.remove(key)
nodeOrdered = {}
cpt = 1
for key, value in nodeDict.items():
nodeOrdered[key] = cpt
cpt +=1
# # Open and Read First Step of the RMA File and Save a Serafin
# In[72]:
R=rma()
R.open(RMAfilename)
R.next()
if R.type==b'RMA11 ':
constName = []
for c in constNum:
constName.append(R.constit_name[c].decode("utf-8"))
RMA11toSerafin()
if R.type==b'RMA2 ':
constName = ['X-VEL','Y-VEL','DEPTH','FREE SURFACE']
RMA2toSerafin()

@ -0,0 +1,483 @@
# -*- coding: utf-8 -*-
#==========================================================#
#Last Updated - June 2018
#@author: z5025317 Valentin Heimhuber
#code for creating climate prioritization plots for NARCLIM variables.
#Inputs: Uses CSV files that contain all 12 NARCLIM model runs time series for 1 grid cell created with: P1_NARCliM_NC_to_CSV_CCRC_SS.py
#Datum offset factors can be found at https://www.mhl.nsw.gov.au/docs/Data%20Conditions%20and%20Fit%20for%20Purpose.pdf
#==========================================================#
#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import geopandas as gpd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
#from matplotlib.backends.backend_pdf import PdfPages
import seaborn
matplotlib.style.use('ggplot')
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
#==========================================================#
#==========================================================#
#Set Input parameters
#==========================================================#
#set scenario codes and beginning and end years and corresponding scenario code
River = 'Clarence'
Order_ID = 'Core_ord_2' #choose order column name
fs= ['basecase_2008' , '046_CLA_CAL_01'] #input scenarios
startdate = '2008 05 01'
enddate = '2008 12 30'
tideanalysis_day = '2008 06 01'
Surrogate_year = '2019' #plot histogram for MHL data for this year, if gauge wasn't active yet in the year of the model run
#River = 'Macleay'
#Order_ID = 'Core_order' #choose order column name
#fs= ['FinalCalibration', 'FinalCalibration_V2']
#startdate = '2007 04 13'
#enddate = '2007 04 21'
#tideanalysis_day = '2007 04 17'
#
#River = 'Hastings'
#Order_ID = 'Core_order' #choose order column name
#fs= ['HAS_CAL_01' , 'HAS_CAL_02']
#startdate = '2017 01 01'
#enddate = '2017 03 30'
#tideanalysis_day = '2017 01 12'
#======================#
#Set optional parameters
#======================#
#set plot parameters
ylims = [-1,1.7] #usually the min and max observed water levels in the estuary in AHD used for plot ylims
bandwidth = 0.3 #width of bins parameter for seaborn.kde histogram plots
#HDvariables = ['depth', 'elev']
variable = 'elev' #depth or vel
maxrowsincsv = 30000000 #limits the nr. of timesteps read in via the RMA extracted txt files
#scenario_real_names = ['TWD_HYD_CAL_03', 'TWD_HYD_CAL_04'] #['TWD_HYD_CAL_01', 'TWD_HYD_CAL_02'] #'2008_RMA_1m_SLR']
scenario_real_names = fs #these will be used in the plotting so weird scenarios codes can be replaced with more intuitive names if desired. Set to fs if scenario codes from run logfile should be used
#==========================================================#
#End of set Input parameters
#========================================================#
#========================================================#
#mostly automated part of the code, but it might have to be double-checked/debugged
#========================================================#
scenario_dict = dict(zip(fs, scenario_real_names))
Plot_scenario = fs[0] +'_vs_' + fs[1] + Order_ID #'Cal_05vs06vsMHL'
#set directory path for input and output files
main_input_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Models/' + River + '/04_Results/Output/'
output_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Models/' + River + '/04_Results/Output/' + '/Postprocessed'
if not os.path.exists(output_directory + '/' + Plot_scenario):
os.makedirs(output_directory+ '/' + Plot_scenario)
#load the gauge vs. nearest-node exchange csv (produced by the extraction script) for the first scenario
Gauge_exchange_df = pd.read_csv(glob.glob(main_input_directory + fs[0] + '_' + Order_ID + '/*.csv' )[0])
keys = Gauge_exchange_df['NrstNode'].values
gauges = Gauge_exchange_df['Station Na'].values
#node_dict = dict(zip(keys, gauges))
order = Gauge_exchange_df[Order_ID].astype(int).values
order_dict = dict(zip(gauges, order))
#load the gauge vs. nearest-node exchange csv for the second scenario
Gauge_exchange_df = pd.read_csv(glob.glob(main_input_directory + fs[1] + '_' + Order_ID + '/*.csv' )[0])
keys2 = Gauge_exchange_df['NrstNode'].values
gauges2 = Gauge_exchange_df['Station Na'].values
keys = np.append(keys, keys2)
gauges = np.append(gauges, gauges2)
node_dict = dict(zip(keys, gauges)) #note: if two MHL gauges resulted in the same node ID, one of those gauges is lost here because dictionary keys need to be unique
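#optional sketch: report which gauges were dropped by the duplicate-key collapse above
lost_gauges = [g for k, g in zip(keys, gauges) if node_dict[k] != g]
if len(lost_gauges) > 0:
    print('MHL gauges dropped because they share a nearest node with another gauge: ' + ', '.join(lost_gauges))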
gauges = list(node_dict.values()) #make sure to only use gauges that made it into the dictionary
#Load in situ gauge data
WL_df = pd.DataFrame()
for gauge in gauges:
#gauge = gauges[1]
#gauge = 'Harrington'
print('loading MHL gauge data for ' + str(gauge))
CSVpath = glob.glob('J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Global_Data/MHL_Gauge_Data/20200409_Recieved/*/' + '*' + gauge.replace(" ", "") + '*' )
Full_df = pd.read_csv(CSVpath[0] ,header= 0, skiprows= 30, parse_dates = True)
Full_df.index = pd.to_datetime(Full_df.Date, format = '%d/%m/%Y') + pd.to_timedelta(Full_df.Time, unit='h')
#Full_df = Full_df.loc[datetime.strptime(startdate , '%Y %m %d').date() : datetime.strptime(enddate , '%Y %m %d').date()]
Full_df = Full_df.iloc[:,[-2]]
Full_df.columns = [gauge]
Full_df = Full_df.loc[~Full_df.index.duplicated(keep='first')] #get rid of possible duplicate dates
Full_df = Full_df.replace(to_replace ='---', value =np.NaN) #--- is used as the no data value in the MHL data provided
Full_df = pd.DataFrame(pd.to_numeric(Full_df[gauge]))
Full_df = Full_df - np.float(Gauge_exchange_df['AHDadjust'][Gauge_exchange_df['Station Na']==gauge]) #subtract the datum adjustment factor provided by MHL as written in the shapefile
WL_df = pd.concat([WL_df, Full_df], axis=1)
#postprocess the in situ gauge dataframe
#WL_df = WL_df.replace(to_replace ='---', value =np.NaN) #--- is used as the no data value in the MHL data provided
#WL_df = WL_df.convert_objects(convert_numeric=True)
#WL_df = WL_df - datum_offset
order_colnames=[]
for colname in WL_df.columns:
order_colnames.append(str(order_dict[colname]) + '_' + colname)
#WL_df.columns = order_colnames
WL_df.columns = [str(s) + '_MHL' for s in order_colnames]
order_colnames.sort() #this will later be used to loop through the MHL gauges in a downstream to upstream order
order_colnames = pd.Series(order_colnames).str.replace("(",'')
order_colnames = pd.Series(order_colnames).str.replace(")",'')
order_colnames = order_colnames[~order_colnames.duplicated(keep='first')]
print('Performing analysis in the following order of locations')
print(order_colnames)
#========================================================#
#========================================================#
#fully automated part of the code doing the data extraction
#========================================================#
#load RMA2 data
Summary_df = pd.DataFrame()
df = pd.DataFrame()
for f in fs:
#set input and output directories
input_directory = main_input_directory + f + '_' + Order_ID
# Set working directory (where the extracted RMA result text files for this scenario are located)
os.chdir(input_directory)
#Load data file
if variable == 'depth' or variable == 'elev':
Clim_Var_CSVs = glob.glob('*' + variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ', nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
df= df.drop(columns=['Year', 'Hour'])
a=len(df.columns)-1
df=df.iloc[:,:a]
if variable == 'vel':
#x velocity
Clim_Var_CSVs = glob.glob('*' +'x'+ variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ', nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
dfx= df.drop(columns=['Year', 'Hour','1'])
#y velocity
Clim_Var_CSVs = glob.glob('*' +'y'+ variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ',nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
dfy= df.drop(columns=['Year', 'Hour','1'])
df = np.sqrt(dfx*dfx + dfy*dfy)
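#the resultant current speed at each node and time step is the magnitude of the x and y velocity components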
df.columns = df.columns + '_'+ f + '_'+ variable
#cut down the df to start and end date
df = df[datetime.strptime(startdate , '%Y %m %d').date():datetime.strptime(enddate, '%Y %m %d').date()]
Summary_df = pd.concat([Summary_df, df], axis=1, join='outer')
HD_Summary_df = Summary_df
#========================================================#
#========================================================#
#interactive data analysis part of the code - alternatively, the dataframe could be written to csv at this stage and analysed with a separate code.
#========================================================#
#Summary_df.columns = chainages
#subset the input data frame
Summary_df_nodes = pd.DataFrame()
#for node, gauges in nodes:
for key,value in node_dict.items():
df1 = HD_Summary_df.filter(regex='^'+ str(key)+'_').filter(regex=variable)
df1.columns = [value + '_' + str(s) for s in df1.columns]
Summary_df_nodes = pd.concat([Summary_df_nodes, df1], axis=1, join='outer')
for key,value in scenario_dict.items():
Summary_df_nodes.columns = pd.Series(Summary_df_nodes.columns).str.replace(key, value)
combined_df = pd.DataFrame()
combined_df = pd.concat([Summary_df_nodes , WL_df], axis=1, join='outer')
combined_df.columns = pd.Series(combined_df.columns).str.replace("(",'') #brackets mess up the regex commands below
combined_df.columns = pd.Series(combined_df.columns).str.replace(")",'')
combined_df = combined_df.loc[:,~combined_df.columns.duplicated()]
combined_df_hourly = combined_df.resample('1H').max()
#this needs to be fixed to be more generic!
Histogram_df = combined_df.loc[datetime.strptime(startdate , '%Y %m %d').date() : datetime.strptime(enddate , '%Y %m %d').date()] #dataframe for histogram comparison between RMA and MHL
#========================================================#
#drop first gauge which doesn't have any data in 2006
#gauges = gauges[1:]
#========================================================#
# Plot water level histograms at control points
#========================================================#
ylims = [-0.9,2.5]
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 5}
matplotlib.rc('font', **font)
lwd_hist = 2 #line width of histograms
i=1
png_out_name = output_directory + '/' + Plot_scenario + '/RMA_vs_MHL_gauges_water level histogram and exceedance planes '+ startdate + '_' + enddate + '.pdf'
fig = plt.figure(figsize=(len(gauges)*7,12))
for Gauge in order_colnames:
#Gauge = order_colnames[0]
ax=plt.subplot(1,len(gauges),i)
plt.title( Gauge + ' WL '+ startdate + '-' + enddate)
xlocation = 0.6
#Gauge_only_df = Histogram_df.filter(regex=Gauge[3:])
Gauge_only_df = combined_df.filter(regex=Gauge[3:])
Gauge_only_df = Gauge_only_df.iloc[:,[0,1,2]]
#scenario 1
TS_1 = Gauge_only_df[Gauge+'_MHL']
TS_1.name = 'MHL full period' + str(TS_1.index[0].date()) + '_' + str(TS_1.index[-1].date())
if len(TS_1[TS_1.notnull()])>0:
color_1 = 'blue'
width = 1
#full period density comparison for individual gauges overlaid by tidal planes
seaborn.kdeplot(TS_1, vertical=True, color=color_1, lw=1, linestyle='dotted', ax=ax)
Gauge_only_df = Gauge_only_df[Gauge_only_df.iloc[:,0].notnull()]
TS_1 = Gauge_only_df[Gauge+'_MHL']
TS_1 = TS_1.loc[datetime.strptime(startdate , '%Y %m %d').date() : datetime.strptime(enddate , '%Y %m %d').date()]
TS_1.name = 'MHL period from ' + startdate + '_' + enddate
if len(TS_1[TS_1.notnull()])>0:
color_1 = 'royalblue'
width = 1
#full period density comparison for individual gauges overlaid by tidal planes
seaborn.kdeplot(TS_1, vertical=True, color=color_1,bw=bandwidth, lw=lwd_hist, ax=ax)
#seaborn.kdeplot(Gauge_only_df.iloc[:,2], vertical=True, color=color_1, lw=3)
plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
else:
TS_1 = Gauge_only_df[Gauge+'_MHL']
TS_1 = TS_1.loc[datetime.strptime(Surrogate_year + ' ' + startdate[5:], '%Y %m %d').date() : datetime.strptime(Surrogate_year + ' ' + enddate[5:] , '%Y %m %d').date()]
TS_1.name = 'MHL period outside of model period ' + startdate + '_' + enddate
if len(TS_1[TS_1.notnull()])>0:
color_1 = 'royalblue'
width = 1
#full period density comparison for individual gauges overlaid by tidal planes
seaborn.kdeplot(TS_1, vertical=True, color=color_1,bw=bandwidth, lw=lwd_hist, ax=ax)
#seaborn.kdeplot(Gauge_only_df.iloc[:,2], vertical=True, color=color_1, lw=3)
plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#scenario 2
#TS_1 = Gauge_only_df.filter(regex='elev')
TS_1 = Gauge_only_df.iloc[:,0][Gauge_only_df.iloc[:,0].notnull()]
color_1 = 'red'
width = 1
seaborn.kdeplot(TS_1, vertical=True, color=color_1,bw=bandwidth, lw=lwd_hist, ax=ax)
#plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
#plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
#plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
#plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
#plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
#plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
#plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
#plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
#plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
#plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#scenario 3
TS_1 = Gauge_only_df.iloc[:,1][Gauge_only_df.iloc[:,1].notnull()]
color_1 = 'limegreen'
width = 1
seaborn.kdeplot(TS_1, vertical=True, color=color_1,bw=bandwidth, lw=lwd_hist, ax=ax)
plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
#plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
#plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
#plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
#plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
#plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#finish the plot
plt.ylim(ylims)
if i == 1:
plt.ylabel("Water level [m AHD]")
plt.xlabel("Probability/density")
fig.tight_layout()
i=i+1
fig.savefig(png_out_name)
plt.close()
#========================================================#
#========================================================#
# Plot MHL water levels only
#========================================================#
ylims = [-1,1.5]
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 6}
matplotlib.rc('font', **font)
i=1
png_out_name = output_directory + '/' + Plot_scenario + '/MHL_gauges_water level histograms and exceedance planes '+ startdate + '_' + enddate + '.pdf'
fig = plt.figure(figsize=(len(gauges)*5,11))
for Gauge in order_colnames:
#Gauge = order_colnames[0]
ax=plt.subplot(1,len(gauges),i)
plt.title( Gauge + ' WL '+ startdate + '-' + enddate)
xlocation = 0.6
combined_df.columns
TS_1 = combined_df[Gauge+'_MHL']
TS_1 = TS_1[TS_1.notnull()]
#scenario 1
TS_1.name = 'Full Period ' + str(TS_1.index[0].date()) + '_' + str(TS_1.index[-1].date())
color_1 = 'royalblue'
width = 1
#full period density comparison for individual gauges overlaid by tidal planes
seaborn.kdeplot(TS_1, vertical=True, color=color_1,bw=bandwidth, lw=2, ax=ax)
#seaborn.kdeplot(Gauge_only_df.iloc[:,2], vertical=True, color=color_1, lw=3)
plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
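#Note: the exceedance planes drawn above are the complements of the plotted percentiles
#(e.g. the 87th percentile water level is exceeded 13% of the time). A small helper of the
#form sketched below could replace the repeated axhline/text pairs (illustrative only, not called anywhere):
#def plot_exceedance_planes(ts, color, ax, xloc=0.6, lw=1):
#    for pctl, style, label in [(50, 'solid', '50% exceedance'), (87, 'solid', '13% exceedance'),
#                               (93.6, 'dotted', '6.4% exceedance'), (98.7, 'dashed', '1.3% exceedance'),
#                               (99.99, 'dashdot', '0.01% exceedance')]:
#        y = np.nanpercentile(ts, pctl)
#        ax.axhline(y=y, color=color, linestyle=style, lw=lw)
#        ax.text(xloc, y, label, ha='left', va='bottom')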
#scenario 2
#TS_1 = Gauge_only_df.filter(regex='elev')
TS_1 = TS_1.loc[datetime.strptime(startdate , '%Y %m %d').date() : datetime.strptime(enddate , '%Y %m %d').date()]
if len(TS_1[TS_1.notnull()])>0:
TS_1.name = 'Period from ' + startdate + '_' + enddate
color_1 = 'red'
width = 1
seaborn.kdeplot(TS_1, vertical=True, color=color_1,bw=bandwidth, lw=2, ax=ax)
plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#finish the plot
plt.ylim(ylims)
if i == 1:
plt.ylabel("Water level [m AHD]")
plt.xlabel("Probability/density")
fig.tight_layout()
i=i+1
fig.savefig(png_out_name)
plt.close()
#========================================================#
#========================================================#
# Plot time series over various intervals at control points
#========================================================#
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 5}
matplotlib.rc('font', **font)
halfwindows = [1,2,5,10,15]
for halfwindow in halfwindows:
#halfwindow = 10
png_out_name = output_directory + '/' + Plot_scenario + '/water level time series '+ startdate + '_' + enddate + ' _window_of_' + str(halfwindow*2) +'_days.pdf'
fig = plt.figure(figsize=(10,len(gauges)*3))
i=1
for Gauge in order_colnames:
Gauge_only_df = combined_df.filter(regex=Gauge[3:])
#combined_df.columns
#Gauge_only_df = df1.filter(regex=Gauge)
#Gauge_only_df_1 = Gauge_only_df.loc[(Gauge_only_df.index >= '2008-05-09 12:00:00') & (Gauge_only_df.index <= '2008-05-20 12:00:00')]
Gauge_only_df_1 = Gauge_only_df[datetime.strptime(tideanalysis_day , '%Y %m %d').date()- timedelta(days=halfwindow) : datetime.strptime(tideanalysis_day , '%Y %m %d').date() + timedelta(days=halfwindow)]
Gauge_only_df_1 = Gauge_only_df_1[Gauge_only_df_1.iloc[:,0].notnull()]
ax=plt.subplot(len(gauges),1,i)
plt.title('Tidal water levels at ' + Gauge + ' | ' + River +' Estuary')
ax.set_prop_cycle(color=['steelblue', 'coral', 'steelblue', 'deeppink'],linestyle=[ '-', '-','--','-'], lw=[0.8,0.8,0.8,0.8])
Gauge_only_df_1.iloc[:,[0,1,2]].plot( legend=False, ax=ax)
plt.axhline(y=0, color='black', linestyle='-', lw=0.8, alpha=0.5)
#plt.axhline(y=0.9, color='tomato', linestyle='--', lw=1, alpha=0.5)
plt.ylim(ylims)
plt.ylabel("Water level [m AHD]")
plt.xlabel("Time")
ax.legend()
fig.tight_layout()
i=i+1
fig.savefig(png_out_name)
plt.close()
#========================================================#
##========================================================#
##write out exceedance data to csv
##========================================================#
#Summary={}
#Summary['13% exceedance'] = Wollow_13ExPr
#Summary['6.4% exceedance'] = Wollow_6p4ExPr
#Summary['1.3% exceedance'] = Wollow_1_3ExPr
#Summary['0.01% exceedance'] = Wollow_0p01ExPr
#Summary['50% exceedance'] = Wollow_50ExPr
#pdf1 = pd.DataFrame(Summary, index=[0])
#pdf1.to_csv(os.path.join( Out_Folder + Gauge + '_entrance_stats.csv'))
##========================================================#

@ -0,0 +1,749 @@
# -*- coding: utf-8 -*-
#==========================================================#
#Last Updated - June 2018
#@author: z5025317 Valentin Heimhuber
#code for comparing RMA2/RMA11 water level results at control points with MHL gauge data (density plots with exceedance planes and tidal time series)
#Inputs: extracted RMA result CSV/TXT files per scenario, node-to-gauge lookup CSVs (with station order and AHD datum adjustments) and the MHL gauge CSV files
#Datum offset factors can be found at https://www.mhl.nsw.gov.au/docs/Data%20Conditions%20and%20Fit%20for%20Purpose.pdf
#==========================================================#
#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import geopandas as gpd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
#from matplotlib.backends.backend_pdf import PdfPages
import seaborn
matplotlib.style.use('ggplot')
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
#==========================================================#
#==========================================================#
#Set Input parameters
#==========================================================#
#set scenario codes and beginning and end years and corresponding scenario code
River = 'Hastings'
Order_ID = 'Core_order' #choose order column name
#input scenarios
fs= ['HAS_CAL_01' , 'HAS_CAL_02'] #,'TWD_HYD_CAL_12'] #['TWD_HYD_CAL_05', 'TWD_HYD_CAL_06'] #['TWD_HYD_CAL_01','TWD_HYD_CAL_02']#, 'SLR_1m_2008_FN1']
#scenario_real_names = ['TWD_HYD_CAL_03', 'TWD_HYD_CAL_04'] #['TWD_HYD_CAL_01', 'TWD_HYD_CAL_02'] #'2008_RMA_1m_SLR'] #these will be used in the plotting so weird scenarios codes can be replaced with more intuitive names if desired
scenario_real_names = fs
scenario_dict = dict(zip(fs, scenario_real_names))
#name of plot scenario/experiment . This is a unique identifier for this particular scenario comparison
Plot_scenario = fs[0] +'_vs_' + fs[1] + 'v2'#'Cal_05vs06vsMHL'
#HDvariables = ['depth', 'elev']
variable = 'elev' #depth or vel
maxrowsincsv = 30000000 #limits the nr. of timesteps read in via the RMA extracted txt files
ylims = [-1,1.7] #usually the min and max observed water levels in the estuary in AHD used for plot ylims
#time frames for analysis
startdate = '2017 01 01'
enddate = '2017 03 30'
tideanalysis_day = '2017 02 01'
#year=range(startyear, endyear+1)
#========================================================#
#========================================================#
#mostly automated part of the code, but it may need to be double-checked and debugged
#========================================================#
#set directory path for input and output files
main_input_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Models/' + River + '/04_Results/Output/'
output_directory = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Models/' + River + '/04_Results/Output/' + '/Postprocessed'
if not os.path.exists(output_directory + '/' + Plot_scenario):
os.makedirs(output_directory+ '/' + Plot_scenario)
#load and refine node vs mesh shapefile to store the results of the statistics
Gauge_exchange_df = pd.read_csv(glob.glob(main_input_directory + fs[0] + '_' + Order_ID + '/*.csv' )[0])
keys = Gauge_exchange_df['NrstNode'].values
gauges = Gauge_exchange_df['Station Na'].values
#node_dict = dict(zip(keys, gauges))
order = Gauge_exchange_df[Order_ID].astype(int).values
order_dict = dict(zip(gauges, order))
#load and refine node vs mesh shapefile to store the results of the statistics
Gauge_exchange_df = pd.read_csv(glob.glob(main_input_directory + fs[1] + '_' + Order_ID + '/*.csv' )[0])
keys2 = Gauge_exchange_df['NrstNode'].values
gauges2 = Gauge_exchange_df['Station Na'].values
keys = np.append(keys, keys2)
gauges = np.append(gauges, gauges2)
node_dict = dict(zip(keys, gauges))
#Load in situ gauge data
WL_df = pd.DataFrame()
for gauge in gauges:
#gauge = gauges[1]
#gauge = 'Harrington'
print ('loading MHL gauge data for ' + gauge)
CSVpath = glob.glob('J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Global_Data/MHL_Gauge_Data/20200409_Recieved/*/' + '*' + gauge.replace(" ", "") + '*' )
Full_df = pd.read_csv(CSVpath[0] ,header= 0, skiprows= 30, parse_dates = True)
Full_df.index = pd.to_datetime(Full_df.Date, format = '%d/%m/%Y') + pd.to_timedelta(Full_df.Time, unit='h')
#Full_df = Full_df.loc[datetime.strptime(startdate , '%Y %m %d').date() : datetime.strptime(enddate , '%Y %m %d').date()]
Full_df = Full_df.iloc[:,[-2]]
Full_df.columns = [gauge]
Full_df = Full_df.loc[~Full_df.index.duplicated(keep='first')] #get rid of possible duplicate dates
Full_df = Full_df.replace(to_replace ='---', value =np.NaN) #--- is used as the no data value in the MHL data provided
Full_df = pd.DataFrame(pd.to_numeric(Full_df[gauge]))
Full_df = Full_df - np.float(Gauge_exchange_df['AHDadjust'][Gauge_exchange_df['Station Na']==gauge]) #subtract the datum adjustment factor provided by MHL as written in the shapefile
WL_df = pd.concat([WL_df, Full_df], axis=1)
#postprocess the in situ gauge dataframe
#WL_df = WL_df.replace(to_replace ='---', value =np.NaN) #--- is used as the no data value in the MHL data provided
#WL_df = WL_df.convert_objects(convert_numeric=True)
#WL_df = WL_df - datum_offset
order_colnames=[]
for colname in WL_df.columns:
print (colname)
order_colnames.append(str(order_dict[colname]) + '_' + colname)
#WL_df.columns = order_colnames
WL_df.columns = [str(s) + '_MHL' for s in order_colnames]
order_colnames.sort() #this will later be used to loop through the MHL gauges in downstream to upstream order
order_colnames = pd.Series(order_colnames).str.replace("(",'')
order_colnames = pd.Series(order_colnames).str.replace(")",'')
#========================================================#
#========================================================#
#fully automated part of the code doing the data extraction
#========================================================#
#load RMA2 data
Summary_df = pd.DataFrame()
df = pd.DataFrame()
for f in fs:
#set input and output directories
input_directory = main_input_directory + f + '_' + Order_ID
# Set working directory (where the extracted RMA result files for this scenario are located)
os.chdir(input_directory)
#Load data file
if variable == 'depth' or variable == 'elev':
Clim_Var_CSVs = glob.glob('*' + variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ', nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
df= df.drop(columns=['Year', 'Hour'])
a=len(df.columns)-1
df=df.iloc[:,:a]
if variable == 'vel':
#x velocity
Clim_Var_CSVs = glob.glob('*' +'x'+ variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ', nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
dfx= df.drop(columns=['Year', 'Hour','1'])
#y velocity
Clim_Var_CSVs = glob.glob('*' +'y'+ variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ',nrows= maxrowsincsv)
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
dfy= df.drop(columns=['Year', 'Hour','1'])
df = np.sqrt(dfx*dfx + dfy*dfy)
df.columns = df.columns + '_'+ f + '_'+ variable
#cut down the df to start and end date
df = df[datetime.strptime(startdate , '%Y %m %d').date():datetime.strptime(enddate, '%Y %m %d').date()]
Summary_df = pd.concat([Summary_df, df], axis=1, join='outer')
HD_Summary_df = Summary_df
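#Assumed layout of the extracted RMA result text files read in the loop above (illustrative values):
#Year Hour 1234 5678 ...
#2017 0.5 0.41 0.38 ...
#i.e. space separated, with a Year column and a decimal Hour column followed by one column per mesh
#node ID that holds the extracted variable (elev/depth, or the x/y velocity components).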
#========================================================#
#========================================================#
#interactive data analysis part of the code - alternatively, the dataframe could be written to csv at this stage and analysed with a separate code.
#========================================================#
#Summary_df.columns = chainages
#subset the input data frame
Summary_df_nodes = pd.DataFrame()
#for node, gauges in nodes:
for key,value in node_dict.items():
df1 = HD_Summary_df.filter(regex='^'+ str(key)+'_').filter(regex=variable)
df1.columns = [value + '_' + str(s) for s in df1.columns]
Summary_df_nodes = pd.concat([Summary_df_nodes, df1], axis=1, join='outer')
for key,value in scenario_dict.items():
Summary_df_nodes.columns = pd.Series(Summary_df_nodes.columns).str.replace(key, value)
combined_df = pd.DataFrame()
combined_df = pd.concat([Summary_df_nodes , WL_df], axis=1, join='outer')
combined_df.columns = pd.Series(combined_df.columns).str.replace("(",'') #brackets mess up the regex commands below
combined_df.columns = pd.Series(combined_df.columns).str.replace(")",'')
combined_df_hourly = combined_df.resample('1H').max()
#this needs to be fixed to be more generic!
Histogram_df = combined_df_hourly.loc[datetime.strptime(startdate , '%Y %m %d').date() : datetime.strptime(enddate , '%Y %m %d').date()] #dataframe for histogram comparison between RMA and MHL
#========================================================#
#drop first gauge which doesn't have any data in 2006
#gauges = gauges[1:]
#========================================================#
# Plot water level histograms at control points
#========================================================#
ylims = [-0.9,2.5]
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 6}
matplotlib.rc('font', **font)
i=1
png_out_name = output_directory + '/' + Plot_scenario + '/RMA_vs_MHL_gauges_water level histogram and exceedance planes '+ startdate + '_' + enddate + '.pdf'
fig = plt.figure(figsize=(len(gauges)*5,11))
for Gauge in order_colnames:
#Gauge = order_colnames[0]
ax=plt.subplot(1,len(gauges),i)
plt.title( Gauge + ' water level density comparison '+ startdate + '-' + enddate)
xlocation = 0.6
Gauge_only_df = Histogram_df.filter(regex=Gauge[2:])
combined_df.columns
#scenario 1
TS_1 = Gauge_only_df[Gauge+'_MHL'].iloc[:,0]
if len(TS_1[TS_1.notnull()])>0:
color_1 = 'royalblue'
width = 1
#full period density comparison for individual gauges overlaid by tidal planes
seaborn.kdeplot(TS_1, vertical=True, color=color_1, lw=2, ax=ax)
#seaborn.kdeplot(Gauge_only_df.iloc[:,2], vertical=True, color=color_1, lw=3)
plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#scenario 2
#TS_1 = Gauge_only_df.filter(regex='elev')
TS_1 = Gauge_only_df.iloc[:,0][Gauge_only_df.iloc[:,0].notnull()]
color_1 = 'red'
width = 1
seaborn.kdeplot(TS_1, vertical=True, color=color_1, lw=2, ax=ax)
#plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
#plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
#plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
#plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
#plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
#plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
#plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
#plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
#plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
#plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#scenario 3
TS_1 = Gauge_only_df.iloc[:,1][Gauge_only_df.iloc[:,1].notnull()]
color_1 = 'limegreen'
width = 1
seaborn.kdeplot(TS_1, vertical=True, color=color_1, lw=2, ax=ax)
plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
#plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
#plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
#plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
#plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
#plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#finish the plot
plt.ylim(ylims)
if i == 1:
plt.ylabel("Water level [m AHD]")
plt.xlabel("Probability/density")
fig.tight_layout()
i=i+1
fig.savefig(png_out_name)
plt.close()
#========================================================#
#========================================================#
# Plot MHL water levels only
#========================================================#
ylims = [-1,1.5]
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 6}
matplotlib.rc('font', **font)
i=1
png_out_name = output_directory + '/' + Plot_scenario + '/MHL_gauges_water level histograms and exceedance planes '+ startdate + '_' + enddate + '.pdf'
fig = plt.figure(figsize=(len(gauges)*5,11))
for Gauge in order_colnames:
#Gauge = order_colnames[0]
ax=plt.subplot(1,len(gauges),i)
plt.title( Gauge + ' water level density comparison '+ startdate + '-' + enddate)
xlocation = 0.6
TS_1 = combined_df[Gauge+'_MHL'].iloc[:,0]
TS_1 = TS_1[TS_1.notnull()]
#scenario 1
TS_1.name = 'Full Period ' + str(TS_1.index[0].date()) + '_' + str(TS_1.index[-1].date())
color_1 = 'royalblue'
width = 1
#full period density comparison for individual gauges overlaid by tidal planes
seaborn.kdeplot(TS_1, vertical=True, color=color_1, lw=2, ax=ax)
#seaborn.kdeplot(Gauge_only_df.iloc[:,2], vertical=True, color=color_1, lw=3)
plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#scenario 2
#TS_1 = Gauge_only_df.filter(regex='elev')
TS_1 = TS_1.loc[datetime.strptime(startdate , '%Y %m %d').date() : datetime.strptime(enddate , '%Y %m %d').date()]
if len(TS_1[TS_1.notnull()])>0:
TS_1.name = 'Period from ' + startdate + '_' + enddate
color_1 = 'red'
width = 1
seaborn.kdeplot(TS_1, vertical=True, color=color_1, lw=2, ax=ax)
plt.axhline(y=np.nanpercentile(TS_1,87), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , (np.nanpercentile(TS_1,87)), '13% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,93.6), color=color_1, linestyle='dotted', lw=width, alpha=1)
plt.text(xlocation , (np.nanpercentile(TS_1,93.6)), '6.4% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,98.7), color=color_1, linestyle='dashed', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,98.7) , '1.3% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,99.99), color=color_1, linestyle='dashdot', lw=width, alpha=1)
plt.text( xlocation ,np.nanpercentile(TS_1,99.99) , '0.01% exceedance', ha='left', va='bottom')
plt.axhline(y=np.nanpercentile(TS_1,50), color=color_1, linestyle='solid', lw=width, alpha=1)
plt.text( xlocation , np.nanpercentile(TS_1,50), '50% exceedance', ha='left', va='bottom')
#finish the plot
plt.ylim(ylims)
if i == 1:
plt.ylabel("Water level [m AHD]")
plt.xlabel("Probability/density")
fig.tight_layout()
i=i+1
fig.savefig(png_out_name)
plt.close()
#========================================================#
#========================================================#
# Plot time series over various intervals at control points
#========================================================#
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 5}
matplotlib.rc('font', **font)
halfwindows = [1,2,5,10]
for halfwindow in halfwindows:
#halfwindow = 10
png_out_name = output_directory + '/' + Plot_scenario + '/water level time series '+ startdate + '_' + enddate + ' _window_of_' + str(halfwindow*2) +'_days.pdf'
fig = plt.figure(figsize=(10,len(gauges)*3))
i=1
for Gauge in order_colnames:
Gauge_only_df = combined_df.filter(regex=Gauge[2:])
#Gauge_only_df = df1.filter(regex=Gauge)
#Gauge_only_df_1 = Gauge_only_df.loc[(Gauge_only_df.index >= '2008-05-09 12:00:00') & (Gauge_only_df.index <= '2008-05-20 12:00:00')]
Gauge_only_df_1 = Gauge_only_df[datetime.strptime(tideanalysis_day , '%Y %m %d').date()- timedelta(days=halfwindow) : datetime.strptime(tideanalysis_day , '%Y %m %d').date() + timedelta(days=halfwindow) ]
Gauge_only_df_1 = Gauge_only_df_1[Gauge_only_df_1.iloc[:,0].notnull()]
ax=plt.subplot(len(gauges),1,i)
plt.title('Tidal water levels at ' + Gauge + ' | ' + River +' Estuary')
ax.set_prop_cycle(color=['steelblue', 'coral', 'steelblue', 'deeppink'],linestyle=[ '-', '-','--','-'], lw=[0.8,0.8,0.8,0.8])
Gauge_only_df_1.iloc[:,[0,1,2]].plot( legend=False, ax=ax)
plt.axhline(y=0, color='black', linestyle='-', lw=0.8, alpha=0.5)
#plt.axhline(y=0.9, color='tomato', linestyle='--', lw=1, alpha=0.5)
plt.ylim(ylims)
plt.ylabel("Water level [m AHD]")
plt.xlabel("Time")
ax.legend()
fig.tight_layout()
i=i+1
fig.savefig(png_out_name)
plt.close()
#========================================================#
# Plot water level histograms at control points
#========================================================#
##========================================================#
##write out exceedance data to csv
##========================================================#
#Summary={}
#Summary['13% exceedance'] = Wollow_13ExPr
#Summary['6.4% exceedance'] = Wollow_6p4ExPr
#Summary['1.3% exceedance'] = Wollow_1_3ExPr
#Summary['0.01% exceedance'] = Wollow_0p01ExPr
#Summary['50% exceedance'] = Wollow_50ExPr
#pdf1 = pd.DataFrame(Summary, index=[0])
#pdf1.to_csv(os.path.join( Out_Folder + Gauge + '_entrance_stats.csv'))
##========================================================#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
##Create Bathymetry along the nodes and chainages
#Elev_df = HD_Summary_df.filter(regex='Hwq003').filter(regex='elev')
#Elev_df.columns = chainages
#Depth_df = HD_Summary_df.filter(regex='Hwq003').filter(regex='depth')
#Depth_df.columns = chainages
#Bathy_df = pd.DataFrame(Elev_df.loc[maxtide_index] - Depth_df.loc[maxtide_index])
#Bathy_df.columns = ['Bathymetry']
#Bathy_df.plot()
#
#bathy_out_path = 'H:/WRL_Projects/Hunter_CC_Modeling/Module_6/03_Results/Chainages/Hunter_nodes_bathy.csv'
#Bathy_df.to_csv(bathy_out_path)
##plt.show()
#
#
##==========================================================#
##Tidal analysis
##==========================================================#
##read in tide data
#tides_df = tides_dfm[datetime.strptime(tideanalysis_day , '%Y %m %d').date()- timedelta(days=2) : datetime.strptime(tideanalysis_day , '%Y %m %d').date() + timedelta(days=3) ]
#central_tides_df = tides_dfm[datetime.strptime(tideanalysis_day , '%Y %m %d').date():datetime.strptime(tideanalysis_day , '%Y %m %d').date()]
#
#maxtide_index = central_tides_df['tide'].idxmax()
#end_maxtide_index = maxtide_index + timedelta(days=0.4)
#beg_maxtide_index = maxtide_index - timedelta(days=0.2)
#short_tides_df = tides_df[beg_maxtide_index: end_maxtide_index]
#short_tides_df.plot(cmap='viridis', legend=True)
#mintide_index = short_tides_df['tide'].idxmin()
#
##plot the minimum and maximum elevation along the estuary occuring within a single tidal cycle
#Short_tide_elev_df = HD_Summary_df.filter(regex='elev')[min(short_tides_df.index):max(short_tides_df.index)]
#max_min_1cycle_elev_0slr = pd.concat([Short_tide_elev_df.filter(regex=fs[0]).max(), Short_tide_elev_df.filter(regex=fs[0]).min()], axis=1, join='inner')
#max_min_1cycle_elev_09slr = pd.concat([Short_tide_elev_df.filter(regex=fs[1]).max(), Short_tide_elev_df.filter(regex=fs[1]).min()], axis=1, join='inner')
#max_min_1cycle_elev_0slr.index = chainages
#max_min_1cycle_elev_09slr.index = chainages
#max_min_1cycle_elev = pd.concat([max_min_1cycle_elev_0slr, max_min_1cycle_elev_09slr], axis=1, join='outer')
#max_min_1cycle_elev.columns = ['MaxElev 0 SLR','MinElev 0 SLR','MaxElev 0.9 SLR','MinElev 0.9 SLR' ]
#max_min_1cycle_elev.plot(cmap='viridis', legend=True)
#
##plot the elevation along the estuary at exactly the time of high and low tide
#max_min_extime_elev_0slr = pd.concat([HD_Summary_df.filter(regex=fs[0]).filter(regex='elev').loc[maxtide_index], Summary_df.filter(regex=fs[0]).loc[mintide_index]], axis=1, join='outer')
#max_min_extime_elev_09slr = pd.concat([HD_Summary_df.filter(regex=fs[1]).filter(regex='elev').loc[maxtide_index], Summary_df.filter(regex=fs[1]).loc[mintide_index]], axis=1, join='outer')
#max_min_extime_elev_0slr.index = chainages
#max_min_extime_elev_09slr.index = chainages
#max_min_extime_elev = pd.concat([max_min_extime_elev_0slr, max_min_extime_elev_09slr], axis=1, join='outer')
#max_min_extime_elev.columns = ['HigT Elev 0SLR','LowT Elev 0SLR','HigT Elev 0.9 SLR','LowT Elev 0.9 SLR' ]
#max_min_extime_elev.plot(cmap='viridis', legend=True)
#
#png_out_name = output_directory +'/' + Plot_scenario + '/' + variable + '_high_low_tide_analysis_' + Plot_scenario + '.pdf'
#fig = plt.figure(figsize=(17,20))
#ax=plt.subplot(3,1,1)
#plt.title('Max and min water surface elevation along the estuary over one tidal cycle')
#ax.set_prop_cycle(color=['slateblue', 'slateblue', 'tomato', 'tomato'], linestyle=['--', '-', '--', '-'], lw=[2,2,2,2])
#max_min_1cycle_elev.plot( legend=False, ax=ax)
#plt.axhline(y=0, color='slateblue', linestyle='--', lw=1, alpha=0.5)
#plt.axhline(y=0.9, color='tomato', linestyle='--', lw=1, alpha=0.5)
#plt.ylim(ylims)
#plt.ylabel("Water level [m AHD]")
#plt.xlabel("Distance from ocean boundary [km]")
## Put a legend to the right of the current axis
##lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#ax.legend()
##plot tides
#ax=plt.subplot(3,1,2)
#plt.title('Max and min water surface elevation along the estuary at time of high & low tide')
#ax.set_prop_cycle(color=['slateblue', 'slateblue', 'tomato', 'tomato'], linestyle=['--', '-', '--', '-'], lw=[2,2,2,2])
#max_min_extime_elev.plot(legend=False, ax=ax)
#plt.ylabel("Water level [m AHD]")
#plt.ylim(ylims)
#plt.xlabel("Distance from ocean boundary [km]")
##lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#ax.legend()
##plot tides
#ax=plt.subplot(3,1,3)
#plt.ylim(ylims)
#plt.title('Tide at open ocean boundary')
#short_tides_df.plot(cmap='viridis', legend=False, ax=ax)
#ax.axvline(maxtide_index, color='black', linestyle='--')
#ax.axvline(mintide_index, color='black', linestyle='-')
#plt.xlabel("Time")
#plt.ylabel("Water level [m AHD]")
##lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
#ax.legend()
#fig.tight_layout()
#fig.savefig(png_out_name)
#plt.close()
#
#
##==========================================================#
##animate 2 tidal cycles baseline scenario:
##==========================================================#
#Six_day_tide_elev_df = HD_Summary_df.filter(regex=fs[0]).filter(regex='elev')[maxtide_index - timedelta(days=2): maxtide_index + timedelta(days=3)]
#Six_day_tide_elev_df.columns = chainages
#Six_day_tides_df = tides_df[maxtide_index - timedelta(days=2): maxtide_index + timedelta(days=3)]
#Six_day_tides_df.plot(cmap='viridis', legend=True)
#
#Six_day_flow_df = Flow_df[maxtide_index - timedelta(days=2): maxtide_index + timedelta(days=3)]
#Six_day_flow_df = pd.concat([Six_day_tides_df, Six_day_flow_df], axis=1, join='outer')['Q[ML/d]'].interpolate()
#Six_day_flow_df.plot(cmap='viridis', legend=True)
#
#ii=0
#for index in Six_day_tide_elev_df.index[0::4]:
# ii=ii+1
# #index = Six_day_tide_elev_df.index[1]
# png_out_path = output_directory + '/' + Plot_scenario + '/TidalCycle_' + variable + '_' + Plot_scenario + '/'
# if not os.path.exists(png_out_path):
# os.makedirs(png_out_path)
# #png_out_name = png_out_path + str(index) + 'longitudinalXS.png'
# png_out_name = png_out_path + str(ii) + variable + '_longitudinalXS.png'
# fig = plt.figure(figsize=(17,20))
# ax=plt.subplot(3,1,1)
# plt.title('Water surface elevation along the estuary')
# Six_day_tide_elev_df.loc[index].plot(color='blue', legend=False)
# #plt.axhline(y=0, color='r', linestyle='--', lw=1, alpha=0.5)
# plt.axhline(tides_df.loc[index].values, color='black',lw=0.5, linestyle='--')
# plt.ylim(ylims)
# plt.ylabel("Water level [m AHD]")
# plt.xlabel("Distance from ocean boundary [km]")
# #plot tides
# ax=plt.subplot(3,1,2)
# plt.ylim(-1.1,1.3)
# plt.title('Tide at open ocean boundary')
# Six_day_tides_df.plot(cmap='viridis', legend=False, ax=ax)
# ax.axvline(index, color='green', linestyle='--')
# plt.xlabel("Time")
# plt.ylabel("Water level [m AHD]")
#
# ax=plt.subplot(3,1,3)
# #plt.ylim(ylims)
# plt.title('Flow at the upstream river boundary')
# Six_day_flow_df.plot(color='black', legend=False, ax=ax)
# ax.axvline(index, color='green', linestyle='--')
# plt.xlabel("Time")
# plt.ylabel("Hunter Inflow [ML/day]")
#
# fig.tight_layout()
# fig.savefig(png_out_name)
# plt.close()
#
#
#
#
#
#
##==========================================================#
##animate 2 tidal cycles baseline + SLR scenario:
##==========================================================#
#Six_day_tide_elev_df_0slr = HD_Summary_df.filter(regex=fs[0]).filter(regex='elev')[maxtide_index - timedelta(days=1): maxtide_index + timedelta(days=1)]
#Six_day_tide_elev_df_0slr.columns = chainages
#Six_day_tide_elev_df_09slr = HD_Summary_df.filter(regex=fs[1]).filter(regex='elev')[maxtide_index - timedelta(days=1): maxtide_index + timedelta(days=1)]
#Six_day_tide_elev_df_09slr.columns = chainages
#Six_day_tides_df = tides_df[maxtide_index - timedelta(days=1): maxtide_index + timedelta(days=1.5)]
#Six_day_tides_df.plot(cmap='viridis', legend=True)
#
#ii=0
#for index in Six_day_tide_elev_df_0slr.index:
# ii=ii+1
# #index = Six_day_tide_elev_df_0slr.index[1]
# png_out_path = output_directory + '/' + Plot_scenario + '/TidalCycle_SLR_' + variable + '_' + Plot_scenario + '/'
# if not os.path.exists(png_out_path):
# os.makedirs(png_out_path)
#
# #png_out_name = png_out_path + str(index) + 'longitudinalXS.png'
# png_out_name = png_out_path + str(ii) + variable + '_longitudinalXS.png'
# max_min_extime_elev_both = pd.concat([Six_day_tide_elev_df_0slr.loc[index], Six_day_tide_elev_df_09slr.loc[index]], axis=1, join='outer')
# max_min_extime_elev_both.columns = ['Present Day','0.9m SLR']
#
# fig = plt.figure(figsize=(17,12))
# ax=plt.subplot(2,1,1)
# plt.ylim(ylims)
# plt.title('Water surface elevation along the estuary')
# ax.set_prop_cycle(color=['slateblue','tomato'], linestyle=['-', '-'], lw=[2,2])
# max_min_extime_elev_both.plot(ax=ax)
# ax.legend(loc=1)
# plt.axhline(y=0, color='r', linestyle='--', lw=1, alpha=0.5)
# plt.ylim(-1.1,2.5)
# plt.ylabel("Water level [m AHD]")
# plt.xlabel("Distance from ocean boundary [km]")
#
# #plot tides
# ax=plt.subplot(2,1,2)
# plt.ylim(-1.1,1.5)
# plt.title('Tide at open ocean boundary')
# Six_day_tides_df.plot(cmap='viridis', legend=False, ax=ax)
# ax.axvline(index, color='green', linestyle='--')
# plt.xlabel("Time")
# plt.ylabel("Water level [m AHD]")
# fig.tight_layout()
# fig.savefig(png_out_name)
plt.close()
#for NODE in node:
# NODE = str(NODE)
# #=========#
# #comparison time series plot at node
# #=========#
# #out name
# png_out_file_name = comparison_code + '_' + variable + '_' + NODE + '_time_series.png'
# png_out_path = png_output_directory + '/' + png_out_file_name
# #
# plotin_df = Summary_df.filter(regex=NODE)
# #prepare data frame for plot
# fig = plt.figure(figsize=(14,18))
# ax=plt.subplot(4,1,1)
# #start and end dat of plots
# plt.title(variable + '_' + NODE + ' - time series')
# start_date = datetime(int('1996'), int('02'), int('01'))
# end_date = datetime(int('1996'), int('02'), int('5'))
# plotin_df1=plotin_df[start_date:end_date]
# ymin = min(plotin_df1.min()) - 0.01 *min(plotin_df1.min())
# ymax = max(plotin_df1.max()) + 0.01 *max(plotin_df1.max())
# plotin_df1.plot(ylim=(ymin,ymax) ,legend=False, ax=ax)
# #plt.legend( loc=1)
# plt.xticks([])
#
# ax=plt.subplot(4,1,2)
# start_date = datetime(int('1996'), int('02'), int('01'))
# end_date = datetime(int('1996'), int('02'), int('3'))
# plotin_df2=plotin_df[start_date:end_date]
# ymin = min(plotin_df2.min()) - 0.1 *min(plotin_df2.min())
# ymax = max(plotin_df2.max()) + 0.1 *max(plotin_df2.max())
# plotin_df2.plot(ylim=(ymin,ymax),legend=False, ax=ax)
# #plt.legend( loc=1)
# ax.xaxis.grid(False)
#
# ax=plt.subplot(4,1,3)
# plt.title(variable + '_' + NODE + ' - monthly maximum')
# Monthly_max = plotin_df.resample('M').max()
# Monthly_max.plot(ax=ax,legend=False)
# plt.legend( loc=1)
# ax.xaxis.grid(False)
#
# ax=plt.subplot(4,1,4)
# plt.title(variable + '_' + NODE + ' - monthly mean')
# Monthly_mean = plotin_df.resample('M').mean()
# Monthly_mean.plot(ax=ax)
# #plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
# # ncol=2, mode="expand", borderaxespad=0.)
# #plt.legend( loc=1)
# ax.xaxis.grid(False)
#
# #ax.patch.set_alpha(ALPHA_figs)
# fig.patch.set_alpha(ALPHA_figs)
# fig.tight_layout()
# fig.savefig(png_out_path)
# plt.close()
# #=========#

@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 3 12:54:11 2020
@original author: Saniya Khan
@adapted by: Valentin Heimhuber
This code uses a coords.csv file with a list of lat/lon locations to extract SILO gridded climatology data at each location and
writes it to a csv file
SILO gridded data has a resolution of 0.05° × 0.05° (approximately 5 km x 5 km)
More information about the interpolated, gridded SILO datasets can be found here: https://www.longpaddock.qld.gov.au/silo/faq/#faq5
Available variables are:
"""
from __future__ import unicode_literals
import urllib.request
import urllib.parse
import pandas as pd
from io import StringIO
from itertools import repeat
api_url = 'https://www.longpaddock.qld.gov.au/cgi-bin/silo'
path_to_coordscsv_file = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Global_Data/Climatology/SILO/'
#replace by actual geocodes
#geocode=pd.DataFrame([['YANKALILLA','-35.494262', '138.362596'],
# ['PORT WAKEFIELD','-34.185349', '138.155379']],columns=['Brigade','latitude','longitude'])
list_of_weather=[]
weather=pd.DataFrame()
def getGeocode():
geocode= pd.read_csv(path_to_coordscsv_file + "coords.csv")
print(geocode.columns)
geocode.set_index('Brigade')
#print("dfdfdfD")
#print(geocode)
return geocode;
def buildUrl(lat,long):
params = {
'format': 'standard',
'lat': str(lat),
'lon': str(long),
'start': '20090101',
'finish': '20181231',
'username': 'sk3862@drexel.edu',
'password': 'silo'
}
url = api_url + '/DataDrillDataset.php?' + urllib.parse.urlencode(params)
return url
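#Example of the request URL assembled by buildUrl() for the first illustrative coordinate above
#(query parameters follow the params dict; username and password are whatever is set there):
#https://www.longpaddock.qld.gov.au/cgi-bin/silo/DataDrillDataset.php?format=standard&lat=-35.494262&lon=138.362596&start=20090101&finish=20181231&username=...&password=silo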
def sendRequest(url):
with urllib.request.urlopen(url) as remote:
data = remote.read()
s=str(data,'utf-8')
data_formatted = StringIO(s)
df=pd.read_csv(data_formatted)
return df
def getData(lat,long) :
return weather
#lat long index
geocode=getGeocode()
for i in range(len(geocode)):
print(i)
brigade=[geocode.loc[i,'Brigade']]
url=buildUrl(geocode.loc[i,'latitude'], geocode.loc[i,'longitude']) #url for lat long
df=sendRequest(url) #query the Australian SILO website
if i==0:
headr=df.iloc[31,:]
headr.reset_index(inplace=True, drop=True)
headr.replace('\s+', ',',regex=True,inplace=True) #separate out the header
headr[0]=headr[0]+",Brigade"+",Latitude"+",Longitude";
list_of_weather.append(headr)
df=df[32:(len(df)-1)]#cleaning remove header, indexes
df.replace('\s+', ',',regex=True,inplace=True) #make csv space delimited to comma delimited
df=df.iloc[:,-1]#cleaning
df.name=brigade[0]
formatted_data=df+","+brigade[0]+","+str(geocode.loc[i,'latitude'])+","+str(geocode.loc[i,'longitude'])
list_of_weather.append(formatted_data)#combine for different locations
weather=pd.concat(list_of_weather) #combine the header and per-location series; the dataframe built below relies on this
#weather = [getData(x, y) for x, y in zip(geocode['latittude'], geocode['longitude'])]
#reformat into a nice dataframe with colnames and save that as CSV
col_names=["Date (yyyymmdd)","Day","T.Max(oC)","Smx","T.Min(oC)","Smn","Rain (mm)","Srn","Evap(mm)","Sev", "Radn(MJ/m2)","Ssl" ,"VP (hPA)","Svp","RHmaxT(%)" ,"RHminT(%)" ,"Date2(ddmmyyyy)","Brigade","Latitude","Longitude"]
df = pd.DataFrame(weather.str.split(',',n=19).tolist(),
columns = col_names)
df = df.iloc[1:]
print(df.head())
df.to_csv(path_to_coordscsv_file + 'SILO_weather_data' + brigade[0] + '.csv')

@ -0,0 +1,94 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 3 12:54:11 2020
@original author: Saniya Khan
@adapted by: Valentin Heimhuber
This code uses a coords.csv file with a list of lat/lon locations to extract SILO gridded climatology data at each location and
writes it to a csv file
More info about how to do the data drills via the API is provided here: https://www.longpaddock.qld.gov.au/silo/api-documentation/
SILO gridded data has a resolution of 0.05° × 0.05° (approximately 5 km x 5 km)
More information about the interpolated & gridded SILO datasets can be found here: https://www.longpaddock.qld.gov.au/silo/faq/#faq5
"""
from __future__ import unicode_literals
import urllib.request
import urllib.parse
import pandas as pd
from io import StringIO
from itertools import repeat
import numpy as np #needed for np.round used when building the request URL below
api_url = 'https://www.longpaddock.qld.gov.au/cgi-bin/silo'
path_to_coordscsv_file = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Global_Data/Climatology/SILO/'
#replace by actual geocodes
#geocode=pd.DataFrame([['YANKALILLA','-35.494262', '138.362596'],
# ['PORT WAKEFIELD','-34.185349', '138.155379']],columns=['Brigade','latitude','longitude'])
list_of_weather=[]
weather=pd.DataFrame()
def getGeocode():
geocode= pd.read_csv(path_to_coordscsv_file + "coords.csv")
print(geocode.columns)
geocode.set_index('Brigade')
#print("dfdfdfD")
#print(geocode)
return geocode;
def buildUrl(lat,long):
params = {
'format': 'alldata',
'lat': str(lat),
'lon': str(long),
'start': '20090101',
'finish': '20181231',
'username': 'sk3862@drexel.edu',
'password': 'silo'
}
url = api_url + '/DataDrillDataset.php?' + urllib.parse.urlencode(params)
return url
def sendRequest(url):
with urllib.request.urlopen(url) as remote:
data = remote.read()
s=str(data,'utf-8')
data_formatted = StringIO(s)
df=pd.read_csv(data_formatted)
return df
def getData(lat,long) :
return weather
#lat long index
geocode=getGeocode()
for i in range(len(geocode)):
print(i)
brigade=[geocode.loc[i,'Brigade']]
print(brigade)
url=buildUrl(np.round(geocode.loc[i,'latitude'],4), np.round(geocode.loc[i,'longitude'], 4)) #url for lat long
df=sendRequest(url) #query the Australian SILO website
if i==0:
headr=df.iloc[46,:]
headr.reset_index(inplace=True, drop=True)
headr.replace('\s+', ',',regex=True,inplace=True) #separate out the header
headr[0]=headr[0]+",Brigade"+",Latitude"+",Longitude";
list_of_weather.append(headr)
df=df[47:(len(df)-1)]#cleaning remove header, indexes
df.replace('\s+', ',',regex=True,inplace=True) #make csv space delimited to comma delimited
df=df.iloc[:,-1]#cleaning
df.name=brigade[0]
formatted_data=df+","+brigade[0]+","+str(geocode.loc[i,'latitude'])+","+str(geocode.loc[i,'longitude'])
type(formatted_data)
list_of_weather.append(formatted_data)#combine for different locations
#weather=pd.concat(list_of_weather)
#weather = [getData(x, y) for x, y in zip(geocode['latittude'], geocode['longitude'])]
#reformat into a nice dataframe with colnames and save that as CSV
#col_names=["Date (yyyymmdd)","Day","T.Max(oC)","Smx","T.Min(oC)","Smn","Rain (mm)","Srn","Evap(mm)","Sev", "Radn(MJ/m2)","Ssl" ,"VP (hPA)","Svp","RHmaxT(%)" ,"RHminT(%)" ,"Date2(ddmmyyyy)","Brigade","Latitude","Longitude"]
col_names=["Date (yyyymmdd)","Day", "Date2(ddmmyyyy)", "T.Max(oC)","Smx","T.Min(oC)","Smn","Rain (mm)","Srn","Evap(mm)","Sev", "Radn(MJ/m2)","Ssl" ,"VP (hPA)","Svp","RHmaxT(%)" ,"RHminT(%)" ,
'FAO56(mm)', 'Mlake(mm)','Mpot(mm)' ,'Mact(mm)' ,'Mwet(mm)' ,'Span(mm)' ,'Ssp()' ,'EvSp(mm)' ,'Ses()' ,'MSLPres(hPa)' ,'Sp()', "Brigade","Latitude","Longitude"]
df = pd.DataFrame(formatted_data.str.split(',',n=30).tolist(),
columns = col_names)
df = df.iloc[1:]
print(df.head())
df.to_csv(path_to_coordscsv_file + '_SILO_weather_alldata_' + brigade[0] + '.csv')

@ -0,0 +1,163 @@
from struct import *
from pylab import *
import sys
# Updated 10/11/15 BMM : Added mesh.
# Updated 10/11/15 MD : Added dictionary initialisation in mesh.
class rma:
def __init__(self):
self.file='not opened yet'
# May want to do some other things here later
def open(self,filename):
self.file=open(('%s.rma' % (filename)),'rb')
self.header=self.file.read(1000)
self.type=self.header[0:10]
self.title=self.header[100:172]
self.geometry=self.header[200:300]
self.num_nodes=int(self.header[40:50])
self.num_elements=int(self.header[50:60])
if self.type==b'RMA11 ':
self.num_constits=int(self.header[60:70])
if len(self.header[80:90].strip())==0:
self.num_sedlayers=0
else:
self.num_sedlayers=int(self.header[80:90])
self.constit_name=[]
self.constit_name.append("NULL")
i=1
#print(self.num_constits)
while i <= self.num_constits:
#print(i, self.num_constits, i <= self.num_constits)
self.constit_name.append(self.header[300+(i-1)*8:308+(i-1)*8])
if self.header[300+(i-1)*8:308+(i-1)*8]==" SSED ":
self.constit_name.append(" BSHEAR")
self.constit_name.append("BedThick")
j=1
while j<=self.num_sedlayers:
self.constit_name.append(" L%dThick" % j)
j=j+1
i=i+1
#if self.num_sedlayers > 0:
# self.num_constits=self.num_constits+2+self.num_sedlayers
# Header format in RMA2
# HEADER(1:10) ='RMA2 '
# HEADER(11:20)=DATEC = Date of run
# HEADER(21:30)=TIMEC = Time of run
# HEADER(31:40)=ZONEC = Time zone
# HEADER(41:50)=NP = Number of nodes
# HEADER(51:60)=NE = Number of elements
# HEADER(101:172)=TITLE = Title line
# HEADER(201:300)=FNAME = File name of binary geometry
# Header format in RMA11
# HEADER(1:10) - Model
# HEADER(11:20) - DATEC of model runtime
# HEADER(21:30) - TIMEC of model runtime
# HEADER(31:40) - ZONEC of model runtime
# HEADER(41:90) - '(5I10)' NP,NE,NQAL,IGS,LGS or '(5I10)' NP,NE,NQAL,ISEDS,NLAYT
# HEADER(101:172) - TITLE
# HEADER(301:540) - '(30A8)' CLABL
# HEADER(201:248) - FNAMD directory of the geometry
# HEADER(251:298) - FNAM2 filename of the geometry
def mesh(self,filename):
self.meshfile=open(('%s.rm1' % (filename)),'r')
l=self.meshfile.readline()
while l[0:5]!=' 9999' and len(l)!=5:
l=self.meshfile.readline()
l=self.meshfile.readline()
self.node_e={}
self.node_n={}
self.node_z={}
while l[0:10]!=' 9999' and len(l)!=10:
ll=l.split()
self.node_e[ll[0]]=ll[1]
self.node_n[ll[0]]=ll[2]
self.node_z[ll[0]]=ll[3]
l=self.meshfile.readline()
def next(self):
# This reads the next timestep and populates the variables.
# Reset all of the variables
self.time=0.0
self.year=0
self.xvel=[]
self.yvel=[]
self.zvel=[]
self.depth=[]
self.elevation=[]
# Add in an entry to fill position 0. Important since RMA files start numbering nodes from 1.
self.xvel.append(-999)
self.yvel.append(-999)
self.zvel.append(-999)
self.depth.append(-999)
self.elevation.append(-999)
if self.type==b'RMA2 ':
# WRITE(IRMAFM) TETT,NP,IYRR,((VEL(J,K),J=1,3),K=1,NP),(WSEL(J),J=1,NP),(VDOT(3,K),K=1,NP)
t=self.file.read(12)
if t:
a=unpack('fii',t)
self.time=a[0]
np=int(a[1])
self.year=a[2]
if (np!=self.num_nodes):
print ("Warning - NP (%d) on this timestep does not match header (%d)" % (np,self.num_nodes))
b=unpack('%df' % 5*np, self.file.read(20*np))
i=1
while i<=np:
self.xvel.append(b[0+(i-1)*3])
self.yvel.append(b[1+(i-1)*3])
self.depth.append(b[2+(i-1)*3])
self.elevation.append(b[np*3+(i-1)])
i=i+1
if self.type==b'RMA11 ':
self.constit=[]
c=0
# Start at zero to leave an empty array space. Constits numbered 1 .. num_constits
while c<=self.num_constits:
self.constit.append([])
# Put a null into the 0 position so that RMA node numbers are in the same numbered array position.
self.constit[c].append(-999)
c=c+1
# READ(file1,END=100) TETT1,NQAL,NP,IYRR, ((VEL(K,J),J=1,NP),K=1,3), (wd(j),j=1,np), (wsel(j),j=1,np), ((TCON1(K,J),J=1,NP),K=1,NQAL-5)
t=self.file.read(16)
if t:
a=unpack('fiii',t)
self.time=a[0]
nqal=int(a[1])
np=int(a[2])
self.year=a[3]
if ((nqal-5)!=(self.num_constits)):
print ("Warning - NQAL-5 (%d) on this timestep does not match header (%d)" % (nqal-5,self.num_constits))
if (np!=self.num_nodes):
print ("Warning - NP (%d) on this timestep does not match header (%d)" % (np,self.num_nodes))
b=unpack('%df' % nqal*np, self.file.read(4*nqal*np))
i=1
while i<=np:
self.xvel.append(b[0+(i-1)*3])
self.yvel.append(b[1+(i-1)*3])
self.zvel.append(b[2+(i-1)*3])
self.depth.append(b[np*3+(i-1)])
self.elevation.append(b[np*4+(i-1)])
c=1
while c<=self.num_constits:
self.constit[c].append(b[np*((c-1)+5)+(i-1)])
c=c+1
i=i+1
if len(self.xvel)==1:
# Note that this is a 1 now as we've filled the zero array position
return False
else:
return True
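# Usage sketch for the rma class above (assumes a binary results file 'run1.rma' and a matching
# mesh file 'run1.rm1' exist in the working directory; the file names are illustrative only):
# r = rma()
# r.open('run1')      # reads the 1000 byte header and sets num_nodes, num_elements, num_constits, ...
# r.mesh('run1')      # optional: loads node eastings/northings/elevations from the .rm1 mesh
# while r.next():     # steps through the binary timesteps until the end of the file
#     print(r.time, r.elevation[1])   # node numbering starts at 1; index 0 holds a -999 filler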

@ -0,0 +1,199 @@
from struct import *
from pylab import *
import sys
# Updated 10/11/15 BMM : Added mesh.
# Updated 10/11/15 MD : Added dictionary initialisation in mesh.
class rma:
def __init__(self):
self.file = 'not opened yet'
# May want to do some other things here later
def open(self, filename):
self.file = open(('%s.rma' % (filename)), 'rb')
self.header = self.file.read(1000).decode("utf-8")
self.type = self.header[0:10]
self.title = self.header[100:172]
self.geometry = self.header[200:300]
self.num_nodes = int(self.header[40:50])
self.num_elements = int(self.header[50:60])
if self.type == 'RMA11 ':
self.num_constits = int(self.header[60:70])
if len(self.header[80:90].strip()) == 0:
self.num_sedlayers = 0
else:
self.num_sedlayers = int(self.header[80:90])
self.constit_name = []
self.constit_name.append("NULL")
i = 1
print(self.num_constits)
print(self.header[300:1000])
while i <= self.num_constits:
# print self.header[300:400]
self.constit_name.append(self.header[300 + (i - 1) * 8:308 + (i - 1) * 8])
if self.header[300 + (i - 1) * 8:308 + (i - 1) * 8] == " SSED ":
self.constit_name.append(" BSHEAR")
self.constit_name.append("BedThick")
j = 1
print(self.num_sedlayers)
while j <= self.num_sedlayers:
self.constit_name.append(" L%dThick" % j)
j = j + 1
i = i + 1
# print i
# print self.num_constits
# print i
# if self.num_sedlayers > 0:
# self.num_constits=self.num_constits+2+self.num_sedlayers
# Header format in RMA2
# HEADER(1:10) ='RMA2 '
# HEADER(11:20)=DATEC = Date of run
# HEADER(21:30)=TIMEC = Time of run
# HEADER(31:40)=ZONEC = Time zone
# HEADER(41:50)=NP = Number of nodes
# HEADER(51:60)=NE = Number of elements
# HEADER(101:172)=TITLE = Title line
# HEADER(201:300)=FNAME = File name of binary geometry
# Header format in RMA11
# HEADER(1:10) - Model
# HEADER(11:20) - DATEC of model runtime
# HEADER(21:30) - TIMEC of model runtime
# HEADER(31:40) - ZONEC of model runtime
# HEADER(41:90) - '(5I10)' NP,NE,NQAL,IGS,LGS or '(5I10)' NP,NE,NQAL,ISEDS,NLAYT
# HEADER(101:172) - TITLE
# HEADER(301:540) - '(30A8)' CLABL
# HEADER(201:248) - FNAMD directory of the geometry
# HEADER(251:298) - FNAM2 filename of the geometry
def mesh(self, filename):
self.meshfile = open(('%s.rm1' % (filename)), 'r')
l = self.meshfile.readline()
while l[0:5] != ' 9999' and len(l) != 5:
l = self.meshfile.readline()
l = self.meshfile.readline()
self.node_e = {}
self.node_n = {}
self.node_z = {}
while l[0:10] != ' 9999' and len(l) != 10:
ll = l.split()
self.node_e[ll[0]] = ll[1]
self.node_n[ll[0]] = ll[2]
self.node_z[ll[0]] = ll[3]
l = self.meshfile.readline() # readline must be called; without the () the loop never advances
def next(self):
# This reads the next timestep and populates the variables.
# Reset all of the variables
self.time = 0.0
self.year = 0
self.xvel = []
self.yvel = []
self.zvel = []
self.depth = []
self.elevation = []
self.temperature = []
self.salinity = []
self.sussed = []
# Add in an entry to fill position 0. Important since RMA files start numbering nodes from 1.
self.xvel.append(-999)
self.yvel.append(-999)
self.zvel.append(-999)
self.depth.append(-999)
self.elevation.append(-999)
self.temperature.append(-999)
self.salinity.append(-999)
self.sussed.append(-999)
if self.type == 'RMA2 ':
# WRITE(IRMAFM) TETT,NP,IYRR,((VEL(J,K),J=1,3),K=1,NP),(WSEL(J),J=1,NP),(VDOT(3,K),K=1,NP)
t = self.file.read(12)
if t:
a = unpack('fii', t)
self.time = a[0]
np = int(a[1])
self.year = a[2]
if (np != self.num_nodes):
print("Warning - NP (%d) on this timestep does not match header (%d)" % (np, self.num_nodes))
b = unpack('%df' % 5 * np, self.file.read(20 * np))
tempA = [(x - 1) * 3 for x in range(1, np + 1)]
tempB = [np * 3 + (x - 1) for x in range(1, np + 1)]
self.xvel.extend([b[i] for i in tempA])
self.yvel.extend([b[i + 1] for i in tempA])
self.depth.extend([b[i + 2] for i in tempA])
self.elevation.extend([b[i] for i in tempB])
if self.type == 'RMA11 ':
self.constit = []
c = 0
# Start at zero to leave an empty array space. Constits numbered 1 .. num_constits
while c <= self.num_constits:
self.constit.append([])
# Put a null into the 0 position so that RMA node numbers are in the same numbered array position.
self.constit[c].append(-999)
c = c + 1
# READ(file1,END=100) TETT1,NQAL,NP,IYRR, ((VEL(K,J),J=1,NP),K=1,3), (wd(j),j=1,np), (wsel(j),j=1,np), ((TCON1(K,J),J=1,NP),K=1,NQAL-5)
t = self.file.read(16)
if t:
a = unpack('fiii', t)
self.time = a[0]
nqal = int(a[1])
np = int(a[2])
self.year = a[3]
if ((nqal - 5) != (self.num_constits)):
print("Warning - NQAL-5 (%d) on this timestep does not match header (%d)" % (
nqal - 5, self.num_constits))
if (np != self.num_nodes):
print("Warning - NP (%d) on this timestep does not match header (%d)" % (np, self.num_nodes))
b = unpack('%df' % nqal * np, self.file.read(4 * nqal * np))
tempA = [(x - 1) * 3 for x in range(1, np + 1)]
tempB = [np * 3 + (x - 1) for x in range(1, np + 1)]
tempC = [np * 4 + (x - 1) for x in range(1, np + 1)]
self.xvel.extend([b[i] for i in tempA])
self.yvel.extend([b[i + 1] for i in tempA])
self.zvel.extend([b[i + 2] for i in tempA])
self.depth.extend([b[i] for i in tempB])
self.elevation.extend([b[i] for i in tempC])
for c in range(1, self.num_constits + 1):
(self.constit[c].extend([b[np * ((c - 1) + 5) + (x - 1)] for x in range(1, np + 1)]))
if self.type == 'RMA10 ':
# WRITE(IRMAFM) TETT,NP,NDF,NE,IYRR,((VSING(K,J),K=1,NDF),VVEL(J),WSLL(J),J=1,NP),(DFCT(J),J=1,NE),(VSING(7,J),J=1,NP)
# WRITE(IRMAFM) TETT,NP,IYRR,((VEL(J,K),J=1,3),K=1,NP),(WSEL(J),J=1,NP),(VDOT(3,K),K=1,NP)
t = self.file.read(20)
if t:
a = unpack('fiiii', t)
self.time = a[0]
np = a[1]
ndf = 6
ne = a[3]
self.year = a[4]
if (np != self.num_nodes):
print("Warning - NP1 (%d) on this timestep does not match header (%d)" % (np, self.num_nodes))
tempRead = np * (3 + ndf) + ne
b = unpack('%df' % tempRead, self.file.read(4 * tempRead))
i = 1
while i <= np:
self.xvel.append(b[0 + (i - 1) * 8])
self.yvel.append(b[1 + (i - 1) * 8])
self.depth.append(b[2 + (i - 1) * 8])
self.salinity.append(b[3 + (i - 1) * 8])
self.temperature.append(b[4 + (i - 1) * 8])
self.sussed.append(b[5 + (i - 1) * 8])
self.zvel.append(b[6 + (i - 1) * 8])
self.elevation.append(b[7 + (i - 1) * 8])
i = i + 1
if len(self.xvel) == 1:
# Note that this is a 1 now as we've filled the zero array position
return False
else:
return True

@ -0,0 +1 @@
from .pyrma import loadMesh, point

@ -0,0 +1,54 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 19:02:20 2020
@author: z5025317
"""
#load existing tgr time series boundary, raise by 1m and output a new tgr file
#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import glob
os.chdir('C:/Users/z5025317/UNSW/Danial Khojasteh - HEMIP/RMA_result_explorer_code')
River = 'Shoalhaven'
tgr_csv_folder_path = 'C:/Users/z5025317/UNSW/Danial Khojasteh - HEMIP/Models/' + River + '/02_Input/Boundary_conditions/Tide/'
filenames = glob.glob(tgr_csv_folder_path + '*05.csv')
for tgr_csv_name in filenames:
print (tgr_csv_name)
#tgr_csv_name = 'R45_2008_existing_run_tgr.csv'
Waterlevel_increases = [0.25, 0.5, 1, 2] # in m AHD
for Waterlevel_increase in Waterlevel_increases:
tgr_outname = tgr_csv_folder_path + os.path.basename(tgr_csv_name)[:-4] + '_plus_SLR_' + str(Waterlevel_increase) + 'm'
Full_df = pd.read_csv(tgr_csv_name ,header= 0, parse_dates = False)
Full_df.columns = ['1','2','3','4']
df = Full_df #.loc[1:2000]
# str(df['2'][1500].astype(int))
# RMA2: create input file
fq = open(
os.path.join(tgr_csv_folder_path, tgr_outname + '.tgr'),
'w')
fq.write('TT Manning base case tide + SLR of ' + str(Waterlevel_increase) + 'm \n')
#fq.write('{:<5}{:>3}{:>8}{:>8}\n'.format('CLH', '', '2', '2008'))
fq.write('{:<5}{:>3}{:>8}{:>8}\n'.format('CLH', '', int(df['3'][0]), int(df['4'][0])))
for i in range(1,len(df)-1,1):
# For each new element
fq.write('{:<5}{:>3}{:>8}{:>8}\n'.format(df['1'][i], df['2'][i], df['3'][i], str(np.round(df['4'][i].astype(float)+ Waterlevel_increase,3))))
fq.write('ENDDATA')
fq.close()

@ -0,0 +1,49 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 19:02:20 2020
@author: z5025317
"""
#load existing tgr time series boundary, raise by 1m and output a new tgr file
#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import glob
os.chdir('C:/Users/z5025317/UNSW/Danial Khojasteh - HEMIP/RMA_result_explorer_code')
year = '2017'
Ocean_gauges_folder = 'J:/Project/wrl2018064 Fisheries RAP/04_Working/05_Modelling/RMA/HEMIP/Global_HEMIP_Data/MHL_Gauge_Data/20200409_Recieved/Open_ocean/'
filenames = glob.glob(Ocean_gauges_folder + '*.csv')
filenames = [filenames[j] for j in [0, 1, 3]] #keep only the gauges of interest; glob returns a plain list, so use a comprehension rather than fancy indexing
timestep = 0
for tgr_csv_name in filenames:
print (tgr_csv_name)
#tgr_csv_name = filenames[3] #debugging override left commented out so that every file in the loop is processed
tgr_outname = Ocean_gauges_folder + os.path.basename(tgr_csv_name)[:-4] + '_2' + year
Full_df = pd.read_csv(tgr_csv_name ,header= 0, skiprows= 30, parse_dates = True)
Full_df.index = pd.to_datetime(Full_df.Date, format = '%d/%m/%Y') + pd.to_timedelta(Full_df.Time, unit='h')
Full_df = Full_df.iloc[:,[-2]]
Full_df.columns = ['WL[AHD]']
Full_df = Full_df.loc[(Full_df.index >= year + '-01-01 00:00:00') & (Full_df.index <= str(np.int(year)+1) + '-01-01 00:00:00')]
# str(df['2'][1500].astype(int))
# RMA2: create input file
fq = open(
os.path.join(Ocean_gauges_folder, tgr_outname + '.tgr'),
'w')
fq.write('TT MHL tide for ' + os.path.basename(tgr_csv_name)+ ' ' + year + '\n')
fq.write('{:<5}{:>3}{:>8}{:>8}\n'.format('CL', '', '2', year))
#fq.write('{:<5}{:>3}{:>8}{:>8}\n'.format('CL', '', int(df['3'][0]), int(df['4'][0])))
for i in range(1,len(Full_df),1):
# For each new element
fq.write('{:<5}{:>3}{:>8}{:>8}\n'.format('HD', str(np.int(Full_df.index[i-1].strftime('%j'))), str(np.round((np.float(Full_df.index[i-1].strftime('%M'))+ np.float(Full_df.index[i-1].strftime('%H'))*60) /60,2)), str(np.round(np.float(Full_df['WL[AHD]'][i-1]),3))))
fq.write('ENDDATA')
fq.close()
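#Example of the fixed width records written by the '{:<5}{:>3}{:>8}{:>8}' format above, with
#illustrative values and approximate spacing: a CL header record, then one HD record per timestep
#giving the day of year, the decimal hour within that day and the water level in m AHD, e.g.
#CL             2    2017
#HD    32   12.25   0.523
#ENDDATA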