Compare commits


6 Commits

Author SHA1 Message Date
tinoheimhuber 873eed7540 #suite of python codes for exploring rma result files 6 years ago
tinoheimhuber 83f89c254d #made some changes to accommodate the new structure of the MESH_v2. Catchments 25-31 were merged into one single catchment (25). The rain and ET data are untouched but are cropped down to 25 catchments in the preprocessing code. 6 years ago
tinoheimhuber 399318dcf3 #added feature for including ocean WQ boundary as well as for increasing
river inflow temp and ocean temp by a user defined delta for near and
far future
#fully working.
6 years ago
tinoheimhuber ddf60b7cb0 #back up of working python code 6 years ago
Valentin Heimhuber 342f4f3bc0 #new code for preparing the NARCLIM data for use in the rma_preprocessing.py
code; it's fully working!
7 years ago
Valentin Heimhuber 46ade05763 #Comprises a fully working version of the major BC Generation R and Python codes
The project is still being set up, so most of the code will still undergo
significant changes
7 years ago

@@ -0,0 +1,735 @@
# coding: utf-8
import re
import os
import time
import collections
import numpy as np
import pandas as pd
from tqdm import tqdm
from datetime import datetime
import pyrma.pyrma
path = "C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/07_Modelling/01_Input/BCGeneration/"
# Load project settings
# Establish the settings and run parameters (see the description of
# settings in the header of this code)
if __name__ == '__main__':
setup_files = [f for f in os.listdir(path) if f.lower().endswith('.s')]
if len(setup_files) == 1:
settingsfile = setup_files[0]
else:
print('Enter the name of the settings file: ')
settingsfile = input()
S = collections.OrderedDict()
print('Reading settings file: {}'.format(settingsfile))
with open(settingsfile, 'r') as f:
for line in f:
# Ignore commented and empty lines
if line[0] != '#' and line[0] != '\n':
# Take key name before colon
ln = line.strip().split(':', 1)
key = ln[0]
# Separate multiple values
val = [x.strip() for x in ln[1].strip().split(',')]
if len(val) == 1:
val = val[0]
S[key] = val
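# For reference, a minimal sketch of the settings-file (*.s) format this parser
# expects. The key names below match keys used later in this script, but the
# values are purely illustrative:
#   output_dir : C:/model/output/
#   start_year : 1995
#   include_boundary_flows : yes
#   bc_1 : HunterRiver, 372000, 6370000
# Lines starting with '#' are skipped; comma-separated values become lists.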
# Create output directories if missing
if not os.path.exists(S['output_dir']):
os.makedirs(S['output_dir'])
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
if not os.path.exists(S['output_dir'] + 'RMA2'):
os.makedirs(S['output_dir'] + 'RMA2')
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
if not os.path.exists(S['output_dir'] + 'RMA11'):
os.makedirs(S['output_dir'] + 'RMA11')
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
# Collect run parameters
env_str = ''
env_str += "{0:20} : {1}\n".format("Time Run",
time.strftime('%Y-%m-%d %H:%M:%S'))
env_str += "{0:20} : {1}\n".format("Settings File", settingsfile)
for envvar in S.keys():
env_str += "{0:20} : {1}\n".format(envvar, S[envvar])
# Load RMA mesh
print('Reading RMA mesh file')
nodes, elements = pyrma.loadMesh(S['mesh_file'])
mesh = pd.DataFrame(elements, index=[0]).transpose()
mesh.columns = ['name']
mesh['centroid'] = [e.centroid for e in elements.values()]
# Add empty lists to dataframe
mesh['inflows'] = np.empty((len(mesh), 0)).tolist()
mesh['inflows'] = mesh['inflows'].astype(object)
# Generate empty dataframe for inflows
start_date = datetime(int(S['start_year']), int(S['start_month']), int(S['start_day']))
end_date = datetime(int(S['end_year']), int(S['end_month']), int(S['end_day']))
inflow_timeseries = pd.DataFrame(index=pd.date_range(start_date, end_date))
# present-day period start and end dates
pres_start_date = datetime(int(S['PD_start_year']), int(S['PD_start_month']), int(S['PD_start_day']))
pres_end_date = datetime(int(S['PD_end_year']), int(S['PD_end_month']), int(S['PD_end_day']))
# Generate empty dictionary (to be filled with dataframes) for water quality
wq_timeseries = {}
# Read upstream boundary inflows
if S['include_boundary_flows'].lower() == 'yes':
# Read boundary condition data from setup file
bc_data = {}
for key, val in S.items():
if re.match(r'bc_\d', key):
bc_data[val[0]] = dict(east=int(val[1]), north=int(val[2]))
dir_name = S['bc_directory']
for key, val in bc_data.items():
file_name = [x for x in os.listdir(dir_name) if x.startswith(key)][0]
bc_data[key]['path'] = os.path.join(dir_name, file_name)
# Assign upstream boundary inflows to RMA mesh
for key, val in bc_data.items():
# Find nearest element in RMA mesh
x = val['east']
y = val['north']
mesh['calc'] = [pyrma.point(x, y).dist(c) for c in mesh['centroid']]
idx = mesh['calc'].idxmin()
# Add nearest mesh element location to dataframe
mesh.at[idx, 'inflows'] = np.append(mesh.loc[idx, 'inflows'], key)
for key, val in bc_data.items():
# Read upstream boundary condition file
print('Reading upstream boundary inflow: {}'.format(key))
df = pd.read_csv(
val['path'], index_col=0, parse_dates=['datetime'], dayfirst=True)
#TH # Shift the upstream boundary flows into the future if date is in the future.
df.index = df.index + (start_date - pres_start_date)
# Trim dates to valid range
df = df[start_date:end_date]
# Scale flow units to m3/s
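# 1 ML/d = 1000 m3 per 86400 s, i.e. multiply by 1000/86400 (~0.0116) for m3/s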
df[key] = df['Q[ML/d]'] * 1000 / 24 / 3600
# Merge all dataframes together
#inflow_timeseries2 = pd.merge(
#inflow_timeseries, df[[key]], right_index=True, left_index=True)
#TH #Tino added:
inflow_timeseries = pd.concat([inflow_timeseries, df[[key]]], axis=1)
# Add to water quality timeseries
df = df.drop(['Q[ML/d]', key], axis = 1)
#increase river temperature by Delta
if S['Increase_riv_temp'] == 'yes':
df['Temperature'] = df['Temperature'] + float(S['Riv_temp_increase'])
wq_timeseries[key] = df
CC_data = {}
for key, val in S.items():
if re.match(r'CC_\d', key):
CC_data[val[0]] = dict(cc=int(val[1]))
dir_name = S['CC_WQ_directory']
for key, val in CC_data.items():
file_name = [x for x in os.listdir(dir_name) if x.startswith(key)][0]
CC_data[key]['path'] = os.path.join(dir_name, file_name)
CC_timeseries = {}
if S['include_CC_wq'].lower() == 'yes':
print('Reading WQ at CC line')
for key in CC_data.keys():
df = pd.read_csv(
CC_data[key]['path'],
index_col=0,
parse_dates=[0],
dayfirst=True)
#Shift the water quality time series data frame by the (start_date - pres_start_date) offset
df.index = df.index + (start_date - pres_start_date)
#increase temperature by Delta
if S['Increase_SST_temp'] == 'yes':
df['Temperature'] = df['Temperature'] + float(S['SST_increase'])
CC_timeseries[key] = df.copy()
# Read WWTP data from setup file
wwtp_data = {}
for key, val in S.items():
if re.match(r'wwtp_\d', key):
wwtp_data[val[0]] = dict(east=int(val[1]), north=int(val[2]))
dir_name = S['wwtp_directory']
for key, val in wwtp_data.items():
file_name = [x for x in os.listdir(dir_name) if x.startswith(key)][0]
wwtp_data[key]['path'] = os.path.join(dir_name, file_name)
# Read WWTP inflows
if S['include_wwtp_flows'].lower() == 'yes':
print('Reading WWTP inflows (variable)')
for key in wwtp_data.keys():
df = pd.read_csv(
wwtp_data[key]['path'],
index_col=0,
parse_dates=[0],
dayfirst=True)
# Find nearest element in RMA mesh
x = wwtp_data[key]['east']
y = wwtp_data[key]['north']
mesh['calc'] = [pyrma.point(x, y).dist(c) for c in mesh['centroid']]
idx = mesh['calc'].idxmin()
# Add nearest mesh element location to dataframe
mesh.at[idx, 'inflows'] = np.append(mesh.loc[idx, 'inflows'], key)
# Convert from ML/day to m3/s
df[key] = df[['Q[ML/d]']] * 1000 / 24 / 3600
#Shift the water quality time series data frame by the (start_date - pres_start_date) offset
df.index = df.index + (start_date - pres_start_date)
# Add to inflow time series dataframes
inflow_timeseries = inflow_timeseries.join(df[[key]])
# Add to water quality timeseries
wq_timeseries[key] = df.drop(['Q[ML/d]', key], axis = 1)
# Load reference rainfall and evapotranspiration
eto_master = pd.read_csv(
S['evap_file'], parse_dates=['datetime'], index_col=['datetime'])
rain_master = pd.read_csv(
S['rain_file'], parse_dates=['datetime'], index_col=['datetime'])
# Trim climate data to current date range
eto_master = eto_master[start_date:end_date]
eto_master = eto_master.iloc[:,0:25]
rain_master = rain_master[start_date:end_date]
rain_master = rain_master.iloc[:,0:25]
#inflow_timeseries.index.difference(rain_master.index)
# Calculate catchment inflows with AWBM
if S['include_hydro_model'].lower() == 'yes':
# Load water quality data for catchment inflows
for key in ['awbm_wq_natural', 'awbm_wq_urban']:
df = pd.read_csv(S[key], index_col=0, parse_dates=[0], dayfirst=True)
#TH # Shift the upstream boundary flows into the future if date is in the future.
df.index = df.index + (start_date - pres_start_date)
wq_timeseries[key] = df
print('Calculating AWBM inflows')
# Read catchment data
catchments = pd.read_csv(S['catchment_file'], index_col=[0])
catchments = catchments.set_index(catchments['Cat_Name'])
for index, row in catchments.iterrows():
# Find nearest element in RMA mesh
x = row.Easting
y = row.Northing
mesh['calc'] = [pyrma.point(x, y).dist(c) for c in mesh['centroid']]
idx = mesh['calc'].idxmin()
# Add nearest mesh element location to dataframe
mesh.at[idx, 'inflows'] = np.append(mesh.loc[idx, 'inflows'],
row.Cat_Name)
# Get weather station data
station_names = list(eto_master.columns)
# Load weights from Thiessen polygons
thiessen_weights = pd.read_csv(S['catchment_thiessen_weights'])
# Add catchment inflows
for index, c in catchments.iterrows():
# Time step (units: days)
timeStep = 1.0
# Area (units: m2)
totalArea = c['Area (km2)'] * 1000 * 1000
# Surface store capacities S (from C1-C3)
consS = [c['C1'], c['C2'], c['C3']]
# A (must sum to 1)
consA = [c['A1'], c['A2'], c['A3']]
consKB = c['Kbase']
consKS = c['Ksurf']
BFI = c['BFI']
bucketValue = [0, 0, 0]
bfValue = 0
sfValue = 0
def flowInit(length):
vec = [0] * length
return vec
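# updatebucket: spill any storage above the capacity surfaceCon as flow;
# otherwise no flow, with the storage clamped at zero or above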
def updatebucket(Elevation, surfaceCon, previousValue, flow):
if Elevation > surfaceCon:
flow = Elevation - surfaceCon
previousValue = surfaceCon
else:
flow = 0
previousValue = max(Elevation, 0)
return previousValue, flow
# Calculate Thiessen weightings
weights = thiessen_weights[thiessen_weights['Name'] == c['Cat_Name']][
station_names]
rain_local = (rain_master[station_names] *
weights.values.flatten()).sum(axis=1).values
eto_local = (eto_master[station_names] *
weights.values.flatten()).sum(axis=1).values
# Count number of timesteps
n = len(rain_master.index)
Excess = [flowInit(n) for i in range(3)]
ExcessTotal = flowInit(n)
ExcessBF = flowInit(n)
ExcessSF = flowInit(n)
ExcessRunoff = flowInit(n)
Qflow = flowInit(n)
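# one-off sanity call to updatebucket(); the returned aa and bb are not reused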
[aa, bb] = updatebucket(1, 2, 3, 4)
for i in range(1, len(rain_local)):
ElevTemp = [[bucketValue[j] + rain_local[i] - eto_local[i]]
for j in range(3)]
for k in range(3):
[bucketValue[k], Excess[k][i]] = updatebucket(
ElevTemp[k][0], consS[k], bucketValue[k], Excess[k][i - 1])
ExcessTotal[i] = (Excess[0][i] * consA[0] + Excess[1][i] * consA[1]
+ Excess[2][i] * consA[2])
ExcessBF[i] = bfValue + ExcessTotal[i] * BFI
bfValue = max(consKB * ExcessBF[i], 0)
ExcessSF[i] = sfValue + (1 - BFI) * ExcessTotal[i]
sfValue = max(consKS * ExcessSF[i], 0)
ExcessRunoff[i] = ((1 - consKB) * ExcessBF[i] +
(1 - consKS) * ExcessSF[i])
Qflow = [
a * (1e-3) * totalArea / (timeStep * 86400) for a in ExcessRunoff
] # flow in m3/s
Qflow_df = pd.DataFrame(Qflow, index=rain_master.index)
Qflow_df.columns = [c['Cat_Name']]
#inflow_timeseries[c['Cat_Name']] = Qflow
inflow_timeseries = pd.concat([inflow_timeseries, Qflow_df], axis=1)
#interpolate the NaN values of the leap-year 29th of February
inflow_timeseries[c['Cat_Name']]= inflow_timeseries[c['Cat_Name']].interpolate(method='linear', axis=0)
# Calculate irrigation demand
if S['include_crop_model'].lower() == 'yes':
print('Calculating irrigation demand')
# Create QA summary
qa_fraction_used = pd.DataFrame(
index=pd.date_range(start=start_date, end=end_date, freq='AS'))
# Load water licence holders
licences = pd.read_csv(S['licences_file'])
licences['point'] = [
pyrma.point(x, y)
for x, y in zip(licences['Easting'], licences['Northing'])
]
# Assign water licences to RMA mesh
for index, row in licences.iterrows():
# Find nearest element in RMA mesh
x = row.Easting
y = row.Northing
mesh['calc'] = [pyrma.point(x, y).dist(c) for c in mesh['centroid']]
idx = mesh['calc'].idxmin()
# Add nearest mesh element location to dataframe
mesh.at[idx, 'inflows'] = np.append(mesh.loc[idx, 'inflows'],
row.CWLICENSE)
weather_stations = pd.read_excel(S['weather_station_file'])
weather_stations['point'] = [
pyrma.point(x, y) for x, y in zip(weather_stations['E_MGA56'],
weather_stations['N_MGA56'])
]
# Find nearest weather station
licences['station_name'] = ''
for index, row in licences.iterrows():
idx = np.argmin(
[row['point'].dist(p) for p in weather_stations['point']])
licences.at[index, 'station_name'] = weather_stations['Name'][idx]
# http://www.fao.org/docrep/x0490e/x0490e0e.htm
crop_types = {
'type': [
'pasture', 'turf', 'lucerne', 'vegetables', 'orchard',
'non_irrigation'
],
'crop_coefficient': [1, 1, 1, 1, 1, np.nan],
'root_zone_depth': [1000, 750, 1500, 500, 1500, np.nan],
'allowable_depletion': [0.6, 0.5, 0.6, 0.5, 0.5, np.nan],
}
crop_types = pd.DataFrame(crop_types)
# Check number of days to spread irrigation over
irrigation_days = int(S['irrigation_days'])
# Check if moisture in soil should be kept full (saturated) or empty
if S['irrigate_to_saturation'].lower() == 'yes':
saturation_mode = True
else:
saturation_mode = False
irrigation_time_factor = 1 / irrigation_days
# Iterate through licences
for index, lic in tqdm(licences.iterrows(), total=licences.shape[0]):
# Initialise variables for new licence
currently_irrigating = False
licence_exhausted = False
# Annual extraction volume
annual_volume_capped = lic['SHARECOMPO'] * 1000
annual_volume_uncapped = np.inf
# Set a maximum daily extraction limit
daily_maximum_volume = annual_volume_capped * float(
S['daily_limit_fraction'])
if S['capped_to_licence'].lower() == 'yes':
# Limited to share component
annual_volume = annual_volume_capped
else:
# Unlimited
annual_volume = annual_volume_uncapped
# Check if licence is for non-irrigation purposes
if lic['PRIMARY_USE'].lower() == 'non_irrigation':
# Distribute licenced amount evenly over year (m3/year to m3/s)
irrigation_q = annual_volume / 365.25 / 24 / 3600
inflow_timeseries[lic['CWLICENSE']] = -irrigation_q
continue
# Irrigation area (m2)
area = lic['Area']
# Available water holding capacity (110 mm per m, converted to mm per mm)
water_holding_capacity = 110 / 1000
# Get parameters for specific crop type
crop = crop_types[crop_types['type'] == lic['PRIMARY_USE'].lower()]
# Crop coefficient (from FAO56)
crop_coefficient = float(crop['crop_coefficient'])
# Root zone depth (mm)
root_zone_depth = float(crop['root_zone_depth'])
# Allowable water level depletion, before irrigation is required (%)
allowable_depletion = float(crop['allowable_depletion'])
# Irrigation efficiency (percent)
efficiency = float(S['irrigation_efficiency'])
# Plant available water (mm)
plant_available_water = root_zone_depth * water_holding_capacity
# Irrigation trigger depth (mm)
threshold_depth = plant_available_water * allowable_depletion
# Calculate soil moisture over time
rain_local = rain_master[lic['station_name']]
eto_local = eto_master[lic['station_name']]
date = rain_master.index
depletion_depth = np.zeros(len(date))
irrigation_volume = np.zeros(len(date))
annual_irrigation_volume = np.zeros(len(date))
etc = eto_local * crop_coefficient
for i in range(1, len(date)):
if not saturation_mode:
currently_irrigating = False
# Calculate remaining licence allocation
remaining_allocation = annual_volume - annual_irrigation_volume[i - 1]
# Check if licence is exhausted
if remaining_allocation <= 0:
licence_exhausted = True
# Apply evapotranspiration and rain
current_depth = depletion_depth[i - 1] + etc[i] - rain_local[i - 1]
# Check if soil was irrigated the previous day
if irrigation_volume[i - 1] > 0:
current_depth = (
current_depth -
irrigation_volume[i - 1] / area * 1000 * efficiency)
# If soil is saturated from rain or irrigation, do not store excess
if current_depth < 0:
current_depth = 0
currently_irrigating = False
# Check if soil moisture is too low
if (((current_depth > threshold_depth) and
(rain_local[i] < 0.2 * current_depth)) or currently_irrigating):
if currently_irrigating:
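# offset (in days) back to the most recent nonzero irrigation volume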
idx_last_irrigation = np.where(
irrigation_volume[i::-1])[0][0]
irrigation_volume[i] = np.min([
irrigation_volume[i - idx_last_irrigation],
remaining_allocation, daily_maximum_volume
])
else:
currently_irrigating = True
irrigation_volume[i] = np.min([
current_depth / 1000 * area / efficiency *
irrigation_time_factor, remaining_allocation,
daily_maximum_volume
])
if licence_exhausted:
irrigation_volume[i] = 0
current_depth = threshold_depth
currently_irrigating = False
# Check if new year has started
if date[i].dayofyear == 1:
annual_irrigation_volume[i] = 0 + irrigation_volume[i]
licence_exhausted = False
else:
annual_irrigation_volume[i] = annual_irrigation_volume[i - 1] + irrigation_volume[i]
# Update depletion depth
depletion_depth[i] = current_depth
# Update QA table at end of year
if (date[i].month == 12) & (date[i].day == 31):
q_fraction_of_licence = annual_irrigation_volume[i] / annual_volume_capped
qa_fraction_used.loc[datetime(date[i].year, 1, 1), lic['CWLICENSE']] = q_fraction_of_licence
# Update inflows with irrigation demand (sign is negative for outflow)
irrigation_q = irrigation_volume / 24 / 3600
irrigation_q_df = pd.DataFrame(irrigation_q, index=rain_master.index)
irrigation_q_df.columns = [lic['CWLICENSE']]
inflow_timeseries = pd.concat([inflow_timeseries, irrigation_q_df], axis=1)
#interpolate the NaN values of the leap-year 29th of February
inflow_timeseries[lic['CWLICENSE']]= inflow_timeseries[lic['CWLICENSE']].interpolate(method='linear', axis=0)
#inflow_timeseries[lic['CWLICENSE']] = -irrigation_q
# Consolidate wq data into single dataframe
if S['include_WQ'].lower() == 'yes':
wq_df = pd.DataFrame()
wq_cols = wq_timeseries.keys()
####Written by tino##############################################
# # Generate empty dataframe for inflows
# Full_present_period_df = pd.DataFrame(index=pd.date_range(pres_start_date, pres_end_date))
# # Generate empty dataframe for inflows
# start_date = datetime(
# int(S['start_year']), int(S['start_month']), int(S['start_day']))
# end_date = datetime(int(S['end_year']), int(S['end_month']), int(S['end_day']))
# for n in wq_cols:
# Full_present_period_df = pd.concat([Full_present_period_df, pd.DataFrame(wq_timeseries[n]['Salinity'])], axis=1)
# Full_present_period_df = pd.DataFrame(Full_present_period_df.loc[(Full_present_period_df .index >= pres_start_date) & (Full_present_period_df .index <= pres_end_date)])
# wq = Full_present_period_df.replace(np.nan, 0)
# wq.columns = wq_cols
# #shift the WQ time series into the future if a future model run is executed
# wq.index = wq.index + (start_date - pres_start_date)
# #wq.index.name = 'constituent'
# #wq = wq.reset_index()
# #wq.index = np.tile(1, wq.shape[0])
# wq_df = wq
# #wq_df = wq_df.append(wq)
####Written by tino##############################################
#there is a problem here if the model run goes earlier than 1994; it
#then can't find the 1990 index from the inflow_timeseries
#wq_timeseries[n].index
for i in inflow_timeseries.index:
wq = pd.DataFrame([wq_timeseries[n].loc[i, :] for n in wq_cols]).T
wq.columns = wq_cols
wq.index.name = 'constituent'
wq = wq.reset_index()
wq.index = np.tile(i, wq.shape[0])
wq_df = wq_df.append(wq)
#Shift the water quality time series data frame by
#wq_df.index = wq_df.index + (start_date - pres_start_date)
# Write element inflows for RMA
# Consolidate inflow elements in RMA mesh (only include those with inflows)
inflow_elements = mesh.loc[[len(n) > 0
for n in mesh['inflows']], ['name', 'inflows']]
# Iterate through years
for current_year in range(start_date.year, end_date.year + 1):
# RMA2: create input file
fq = open(
os.path.join(S['output_dir'], 'RMA2', '{}.elt'.format(current_year)),
'w')
fq.write('TE Generated Runoff (see end of file for run parameters)\n')
# RMA11: create input file
if S['include_WQ'].lower() == 'yes':
fwq = open(
os.path.join(S['output_dir'], 'RMA11',
'{}.wqg'.format(current_year)), 'w')
# Create progress bar
pbar = tqdm(
inflow_elements['inflows'].iteritems(), total=inflow_elements.shape[0])
# Iterate through mesh elements
for ID, q_names in pbar:
# Update progess bar
pbar.set_description('Writing input for year {}'.format(current_year))
# For each new element
fq.write('{:<8}{:>8}{:>8}{:>8}'.format('QEI', ID, 1, current_year))
fq.write(' ### {}\n'.format(list(q_names)))
if S['include_WQ'].lower() == 'yes':
fwq.write('TI {}\n'.format(list(q_names)))
fwq.write('{:<8}{:>8}{:>8}{:>8}\n'.format('QT', ID, 3,
current_year))
# Iterate through time steps
for index, row in inflow_timeseries[inflow_timeseries.index.year ==
current_year].iterrows():
# Calculate flow rate for each timestep
q = sum(row[q_names].values)
fq.write('{:<5}{:>3}{:>8}{:>+8.1E}\n'.format(
'QE', index.dayofyear, index.hour, q))
if S['include_WQ'].lower() == 'yes':
# Get water quality values for current day
wq = wq_df.loc[index, :].set_index('constituent')
#wq = wq_df[wq_df.index == index].set_index('constituent') #TH I changed this since the constituent part did not work here.
# Get names of WWTP, catchment, and boundaries at current element
try:
w_names = [x for x in q_names if x in wwtp_data.keys()]
except NameError:
w_names = []
try:
c_names = [x for x in q_names if x in catchments.index]
except NameError:
c_names = []
try:
b_names = [x for x in q_names if x in bc_data.keys()]
except NameError:
b_names = []
# Initialise water quality values
wq_mass = np.zeros(len(wq.index))
# Calculate water quality in catchment runoff
if c_names:
c_natural_frac = catchments.loc[c_names, 'Natural'].values
c_natural_conc = wq['awbm_wq_natural'].values[:, np.newaxis]
c_urban_frac = catchments.loc[c_names, 'Urban'].values
c_urban_conc = wq['awbm_wq_urban'].values[:, np.newaxis]
c_flow = row[c_names].values
wq_mass += np.sum(
c_flow * (c_natural_frac * c_natural_conc +
c_urban_frac * c_urban_conc),
axis=1)
# Calculate water quality from WWTP inflows
if w_names:
w_conc = wq[w_names].values
w_flow = row[w_names].values
wq_mass += np.sum(w_flow * w_conc, axis=1)
# Calculate water quality from upstream boundaries
if b_names:
b_conc = wq[b_names].values
b_flow = row[b_names].values
wq_mass += np.sum(b_flow * b_conc, axis=1)
# Calculate water quality concentrations
if q <= 0:
wq_conc = [0] * len(wq_mass)
else:
wq_conc = wq_mass / q
# Write water quality concentrations
fwq.write('{:<5}{:>3}{:>8}{:>+8.1E}'.format(
'QD', index.dayofyear, index.hour, q) + ''.join(
'{:>8.2E}'.format(x) for x in wq_conc))
fwq.write('\n')
fq.write('ENDDATA\n\n')
fq.write(env_str)
fq.close()
if S['include_WQ'].lower() == 'yes':
if S['include_CC_wq'].lower() == 'yes':
for key, value in CC_timeseries.items():
fwq.write('TI {}\n'.format(key))
fwq.write('{:<8}{:>8}{:>8}{:>8}\n'.format('QT', CC_data['Entrance']['cc'], 1,
current_year))
for index, row in value[value.index.year ==
current_year].iterrows():
fwq.write('{:<5}{:>3}{:>8}'.format(
'QD', index.dayofyear, index.hour) + ''.join(
'{:>8.2E}'.format(x) for x in row))
fwq.write('\n')
fwq.write('ENDDATA\n\n')
fwq.write(env_str)
fwq.close()
print(env_str.split('\n')[0])
print('Done\n')

@@ -0,0 +1,3 @@
(
echo HWQ027.s
) | C:\Users\z3509544\AppData\Local\Continuum\Anaconda3\python hunter_rma_preprocessing.py

@@ -0,0 +1,39 @@
We are using a suite of Python and R codes to download and preprocess ET and P data from NARCLIM for modeling the climate change scenarios.
The codes are:
P1_NARCliM_NC_to_CSV_CCRC_SS.py: the Python code that downloads NARCLIM data (any variable) for a given point location (in lat/lon coordinates).
R_Preparing_BASH_script_for_NARCLIM_batch_download.R: creates LINUX BASH code for executing the NARCLIM download Python code (P1_NARCliM_NC_to_CSV_CCRC_SS.py) for each of the 31 catchments and per variable. It uses the NARCLIM data type and climate variable as inputs
and creates 3 text files per variable; each text file contains BASH code for 10 catchments.
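For orientation, one generated text file looks roughly like this (a sketch assembled from the R code further below; the coordinates, catchment name and variable are illustrative):
module load python
Datatype='T_GCMS'
Bias_corrected='True'
ClimVarName='pracc'
latitude=-32.843
longitude=151.706
name='Catchment_01'
python /srv/ccrc/data02/z5025317/Code_execution/P1_NARCliM_NC_to_CSV_CCRC_SS.py --lat $latitude --lon $longitude --varName $ClimVarName --domain 'd02' --timestep 'DAY' --LocationName $name --Datatype $Datatype --BiasBool $Bias_corrected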
Info for P1_NARCliM_NC_to_CSV_CCRC_SS.py code:
Variables available from NARCLIM (output):
'evspsblmean'   water evaporation flux (actual ET); long_name: Surface evaporation; standard_name: water_evaporation_flux; units: kg m-2 s-1
'potevpmean'    potential ET; standard_name: water_potential_evaporation_flux; units: kg m-2 s-1
'tasmean'       mean near-surface temperature
'tasmax'        maximum near-surface temperature
'pracc'         daily precipitation sum (sum of convective prcacc and stratiform prncacc precipitation)
'pr1Hmaxtstep'  max. 1-hour time-window moving averaged precipitation rate (maximum 1-hour interval rainfall in a one-day period); units: kg m-2 s-1; maxima of 1-hour moving averages from 60 s point values
'wss1Hmaxtstep' max. 1-hour time-window moving averaged surface wind speed; units: m s-1; maxima of 1-hour moving averages from 60 s point values
'wssmax'        surface wind speed; standard_name: air_velocity; units: m s-1; height: 10 m
'wssmean'       surface wind speed; standard_name: air_velocity; units: m s-1
Code Input Variables:
Datatype: Choose 'T_NNRP' for reanalysis or 'T_GCMS' for GCM forcing data
BiasBool: Choose 'True' for bias corrected data, 'False' for normal model outputs
Execution of the code in BASH for netcdf interrogation:
1st step: log into the storm servers (hurricane.ccrc.unsw.edu.au, typhoon.ccrc.unsw.edu.au or cyclone.ccrc.unsw.edu.au) via PuTTY with UNSW credentials (zID)
In BASH copy and enter:
module load python
latitude=-32.91
longitude=151.80
name='HunterRiver'
Datatype='T_NNRP'
Biasboolean='False'
python /srv/ccrc/data02/z5025317/Code_execution/P1_NARCliM_NC_to_CSV_CCRC_SS.py --lat $latitude --lon $longitude --varName 'pracc' --domain 'd02' --timestep 'DAY' --LocationName $name --Datatype $Datatype --BiasBool $Biasboolean;

@@ -0,0 +1,233 @@
# -*- coding: utf-8 -*-
from netCDF4 import *
import numpy as np
from numpy import *
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
import argparse
import time
#
# Set working directory (where postprocessed NARCliM data is located)
os.chdir('/srv/ccrc/data30/z3393020/NARCliM/postprocess/')
#
#User input for location and variable type - from command line
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--lat", help="first number")
parser.add_argument("--lon", help="second number")
parser.add_argument("--varName", help="operation")
parser.add_argument("--timestep", help="operation")
parser.add_argument("--domain", help="operation")
parser.add_argument("--LocationName", help="operation")
parser.add_argument("--Datatype", help="operation")
parser.add_argument("--BiasBool", help="operation")
args = parser.parse_args()
print(args.lat)
print(args.lon)
print(args.varName)
mylat= float(args.lat)
mylon= float(args.lon)
Clim_var_type = args.varName
NC_Domain = args.domain
Timestep = args.timestep
Location = args.LocationName
Data_Type = args.Datatype
Bias_Correction_BOOL = args.BiasBool
print("Extracting all NARCLIM time series for variable: ", Clim_var_type, " for lat lon: ", mylat, mylon, Location, "domain", NC_Domain, " timestep ", Timestep, " Datatype ", Data_Type, " biascorrected? ", Bias_Correction_BOOL)
lat_equal_len_string="%.3f" % abs(mylat)
lon_equal_len_string= "%.3f" % mylon
if Bias_Correction_BOOL == 'False':
#set directory path for output files
output_directory = '/srv/ccrc/data02/z5025317/NARCliM_out/'+ Location + '_' + lat_equal_len_string + '_' + lon_equal_len_string + '/'
#output_directory = 'J:\Project wrl2016032\NARCLIM_Raw_Data\Extracted'
print '---------------------------------------------------------'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print("output directory folder didn't exist and was generated here:")
print(output_directory)
print '---------------------------------------------------------'
#
#time.sleep(10)
#set up the loop variables for interrogating the entire NARCLIM raw data
NC_Periods = ('1990-2009','2020-2039','2060-2079')
if Data_Type == 'T_NNRP':
NC_Periods = ('1950-2009','Stop')
#
#Define empty pandas data frames
Full_df = pd.DataFrame()
GCM_df = pd.DataFrame()
R13_df = pd.DataFrame()
MultiNC_df = pd.DataFrame()
#
#Loop through models and construct CSV per site
for NC_Period in NC_Periods:
if NC_Period != "Stop":
Period_short = NC_Period[:4]
GCMs = os.listdir('./'+ NC_Period)
for GCM in GCMs:
print GCM
Warf_runs = os.listdir('./' + NC_Period + '/' + GCM + '/')
for Warf_run in Warf_runs:
Current_input_dir = './' + NC_Period + '/' + GCM + '/' + Warf_run + '/' + NC_Domain + '/'
print Current_input_dir
Climvar_ptrn = '*' + Timestep + '_*' + Clim_var_type + '.nc'
Climvar_NCs = glob.glob(Current_input_dir + Climvar_ptrn)
#print Climvar_NCs[1]
#Climvar_NCs = Climvar_NCs[0:2]
#print(Climvar_NCs)
for netcdf in Climvar_NCs:
f=Dataset(netcdf)
# This section prints on the screen information contained in the headers of the file
#print '---------------------------------------------------------'
#print f.ncattrs()
#print f.title
#print f.variables
#print
#for varname in f.variables:
# print varname,' -> ',np.shape(f.variables[varname])
#print '---------------------------------------------------------'
# Based on the desired inputs, this finds the nearest grid centerpoint index (x,y) in the *.nc file
dist_x=np.abs(f.variables['lon'][:,:]-float(mylon))
dist_y=np.abs(f.variables['lat'][:,:]-float(mylat))
dist=dist_x + dist_y
latindex=np.where(dist_y==np.min(dist_y))
lonindex=np.where(dist_x==np.min(dist_x))
index=np.where(dist==np.min(dist))
print '---------------------------------------------------------'
print netcdf
print 'Information on the nearest point'
print 'Your desired lat,lon = ',mylat,mylon
print 'The nearest lat,lon = ', f.variables['lat'][latindex[0],latindex[1]], f.variables['lon'][lonindex[0],lonindex[1]]
#print 'The index of the nearest lat,lon (x,y) = ',index[0], index[1]
#Here we construct a pandas data frame, having the "time"/day as an index and a number of variables (i.e. Clim_var_type, pracc) as columns
d={}
#d["time"] = f.variables['time'][:]
d[ GCM +'_'+ Warf_run +'_'+ Period_short] = f.variables[Clim_var_type][:, int(index[0]), int(index[1])]
#if GCM == 'NNRP' and Warf_run == 'R1':
# d['Period']= NC_Period
timestamp = f.variables['time'][:]
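# NARCliM time stamps are hours since the 1949-12-01 reference date (the origin used below)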
timestamp_dates = pd.to_datetime(timestamp, unit='h', origin=pd.Timestamp('1949-12-01'))
df1=pd.DataFrame(d, index=timestamp_dates)
f.close()
print 'closing '+ os.path.basename(os.path.normpath(netcdf)) + ' moving to next netcdf file'
#print f
print '---------------------------------------------------------'
#append in time direction each new time series to the data frame
MultiNC_df = MultiNC_df.append(df1)
#append in columns direction individual GCM-RCM-123 run time series (along x axis)
MultiNC_df = MultiNC_df.sort_index(axis=0, ascending=True)
R13_df = pd.concat([R13_df, MultiNC_df], axis=1)
MultiNC_df =pd.DataFrame()
#append blocks of R1 R2 and R3 in x axis direction
R13_df = R13_df.sort_index(axis=0, ascending=True)
GCM_df = pd.concat([GCM_df, R13_df], axis=1)
R13_df = pd.DataFrame()
#append time periods in x axis direction (change axis=1 to =0 if periods for same model should be added to same model R123 column)
GCM_df = GCM_df.sort_index(axis=0, ascending=True)
Full_df = pd.concat([Full_df, GCM_df], axis=1)
GCM_df = pd.DataFrame()
Full_df = Full_df.sort_index(axis=0, ascending=True)
#adding a column with the NARCLIM decade
Full_df.loc[(Full_df.index > '1950-01-01') & (Full_df.index < '2010-01-01'), 'period']= '1990-2009'
Full_df.loc[(Full_df.index > '1990-01-01') & (Full_df.index < '2010-01-01'), 'period']= '1990-2009'
Full_df.loc[(Full_df.index > '2020-01-01') & (Full_df.index < '2040-01-01'), 'period']= '2020-2039'
Full_df.loc[(Full_df.index > '2060-01-01') & (Full_df.index < '2080-01-01'), 'period']= '2060-2079'
#
if Bias_Correction_BOOL == 'True':
os.chdir('/srv/ccrc/data30/z3393020/NARCliM/Bias_corrected/')
#set directory path for output files
output_directory = '/srv/ccrc/data02/z5025317/NARCliM_out/'+ Location + '_' + lat_equal_len_string + '_' + lon_equal_len_string + '/Bias_corrected/'
#output_directory = 'J:\Project wrl2016032\NARCLIM_Raw_Data\Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print("output directory folder didn't exist and was generated here:")
print(output_directory)
#time.sleep(10)
#set up the loop variables for interrogating the entire NARCLIM raw data
GCMs = ('CCCMA3.1','CSIRO-MK3.0','ECHAM5', 'MIROC3.2', 'NNRP')
#
#Define empty pandas data frames
Full_df = pd.DataFrame()
GCM_df = pd.DataFrame()
R13_df = pd.DataFrame()
MultiNC_df = pd.DataFrame()
#
#Loop through models and construct CSV per site
for GCM in GCMs:
print GCM
Warf_runs = os.listdir('./' + GCM + '/')
for Warf_run in Warf_runs:
NC_Periods = os.listdir('./' + GCM + '/' + Warf_run + '/')
for NC_Period in NC_Periods:
Period_short = NC_Period[:4]
Current_input_dir = './' + GCM + '/' + Warf_run + '/' + NC_Period + '/' + NC_Domain + '/'
print Current_input_dir
Climvar_ptrn = '*' + Timestep + '_*' + Clim_var_type + '_bc.nc'
Climvar_NCs = glob.glob(Current_input_dir + Climvar_ptrn)
print Climvar_NCs[1]
print Climvar_NCs[2]
for netcdf in Climvar_NCs:
#netcdf = '/srv/ccrc/data31/z3393020/NARCliM/Bias_corrected/' + netcdf[2:]
#print netcdf
f=Dataset(netcdf)
# This section prints on the screen information contained in the headers of the file
print '---------------------------------------------------------'
print f.ncattrs()
print f.title
print f.variables
print
for varname in f.variables:
print varname,' -> ',np.shape(f.variables[varname])
print '---------------------------------------------------------'
# Based on the desired inputs, this finds the nearest grid centerpoint index (x,y) in the *.nc file
dist_x=np.abs(f.variables['lon'][:,:]-float(mylon))
dist_y=np.abs(f.variables['lat'][:,:]-float(mylat))
dist=dist_x + dist_y
latindex=np.where(dist_y==np.min(dist_y))
lonindex=np.where(dist_x==np.min(dist_x))
index=np.where(dist==np.min(dist))
print '---------------------------------------------------------'
print netcdf
print 'Information on the nearest point'
print 'Your desired lat,lon = ',mylat,mylon
print 'The nearest lat,lon = ', f.variables['lat'][latindex[0],latindex[1]], f.variables['lon'][lonindex[0],lonindex[1]]
print 'The index of the nearest lat,lon (x,y) = ',index[0], index[1]
#Here we construct a pandas data frame, having the "time"/day as an index and a number of variables (i.e. Clim_var_type, pracc) as columns
d={}
#d["time"] = f.variables['time'][:]
d[ GCM +'_'+ Warf_run +'_'+ Period_short] = f.variables[Clim_var_type+'_bc'][:, int(index[0]), int(index[1])]
#if GCM == 'NNRP' and Warf_run == 'R1':
# d['Period']= NC_Period
timestamp = f.variables['time'][:]
timestamp_dates = pd.to_datetime(timestamp, unit='h', origin=pd.Timestamp('1949-12-01'))
df1=pd.DataFrame(d, index=timestamp_dates)
f.close()
print 'closing '+ os.path.basename(os.path.normpath(netcdf)) + ' moving to next netcdf file'
#print f
print '---------------------------------------------------------'
#append in time direction each new time series to the data frame
MultiNC_df = MultiNC_df.append(df1)
#append in columns direction individual GCM-RCM-123 run time series (along x axis)
MultiNC_df = MultiNC_df.sort_index(axis=0, ascending=True)
R13_df = pd.concat([R13_df, MultiNC_df], axis=1)
MultiNC_df =pd.DataFrame()
#append blocks of R1 R2 and R3 in x axis direction
R13_df = R13_df.sort_index(axis=0, ascending=True)
GCM_df = pd.concat([GCM_df, R13_df], axis=1)
R13_df = pd.DataFrame()
#append time periods in x axis direction (change axis=1 to =0 if periods for same model should be added to same model R123 column)
GCM_df = GCM_df.sort_index(axis=0, ascending=True)
Full_df = pd.concat([Full_df, GCM_df], axis=1)
GCM_df = pd.DataFrame()
Full_df = Full_df.sort_index(axis=0, ascending=True)
#export the pandas data frame as a CSV file within the output directory
out_file_name = Clim_var_type + '_'+ Data_Type[2:] + '_' + Location + '_' + lat_equal_len_string + '_' + lon_equal_len_string + '_NARCliM_summary.csv'
out_path = output_directory +'/' + out_file_name
Full_df.to_csv(out_path)

@@ -0,0 +1,89 @@
Variables available from NARCLIM (output):
'evspsblmean'   water evaporation flux (actual ET); long_name: Surface evaporation; standard_name: water_evaporation_flux; units: kg m-2 s-1
'potevpmean'    potential ET; standard_name: water_potential_evaporation_flux; units: kg m-2 s-1
'tasmean'       mean near-surface temperature
'tasmax'        maximum near-surface temperature
'pracc'         daily precipitation sum (sum of convective prcacc and stratiform prncacc precipitation)
'pr1Hmaxtstep'  max. 1-hour time-window moving averaged precipitation rate (maximum 1-hour interval rainfall in a one-day period); units: kg m-2 s-1; maxima of 1-hour moving averages from 60 s point values
'wss1Hmaxtstep' max. 1-hour time-window moving averaged surface wind speed; units: m s-1; maxima of 1-hour moving averages from 60 s point values
'wssmax'        surface wind speed; standard_name: air_velocity; units: m s-1; height: 10 m
'wssmean'       surface wind speed; standard_name: air_velocity; units: m s-1
Sites:
Northern NSW:
Tweed River: -28.17, 153.56
Cudgera Creek: -28.36, 153.58
Belongil Creek: -28.63, 153.59
Central NSW:
Port Stephens: -32.71, 152.20
Terrigal Lagoon: -33.4, 151.44
Hunter River: -32.91, 151.80
Hunter River near Mahmods site: -32.843, 151.706
Southern NSW:
Batemans Bay: -35.76, 150.25
Towamba River: -37.1, 149.91
Nadgee Lake: -37.47, 149.97
Code Input Variables:
Datatype: Choose 'T_NNRP' for reanalysis or 'T_GCMS' for GCM forcing data
BiasBool: Choose 'True' for bias corrected data, 'False' for normal model outputs
Execution of the code in BASH for netcdf interrogation:
1st step: log into the storm servers (hurricane.ccrc.unsw.edu.au, typhoon.ccrc.unsw.edu.au or cyclone.ccrc.unsw.edu.au) via PuTTY with UNSW credentials (zID)
In BASH copy and enter:
module load python
latitude=-32.91
longitude=151.80
name='HunterRiver'
Datatype='T_NNRP'
Biasboolean='False'
python /srv/ccrc/data02/z5025317/Code_execution/P1_NARCliM_NC_to_CSV_CCRC_SS.py --lat $latitude --lon $longitude --varName 'pracc' --domain 'd02' --timestep 'DAY' --LocationName $name --Datatype $Datatype --BiasBool $Biasboolean;
python /srv/ccrc/data02/z5025317/Code_execution/P1_NARCliM_NC_to_CSV_CCRC_SS.py --lat $latitude --lon $longitude --varName 'tasmean' --domain 'd02' --timestep 'DAY' --LocationName $name --Datatype $Datatype --BiasBool $Biasboolean;
python /srv/ccrc/data02/z5025317/Code_execution/P1_NARCliM_NC_to_CSV_CCRC_SS.py --lat $latitude --lon $longitude --varName 'tasmax' --domain 'd02' --timestep 'DAY' --LocationName $name --Datatype $Datatype --BiasBool $Biasboolean;
python /srv/ccrc/data02/z5025317/Code_execution/P1_NARCliM_NC_to_CSV_CCRC_SS.py --lat $latitude --lon $longitude --varName 'pr1Hmaxtstep' --domain 'd02' --timestep 'DAY' --LocationName $name --Datatype $Datatype --BiasBool $Biasboolean;
python /srv/ccrc/data02/z5025317/Code_execution/P1_NARCliM_NC_to_CSV_CCRC_SS.py --lat $latitude --lon $longitude --varName 'wssmean' --domain 'd02' --timestep 'DAY' --LocationName $name --Datatype $Datatype --BiasBool $Biasboolean;
python /srv/ccrc/data02/z5025317/Code_execution/P1_NARCliM_NC_to_CSV_CCRC_SS.py --lat $latitude --lon $longitude --varName 'wss1Hmaxtstep' --domain 'd02' --timestep 'DAY' --LocationName $name --Datatype $Datatype --BiasBool $Biasboolean;
python /srv/ccrc/data02/z5025317/Code_execution/P1_NARCliM_NC_to_CSV_CCRC_SS.py --lat $latitude --lon $longitude --varName 'evspsblmean' --domain 'd02' --timestep 'DAY' --LocationName $name --Datatype $Datatype --BiasBool $Biasboolean;
python /srv/ccrc/data02/z5025317/Code_execution/P1_NARCliM_NC_to_CSV_CCRC_SS.py --lat $latitude --lon $longitude --varName 'potevpmean' --domain 'd02' --timestep 'DAY' --LocationName $name --Datatype $Datatype --BiasBool $Biasboolean
#1 The above code extracts time series from the full model ensemble over a single model grid cell (based on lat/lon input) for the above variables of interest and stores them in CSV files (a loading sketch follows after #3).
Example of output name: evspsblmean_35.76_150.25_NARCliM_summary.csv
#2 The "P1_NARCliM_plots_Windows" code takes these CSV files as input and creates a) a pdf wiht a number of time series and box plots and b) another CSV file containing the Deltas between present day, near and far future
for each model in the ensemble. Output: C:\Users\z5025317\WRL_Postdoc\Projects\Paper#1\Output\Nadgee\Nadgee_tasmax_NARCliM_ensemble_changes.csv
#3 The "P1_NARCliM_First_Pass_variab_deviation_plots" code takes those Delta CSV files as input and generates the future climate deviation plots that were originally developed by Duncan Rayner.
Run the code with different combinations of Estuary (study sites) and climate variables:
e.g. Clim_var_type = "tasmax*" # '*' will create pdf for all variables in folder
Present_Day_Clim_Var = 'MaxT' #MaxT, MinT, Rainfall (the present-day climate variable name refers to the observed climate data set that's used for the baseline-period variability)
#!!!# Important!
#For present-day variability data, only rainfall and temperature data actually correspond to the study sites. ET and Wind are taken from the existing project folder and hence are for a Hunter weather station,
#e.g. Terrigal_Wind and Terrigal_ET are actually Hunter data. This is because we don't have ET for sites other than Hunter at this stage.
##PROBLEM: Without changing anything, the P1_NARCliM_NC_to_CSV_CCRC_SS.py stopped working properly on the CCRC storm servers. It's not giving an error, but loading the nc files with Dataset(nc) just takes unlimited time.
It used to take only a few seconds. NOT solved yet as of 7th of May 2018. ### This was solved for the /postprocessed folder at the end of May 2018, but the problem persists with the /bias_corrected/ data folder.
Running a simple netcdf info script:
python /srv/ccrc/data02/z5025317/Code_execution/P1_Basic_NETCDF_Interrogation.py
deactivate
conda env create --name EEenv --file C:\Users\z5025317\WRL_Postdoc\Software\EE\ee-jupyter-examples-master\kilian_env

@@ -0,0 +1,107 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 08 14:34:51 2018
@author: z5025317
"""
#####################################----------------------------------
#Last Updated - March 2018
#@author: z5025317 Valentin Heimhuber
#code for generating P and ET time series as input to the HUNTER AWBM and RMA models
#Inputs: Uses CSV files that contain all 12 NARCLIM model runs time series for 1 grid cell created with: P1_NARCliM_NC_to_CSV_CCRC_SS.py
#####################################----------------------------------
#####################################----------------------------------
#Load packages
#####################################----------------------------------
import numpy as np
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from matplotlib.backends.backend_pdf import PdfPages
from ggplot import *
matplotlib.style.use('ggplot')
#
# Set working directory (where postprocessed NARCliM data is located)
os.chdir('C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/07_Modelling/01_Input/BC_Generation/ClimateData/')
#set directory path for output files
output_directory = 'NARCLIM_Processed/Rainfall/'
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
print('-------------------')
#####################################----------------------------------
#set input parameters
Model_site = 'HUNTER' # 'Belongil'
Clim_var_type = "pracc" # '*' will create pdf for all variables in folder "pracc*|tasmax*"
Bias_Corrected = True
#####################################----------------------------------
Scenarios = ["NNRP_R1_1950", "NNRP_R2_1950", "NNRP_R3_1950", 'CCCMA3.1_R1_2020', 'CCCMA3.1_R2_2020', 'CCCMA3.1_R3_2020',
'CSIRO-MK3.0_R1_2020', 'CSIRO-MK3.0_R2_2020', 'CSIRO-MK3.0_R3_2020', 'ECHAM5_R1_2020','ECHAM5_R2_2020',
'ECHAM5_R3_2020','MIROC3.2_R1_2020', 'MIROC3.2_R2_2020', 'MIROC3.2_R3_2020']
#write the key plots to a single pdf document
pdf_out_file_name = Clim_var_type + '_summary_time_series_plots_NARCliM_summary_.pdf'
pdf_out_path = output_directory +'/' + pdf_out_file_name
#open pdf and add the plots
with PdfPages(pdf_out_path) as pdf:
for scenario in Scenarios:
#scenario = 'CCCMA3.1_R1_2020'
print scenario[:-5]
Catchment_directories = glob.glob('./NARCLIM/*')
Output_df = pd.DataFrame()
for Catchment_dir in Catchment_directories:
Filename = os.path.basename(os.path.normpath(Catchment_dir))
Catchment = 'Catchment' + '_' + Filename.split('_', 2)[1]
if Bias_Corrected:
Catchment_dir = Catchment_dir + '/Bias_corrected'
if scenario[:4] == 'NNRP':
clim_var_csv_path = glob.glob(Catchment_dir + '/' + Clim_var_type + '_NNRP*')
Full_df = pd.read_csv(clim_var_csv_path[0], index_col=0, parse_dates = True)
Full_df.index = Full_df.index.to_period('D')
IVM_df = Full_df.filter(regex= scenario[:-5])
IVM_df = IVM_df.iloc[:,:].loc[(IVM_df.index < '2010-01-01')]
#calculate daily ETo from the NARCLIM ET flux
if Clim_var_type == 'evspsblmean' or Clim_var_type == 'potevpmean':
IVM_df = IVM_df.multiply(86400)
IVM_df.columns = [Catchment]
Output_df = pd.concat([Output_df, IVM_df], axis=1)
else:
clim_var_csv_path = glob.glob(Catchment_dir + '/' + Clim_var_type + '_GCMS*')
Full_df = pd.read_csv(clim_var_csv_path[0], index_col=0, parse_dates = True)
Full_df.index = Full_df.index.to_period('D')
IVM_df = Full_df.filter(regex= scenario[:-5])
IVM_df = IVM_df.iloc[:,:].loc[(IVM_df.index > '1989-12-31')]
if Clim_var_type == 'evspsblmean' or Clim_var_type == 'potevpmean':
IVM_df = IVM_df.multiply(86400)
IVM_df.columns = [scenario[:-5],scenario[:-5], scenario[:-5]]
dfa1 = pd.DataFrame(IVM_df.iloc[:,0]).dropna(axis=0)
dfa2 = pd.DataFrame(IVM_df.iloc[:,1]).dropna(axis=0)
dfa3 = pd.DataFrame(IVM_df.iloc[:,2]).dropna(axis=0)
IVM_df = pd.concat([dfa1, dfa2], axis=0)
IVM_df = pd.concat([IVM_df, dfa3], axis=0)
IVM_df.columns = [Catchment]
Output_df = pd.concat([Output_df, IVM_df], axis=1)
Output_df.index.name = 'datetime'
Output_df_annual = Output_df.resample('A').sum()
Output_df_annual = Output_df_annual.replace(0, np.nan)
# time series plot annual ALL models
Output_df_annual.plot(legend=False)
plt.title(Clim_var_type + ' - Time series - for all catchments')
pdf.savefig(bbox_inches='tight', pad_inches=0.4)
plt.close()
#export the pandas data frame as a CSV file within the output directory
out_file_name = scenario[:-5] + '_' + Clim_var_type + '_NARCliM.csv'
out_path = output_directory +'/' + out_file_name
Output_df.to_csv(out_path)

@@ -0,0 +1,59 @@
#code for preparing a text file with BASH code for batch download of NARCLIM data for the HUNTER WQ modeling of
#future climate scenarios
#NARCLIM Variables
#evspsblmean water_evaporation flux (actual ET) long_name: Surface evaporation standard_name: water_evaporation_flux units: kg m-2 s-1
#tasmean mean near surface temperature
#pracc precipitation daily precipitation sum (sum of convective prcacc and stratiform prncacc precip)
Clim_Var <- 'pracc'
Datatype <- 'T_GCM' #T_GCMS for GCM forcing, T_NNRP for reanalysis (only 1950-2009)
Biasboolean <- 'True' #use bias corrected data? True or False python boolean
Directory <- 'C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/07_Modelling/01_Input/BC_Generation/catchments/'
Filename <- 'Catchment_Prev_Hunter_Model_Centroids_VH_WGS84_attribute_Table.csv'
#Load CSV with location names and lat lon coordinates
Location.df <- data.frame(read.csv(paste(Directory, Filename, sep=""), header=T))
#create empty vector for storing the command line text and open file
Vector.for.command.line.txt <- c()
Vector.for.command.line.txt <- c(Vector.for.command.line.txt, "module load python")
text1 <- c(paste("Datatype='",Datatype,"'", sep=""),
paste("Bias_corrected='",Biasboolean,"'", sep=""), paste("ClimVarName='",Clim_Var,"'", sep=""))
Vector.for.command.line.txt <- c(Vector.for.command.line.txt, text1)
for (i in 1:(length(Location.df$Name))){
#name<-as.character(Location.df$Name[i])
#name<-gsub('([[:punct:]])|\\s+','_',name)
if(i<10){
name<-paste('Catchment_0', as.character(i), sep="")
}else{
name<-paste('Catchment_', as.character(i), sep="")
}
latitude=round(as.numeric(Location.df$Lat[i]),3)
longitude=round(as.numeric(Location.df$Long[i]),3)
text <- c(paste("latitude=",latitude,"", sep=""), paste("longitude=",longitude,"", sep=""),
paste("name='",name,"'", sep=""),
"python /srv/ccrc/data02/z5025317/Code_execution/\\
P1_NARCliM_NC_to_CSV_CCRC_SS.py \\
--lat $latitude --lon $longitude --varName $ClimVarName --domain 'd02' --timestep \\
'DAY' --LocationName $name --Datatype $Datatype --BiasBool $Bias_corrected")
Vector.for.command.line.txt <- c(Vector.for.command.line.txt, text)
if(i==10|i==20|i==31){
Vector.for.command.line.txt <- c(Vector.for.command.line.txt, " ")
text.file.name <- paste('C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/07_Modelling/01_Input/BC_Generation/Code/NARCLIM_Download_and_Processing/',Clim_Var, "_", Datatype, "_", Biasboolean,substring(as.character(i), 1,1), ".txt", sep="")
#open and fill text file
fileConn <- file(text.file.name)
writeLines(Vector.for.command.line.txt, fileConn)
close(fileConn)
#
if(i==10|i==20){
Vector.for.command.line.txt <- c()
Vector.for.command.line.txt <- c(Vector.for.command.line.txt, "module load python")
text1 <- c(paste("Datatype='",Datatype,"'", sep=""),
paste("Bias_corrected='",Biasboolean,"'", sep=""), paste("ClimVarName='",Clim_Var,"'", sep=""))
Vector.for.command.line.txt <- c(Vector.for.command.line.txt, text1)
}
}
}

@@ -0,0 +1,175 @@
# -*- coding: utf-8 -*-
#==========================================================#
#Last Updated - June 2018
#@author: z5025317 Valentin Heimhuber
#code for creating climate prioritization plots for NARCLIM variables.
#Inputs: Uses CSV files that contain all 12 NARCLIM model runs time series for 1 grid cell created with: P1_NARCliM_NC_to_CSV_CCRC_SS.py
#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from matplotlib.backends.backend_pdf import PdfPages
from ggplot import *
matplotlib.style.use('ggplot')
#==========================================================#
#Input parameters
#==========================================================#
# Set working directory (where postprocessed NARCliM data is located)
#set beginning and end years and corresponding scenario code
fs=['HCC010','HCC040']
comparison_code = '10_40'
startyear=1995
endyear=1997
year=range(startyear, endyear+1)
#set directory path for output files
output_directory = 'C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/Output/Postprocessed' + comparison_code +'/'
variable = 'vel' #depth or vel
#set model element nodes from chainage file
node = [837, 666, 981, 59, 527, 34, 391] #1,
#set plot parameters
ALPHA_figs = 1
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
#==========================================================#
#==========================================================#
#100% automated part of the code doing the data extraction
#==========================================================#
Summary_df = pd.DataFrame()
for f in fs:
#f= 'HCC040'
#set input and output directories
input_directory = 'C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/Output/'+ f
#set directory path for individual png files
png_output_directory = output_directory + '/Figures/'
# Set working directory (where postprocessed NARCliM data is located)
os.chdir(input_directory)
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(png_output_directory):
os.makedirs(png_output_directory)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
#==========================================================#
#Load data file
if variable == 'depth':
Clim_Var_CSVs = glob.glob('*' + variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ')
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
df= df.drop(columns=['Year', 'Hour'])
if variable == 'vel':
#x velocity
Clim_Var_CSVs = glob.glob('*' +'x'+ variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ')
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
dfx= df.drop(columns=['Year', 'Hour','1'])
#y velocity
Clim_Var_CSVs = glob.glob('*' +'y'+ variable + '*')
clim_var_csv_path = Clim_Var_CSVs[0]
df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ')
df.index = pd.to_datetime(df.Year, format = '%Y') + pd.to_timedelta(df.Hour, unit='h')
dfy= df.drop(columns=['Year', 'Hour','1'])
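# velocity magnitude from the x and y velocity components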
df = np.sqrt(dfx*dfx + dfy*dfy)
#if f == 'HCC040':
# df.index = df.index + pd.to_timedelta(72, unit='h')
#append to summary data frame
if f == 'HCC010':
scname = 'Present'
if f == 'HCC011':
scname = 'Near_Future'
df.index = df.index - (datetime.strptime('2025 01 01', '%Y %m %d').date() - datetime.strptime('1995 01 01', '%Y %m %d').date())
if f == 'HCC012':
scname = 'Far_Future'
df.index = df.index - (datetime.strptime('2065 01 01', '%Y %m %d').date() - datetime.strptime('1995 01 01', '%Y %m %d').date())
if f == 'HCC040':
scname = 'Present_Resto'
df.columns = df.columns+'_'+ scname
Summary_df = pd.concat([Summary_df, df], axis=1, join='outer')
#cut down the summary df to common years
Summary_df = Summary_df[datetime.strptime(str(startyear) + ' 01 05', '%Y %m %d').date():datetime.strptime(str(endyear) + ' 12 31', '%Y %m %d').date()]
Summary_df.tail()
for NODE in node:
NODE = str(NODE)
#=========#
#comparison time series plot at node
#=========#
#out name
png_out_file_name = comparison_code + '_' + variable + '_' + NODE + '_time_series.png'
png_out_path = png_output_directory + '/' + png_out_file_name
#
plotin_df = Summary_df.filter(regex=NODE)
#prepare data frame for plot
fig = plt.figure(figsize=(14,18))
ax=plt.subplot(4,1,1)
#start and end date of plots
plt.title(variable + '_' + NODE + ' - time series')
start_date = datetime(int('1996'), int('02'), int('01'))
end_date = datetime(int('1996'), int('02'), int('5'))
plotin_df1=plotin_df[start_date:end_date]
ymin = min(plotin_df1.min()) - 0.01 *min(plotin_df1.min())
ymax = max(plotin_df1.max()) + 0.01 *max(plotin_df1.max())
plotin_df1.plot(ylim=(ymin,ymax) ,legend=False, ax=ax)
#plt.legend( loc=1)
plt.xticks([])
ax=plt.subplot(4,1,2)
start_date = datetime(int('1996'), int('02'), int('01'))
end_date = datetime(int('1996'), int('02'), int('3'))
plotin_df2=plotin_df[start_date:end_date]
ymin = min(plotin_df2.min()) - 0.1 *min(plotin_df2.min())
ymax = max(plotin_df2.max()) + 0.1 *max(plotin_df2.max())
plotin_df2.plot(ylim=(ymin,ymax),legend=False, ax=ax)
#plt.legend( loc=1)
ax.xaxis.grid(False)
ax=plt.subplot(4,1,3)
plt.title(variable + '_' + NODE + ' - monthly maximum')
Monthly_max = plotin_df.resample('M').max()
Monthly_max.plot(ax=ax,legend=False)
plt.legend( loc=1)
ax.xaxis.grid(False)
ax=plt.subplot(4,1,4)
plt.title(variable + '_' + NODE + ' - monthly mean')
Monthly_mean = plotin_df.resample('M').mean()
Monthly_mean.plot(ax=ax)
#plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
# ncol=2, mode="expand", borderaxespad=0.)
#plt.legend( loc=1)
ax.xaxis.grid(False)
#ax.patch.set_alpha(ALPHA_figs)
fig.patch.set_alpha(ALPHA_figs)
fig.tight_layout()
fig.savefig(png_out_path)
plt.close()
#=========#

@ -0,0 +1,105 @@
# Set working directory where the python code is located and results are to be stored
import os
os.chdir('C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/RMA_result_explorer')
from pylab import *
import sys
from py_rmatools import rma
#==========================================================#
#Input parameters
#==========================================================#
#print "Enter the RMA filename (without extension):"
#f=raw_input()
#set beginning and end years and corresponding scenario code
run='HCC012'
startyear=2065
endyear=2067
year=range(startyear, endyear+1)
#set model element nodes from chainage file
node=[666, 59,527]
#set run directory where RMA results are located
run_directory = 'I:/ENG/WRL/WRL1/Project wrl2016032/Hunter_CC_Modeling/Completed_Simulations/' + run +'/'
#set directory path for output files
output_directory = 'C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/Output/'+ run + '/'
#==========================================================#
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
time=[]
files=[]
for ii in node:
files.append(output_directory + run + '_%d_WQ.txt' %ii)
print(node)
I_const=[1,2]
for idx in range(len(node)):
target = open(files[idx], 'w')
target.write("Year Hour")
for ii in I_const:
target.write(" %d" %(ii))
target.write("\n")
target.close()
for jj in year:
print(jj)
f=run_directory + run +'_%d_SAL' %jj
R=rma()
R.open(f)
while R.next():
time.append(R.time)
for idx, nd in enumerate(node):
target = open(files[idx], 'a')
target.write("%i %f" %(jj,time[-1]))
for ii in I_const:
target.write(" %f" %(R.constit[ii][nd]))
target.write("\n")
target.close()
###########################
#I_const=[1,2]
#filename1= output_directory + run + '_SAL.txt'
#target = open(filename1, 'w')
#target.write("Year Hour ")
#for inode in node:
# target.write("%i " % inode)
#target.write('\n')
#print (filename1)
#for jj in year:
# f1=run_directory + run + '_%d' %jj
# R=rma()
# print(f1)
# R.open(f1)
# print (jj)
# while R.next():
# time.append(R.time)
# for kk in list(enumerate(node)):
# target.write("%i %f" %(jj,time[-1]))
# target.write(" %f" %(R.constit[I_const[0]][kk[1]]))
# target.write('\n')
#target.close()
#for kk in node:
# filename1='DataCR046_%d_.txt' %kk
#

@ -0,0 +1,140 @@
#==========================================================#
#Load packages
#==========================================================#
# Set working directory where the python code is located and results are to be stored
import os
os.chdir('C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/RMA_result_explorer')
from py_rmatools_v02 import rma
#==========================================================#
#==========================================================#
#Input parameters
#==========================================================#
#print "Enter the RMA filename (without extension):"
#f=raw_input()
#set beginning and end years and corresponding scenario code
f='HCC011'
startyear=2025
endyear=2027
year=range(startyear, endyear+1)
#set model element nodes from chainage file
node=[837, 666, 981, 59,527, 34, 1, 391]
#set run directory where RMA results are located
run_directory = 'I:/ENG/WRL/WRL1/Project wrl2016032/Hunter_CC_Modeling/Completed_Simulations/' + f +'/'
#set directory path for output files
output_directory = 'C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/Output/'+ f + '/'
#==========================================================#
#==========================================================#
#100% automated part of the code doing the data extraction
#==========================================================#
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
time=[]
xvel=[]
yvel=[]
totvel=[]
elevation=[]
depth=[]
#==========================================================#
#extract depth
#==========================================================#
filename1= output_directory + f+'_depth.txt'
target = open(filename1, 'w')
target.write("Year Hour ")
for inode in node:
target.write("%i " % inode)
target.write('\n')
print (filename1)
for jj in year:
f1=run_directory + f + '_%d' %jj
R=rma()
print(f1)
R.open(f1)
print (jj)
while R.next():
time.append(R.time)
target.write("%.0f %r " %(jj,R.time))
for inode in node:
target.write("%f " % R.depth[inode])
target.write('\n')
#print (" Press any ENTER to exit")
target.close()
#f=input()
#==========================================================#
#==========================================================#
#extract xvel
#==========================================================#
filename1= output_directory + f+'_xvel.txt'
target = open(filename1, 'w')
target.write("Year Hour ")
for inode in node:
target.write("%i " % inode)
target.write('\n')
print (filename1)
for jj in year:
f1=run_directory + f + '_%d' %jj
R=rma()
print(f1)
R.open(f1)
print (jj)
while R.next():
time.append(R.time)
target.write("%.0f %r " %(jj,R.time))
for inode in node:
target.write("%f " % R.xvel[inode])
target.write('\n')
#print (" Press any ENTER to exit")
target.close()
#f=input()
#==========================================================#
#==========================================================#
#extract yvel
#==========================================================#
filename1= output_directory + f+'_yvel.txt'
target = open(filename1, 'w')
target.write("Year Hour ")
for inode in node:
target.write("%i " % inode)
target.write('\n')
print (filename1)
for jj in year:
f1=run_directory + f + '_%d' %jj
R=rma()
print(f1)
R.open(f1)
print (jj)
while R.next():
time.append(R.time)
target.write("%.0f %r " %(jj,R.time))
for inode in node:
target.write("%f " % R.yvel[inode])
target.write('\n')
#print (" Press any ENTER to exit")
target.close()
#f=input()
#==========================================================#

@ -0,0 +1,81 @@
# Set working directory where the python code is located and results are to be stored
import os
os.chdir('C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/RMA_result_explorer')
from pylab import *
import sys
from py_rmatools import rma
#==========================================================#
#Input parameters
#==========================================================#
#print "Enter the RMA filename (without extension):"
#f=raw_input()
#set beginning and end years and corresponding scenario code
f='HCC011'
startyear=2025
endyear=2027
year=range(startyear, endyear+1)
#set model element nodes from chainage file
#node=[837, 666, 981, 59, 527, 34, 1, 391]  #full node list
node=[837, 666]  #subset currently extracted
#set run directory where RMA results are located
run_directory = 'I:/ENG/WRL/WRL1/Project wrl2016032/Hunter_CC_Modeling/Completed_Simulations/' + f +'/'
#set directory path for output files
output_directory = 'C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/Output/'+ f + '/'
#==========================================================#
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
time=[]
files=[]
for ii in node:
files.append(output_directory + f + '_%d_.txt' %ii)
print(node)
I_const=[1,2]
for idx in range(len(node)):
target = open(files[idx], 'w')
target.write("Year Hour")
for ii in I_const:
target.write(" %d" %(ii))
target.write("\n")
target.close()
for jj in year:
print(jj)
#use a separate variable for the yearly file path so the run code stored in f is not overwritten
f1 = run_directory + f + '_%d_SAL' % jj
R=rma()
R.open(f1)
while R.next():
time.append(R.time)
for idx, nd in enumerate(node):
target = open(files[idx], 'a')
target.write("%i %f" %(jj,time[-1]))
for ii in I_const:
target.write(" %f" %(R.constit[ii][nd]))
target.write("\n")
target.close()
#for kk in node:
# filename1='DataCR046_%d_.txt' %kk
#

@ -0,0 +1,391 @@
# coding: utf-8
# In[64]:
# Set working directory where the python code is located and results are to be stored
import os
os.chdir('C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/RMA_result_explorer')
import struct
import matplotlib.pyplot as plt
import math
from py_rmatools import rma
try:
    get_ipython().magic('matplotlib qt')  #only available when running inside IPython/Jupyter
except NameError:
    pass
plt.rcParams.update({'figure.max_open_warning': 0})
f='HCC010'
#set run directory where RMA results are located
run_directory = 'I:/ENG/WRL/WRL1/Project wrl2016032/Hunter_CC_Modeling/Completed_Simulations/'
#set directory path for output files
output_directory = 'Output/'+ f + '/'
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print('-------------------------------------------')
print("output directory folder didn't exist and was generated")
print('-------------------------------------------')
# In[65]:
meshFilename = 'hcc002.rm1'
channelWidth = 100
RMAfilename = run_directory + f + '/' + f + '_1995_SAL'
#If RMA11
constNum = [1]
# In[66]:
def isElementOneD(nodelist):
return len(nodelist) == 2
def isElementSquare(nodelist):
return len(nodelist) == 4
def square2Triangle(ElementNum):
nodelist = ElementDict[ElementNum]
if isElementSquare(nodelist):
ElementDict[ElementNum] = [nodelist[0], nodelist[1], nodelist[2]]
ElementList.append(max(ElementList) + 1)
ElementDict[ElementList[-1]]= [nodelist[0], nodelist[2], nodelist[3]]
def oneD2triangle(ElementNum):
if isElementOneD(ElementDict[ElementNum]):
nAe = ElementDict[ElementNum][0] #nAe Node A existing
nBe = ElementDict[ElementNum][1]
if nAe not in node1Dduplicate: node1Dduplicate[nAe] = []
if nBe not in node1Dduplicate: node1Dduplicate[nBe] = []
xA = nodeDict[nAe][0]
xB = nodeDict[nBe][0]
yA = nodeDict[nAe][1]
yB = nodeDict[nBe][1]
normalVec = [-(yB - yA),(xB - xA)]
dist = math.sqrt(normalVec[0]**2 + normalVec[1]**2)
normalVec[0] = normalVec[0] / dist
normalVec[1] = normalVec[1] / dist
xA2 = xA + channelWidth * normalVec[0]
xB2 = xB + channelWidth * normalVec[0]
yA2 = yA + channelWidth * normalVec[1]
yB2 = yB + channelWidth * normalVec[1]
nA = max(NodeList) + 1
nB = max(NodeList) + 2
node1Dduplicate[nAe].append(nA)
node1Dduplicate[nBe].append(nB)
node2nodevalue[nA] = nAe
node2nodevalue[nB] = nBe
NodeList.append(nA)
NodeList.append(nB)
nodeDict[nA] = [xA2, yA2, -1.01]
nodeDict[nB] = [xB2, yB2, -1.01]
newEle = max(ElementList) + 1
ElementList.append(newEle)
ElementDict[ElementNum] = [nAe, nA, nBe]
ElementDict[newEle] = [nA, nB, nBe]
def RMA11toSerafin():
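# Serafin (SELAFIN) files are Fortran unformatted: every record is framed by
# its byte length written as a big-endian 4-byte integer, which is why each
# payload write below is bracketed by matching struct.pack(">l", ...) calls.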
f = open('{}.slf'.format(RMAfilename), 'wb')
f.write(struct.pack(">l",80))
s = '{0: >80}'.format('SERAFIN ')  #avoid shadowing the builtin str
f.write(s.encode('ascii'))
f.write(struct.pack(">l",80))
f.write(struct.pack(">l",8))
f.write(struct.pack(">l",len(constNum)))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",8))
for c in constName:
f.write(struct.pack(">l",32))
s = '{0: <32}'.format(c)
f.write(s.encode('ascii'))
f.write(struct.pack(">l",32))
f.write(struct.pack(">l",40))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",40))
f.write(struct.pack(">l",24))
f.write(struct.pack(">l",R.year))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",24))
f.write(struct.pack(">l",16))
f.write(struct.pack(">l",len(ElementList)))
f.write(struct.pack(">l",len(NodeList)))
f.write(struct.pack(">l",3))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",16))
f.write(struct.pack(">l",len(ElementList)*3*4))
for el in ElementList:
for nd in ElementDict[el]:
f.write(struct.pack(">l",nodeOrdered[nd]))
f.write(struct.pack(">l",len(ElementList)*3*4))
f.write(struct.pack(">l",len(NodeList)))
for i in range(0,len(NodeList)):
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",len(NodeList)))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
f.write(struct.pack(">f",value[0]))
f.write(struct.pack(">l",len(NodeList)*4))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
f.write(struct.pack(">f",value[1]))
f.write(struct.pack(">l",len(NodeList)*4))
while R.next():
#for i in range(3):
f.write(struct.pack(">l",4))
f.write(struct.pack(">f",R.time * 3600))
f.write(struct.pack(">l",4))
for c in constNum:
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
if key in node2nodevalue.keys():
f.write(struct.pack(">f",R.constit[c][node2nodevalue[key]]))
else:
f.write(struct.pack(">f",R.constit[c][key]))
f.write(struct.pack(">l",len(NodeList)*4))
R.next()
f.close()
def RMA2toSerafin():
f = open('{}.slf'.format(RMAfilename), 'wb')
f.write(struct.pack(">l",80))
s = '{0: >80}'.format('SERAFIN ')  #avoid shadowing the builtin str
f.write(s.encode('ascii'))
f.write(struct.pack(">l",80))
f.write(struct.pack(">l",8))
f.write(struct.pack(">l",len(constName)))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",8))
for c in constName:
f.write(struct.pack(">l",32))
s = '{0: <32}'.format(c)
f.write(s.encode('ascii'))
f.write(struct.pack(">l",32))
f.write(struct.pack(">l",40))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",40))
f.write(struct.pack(">l",24))
f.write(struct.pack(">l",R.year))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",24))
f.write(struct.pack(">l",16))
f.write(struct.pack(">l",len(ElementList)))
f.write(struct.pack(">l",len(NodeList)))
f.write(struct.pack(">l",3))
f.write(struct.pack(">l",1))
f.write(struct.pack(">l",16))
f.write(struct.pack(">l",len(ElementList)*3*4))
for el in ElementList:
for nd in ElementDict[el]:
f.write(struct.pack(">l",nodeOrdered[nd]))
f.write(struct.pack(">l",len(ElementList)*3*4))
f.write(struct.pack(">l",len(NodeList)))
for i in range(0,len(NodeList)):
f.write(struct.pack(">l",0))
f.write(struct.pack(">l",len(NodeList)))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
f.write(struct.pack(">f",value[0]))
f.write(struct.pack(">l",len(NodeList)*4))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
f.write(struct.pack(">f",value[1]))
f.write(struct.pack(">l",len(NodeList)*4))
while R.next():
f.write(struct.pack(">l",4))
f.write(struct.pack(">f",R.time * 3600))
f.write(struct.pack(">l",4))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
if key in node2nodevalue.keys():
f.write(struct.pack(">f",R.xvel[node2nodevalue[key]]))
else:
f.write(struct.pack(">f",R.xvel[key]))
f.write(struct.pack(">l",len(NodeList)*4))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
if key in node2nodevalue.keys():
f.write(struct.pack(">f",R.yvel[node2nodevalue[key]]))
else:
f.write(struct.pack(">f",R.yvel[key]))
f.write(struct.pack(">l",len(NodeList)*4))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
if key in node2nodevalue.keys():
f.write(struct.pack(">f",R.depth[node2nodevalue[key]]))
else:
f.write(struct.pack(">f",R.depth[key]))
f.write(struct.pack(">l",len(NodeList)*4))
f.write(struct.pack(">l",len(NodeList)*4))
for key, value in nodeDict.items():
if key in node2nodevalue.keys():
f.write(struct.pack(">f",R.elevation[node2nodevalue[key]]))
else:
f.write(struct.pack(">f",R.elevation[key]))
f.write(struct.pack(">l",len(NodeList)*4))
R.next()
f.close()
# In[67]:
#Read mesh file and extract nodes (except mid-side nodes) and elements - plus convert 1D elements to 2D for visualisation
NodeList = []
ElementList = []
ElementDict = {}
nodeDict = {}
node1Dduplicate = {} #original node number -> list of its duplicated node numbers
node2nodevalue = {} #maps each duplicated node back to the original node whose
#result values it should carry (e.g. if 1D node 10 is duplicated as node 2050
#when the 1D element is expanded to 2D, then node2nodevalue[2050] = 10 and
#node 2050 is written out with node 10's values)
with open(meshFilename) as f:
line = f.readline()
line = f.readline()
line = f.readline()
line = f.readline()
cpt = 1
while line and line != ' 9999\n':
temp = line.split()
#keep corner nodes only (every second entry skips the mid-side nodes), for elements with material type < 100
ElementDict[int(temp[0])] = [int(temp[i]) for i in range(1,9,2) if temp[i] != '0' and int(temp[9]) < 100]
ElementList.append(int(temp[0]))
line = f.readline()
for key, value in ElementDict.items():
NodeList.extend(value)
NodeList = list(set(NodeList))
line = f.readline()
while line and line != ' 9999\n':
temp = line.split()
if int(temp[0]) in NodeList:
nodeDict[int(temp[0])] = [float(temp[1]),float(temp[2]),float(temp[3])]
line = f.readline()
for e in ElementList:
oneD2triangle(e)
square2Triangle(e)
for key in list(ElementDict): #Remove Special Element 902.....
if len(ElementDict[key]) != 3:
print(key, ElementDict[key])
ElementDict.pop(key)
ElementList.remove(key)
nodeOrdered = {}
cpt = 1
for key, value in nodeDict.items():
nodeOrdered[key] = cpt
cpt +=1
# # Open and Read First Step of the RMA File and Save a Serafin
# In[72]:
R=rma()
R.open(RMAfilename)
R.next()
if R.type==b'RMA11 ':
constName = []
for c in constNum:
constName.append(R.constit_name[c].decode("utf-8"))
RMA11toSerafin()
if R.type==b'RMA2 ':
constName = ['X-VEL','Y-VEL','DEPTH','FREE SURFACE']
RMA2toSerafin()

@ -0,0 +1,163 @@
from struct import unpack
# Updated 10/11/15 BMM : Added mesh.
# Updated 10/11/15 MD : Added dictionary initialisation in mesh.
class rma:
def __init__(self):
self.file='not opened yet'
# May want to do some other things here later
def open(self,filename):
self.file=open(('%s.rma' % (filename)),'rb')
self.header=self.file.read(1000)
self.type=self.header[0:10]
self.title=self.header[100:172]
self.geometry=self.header[200:300]
self.num_nodes=int(self.header[40:50])
self.num_elements=int(self.header[50:60])
if self.type==b'RMA11 ':
self.num_constits=int(self.header[60:70])
if len(self.header[80:90].strip())==0:
self.num_sedlayers=0
else:
self.num_sedlayers=int(self.header[80:90])
self.constit_name=[]
self.constit_name.append("NULL")
i=1
#print(self.num_constits)
while i <= self.num_constits:
#print(i, self.num_constits, i <= self.num_constits)
self.constit_name.append(self.header[300+(i-1)*8:308+(i-1)*8])
if self.header[300+(i-1)*8:308+(i-1)*8]==" SSED ":
self.constit_name.append(" BSHEAR")
self.constit_name.append("BedThick")
j=1
while j<=self.num_sedlayers:
self.constit_name.append(" L%dThick" % j)
j=j+1
i=i+1
#if self.num_sedlayers > 0:
# self.num_constits=self.num_constits+2+self.num_sedlayers
# Header format in RMA2
# HEADER(1:10) ='RMA2 '
# HEADER(11:20)=DATEC = Date of run
# HEADER(21:30)=TIMEC = Time of run
# HEADER(31:40)=ZONEC = Time zone
# HEADER(41:50)=NP = Number of nodes
# HEADER(51:60)=NE = Number of elements
# HEADER(101:172)=TITLE = Title line
# HEADER(201:300)=FNAME = File name of binary geometry
# Header format in RMA11
# HEADER(1:10) - Model
# HEADER(11:20) - DATEC of model runtime
# HEADER(21:30) - TIMEC of model runtime
# HEADER(31:40) - ZONEC of model runtime
# HEADER(41:90) - '(5I10)' NP,NE,NQAL,IGS,LGS or '(5I10)' NP,NE,NQAL,ISEDS,NLAYT
# HEADER(101:172) - TITLE
# HEADER(301:540) - '(30A8)' CLABL
# HEADER(201:248) - FNAMD directory of the geometry
# HEADER(251:298) - FNAM2 filename of the geometry
def mesh(self,filename):
self.meshfile=open(('%s.rm1' % (filename)),'r')
l=self.meshfile.readline()
while l[0:5]!=' 9999' and len(l)!=5:
l=self.meshfile.readline()
l=self.meshfile.readline()
self.node_e={}
self.node_n={}
self.node_z={}
while l[0:10]!=' 9999' and len(l)!=10:
ll=l.split()
self.node_e[ll[0]]=ll[1]
self.node_n[ll[0]]=ll[2]
self.node_z[ll[0]]=ll[3]
l=self.meshfile.readline()
def next(self):
# This reads the next timestep and populates the variables.
# Reset all of the variables
self.time=0.0
self.year=0
self.xvel=[]
self.yvel=[]
self.zvel=[]
self.depth=[]
self.elevation=[]
# Add in an entry to fill position 0. Important since RMA files start numbering nodes from 1.
self.xvel.append(-999)
self.yvel.append(-999)
self.zvel.append(-999)
self.depth.append(-999)
self.elevation.append(-999)
if self.type==b'RMA2 ':
# WRITE(IRMAFM) TETT,NP,IYRR,((VEL(J,K),J=1,3),K=1,NP),(WSEL(J),J=1,NP),(VDOT(3,K),K=1,NP)
t=self.file.read(12)
if t:
a=unpack('fii',t)
self.time=a[0]
np=int(a[1])
self.year=a[2]
if (np!=self.num_nodes):
print ("Warning - NP (%d) on this timestep does not match header (%d)" % (np,self.num_nodes))
b=unpack('%df' % (5*np), self.file.read(20*np))
i=1
while i<=np:
self.xvel.append(b[0+(i-1)*3])
self.yvel.append(b[1+(i-1)*3])
self.depth.append(b[2+(i-1)*3])
self.elevation.append(b[np*3+(i-1)])
i=i+1
if self.type==b'RMA11 ':
self.constit=[]
c=0
# Start at zero to leave an empty array space. Constits numbered 1 .. num_constits
while c<=self.num_constits:
self.constit.append([])
# Put a null into the 0 position so that RMA node numbers are in the same numbered array position.
self.constit[c].append(-999)
c=c+1
# READ(file1,END=100) TETT1,NQAL,NP,IYRR, ((VEL(K,J),J=1,NP),K=1,3), (wd(j),j=1,np), (wsel(j),j=1,np), ((TCON1(K,J),J=1,NP),K=1,NQAL-5)
t=self.file.read(16)
if t:
a=unpack('fiii',t)
self.time=a[0]
nqal=int(a[1])
np=int(a[2])
self.year=a[3]
if ((nqal-5)!=(self.num_constits)):
print ("Warning - NQAL-5 (%d) on this timestep does not match header (%d)" % (nqal-5,self.num_constits))
if (np!=self.num_nodes):
print ("Warning - NP (%d) on this timestep does not match header (%d)" % (np,self.num_nodes))
b=unpack('%df' % (nqal*np), self.file.read(4*nqal*np))
i=1
while i<=np:
self.xvel.append(b[0+(i-1)*3])
self.yvel.append(b[1+(i-1)*3])
self.zvel.append(b[2+(i-1)*3])
self.depth.append(b[np*3+(i-1)])
self.elevation.append(b[np*4+(i-1)])
c=1
while c<=self.num_constits:
self.constit[c].append(b[np*((c-1)+5)+(i-1)])
c=c+1
i=i+1
if len(self.xvel)==1:
# Note that this is a 1 now as we've filled the zero array position
return False
else:
return True

@ -0,0 +1,199 @@
from struct import unpack
# Updated 10/11/15 BMM : Added mesh.
# Updated 10/11/15 MD : Added dictionary initialisation in mesh.
class rma:
def __init__(self):
self.file = 'not opened yet'
# May want to do some other things here later
def open(self, filename):
self.file = open(('%s.rma' % (filename)), 'rb')
self.header = self.file.read(1000).decode("utf-8")
self.type = self.header[0:10]
self.title = self.header[100:172]
self.geometry = self.header[200:300]
self.num_nodes = int(self.header[40:50])
self.num_elements = int(self.header[50:60])
if self.type == 'RMA11 ':
self.num_constits = int(self.header[60:70])
if len(self.header[80:90].strip()) == 0:
self.num_sedlayers = 0
else:
self.num_sedlayers = int(self.header[80:90])
self.constit_name = []
self.constit_name.append("NULL")
i = 1
#print(self.num_constits)
#print(self.header[300:1000])
while i <= self.num_constits:
# print self.header[300:400]
self.constit_name.append(self.header[300 + (i - 1) * 8:308 + (i - 1) * 8])
if self.header[300 + (i - 1) * 8:308 + (i - 1) * 8] == " SSED ":
self.constit_name.append(" BSHEAR")
self.constit_name.append("BedThick")
j = 1
#print(self.num_sedlayers)
while j <= self.num_sedlayers:
self.constit_name.append(" L%dThick" % j)
j = j + 1
i = i + 1
# print i
# print self.num_constits
# print i
# if self.num_sedlayers > 0:
# self.num_constits=self.num_constits+2+self.num_sedlayers
# Header format in RMA2
# HEADER(1:10) ='RMA2 '
# HEADER(11:20)=DATEC = Date of run
# HEADER(21:30)=TIMEC = Time of run
# HEADER(31:40)=ZONEC = Time zone
# HEADER(41:50)=NP = Number of nodes
# HEADER(51:60)=NE = Number of elements
# HEADER(101:172)=TITLE = Title line
# HEADER(201:300)=FNAME = File name of binary geometry
# Header format in RMA11
# HEADER(1:10) - Model
# HEADER(11:20) - DATEC of model runtime
# HEADER(21:30) - TIMEC of model runtime
# HEADER(31:40) - ZONEC of model runtime
# HEADER(41:90) - '(5I10)' NP,NE,NQAL,IGS,LGS or '(5I10)' NP,NE,NQAL,ISEDS,NLAYT
# HEADER(101:172) - TITLE
# HEADER(301:540) - '(30A8)' CLABL
# HEADER(201:248) - FNAMD directory of the geometry
# HEADER(251:298) - FNAM2 filename of the geometry
def mesh(self, filename):
self.meshfile = open(('%s.rm1' % (filename)), 'r')
l = self.meshfile.readline()
while l[0:5] != ' 9999' and len(l) != 5:
l = self.meshfile.readline()
l = self.meshfile.readline()
self.node_e = {}
self.node_n = {}
self.node_z = {}
while l[0:10] != ' 9999' and len(l) != 10:
ll = l.split()
self.node_e[ll[0]] = ll[1]
self.node_n[ll[0]] = ll[2]
self.node_z[ll[0]] = ll[3]
l = self.meshfile.readline()  #parentheses were missing, which made this loop never advance
def next(self):
# This reads the next timestep and populates the variables.
# Reset all of the variables
self.time = 0.0
self.year = 0
self.xvel = []
self.yvel = []
self.zvel = []
self.depth = []
self.elevation = []
self.temperature = []
self.salinity = []
self.sussed = []
# Add in an entry to fill position 0. Important since RMA files start numbering nodes from 1.
self.xvel.append(-999)
self.yvel.append(-999)
self.zvel.append(-999)
self.depth.append(-999)
self.elevation.append(-999)
self.temperature.append(-999)
self.salinity.append(-999)
self.sussed.append(-999)
if self.type == 'RMA2 ':
# WRITE(IRMAFM) TETT,NP,IYRR,((VEL(J,K),J=1,3),K=1,NP),(WSEL(J),J=1,NP),(VDOT(3,K),K=1,NP)
t = self.file.read(12)
if t:
a = unpack('fii', t)
self.time = a[0]
np = int(a[1])
self.year = a[2]
if (np != self.num_nodes):
print("Warning - NP (%d) on this timestep does not match header (%d)" % (np, self.num_nodes))
b = unpack('%df' % (5 * np), self.file.read(20 * np))
tempA = [(x - 1) * 3 for x in range(1, np + 1)]
tempB = [np * 3 + (x - 1) for x in range(1, np + 1)]
self.xvel.extend([b[i] for i in tempA])
self.yvel.extend([b[i + 1] for i in tempA])
self.depth.extend([b[i + 2] for i in tempA])
self.elevation.extend([b[i] for i in tempB])
if self.type == 'RMA11 ':
self.constit = []
c = 0
# Start at zero to leave an empty array space. Constits numbered 1 .. num_constits
while c <= self.num_constits:
self.constit.append([])
# Put a null into the 0 position so that RMA node numbers are in the same numbered array position.
self.constit[c].append(-999)
c = c + 1
# READ(file1,END=100) TETT1,NQAL,NP,IYRR, ((VEL(K,J),J=1,NP),K=1,3), (wd(j),j=1,np), (wsel(j),j=1,np), ((TCON1(K,J),J=1,NP),K=1,NQAL-5)
t = self.file.read(16)
if t:
a = unpack('fiii', t)
self.time = a[0]
nqal = int(a[1])
np = int(a[2])
self.year = a[3]
if ((nqal - 5) != (self.num_constits)):
print("Warning - NQAL-5 (%d) on this timestep does not match header (%d)" % (
nqal - 5, self.num_constits))
if (np != self.num_nodes):
print("Warning - NP (%d) on this timestep does not match header (%d)" % (np, self.num_nodes))
b = unpack('%df' % (nqal * np), self.file.read(4 * nqal * np))
tempA = [(x - 1) * 3 for x in range(1, np + 1)]
tempB = [np * 3 + (x - 1) for x in range(1, np + 1)]
tempC = [np * 4 + (x - 1) for x in range(1, np + 1)]
self.xvel.extend([b[i] for i in tempA])
self.yvel.extend([b[i + 1] for i in tempA])
self.zvel.extend([b[i + 2] for i in tempA])
self.depth.extend([b[i] for i in tempB])
self.elevation.extend([b[i] for i in tempC])
for c in range(1, self.num_constits + 1):
(self.constit[c].extend([b[np * ((c - 1) + 5) + (x - 1)] for x in range(1, np + 1)]))
if self.type == 'RMA10 ':
# WRITE(IRMAFM) TETT,NP,NDF,NE,IYRR,((VSING(K,J),K=1,NDF),VVEL(J),WSLL(J),J=1,NP),(DFCT(J),J=1,NE),(VSING(7,J),J=1,NP)
# WRITE(IRMAFM) TETT,NP,IYRR,((VEL(J,K),J=1,3),K=1,NP),(WSEL(J),J=1,NP),(VDOT(3,K),K=1,NP)
t = self.file.read(20)
if t:
a = unpack('fiiii', t)
self.time = a[0]
np = a[1]
ndf = 6  #degrees of freedom per node; hardcoded here rather than read from the header
ne = a[3]
self.year = a[4]
if (np != self.num_nodes):
print("Warning - NP1 (%d) on this timestep does not match header (%d)" % (np, self.num_nodes))
tempRead = np * (3 + ndf) + ne
b = unpack('%df' % tempRead, self.file.read(4 * tempRead))
i = 1
while i <= np:
self.xvel.append(b[0 + (i - 1) * 8])
self.yvel.append(b[1 + (i - 1) * 8])
self.depth.append(b[2 + (i - 1) * 8])
self.salinity.append(b[3 + (i - 1) * 8])
self.temperature.append(b[4 + (i - 1) * 8])
self.sussed.append(b[5 + (i - 1) * 8])
self.zvel.append(b[6 + (i - 1) * 8])
self.elevation.append(b[7 + (i - 1) * 8])
i = i + 1
if len(self.xvel) == 1:
# Note that this is a 1 now as we've filled the zero array position
return False
else:
return True
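# Minimal usage sketch (the run file name and node number below are
# hypothetical, not from this project; open() appends the '.rma' extension):
#   R = rma()
#   R.open('HCC010_1995')
#   while R.next():
#       print(R.time, R.year, R.depth[666])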

@ -0,0 +1,45 @@
#This is the readme document for the modeling of climate change impacts in the Hunter River estuary project
#participants: Mat Deiber, Tino Heimhuber + 2/3 CVEN Masters thesis students
#Goal: use state-of-the-art, high-resolution climate change projection data (the NARCLIM regional climate model ensemble)
in conjunction with sea level rise scenarios to model the potential impacts of climate change on the hydrodynamics,
salinity and temperature (and potentially water quality) of the estuary system.
Folder Structure:
Modeling
01_Input
BC_Generation (this is where all the rainfall runoff modeling etc. is done for generation of the boundary condition input file for RMA)
Key steps:
The hydrological and hydrodynamic model is already fully set up and calibrated. We essentially just need to create a plausible
range of future boundary condition scenarios and run them through the model.
The first step will be to generate new freshwater inflow time series for the small catchments using NARCLIM forcing data.
For each catchment, we'll use the grid cell whose centre point is closest to the catchment centroid (see the sketch below).
NARCLIM provides 12 rainfall time series per grid point, so we need to adjust the python codes to automate the whole workflow even more.
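A minimal sketch of that nearest-cell matching (illustrative only; the array names and shapes are assumptions, not the project code):
import numpy as np
def nearest_grid_cell(catchment_xy, grid_xy):
    # catchment_xy: (2,) catchment centroid; grid_xy: (n, 2) NARCLIM grid centre points
    d2 = ((grid_xy - catchment_xy) ** 2).sum(axis=1)  # squared distance to every grid point
    return int(np.argmin(d2))  # index of the closest grid point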
To begin with, we can pick 1-3 scenarios and test how well the "present-day" reanalysis
data can reproduce the observed catchment flow time series, and also how different the NARCLIM ET is from the observed data.
Once we have generated 12 RMA boundary condition files, one for each NARCLIM ensemble member,
the next step will be to automate the climate change scenario runs.
Specific Steps to run RMA2 and RMA11:
run the
Prepare the water quality input time series (they must cover the years specified for the run).
For the Hunter, we use temperature at GRETA, which is interpolated linearly to fill a few gaps in the record using the R code:
C:\Users\z5025317\OneDrive - UNSW\WRL_Postdoc_Manual_Backup\WRL_Postdoc\Projects\Paper#1\Analysis\Code\ggplot_time_series_with_trends.R
For the Hunter at Greta, the first few months prior to 01/07/1995 0:00 were set to 10.5 degrees manually.
For seeham old and godswick, we used the same temperature as for the Hunter River.
The python code tide generator is used to generate the tidal boundary condition. The yearly files
have to be copied into the run directories (a sketch of this copy step follows below).
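A minimal sketch of that copy step (every path and file name here is a placeholder, not the project layout):
import os, shutil
tide_dir = 'tide_output'  # hypothetical tide generator output folder
run_dir = 'HCC001'        # hypothetical run directory
for y in range(1995, 1998):
    src = os.path.join(tide_dir, 'tide_%d.txt' % y)  # illustrative naming only
    shutil.copy(src, run_dir)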
Check that the nodes in the Matlab code match the ones generated in the elt file;
for that we use the Matlab code that's in the RMA generator folder.
Once all the run files are generated, start with the RMA2 startup file, which will flood the model by 5 m and then drop the water level to the starting value.
The RMA batch file is used to run all the years in batch mode. The executable names have to be updated there.
It is run by simply copy-pasting the batch file into the Windows command line after navigating to the correct
run directory, i.e. /HCC001 (an illustrative Python equivalent is sketched below).
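For illustration only (the executable and control file names are placeholders, not the project's actual batch contents):
import subprocess
for y in range(1995, 1998):
    # hypothetical call; substitute the real RMA2 executable and per-year control file
    subprocess.run(['rma2.exe', 'HCC001_%d' % y], check=True)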
Download NARCLIM Data for all tributary catchments: