diff --git a/07_Modelling/01_Input/BCGeneration/hunter_rma_preprocessing.py b/07_Modelling/01_Input/BCGeneration/hunter_rma_preprocessing.py
new file mode 100644
index 0000000..791d56b
--- /dev/null
+++ b/07_Modelling/01_Input/BCGeneration/hunter_rma_preprocessing.py
@@ -0,0 +1,688 @@
+# coding: utf-8
+import re
+import os
+import time
+import collections
+import numpy as np
+import pandas as pd
+from tqdm import tqdm
+from datetime import datetime
+import pyrma.pyrma as pyrma
+
+path = "C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/07_Modelling/01_Input/BCGeneration/"
+
+# Input parameters for climate change runs
+pres_start_date = datetime(1995, 1, 1)
+pres_end_date = datetime(2005, 12, 31)
+River_temp_increase = 0.5
+
+# Load project settings
+# Establish the settings and run parameters (see the description of
+# settings in the header of this code)
+if __name__ == '__main__':
+    setup_files = [f for f in os.listdir(path) if f.lower().endswith('.s')]
+    if len(setup_files) == 1:
+        settingsfile = setup_files[0]
+    else:
+        print('Enter the name of the settings file: ')
+        settingsfile = input()
+
+S = collections.OrderedDict()
+
+print('Reading settings file: {}'.format(settingsfile))
+with open(settingsfile, 'r') as f:
+    for line in f:
+        # Ignore commented and empty lines
+        if line[0] != '#' and line[0] != '\n':
+            # Take key name before colon
+            ln = line.strip().split(':', 1)
+            key = ln[0]
+
+            # Separate multiple values, stripping whitespace around each
+            val = [x.strip() for x in ln[1].strip().split(',')]
+
+            if len(val) == 1:
+                val = val[0]
+            S[key] = val
+
+# Create output directories if they do not exist yet
+for subdir in ['', 'RMA2', 'RMA11']:
+    out_dir = os.path.join(S['output_dir'], subdir)
+    if not os.path.exists(out_dir):
+        os.makedirs(out_dir)
+        print('-------------------------------------------')
+        print("Output directory '{}' did not exist and was generated".format(out_dir))
+        print('-------------------------------------------')
+
+# Collect run parameters
+env_str = ''
+env_str += "{0:20} : {1}\n".format("Time Run",
+                                   time.strftime('%Y-%m-%d %H:%M:%S'))
+env_str += "{0:20} : {1}\n".format("Settings File", settingsfile)
+for envvar in S.keys():
+    env_str += "{0:20} : {1}\n".format(envvar, S[envvar])
+
+# Load RMA mesh
+print('Reading RMA mesh file')
+nodes, elements = pyrma.loadMesh(S['mesh_file'])
+mesh = pd.DataFrame(elements, index=[0]).transpose()
+mesh.columns = ['name']
+mesh['centroid'] = [e.centroid for e in elements.values()]
+
+# Add empty lists to dataframe
+mesh['inflows'] = np.empty((len(mesh), 0)).tolist()
+mesh['inflows'] = mesh['inflows'].astype(object)
+
+# Generate empty dataframe for inflows
+start_date = datetime(int(S['start_year']), int(S['start_month']), int(S['start_day']))
+end_date = datetime(int(S['end_year']), int(S['end_month']), int(S['end_day']))
+inflow_timeseries = pd.DataFrame(index=pd.date_range(start_date, end_date))
+
+# Generate empty dictionary (to be filled with dataframes) for water quality
+wq_timeseries = {}
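+
+# Unit conventions used throughout this script: source flow data are daily
+# volumes in ML/d, while RMA expects m3/s, so conversions below use
+# Q[m3/s] = Q[ML/d] * 1000 / 24 / 3600 (1 ML = 1000 m3; 1 day = 86400 s).
+# Quick check with an illustrative value: 864 ML/d * 1000 / 86400 = 10 m3/s.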
+
+# Read upstream boundary inflows
+if S['include_boundary_flows'].lower() == 'yes':
+
+    # Read boundary condition data from setup file
+    bc_data = {}
+    for key, val in S.items():
+        if re.match(r'bc_\d', key):
+            bc_data[val[0]] = dict(east=int(val[1]), north=int(val[2]))
+
+    dir_name = S['bc_directory']
+    for key, val in bc_data.items():
+        file_name = [x for x in os.listdir(dir_name) if x.startswith(key)][0]
+        bc_data[key]['path'] = os.path.join(dir_name, file_name)
+
+    # Assign upstream boundary inflows to RMA mesh
+    for key, val in bc_data.items():
+
+        # Find nearest element in RMA mesh
+        x = val['east']
+        y = val['north']
+        mesh['calc'] = [pyrma.point(x, y).dist(c) for c in mesh['centroid']]
+        idx = mesh['calc'].idxmin()
+
+        # Add nearest mesh element location to dataframe
+        mesh.at[idx, 'inflows'] = np.append(mesh.loc[idx, 'inflows'], key)
+
+    for key, val in bc_data.items():
+
+        # Read upstream boundary condition file
+        print('Reading upstream boundary inflow: {}'.format(key))
+        df = pd.read_csv(
+            val['path'], index_col=0, parse_dates=['datetime'], dayfirst=True)
+
+        #TH Shift the upstream boundary flows into the future if the run date is in the future
+        df.index = df.index + (start_date - pres_start_date)
+
+        # Trim dates to valid range
+        df = df[start_date:end_date]
+
+        # Scale flow units from ML/d to m3/s
+        df[key] = df['Q[ML/d]'] * 1000 / 24 / 3600
+
+        # Merge into the inflow dataframe
+        inflow_timeseries = pd.concat([inflow_timeseries, df[[key]]], axis=1)
+
+        # Add to water quality timeseries
+        wq_timeseries[key] = df.drop(['Q[ML/d]', key], axis=1)
+
+# Read WWTP data from setup file
+wwtp_data = {}
+for key, val in S.items():
+    if re.match(r'wwtp_\d', key):
+        wwtp_data[val[0]] = dict(east=int(val[1]), north=int(val[2]))
+
+dir_name = S['wwtp_directory']
+for key, val in wwtp_data.items():
+    file_name = [x for x in os.listdir(dir_name) if x.startswith(key)][0]
+    wwtp_data[key]['path'] = os.path.join(dir_name, file_name)
+
+# Read WWTP inflows
+if S['include_wwtp_flows'].lower() == 'yes':
+    print('Reading WWTP inflows (variable)')
+    for key in wwtp_data.keys():
+        df = pd.read_csv(
+            wwtp_data[key]['path'],
+            index_col=0,
+            parse_dates=[0],
+            dayfirst=True)
+
+        # Find nearest element in RMA mesh
+        x = wwtp_data[key]['east']
+        y = wwtp_data[key]['north']
+        mesh['calc'] = [pyrma.point(x, y).dist(c) for c in mesh['centroid']]
+        idx = mesh['calc'].idxmin()
+
+        # Add nearest mesh element location to dataframe
+        mesh.at[idx, 'inflows'] = np.append(mesh.loc[idx, 'inflows'], key)
+
+        # Convert from ML/day to m3/s
+        df[key] = df['Q[ML/d]'] * 1000 / 24 / 3600
+
+        # Add to inflow time series dataframe
+        inflow_timeseries = inflow_timeseries.join(df[[key]])
+
+        # Add to water quality timeseries
+        wq_timeseries[key] = df.drop(['Q[ML/d]', key], axis=1)
+
+# Load reference rainfall and evapotranspiration
+eto_master = pd.read_csv(
+    S['evap_file'], parse_dates=['datetime'], index_col=['datetime'])
+rain_master = pd.read_csv(
+    S['rain_file'], parse_dates=['datetime'], index_col=['datetime'])
+
+# Trim climate data to current date range
+eto_master = eto_master[start_date:end_date]
+rain_master = rain_master[start_date:end_date]
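+
+# Schematic of the AWBM loop below: each catchment is represented by three
+# parallel surface stores with capacities C1-C3 (mm) acting over partial
+# areas A1-A3 (which must sum to 1). Rain fills the stores and ET drains
+# them; water above a store's capacity spills as excess. The area-weighted
+# total excess is split by the baseflow index (BFI) into a baseflow store
+# and a surface store, each recessed daily by Kbase and Ksurf:
+#   ExcessTotal[i] = sum_k(Excess_k[i] * A_k)
+#   runoff[i] = (1 - Kbase) * baseflow[i] + (1 - Ksurf) * surfaceflow[i]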
+
+# Calculate catchment inflows with AWBM
+if S['include_hydro_model'].lower() == 'yes':
+
+    # Load water quality data for catchment inflows
+    for key in ['awbm_wq_natural', 'awbm_wq_urban']:
+        df = pd.read_csv(S[key], index_col=0, parse_dates=[0], dayfirst=True)
+        #TH Shift the catchment water quality into the future if the run date is in the future
+        df.index = df.index + (start_date - pres_start_date)
+        wq_timeseries[key] = df
+
+    print('Calculating AWBM inflows')
+
+    # Read catchment data
+    catchments = pd.read_csv(S['catchment_file'], index_col=[0])
+    catchments = catchments.set_index(catchments['Cat_Name'])
+
+    for index, row in catchments.iterrows():
+
+        # Find nearest element in RMA mesh
+        x = row.Easting
+        y = row.Northing
+        mesh['calc'] = [pyrma.point(x, y).dist(c) for c in mesh['centroid']]
+        idx = mesh['calc'].idxmin()
+
+        # Add nearest mesh element location to dataframe
+        mesh.at[idx, 'inflows'] = np.append(mesh.loc[idx, 'inflows'],
+                                            row.Cat_Name)
+
+    # Get weather station data
+    station_names = list(eto_master.columns)
+
+    # Load weights from Thiessen polygons
+    thiessen_weights = pd.read_csv(S['catchment_thiessen_weights'])
+
+    # Add catchment inflows
+    for index, c in catchments.iterrows():
+
+        # Time step (units: days)
+        timeStep = 1.0
+
+        # Catchment area (units: m2)
+        totalArea = c['Area (km2)'] * 1000 * 1000
+
+        # Storage capacities S1-S3 (mm)
+        consS = [c['C1'], c['C2'], c['C3']]
+
+        # Partial areas A1-A3 (must sum to 1)
+        consA = [c['A1'], c['A2'], c['A3']]
+
+        consKB = c['Kbase']
+        consKS = c['Ksurf']
+        BFI = c['BFI']
+
+        bucketValue = [0, 0, 0]
+        bfValue = 0
+        sfValue = 0
+
+        def flowInit(length):
+            return [0] * length
+
+        def updatebucket(elevation, surfaceCon, previousValue, flow):
+            # Spill any water above the store capacity as excess flow
+            if elevation > surfaceCon:
+                flow = elevation - surfaceCon
+                previousValue = surfaceCon
+            else:
+                flow = 0
+                previousValue = max(elevation, 0)
+            return previousValue, flow
+
+        # Calculate Thiessen weightings
+        weights = thiessen_weights[thiessen_weights['Name'] == c['Cat_Name']][
+            station_names]
+
+        rain_local = (rain_master[station_names] *
+                      weights.values.flatten()).sum(axis=1).values
+
+        eto_local = (eto_master[station_names] *
+                     weights.values.flatten()).sum(axis=1).values
+
+        # Count number of timesteps
+        n = len(rain_master.index)
+
+        Excess = [flowInit(n) for i in range(3)]
+        ExcessTotal = flowInit(n)
+        ExcessBF = flowInit(n)
+        ExcessSF = flowInit(n)
+        ExcessRunoff = flowInit(n)
+        Qflow = flowInit(n)
+
+        for i in range(1, len(rain_local)):
+            ElevTemp = [[bucketValue[j] + rain_local[i] - eto_local[i]]
+                        for j in range(3)]
+            for k in range(3):
+                [bucketValue[k], Excess[k][i]] = updatebucket(
+                    ElevTemp[k][0], consS[k], bucketValue[k], Excess[k][i - 1])
+            ExcessTotal[i] = (Excess[0][i] * consA[0] + Excess[1][i] * consA[1]
+                              + Excess[2][i] * consA[2])
+            ExcessBF[i] = bfValue + ExcessTotal[i] * BFI
+            bfValue = max(consKB * ExcessBF[i], 0)
+            ExcessSF[i] = sfValue + (1 - BFI) * ExcessTotal[i]
+            sfValue = max(consKS * ExcessSF[i], 0)
+            ExcessRunoff[i] = ((1 - consKB) * ExcessBF[i] +
+                               (1 - consKS) * ExcessSF[i])
+
+        # Convert runoff depth (mm/day) to flow (m3/s)
+        Qflow = [
+            a * (1e-3) * totalArea / (timeStep * 86400) for a in ExcessRunoff
+        ]
+
+        Qflow_df = pd.DataFrame(Qflow, index=rain_master.index)
+        Qflow_df.columns = [c['Cat_Name']]
+        inflow_timeseries = pd.concat([inflow_timeseries, Qflow_df], axis=1)
+        # Interpolate the NaN value introduced on 29 February in leap years
+        inflow_timeseries[c['Cat_Name']] = inflow_timeseries[
+            c['Cat_Name']].interpolate(method='linear', axis=0)
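+
+# AWBM returns a runoff depth in mm/day, which the loop above converts to a
+# flow via Qflow = depth * 1e-3 * totalArea / (timeStep * 86400). As a quick
+# check with illustrative numbers: 5 mm/day over a 100 km2 catchment is
+#   0.005 m * 100e6 m2 / 86400 s ≈ 5.8 m3/s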
+
+# Calculate irrigation demand
+if S['include_crop_model'].lower() == 'yes':
+
+    print('Calculating irrigation demand')
+
+    # Create QA summary (fraction of each licence used per year)
+    qa_fraction_used = pd.DataFrame(
+        index=pd.date_range(start=start_date, end=end_date, freq='AS'))
+
+    # Load water licence holders
+    licences = pd.read_csv(S['licences_file'])
+    licences['point'] = [
+        pyrma.point(x, y)
+        for x, y in zip(licences['Easting'], licences['Northing'])
+    ]
+
+    # Assign water licences to RMA mesh
+    for index, row in licences.iterrows():
+
+        # Find nearest element in RMA mesh
+        x = row.Easting
+        y = row.Northing
+        mesh['calc'] = [pyrma.point(x, y).dist(c) for c in mesh['centroid']]
+        idx = mesh['calc'].idxmin()
+
+        # Add nearest mesh element location to dataframe
+        mesh.at[idx, 'inflows'] = np.append(mesh.loc[idx, 'inflows'],
+                                            row.CWLICENSE)
+
+    weather_stations = pd.read_excel(S['weather_station_file'])
+    weather_stations['point'] = [
+        pyrma.point(x, y) for x, y in zip(weather_stations['E_MGA56'],
+                                          weather_stations['N_MGA56'])
+    ]
+
+    # Find nearest weather station
+    licences['station_name'] = ''
+    for index, row in licences.iterrows():
+        idx = np.argmin(
+            [row['point'].dist(p) for p in weather_stations['point']])
+        licences.at[index, 'station_name'] = weather_stations['Name'][idx]
+
+    # Crop parameters (FAO56): http://www.fao.org/docrep/x0490e/x0490e0e.htm
+    crop_types = {
+        'type': [
+            'pasture', 'turf', 'lucerne', 'vegetables', 'orchard',
+            'non_irrigation'
+        ],
+        'crop_coefficient': [1, 1, 1, 1, 1, np.nan],
+        'root_zone_depth': [1000, 750, 1500, 500, 1500, np.nan],
+        'allowable_depletion': [0.6, 0.5, 0.6, 0.5, 0.5, np.nan],
+    }
+
+    crop_types = pd.DataFrame(crop_types)
+
+    # Number of days to spread irrigation over
+    irrigation_days = int(S['irrigation_days'])
+
+    # Check if soil moisture should be kept full (saturated) or allowed to deplete
+    saturation_mode = S['irrigate_to_saturation'].lower() == 'yes'
+
+    irrigation_time_factor = 1 / irrigation_days
+
+    # Iterate through licences
+    for index, lic in tqdm(licences.iterrows(), total=licences.shape[0]):
+
+        # Initialise variables for new licence
+        currently_irrigating = False
+        licence_exhausted = False
+
+        # Annual extraction volume (share component in ML, converted to m3)
+        annual_volume_capped = lic['SHARECOMPO'] * 1000
+        annual_volume_uncapped = np.inf
+
+        # Set a maximum daily extraction limit
+        daily_maximum_volume = annual_volume_capped * float(
+            S['daily_limit_fraction'])
+
+        if S['capped_to_licence'].lower() == 'yes':
+            # Limited to share component
+            annual_volume = annual_volume_capped
+        else:
+            # Unlimited
+            annual_volume = annual_volume_uncapped
+
+        # Check if licence is for non-irrigation purposes
+        if lic['PRIMARY_USE'].lower() == 'non_irrigation':
+            # Distribute licenced amount evenly over year (m3/year to m3/s)
+            irrigation_q = annual_volume / 365.25 / 24 / 3600
+            inflow_timeseries[lic['CWLICENSE']] = -irrigation_q
+            continue
+
+        # Irrigation area (m2)
+        area = lic['Area']
+
+        # Available water holding capacity (110 mm per m of root zone, i.e. mm per mm)
+        water_holding_capacity = 110 / 1000
+
+        # Get parameters for specific crop type
+        crop = crop_types[crop_types['type'] == lic['PRIMARY_USE'].lower()]
+
+        # Crop coefficient (from FAO56)
+        crop_coefficient = float(crop['crop_coefficient'])
+
+        # Root zone depth (mm)
+        root_zone_depth = float(crop['root_zone_depth'])
+
+        # Allowable water level depletion before irrigation is required (fraction)
+        allowable_depletion = float(crop['allowable_depletion'])
+
+        # Irrigation efficiency (fraction)
+        efficiency = float(S['irrigation_efficiency'])
+
+        # Plant available water (mm)
+        plant_available_water = root_zone_depth * water_holding_capacity
+
+        # Irrigation trigger depth (mm)
+        threshold_depth = plant_available_water * allowable_depletion
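+
+        # Worked example of this trigger (illustrative numbers): pasture with
+        # a 1000 mm root zone holds 1000 * 0.110 = 110 mm of plant available
+        # water; at 60% allowable depletion, irrigation is triggered once the
+        # depletion depth exceeds 110 * 0.6 = 66 mm, provided no significant
+        # rain falls that day (checked in the loop below).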
+
+        # Calculate soil moisture over time
+        rain_local = rain_master[lic['station_name']]
+        eto_local = eto_master[lic['station_name']]
+        date = rain_master.index
+        depletion_depth = np.zeros(len(date))
+        irrigation_volume = np.zeros(len(date))
+        annual_irrigation_volume = np.zeros(len(date))
+
+        # Crop evapotranspiration (mm/day)
+        etc = eto_local * crop_coefficient
+
+        for i in range(1, len(date)):
+
+            if not saturation_mode:
+                currently_irrigating = False
+
+            # Calculate remaining licence allocation
+            remaining_allocation = annual_volume - annual_irrigation_volume[i - 1]
+
+            # Check if licence is exhausted
+            if remaining_allocation <= 0:
+                licence_exhausted = True
+
+            # Apply evapotranspiration and rain
+            current_depth = depletion_depth[i - 1] + etc[i] - rain_local[i - 1]
+
+            # Check if soil was irrigated the previous day
+            if irrigation_volume[i - 1] > 0:
+                current_depth = (
+                    current_depth -
+                    irrigation_volume[i - 1] / area * 1000 * efficiency)
+
+            # If soil is saturated from rain or irrigation, do not store excess
+            if current_depth < 0:
+                current_depth = 0
+                currently_irrigating = False
+
+            # Check if soil moisture is too low
+            if (((current_depth > threshold_depth) and
+                 (rain_local[i] < 0.2 * current_depth)) or currently_irrigating):
+
+                if currently_irrigating:
+                    # Continue the irrigation event at the same rate
+                    idx_last_irrigation = np.where(
+                        irrigation_volume[i::-1])[0][0]
+                    irrigation_volume[i] = np.min([
+                        irrigation_volume[i - idx_last_irrigation],
+                        remaining_allocation, daily_maximum_volume
+                    ])
+                else:
+                    currently_irrigating = True
+                    irrigation_volume[i] = np.min([
+                        current_depth / 1000 * area / efficiency *
+                        irrigation_time_factor, remaining_allocation,
+                        daily_maximum_volume
+                    ])
+
+            if licence_exhausted:
+                irrigation_volume[i] = 0
+                current_depth = threshold_depth
+                currently_irrigating = False
+
+            # Check if new year has started
+            if date[i].dayofyear == 1:
+                annual_irrigation_volume[i] = irrigation_volume[i]
+                licence_exhausted = False
+            else:
+                annual_irrigation_volume[i] = (
+                    annual_irrigation_volume[i - 1] + irrigation_volume[i])
+
+            # Update depletion depth
+            depletion_depth[i] = current_depth
+
+            # Update QA table at end of year
+            if (date[i].month == 12) & (date[i].day == 31):
+                q_fraction_of_licence = (
+                    annual_irrigation_volume[i] / annual_volume_capped)
+                qa_fraction_used.loc[datetime(date[i].year, 1, 1),
+                                     lic['CWLICENSE']] = q_fraction_of_licence
+
+        # Update inflows with irrigation demand (sign is negative for outflow)
+        irrigation_q = -irrigation_volume / 24 / 3600
+        irrigation_q_df = pd.DataFrame(irrigation_q, index=rain_master.index)
+        irrigation_q_df.columns = [lic['CWLICENSE']]
+        inflow_timeseries = pd.concat([inflow_timeseries, irrigation_q_df], axis=1)
+        # Interpolate the NaN value introduced on 29 February in leap years
+        inflow_timeseries[lic['CWLICENSE']] = inflow_timeseries[
+            lic['CWLICENSE']].interpolate(method='linear', axis=0)
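+
+# The consolidation below builds wq_df in long format: one row per
+# constituent per date, with one column per inflow source (upstream
+# boundaries, WWTPs, and the two AWBM land-use types). Dates stay on the
+# index and constituent names go in a 'constituent' column, so a single
+# day is retrieved later with wq_df[wq_df.index == date].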
+
+# Consolidate wq data into single dataframe
+if S['include_WQ'].lower() == 'yes':
+    wq_df = pd.DataFrame()
+    wq_cols = wq_timeseries.keys()
+
+    #TH There is a problem here if the model run starts earlier than 1994:
+    #TH dates from inflow_timeseries cannot then be found in wq_timeseries
+    for i in inflow_timeseries.index:
+        wq = pd.DataFrame([wq_timeseries[n].loc[i, :] for n in wq_cols]).T
+        wq.columns = wq_cols
+        wq.index.name = 'constituent'
+        wq = wq.reset_index()
+        wq.index = np.tile(i, wq.shape[0])
+        wq_df = pd.concat([wq_df, wq])
+
+    # Shift the water quality time series dataframe by the offset between
+    # the run start date and the present-day start date
+    wq_df.index = wq_df.index + (start_date - pres_start_date)
+
+
+# Write element inflows for RMA
+# Consolidate inflow elements in RMA mesh (only include those with inflows)
+inflow_elements = mesh.loc[[len(n) > 0
+                            for n in mesh['inflows']], ['name', 'inflows']]
+
+# Iterate through years
+for current_year in range(start_date.year, end_date.year + 1):
+
+    # RMA2: create input file
+    fq = open(
+        os.path.join(S['output_dir'], 'RMA2', '{}.elt'.format(current_year)),
+        'w')
+    fq.write('TE Generated Runoff (see end of file for run parameters)\n')
+
+    # RMA11: create input file
+    if S['include_WQ'].lower() == 'yes':
+        fwq = open(
+            os.path.join(S['output_dir'], 'RMA11',
+                         '{}.wqg'.format(current_year)), 'w')
+
+    # Create progress bar
+    pbar = tqdm(
+        inflow_elements['inflows'].iteritems(), total=inflow_elements.shape[0])
+
+    # Iterate through mesh elements
+    for ID, q_names in pbar:
+
+        # Update progress bar
+        pbar.set_description('Writing input for year {}'.format(current_year))
+
+        # For each new element
+        fq.write('{:<8}{:>8}{:>8}{:>8}'.format('QEI', ID, 1, current_year))
+        fq.write(' ### {}\n'.format(list(q_names)))
+
+        if S['include_WQ'].lower() == 'yes':
+            fwq.write('TI {}\n'.format(list(q_names)))
+            fwq.write('{:<8}{:>8}{:>8}{:>8}\n'.format('QT', ID, 3,
+                                                      current_year))
+
+        # Iterate through time steps
+        for index, row in inflow_timeseries[inflow_timeseries.index.year ==
+                                            current_year].iterrows():
+            # Calculate flow rate for each timestep
+            q = sum(row[q_names].values)
+            fq.write('{:<5}{:>3}{:>8}{:>+8.1E}\n'.format(
+                'QE', index.dayofyear, index.hour, q))
+
+            if S['include_WQ'].lower() == 'yes':
+                # Get water quality values for current day
+                wq = wq_df[wq_df.index == index].set_index('constituent')  #TH changed: lookup via .loc on the constituent index did not work here
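+
+                # The blocks below combine all inflows at this element by
+                # mass balance: each source contributes flow * concentration,
+                # and the mixed concentration is total mass flux over total
+                # flow, C = sum(Q_i * C_i) / sum(Q_i). Illustrative check:
+                # 2 m3/s at 10 mg/L plus 8 m3/s at 5 mg/L gives
+                # (20 + 40) / 10 = 6 mg/L.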
+
+                # Get names of WWTP, catchment, and boundary inflows at current element
+                try:
+                    w_names = [x for x in q_names if x in wwtp_data.keys()]
+                except NameError:
+                    w_names = []
+
+                try:
+                    c_names = [x for x in q_names if x in catchments.index]
+                except NameError:
+                    c_names = []
+
+                try:
+                    b_names = [x for x in q_names if x in bc_data.keys()]
+                except NameError:
+                    b_names = []
+
+                # Initialise water quality mass fluxes
+                wq_mass = np.zeros(len(wq.index))
+
+                # Calculate water quality in catchment runoff
+                if c_names:
+                    c_natural_frac = catchments.loc[c_names, 'Natural'].values
+                    c_natural_conc = wq['awbm_wq_natural'].values[:, np.newaxis]
+                    c_urban_frac = catchments.loc[c_names, 'Urban'].values
+                    c_urban_conc = wq['awbm_wq_urban'].values[:, np.newaxis]
+                    c_flow = row[c_names].values
+
+                    wq_mass += np.sum(
+                        c_flow * (c_natural_frac * c_natural_conc +
+                                  c_urban_frac * c_urban_conc),
+                        axis=1)
+
+                # Calculate water quality from WWTP inflows
+                if w_names:
+                    w_conc = wq[w_names].values
+                    w_flow = row[w_names].values
+                    wq_mass += np.sum(w_flow * w_conc, axis=1)
+
+                # Calculate water quality from upstream boundaries
+                if b_names:
+                    b_conc = wq[b_names].values
+                    b_flow = row[b_names].values
+                    wq_mass += np.sum(b_flow * b_conc, axis=1)
+
+                # Calculate water quality concentrations
+                if q <= 0:
+                    wq_conc = [0] * len(wq_mass)
+                else:
+                    wq_conc = wq_mass / q
+
+                # Write water quality concentrations
+                fwq.write('{:<5}{:>3}{:>8}{:>+8.1E}'.format(
+                    'QD', index.dayofyear, index.hour, q) + ''.join(
+                        '{:>8.2E}'.format(x) for x in wq_conc))
+                fwq.write('\n')
+
+    fq.write('ENDDATA\n\n')
+    fq.write(env_str)
+    fq.close()
+
+    if S['include_WQ'].lower() == 'yes':
+        fwq.write('ENDDATA\n\n')
+        fwq.write(env_str)
+        fwq.close()
+
+print(env_str.split('\n')[0])
+print('Done\n')
diff --git a/Hunter_CC_Modeling_readme.txt b/Hunter_CC_Modeling_readme.txt
index da5c147..a833910 100644
--- a/Hunter_CC_Modeling_readme.txt
+++ b/Hunter_CC_Modeling_readme.txt
@@ -24,3 +24,22 @@ Key steps:
 data can reproduce the observed catchment flow time series and also how different
 the NARcLIM ET is from the observed. Once we generated 12 RMA boundary condition files,
 one for each NARCCLIM ensemble member, the next step will be to automate the climate change scenario runs for NARcLIM.
+
+Specific Steps to run RMA2 and RMA11:
+   run the
+   Prepare the water quality input time series (they must cover the years specified for the run).
+   For the Hunter, we use temperature at Greta, which is interpolated linearly to fill a few
+   gaps in the record using the R code:
+   C:\Users\z5025317\OneDrive - UNSW\WRL_Postdoc_Manual_Backup\WRL_Postdoc\Projects\Paper#1\Analysis\Code\ggplot_time_series_with_trends.R
+   For the Hunter at Greta, the first few months prior to 01/07/1995 0:00 were set to 10.5 degrees manually.
+   For seeham old and godswick, we used the same temperature as for the Hunter River.
+   The Python tide generator code is used to generate the tidal boundary condition. The yearly
+   files have to be copy-pasted into the run directories.
+   Check that the nodes in the Matlab code match the ones generated in the elt file;
+   for that, we use the Matlab code in the RMA generator folder.
+   Once all the run files are generated, start with the RMA2 startup file, which will flood the
+   model by 5 m and then drop the water level to the starting value.
+   The RMA batch file is used to run all the years in batch mode. The executable names have to
+   be updated there. It is run by copy-pasting the batch file into the Windows command line
+   after navigating to the correct run directory, i.e. /HCC001.
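+   For example, in the Windows command prompt (run directory path and batch file name are
+   illustrative only):
+      cd C:\RMA_runs\HCC001
+      run_all_years.bat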
+
+
+Download NARCLIM Data for all tributary catchments: