# coding: utf-8

import re
import os
import time
import collections
from datetime import datetime
from random import random  # used by polygon.interiorPoint() / exteriorPoint()

import numpy as np
import pandas as pd
import geopandas as gpd
from tqdm import tqdm


# Define classes
def loadMesh(fname):
    nodes = loadNodes(fname)
    elements = loadElements(fname, nodes)
    for e in elements.values():
        if not e or e.oneD:
            continue
        if not e.polygon.isConvex():
            print(e, 'concave!!!!!!!!')
            print(e.polygon.isConvex())
    return nodes, elements

def loadNodes(fname):
    nodes = {0: None}
    with open(fname, 'r') as f:
        # find start of node listing
        pStop = re.compile(r'^\s*9999\s*$')  # pattern marking where to stop reading
        line = next(f)
        while not pStop.match(line):
            line = next(f)

        # the regex pattern for reading the nodes
        pNode = re.compile(r'\s+(?P<ID>\d+)\s+(?P<x>\d+\.\d+)\s+(?P<y>\d+\.\d+)')
        for line in f:
            if pStop.match(line):
                break
            m = pNode.search(line)  # get the regex match object
            # create the node object
            nodes[int(m.group('ID'))] = node(
                int(m.group('ID')),
                point(*map(float, m.group('x', 'y'))))
    return nodes

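
# Illustrative sketch of the node records pNode above is written to match
# (an assumption about the mesh file, which loadNodes reads between two
# '9999' sentinel lines; the coordinates are made up):
#
#       1     305000.00    6250000.00
#       2     305050.25    6250010.75
#    9999
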

def loadElements(fname, nodes):
    elements = {}
    with open(fname, 'r') as f:
        # skip the three header lines
        for _ in range(3):
            next(f)
        pStop = re.compile(r'^\s*9999\s*$')
        for line in f:
            if pStop.match(line):
                break
            if int(line[47:50]) > 900:
                continue
            ID = int(line[:5])
            try:
                ns = [nodes[int(line[x * 5:(x + 1) * 5])] for x in range(1, 9)]
            except IndexError:
                # dump debugging detail for malformed element records
                print(line)
                print([(n, s) for n, s in enumerate(line)])
                print([int(line[x * 5:(x + 1) * 5]) for x in range(1, 9)])
                print(len(nodes))
                print([[x * 5, (x + 1) * 5] for x in range(1, 9)])
                raise
            ns = [n for n in ns if n]
            elements[ID] = element(ID, ns)
    return elements


class node(object):
    # This is the node object for RMA nodes. Its attributes include the
    # underlying primitive point object (location), an ID, and the elements
    # to which this node belongs.
    def __init__(self, ID, location, elements=None):
        self.ID = ID
        self.location = location
        self.elements = elements

    def __str__(self):
        return 'node {}'.format(self.ID)

    def __repr__(self):
        return self.__str__()

    def addElement(self, e):
        if not self.elements:
            self.elements = [e]
        else:
            self.elements.append(e)


class element(object):
    # This is the element object for RMA elements. Its attributes include an ID
    # and a list of nodes (nodes should be listed in counterclockwise order).
    # It has a method (contains()) for determining if a given point lies within
    # the element.
    def __init__(self, ID, nodes):
        self.ID = ID
        self.nodes = nodes
        self.isUsed = False
        for n in nodes:
            n.addElement(self)
        self.transition = False
        if len(nodes) == 3:  # BMM
            self.oneD = True
        else:
            self.oneD = False
        points = [n.location for n in nodes if n]
        # if len(points) in [6, 8]:
        #     self.polygon = polygon(points[::2])  # skip mid-side nodes
        if len(points) == 5:
            self.transition = True
            self.polygon = polygon([points[0], points[3], points[4]])
        # elif len(points) == 3:
        #     self.polygon = polygon(points)
        else:
            self.polygon = polygon(points)

        # Find approximate centroid of element
        x = np.mean([n.location.x for n in nodes if n])
        y = np.mean([n.location.y for n in nodes if n])
        self.centroid = point(x, y)

    def __str__(self):
        return 'element {}'.format(self.ID)

    def __repr__(self):
        return self.__str__()

    def used(self):
        self.isUsed = True

    def contains(self, pointIn):
        # checks whether the given point is within this element
        return self.polygon.contains(pointIn)

    def centroid(self):
        # Calculates approximate centroid location
        # (note: shadowed by the `centroid` attribute set in __init__,
        # which is what the rest of this script uses)
        x = np.mean([n.location.x for n in self.nodes])
        y = np.mean([n.location.y for n in self.nodes])
        return [x, y]


class point(object):
    # This is the primitive point object; it has an x and a y value. Most
    # computational geometry is performed by the methods of this object
    # and of the primitive polygon object.
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        return "(" + str(self.x) + ", " + str(self.y) + ")"

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def __hash__(self):
        return hash((self.x, self.y))

    def __add__(self, other):
        return point(self.x + other.x, self.y + other.y)

    def __rmul__(self, c):
        return point(c * self.x, c * self.y)

    def close(self, that, epsilon=0.01):
        return self.dist(that) < epsilon

    def dist(self, that):
        return np.sqrt(self.sqrDist(that))

    def sqrDist(self, that):
        dx = self.x - that.x
        dy = self.y - that.y
        return dx * dx + dy * dy

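
# A minimal, illustrative sanity check for the point primitive above
# (a sketch; not called anywhere in this script):
def _point_demo():
    a = point(0.0, 0.0)
    b = point(3.0, 4.0)
    assert a.dist(b) == 5.0             # Euclidean distance
    assert a + b == point(3.0, 4.0)     # vector addition via __add__
    assert 2 * b == point(6.0, 8.0)     # scalar multiplication via __rmul__
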

class polygon(object):
    # This is the primitive polygon object. It has a list of points defining
    # the vertices of the polygon. It includes a method to determine if a
    # given point lies within the polygon.
    def __init__(self, points):
        if len(points) < 3:
            raise ValueError("Polygon must have at least three vertices.")

        self.points = points
        self.n = len(points)

    def __str__(self):
        s = ""
        for pt in self.points:
            if s:
                s += " -> "
            s += str(pt)
        return s

    def __hash__(self):
        return hash(tuple(sorted(self.points, key=lambda p: p.x)))

    def contains(self, p):
        """Returns True if p is inside self."""
        if self.isConvex():
            # If convex, use a standard ray-casting (crossing number) test
            inside = False

            p1 = self.points[0]
            for i in range(self.n + 1):
                p2 = self.points[i % self.n]
                if p.y > min(p1.y, p2.y):
                    if p.y <= max(p1.y, p2.y):
                        if p.x <= max(p1.x, p2.x):
                            if p1.y != p2.y:
                                xints = (p.y - p1.y) * (p2.x - p1.x) / (
                                    p2.y - p1.y) + p1.x
                            if p1.x == p2.x or p.x <= xints:
                                inside = not inside
                p1 = p2

            return inside
        else:
            raise ValueError("Concave polygons not implemented")

    def isConvex(self):
        target = None
        for i in range(self.n):
            # Check every triplet of points
            A = self.points[i % self.n]
            B = self.points[(i + 1) % self.n]
            C = self.points[(i + 2) % self.n]

            if target is None:
                target = ccw(A, B, C)
            elif ccw(A, B, C) != target:
                if collinear(A, B, C):
                    continue
                return False

        return True

    def ccw(self):
        """Returns True if the points are provided in CCW order."""
        return ccw(self.points[0], self.points[1], self.points[2])

    def interiorPoint(self):
        """Returns a random interior point via rejection sampling."""
        min_x = min([p.x for p in self.points])
        max_x = max([p.x for p in self.points])
        min_y = min([p.y for p in self.points])
        max_y = max([p.y for p in self.points])

        def x():
            return min_x + random() * (max_x - min_x)

        def y():
            return min_y + random() * (max_y - min_y)

        p = point(x(), y())
        while not self.contains(p):
            p = point(x(), y())

        return p

    def exteriorPoint(self):
        """Returns a random exterior point near the polygon."""
        min_x = min([p.x for p in self.points])
        max_x = max([p.x for p in self.points])
        min_y = min([p.y for p in self.points])
        max_y = max([p.y for p in self.points])

        def off():
            return 1 - 2 * random()

        def x():
            return min_x + random() * (max_x - min_x) + off()

        def y():
            return min_y + random() * (max_y - min_y) + off()

        p = point(x(), y())
        while self.contains(p):
            p = point(x(), y())

        return p


def ccw(A, B, C):
    """Returns True if the triangle A, B, C is oriented counterclockwise."""
    return (B.x - A.x) * (C.y - A.y) > (B.y - A.y) * (C.x - A.x)


def collinear(A, B, C):
    # twice the signed triangle area; (near-)zero means the points are collinear
    return abs(A.x * (B.y - C.y) + B.x * (C.y - A.y) + C.x * (A.y - B.y)) < 1e-5

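
# An illustrative sanity check for the polygon predicates above
# (a sketch; not called anywhere in this script):
def _polygon_demo():
    tri = polygon([point(0, 0), point(4, 0), point(0, 4)])
    assert tri.isConvex()                   # every turn has the same orientation
    assert tri.ccw()                        # vertices listed counterclockwise
    assert tri.contains(point(1.0, 1.0))    # inside: ray crosses one edge
    assert not tri.contains(point(5.0, 5.0))
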

# Load project settings
# Establish the settings and run parameters (see the description of the
# settings in the header of this code)
if __name__ == '__main__':
    setup_files = [f for f in os.listdir() if f.endswith('.Setupfile')]
    if len(setup_files) == 1:
        settingsfile = setup_files[0]
    else:
        print('Enter the name of the settings file: ')
        settingsfile = input()

    S = collections.OrderedDict()

    print('Reading settings file: {}'.format(settingsfile))
    with open(settingsfile, 'r') as f:
        for line in f:
            # Ignore commented and empty lines
            if line[0] != '#' and line[0] != '\n':
                # Take key name before colon
                ln = line.strip().split(':', 1)
                key = ln[0].strip()
                # Separate multiple values
                val = [x.strip() for x in ln[1].strip().split(',')]
                if len(val) == 1:
                    val = val[0]
                S[key] = val

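
    # The parser above expects "key : value[, value, ...]" lines. An assumed,
    # illustrative sketch of a settings file (keys match ones used below;
    # the values shown are made up):
    #
    #   # lines starting with '#' are ignored
    #   mesh_file : model_mesh.geo
    #   start_year : 2005
    #   start_month : 1
    #   start_day : 1
    #   wwtp_1 : PlantA, 12.5, 305000, 6250000
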

    # Collect run parameters
    env_str = ''
    env_str += "{0:20} : {1}\n".format("Time Run",
                                       time.strftime('%Y-%m-%d %H:%M:%S'))
    env_str += "{0:20} : {1}\n".format("Settings File", settingsfile)
    for envvar in S.keys():
        env_str += "{0:20} : {1}\n".format(envvar, S[envvar])

    # Get water quality parameters
    wq_columns = [k for k in S.keys() if k.startswith('WQ')]
    wq = {}
    for wq_column in wq_columns:
        try:
            wq[wq_column] = np.array(S[wq_column], dtype=float)
        except ValueError:
            wq[wq_column] = np.array(S[wq_column])

    wq = pd.DataFrame(wq)
    print(wq)
    wq = wq.set_index('WQNames')

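
    # Illustrative sketch of the WQ settings this expects (the key names are
    # grounded in the code below; constituent names and values are made up):
    # one concentration per constituent, per source type.
    #
    #   WQNames   : TN, TP
    #   WQNatural : 0.5, 0.05
    #   WQUrban   : 1.2, 0.15
    #   WQWWTP    : 5.0, 1.00
    #   WQbc_1    : 0.4, 0.04
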

    # Get names of boundary conditions
    wq_bc_headers = [k for k in S.keys() if k.startswith('WQbc_')]
    wq_header_names = {}
    for wq_bc_header in wq_bc_headers:
        bc_id = wq_bc_header[2:]  # strip the leading 'WQ' to get e.g. 'bc_1'
        wq_header_names[wq_bc_header] = S[bc_id][0]
    wq.rename(columns=wq_header_names, inplace=True)


    # Load RMA mesh
    print('Reading RMA mesh file')
    nodes, elements = loadMesh(S['mesh_file'])
    mesh = pd.DataFrame(elements, index=[0]).transpose()
    mesh.columns = ['name']
    mesh['centroid'] = [e.centroid for e in elements.values()]

    # Add empty lists to dataframe
    mesh['inflows'] = np.empty((len(mesh), 0)).tolist()
    mesh['inflows'] = mesh['inflows'].astype(object)

    # Generate empty dataframe for inflows
    start_date = datetime(
        int(S['start_year']), int(S['start_month']), int(S['start_day']))
    end_date = datetime(int(S['end_year']), int(S['end_month']), int(S['end_day']))
    inflow_timeseries = pd.DataFrame(index=pd.date_range(start_date, end_date))


    # Read upstream boundary inflows
    if S['include_boundary_data'].lower() == 'yes':

        # Read boundary condition data from setup file
        bc_data = {}
        for key, val in S.items():
            if re.match(r'bc_\d', key):
                bc_data[val[0]] = dict(east=int(val[1]), north=int(val[2]))

        # Find inflow boundary conditions
        bc_volume_factors = {'ML': 1000}  # Convert to m^3
        bc_time_factors = {'d': 1 / 24 / 3600, 'HR': 1 / 3600}  # Convert to per second
        dir_name = S['bc_directory']
        for key, val in bc_data.items():
            file_name = [x for x in os.listdir(dir_name) if x.startswith(key)][0]
            bc_data[key]['path'] = os.path.join(dir_name, file_name)

            # Check flow rate units
            with open(bc_data[key]['path'], 'r') as f:
                f.readline()  # Inflow name
                f.readline()  # Description
                bc_units = f.readline().rstrip().split('/')

            bc_data[key]['vol_factor'] = bc_volume_factors[bc_units[0]]
            bc_data[key]['t_factor'] = bc_time_factors[bc_units[1]]

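
        # Illustrative layout assumed for each boundary inflow file (name,
        # description, units, then date/flow records; the values are made up):
        #
        #   Upstream River
        #   Daily gauged inflow
        #   ML/d
        #   01/01/2005  1234.5
        #   02/01/2005   987.6
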

        # Assign upstream boundary inflows to RMA mesh
        for key, val in bc_data.items():

            # Find nearest element in RMA mesh
            x = val['east']
            y = val['north']
            mesh['calc'] = [point(x, y).dist(c) for c in mesh['centroid']]
            idx = mesh['calc'].idxmin()

            # Add nearest mesh element location to dataframe
            mesh.at[idx, 'inflows'] = np.append(mesh.at[idx, 'inflows'], key)

        for key, val in bc_data.items():

            # Read upstream boundary condition file
            print('Reading upstream boundary inflow: {}'.format(key))
            df = pd.read_table(
                val['path'],
                delim_whitespace=True,
                skiprows=3,
                header=None,
                usecols=[0, 1])
            df.columns = ['t', key]
            df.t = pd.to_datetime(df.t, format='%d/%m/%Y')
            df.set_index('t', inplace=True)
            # df = df.resample('D', how='mean')

            # Trim dates to valid range
            df = df[start_date:end_date]

            # Scale flow units to m3/s
            df[key] = df[key] * val['t_factor'] * val['vol_factor']

            # Merge all dataframes together
            inflow_timeseries = pd.merge(
                inflow_timeseries, df, right_index=True, left_index=True)


    # Read WWTP inflows
    if S['include_wwtp_flows'].lower() == 'yes':
        # Check if inflow timeseries is provided
        if 'wwtw_file' in S:
            print('Reading WWTP inflows (variable)')
            wwtp_inflows = pd.read_csv(
                S['wwtw_file'], index_col=0, skiprows=[1, 2],
                parse_dates=[0], dayfirst=True)
            wwtp_coords = pd.read_csv(
                S['wwtw_file'], index_col=0, nrows=2,
                parse_dates=True, dayfirst=True)
            wwtp_coords.index = ['easting', 'northing']

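
            # Illustrative sketch of the wwtw_file layout assumed above
            # (plant names and numbers are made up):
            #
            #   datetime,    PlantA,  PlantB    <- header: one column per WWTP
            #   easting,     305000,  312000    <- skipped when reading flows,
            #   northing,   6250000, 6251000       read separately as coordinates
            #   01/01/2005,    12.5,     8.0    <- daily flows in ML/day
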

            wwtp_names = wwtp_inflows.columns.values

            # Assign WWTP inflows to RMA mesh
            for wwtp_name in wwtp_inflows.columns:

                # Find nearest element in RMA mesh
                x = wwtp_coords.loc['easting', wwtp_name]
                y = wwtp_coords.loc['northing', wwtp_name]
                mesh['calc'] = [point(x, y).dist(c) for c in mesh['centroid']]
                idx = mesh['calc'].idxmin()

                # Add nearest mesh element location to dataframe
                mesh.at[idx, 'inflows'] = np.append(mesh.at[idx, 'inflows'],
                                                    wwtp_name)

            # Convert from ML/day to m3/s
            wwtp_inflows = wwtp_inflows * 1000 / 24 / 3600

            # Add to inflow time series dataframes
            inflow_timeseries = inflow_timeseries.join(wwtp_inflows)
        else:
            print('Reading WWTP inflows (constant)')
            # Take flow as constant
            wwtp_keys = [k for k in S.keys() if k.startswith('wwtp_')]
            if not wwtp_keys:
                print('No constant WWTP flow rates found in setup file')
            wwtp_data = {}
            wwtp_data['name'] = [S[w][0] for w in wwtp_keys]
            wwtp_data['q_ML_D'] = [float(S[w][1]) for w in wwtp_keys]
            wwtp_data['easting'] = [int(S[w][2]) for w in wwtp_keys]
            wwtp_data['northing'] = [int(S[w][3]) for w in wwtp_keys]

            # Convert flows to m3/s
            wwtp_data['q_m3_s'] = np.array(wwtp_data['q_ML_D']) * 1000 / 24 / 3600

            # Convert wwtp data into dataframe
            wwtp = pd.DataFrame(
                wwtp_data, columns=['name', 'easting', 'northing', 'q_m3_s'])

            # Assign WWTP inflows to RMA mesh
            for i, wwtp_row in wwtp.iterrows():

                # Find nearest element in RMA mesh
                x = wwtp_row['easting']
                y = wwtp_row['northing']
                mesh['calc'] = [point(x, y).dist(c) for c in mesh['centroid']]
                idx = mesh['calc'].idxmin()

                # Add nearest mesh element location to dataframe
                mesh.at[idx, 'inflows'] = np.append(mesh.at[idx, 'inflows'],
                                                    wwtp_row['name'])

                # Add to inflow time series dataframes
                inflow_timeseries[wwtp_row['name']] = wwtp_row['q_m3_s']

            # Get WWTP names
            wwtp_names = [S[k][0] for k in S.keys() if k.startswith('wwtp')]


    # Load climate data
    # Load reference evapotranspiration
    print('Reading evaporation data')
    df = pd.read_csv(
        S['evap_file'], parse_dates=['datetime'], index_col=['datetime'])
    climate = df
    # print(climate['eto'])

    # Load rainfall
    print('Reading rainfall data')
    df = pd.read_csv(
        S['rain_file'], parse_dates=['datetime'], index_col=['datetime'])
    print(df.keys())
    print(climate.keys())
    print(climate.dtypes)
    climate = pd.merge(climate, df, right_index=True, left_index=True, how='inner')
    print(start_date)
    print(end_date)
    print(climate.dtypes)

    # Trim dataframe to current date range
    climate = climate[start_date:end_date]
    print(climate)


    # Calculate catchment inflows with AWBM
    if S['include_hydro_model'].lower() == 'yes':

        print('Calculating AWBM inflows')

        # Read catchment data
        catchments = pd.read_csv(S['catchment_file'], index_col=[0])
        catchments = catchments.set_index(catchments['Cat_Name'])
        print(catchments)
        for index, row in catchments.iterrows():

            # Find nearest element in RMA mesh
            x = row.Easting
            y = row.Northing
            mesh['calc'] = [point(x, y).dist(c) for c in mesh['centroid']]
            idx = mesh['calc'].idxmin()

            # Add nearest mesh element location to dataframe
            mesh.at[idx, 'inflows'] = np.append(mesh.at[idx, 'inflows'],
                                                row.Cat_Name)

        # Get weather station data
        station_names = list(climate.columns)
        station_names.remove('eto')
        station_names.remove('median')

        # Load weights from Thiessen polygons
        thiessen_weights = pd.read_csv(S['catchment_thiessen_weights'])

        # Add catchment inflows
        for index, c in catchments.iterrows():

            # Time step (units: days)
            timeStep = 1.0

            # Area (units: m2)
            totalArea = c['Area (km2)'] * 1000 * 1000

            # Surface store capacities C1..C3
            consS = [c['C1'], c['C2'], c['C3']]

            # Partial areas A1..A3 (must sum to 1)
            consA = [c['A1'], c['A2'], c['A3']]

            consKB = c['Kbase']
            consKS = c['Ksurf']
            BFI = c['BFI']

            bucketValue = [0, 0, 0]
            bfValue = 0
            sfValue = 0

            def flowInit(length):
                vec = [0] * length
                return vec

            def updatebucket(Elevation, surfaceCon, previousValue, flow):
                if Elevation > surfaceCon:
                    flow = Elevation - surfaceCon
                    previousValue = surfaceCon
                else:
                    flow = 0
                    previousValue = max(Elevation, 0)
                return previousValue, flow

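
            # Worked example of updatebucket (illustrative; the last two
            # arguments are overwritten, so their input values do not matter):
            #   updatebucket(5.0, 3.0, _, _)  -> (3.0, 2.0)
            #       store capped at capacity; 2.0 mm spills as excess
            #   updatebucket(-1.0, 3.0, _, _) -> (0, 0)
            #       evaporation cannot draw the store below empty
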

            # Calculate Thiessen weightings
            weights = thiessen_weights[thiessen_weights['Name'] == c['Cat_Name']][
                station_names]

            rainfall = np.sum(
                np.asarray(climate[station_names]) * np.asarray(weights), axis=1)
            evaporation = climate['eto']
            # Count number of timesteps
            n = len(climate.index)

            Excess = [flowInit(n) for _ in range(3)]
            ExcessTotal = flowInit(n)
            ExcessBF = flowInit(n)
            ExcessSF = flowInit(n)
            ExcessRunoff = flowInit(n)
            Qflow = flowInit(n)

            [aa, bb] = updatebucket(1, 2, 3, 4)  # smoke test; result unused
            for i in range(1, len(rainfall)):
                ElevTemp = [[bucketValue[j] + rainfall[i] - evaporation[i]]
                            for j in range(3)]
                for k in range(3):
                    [bucketValue[k], Excess[k][i]] = updatebucket(
                        ElevTemp[k][0], consS[k], bucketValue[k], Excess[k][i - 1])
                ExcessTotal[i] = (Excess[0][i] * consA[0] + Excess[1][i] * consA[1]
                                  + Excess[2][i] * consA[2])
                ExcessBF[i] = bfValue + ExcessTotal[i] * BFI
                bfValue = max(consKB * ExcessBF[i], 0)
                ExcessSF[i] = sfValue + (1 - BFI) * ExcessTotal[i]
                sfValue = max(consKS * ExcessSF[i], 0)
                ExcessRunoff[i] = (
                    (1 - consKB) * ExcessBF[i] + (1 - consKS) * ExcessSF[i])

            Qflow = [
                a * (1e-3) * totalArea / (timeStep * 86400) for a in ExcessRunoff
            ]  # flow in m3/s

            inflow_timeseries[c['Cat_Name']] = Qflow


    # Calculate irrigation demand
    if S['include_crop_model'].lower() == 'yes':

        print('Calculating irrigation demand')

        # Create QA summary
        qa_fraction_used = pd.DataFrame(index=pd.date_range(
            start=start_date, end=end_date, freq='AS'))

        # Load water licence holders
        licences = pd.read_csv(S['licences_file'])
        licences['point'] = [
            point(x, y) for x, y in zip(licences['Easting'], licences['Northing'])
        ]

        # Assign water licences to RMA mesh
        for index, row in licences.iterrows():

            # Find nearest element in RMA mesh
            x = row.Easting
            y = row.Northing
            mesh['calc'] = [point(x, y).dist(c) for c in mesh['centroid']]
            idx = mesh['calc'].idxmin()

            # Add nearest mesh element location to dataframe
            mesh.at[idx, 'inflows'] = np.append(mesh.at[idx, 'inflows'],
                                                row.CWLICENSE)

        weather_stations = pd.read_excel(S['weather_station_file'])
        weather_stations['point'] = [
            point(x, y)
            for x, y in zip(weather_stations['E_MGA56'],
                            weather_stations['N_MGA56'])
        ]

        # Find nearest weather station
        licences['station_name'] = ''
        for index, row in licences.iterrows():
            idx = np.argmin(
                [row['point'].dist(p) for p in weather_stations['point']])
            licences.at[index, 'station_name'] = weather_stations['Name'][idx]

        # Crop parameters after FAO56: http://www.fao.org/docrep/x0490e/x0490e0e.htm
        crop_types = {
            'type': [
                'pasture', 'turf', 'lucerne', 'vegetables', 'orchard',
                'non_irrigation'
            ],
            'crop_coefficient': [1, 1, 1, 1, 1, np.nan],
            'root_zone_depth': [1000, 750, 1500, 500, 1500, np.nan],
            'allowable_depletion': [0.6, 0.5, 0.6, 0.5, 0.5, np.nan],
        }

        crop_types = pd.DataFrame(crop_types)

        # Check number of days to spread irrigation over
        irrigation_days = int(S['irrigation_days'])

        # Check if moisture in soil should be kept full (saturated) or empty
        if S['irrigate_to_saturation'].lower() == 'yes':
            saturation_mode = True
        else:
            saturation_mode = False

        irrigation_time_factor = 1 / irrigation_days

        # Iterate through licences
        for index, lic in tqdm(licences.iterrows(), total=licences.shape[0]):

            # Initialise variables for new licence
            currently_irrigating = False
            licence_exhausted = False

            # Annual extraction volume
            annual_volume_capped = lic['SHARECOMPO'] * 1000
            annual_volume_uncapped = np.inf

            # Set a maximum daily extraction limit
            daily_maximum_volume = annual_volume_capped * float(
                S['daily_limit_fraction'])

            if S['capped_to_licence'].lower() == 'yes':
                # Limited to share component
                annual_volume = annual_volume_capped
            else:
                # Unlimited
                annual_volume = annual_volume_uncapped

            # Check if licence is for non-irrigation purposes
            if lic['PRIMARY_USE'].lower() == 'non_irrigation':
                # Distribute licenced amount evenly over year (m3/year to m3/s)
                irrigation_q = annual_volume / 365.25 / 24 / 3600
                inflow_timeseries[lic['CWLICENSE']] = -irrigation_q
                continue

            # Irrigation area (m2)
            area = lic['Area']

            # Available water holding capacity (mm of water per mm of soil,
            # i.e. 110 mm per metre of root zone)
            water_holding_capacity = 110 / 1000

            # Get parameters for specific crop type
            crop = crop_types[crop_types['type'] == lic['PRIMARY_USE'].lower()]

            # Crop coefficient (from FAO56)
            crop_coefficient = float(crop['crop_coefficient'])

            # Root zone depth (mm)
            root_zone_depth = float(crop['root_zone_depth'])

            # Allowable water level depletion before irrigation is required
            # (fraction)
            allowable_depletion = float(crop['allowable_depletion'])

            # Irrigation efficiency (fraction)
            efficiency = float(S['irrigation_efficiency'])

            # Plant available water (mm)
            plant_available_water = root_zone_depth * water_holding_capacity

            # Irrigation trigger depth (mm)
            threshold_depth = plant_available_water * allowable_depletion

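
            # Worked example (illustrative, pasture): root_zone_depth = 1000 mm
            # and water_holding_capacity = 0.110 give plant_available_water =
            # 110 mm, so with allowable_depletion = 0.6 irrigation is triggered
            # once the soil moisture deficit exceeds 66 mm.
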

            # Calculate soil moisture over time
            rain = climate[lic['station_name']]
            eto = climate['eto']
            date = climate.index
            depletion_depth = np.zeros(len(climate))
            irrigation_volume = np.zeros(len(climate))
            annual_irrigation_volume = np.zeros(len(climate))

            etc = eto * crop_coefficient

            for i in range(1, len(climate)):

                if not saturation_mode:
                    currently_irrigating = False

                # Calculate remaining licence allocation
                remaining_allocation = (
                    annual_volume - annual_irrigation_volume[i - 1])

                # Check if licence is exhausted
                if remaining_allocation <= 0:
                    licence_exhausted = True

                # Apply evapotranspiration and rain
                current_depth = depletion_depth[i - 1] + etc[i] - rain[i - 1]

                # Check if soil was irrigated the previous day
                if irrigation_volume[i - 1] > 0:
                    current_depth = (
                        current_depth -
                        irrigation_volume[i - 1] / area * 1000 * efficiency)

                # If soil is saturated from rain or irrigation, do not store excess
                if current_depth < 0:
                    current_depth = 0
                    currently_irrigating = False

                # Check if soil moisture is too low
                if (((current_depth > threshold_depth) and
                     (rain[i] < 0.2 * current_depth)) or currently_irrigating):

                    if currently_irrigating:
                        idx_last_irrigation = np.where(
                            irrigation_volume[i::-1])[0][0]
                        irrigation_volume[i] = np.min([
                            irrigation_volume[i - idx_last_irrigation],
                            remaining_allocation, daily_maximum_volume
                        ])
                    else:
                        currently_irrigating = True
                        irrigation_volume[i] = np.min([
                            current_depth / 1000 * area / efficiency *
                            irrigation_time_factor, remaining_allocation,
                            daily_maximum_volume
                        ])

                if licence_exhausted:
                    irrigation_volume[i] = 0
                    current_depth = threshold_depth
                    currently_irrigating = False

                # Check if new year has started
                if date[i].dayofyear == 1:
                    annual_irrigation_volume[i] = 0 + irrigation_volume[i]
                    licence_exhausted = False
                else:
                    annual_irrigation_volume[i] = (
                        annual_irrigation_volume[i - 1] + irrigation_volume[i])

                # Update depletion depth
                depletion_depth[i] = current_depth

                # Update QA table at end of year
                if (date[i].month == 12) & (date[i].day == 31):
                    q_fraction_of_licence = (
                        annual_irrigation_volume[i] / annual_volume_capped)
                    qa_fraction_used.loc[
                        datetime(date[i].year, 1, 1),
                        lic['CWLICENSE']] = q_fraction_of_licence

            # Update inflows with irrigation demand (sign is negative for outflow)
            irrigation_q = irrigation_volume / 24 / 3600
            inflow_timeseries[lic['CWLICENSE']] = -irrigation_q


    # Write element inflows for RMA
    # Consolidate inflow elements in RMA mesh (only include those with inflows)
    inflow_elements = mesh.loc[[len(n) > 0 for n in mesh['inflows']],
                               ['name', 'inflows']]

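
    # Illustrative sketch of one element block in the RMA2 .elt output
    # (field widths follow the format strings below; the values are made up):
    #
    #   QEI          101       1    2005 ### ['PlantA']
    #   QE     1       0+1.2E-01
    #   QE     2       0+1.1E-01
    #   ...
    #   ENDDATA
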

    # Iterate through years
    for current_year in range(start_date.year, end_date.year + 1):

        # RMA2: create input file
        fq = open(
            os.path.join(S['output_dir'], 'RMA2', '{}.elt'.format(current_year)),
            'w')
        fq.write('TE Generated Runoff (see end of file for run parameters)\n')

        # RMA11: create input file
        fwq = open(
            os.path.join(S['output_dir'], 'RMA11', '{}.wqg'.format(current_year)),
            'w')

        # Create progress bar
        pbar = tqdm(
            inflow_elements['inflows'].items(), total=inflow_elements.shape[0])

        # Iterate through mesh elements
        for ID, q_names in pbar:

            # Update progress bar
            pbar.set_description('Writing input for year {}'.format(current_year))

            # For each new element
            fq.write('{:<8}{:>8}{:>8}{:>8}'.format('QEI', ID, 1, current_year))
            fq.write(' ### {}\n'.format(list(q_names)))
            fwq.write('TI {}\n'.format(list(q_names)))
            fwq.write('{:<8}{:>8}{:>8}{:>8}\n'.format('QT', ID, 3, current_year))

            # Iterate through time steps
            for index, row in inflow_timeseries[inflow_timeseries.index.year ==
                                                current_year].iterrows():
                # print('Here Before Before {}'.format(row[q_names].values))
                # Write flow rate for each timestep
                q = sum(row[q_names].values)
                fq.write('{:<5}{:>3}{:>8}{:>+8.1E}\n'.format(
                    'QE', index.dayofyear, index.hour, q))

                # Get current WWTP and catchment inflows
                try:
                    w_names = [x for x in q_names if x in wwtp_names]
                except NameError:
                    w_names = []

                try:
                    c_names = [x for x in q_names if x in catchments.index]
                except NameError:
                    c_names = []

                try:
                    b_names = [x for x in q_names if x in bc_data.keys()]
                except NameError:
                    b_names = []

                # Initialise water quality values
                wq_mass = np.zeros(len(wq.index))

                # Calculate water quality in catchment runoff
                if c_names:
                    c_natural_frac = catchments.loc[c_names, 'Natural'].values
                    c_natural_conc = wq['WQNatural'].values[:, np.newaxis]
                    c_urban_frac = catchments.loc[c_names, 'Urban'].values
                    c_urban_conc = wq['WQUrban'].values[:, np.newaxis]
                    c_flow = row[c_names].values

                    wq_mass += np.sum(
                        c_flow * (c_natural_frac * c_natural_conc +
                                  c_urban_frac * c_urban_conc),
                        axis=1)

                # Calculate water quality from WWTP inflows
                if w_names:
                    w_conc = wq['WQWWTP'].values[:, np.newaxis]
                    w_flow = row[w_names].values

                    wq_mass += np.sum(w_flow * w_conc, axis=1)

                # Calculate water quality from upstream boundaries
                if b_names:
                    b_conc = wq[b_names].values
                    b_flow = row[b_names].values

                    wq_mass += np.sum(b_flow * b_conc, axis=1)
                # print('Here Before {}'.format(q))
                # Calculate water quality concentrations
                if q <= 0:
                    wq_conc = [0] * len(wq_mass)
                else:
                    wq_conc = wq_mass / q
                # print('Here {} -- {}'.format(wq_conc, index.dayofyear))
                if S['include_WQ'].lower() == 'yes':
                    # Write water quality concentrations
                    fwq.write('{:<5}{:>3}{:>8}{:>+8.1E}'.format(
                        'QD', index.dayofyear, index.hour, q) + ''.join(
                            '{:>8.2E}'.format(x) for x in wq_conc))
                    fwq.write('\n')

        fq.write('ENDDATA\n\n')
        fq.write(env_str)
        fq.close()

        fwq.write('ENDDATA\n\n')
        fwq.write(env_str)
        fwq.close()

    print(env_str.split('\n')[0])
    print('Done\n')