#suite of python codes for exploring rma result files
parent 83f89c254d
commit 873eed7540
@@ -0,0 +1,175 @@
# -*- coding: utf-8 -*-
#==========================================================#
#Last Updated - June 2018
#@author: z5025317 Valentin Heimhuber
#Code for creating climate prioritization plots for NARCLIM variables.
#Inputs: uses CSV files that contain the time series of all 12 NARCLIM model runs for one grid cell, created with P1_NARCliM_NC_to_CSV_CCRC_SS.py

#==========================================================#
#Load packages
#==========================================================#
import numpy as np
import os
import pandas as pd
import glob
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime
from datetime import timedelta
from matplotlib.backends.backend_pdf import PdfPages
from ggplot import *
matplotlib.style.use('ggplot')

#==========================================================#
#Input parameters
#==========================================================#
# Set working directory (where the postprocessed NARCLIM data is located)
#set beginning and end years and corresponding scenario codes
fs = ['HCC010', 'HCC040']
comparison_code = '10_40'
startyear = 1995
endyear = 1997
year = range(startyear, endyear + 1)

#set directory path for output files
output_directory = 'C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/Output/Postprocessed' + comparison_code + '/'
variable = 'vel'  #depth or vel

#set model element nodes from chainage file
node = [837, 666, 981, 59, 527, 34, 391]  #1,

#set plot parameters
ALPHA_figs = 1
font = {'family': 'sans-serif',
        'weight': 'normal',
        'size': 14}
matplotlib.rc('font', **font)
#==========================================================#

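# Input note: the depth/velocity files read below are space-delimited with
# 'Year' and 'Hour' columns plus one column per element node, as written by
# the RMA extraction scripts included in this commit.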
#==========================================================#
#100% automated part of the code doing the data extraction
#==========================================================#
Summary_df = pd.DataFrame()
for f in fs:
    #f = 'HCC040'
    #set input and output directories
    input_directory = 'C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/Output/' + f
    #set directory path for individual png files
    png_output_directory = output_directory + '/Figures/'
    # Set working directory (where the postprocessed data is located)
    os.chdir(input_directory)
    #output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
        print('-------------------------------------------')
        print("output directory folder didn't exist and was generated")
        print('-------------------------------------------')

    #output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
    if not os.path.exists(png_output_directory):
        os.makedirs(png_output_directory)
        print('-------------------------------------------')
        print("output directory folder didn't exist and was generated")
        print('-------------------------------------------')
    #==========================================================#
    #Load data file
    if variable == 'depth':
        Clim_Var_CSVs = glob.glob('*' + variable + '*')
        clim_var_csv_path = Clim_Var_CSVs[0]
        df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ')
        df.index = pd.to_datetime(df.Year, format='%Y') + pd.to_timedelta(df.Hour, unit='h')
        df = df.drop(columns=['Year', 'Hour'])
    if variable == 'vel':
        #x velocity
        Clim_Var_CSVs = glob.glob('*' + 'x' + variable + '*')
        clim_var_csv_path = Clim_Var_CSVs[0]
        df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ')
        df.index = pd.to_datetime(df.Year, format='%Y') + pd.to_timedelta(df.Hour, unit='h')
        dfx = df.drop(columns=['Year', 'Hour', '1'])
        #y velocity
        Clim_Var_CSVs = glob.glob('*' + 'y' + variable + '*')
        clim_var_csv_path = Clim_Var_CSVs[0]
        df = pd.read_csv(clim_var_csv_path, index_col=False, sep=' ')
        df.index = pd.to_datetime(df.Year, format='%Y') + pd.to_timedelta(df.Hour, unit='h')
        dfy = df.drop(columns=['Year', 'Hour', '1'])
        df = np.sqrt(dfx*dfx + dfy*dfy)

    #if f == 'HCC040':
    #    df.index = df.index + pd.to_timedelta(72, unit='h')
    #append to summary data frame
    if f == 'HCC010':
        scname = 'Present'
    if f == 'HCC011':
        scname = 'Near_Future'
        df.index = df.index - (datetime.strptime('2025 01 01', '%Y %m %d').date() - datetime.strptime('1995 01 01', '%Y %m %d').date())
    if f == 'HCC012':
        scname = 'Far_Future'
        df.index = df.index - (datetime.strptime('2065 01 01', '%Y %m %d').date() - datetime.strptime('1995 01 01', '%Y %m %d').date())
    if f == 'HCC040':
        scname = 'Present_Resto'
    df.columns = df.columns + '_' + scname
    Summary_df = pd.concat([Summary_df, df], axis=1, join='outer')

#cut the summary dataframe down to the common years
Summary_df = Summary_df[datetime.strptime(str(startyear) + ' 01 05', '%Y %m %d').date():datetime.strptime(str(endyear) + ' 12 31', '%Y %m %d').date()]
Summary_df.tail()

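# Summary_df now holds one column per node and scenario, named '<node>_<scenario>'
# (e.g. '666_Present'), which is what the regex filter in the plotting loop
# below relies on to pick out all scenarios for a given node.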
for NODE in node:
    NODE = str(NODE)
    #=========#
    #comparison time series plot at node
    #=========#
    #output name
    png_out_file_name = comparison_code + '_' + variable + '_' + NODE + '_time_series.png'
    png_out_path = png_output_directory + '/' + png_out_file_name
    #
    plotin_df = Summary_df.filter(regex=NODE)
    #prepare data frame for plot
    fig = plt.figure(figsize=(14, 18))
    ax = plt.subplot(4, 1, 1)
    #start and end date of plots
    plt.title(variable + '_' + NODE + ' - time series')
    start_date = datetime(int('1996'), int('02'), int('01'))
    end_date = datetime(int('1996'), int('02'), int('5'))
    plotin_df1 = plotin_df[start_date:end_date]
    ymin = min(plotin_df1.min()) - 0.01 * min(plotin_df1.min())
    ymax = max(plotin_df1.max()) + 0.01 * max(plotin_df1.max())
    plotin_df1.plot(ylim=(ymin, ymax), legend=False, ax=ax)
    #plt.legend(loc=1)
    plt.xticks([])

    ax = plt.subplot(4, 1, 2)
    start_date = datetime(int('1996'), int('02'), int('01'))
    end_date = datetime(int('1996'), int('02'), int('3'))
    plotin_df2 = plotin_df[start_date:end_date]
    ymin = min(plotin_df2.min()) - 0.1 * min(plotin_df2.min())
    ymax = max(plotin_df2.max()) + 0.1 * max(plotin_df2.max())
    plotin_df2.plot(ylim=(ymin, ymax), legend=False, ax=ax)
    #plt.legend(loc=1)
    ax.xaxis.grid(False)

    ax = plt.subplot(4, 1, 3)
    plt.title(variable + '_' + NODE + ' - monthly maximum')
    Monthly_max = plotin_df.resample('M').max()
    Monthly_max.plot(ax=ax, legend=False)
    plt.legend(loc=1)
    ax.xaxis.grid(False)

    ax = plt.subplot(4, 1, 4)
    plt.title(variable + '_' + NODE + ' - monthly mean')
    Monthly_mean = plotin_df.resample('M').mean()
    Monthly_mean.plot(ax=ax)
    #plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
    #           ncol=2, mode="expand", borderaxespad=0.)
    #plt.legend(loc=1)
    ax.xaxis.grid(False)

    #ax.patch.set_alpha(ALPHA_figs)
    fig.patch.set_alpha(ALPHA_figs)
    fig.tight_layout()
    fig.savefig(png_out_path)
    plt.close()
    #=========#

@@ -0,0 +1,105 @@
# Set working directory where the python code is located and where results are to be stored
import os
os.chdir('C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/RMA_result_explorer')
from pylab import *
import sys
from py_rmatools import rma

#==========================================================#
#Input parameters
#==========================================================#
#print "Enter the RMA filename (without extension):"
#f=raw_input()

#set beginning and end years and corresponding scenario code
run = 'HCC012'
startyear = 2065
endyear = 2067
year = range(startyear, endyear + 1)

#set model element nodes from chainage file
node = [666, 59, 527]
#set run directory where RMA results are located
run_directory = 'I:/ENG/WRL/WRL1/Project wrl2016032/Hunter_CC_Modeling/Completed_Simulations/' + run + '/'
#set directory path for output files
output_directory = 'C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/Output/' + run + '/'
#==========================================================#

#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
    os.makedirs(output_directory)
    print('-------------------------------------------')
    print("output directory folder didn't exist and was generated")
    print('-------------------------------------------')

time = []
files = []

for ii in node:
    files.append(output_directory + run + '_%d_WQ.txt' % ii)

print(node)
I_const = [1, 2]

for kk in list(enumerate(node)):
    target = open(files[kk[0]], 'w')
    target.write("Year Hour")
    for ii in I_const:
        target.write(" %d" % (ii))
    target.write("\n")
    target.close()


for jj in year:
    print(jj)
    f = run_directory + run + '_%d_SAL' % jj
    R = rma()
    R.open(f)
    while R.next():
        time.append(R.time)
        for kk in list(enumerate(node)):
            target = open(files[kk[0]], 'a')
            target.write("%i %f" % (jj, time[-1]))
            for ii in I_const:
                target.write(" %f" % (R.constit[ii][kk[1]]))
            target.write("\n")
            target.close()
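# Each <run>_<node>_WQ.txt written above is space-separated: the model year,
# the decimal hour of the timestep, then one value per constituent index
# listed in I_const, read from the RMA-11 result file for that year.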

###########################

#I_const=[1,2]
#filename1 = output_directory + run + '_SAL.txt'
#target = open(filename1, 'w')
#target.write("Year Hour ")
#for inode in node:
#    target.write("%i " % inode)
#target.write('\n')
#print(filename1)
#for jj in year:
#    f1 = run_directory + run + '_%d' % jj
#    R = rma()
#    print(f1)
#    R.open(f1)
#    print(jj)
#    while R.next():
#        time.append(R.time)
#        for kk in list(enumerate(node)):
#            target.write("%i %f" % (jj, time[-1]))
#            target.write(" %f" % (R.constit[I_const[0]][kk[1]]))
#        target.write('\n')
#target.close()


#for kk in node:
#    filename1 = 'DataCR046_%d_.txt' % kk

#
@@ -0,0 +1,140 @@

#==========================================================#
#Load packages
#==========================================================#
# Set working directory where the python code is located and where results are to be stored
import os
os.chdir('C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/RMA_result_explorer')
from py_rmatools_v02 import rma
#==========================================================#

#==========================================================#
#Input parameters
#==========================================================#
#print "Enter the RMA filename (without extension):"
#f=raw_input()

#set beginning and end years and corresponding scenario code
f = 'HCC011'
startyear = 2025
endyear = 2027
year = range(startyear, endyear + 1)

#set model element nodes from chainage file
node = [837, 666, 981, 59, 527, 34, 1, 391]

#set run directory where RMA results are located
run_directory = 'I:/ENG/WRL/WRL1/Project wrl2016032/Hunter_CC_Modeling/Completed_Simulations/' + f + '/'
#set directory path for output files
output_directory = 'C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/Output/' + f + '/'
#==========================================================#

#==========================================================#
#100% automated part of the code doing the data extraction
#==========================================================#
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
    os.makedirs(output_directory)
    print('-------------------------------------------')
    print("output directory folder didn't exist and was generated")
    print('-------------------------------------------')

time = []
xvel = []
yvel = []
totvel = []
elevation = []
depth = []
#==========================================================#
#extract depth
#==========================================================#
filename1 = output_directory + f + '_depth.txt'
target = open(filename1, 'w')
target.write("Year Hour ")
for inode in node:
    target.write("%i " % inode)
target.write('\n')
print(filename1)
for jj in year:
    f1 = run_directory + f + '_%d' % jj
    R = rma()
    print(f1)
    R.open(f1)
    print(jj)

    while R.next():
        time.append(R.time)
        target.write("%.0f %r " % (jj, R.time))
        for inode in node:
            target.write("%f " % R.depth[inode])
        target.write('\n')

#print(" Press ENTER to exit")
target.close()
#f=input()
#==========================================================#
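
# Optional helper (an illustrative sketch, not called by the original blocks
# in this script): the depth, x-velocity and y-velocity extractions differ
# only in which attribute of each timestep is written, so the same loop can be
# expressed once. 'extract_attribute' is a name introduced here, not part of
# the original code.
def extract_attribute(attr, out_path):
    with open(out_path, 'w') as out:
        out.write("Year Hour ")
        for inode in node:
            out.write("%i " % inode)
        out.write('\n')
        for yy in year:
            R = rma()
            R.open(run_directory + f + '_%d' % yy)
            while R.next():
                out.write("%.0f %r " % (yy, R.time))
                for inode in node:
                    out.write("%f " % getattr(R, attr)[inode])
                out.write('\n')

#e.g. extract_attribute('depth', output_directory + f + '_depth.txt')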

#==========================================================#
#extract xvel
#==========================================================#
filename1 = output_directory + f + '_xvel.txt'
target = open(filename1, 'w')

target.write("Year Hour ")
for inode in node:
    target.write("%i " % inode)
target.write('\n')
print(filename1)
for jj in year:
    f1 = run_directory + f + '_%d' % jj
    R = rma()
    print(f1)
    R.open(f1)
    print(jj)

    while R.next():
        time.append(R.time)
        target.write("%.0f %r " % (jj, R.time))
        for inode in node:
            target.write("%f " % R.xvel[inode])
        target.write('\n')
#print(" Press ENTER to exit")
target.close()
#f=input()
#==========================================================#

#==========================================================#
#extract yvel
#==========================================================#
filename1 = output_directory + f + '_yvel.txt'
target = open(filename1, 'w')

target.write("Year Hour ")
for inode in node:
    target.write("%i " % inode)
target.write('\n')
print(filename1)
for jj in year:
    f1 = run_directory + f + '_%d' % jj
    R = rma()
    print(f1)
    R.open(f1)
    print(jj)

    while R.next():
        time.append(R.time)
        target.write("%.0f %r " % (jj, R.time))
        for inode in node:
            target.write("%f " % R.yvel[inode])
        target.write('\n')

#print(" Press ENTER to exit")
target.close()
#f=input()
#==========================================================#
@@ -0,0 +1,81 @@
# Set working directory where the python code is located and where results are to be stored
import os
os.chdir('C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/RMA_result_explorer')
from pylab import *
import sys
from py_rmatools import rma

#==========================================================#
#Input parameters
#==========================================================#
#print "Enter the RMA filename (without extension):"
#f=raw_input()

#set beginning and end years and corresponding scenario code
f = 'HCC011'
startyear = 2025
endyear = 2027
year = range(startyear, endyear + 1)

#set model element nodes from chainage file
node = [837, 666, 981, 59, 527, 34, 1, 391]
node = [837, 666]
#set run directory where RMA results are located
run_directory = 'I:/ENG/WRL/WRL1/Project wrl2016032/Hunter_CC_Modeling/Completed_Simulations/' + f + '/'
#set directory path for output files
output_directory = 'C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/Output/' + f + '/'
#==========================================================#

#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
    os.makedirs(output_directory)
    print('-------------------------------------------')
    print("output directory folder didn't exist and was generated")
    print('-------------------------------------------')

time = []
files = []
for ii in node:
    files.append(output_directory + f + '_%d_.txt' % ii)

print(node)
I_const = [1, 2]

for kk in list(enumerate(node)):
    target = open(files[kk[0]], 'w')
    target.write("Year Hour")
    for ii in I_const:
        target.write(" %d" % (ii))
    target.write("\n")
    target.close()


for jj in year:
    print(jj)
    fname = run_directory + f + '_%d_SAL' % jj
    R = rma()
    R.open(fname)
    while R.next():
        time.append(R.time)
        for kk in list(enumerate(node)):
            target = open(files[kk[0]], 'a')
            target.write("%i %f" % (jj, time[-1]))
            for ii in I_const:
                target.write(" %f" % (R.constit[ii][kk[1]]))
            target.write("\n")
            target.close()
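# The <f>_<node>_.txt files written above follow the same layout as the
# _WQ.txt files from the companion extraction script: year, decimal model
# hour, then one value per constituent index in I_const.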

#for kk in node:
#    filename1 = 'DataCR046_%d_.txt' % kk

#
@@ -0,0 +1,391 @@

# coding: utf-8

# In[64]:

# Set working directory where the python code is located and where results are to be stored
import os
os.chdir('C:/Users/z5025317/OneDrive - UNSW/Hunter_CC_Modeling/08_Results/RMA_result_explorer')

import struct
import matplotlib.pyplot as plt
import math
from py_rmatools import rma

get_ipython().magic('matplotlib qt')
plt.rcParams.update({'figure.max_open_warning': 0})


f = 'HCC010'
#set run directory where RMA results are located
run_directory = 'I:/ENG/WRL/WRL1/Project wrl2016032/Hunter_CC_Modeling/Completed_Simulations/'

#set directory path for output files
output_directory = 'Output/' + f + '/'
#output_directory = 'J:/Project wrl2016032/NARCLIM_Raw_Data/Extracted'
if not os.path.exists(output_directory):
    os.makedirs(output_directory)
    print('-------------------------------------------')
    print("output directory folder didn't exist and was generated")
    print('-------------------------------------------')


# In[65]:

meshFilename = 'hcc002.rm1'
channelWidth = 100
RMAfilename = run_directory + f + '/' + f + '_1995_SAL'

#If RMA11
constNum = [1]


# In[66]:

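# The helpers below reshape the RMA mesh so the results can be written as a
# triangle-only Serafin mesh: square2Triangle splits quadrilateral elements
# into two triangles, and oneD2triangle widens each 1D channel element into a
# thin 2D ribbon by duplicating its two nodes offset by channelWidth along the
# element normal; node2nodevalue maps each duplicated node back to the
# original node whose results it should carry.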
def isElementOneD(nodelist):
    if len(nodelist) == 2:
        return True
    return False

def isElementSquare(nodelist):
    if len(nodelist) == 4:
        return True
    return False

def square2Triangle(ElementNum):
    nodelist = ElementDict[ElementNum]
    if isElementSquare(nodelist):
        ElementDict[ElementNum] = [nodelist[0], nodelist[1], nodelist[2]]
        ElementList.append(max(ElementList) + 1)
        ElementDict[ElementList[-1]] = [nodelist[0], nodelist[2], nodelist[3]]

def oneD2triangle(ElementNum):
    if isElementOneD(ElementDict[ElementNum]):
        nAe = ElementDict[ElementNum][0]  #nAe: node A (existing)
        nBe = ElementDict[ElementNum][1]

        if not nAe in node1Dduplicate: node1Dduplicate[nAe] = []
        if not nBe in node1Dduplicate: node1Dduplicate[nBe] = []

        xA = nodeDict[nAe][0]
        xB = nodeDict[nBe][0]
        yA = nodeDict[nAe][1]
        yB = nodeDict[nBe][1]

        normalVec = [-(yB - yA), (xB - xA)]
        dist = math.sqrt(normalVec[0]**2 + normalVec[1]**2)
        normalVec[0] = normalVec[0] / dist
        normalVec[1] = normalVec[1] / dist
        xA2 = xA + channelWidth * normalVec[0]
        xB2 = xB + channelWidth * normalVec[0]
        yA2 = yA + channelWidth * normalVec[1]
        yB2 = yB + channelWidth * normalVec[1]

        nA = max(NodeList) + 1
        nB = max(NodeList) + 2

        node1Dduplicate[nAe].append(nA)
        node1Dduplicate[nBe].append(nB)

        node2nodevalue[nA] = nAe
        node2nodevalue[nB] = nBe

        NodeList.append(nA)
        NodeList.append(nB)
        nodeDict[nA] = [xA2, yA2, -1.01]
        nodeDict[nB] = [xB2, yB2, -1.01]

        newEle = max(ElementList) + 1
        ElementList.append(newEle)
        ElementDict[ElementNum] = [nAe, nA, nBe]
        ElementDict[newEle] = [nA, nB, nBe]

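# Both writer functions below emit a Serafin (SELAFIN) file: every logical
# record is framed by a 4-byte big-endian byte count before and after its
# payload (the paired struct.pack(">l", ...) calls), in the style of Fortran
# unformatted sequential output: title, variable names, mesh connectivity and
# node coordinates first, then one time stamp plus one values block per
# variable for each timestep.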
def RMA11toSerafin():
    f = open('{}.slf'.format(RMAfilename), 'wb')

    f.write(struct.pack(">l", 80))
    str = '{0: >80}'.format('SERAFIN ')
    f.write(str.encode('ascii'))
    f.write(struct.pack(">l", 80))

    f.write(struct.pack(">l", 8))
    f.write(struct.pack(">l", len(constNum)))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 8))

    for c in constName:
        f.write(struct.pack(">l", 32))
        str = '{0: <32}'.format(c)
        f.write(str.encode('ascii'))
        f.write(struct.pack(">l", 32))

    f.write(struct.pack(">l", 40))
    f.write(struct.pack(">l", 1))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 1))
    f.write(struct.pack(">l", 40))

    f.write(struct.pack(">l", 24))
    f.write(struct.pack(">l", R.year))
    f.write(struct.pack(">l", 1))
    f.write(struct.pack(">l", 1))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 24))

    f.write(struct.pack(">l", 16))
    f.write(struct.pack(">l", len(ElementList)))
    f.write(struct.pack(">l", len(NodeList)))
    f.write(struct.pack(">l", 3))
    f.write(struct.pack(">l", 1))
    f.write(struct.pack(">l", 16))

    f.write(struct.pack(">l", len(ElementList)*3*4))
    for el in ElementList:
        for nd in ElementDict[el]:
            f.write(struct.pack(">l", nodeOrdered[nd]))
    f.write(struct.pack(">l", len(ElementList)*3*4))

    f.write(struct.pack(">l", len(NodeList)))
    for i in range(0, len(NodeList)):
        f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", len(NodeList)))

    f.write(struct.pack(">l", len(NodeList)*4))
    for key, value in nodeDict.items():
        f.write(struct.pack(">f", value[0]))
    f.write(struct.pack(">l", len(NodeList)*4))

    f.write(struct.pack(">l", len(NodeList)*4))
    for key, value in nodeDict.items():
        f.write(struct.pack(">f", value[1]))
    f.write(struct.pack(">l", len(NodeList)*4))

    while R.next():
        #for i in range(3):
        f.write(struct.pack(">l", 4))
        f.write(struct.pack(">f", R.time * 3600))
        f.write(struct.pack(">l", 4))

        for c in constNum:
            f.write(struct.pack(">l", len(NodeList)*4))
            for key, value in nodeDict.items():
                if key in node2nodevalue.keys():
                    f.write(struct.pack(">f", R.constit[c][node2nodevalue[key]]))
                else:
                    f.write(struct.pack(">f", R.constit[c][key]))

            f.write(struct.pack(">l", len(NodeList)*4))

        R.next()

    f.close()

def RMA2toSerafin():
    f = open('{}.slf'.format(RMAfilename), 'wb')

    f.write(struct.pack(">l", 80))
    str = '{0: >80}'.format('SERAFIN ')
    f.write(str.encode('ascii'))
    f.write(struct.pack(">l", 80))

    f.write(struct.pack(">l", 8))
    f.write(struct.pack(">l", len(constName)))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 8))

    for c in constName:
        f.write(struct.pack(">l", 32))
        str = '{0: <32}'.format(c)
        f.write(str.encode('ascii'))
        f.write(struct.pack(">l", 32))

    f.write(struct.pack(">l", 40))
    f.write(struct.pack(">l", 1))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 1))
    f.write(struct.pack(">l", 40))

    f.write(struct.pack(">l", 24))
    f.write(struct.pack(">l", R.year))
    f.write(struct.pack(">l", 1))
    f.write(struct.pack(">l", 1))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", 24))

    f.write(struct.pack(">l", 16))
    f.write(struct.pack(">l", len(ElementList)))
    f.write(struct.pack(">l", len(NodeList)))
    f.write(struct.pack(">l", 3))
    f.write(struct.pack(">l", 1))
    f.write(struct.pack(">l", 16))

    f.write(struct.pack(">l", len(ElementList)*3*4))
    for el in ElementList:
        for nd in ElementDict[el]:
            f.write(struct.pack(">l", nodeOrdered[nd]))
    f.write(struct.pack(">l", len(ElementList)*3*4))

    f.write(struct.pack(">l", len(NodeList)))
    for i in range(0, len(NodeList)):
        f.write(struct.pack(">l", 0))
    f.write(struct.pack(">l", len(NodeList)))

    f.write(struct.pack(">l", len(NodeList)*4))
    for key, value in nodeDict.items():
        f.write(struct.pack(">f", value[0]))
    f.write(struct.pack(">l", len(NodeList)*4))

    f.write(struct.pack(">l", len(NodeList)*4))
    for key, value in nodeDict.items():
        f.write(struct.pack(">f", value[1]))
    f.write(struct.pack(">l", len(NodeList)*4))

    while R.next():

        f.write(struct.pack(">l", 4))
        f.write(struct.pack(">f", R.time * 3600))
        f.write(struct.pack(">l", 4))

        f.write(struct.pack(">l", len(NodeList)*4))
        for key, value in nodeDict.items():
            if key in node2nodevalue.keys():
                f.write(struct.pack(">f", R.xvel[node2nodevalue[key]]))
            else:
                f.write(struct.pack(">f", R.xvel[key]))
        f.write(struct.pack(">l", len(NodeList)*4))

        f.write(struct.pack(">l", len(NodeList)*4))
        for key, value in nodeDict.items():
            if key in node2nodevalue.keys():
                f.write(struct.pack(">f", R.yvel[node2nodevalue[key]]))
            else:
                f.write(struct.pack(">f", R.yvel[key]))
        f.write(struct.pack(">l", len(NodeList)*4))

        f.write(struct.pack(">l", len(NodeList)*4))
        for key, value in nodeDict.items():
            if key in node2nodevalue.keys():
                f.write(struct.pack(">f", R.depth[node2nodevalue[key]]))
            else:
                f.write(struct.pack(">f", R.depth[key]))
        f.write(struct.pack(">l", len(NodeList)*4))

        f.write(struct.pack(">l", len(NodeList)*4))
        for key, value in nodeDict.items():
            if key in node2nodevalue.keys():
                f.write(struct.pack(">f", R.elevation[node2nodevalue[key]]))
            else:
                f.write(struct.pack(">f", R.elevation[key]))
        f.write(struct.pack(">l", len(NodeList)*4))

        R.next()

    f.close()

# In[67]:

#Read the mesh file and extract nodes (except mid-side nodes) and elements - plus convert 1D elements to 2D for visualisation
NodeList = []
ElementList = []
ElementDict = {}
nodeDict = {}
node1Dduplicate = {}  #Original Number: List of Duplicates
node2nodevalue = {}   #link between the node number and the node value to use
                      #(e.g. if node 10 is a 1D node, 10 is not a duplicate, so {10: 10};
                      #but node 2050 (a duplicate of 10 from the 1D-to-2D conversion) takes
                      #the value of the original node, so we might have {2050: 10})

with open(meshFilename) as f:
    line = f.readline()
    line = f.readline()
    line = f.readline()
    line = f.readline()

    cpt = 1
    while line and line != ' 9999\n':
        temp = line.split()
        ElementDict[int(temp[0])] = [int(temp[i]) for i in range(1, 9, 2) if temp[i] != '0' and int(temp[9]) < 100]
        ElementList.append(int(temp[0]))
        line = f.readline()

    for key, value in ElementDict.items():
        NodeList.extend(value)

    NodeList = list(set(NodeList))

    line = f.readline()
    while line and line != ' 9999\n':
        temp = line.split()
        if int(temp[0]) in NodeList:
            nodeDict[int(temp[0])] = [float(temp[1]), float(temp[2]), float(temp[3])]
        line = f.readline()


for e in ElementList:
    oneD2triangle(e)
    square2Triangle(e)

for key in list(ElementDict):  #Remove special element 902.....
    if len(ElementDict[key]) != 3:
        print(key, ElementDict[key])
        ElementDict.pop(key)
        ElementList.remove(key)

nodeOrdered = {}
cpt = 1
for key, value in nodeDict.items():
    nodeOrdered[key] = cpt
    cpt += 1

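# nodeOrdered maps each RMA node number to a consecutive 1-based index in the
# order nodeDict iterates; the Serafin connectivity table written by the
# functions above uses these renumbered indices.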

# # Open and Read First Step of the RMA File and Save a Serafin

# In[72]:

R = rma()
R.open(RMAfilename)
R.next()

if R.type == b'RMA11     ':
    constName = []
    for c in constNum:
        constName.append(R.constit_name[c].decode("utf-8"))
    RMA11toSerafin()

if R.type == b'RMA2      ':
    constName = ['X-VEL', 'Y-VEL', 'DEPTH', 'FREE SURFACE']
    RMA2toSerafin()

@@ -0,0 +1,163 @@
from struct import *
from pylab import *
import sys

# Updated 10/11/15 BMM : Added mesh.
# Updated 10/11/15 MD : Added dictionary initialisation in mesh.

class rma:
    def __init__(self):
        self.file = 'not opened yet'
        # May want to do some other things here later

    def open(self, filename):
        self.file = open(('%s.rma' % (filename)), 'rb')
        self.header = self.file.read(1000)
        self.type = self.header[0:10]
        self.title = self.header[100:172]
        self.geometry = self.header[200:300]
        self.num_nodes = int(self.header[40:50])
        self.num_elements = int(self.header[50:60])
        if self.type == b'RMA11     ':
            self.num_constits = int(self.header[60:70])
            if len(self.header[80:90].strip()) == 0:
                self.num_sedlayers = 0
            else:
                self.num_sedlayers = int(self.header[80:90])
            self.constit_name = []
            self.constit_name.append("NULL")
            i = 1
            #print(self.num_constits)
            while i <= self.num_constits:
                #print(i, self.num_constits, i <= self.num_constits)
                self.constit_name.append(self.header[300+(i-1)*8:308+(i-1)*8])
                if self.header[300+(i-1)*8:308+(i-1)*8] == " SSED ":
                    self.constit_name.append(" BSHEAR")
                    self.constit_name.append("BedThick")
                    j = 1
                    while j <= self.num_sedlayers:
                        self.constit_name.append(" L%dThick" % j)
                        j = j + 1
                i = i + 1
            #if self.num_sedlayers > 0:
            #    self.num_constits=self.num_constits+2+self.num_sedlayers

    # Header format in RMA2

    # HEADER(1:10)  ='RMA2 '
    # HEADER(11:20) =DATEC = Date of run
    # HEADER(21:30) =TIMEC = Time of run
    # HEADER(31:40) =ZONEC = Time zone
    # HEADER(41:50) =NP = Number of nodes
    # HEADER(51:60) =NE = Number of elements
    # HEADER(101:172)=TITLE = Title line
    # HEADER(201:300)=FNAME = File name of binary geometry

    # Header format in RMA11
    # HEADER(1:10)   - Model
    # HEADER(11:20)  - DATEC of model runtime
    # HEADER(21:30)  - TIMEC of model runtime
    # HEADER(31:40)  - ZONEC of model runtime
    # HEADER(41:90)  - '(5I10)' NP,NE,NQAL,IGS,LGS or '(5I10)' NP,NE,NQAL,ISEDS,NLAYT
    # HEADER(101:172) - TITLE
    # HEADER(301:540) - '(30A8)' CLABL
    # HEADER(201:248) - FNAMD directory of the geometry
    # HEADER(251:298) - FNAM2 filename of the geometry

    def mesh(self, filename):
        self.meshfile = open(('%s.rm1' % (filename)), 'r')
        l = self.meshfile.readline()
        while l[0:5] != ' 9999' and len(l) != 5:
            l = self.meshfile.readline()
        l = self.meshfile.readline()
        self.node_e = {}
        self.node_n = {}
        self.node_z = {}
        while l[0:10] != ' 9999' and len(l) != 10:
            ll = l.split()
            self.node_e[ll[0]] = ll[1]
            self.node_n[ll[0]] = ll[2]
            self.node_z[ll[0]] = ll[3]
            l = self.meshfile.readline()

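    # Note on the binary layout handled by next(): an RMA2 timestep record is
    # TETT (float time) plus NP and IYRR (ints), followed by 5 floats per node
    # (three velocity components, water-surface elevation and VDOT), which is
    # why 12 bytes are read for the record header and 20*NP bytes for the data.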
    def next(self):
        # This reads the next timestep and populates the variables.
        # Reset all of the variables
        self.time = 0.0
        self.year = 0
        self.xvel = []
        self.yvel = []
        self.zvel = []
        self.depth = []
        self.elevation = []

        # Add an entry to fill position 0. Important since RMA files number nodes from 1.
        self.xvel.append(-999)
        self.yvel.append(-999)
        self.zvel.append(-999)
        self.depth.append(-999)
        self.elevation.append(-999)

        if self.type == b'RMA2      ':
            # WRITE(IRMAFM) TETT,NP,IYRR,((VEL(J,K),J=1,3),K=1,NP),(WSEL(J),J=1,NP),(VDOT(3,K),K=1,NP)
            t = self.file.read(12)
            if t:
                a = unpack('fii', t)
                self.time = a[0]
                np = int(a[1])
                self.year = a[2]
                if (np != self.num_nodes):
                    print("Warning - NP (%d) on this timestep does not match header (%d)" % (np, self.num_nodes))
                b = unpack('%df' % 5*np, self.file.read(20*np))
                i = 1
                while i <= np:
                    self.xvel.append(b[0+(i-1)*3])
                    self.yvel.append(b[1+(i-1)*3])
                    self.depth.append(b[2+(i-1)*3])
                    self.elevation.append(b[np*3+(i-1)])
                    i = i + 1

        if self.type == b'RMA11     ':
            self.constit = []
            c = 0
            # Start at zero to leave an empty array space. Constits numbered 1 .. num_constits
            while c <= self.num_constits:
                self.constit.append([])
                # Put a null into the 0 position so that RMA node numbers line up with array positions.
                self.constit[c].append(-999)
                c = c + 1
            # READ(file1,END=100) TETT1,NQAL,NP,IYRR, ((VEL(K,J),J=1,NP),K=1,3), (wd(j),j=1,np), (wsel(j),j=1,np), ((TCON1(K,J),J=1,NP),K=1,NQAL-5)
            t = self.file.read(16)
            if t:
                a = unpack('fiii', t)
                self.time = a[0]
                nqal = int(a[1])
                np = int(a[2])
                self.year = a[3]
                if ((nqal-5) != (self.num_constits)):
                    print("Warning - NQAL-5 (%d) on this timestep does not match header (%d)" % (nqal-5, self.num_constits))
                if (np != self.num_nodes):
                    print("Warning - NP (%d) on this timestep does not match header (%d)" % (np, self.num_nodes))
                b = unpack('%df' % nqal*np, self.file.read(4*nqal*np))
                i = 1
                while i <= np:
                    self.xvel.append(b[0+(i-1)*3])
                    self.yvel.append(b[1+(i-1)*3])
                    self.zvel.append(b[2+(i-1)*3])
                    self.depth.append(b[np*3+(i-1)])
                    self.elevation.append(b[np*4+(i-1)])
                    c = 1
                    while c <= self.num_constits:
                        self.constit[c].append(b[np*((c-1)+5)+(i-1)])
                        c = c + 1
                    i = i + 1

        if len(self.xvel) == 1:
            # Note that this is a 1 now as we've filled the zero array position
            return False
        else:
            return True

@@ -0,0 +1,199 @@
from struct import *
from pylab import *
import sys


# Updated 10/11/15 BMM : Added mesh.
# Updated 10/11/15 MD : Added dictionary initialisation in mesh.

class rma:
    def __init__(self):
        self.file = 'not opened yet'

        # May want to do some other things here later

    def open(self, filename):
        self.file = open(('%s.rma' % (filename)), 'rb')
        self.header = self.file.read(1000).decode("utf-8")
        self.type = self.header[0:10]
        self.title = self.header[100:172]
        self.geometry = self.header[200:300]
        self.num_nodes = int(self.header[40:50])
        self.num_elements = int(self.header[50:60])
        if self.type == 'RMA11     ':
            self.num_constits = int(self.header[60:70])
            if len(self.header[80:90].strip()) == 0:
                self.num_sedlayers = 0
            else:
                self.num_sedlayers = int(self.header[80:90])
            self.constit_name = []
            self.constit_name.append("NULL")
            i = 1
            print(self.num_constits)
            print(self.header[300:1000])
            while i <= self.num_constits:
                # print self.header[300:400]
                self.constit_name.append(self.header[300 + (i - 1) * 8:308 + (i - 1) * 8])
                if self.header[300 + (i - 1) * 8:308 + (i - 1) * 8] == " SSED ":
                    self.constit_name.append(" BSHEAR")
                    self.constit_name.append("BedThick")
                    j = 1
                    print(self.num_sedlayers)
                    while j <= self.num_sedlayers:
                        self.constit_name.append(" L%dThick" % j)
                        j = j + 1
                i = i + 1
                # print i

        # print self.num_constits
        # print i
        # if self.num_sedlayers > 0:
        #     self.num_constits=self.num_constits+2+self.num_sedlayers

    # Header format in RMA2

    # HEADER(1:10)  ='RMA2 '
    # HEADER(11:20) =DATEC = Date of run
    # HEADER(21:30) =TIMEC = Time of run
    # HEADER(31:40) =ZONEC = Time zone
    # HEADER(41:50) =NP = Number of nodes
    # HEADER(51:60) =NE = Number of elements
    # HEADER(101:172)=TITLE = Title line
    # HEADER(201:300)=FNAME = File name of binary geometry

    # Header format in RMA11
    # HEADER(1:10)   - Model
    # HEADER(11:20)  - DATEC of model runtime
    # HEADER(21:30)  - TIMEC of model runtime
    # HEADER(31:40)  - ZONEC of model runtime
    # HEADER(41:90)  - '(5I10)' NP,NE,NQAL,IGS,LGS or '(5I10)' NP,NE,NQAL,ISEDS,NLAYT
    # HEADER(101:172) - TITLE
    # HEADER(301:540) - '(30A8)' CLABL
    # HEADER(201:248) - FNAMD directory of the geometry
    # HEADER(251:298) - FNAM2 filename of the geometry

    def mesh(self, filename):
        self.meshfile = open(('%s.rm1' % (filename)), 'r')
        l = self.meshfile.readline()

        while l[0:5] != ' 9999' and len(l) != 5:
            l = self.meshfile.readline()
        l = self.meshfile.readline()
        self.node_e = {}
        self.node_n = {}
        self.node_z = {}
        while l[0:10] != ' 9999' and len(l) != 10:
            ll = l.split()
            self.node_e[ll[0]] = ll[1]
            self.node_n[ll[0]] = ll[2]
            self.node_z[ll[0]] = ll[3]
            l = self.meshfile.readline()

    def next(self):
        # This reads the next timestep and populates the variables.
        # Reset all of the variables
        self.time = 0.0
        self.year = 0
        self.xvel = []
        self.yvel = []
        self.zvel = []
        self.depth = []
        self.elevation = []
        self.temperature = []
        self.salinity = []
        self.sussed = []

        # Add an entry to fill position 0. Important since RMA files number nodes from 1.
        self.xvel.append(-999)
        self.yvel.append(-999)
        self.zvel.append(-999)
        self.depth.append(-999)
        self.elevation.append(-999)
        self.temperature.append(-999)
        self.salinity.append(-999)
        self.sussed.append(-999)

        if self.type == 'RMA2      ':
            # WRITE(IRMAFM) TETT,NP,IYRR,((VEL(J,K),J=1,3),K=1,NP),(WSEL(J),J=1,NP),(VDOT(3,K),K=1,NP)
            t = self.file.read(12)
            if t:
                a = unpack('fii', t)
                self.time = a[0]
                np = int(a[1])
                self.year = a[2]
                if (np != self.num_nodes):
                    print("Warning - NP (%d) on this timestep does not match header (%d)" % (np, self.num_nodes))
                b = unpack('%df' % 5 * np, self.file.read(20 * np))
                tempA = [(x - 1) * 3 for x in range(1, np + 1)]
                tempB = [np * 3 + (x - 1) for x in range(1, np + 1)]
                self.xvel.extend([b[i] for i in tempA])
                self.yvel.extend([b[i + 1] for i in tempA])
                self.depth.extend([b[i + 2] for i in tempA])
                self.elevation.extend([b[i] for i in tempB])

        if self.type == 'RMA11     ':
            self.constit = []
            c = 0
            # Start at zero to leave an empty array space. Constits numbered 1 .. num_constits
            while c <= self.num_constits:
                self.constit.append([])
                # Put a null into the 0 position so that RMA node numbers line up with array positions.
                self.constit[c].append(-999)
                c = c + 1
            # READ(file1,END=100) TETT1,NQAL,NP,IYRR, ((VEL(K,J),J=1,NP),K=1,3), (wd(j),j=1,np), (wsel(j),j=1,np), ((TCON1(K,J),J=1,NP),K=1,NQAL-5)
            t = self.file.read(16)
            if t:
                a = unpack('fiii', t)
                self.time = a[0]
                nqal = int(a[1])
                np = int(a[2])
                self.year = a[3]
                if ((nqal - 5) != (self.num_constits)):
                    print("Warning - NQAL-5 (%d) on this timestep does not match header (%d)" % (
                        nqal - 5, self.num_constits))
                if (np != self.num_nodes):
                    print("Warning - NP (%d) on this timestep does not match header (%d)" % (np, self.num_nodes))
                b = unpack('%df' % nqal * np, self.file.read(4 * nqal * np))

                tempA = [(x - 1) * 3 for x in range(1, np + 1)]
                tempB = [np * 3 + (x - 1) for x in range(1, np + 1)]
                tempC = [np * 4 + (x - 1) for x in range(1, np + 1)]
                self.xvel.extend([b[i] for i in tempA])
                self.yvel.extend([b[i + 1] for i in tempA])
                self.zvel.extend([b[i + 2] for i in tempA])
                self.depth.extend([b[i] for i in tempB])
                self.elevation.extend([b[i] for i in tempC])
                for c in range(1, self.num_constits + 1):
                    self.constit[c].extend([b[np * ((c - 1) + 5) + (x - 1)] for x in range(1, np + 1)])

        if self.type == 'RMA10     ':
            # WRITE(IRMAFM) TETT,NP,NDF,NE,IYRR,((VSING(K,J),K=1,NDF),VVEL(J),WSLL(J),J=1,NP),(DFCT(J),J=1,NE),(VSING(7,J),J=1,NP)
            # WRITE(IRMAFM) TETT,NP,IYRR,((VEL(J,K),J=1,3),K=1,NP),(WSEL(J),J=1,NP),(VDOT(3,K),K=1,NP)
            t = self.file.read(20)
            if t:
                a = unpack('fiiii', t)
                self.time = a[0]
                np = a[1]
                ndf = 6
                ne = a[3]
                self.year = a[4]
                if (np != self.num_nodes):
                    print("Warning - NP1 (%d) on this timestep does not match header (%d)" % (np, self.num_nodes))
                tempRead = np * (3 + ndf) + ne
                b = unpack('%df' % tempRead, self.file.read(4 * tempRead))
                i = 1
                while i <= np:
                    self.xvel.append(b[0 + (i - 1) * 8])
                    self.yvel.append(b[1 + (i - 1) * 8])
                    self.depth.append(b[2 + (i - 1) * 8])
                    self.salinity.append(b[3 + (i - 1) * 8])
                    self.temperature.append(b[4 + (i - 1) * 8])
                    self.sussed.append(b[5 + (i - 1) * 8])
                    self.zvel.append(b[6 + (i - 1) * 8])
                    self.elevation.append(b[7 + (i - 1) * 8])
                    i = i + 1

        if len(self.xvel) == 1:
            # Note that this is a 1 now as we've filled the zero array position
            return False
        else:
            return True

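# Minimal usage sketch (illustrative only, not part of the original module):
# the extraction scripts in this commit drive the class in this pattern. The
# path below is a placeholder for an RMA result basename (without the '.rma'
# extension).
if __name__ == '__main__':
    demo = rma()
    demo.open('path/to/HCC010_1995')
    while demo.next():
        # node numbering starts at 1, matching the RMA mesh numbering
        print(demo.time, demo.depth[1], demo.xvel[1], demo.yvel[1])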