import os
import io
import re
import math
import datetime
import subprocess

import pandas as pd
import numpy as np
import xlsxwriter
from cycler import cycler
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator

import neilson_volumes
from survey_tools import call_lastools, extract_pts, update_survey_output
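
# NOTE: CC_split_profile and plot_profiles are called below but are not
# imported above; they are assumed to be defined elsewhere in this module.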


def profile_plots_volume(csv_loc, LL_xlsx, output_xlsx, graph_location):
    """Plot every profile CSV in csv_loc and summarise Neilson volumes.

    Writes a summary sheet to output_xlsx and returns a dict mapping
    profile name -> {date: volume}.
    """
    # get a list of all CSVs, each of which will be analysed
    file_list = []
    for file in os.listdir(csv_loc):
        if file.endswith(".csv"):
            file_list.append(os.path.join(csv_loc, file))

    # now read the landward limit (LL) file
    LL_limit_file = pd.read_excel(LL_xlsx, sheet_name='profile_locations')
    LL_info = {}
    for i in range(0, len(LL_limit_file)):
        # build a dictionary that allows you to look up the LL by profile
        prof = "%s_%s" % (LL_limit_file['Profile'][i].split(" ")[0],
                          LL_limit_file['Profile'][i].split(" ")[-1])
        LL_info[prof] = LL_limit_file['Landward Limit'][i]
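
    # As a hypothetical example, a 'Profile' cell reading "AVOCA Beach 1"
    # would be keyed here as "AVOCA_1" (first and last space-separated words
    # joined with an underscore).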

    all_dates = []
    results_volume = {}
    for file in file_list:
        # read the profile data - this should have all dates
        profile_data = CC_split_profile(file)
        profile = profile_data['info']['Profile']

        # plot all of the profiles
        print(profile)
        plot_profiles(profile_data, profile, graph_location, LL_info[profile])

        results_volume[profile] = {}

        # now go through each date and do a Neilson volume calculation
        for date in profile_data.keys():
            if date != 'info':
                if date not in all_dates:
                    all_dates.append(date)

                chainage = profile_data[date]['Chainage']
                # replace null elevations with 0 so the volume routine
                # always receives numeric input
                elevation = [0 if pd.isnull(profile_data[date]['Elevation'][i])
                             else profile_data[date]['Elevation'][i]
                             for i in range(0, len(profile_data[date]['Elevation']))]
                LL_limit = LL_info[profile]

                # do a Neilson calculation to get the ZSA volume, but only
                # if there are enough available points
                if len(elevation) > 2:
                    volume = neilson_volumes.volume_available(chainage, elevation, LL_limit)
                    if volume < 0:
                        volume = 0
                        print('%s %s has a negative volume available' % (profile, date))
                else:
                    volume = 0

                results_volume[profile][date] = volume
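
    # At this point results_volume maps profile name -> {date: volume}, and
    # all_dates holds every survey date seen across the profiles; both drive
    # the summary sheet written below.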

    # write an excel sheet which summarises the data
    workbook = xlsxwriter.Workbook(output_xlsx)
    worksheet = workbook.add_worksheet('Volumes')

    # header row: 'Profile' followed by one column per survey date
    row = 0
    col = 0
    worksheet.write(row, col, 'Profile')
    for date in all_dates:
        col = col + 1
        worksheet.write(row, col, date)

    # one row per profile, one volume cell per date
    col = 0
    row = 1
    for prof in results_volume.keys():
        worksheet.write(row, col, prof)
        for date in all_dates:
            col = col + 1
            try:
                vol = results_volume[prof][date]
            except KeyError:
                print("error with profile %s on %s" % (prof, date))
                vol = None
            worksheet.write(row, col, vol)
        col = 0
        row = row + 1

    # the workbook must be closed, otherwise xlsxwriter never writes the
    # file to disk
    workbook.close()

    return results_volume
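
# A minimal usage sketch (the paths below are hypothetical; the landward
# limit workbook is assumed to have a 'profile_locations' sheet with
# 'Profile' and 'Landward Limit' columns):
#
#     volumes = profile_plots_volume(
#         csv_loc='C:/data/profile_csvs',
#         LL_xlsx='C:/data/landward_limits.xlsx',
#         output_xlsx='C:/output/volumes.xlsx',
#         graph_location='C:/output/graphs')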


def remove_temp_files(directory):
    # delete every file in the given directory (assumes it contains only
    # files; os.unlink raises an error on subdirectories)
    for f in os.listdir(directory):
        os.unlink(os.path.join(directory, f))
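
# Example: clear a scratch directory between runs (hypothetical path; note
# this deletes every file in the directory, so only point it at a dedicated
# temp folder):
#
#     remove_temp_files('C:/temp/survey_scratch')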