# python 3.5
# requires LAStools to be installed (with the appropriate license).
# Note that LAStools requires no spaces in file names.

# 2017088_las_manipulation should have been run previously, so that the input
# las already has the buildings and vegetation removed.
# Note that the neilson_volumes script must be in the same folder.

# this script will:
# - crop to a given polygon (crop away the swash zone)
# - extract values along a predefined profile
# - do the volume analysis
# - export pngs of the surveys

########################### IMPORTS ###########################################
import os
import io
import subprocess
import pandas as pd
import numpy as np
import neilson_volumes
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import datetime
import xlsxwriter
import math
from cycler import cycler

###############################################################################
########################## FIXED INPUTS #######################################
######### UNCOMMENT THIS SECTION IF YOU WANT TO DEFINE EACH INPUT INDIVIDUALLY #########
# path to LAStools (NOTE: THERE CAN BE NO SPACES)
path_2_lastools='C:/ProgramData/chocolatey'
# input_file=r"J:\Project\wrl2017088 Central Coast Council Aerial Survey and Coastal Analysis\04_Working\Python\Survey 2\Parameter Files\las outputs survey2.xlsx"
input_file='Parameter Files/las outputs survey2.xlsx'
# input_file=r"J:\Project\wrl2017088 Central Coast Council Aerial Survey and Coastal Analysis\04_Working\Python\Survey 2\Parameter Files\las outputs survey2_V2.xlsx"
##############################

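# The input_file workbook is expected to have a "PARAMS" sheet; based on the
# run code at the bottom of this script, each row must provide at least these
# columns: Beach, SURVEY DATE, INPUT LAS4, CROP SWASH POLY, HEATMAP CROP POLY,
# FINAL LAS, HEATMAP LAS, ZONE MGA, OUTPUT POLY NAME, PATH TO OUTPUT,
# OUTPUT RASTER, INPUT CSV, TMP CSV, LL FILE, OUT CSV LOC, GRAPH LOC,
# VOLUME OUTPUT, TEMP DIR, INTERIM DIR.
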
############################### SUB ROUTINES ##################################
def check_output(command, console):
    # run an external command; if console is True let it write straight to
    # the console, otherwise capture stdout/stderr
    if console:
        process = subprocess.Popen(command)
    else:
        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   universal_newlines=True)
    output, error = process.communicate()
    returncode = process.poll()

    return returncode, output

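# Illustrative usage (commented out; the las file name is hypothetical,
# lasinfo is a standard LAStools binary):
#     rc, out = check_output(path_2_lastools + '\\bin\\lasinfo -i example.las',
#                            False)
#     if rc != 0:
#         print(out)
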
def crop_las(las, shp, output, lastools_loc):
    # output is the full path and filename (inc extension) to write to
    path_2_lasclip=lastools_loc+"\\bin\\lasclip"

    command="%s -i %s -poly %s -o %s" % (path_2_lasclip, las, shp, output)

    returncode,output = check_output(command, False)

    if returncode != 0:
        print("Error. lasclip failed on %s" % os.path.basename(shp).split('.')[0])
    else:
        return None

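# Illustrative usage (commented out; file names are hypothetical):
#     crop_las('survey.las', 'swash_crop.shp', 'survey_cropped.las',
#              path_2_lastools)
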
def las_boundary(las, crop_poly_name, path_2_crop_polygon, lastools_loc, zone):
    # derive the extent of the las as a shapefile, and write a matching .prj
    # so the polygon is georeferenced (GDA94 / MGA zone 55, 56 or 57)
    path_2_lasboundary=lastools_loc+"\\bin\\lasboundary"
    fname=crop_poly_name
    prjfname="%s%s.prj" % (path_2_crop_polygon, fname)
    path_2_crop_poly='%s%s.shp' % (path_2_crop_polygon, fname)
    command="%s -i %s -o %s" % (path_2_lasboundary, las, path_2_crop_poly)

    prj=open(prjfname, 'w')
    if zone==56:
        prj.write('PROJCS["GDA_1994_MGA_Zone_56",GEOGCS["GCS_GDA_1994",DATUM["D_GDA_1994",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Transverse_Mercator"],PARAMETER["False_Easting",500000.0],PARAMETER["False_Northing",10000000.0],PARAMETER["Central_Meridian",153.0],PARAMETER["Scale_Factor",0.9996],PARAMETER["Latitude_Of_Origin",0.0],UNIT["Meter",1.0]]')
    elif zone==55:
        prj.write('PROJCS["GDA_1994_MGA_Zone_55",GEOGCS["GCS_GDA_1994",DATUM["D_GDA_1994",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Transverse_Mercator"],PARAMETER["False_Easting",500000.0],PARAMETER["False_Northing",10000000.0],PARAMETER["Central_Meridian",147.0],PARAMETER["Scale_Factor",0.9996],PARAMETER["Latitude_Of_Origin",0.0],UNIT["Meter",1.0]]')
    elif zone==57:
        prj.write('PROJCS["GDA_1994_MGA_Zone_57",GEOGCS["GCS_GDA_1994",DATUM["D_GDA_1994",SPHEROID["GRS_1980",6378137.0,298.257222101]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Transverse_Mercator"],PARAMETER["False_Easting",500000.0],PARAMETER["False_Northing",10000000.0],PARAMETER["Central_Meridian",159.0],PARAMETER["Scale_Factor",0.9996],PARAMETER["Latitude_Of_Origin",0.0],UNIT["Meter",1.0]]')
    prj.close()

    returncode,output = check_output(command, False)

    return None

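# For reference, the central meridians hard-coded in the WKT strings above
# follow the usual UTM/MGA relationship (a property of the projection, not
# something this script computes):
#     central_meridian = 6 * zone - 183
# e.g. 6 * 56 - 183 = 153 for MGA zone 56.
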
def make_raster(las, output, lastools_loc, keep_only_ground=False, step=0.2):
    # note that the keep_only_ground option only rasters points classified as
    # "2" in the lidar (ie ground); this effectively creates a "bare earth dem"
    # note that this should only be used after remove_veg and/or
    # remove_buildings has been run

    path_2_blastdem=lastools_loc+"\\bin\\blast2dem"

    if keep_only_ground==False:
        command="%s -i %s -o %s -step %s" % (path_2_blastdem, las, output, step)
    else:
        command="%s -i %s -o %s -step %s -keep_class 2" % (path_2_blastdem, las, output, step)

    returncode,output2 = check_output(command, False)

    if returncode != 0:
        print("Error. blast2dem failed on %s" % os.path.basename(las).split('.')[0])
    else:
        return None

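# Illustrative usage (commented out; file names, including the raster output
# extension, are hypothetical):
#     make_raster('beach_heatmap.las', 'beach_dem.asc', path_2_lastools,
#                 keep_only_ground=True, step=0.2)
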
def extract_pts(las_in, cp_in, survey_date, beach, keep_only_ground=True):
    """Extract elevations from a las surface based on x and y coordinates.

    Requires lastools in system path.

    Args:
        las_in:            input point cloud (las)
        cp_in:             point coordinates with columns: id, x, y, z (csv)
        survey_date:       survey date string, e.g. '19700101'
        beach:             beach name
        keep_only_ground:  only keep points classified as 'ground' (boolean)

    Returns:
        Dataframe containing input coordinates with extracted elevations
    """

    cmd = ['lascontrol', '-i', las_in, '-cp', cp_in, '-parse', 'sxyz']

    if keep_only_ground:
        cmd += ['-keep_class', '2']

    # Call lastools
    process = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    errcode = process.returncode

    # Handle errors, if detected
    if errcode != 0:
        print("Error. lascontrol failed on {}".format(
            os.path.basename(las_in)))
        print(stderr.decode())

    # Load result into pandas dataframe
    df = pd.read_csv(io.BytesIO(stdout))

    # Tidy up dataframe
    df = df.drop(columns=['diff'])
    df['lidar_z'] = pd.to_numeric(df['lidar_z'], errors='coerce')
    df['Beach'] = beach
    df = df[[
        'Beach', 'ProfileNum', 'Easting', 'Northing', 'Chainage', 'lidar_z'
    ]]

    # Rename columns
    new_names = {
        'ProfileNum': 'Profile',
        'lidar_z': 'Elevation_{}'.format(survey_date),
    }
    df = df.rename(columns=new_names)

    return df

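# Illustrative usage (commented out; file and beach names are hypothetical):
#     df = extract_pts('avoca.las', 'avoca_profiles.csv', '20180101', 'Avoca')
# The result has one row per input point, with columns
# Beach, Profile, Easting, Northing, Chainage, Elevation_20180101.
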
def update_survey_output(df, output_dir):
    """Update survey profile output csv files with current survey.

    Args:
        df:         dataframe containing current survey elevations
        output_dir: directory where csv files are saved

    Returns:
        None
    """
    # Merge current survey with existing data
    profiles = df['Profile'].unique()
    for profile in profiles:
        csv_name = os.path.join(output_dir, profile + '.csv')

        # Extract survey data for current profile
        current_profile = df[df['Profile'] == profile]
        try:
            # Load existing results
            master = pd.read_csv(csv_name)
        except FileNotFoundError:
            master = current_profile.copy()

        # Add (or update) current survey; assign .values so the new column is
        # taken positionally rather than aligned on the differing indexes
        current_survey_col = df.columns[-1]
        master[current_survey_col] = current_profile[current_survey_col].values

        # Export updated results
        master.to_csv(csv_name, index=False)

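# Each profile csv therefore gains one 'Elevation_<survey_date>' column per
# survey, e.g. (column layout only, values hypothetical):
#     Beach,Profile,Easting,Northing,Chainage,Elevation_20170601,Elevation_20180101
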
def plot_profiles(profile_info, profile, output_loc, LL_limit):
    # plot the profile. expects output from CC_split_profile

    YminorLocator=MultipleLocator(0.5)
    XminorLocator=MultipleLocator(5)

    fig,ax=plt.subplots(figsize=(8, 3))

    # one colour per survey date ('info' is metadata, not a survey)
    num_plots=len(profile_info.keys())-1
    colormap = plt.cm.jet
    ax.set_prop_cycle(cycler('color', [colormap(i) for i in np.linspace(0, 0.9, num_plots)]))

    max_y=0
    for date in profile_info.keys():
        if date!='info':
            plt.plot(profile_info[date]['Chainage'], profile_info[date]['Elevation'], label=date)
            try:
                date_max=max([i for i in profile_info[date]['Elevation'] if not pd.isnull(i)])
                if date_max>max_y:
                    max_y=date_max
            except ValueError:
                # max() raises ValueError when every elevation is null
                print("empty elevation section for %s" % date)

    plt.plot([LL_limit,LL_limit], [-1,max_y], 'r--', alpha=0.5, label="Landward Limit")

    plt.xlabel('Chainage (m)',weight='bold')
    plt.ylabel('Elevation (m AHD)',weight='bold')
    plt.legend(loc='upper right', bbox_to_anchor=(1.3,1))
    plt.title(profile)
    plt.rcParams['font.size']=8

    ax.set_ylim([-1,math.ceil(max_y)])

    ax.xaxis.set_minor_locator(XminorLocator)
    ax.yaxis.set_minor_locator(YminorLocator)
    ax.xaxis.grid(True, which='minor', color='k', linestyle='-', alpha=0.3)
    ax.yaxis.grid(True, which='minor', color='k', linestyle='-', alpha=0.3)

    plt.grid(which='major', color='k', linestyle='-')
    today=datetime.datetime.now().date().strftime('%Y%m%d')
    plt.savefig(os.path.join(output_loc, '%s_%s.png' % (today, profile)), bbox_inches='tight', dpi=900)
    plt.clf()

    return None

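# Illustrative usage (commented out; file name and the 60 m landward limit are
# hypothetical, CC_split_profile is defined below):
#     data = CC_split_profile('AVOCA_1.csv')
#     plot_profiles(data, data['info']['Profile'], 'graphs', 60)
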
def CC_split_profile(file2read):
    # read a profile csv and split it into one record per survey date

    file_master=pd.read_csv(file2read)

    beach_original=file_master['Beach'].tolist()
    profile_original=file_master['Profile'].tolist()
    date_original=file_master['Date'].tolist()
    chainage_original=file_master['Chainage'].tolist()
    elevation_original=file_master['Elevation'].tolist()
    easting_original=file_master['Easting'].tolist()
    northing_original=file_master['Northing'].tolist()

    data={}
    i=0

    # add info on the beach and profile number
    data['info']={'Profile':profile_original[0], 'Beach':beach_original[0]}
    date_now=date_original[0]

    while i<len(file_master):
        chainage_tmp=[]
        elevation_tmp=[]
        easting_tmp=[]
        northing_tmp=[]
        # consume consecutive rows that share the current date
        while i<len(file_master) and date_now==date_original[i]:
            chainage_tmp.append(chainage_original[i])
            elevation_tmp.append(elevation_original[i])
            easting_tmp.append(easting_original[i])
            northing_tmp.append(northing_original[i])
            i=i+1

        data[date_now]={'Beach': beach_original[i-1], 'Profile':profile_original[i-1], 'Easting': easting_tmp, 'Northing':northing_tmp, 'Elevation':elevation_tmp, 'Chainage':chainage_tmp}

        if i<len(file_master):
            date_now=date_original[i]

    return data

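# The returned structure is a dict keyed by survey date, plus an 'info' entry,
# e.g. (names and values hypothetical):
#     {'info': {'Profile': 'AVOCA_1', 'Beach': 'AVOCA'},
#      '20180101': {'Beach': 'AVOCA', 'Profile': 'AVOCA_1',
#                   'Easting': [...], 'Northing': [...],
#                   'Elevation': [...], 'Chainage': [...]}}
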
def profile_plots_volume(csv_loc, LL_xlsx, output_xlsx, graph_location):
    # get a list of all csvs, each of which will be analysed
    file_list=[]
    for file in os.listdir(csv_loc):
        if file.endswith(".csv"):
            file_list.append(os.path.join(csv_loc, file))

    # now read the LL file
    LL_limit_file=pd.read_excel(LL_xlsx, 'profile_locations')
    LL_info={}
    for i in range(0, len(LL_limit_file)):
        # make a dictionary that allows you to look up the landward limit
        prof="%s_%s" % (LL_limit_file['Profile'][i].split(" ")[0], LL_limit_file['Profile'][i].split(" ")[-1])

        LL_info[prof]=LL_limit_file['Landward Limit'][i]

    all_dates=[]
    results_volume={}
    for file in file_list:
        # read the profile data - this should have all dates
        profile_data=CC_split_profile(file)
        profile=profile_data['info']['Profile']

        # plot all of the profiles
        print(profile)
        plot_profiles(profile_data, profile, graph_location, LL_info[profile])

        results_volume[profile]={}

        # now go through each date and do a Neilson volume calculation
        for date in profile_data.keys():
            if date!='info':
                if date not in all_dates:
                    all_dates.append(date)

                chainage=profile_data[date]['Chainage']
                elevation=[0 if pd.isnull(profile_data[date]['Elevation'][i]) else profile_data[date]['Elevation'][i] for i in range(0, len(profile_data[date]['Elevation']))]
                LL_limit=LL_info[profile]
                # do a Neilson calculation to get the ZSA volume
                if len(elevation)>2:
                    volume=neilson_volumes.volume_available(chainage, elevation, LL_limit)
                    if volume<0:
                        volume=0
                        print('%s %s has a negative volume available' % (profile, date))
                else:
                    # not enough points available, so don't do the calculation
                    volume=0

                results_volume[profile][date]=volume

    # write an excel sheet which summarises the data
    workbook = xlsxwriter.Workbook(output_xlsx)
    worksheet=workbook.add_worksheet('Volumes')

    row=0
    col=0

    worksheet.write(row, col, 'Profile')
    for date in all_dates:
        col=col+1
        worksheet.write(row, col, date)

    col=0
    row=1

    for prof in results_volume.keys():

        worksheet.write(row, col, prof)

        for date in all_dates:
            col=col+1
            try:
                vol=results_volume[prof][date]
            except KeyError:
                print("error with profile %s on %s" % (prof, date))
                vol=None
            worksheet.write(row, col, vol)
        col=0
        row=row+1

    # the workbook must be closed, otherwise xlsxwriter never writes the file
    workbook.close()

    return results_volume

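# The 'Volumes' sheet written above has one row per profile and one column per
# survey date, e.g. (values hypothetical):
#     Profile  | 20170601 | 20180101
#     AVOCA_1  | 152.3    | 148.9
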
def remove_temp_files(directory):
    # delete every file in the given directory
    for f in os.listdir(directory):
        os.unlink(os.path.join(directory, f))

    return None

###############################################################################
########################### RUN CODE ##########################################
params_file=pd.read_excel(input_file, sheet_name="PARAMS")

for i in range(0, len(params_file)):
    print("Starting to process %s" % params_file['Beach'][i])
    beach=params_file['Beach'][i]
    survey_date=params_file['SURVEY DATE'][i]
    input_las5=params_file['INPUT LAS4'][i]
    crop_swash_poly=params_file['CROP SWASH POLY'][i]
    heatmap_crop_poly=params_file['HEATMAP CROP POLY'][i]
    final_las = params_file['FINAL LAS'][i]
    heatmap_las = params_file['HEATMAP LAS'][i]
    zone_MGA=params_file['ZONE MGA'][i]
    output_poly_name=params_file['OUTPUT POLY NAME'][i]
    path_2_output_poly=params_file['PATH TO OUTPUT'][i]
    output_raster=params_file['OUTPUT RASTER'][i]
    input_csv=params_file['INPUT CSV'][i]
    tmp_csv = params_file['TMP CSV'][i]
    LL_file=params_file['LL FILE'][i]
    csv_loc=params_file['OUT CSV LOC'][i]
    graph_loc = params_file['GRAPH LOC'][i]
    volume_output=params_file['VOLUME OUTPUT'][i]
    tmp_dir=params_file['TEMP DIR'][i]
    int_dir=params_file['INTERIM DIR'][i]

    # crop and get the output las
    crop_las(input_las5, crop_swash_poly, final_las, path_2_lastools)

    # now crop out the heatmap las
    crop_las(final_las, heatmap_crop_poly, heatmap_las, path_2_lastools)

    # create a polygon to crop a raster
    las_boundary(heatmap_las, output_poly_name, path_2_output_poly, path_2_lastools, zone_MGA)
    # make a raster
    make_raster(heatmap_las, output_raster, path_2_lastools, keep_only_ground=True)

    # extract the points and get volumes
    df = extract_pts(final_las, input_csv, survey_date, beach, keep_only_ground=True)
    update_survey_output(df, csv_loc)

    # colourise the point cloud

    # delete the temp files from the tmp_dir and the interim_dir
    remove_temp_files(tmp_dir)
    # remove_temp_files(int_dir)

# note: csv_loc, LL_file, volume_output and graph_loc below carry the values
# from the last row of the PARAMS sheet
print("doing the volume analysis")
test=profile_plots_volume(csv_loc, LL_file, volume_output, graph_loc)