Compare commits
No commits in common. '9802fe1b41aa6e75f7c5d6123219d037cee3151d' and '46c1022dc9e0a57a76a75058e531be102a1e3e21' have entirely different histories.
@@ -1,95 +0,0 @@
# Converts shoreline chainages to MGA coordinates
import os
import re
import sys
import ast
import json
import yaml
import argparse
import numpy as np
from scipy import interpolate
import pandas as pd
import matplotlib.pyplot as plt
from shapely.geometry import LineString
import geopandas as gpd
from tqdm import tqdm

sys.path.insert(0, '../lidar/')
from nielsen import Gridder  # noqa
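# NOTE: Gridder lives in the local ../lidar/nielsen.py module; it is assumed
# here to interpolate along each surveyed profile so that a chainage (distance
# along the profile line) can be converted back to an easting/northing pair
# via from_chainage().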

MGA55 = 28355  # GDA94, MGA Zone 55
BEACH = 'Roches Beach'

# Load profile data
xlsx_path = '../lidar/Profiles 1 to 12 2019 DEM.xlsx'
workbook = pd.ExcelFile(xlsx_path)

with open('../lidar/settings.json', 'r') as f:
    sheets = json.loads(f.read())
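# settings.json is assumed to map each Excel sheet name to its survey block
# and profile number, since sheets[s]['block'] and sheets[s]['profile'] are
# read below.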

# Create grids to convert chainages to eastings and northings
grids = {}
for s in sheets:
    names = ['Chainage', 'Easting', 'Northing', 'Elevation']
    p = workbook.parse(s, names=names)
    k = (sheets[s]['block'], sheets[s]['profile'])
    grids[k] = g = Gridder(chainage=p['Chainage'],
                           elevation=p['Elevation'],
                           easting=p['Easting'],
                           northing=p['Northing'])
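# Each Gridder is keyed by (block, profile); the probabilistic recession CSVs
# reference profiles by the same pair, so this dict is the lookup table used
# in the conversion loop below.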

input_dir = 'output_csv'
output_dir = 'output_shp'

master = gpd.GeoDataFrame(columns=['name', 'year', 'ep', 'type'])

# Load probabilistic recession chainages
file_list = [f for f in os.listdir(input_dir) if f.startswith(BEACH)]
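# File names are expected to contain a four-digit year and a scenario code
# beginning with 'Z'; both are pulled out of the name by the regex below.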

rows = []
for f in file_list:
    info = re.search(r'(\d{4}) (Z\w+)', f).groups()
    data = pd.read_csv(os.path.join(input_dir, f))

    ep_cols = [c for c in data.columns if c.startswith('ep_')]
    for i, d in data.iterrows():
        row = {}
        # row['beach'] = BEACH
        row['block'] = d['block']
        row['profile'] = d['profile']
        row['year'] = int(info[0])
        row['type'] = info[1]
        for e in ep_cols:
            row['ep'] = e[3:]
            ch = d[e]
            g = grids[d['block'], d['profile']]  # Get correct gridder
            east, north = g.from_chainage(ch)
            row['easting'] = east.round(3)
            row['northing'] = north.round(3)
            rows.append(dict(row))  # copy: row is reused for the next ep column

# DataFrame.append() was removed in pandas 2, so build the frame in one pass
df = pd.DataFrame(rows)

# Convert floats to int
for col in ['block', 'profile', 'year']:
    df[col] = df[col].astype(int)

df = df.set_index(['year', 'type', 'ep', 'block', 'profile']).sort_index()
years = df.index.unique(level='year')
types = df.index.unique(level='type')
eps = df.index.unique(level='ep')
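# With the index sorted, df.loc[year, type, ep] yields the easting/northing
# pair for every (block, profile) in that scenario, in block/profile order,
# which becomes the vertex order of each hazard line.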

lines = []
for y in years:
    for t in types:
        for e in eps:
            line = {}
            line['year'] = y
            line['type'] = t
            line['ep'] = str(e)
            line['geometry'] = LineString(df.loc[y, t, e].to_numpy())
            lines.append(line)
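
# Export one line per scenario; set_crs() only declares the CRS (EPSG:28355,
# GDA94 / MGA zone 55) without reprojecting, since the coordinates are already
# in that system.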
gdf = gpd.GeoDataFrame(lines).set_geometry('geometry')
gdf = gdf.set_crs(MGA55)
gdf.to_file(os.path.join(output_dir, 'hazard-lines.shp'),
            driver='ESRI Shapefile')