Compare commits


2 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| Dan Howe | 9802fe1b41 | Add 'csv_to_shp.py' | 3 years ago |
| Dan Howe | 8b1885307c | Update readme | 3 years ago |

@@ -9,6 +9,9 @@ Double-click `anaconda-prompt.bat`
#### 2. Generate ZSA and ZRFC recession tables.
The package required to calculate setbacks based on Nielsen et al. (1992) can be found here:
http://git.wrl.unsw.edu.au:3000/coastal/nielsen
```shell
> cd lidar
> python generate_recession_tables.py

csv_to_shp.py
@@ -0,0 +1,95 @@
# Converts shoreline chainages to MGA coordinates
import os
import re
import sys
import ast
import json
import yaml
import argparse
import numpy as np
from scipy import interpolate
import pandas as pd
import matplotlib.pyplot as plt
from shapely.geometry import LineString
import geopandas as gpd
from tqdm import tqdm
sys.path.insert(0, '../lidar/')
from nielsen import Gridder # noqa
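# Gridder comes from the nielsen package linked in the readme; it is used
# below (via from_chainage) to convert a chainage along a profile back into
# easting/northing coordinates.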
MGA55 = 28355 # GDA94, MGA Zone 55
BEACH = 'Roches Beach'
# Load profile data
xlsx_path = '../lidar/Profiles 1 to 12 2019 DEM.xlsx'
workbook = pd.ExcelFile(xlsx_path)
with open('../lidar/settings.json', 'r') as f:
    sheets = json.loads(f.read())
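# settings.json maps each worksheet name to its block and profile identifiers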
# Create grids to convert chainages to eastings and northings
grids = {}
for s in sheets:
    names = ['Chainage', 'Easting', 'Northing', 'Elevation']
    p = workbook.parse(s, names=names)
    k = (sheets[s]['block'], sheets[s]['profile'])
    grids[k] = g = Gridder(chainage=p['Chainage'],
                           elevation=p['Elevation'],
                           easting=p['Easting'],
                           northing=p['Northing'])
input_dir = 'output_csv'
output_dir = 'output_shp'
master = gpd.GeoDataFrame(columns=['name', 'year', 'ep', 'type'])
# Load probabilistic recession chainages
file_list = [f for f in os.listdir(input_dir) if f.startswith(BEACH)]
df = pd.DataFrame()
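# Each CSV filename encodes the hazard year and recession type (ZSA or ZRFC),
# which are parsed out of the name below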
for f in file_list:
    info = re.search(r'(\d{4}) (Z\w+)', f).groups()
    data = pd.read_csv(os.path.join(input_dir, f))
    ep_cols = [c for c in data.columns if c.startswith('ep_')]
    for i, d in data.iterrows():
        row = {}
        # row['beach'] = BEACH
        row['block'] = d['block']
        row['profile'] = d['profile']
        row['year'] = int(info[0])
        row['type'] = info[1]
        for e in ep_cols:
            row['ep'] = e[3:]  # strip the 'ep_' prefix
            ch = d[e]
            g = grids[d['block'], d['profile']]  # Get correct gridder
            east, north = g.from_chainage(ch)
            row['easting'] = east.round(3)
            row['northing'] = north.round(3)
            df = df.append(row, ignore_index=True)
# Convert floats to int
for col in ['block', 'profile', 'year']:
    df[col] = df[col].astype(int)
df = df.set_index(['year', 'type', 'ep', 'block', 'profile']).sort_index()
years = df.index.unique(level='year')
types = df.index.unique(level='type')
eps = df.index.unique(level='ep')
lines = []
# Join the points of each (year, type, ep) combination into one hazard line
for y in years:
    for t in types:
        for e in eps:
            line = {}
            line['year'] = y
            line['type'] = t
            line['ep'] = str(e)
            line['geometry'] = LineString(df.loc[y, t, e].to_numpy())
            lines.append(line)
gdf = gpd.GeoDataFrame(lines).set_geometry('geometry')
gdf = gdf.set_crs(MGA55)
gdf.to_file(os.path.join(output_dir, 'hazard-lines.shp'),
            driver='ESRI Shapefile')
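For reference, a minimal sketch of how the resulting shapefile could be inspected with GeoPandas (this is not part of the commit, and assumes the script above has already been run so that `output_shp/hazard-lines.shp` exists):

```python
import geopandas as gpd

# Read the hazard lines written by csv_to_shp.py
gdf = gpd.read_file('output_shp/hazard-lines.shp')
print(gdf.crs)  # should correspond to EPSG:28355 (GDA94 / MGA zone 55)
print(gdf[['year', 'type', 'ep']].drop_duplicates())
```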