# -*- coding: utf-8 -*-
#==========================================================#
# Extract shorelines from Landsat images
#==========================================================#
# Initial settings
import os
import numpy as np
import matplotlib.pyplot as plt
import ee
import pdb
# other modules
from osgeo import gdal, ogr, osr
import pickle
import matplotlib.cm as cm
from pylab import ginput
from shapely.geometry import LineString
# image processing modules
import skimage.filters as filters
import skimage.exposure as exposure
import skimage.transform as transform
import sklearn.decomposition as decomposition
import skimage.measure as measure
import skimage.morphology as morphology
# machine learning modules
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler, Normalizer
from sklearn.externals import joblib # note: removed in scikit-learn >= 0.23, where 'import joblib' should be used instead
# import own modules
import functions.utils as utils
import functions.sds as sds
# some settings
np.seterr(all='ignore') # raise/ignore divisions by 0 and nans
plt.rcParams['axes.grid'] = True
plt.rcParams['figure.max_open_warning'] = 100
ee.Initialize()
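# note (assumption): ee.Initialize() expects the Earth Engine API to have been authenticated
# beforehand, e.g. by running 'earthengine authenticate' once on this machine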
# parameters
cloud_thresh = 0.5 # threshold for cloud cover
plot_bool = False # if you want the plots
min_contour_points = 100 # minimum number of points contained in each water line
output_epsg = 28356 # GDA94 / MGA Zone 56
buffer_size = 7 # radius (in pixels) of disk for buffer (pixel classification)
min_beach_size = 20 # number of pixels in a beach (pixel classification)
dist_ref = 100 # maximum distance (in metres, output CRS) from the reference points for a shoreline point to be kept
min_length_wl = 300 # minimum length (in metres, output CRS) for a contour to be kept as shoreline
# load metadata (timestamps and epsg code) for the collection
satname = 'L8'
sitename = 'NARRA'
#sitename = 'OLDBAR'
# Load metadata
filepath = os.path.join(os.getcwd(), 'data', satname, sitename)
with open(os.path.join(filepath, sitename + '_timestamps' + '.pkl'), 'rb') as f:
    timestamps = pickle.load(f)
with open(os.path.join(filepath, sitename + '_accuracy_georef' + '.pkl'), 'rb') as f:
    acc_georef = pickle.load(f)
with open(os.path.join(filepath, sitename + '_epsgcode' + '.pkl'), 'rb') as f:
    input_epsg = pickle.load(f)
with open(os.path.join(filepath, sitename + '_refpoints2' + '.pkl'), 'rb') as f:
    refpoints = pickle.load(f)
# sort timestamps and georef accuracy (downloaded images are sorted by date in directory)
timestamps_sorted = sorted(timestamps)
idx_sorted = sorted(range(len(timestamps)), key=timestamps.__getitem__)
acc_georef_sorted = [acc_georef[j] for j in idx_sorted]
# path to images
file_path_pan = os.path.join(os.getcwd(), 'data', satname, sitename, 'pan')
file_path_ms = os.path.join(os.getcwd(), 'data', satname, sitename, 'ms')
file_names_pan = sorted(os.listdir(file_path_pan)) # sort explicitly, os.listdir order is not guaranteed
file_names_ms = sorted(os.listdir(file_path_ms))   # filenames embed the acquisition date, so this matches timestamps_sorted
N = len(file_names_pan)
# initialise some variables
cloud_cover_ts = []
date_acquired_ts = []
acc_georef_ts = []
idx_skipped = []
idx_nocloud = []
t = []
shorelines = []
#%%
for i in range(N):
    # read pan image
    fn_pan = os.path.join(file_path_pan, file_names_pan[i])
    data = gdal.Open(fn_pan, gdal.GA_ReadOnly)
    georef = np.array(data.GetGeoTransform())
    bands = [data.GetRasterBand(k + 1).ReadAsArray() for k in range(data.RasterCount)]
    im_pan = np.stack(bands, 2)[:,:,0]
    nrows = im_pan.shape[0]
    ncols = im_pan.shape[1]
    # read ms image
    fn_ms = os.path.join(file_path_ms, file_names_ms[i])
    data = gdal.Open(fn_ms, gdal.GA_ReadOnly)
    bands = [data.GetRasterBand(k + 1).ReadAsArray() for k in range(data.RasterCount)]
    im_ms = np.stack(bands, 2)
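    # band order assumption (inferred from the indexing below, not enforced by this script):
    # im_ms channels are [blue, green, red, NIR, SWIR1, QA] for Landsat 8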
    # cloud mask
    im_qa = im_ms[:,:,5]
    cloud_mask = sds.create_cloud_mask(im_qa, satname, plot_bool)
    cloud_mask = transform.resize(cloud_mask, (im_pan.shape[0], im_pan.shape[1]),
                                  order=0, preserve_range=True,
                                  mode='constant').astype('bool_')
    # resize the image using bilinear interpolation (order 1)
    im_ms = transform.resize(im_ms, (im_pan.shape[0], im_pan.shape[1]),
                             order=1, preserve_range=True, mode='constant')
    # check if -inf or nan values and add to cloud mask
    im_inf = np.isin(im_ms[:,:,0], -np.inf)
    im_nan = np.isnan(im_ms[:,:,0])
    cloud_mask = np.logical_or(np.logical_or(cloud_mask, im_inf), im_nan)
    # calculate cloud cover and skip image if too high
    cloud_cover = np.sum(cloud_mask) / cloud_mask.size
    if cloud_cover > cloud_thresh:
        print('skip ' + str(i) + ' - cloudy (' + str(np.round(cloud_cover*100).astype(int)) + '%)')
        idx_skipped.append(i)
        continue
    idx_nocloud.append(i)
    # pansharpen rgb image
    im_ms_ps = sds.pansharpen(im_ms[:,:,[0,1,2]], im_pan, cloud_mask, plot_bool)
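    # For reference only, a generic principal-component pansharpening sketch (an assumption:
    # sds.pansharpen may implement a different scheme). The first PC of the RGB bands is
    # replaced by the pan band rescaled to the same mean/std, then the PCA is inverted:
    #   vec = im_ms[:,:,[0,1,2]].reshape(-1, 3)
    #   pca = decomposition.PCA(n_components=3).fit(vec)
    #   pcs = pca.transform(vec)
    #   pcs[:,0] = (im_pan.reshape(-1) - im_pan.mean()) / im_pan.std() * pcs[:,0].std() + pcs[:,0].mean()
    #   im_rgb_ps = pca.inverse_transform(pcs).reshape(nrows, ncols, 3)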
    # rescale pansharpened RGB for visualisation
    im_display = sds.rescale_image_intensity(im_ms_ps[:,:,[2,1,0]], cloud_mask, 100, False)
    # add down-sized bands for NIR and SWIR (since pansharpening is not possible)
    im_ms_ps = np.append(im_ms_ps, im_ms[:,:,[3,4]], axis=2)
    # classify image in 4 classes (sand, whitewater, water, other) with NN classifier
    im_classif, im_labels = sds.classify_image_NN(im_ms_ps, im_pan, cloud_mask, min_beach_size, plot_bool)
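    # assumed layout (inferred from the indexing below): im_classif is a single label image,
    # while im_labels stacks one boolean layer per class, with sand in layer 0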
    # # manually validate classification
    # pt_in = np.array(ginput(n=1, timeout=1000))
    # if pt_in[0][1] > nrows/2:
    #     print('skip ' + str(i) + ' - wrong classification')
    #     idx_skipped.append(i)
    #     continue
    # if there are no sand pixels, skip the image (maybe later fall back to the old detection method)
    if np.sum(im_labels[:,:,0]) == 0:
        print('skip ' + str(i) + ' - no sand')
        idx_skipped.append(i)
        continue
    # extract shorelines (new method)
    contours_wi, contours_mwi = sds.find_wl_contours2(im_ms_ps, im_labels, cloud_mask, buffer_size, plot_bool)
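    # hedged note: contours_wi / contours_mwi are assumed to be sub-pixel water-line contours
    # derived from a water index and a modified water index, returned as (row, col) arrays in
    # the style of skimage.measure.find_contours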
    plt.figure()
    im = np.copy(im_display)
    # define colours for plot
    colours = np.array([[1,128/255,0/255],[204/255,1,1],[0,0,204/255]])
    for k in range(0, im_labels.shape[2]):
        im[im_labels[:,:,k],0] = colours[k,0]
        im[im_labels[:,:,k],1] = colours[k,1]
        im[im_labels[:,:,k],2] = colours[k,2]
    plt.imshow(im)
    for k, contour in enumerate(contours_mwi):
        plt.plot(contour[:, 1], contour[:, 0], linewidth=2, color='k', linestyle='--')
    mng = plt.get_current_fig_manager()
    mng.window.showMaximized()
    plt.tight_layout()
    plt.draw()
    # manually validate detection: click in the top half of the figure to keep the image,
    # in the bottom half to skip it
    pt_in = np.array(ginput(n=1, timeout=1000))
    if pt_in[0][1] > nrows/2:
        print('skip ' + str(i) + ' - wrong detection')
        idx_skipped.append(i)
        continue
    # remove contour points that are around clouds (nan values)
    for k, contour in enumerate(contours_mwi):
        if np.any(np.isnan(contour)):
            index_nan = np.where(np.isnan(contour))[0]
            contours_mwi[k] = np.delete(contour, index_nan, axis=0) # write back, otherwise the list is unchanged
    # convert from pixels to world coordinates
    wl_coords = sds.convert_pix2world(contours_mwi, georef)
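    # the GDAL geotransform defines the standard affine mapping from pixel to projected
    # coordinates, which sds.convert_pix2world is assumed to apply to each (row, col) vertex:
    #   X = georef[0] + col*georef[1] + row*georef[2]
    #   Y = georef[3] + col*georef[4] + row*georef[5]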
    # convert to output epsg spatial reference
    wl = sds.convert_epsg(wl_coords, input_epsg, output_epsg)
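    # hedged sketch of the reprojection step, assuming sds.convert_epsg wraps osgeo.osr:
    #   src = osr.SpatialReference(); src.ImportFromEPSG(input_epsg)
    #   dst = osr.SpatialReference(); dst.ImportFromEPSG(output_epsg)
    #   ct = osr.CoordinateTransformation(src, dst)
    #   points_out = np.array(ct.TransformPoints(points_in.tolist()))[:,:2]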
    # remove contours whose length is below min_length_wl, as these are usually not shoreline
    wl_good = []
    for l, wls in enumerate(wl):
        coords = [(wls[k,0], wls[k,1]) for k in range(len(wls))]
        a = LineString(coords) # shapely LineString structure
        if a.length >= min_length_wl:
            wl_good.append(wls)
    # pre-process points (list of arrays to single array of points)
    x_points = np.array([])
    y_points = np.array([])
    for k in range(len(wl_good)):
        x_points = np.append(x_points, wl_good[k][:,0])
        y_points = np.append(y_points, wl_good[k][:,1])
    wl_good = np.transpose(np.array([x_points, y_points]))
    # only select points around Narrabeen beach (refpoints given)
    temp = np.zeros((len(wl_good))).astype(bool)
    for k in range(len(refpoints)):
        # keep a point if it lies within dist_ref of at least one reference point
        temp = np.logical_or(np.linalg.norm(wl_good - refpoints[k,[0,1]], axis=1) < dist_ref, temp)
    wl_final = wl_good[temp]
    plt.figure()
    plt.axis('equal')
    plt.plot(wl_final[:,0], wl_final[:,1], 'k.')
    plt.draw()
    # check if an image for that date already exists and keep the best in terms of cloud cover and georeferencing
    # (filenames are assumed to start with '<satname>_<sitename>_' followed by a 10-character date)
    date_acquired = file_names_pan[i][len(satname)+1+len(sitename)+1:len(satname)+1+len(sitename)+1+10]
    if date_acquired in date_acquired_ts:
        # find the index of the image that is repeated
        idx_samedate = utils.find_indices(date_acquired_ts, lambda e: e == date_acquired)
        idx_samedate = idx_samedate[0]
        # print('cloud cover ' + str(cloud_cover) + ' - ' + str(cloud_cover_ts[idx_samedate]))
        # print('acc georef ' + str(acc_georef_sorted[i]) + ' - ' + str(acc_georef_ts[idx_samedate]))
        # keep the image with less cloud cover or better georeferencing accuracy
        if cloud_cover < cloud_cover_ts[idx_samedate] - 0.01:
            skip = False
        elif acc_georef_sorted[i] < acc_georef_ts[idx_samedate]:
            skip = False
        else:
            skip = True
        if skip:
            print('skip ' + str(i) + ' - repeated')
            idx_skipped.append(i)
            continue
        else:
            del shorelines[idx_samedate]
            del t[idx_samedate]
            del cloud_cover_ts[idx_samedate]
            del date_acquired_ts[idx_samedate]
            print('keep ' + str(i) + ' - deleted ' + str(idx_samedate))
    # save data
    shorelines.append(wl_final)
    t.append(timestamps_sorted[i])
    cloud_cover_ts.append(cloud_cover)
    acc_georef_ts.append(acc_georef_sorted[i])
    date_acquired_ts.append(date_acquired)
output = {'t':t, 'shorelines':shorelines, 'cloud_cover':cloud_cover_ts, 'acc_georef':acc_georef_ts}
#with open(os.path.join(filepath, sitename + '_output2' + '.pkl'), 'wb') as f:
# pickle.dump(output, f)
#
#with open(os.path.join(filepath, sitename + '_skipped2' + '.pkl'), 'wb') as f:
# pickle.dump(idx_skipped, f)
#
#with open(os.path.join(filepath, sitename + '_idxnocloud2' + '.pkl'), 'wb') as f:
# pickle.dump(idx_nocloud, f)
# plt.figure()
# plt.axis('equal')
# plt.plot(refpoints[:,0], refpoints[:,1], 'ko')
# plt.plot(all_points[temp,0], all_points[temp,1], 'go')
# plt.plot(all_points[~temp,0], all_points[~temp,1], 'ro')
# plt.draw()
# extract shorelines (old method)
# im_ndwi = sds.nd_index(im_ms_ps[:,:,3], im_ms_ps[:,:,1], cloud_mask, plot_bool)
# wl_pix = sds.find_wl_contours(im_ndwi, cloud_mask, min_contour_points, plot_bool)
# plt.figure()
# plt.imshow(im_display)
# for i,contour in enumerate(contours_mwi): plt.plot(contour[:, 1], contour[:, 0], linewidth=3, color='k')
# for i,contour in enumerate(wl_pix): plt.plot(contour[:, 1], contour[:, 0], linestyle='--', linewidth=1, color='w')
# plt.draw()