@@ -43,16 +43,23 @@ output_epsg = 28356 # GDA94 / MGA Zone 56
 # load metadata (timestamps and epsg code) for the collection
 satname = 'L8'
-#sitename = 'NARRA'
-sitename = 'OLDBAR'
+sitename = 'NARRA'
+#sitename = 'OLDBAR'
 
+# Load metadata
 filepath = os.path.join(os.getcwd(), 'data', satname, sitename)
 with open(os.path.join(filepath, sitename + '_timestamps' + '.pkl'), 'rb') as f:
     timestamps = pickle.load(f)
-timestamps_sorted = sorted(timestamps) # sort timestamps since images are sorted in directory
+with open(os.path.join(filepath, sitename + '_accuracy_georef' + '.pkl'), 'rb') as f:
+    acc_georef = pickle.load(f)
 with open(os.path.join(filepath, sitename + '_epsgcode' + '.pkl'), 'rb') as f:
     input_epsg = pickle.load(f)
 with open(os.path.join(filepath, sitename + '_refpoints' + '.pkl'), 'rb') as f:
     refpoints = pickle.load(f)
+
+# sort timestamps and georef accuracy (downloaded images are sorted by date in directory)
+timestamps_sorted = sorted(timestamps)
+idx_sorted = sorted(range(len(timestamps)), key=timestamps.__getitem__)
+acc_georef_sorted = [acc_georef[j] for j in idx_sorted]
 
 # path to images
 file_path_pan = os.path.join(os.getcwd(), 'data', satname, sitename, 'pan')
@@ -64,6 +71,7 @@ N = len(file_names_pan)
 # initialise some variables
 cloud_cover_ts = []
 date_acquired_ts = []
+acc_georef_ts = []
 idx_skipped = []
 idx_nocloud = []
 t = []
@@ -96,17 +104,26 @@ for i in range(N):
     cloud_mask = np.logical_or(np.logical_or(cloud_mask, im_inf), im_nan)
     cloud_cover = sum(sum(cloud_mask.astype(int)))/(cloud_mask.shape[0]*cloud_mask.shape[1])
     if cloud_cover > cloud_thresh:
-        print('skipped cloud ' + str(i))
+        print('skip ' + str(i) + ' - cloudy (' + str(cloud_cover) + ')')
         idx_skipped.append(i)
         continue
     idx_nocloud.append(i)
-    # check if image for that date is already present and keep the one with less clouds
+    # check if image for that date is already present
     if file_names_pan[i][len(satname)+1+len(sitename)+1:len(satname)+1+len(sitename)+1+10] in date_acquired_ts:
+        # find the index of the image that is repeated
         idx_samedate = utils.find_indices(date_acquired_ts, lambda e : e == file_names_pan[i][9:19])
         idx_samedate = idx_samedate[0]
-        print(str(cloud_cover) + ' - ' + str(cloud_cover_ts[idx_samedate]))
-        if cloud_cover >= cloud_cover_ts[idx_samedate]:
-            print('skipped double ' + str(i))
+        print('cloud cover ' + str(cloud_cover) + ' - ' + str(cloud_cover_ts[idx_samedate]))
+        print('acc georef ' + str(acc_georef_sorted[i]) + ' - ' + str(acc_georef_ts[idx_samedate]))
+        # keep image with less cloud cover or best georeferencing accuracy
+        if cloud_cover < cloud_cover_ts[idx_samedate] - 0.01:
+            skip = False
+        elif acc_georef_sorted[i] < acc_georef_ts[idx_samedate]:
+            skip = False
+        else:
+            skip = True
+        if skip:
+            print('skip ' + str(i) + ' - repeated')
             idx_skipped.append(i)
             continue
         else:
@@ -114,7 +131,8 @@ for i in range(N):
             del t[idx_samedate]
             del cloud_cover_ts[idx_samedate]
             del date_acquired_ts[idx_samedate]
-            print('deleted ' + str(idx_samedate))
+            del acc_georef_ts[idx_samedate]
+            print('keep ' + str(i) + ' - deleted ' + str(idx_samedate))
 
     # rescale intensities
     im_ms = sds.rescale_image_intensity(im_ms, cloud_mask, prob_high, plot_bool)
@@ -159,7 +177,7 @@ for i in range(N):
     # click on the left image to discard, otherwise on the closest centroid in the right image
     pt_in = np.array(ginput(n=1, timeout=1000))
     if pt_in[0][0] < 10000:
-        print('skipped manual ' + str(i))
+        print('skip ' + str(i) + ' - manual')
         idx_skipped.append(i)
         continue
     # get contour that was selected (click closest to centroid)
@@ -167,6 +185,7 @@ for i in range(N):
     shorelines.append(wl[np.argmin(dist_centroid)])
     t.append(timestamps_sorted[i])
     cloud_cover_ts.append(cloud_cover)
+    acc_georef_ts.append(acc_georef_sorted[i])
     date_acquired_ts.append(file_names_pan[i][9:19])
 
@@ -176,13 +195,13 @@ for i in range(N):
     # plt.plot(shorelines[j][:,0], shorelines[j][:,1])
     #plt.draw()
 
-output = {'t':t, 'shorelines':shorelines, 'cloud_cover':cloud_cover_ts}
+output = {'t':t, 'shorelines':shorelines, 'cloud_cover':cloud_cover_ts, 'acc_georef':acc_georef_ts}
 
-with open(os.path.join(filepath, sitename + '_output' + '.pkl'), 'wb') as f:
-    pickle.dump(output, f)
-
-with open(os.path.join(filepath, sitename + '_skipped' + '.pkl'), 'wb') as f:
-    pickle.dump(idx_skipped, f)
-
-with open(os.path.join(filepath, sitename + '_idxnocloud' + '.pkl'), 'wb') as f:
-    pickle.dump(idx_nocloud, f)
+#with open(os.path.join(filepath, sitename + '_output' + '.pkl'), 'wb') as f:
+#    pickle.dump(output, f)
+#
+#with open(os.path.join(filepath, sitename + '_skipped' + '.pkl'), 'wb') as f:
+#    pickle.dump(idx_skipped, f)
+#
+#with open(os.path.join(filepath, sitename + '_idxnocloud' + '.pkl'), 'wb') as f:
+#    pickle.dump(idx_nocloud, f)