add georeferencing accuracy to the metadata

development
kvos 7 years ago
parent 0e78aace0c
commit f3f13ec180
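
For context: GEOMETRIC_RMSE_MODEL, the property this commit starts recording, is a scene-level Landsat Collection 1 metadata field giving the RMSE of the geometric model fit, in metres. A minimal sketch of where the value lives (assuming an initialised Earth Engine session; variable names are illustrative):

import ee
ee.Initialize()
# read the georeferencing RMSE of one scene from the collection used below
img = ee.ImageCollection('LANDSAT/LC08/C01/T1_RT_TOA').first()
print('georeferencing RMSE (m):', img.get('GEOMETRIC_RMSE_MODEL').getInfo())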

@@ -48,17 +48,17 @@ def download_tif(image, polygon, bandsId, filepath):
 # select collection
 input_col = ee.ImageCollection('LANDSAT/LC08/C01/T1_RT_TOA')
 # location (Narrabeen-Collaroy beach)
-#polygon = [[[151.301454, -33.700754],
-#            [151.311453, -33.702075],
-#            [151.307237, -33.739761],
-#            [151.294220, -33.736329],
-#            [151.301454, -33.700754]]];
+polygon = [[[151.301454, -33.700754],
+            [151.311453, -33.702075],
+            [151.307237, -33.739761],
+            [151.294220, -33.736329],
+            [151.301454, -33.700754]]];
 # location (Oldbar beach)
-polygon = [[[152.664508, -31.896163],
-            [152.665827, -31.897112],
-            [152.631516, -31.924846],
-            [152.629285, -31.923362],
-            [152.664508, -31.896163]]]
+#polygon = [[[152.664508, -31.896163],
+#            [152.665827, -31.897112],
+#            [152.631516, -31.924846],
+#            [152.629285, -31.923362],
+#            [152.664508, -31.896163]]]
 # location (Oldbar inlet)
 #polygon = [[[152.676283, -31.866784],
 #            [152.709174, -31.869993],
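
Each polygon above is a single closed ring of [longitude, latitude] vertices, with the first vertex repeated at the end. A quick sanity check of an area of interest before filtering (a sketch, assuming an initialised Earth Engine session):

import ee
ee.Initialize()
aoi = ee.Geometry.Polygon([[[151.301454, -33.700754],
                            [151.311453, -33.702075],
                            [151.307237, -33.739761],
                            [151.294220, -33.736329],
                            [151.301454, -33.700754]]])
# area in km2; maxError=1 allows 1 m of error in the computation
print('AOI area (km2):', aoi.area(maxError=1).getInfo() / 1e6)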
@@ -68,17 +68,17 @@ polygon = [[[152.664508, -31.896163],
 # dates
 start_date = '2013-01-01'
-end_date = '2018-12-31'
+end_date = '2018-03-25'
 # filter by location
-flt_col = input_col.filterBounds(ee.Geometry.Polygon(polygon))#.filterDate(start_date, end_date)
+flt_col = input_col.filterBounds(ee.Geometry.Polygon(polygon)).filterDate(start_date, end_date)
 n_img = flt_col.size().getInfo()
 print('Number of images covering the area:', n_img)
 im_all = flt_col.getInfo().get('features')
 satname = 'L8'
-#sitename = 'NARRA'
-sitename = 'OLDBAR'
+sitename = 'NARRA'
+#sitename = 'OLDBAR'
 suffix = '.tif'
 filepath = os.path.join(os.getcwd(), 'data', satname, sitename)
 filepath_pan = os.path.join(filepath, 'pan')
@@ -87,6 +87,7 @@ filepath_ms = os.path.join(filepath, 'ms')
 all_names_pan = []
 all_names_ms = []
 timestamps = []
+acc_georef = []
 # loop through all images
 for i in range(n_img):
     # find each image in ee database
@@ -98,6 +99,7 @@ for i in range(n_img):
     im_timestamp = datetime.fromtimestamp(t/1000, tz=pytz.utc)
     timestamps.append(im_timestamp)
     im_epsg = int(im_dic['bands'][0]['crs'][5:])
+    acc_georef.append(im_dic['properties']['GEOMETRIC_RMSE_MODEL'])
     # delete dimensions key from dictionary, otherwise the entire image is extracted
     for j in range(len(im_bands)): del im_bands[j]['dimensions']
@@ -113,14 +115,15 @@ for i in range(n_img):
     filename_ms = satname + '_' + sitename + '_' + im_date + '_ms' + '_r' + suffix
     all_names_pan.append(filename_pan)
-    local_data_pan = download_tif(im, polygon, pan_band, filepath_pan)
-    os.rename(local_data_pan, os.path.join(filepath_pan, filename_pan))
-    local_data_ms = download_tif(im, polygon, ms_bands, filepath_ms)
-    os.rename(local_data_ms, os.path.join(filepath_ms, filename_ms))
+#    local_data_pan = download_tif(im, polygon, pan_band, filepath_pan)
+#    os.rename(local_data_pan, os.path.join(filepath_pan, filename_pan))
+#    local_data_ms = download_tif(im, polygon, ms_bands, filepath_ms)
+#    os.rename(local_data_ms, os.path.join(filepath_ms, filename_ms))
-with open(os.path.join(filepath, sitename + '_timestamps' + '.pkl'), 'wb') as f:
-    pickle.dump(timestamps, f)
-with open(os.path.join(filepath, sitename + '_epsgcode' + '.pkl'), 'wb') as f:
-    pickle.dump(im_epsg, f)
+#with open(os.path.join(filepath, sitename + '_timestamps' + '.pkl'), 'wb') as f:
+#    pickle.dump(timestamps, f)
+#with open(os.path.join(filepath, sitename + '_epsgcode' + '.pkl'), 'wb') as f:
+#    pickle.dump(im_epsg, f)
+with open(os.path.join(filepath, sitename + '_accuracy_georef' + '.pkl'), 'wb') as f:
+    pickle.dump(acc_georef, f)
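
download_tif, the helper whose calls are commented out above, is defined at the top of this file but not shown in the diff. For orientation, a plausible shape for such a helper, sketched here with the public getDownloadURL API rather than the repository's actual implementation:

import zipfile
import urllib.request
import ee

def download_tif_sketch(image, polygon, bandsId, filepath):
    # request a zipped, single-file GeoTIFF of the selected bands over the polygon
    url = ee.Image(image).getDownloadURL({
        'name': 'data',
        'bands': bandsId,
        'region': polygon,
        'filePerBand': False})
    local_zip, _ = urllib.request.urlretrieve(url)
    with zipfile.ZipFile(local_zip) as z:
        return z.extract('data.tif', filepath)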

@@ -43,16 +43,23 @@ output_epsg = 28356 # GDA94 / MGA Zone 56
 # load metadata (timestamps, epsg code and georeferencing accuracy) for the collection
 satname = 'L8'
-#sitename = 'NARRA'
-sitename = 'OLDBAR'
+sitename = 'NARRA'
+#sitename = 'OLDBAR'
 # Load metadata
 filepath = os.path.join(os.getcwd(), 'data', satname, sitename)
 with open(os.path.join(filepath, sitename + '_timestamps' + '.pkl'), 'rb') as f:
     timestamps = pickle.load(f)
-timestamps_sorted = sorted(timestamps) # sort timestamps since images are sorted in directory
+with open(os.path.join(filepath, sitename + '_accuracy_georef' + '.pkl'), 'rb') as f:
+    acc_georef = pickle.load(f)
 with open(os.path.join(filepath, sitename + '_epsgcode' + '.pkl'), 'rb') as f:
     input_epsg = pickle.load(f)
 with open(os.path.join(filepath, sitename + '_refpoints' + '.pkl'), 'rb') as f:
     refpoints = pickle.load(f)
+# sort timestamps and georef accuracy (downloaded images are sorted by date in directory)
+timestamps_sorted = sorted(timestamps)
+idx_sorted = sorted(range(len(timestamps)), key=timestamps.__getitem__)
+acc_georef_sorted = [acc_georef[j] for j in idx_sorted]
 # path to images
 file_path_pan = os.path.join(os.getcwd(), 'data', satname, sitename, 'pan')
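
The three added lines implement the usual "sort one list by the order of another" idiom: the image files on disk are read in date order, while the pickled metadata is in collection order. In isolation, with hypothetical values:

timestamps = [2018, 2013, 2015]   # collection order, as pickled
acc_georef = [7.2, 5.1, 6.4]      # matching RMSE values
idx_sorted = sorted(range(len(timestamps)), key=timestamps.__getitem__)
acc_georef_sorted = [acc_georef[j] for j in idx_sorted]  # [5.1, 6.4, 7.2]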
@@ -64,6 +71,7 @@ N = len(file_names_pan)
 # initialise some variables
 cloud_cover_ts = []
 date_acquired_ts = []
+acc_georef_ts = []
 idx_skipped = []
 idx_nocloud = []
 t = []
@@ -96,17 +104,26 @@ for i in range(N):
     cloud_mask = np.logical_or(np.logical_or(cloud_mask, im_inf), im_nan)
     cloud_cover = sum(sum(cloud_mask.astype(int)))/(cloud_mask.shape[0]*cloud_mask.shape[1])
     if cloud_cover > cloud_thresh:
-        print('skipped cloud ' + str(i))
+        print('skip ' + str(i) + ' - cloudy (' + str(cloud_cover) + ')')
         idx_skipped.append(i)
         continue
+    idx_nocloud.append(i)
-    # check if image for that date is already present and keep the one with less clouds
+    # check if image for that date is already present
    if file_names_pan[i][len(satname)+1+len(sitename)+1:len(satname)+1+len(sitename)+1+10] in date_acquired_ts:
         # find the index of the image that is repeated
         idx_samedate = utils.find_indices(date_acquired_ts, lambda e : e == file_names_pan[i][9:19])
         idx_samedate = idx_samedate[0]
-        print(str(cloud_cover) + ' - ' + str(cloud_cover_ts[idx_samedate]))
-        if cloud_cover >= cloud_cover_ts[idx_samedate]:
-            print('skipped double ' + str(i))
+        print('cloud cover ' + str(cloud_cover) + ' - ' + str(cloud_cover_ts[idx_samedate]))
+        print('acc georef ' + str(acc_georef_sorted[i]) + ' - ' + str(acc_georef_ts[idx_samedate]))
+        # keep the image with lower cloud cover or better georeferencing accuracy
+        if cloud_cover < cloud_cover_ts[idx_samedate] - 0.01:
+            skip = False
+        elif acc_georef_sorted[i] < acc_georef_ts[idx_samedate]:
+            skip = False
+        else:
+            skip = True
+        if skip:
+            print('skip ' + str(i) + ' - repeated')
             idx_skipped.append(i)
             continue
         else:
@@ -114,7 +131,8 @@ for i in range(N):
             del t[idx_samedate]
             del cloud_cover_ts[idx_samedate]
             del date_acquired_ts[idx_samedate]
-            print('deleted ' + str(idx_samedate))
+            del acc_georef_ts[idx_samedate]
+            print('keep ' + str(i) + ' - deleted ' + str(idx_samedate))
     # rescale intensities
     im_ms = sds.rescale_image_intensity(im_ms, cloud_mask, prob_high, plot_bool)
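
Taken together, the two hunks above replace the old "keep the less cloudy duplicate" rule with a two-level tie-break: the new image wins only if it is meaningfully less cloudy (more than 1% less cover); otherwise the smaller georeferencing RMSE wins. The rule in isolation, as a sketch (the skip flag above is simply its negation):

def keep_new_image(cloud_new, cloud_old, rmse_new, rmse_old, tol=0.01):
    # clearly lower cloud cover wins; otherwise better georeferencing wins
    if cloud_new < cloud_old - tol:
        return True
    return rmse_new < rmse_old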
@@ -159,7 +177,7 @@ for i in range(N):
     # click on the left image to discard, otherwise on the closest centroid in the right image
     pt_in = np.array(ginput(n=1, timeout=1000))
     if pt_in[0][0] < 10000:
-        print('skipped manual ' + str(i))
+        print('skip ' + str(i) + ' - manual')
         idx_skipped.append(i)
         continue
     # get contour that was selected (click closest to centroid)
@@ -167,6 +185,7 @@ for i in range(N):
     shorelines.append(wl[np.argmin(dist_centroid)])
     t.append(timestamps_sorted[i])
     cloud_cover_ts.append(cloud_cover)
+    acc_georef_ts.append(acc_georef_sorted[i])
     date_acquired_ts.append(file_names_pan[i][9:19])
@@ -176,13 +195,13 @@ for i in range(N):
 #    plt.plot(shorelines[j][:,0], shorelines[j][:,1])
 #plt.draw()
-output = {'t':t, 'shorelines':shorelines, 'cloud_cover':cloud_cover_ts}
-with open(os.path.join(filepath, sitename + '_output' + '.pkl'), 'wb') as f:
-    pickle.dump(output, f)
-with open(os.path.join(filepath, sitename + '_skipped' + '.pkl'), 'wb') as f:
-    pickle.dump(idx_skipped, f)
+output = {'t':t, 'shorelines':shorelines, 'cloud_cover':cloud_cover_ts, 'acc_georef':acc_georef_ts}
 with open(os.path.join(filepath, sitename + '_idxnocloud' + '.pkl'), 'wb') as f:
     pickle.dump(idx_nocloud, f)
+#with open(os.path.join(filepath, sitename + '_output' + '.pkl'), 'wb') as f:
+#    pickle.dump(output, f)
+#
+#with open(os.path.join(filepath, sitename + '_skipped' + '.pkl'), 'wb') as f:
+#    pickle.dump(idx_skipped, f)
+#
+#with open(os.path.join(filepath, sitename + '_idxnocloud' + '.pkl'), 'wb') as f:
+#    pickle.dump(idx_nocloud, f)
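
Once the output dump is re-enabled, the pickle carries the georeferencing accuracy alongside each shoreline. Reading it back (a sketch, assuming the dump above was run with the same filepath and sitename):

import os
import pickle

with open(os.path.join(filepath, sitename + '_output.pkl'), 'rb') as f:
    output = pickle.load(f)
for date, rmse in zip(output['t'], output['acc_georef']):
    print(date, '-> georef RMSE', rmse, 'm')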